/* $Id$ */
/** @file
* PGM - Page Manager and Monitor - All context code.
*/
/*
* Copyright (C) 2006-2007 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include "PGMInternal.h"
/**
* Maps a range of physical pages at a given virtual address
* in the guest context.
*
* The GC virtual address range must be within an existing mapping.
*
* @returns VBox status code.
* @param pVM The virtual machine.
* @param GCPtr Where to map the page(s). Must be page aligned.
* @param HCPhys Start of the range of physical pages. Must be page aligned.
* @param cbPages Number of bytes to map. Must be page aligned.
* @param fFlags Page flags (X86_PTE_*).
*/
VMMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
{
    AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));

    /*
     * Validate input.
     */
    AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
    AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#x\n", fFlags));

    /* hypervisor defaults */
    if (!fFlags)
        fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;
    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        if (GCPtr - (RTGCUINTPTR)pCur->GCPtr < pCur->cb)
        {
            if (GCPtr + cbPages - 1 > (RTGCUINTPTR)pCur->GCPtrLast)
            {
                AssertMsgFailed(("Invalid range!!\n"));
                return VERR_INVALID_PARAMETER;
            }

            /*
             * Setup PTE.
             */
            X86PTEPAE Pte;
            Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);
            /*
             * Update the page tables.
             */
            for (;;)
            {
                RTGCUINTPTR    off     = GCPtr - (RTGCUINTPTR)pCur->GCPtr;
                const unsigned iPT     = off >> X86_PD_SHIFT;
                const unsigned iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;

                /* 32-bit */
                pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u;      /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */

                /* pae */
                pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512].u = Pte.u;

                /* next */
                cbPages -= PAGE_SIZE;
                if (!cbPages)
                    break;
                GCPtr += PAGE_SIZE;
                Pte.u += PAGE_SIZE;
            }

            return VINF_SUCCESS;
        }

        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("GCPtr=%#x was not inside any mapping!\n", GCPtr));
    return VERR_INVALID_PARAMETER;
}
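
/*
 * Illustrative usage sketch (the names GCPtrHyp and HCPhysPage are placeholders,
 * not part of the API): map a single page with the hypervisor default flags,
 * which PGMMap() substitutes (X86_PTE_P | X86_PTE_A | X86_PTE_D) when fFlags
 * is zero.
 *
 * @code
 *     int rc = PGMMap(pVM, GCPtrHyp, HCPhysPage, PAGE_SIZE, 0);
 *     AssertRC(rc);
 * @endcode
 */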
/**
* Sets (replaces) the page flags for a range of pages in a mapping.
*
* @returns VBox status.
* @param pVM VM handle.
* @param GCPtr Virtual address of the first page in the range.
* @param cb Size (in bytes) of the range to apply the modification to.
* @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
*/
VMMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags)
{
    return PGMMapModifyPage(pVM, GCPtr, cb, fFlags, 0);
}
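
/*
 * Design note: an AND mask of zero clears every old flag before the OR mask is
 * applied, so "set" is simply "modify" with fMask = 0.  The two calls below are
 * therefore equivalent (GCPtrHyp is a placeholder address):
 *
 * @code
 *     PGMMapSetPage(pVM, GCPtrHyp, PAGE_SIZE, X86_PTE_P | X86_PTE_RW);
 *     PGMMapModifyPage(pVM, GCPtrHyp, PAGE_SIZE, X86_PTE_P | X86_PTE_RW, 0);
 * @endcode
 */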
/**
* Modify page flags for a range of pages in a mapping.
*
* The existing flags are ANDed with the fMask and ORed with the fFlags.
*
* @returns VBox status code.
* @param pVM VM handle.
* @param GCPtr Virtual address of the first page in the range.
* @param cb Size (in bytes) of the range to apply the modification to.
* @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
* @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
*/
VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#x\n", fFlags));
    Assert(cb);

    /*
     * Align the input.
     */
    cb     += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb      = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr   = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
        if (off < pCur->cb)
        {
            AssertMsgReturn(off + cb <= pCur->cb,
                            ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
                             GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast),
                            VERR_INVALID_PARAMETER);

            /*
             * Perform the requested operation.
             */
            while (cb > 0)
            {
                unsigned iPT  = off >> X86_PD_SHIFT;
                unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
                while (cb > 0 && iPTE < RT_ELEMENTS(pCur->aPTs[iPT].CTX_SUFF(pPT)->a))
                {
                    /* 32-Bit */
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u &= fMask | X86_PTE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u |= fFlags & ~X86_PTE_PG_MASK;

                    /* PAE */
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u &= fMask | X86_PTE_PAE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u |= fFlags & ~X86_PTE_PAE_PG_MASK;

                    /* invalidate TLB entry */
                    PGM_INVL_PG((RTGCUINTPTR)pCur->GCPtr + off);

                    /* next */
                    iPTE++;
                    cb  -= PAGE_SIZE;
                    off += PAGE_SIZE;
                }
            }
            return VINF_SUCCESS;
        }

        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("Page range %#x LB%#x not found!\n", GCPtr, cb));
    return VERR_INVALID_PARAMETER;
}
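
/*
 * A minimal standalone sketch of the AND/OR update above (illustrative values):
 * starting from a present read/write PTE, pass fFlags = 0 and fMask = ~X86_PTE_RW
 * to make the page read-only; the physical address bits are protected by OR-ing
 * the page mask into fMask and masking it out of fFlags.
 *
 * @code
 *     uint64_t u      = X86_PTE_P | X86_PTE_RW | UINT64_C(0x00000000feed1000);
 *     uint64_t fFlags = 0;                          // no flags to set
 *     uint64_t fMask  = ~(uint64_t)X86_PTE_RW;      // clear only RW
 *     u &= fMask | X86_PTE_PAE_PG_MASK;             // keep the address, drop RW
 *     u |= fFlags & ~X86_PTE_PAE_PG_MASK;           // OR in the new flags
 *     // u is now X86_PTE_P | 0xfeed1000: present, read-only.
 * @endcode
 */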
#ifndef IN_RING0
/**
* Sets all PDEs involved with the mapping in the shadow page table.
*
* @param pVM The VM handle.
* @param pMap Pointer to the mapping in question.
* @param iNewPDE The index of the 32-bit PDE corresponding to the base of the mapping.
*/
void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
{
    Log4(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    if (!pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        return;    /* too early */
    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
    Assert(enmShadowMode <= PGMMODE_PAE_NX);

    /*
     * Insert the page tables into the shadow page directories.
     */
    unsigned i = pMap->cPTs;
    iNewPDE += i;
    while (i-- > 0)
    {
        iNewPDE--;
        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
                AssertFatal(pShw32BitPd);
#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShw32BitPd);
#endif
                /* Free any previous user, unless it's us. */
                Assert(   (pShw32BitPd->a[iNewPDE].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
                       || (pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPT);
                if ((pShw32BitPd->a[iNewPDE].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) == X86_PDE_P)
                    pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);

                /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags. */
                pShw32BitPd->a[iNewPDE].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
                                          | (uint32_t)pMap->aPTs[i].HCPhysPT;
#ifdef IN_RC
                /* Unlock dynamic mappings again. */
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShw32BitPd);
#endif
                break;
            }
            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const unsigned iPdPt    = iNewPDE / 256;
                unsigned       iPaePde  = iNewPDE * 2 % 512;
                PX86PDPT       pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
                Assert(pShwPdpt);
#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmShwSyncPaePDPtr. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShwPdpt);
#endif
                /*
                 * Get the shadow PD.
                 * If no PD, sync it (PAE guest) or fake (not present or 32-bit guest).
                 * Note! The RW, US and A bits are reserved for PAE PDPTEs. Setting the
                 *       accessed bit causes invalid VT-x guest state errors.
                 */
                PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (RTGCPTR32)(iPdPt << X86_PDPT_SHIFT));
                if (!pShwPaePd)
                {
                    X86PDPE GstPdpe;
                    if (PGMGetGuestMode(pVM) < PGMMODE_PAE)
                        GstPdpe.u = X86_PDPE_P;
                    else
                    {
                        PX86PDPE pGstPdpe = pgmGstGetPaePDPEPtr(&pVM->pgm.s, (RTGCPTR32)(iPdPt << X86_PDPT_SHIFT));
                        if (pGstPdpe)
                            GstPdpe = *pGstPdpe;
                        else
                            GstPdpe.u = X86_PDPE_P;
                    }
                    int rc = pgmShwSyncPaePDPtr(pVM, (RTGCPTR32)(iPdPt << X86_PDPT_SHIFT), &GstPdpe, &pShwPaePd);
                    AssertFatalRC(rc);
                    AssertFatal(pShwPaePd);
                }
#ifdef IN_RC    /* Lock mapping to prevent it from being reused during pgmPoolFree. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShwPaePd);
#endif
                /*
                 * Mark the page as locked; disallow flushing.
                 */
                PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePd);
                if (!pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                    pgmPoolLockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
#ifdef VBOX_STRICT
                else if (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING)
                {
                    Assert(PGMGetGuestMode(pVM) >= PGMMODE_PAE); /** @todo We may hit this during reset, will fix later. */
                }
#endif

                /*
                 * Insert our first PT, freeing anything we might be replacing unless it's a mapping (i.e. us).
                 */
                Assert(   (pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
                       || (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0);
                if ((pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) == X86_PDE_P)
                {
                    pgmPoolFree(pVM, pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPaePde);
                }
                pShwPaePd->a[iPaePde].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
                                        | pMap->aPTs[i].HCPhysPaePT0;

                /* 2nd 2 MB PDE of the 4 MB region, same as above. */
                iPaePde++;
                AssertFatal(iPaePde < 512);
                Assert(   (pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) != (X86_PDE_P | PGM_PDFLAGS_MAPPING)
                       || (pShwPaePd->a[iPaePde].u & X86_PDE_PAE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1);
                if ((pShwPaePd->a[iPaePde].u & (X86_PDE_P | PGM_PDFLAGS_MAPPING)) == X86_PDE_P)
                    pgmPoolFree(pVM, pShwPaePd->a[iPaePde].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPaePde);
                pShwPaePd->a[iPaePde].u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US
                                        | pMap->aPTs[i].HCPhysPaePT1;

                /*
                 * Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode)
                 */
                if (PGMGetGuestMode(pVM) == PGMMODE_PAE)
                    pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;

#ifdef IN_RC
                /* Unlock dynamic mappings again. */
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPaePd);
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPdpt);
#endif
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}
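
/*
 * A standalone sketch of the PAE index arithmetic used above: a 32-bit PDE
 * covers 4 MB while a PAE PDE covers 2 MB, so the 32-bit PDE index iPDE maps to
 * PDPT entry iPDE / 256 and to the pair of PAE PDEs starting at (iPDE * 2) % 512
 * (hence the two physical PTs, HCPhysPaePT0 and HCPhysPaePT1, per mapping PT).
 *
 * @code
 *     unsigned iPDE    = 0x3ff;            // last 4 MB slot of the 32-bit space
 *     unsigned iPdPt   = iPDE / 256;       // == 3, the last of the 4 PDPT entries
 *     unsigned iPaePde = iPDE * 2 % 512;   // == 510, so PAE PDEs 510 and 511
 * @endcode
 */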
/**
* Clears all PDEs involved with the mapping in the shadow page table.
*
* @param pVM The VM handle.
* @param pShwPageCR3 CR3 root page
* @param pMap Pointer to the mapping in question.
* @param iOldPDE The index of the 32-bit PDE corresponding to the base of the mapping.
* @param fDeactivateCR3 Set if it's pgmMapDeactivateCR3 calling.
*/
void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3)
{
Log(("pgmMapClearShadowPDEs: old pde %x (cPTs=%x) (mappings enabled %d) fDeactivateCR3=%RTbool\n", iOldPDE, pMap->cPTs, pgmMapAreMappingsEnabled(&pVM->pgm.s), fDeactivateCR3));
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    Assert(pShwPageCR3);

    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
    Assert(enmShadowMode <= PGMMODE_PAE_NX);

    /*
     * Get the current CR3's PAE PDPT so we can tell whether the old CR3 shares
     * page directories with it (only relevant when switching CR3).
     */
    PX86PDPT pCurrentShwPdpt = NULL;
    if (    PGMGetGuestMode(pVM) >= PGMMODE_PAE
        &&  pShwPageCR3 != pVM->pgm.s.CTX_SUFF(pShwPageCR3))
    {
        pCurrentShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
# ifdef IN_RC   /* Lock the dynamic mapping to prevent it from being reused while we work. */
        PGMDynLockHCPage(pVM, (uint8_t *)pCurrentShwPdpt);
# endif
    }

    unsigned i = pMap->cPTs;
    iOldPDE += i;
    while (i-- > 0)
    {
        iOldPDE--;
        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
                AssertFatal(pShw32BitPd);

                Assert(!pShw32BitPd->a[iOldPDE].n.u1Present || (pShw32BitPd->a[iOldPDE].u & PGM_PDFLAGS_MAPPING));
                pShw32BitPd->a[iOldPDE].u = 0;
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const unsigned iPdpt     = iOldPDE / 256;         /* iOldPDE * 2 / 512 */
                unsigned       iPaePde   = iOldPDE * 2 % 512;
                PX86PDPT       pShwPdpt  = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPageCR3);
                PX86PDPAE      pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pShwPdpt, (RTGCPTR32)(iPdpt << X86_PDPT_SHIFT));

                /*
                 * Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode)
                 */
                if (fDeactivateCR3)
                    pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
                else if (pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING)
                {
                    /* See if there are any other mappings here. This is suboptimal code. */
                    pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
                    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
                        for (unsigned iPT = 0; iPT < pCur->cPTs; iPT++)
                            if ((pCur->GCPtr >> X86_PDPT_SHIFT) == iPdpt)
                            {
                                pShwPdpt->a[iPdpt].u |= PGM_PLXFLAGS_MAPPING;
                                break;
                            }
                }

                /*
                 * If the page directory of the old CR3 is reused in the new one, then don't
                 * clear the hypervisor mappings.
                 */
                if (    pCurrentShwPdpt
                    &&  (pCurrentShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) == (pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK))
                {
                    Log(("pgmMapClearShadowPDEs: iPdpt=%d reused -> don't clear hypervisor mappings!\n", iPdpt));
                    break;
                }

                /*
                 * Clear the mappings in the PD.
                 */
                AssertFatal(pShwPaePd);
                Assert(!pShwPaePd->a[iPaePde].n.u1Present || (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPaePde].u = 0;
                iPaePde++;
                AssertFatal(iPaePde < 512);
                Assert(!pShwPaePd->a[iPaePde].n.u1Present || (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPaePde].u = 0;

                /*
                 * Unlock the shadow pool PD page if the PDPTE no longer holds any mappings.
                 */
                if (    fDeactivateCR3
                    ||  !(pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))
                {
                    PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
                    AssertFatal(pPoolPagePd);
                    if (pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                        pgmPoolUnlockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
                }
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
# ifdef IN_RC
    /* Unlock the dynamic mapping again if we locked it above. */
    if (pCurrentShwPdpt)
        PGMDynUnlockHCPage(pVM, (uint8_t *)pCurrentShwPdpt);
# endif
}
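
/*
 * Design note: pgmMapSetShadowPDEs() locks the shadow pool page that backs the
 * PAE page directory so the pool cannot flush it while a mapping lives there.
 * The unlock above is its counterpart; it is only performed once the PDPTE no
 * longer carries the mapping flag or the CR3 is being deactivated.
 */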
#endif /* !IN_RING0 */
#if defined(VBOX_STRICT) && !defined(IN_RING0)
/**
* Checks all PDEs involved with the mapping in the shadow page table.
*
* @param pVM The VM handle.
* @param pShwPageCR3 CR3 root page
* @param pMap Pointer to the mapping in question.
* @param iPDE The index of the 32-bit PDE corresponding to the base of the mapping.
*/
static void pgmMapCheckShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
{
    Assert(pShwPageCR3);

    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
    Assert(enmShadowMode <= PGMMODE_PAE_NX);

    unsigned i = pMap->cPTs;
    iPDE += i;
    while (i-- > 0)
    {
        iPDE--;
        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
                AssertFatal(pShw32BitPd);

                AssertMsg(pShw32BitPd->a[iPDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
                          ("Expected %x vs %x; iPDE=%#x %RGv %s\n",
                           pShw32BitPd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
                           iPDE, pMap->GCPtr, R3STRING(pMap->pszDesc)));
                break;
            }
            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const unsigned iPdpt     = iPDE / 256;
                unsigned       iPaePDE   = iPDE * 2 % 512;
                PX86PDPT       pShwPdpt  = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPageCR3);
                PX86PDPAE      pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pShwPdpt, (RTGCPTR32)(iPdpt << X86_PDPT_SHIFT));
                AssertFatal(pShwPaePd);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
                          ("Expected %RX64 vs %RX64; iPDE=%#x iPdpt=%#x iPaePDE=%#x %RGv %s\n",
                           pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
                           iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc)));

                iPaePDE++;
                AssertFatal(iPaePDE < 512);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
                          ("Expected %RX64 vs %RX64; iPDE=%#x iPdpt=%#x iPaePDE=%#x %RGv %s\n",
                           pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
                           iPDE, iPdpt, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc)));

                AssertMsg(pShwPdpt->a[iPdpt].u & PGM_PLXFLAGS_MAPPING,
                          ("%RX64; iPdpt=%#x iPDE=%#x iPaePDE=%#x %RGv %s\n",
                           pShwPdpt->a[iPdpt].u,
                           iPdpt, iPDE, iPaePDE, pMap->GCPtr, R3STRING(pMap->pszDesc)));
                break;
            }

            default:
                AssertFailed();
                break;
        }
}
}
/**
* Check the hypervisor mappings in the active CR3.
*
* @param pVM The virtual machine.
*/
VMMDECL(void) PGMMapCheck(PVM pVM)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapCheckShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
    }
}
#endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */
#ifndef IN_RING0
/**
* Apply the hypervisor mappings to the active CR3.
*
* @returns VBox status.
* @param pVM The virtual machine.
* @param pShwPageCR3 CR3 root page
*/
int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return VINF_SUCCESS;

    /* Note. A log flush (in RC) can cause problems when called from MapCR3 (inconsistent state will trigger assertions). */
    Log4(("pgmMapActivateCR3: fixed mappings=%d idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));

    Assert(pShwPageCR3 && pShwPageCR3 == pVM->pgm.s.CTX_SUFF(pShwPageCR3));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapSetShadowPDEs(pVM, pCur, iPDE);
    }
    return VINF_SUCCESS;
}
/**
* Remove the hypervisor mappings from the specified CR3
*
* @returns VBox status.
* @param pVM The virtual machine.
* @param pShwPageCR3 CR3 root page
*/
int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return VINF_SUCCESS;

    Log4(("pgmMapDeactivateCR3: fixed mappings=%d idxShwPageCR3=%#x\n", pVM->pgm.s.fMappingsFixed, pShwPageCR3 ? pShwPageCR3->idx : NIL_PGMPOOL_IDX));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE, true /*fDeactivateCR3*/);
    }
    return VINF_SUCCESS;
}
/**
* Checks guest PD for conflicts with VMM GC mappings.
*
* @returns true if conflict detected.
* @returns false if not.
* @param pVM The virtual machine.
*/
VMMDECL(bool) PGMMapHasConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return false;

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    /*
     * Iterate mappings.
     */
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT  = pCur->cPTs;
            while (iPT-- > 0)
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#endif
                    return true;
                }
        }
    }
else if ( enmGuestMode == PGMMODE_PAE
|| enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR  GCPtr = pCur->GCPtr;
            unsigned iPT   = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
                if (    Pde.n.u1Present
                    &&  (pVM->fRawR0Enabled || Pde.n.u1User))
                {
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
#endif
                    return true;
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
        }
    }
else
AssertFailed();
return false;
}
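
/*
 * A standalone sketch of the conflict test above (illustrative; the PDE layout
 * is reduced to the present and user bits, and the helper name is hypothetical):
 * a guest PDE conflicts with a hypervisor mapping when it is present and, unless
 * ring-0 raw mode is enabled, also user-accessible.
 *
 * @code
 *     static bool pgmSketchIsConflict(uint32_t uPde, bool fRawR0Enabled)
 *     {
 *         const uint32_t fPresent = UINT32_C(1) << 0;   // X86_PDE_P
 *         const uint32_t fUser    = UINT32_C(1) << 2;   // X86_PDE_US
 *         return (uPde & fPresent)
 *             && (fRawR0Enabled || (uPde & fUser));
 *     }
 * @endcode
 */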
/**
* Checks and resolves (ring 3 only) guest conflicts with VMM GC mappings.
*
* @returns VBox status.
* @param pVM The virtual machine.
*/
VMMDECL(int) PGMMapResolveConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
        Assert(pPD);

        /*
         * Iterate mappings.
         */
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; )
        {
            PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
            unsigned    iPDE  = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned    iPT   = pCur->cPTs;
            while (iPT-- > 0)
            {
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (   pVM->fRawR0Enabled
                         || pPD->a[iPDE + iPT].n.u1User))
                {
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, iPDE << X86_PD_SHIFT);
                    AssertRCReturn(rc, rc);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
            }
            pCur = pNext;
        }
    }
}
else if ( enmGuestMode == PGMMODE_PAE
|| enmGuestMode == PGMMODE_PAE_NX)
    {
        /*
         * Iterate mappings.
         */
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; )
        {
            PPGMMAPPING pNext = pCur->CTX_SUFF(pNext);
            RTGCPTR     GCPtr = pCur->GCPtr;
            unsigned    iPT   = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
                if (    Pde.n.u1Present
                    &&  (pVM->fRawR0Enabled || Pde.n.u1User))
                {
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
                    int rc = pgmR3SyncPTResolveConflictPAE(pVM, pCur, pCur->GCPtr);
                    AssertRCReturn(rc, rc);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
            pCur = pNext;
        }
    }
}
else
AssertFailed();
return VINF_SUCCESS;
}
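
/*
 * Design note: conflict handling is asymmetric by context.  In ring-3 the
 * conflict is resolved on the spot by relocating the mapping and the scan
 * continues with the next mapping; in RC and ring-0 the function only returns
 * VINF_PGM_SYNC_CR3 so that ring-3 gets the chance to do the relocation.
 */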
#endif /* !IN_RING0 */