/* $Id$ */
/** @file
* VBox - Page Manager, Guest Paging Template - All context code.
*/
/*
* Copyright (C) 2006-2007 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
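/*
* Note: this file is a template. It is included several times (e.g. from
* PGMAll.cpp), once for each guest paging mode, with PGM_GST_TYPE set to one
* of PGM_TYPE_REAL, PGM_TYPE_PROT, PGM_TYPE_32BIT, PGM_TYPE_PAE or
* PGM_TYPE_AMD64. The GST_* macros below resolve to the types, shifts and
* masks of the mode being instantiated.
*/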
#undef GSTPT
#undef PGSTPT
#undef GSTPTE
#undef PGSTPTE
#undef GSTPD
#undef PGSTPD
#undef GSTPDE
#undef PGSTPDE
#undef GST_BIG_PAGE_SIZE
#undef GST_BIG_PAGE_OFFSET_MASK
#undef GST_PDE_PG_MASK
#undef GST_PDE_BIG_PG_MASK
#undef GST_PD_SHIFT
#undef GST_PD_MASK
#undef GST_PTE_PG_MASK
#undef GST_PT_SHIFT
#undef GST_PT_MASK
#undef GST_TOTAL_PD_ENTRIES
#undef GST_CR3_PAGE_MASK
#undef GST_PDPE_ENTRIES
#undef GST_PDPT_SHIFT
#undef GST_PDPT_MASK
#undef GST_PDPE_PG_MASK
#undef GST_GET_PDE_BIG_PG_GCPHYS
#if PGM_GST_TYPE == PGM_TYPE_REAL \
|| PGM_GST_TYPE == PGM_TYPE_PROT
# define GSTPT SHWPT
# define PGSTPT PSHWPT
# define GSTPTE SHWPTE
# define PGSTPTE PSHWPTE
# define GSTPD SHWPD
# define PGSTPD PSHWPD
# define GSTPDE SHWPDE
# define PGSTPDE PSHWPDE
# define GST_PTE_PG_MASK SHW_PTE_PG_MASK
#elif PGM_GST_TYPE == PGM_TYPE_32BIT
# define GSTPT X86PT
# define PGSTPT PX86PT
# define GSTPTE X86PTE
# define PGSTPTE PX86PTE
# define GSTPD X86PD
# define PGSTPD PX86PD
# define GSTPDE X86PDE
# define PGSTPDE PX86PDE
# define GST_BIG_PAGE_SIZE X86_PAGE_4M_SIZE
# define GST_BIG_PAGE_OFFSET_MASK X86_PAGE_4M_OFFSET_MASK
# define GST_PDE_PG_MASK X86_PDE_PG_MASK
# define GST_PDE_BIG_PG_MASK X86_PDE4M_PG_MASK
# define GST_GET_PDE_BIG_PG_GCPHYS(PdeGst) pgmGstGet4MBPhysPage(&pVM->pgm.s, PdeGst)
# define GST_PD_SHIFT X86_PD_SHIFT
# define GST_PD_MASK X86_PD_MASK
# define GST_TOTAL_PD_ENTRIES X86_PG_ENTRIES
# define GST_PTE_PG_MASK X86_PTE_PG_MASK
# define GST_PT_SHIFT X86_PT_SHIFT
# define GST_PT_MASK X86_PT_MASK
# define GST_CR3_PAGE_MASK X86_CR3_PAGE_MASK
#elif PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64
# define GSTPT X86PTPAE
# define PGSTPT PX86PTPAE
# define GSTPTE X86PTEPAE
# define PGSTPTE PX86PTEPAE
# define GSTPD X86PDPAE
# define PGSTPD PX86PDPAE
# define GSTPDE X86PDEPAE
# define PGSTPDE PX86PDEPAE
# define GST_BIG_PAGE_SIZE X86_PAGE_2M_SIZE
# define GST_BIG_PAGE_OFFSET_MASK X86_PAGE_2M_OFFSET_MASK
# define GST_PDE_PG_MASK X86_PDE_PAE_PG_MASK_FULL
# define GST_PDE_BIG_PG_MASK X86_PDE2M_PAE_PG_MASK
# define GST_GET_PDE_BIG_PG_GCPHYS(PdeGst) (PdeGst.u & GST_PDE_BIG_PG_MASK)
# define GST_PD_SHIFT X86_PD_PAE_SHIFT
# define GST_PD_MASK X86_PD_PAE_MASK
# if PGM_GST_TYPE == PGM_TYPE_PAE
# define GST_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)
# define GST_PDPE_ENTRIES X86_PG_PAE_PDPE_ENTRIES
# define GST_PDPE_PG_MASK X86_PDPE_PG_MASK_FULL
# define GST_PDPT_SHIFT X86_PDPT_SHIFT
# define GST_PDPT_MASK X86_PDPT_MASK_PAE
# define GST_PTE_PG_MASK X86_PTE_PAE_PG_MASK
# define GST_CR3_PAGE_MASK X86_CR3_PAE_PAGE_MASK
# else
# define GST_TOTAL_PD_ENTRIES (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES)
# define GST_PDPE_ENTRIES X86_PG_AMD64_PDPE_ENTRIES
# define GST_PDPT_SHIFT X86_PDPT_SHIFT
# define GST_PDPE_PG_MASK X86_PDPE_PG_MASK_FULL
# define GST_PDPT_MASK X86_PDPT_MASK_AMD64
# define GST_PTE_PG_MASK X86_PTE_PAE_PG_MASK_FULL
# define GST_CR3_PAGE_MASK X86_CR3_AMD64_PAGE_MASK
# endif
# define GST_PT_SHIFT X86_PT_PAE_SHIFT
# define GST_PT_MASK X86_PT_PAE_MASK
#endif
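/*
* Illustration (not compiled): the macros above decompose a guest linear
* address. With PGM_GST_TYPE == PGM_TYPE_32BIT:
*
* unsigned iPD = (GCPtr >> GST_PD_SHIFT) & GST_PD_MASK; // bits 22-31, 1024 entries
* unsigned iPT = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK; // bits 12-21, 1024 entries
*
* For PAE and AMD64 the same expressions yield 512-entry indexes, with the
* PDPT (and PML4) levels indexed separately via GST_PDPT_SHIFT/GST_PDPT_MASK.
*/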
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
__BEGIN_DECLS
PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPDE);
PGM_GST_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, UnmapCR3)(PVM pVM);
PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM);
PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4);
#ifndef IN_RING3
PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
# if PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64
PGM_GST_DECL(int, WriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
# endif
#endif
__END_DECLS
/**
* Gets effective Guest OS page information.
*
* When GCPtr is in a big page, the function will return as if it was a normal
* 4KB page. If distinguishing between big and normal pages becomes necessary
* at a later point, a PGMGstGetPageEx() can be created for that purpose.
*
* @returns VBox status code.
* @param pVM VM Handle.
* @param GCPtr Guest Context virtual address of the page. Page aligned!
* @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
* @param pGCPhys Where to store the GC physical address of the page.
* This is page aligned.
*/
PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
{
#if PGM_GST_TYPE == PGM_TYPE_REAL \
|| PGM_GST_TYPE == PGM_TYPE_PROT
/*
* Fake it.
*/
if (pfFlags)
*pfFlags = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
if (pGCPhys)
*pGCPhys = GCPtr & PAGE_BASE_GC_MASK;
return VINF_SUCCESS;
#elif PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
/*
* Get the PDE.
*/
# if PGM_GST_TYPE == PGM_TYPE_32BIT
X86PDE Pde = pgmGstGet32bitPDE(&pVM->pgm.s, GCPtr);
# elif PGM_GST_TYPE == PGM_TYPE_PAE
/* pgmGstGetPaePDE will return 0 if the PDPTE is marked as not present.
* All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx). */
X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
PX86PML4E pPml4e;
X86PDPE Pdpe;
X86PDEPAE Pde = pgmGstGetLongModePDEEx(&pVM->pgm.s, GCPtr, &pPml4e, &Pdpe);
bool fNoExecuteBitValid = !!(CPUMGetGuestEFER(pVM) & MSR_K6_EFER_NXE);
Assert(pPml4e);
if (!pPml4e->n.u1Present || !Pdpe.n.u1Present)
return VERR_PAGE_TABLE_NOT_PRESENT;
/* Merge accessed, write, user and no-execute bits into the PDE. */
Pde.n.u1Accessed &= pPml4e->n.u1Accessed & Pdpe.lm.u1Accessed;
Pde.n.u1Write &= pPml4e->n.u1Write & Pdpe.lm.u1Write;
Pde.n.u1User &= pPml4e->n.u1User & Pdpe.lm.u1User;
Pde.n.u1NoExecute &= pPml4e->n.u1NoExecute & Pdpe.lm.u1NoExecute;
# endif
/*
* Lookup the page.
*/
if (!Pde.n.u1Present)
return VERR_PAGE_TABLE_NOT_PRESENT;
if ( !Pde.b.u1Size
# if PGM_GST_TYPE != PGM_TYPE_AMD64
|| !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE)
# endif
)
{
PGSTPT pPT;
int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
if (RT_FAILURE(rc))
return rc;
/*
* Get PT entry and check presence.
*/
const GSTPTE Pte = pPT->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
if (!Pte.n.u1Present)
return VERR_PAGE_NOT_PRESENT;
/*
* Store the result.
* RW and US flags depend on all levels (bitwise AND) - except for legacy PAE
* where the PDPE is simplified.
*/
if (pfFlags)
{
*pfFlags = (Pte.u & ~GST_PTE_PG_MASK)
& ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
/* The NX bit applies if it is set at any level, i.e. a bitwise OR of the PT and PD entries. */
if (fNoExecuteBitValid)
*pfFlags |= ((Pte.u | Pde.u) & X86_PTE_PAE_NX);
# endif
}
if (pGCPhys)
*pGCPhys = Pte.u & GST_PTE_PG_MASK;
}
else
{
/*
* Map the big page PDE to 4KB PTE flags and store the result.
*/
if (pfFlags)
{
*pfFlags = (Pde.u & ~(GST_PTE_PG_MASK | X86_PTE_PAT))
| ((Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT);
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
/* There is no PT here; the PDE alone determines the NX bit for a big page. */
if (fNoExecuteBitValid)
*pfFlags |= (Pde.u & X86_PTE_PAE_NX);
# endif
}
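/* (~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK) selects exactly the bits between
the 4KB and the big page boundary, i.e. the page aligned offset of the
4KB page within the big page. */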
if (pGCPhys)
*pGCPhys = GST_GET_PDE_BIG_PG_GCPHYS(Pde) | (GCPtr & (~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK));
}
return VINF_SUCCESS;
#else
# error "shouldn't be here!"
/* something else... */
return VERR_NOT_SUPPORTED;
#endif
}
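/*
* Usage sketch (illustrative only, assuming the PGM_GST_PFN dispatch macro
* from PGMInternal.h); callers normally go through the PGMGstGetPage wrapper,
* which boils down to something like:
*
* uint64_t fFlags;
* RTGCPHYS GCPhys;
* int rc = PGM_GST_PFN(GetPage, pVM)(pVM, GCPtr, &fFlags, &GCPhys);
* if (RT_SUCCESS(rc) && !(fFlags & X86_PTE_RW))
* Log(("read-only guest page at %RGv\n", GCPtr));
*/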
/**
* Modifies page flags for a range of pages in the guest's tables.
*
* The existing flags are ANDed with the fMask and ORed with the fFlags.
*
* @returns VBox status code.
* @param pVM VM handle.
* @param GCPtr Virtual address of the first page in the range. Page aligned!
* @param cb Size (in bytes) of the page range to apply the modification to. Page aligned!
* @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
* @param fMask The AND mask - page flags X86_PTE_*.
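*
* For instance (illustrative): fFlags = 0 and fMask = ~(uint64_t)X86_PTE_RW
* make the range read-only, leaving all other flags and the page addresses
* untouched.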
*/
PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
|| PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64
for (;;)
{
/*
* Get the PD entry.
*/
# if PGM_GST_TYPE == PGM_TYPE_32BIT
PX86PDE pPde = pgmGstGet32bitPDEPtr(&pVM->pgm.s, GCPtr);
# elif PGM_GST_TYPE == PGM_TYPE_PAE
/* pgmGstGetPaePDEPtr will return 0 if the PDPTE is marked as not present
* All the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx)
*/
PX86PDEPAE pPde = pgmGstGetPaePDEPtr(&pVM->pgm.s, GCPtr);
Assert(pPde);
if (!pPde)
return VERR_PAGE_TABLE_NOT_PRESENT;
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
/** @todo Setting the r/w, u/s & nx bits might have no effect depending on the pdpte & pml4 values */
PX86PDEPAE pPde = pgmGstGetLongModePDEPtr(&pVM->pgm.s, GCPtr);
Assert(pPde);
if (!pPde)
return VERR_PAGE_TABLE_NOT_PRESENT;
# endif
GSTPDE Pde = *pPde;
Assert(Pde.n.u1Present);
if (!Pde.n.u1Present)
return VERR_PAGE_TABLE_NOT_PRESENT;
if ( !Pde.b.u1Size
# if PGM_GST_TYPE != PGM_TYPE_AMD64
|| !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE)
# endif
)
{
/*
* 4KB Page table
*
* Walk page tables and pages till we're done.
*/
PGSTPT pPT;
int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
if (RT_FAILURE(rc))
return rc;
unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
while (iPTE < RT_ELEMENTS(pPT->a))
{
GSTPTE Pte = pPT->a[iPTE];
Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
| (fFlags & ~GST_PTE_PG_MASK);
pPT->a[iPTE] = Pte;
/* next page */
cb -= PAGE_SIZE;
if (!cb)
return VINF_SUCCESS;
GCPtr += PAGE_SIZE;
iPTE++;
}
}
else
{
/*
* 2/4MB big page.
*/
# if PGM_GST_TYPE == PGM_TYPE_32BIT
Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
# else
Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
# endif
| (fFlags & ~GST_PTE_PG_MASK)
| ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
*pPde = Pde;
/* advance */
const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
if (cbDone >= cb)
return VINF_SUCCESS;
cb -= cbDone;
GCPtr += cbDone;
}
}
#else
/* real / protected mode: ignore. */
return VINF_SUCCESS;
#endif
}
/**
* Retrieves guest PDE information.
*
* @returns VBox status code.
* @param pVM The virtual machine.
* @param GCPtr Guest context pointer.
* @param pPDE Pointer to the guest PDE structure.
*/
PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPDE)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
|| PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64
# if PGM_GST_TYPE == PGM_TYPE_32BIT
X86PDE Pde = pgmGstGet32bitPDE(&pVM->pgm.s, GCPtr);
# elif PGM_GST_TYPE == PGM_TYPE_PAE
X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
X86PDEPAE Pde = pgmGstGetLongModePDE(&pVM->pgm.s, GCPtr);
# endif
pPDE->u = (X86PGPAEUINT)Pde.u;
return VINF_SUCCESS;
#else
AssertFailed();
return VERR_NOT_IMPLEMENTED;
#endif
}
/**
* Maps the CR3 into the HMA in GC and locates it in HC.
*
* Note that a MapCR3 call is usually not followed by an UnmapCR3 call; whenever
* CR3 is updated we simply call MapCR3 again.
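* It is typically invoked from PGMFlushTLB and the mode switching paths when
* the guest CR3 changes.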
*
* @returns VBox status, no specials.
* @param pVM VM handle.
* @param GCPhysCR3 The physical address in the CR3 register.
*/
PGM_GST_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
|| PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64
LogFlow(("MapCR3: %RGp\n", GCPhysCR3));
/*
* Map the page CR3 points at.
*/
RTHCPHYS HCPhysGuestCR3;
RTHCPTR HCPtrGuestCR3;
int rc = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhysCR3 & GST_CR3_PAGE_MASK, &HCPtrGuestCR3, &HCPhysGuestCR3);
if (RT_SUCCESS(rc))
{
rc = PGMMap(pVM, (RTGCPTR)pVM->pgm.s.GCPtrCR3Mapping, HCPhysGuestCR3, PAGE_SIZE, 0);
if (RT_SUCCESS(rc))
{
PGM_INVL_PG(pVM->pgm.s.GCPtrCR3Mapping);
# if PGM_GST_TYPE == PGM_TYPE_32BIT
pVM->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
pVM->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
# endif
pVM->pgm.s.pGst32BitPdRC = (RCPTRTYPE(PX86PD))pVM->pgm.s.GCPtrCR3Mapping;
# elif PGM_GST_TYPE == PGM_TYPE_PAE
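/* The PAE PDPT only requires 32-byte alignment, so CR3 may point anywhere
within the page; keep the offset so the RC pointer hits the PDPT itself. */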
unsigned off = GCPhysCR3 & GST_CR3_PAGE_MASK & PAGE_OFFSET_MASK;
pVM->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
pVM->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
# endif
pVM->pgm.s.pGstPaePdptRC = (RCPTRTYPE(PX86PDPT))((RCPTRTYPE(uint8_t *))pVM->pgm.s.GCPtrCR3Mapping + off);
Log(("Cached mapping %RGv\n", pVM->pgm.s.pGstPaePdptRC));
/*
* Map the 4 PDs too.
*/
PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
RTGCPTR GCPtr = pVM->pgm.s.GCPtrCR3Mapping + PAGE_SIZE;
for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++, GCPtr += PAGE_SIZE)
{
if (pGuestPDPT->a[i].n.u1Present)
{
RTHCPTR HCPtr;
RTHCPHYS HCPhys;
RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK;
int rc2 = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhys, &HCPtr, &HCPhys);
if (RT_SUCCESS(rc2))
{
rc = PGMMap(pVM, GCPtr, HCPhys & X86_PTE_PAE_PG_MASK, PAGE_SIZE, 0);
AssertRCReturn(rc, rc);
pVM->pgm.s.apGstPaePDsR3[i] = (R3PTRTYPE(PX86PDPAE))HCPtr;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
pVM->pgm.s.apGstPaePDsR0[i] = (R0PTRTYPE(PX86PDPAE))HCPtr;
# endif
pVM->pgm.s.apGstPaePDsRC[i] = (RCPTRTYPE(PX86PDPAE))GCPtr;
pVM->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
PGM_INVL_PG(GCPtr); /** @todo This ends up calling HWACCMInvalidatePage, is that correct? */
continue;
}
AssertMsgFailed(("pgmR3Gst32BitMapCR3: rc2=%d GCPhys=%RGp i=%d\n", rc2, GCPhys, i));
}
pVM->pgm.s.apGstPaePDsR3[i] = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
pVM->pgm.s.apGstPaePDsR0[i] = 0;
# endif
pVM->pgm.s.apGstPaePDsRC[i] = 0;
pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
PGM_INVL_PG(GCPtr); /** @todo this shouldn't be necessary? */
}
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
pVM->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
pVM->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
# endif
if (!HWACCMIsNestedPagingActive(pVM))
{
PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
if (pVM->pgm.s.CTX_SUFF(pShwAmd64CR3))
{
/* It might have been freed already by a pool flush (see e.g. PGMR3MappingsUnfix). */
if (pVM->pgm.s.CTX_SUFF(pShwAmd64CR3)->enmKind != PGMPOOLKIND_FREE)
pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwAmd64CR3), PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.CTX_SUFF(pShwAmd64CR3)->GCPhys >> PAGE_SHIFT);
pVM->pgm.s.CTX_SUFF(pShwAmd64CR3) = 0;
pVM->pgm.s.pShwPaePml4R3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
pVM->pgm.s.pShwPaePml4R0 = 0;
# endif
pVM->pgm.s.HCPhysShwPaePml4 = 0;
}
Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
l_try_again:
rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_64BIT_PML4_FOR_64BIT_PML4, PGMPOOL_IDX_AMD64_CR3, GCPhysCR3 >> PAGE_SHIFT, &pVM->pgm.s.CTX_SUFF(pShwAmd64CR3));
if (rc == VERR_PGM_POOL_FLUSHED)
{
Log(("MapCR3: Flush pool and try again\n"));
Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
rc = pgmPoolSyncCR3(pVM);
AssertRC(rc);
goto l_try_again;
}
# ifdef IN_RING0
pVM->pgm.s.pShwAmd64CR3R3 = MMHyperCCToR3(pVM, pVM->pgm.s.CTX_SUFF(pShwAmd64CR3));
# else
pVM->pgm.s.pShwAmd64CR3R0 = MMHyperCCToR0(pVM, pVM->pgm.s.CTX_SUFF(pShwAmd64CR3));
# endif
pVM->pgm.s.pShwPaePml4R3 = (R3PTRTYPE(PX86PML4))PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.CTX_SUFF(pShwAmd64CR3));
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
pVM->pgm.s.pShwPaePml4R0 = (R0PTRTYPE(PX86PML4))PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.CTX_SUFF(pShwAmd64CR3));
# endif
pVM->pgm.s.HCPhysShwPaePml4 = pVM->pgm.s.CTX_SUFF(pShwAmd64CR3)->Core.Key;
}
# endif
}
else
AssertMsgFailed(("rc=%Rrc GCPhysGuestPD=%RGp\n", rc, GCPhysCR3));
}
else
AssertMsgFailed(("rc=%Rrc GCPhysGuestPD=%RGp\n", rc, GCPhysCR3));
#else /* prot/real stub */
int rc = VINF_SUCCESS;
#endif
return rc;
}
/**
* Unmaps the CR3.
*
* @returns VBox status, no specials.
* @param pVM VM handle.
*/
PGM_GST_DECL(int, UnmapCR3)(PVM pVM)
{
LogFlow(("UnmapCR3\n"));
int rc = VINF_SUCCESS;
#if PGM_GST_TYPE == PGM_TYPE_32BIT
pVM->pgm.s.pGst32BitPdR3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
pVM->pgm.s.pGst32BitPdR0 = 0;
# endif
pVM->pgm.s.pGst32BitPdRC = 0;
#elif PGM_GST_TYPE == PGM_TYPE_PAE
pVM->pgm.s.pGstPaePdptR3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
pVM->pgm.s.pGstPaePdptR0 = 0;
# endif
pVM->pgm.s.pGstPaePdptRC = 0;
for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
{
pVM->pgm.s.apGstPaePDsR3[i] = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
pVM->pgm.s.apGstPaePDsR0[i] = 0;
# endif
pVM->pgm.s.apGstPaePDsRC[i] = 0;
pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
}
#elif PGM_GST_TYPE == PGM_TYPE_AMD64
pVM->pgm.s.pGstAmd64Pml4R3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
pVM->pgm.s.pGstAmd64Pml4R0 = 0;
# endif
if (!HWACCMIsNestedPagingActive(pVM))
{
pVM->pgm.s.pShwPaePml4R3 = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
pVM->pgm.s.pShwPaePml4R0 = 0;
# endif
pVM->pgm.s.HCPhysShwPaePml4 = 0;
if (pVM->pgm.s.CTX_SUFF(pShwAmd64CR3))
{
PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwAmd64CR3), PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.CTX_SUFF(pShwAmd64CR3)->GCPhys >> PAGE_SHIFT);
pVM->pgm.s.pShwAmd64CR3R3 = 0;
pVM->pgm.s.pShwAmd64CR3R0 = 0;
}
}
#else /* prot/real mode stub */
/* nothing to do */
#endif
return rc;
}
#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_PGM_POOL
/**
* Registers physical page monitors for the necessary paging
* structures to detect conflicts with our guest mappings.
*
* This is always called after mapping CR3.
* This is never called with fixed mappings.
*
* @returns VBox status, no specials.
* @param pVM VM handle.
* @param GCPhysCR3 The physical address in the CR3 register.
*/
PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
{
Assert(!pVM->pgm.s.fMappingsFixed);
int rc = VINF_SUCCESS;
/*
* Register/Modify write phys handler for guest's CR3 if it changed.
*/
#if PGM_GST_TYPE == PGM_TYPE_32BIT
if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
{
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
const unsigned cbCR3Stuff = PGM_GST_TYPE == PGM_TYPE_PAE ? 32 : PAGE_SIZE;
if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
rc = PGMHandlerPhysicalModify(pVM, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1);
else
rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1,
pVM->pgm.s.pfnR3GstWriteHandlerCR3, 0,
pVM->pgm.s.pfnR0GstWriteHandlerCR3, 0,
pVM->pgm.s.pfnRCGstWriteHandlerCR3, 0,
pVM->pgm.s.pszR3GstWriteHandlerCR3);
# else /* PGMPOOL_WITH_MIXED_PT_CR3 */
rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool),
pVM->pgm.s.enmShadowMode == PGMMODE_PAE
|| pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
? PGMPOOL_IDX_PAE_PD
: PGMPOOL_IDX_PD,
GCPhysCR3);
# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
if (RT_FAILURE(rc))
{
AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));
return rc;
}
pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
}
#elif PGM_GST_TYPE == PGM_TYPE_PAE
/* Monitor the PDPT page */
/*
* Register/Modify write phys handler for guest's CR3 if it changed.
*/
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
AssertFailed();
# endif
if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
{
rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT, GCPhysCR3);
if (RT_FAILURE(rc))
{
AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));
return rc;
}
pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
}
/*
* Do the 4 PDs.
*/
PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
{
if (pGuestPDPT->a[i].n.u1Present)
{
RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK;
if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != GCPhys)
{
Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);
rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i, GCPhys);
}
if (RT_FAILURE(rc))
{
AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
rc, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i], GCPhys));
return rc;
}
pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = GCPhys;
}
else if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
{
rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
AssertRC(rc);
pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
}
}
#else
/* prot/real/amd64 mode stub */
#endif
return rc;
}
/**
* Deregisters any physical page monitors installed by MonitorCR3.
*
* @returns VBox status code, no specials.
* @param pVM The VM handle.
*/
PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM)
{
int rc = VINF_SUCCESS;
/*
* Deregister the access handlers.
*
* PGMSyncCR3 will reinstall them if required, and PGMSyncCR3 is always
* executed before we enter GC again.
*/
#if PGM_GST_TYPE == PGM_TYPE_32BIT
if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
{
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
rc = PGMHandlerPhysicalDeregister(pVM, pVM->pgm.s.GCPhysGstCR3Monitored);
AssertRCReturn(rc, rc);
# else /* PGMPOOL_WITH_MIXED_PT_CR3 */
rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool),
pVM->pgm.s.enmShadowMode == PGMMODE_PAE
|| pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
? PGMPOOL_IDX_PAE_PD
: PGMPOOL_IDX_PD);
AssertRCReturn(rc, rc);
# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
pVM->pgm.s.GCPhysGstCR3Monitored = NIL_RTGCPHYS;
}
#elif PGM_GST_TYPE == PGM_TYPE_PAE
/* The PDPT page */
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
AssertFailed();
# endif
if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
{
rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT);
AssertRC(rc);
}
/* The 4 PDs. */
for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
{
if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
{
Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);
int rc2 = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
AssertRC(rc2);
if (RT_FAILURE(rc2))
rc = rc2;
pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
}
}
#else
/* prot/real/amd64 mode stub */
#endif
return rc;
}
#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_PGM
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
|| PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64
/**
* Updates one virtual handler range.
*
* @returns 0 (to continue the enumeration).
* @param pNode Pointer to a PGMVIRTHANDLER.
* @param pvUser Pointer to a PGMVHUARGS structure (see PGM.cpp).
*/
static DECLCALLBACK(int) PGM_GST_NAME(VirtHandlerUpdateOne)(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
PPGMHVUSTATE pState = (PPGMHVUSTATE)pvUser;
Assert(pCur->enmType != PGMVIRTHANDLERTYPE_HYPERVISOR);
#if PGM_GST_TYPE == PGM_TYPE_32BIT
PX86PD pPDSrc = pgmGstGet32bitPDPtr(&pState->pVM->pgm.s);
#endif
RTGCPTR GCPtr = pCur->Core.Key;
#if PGM_GST_TYPE != PGM_TYPE_AMD64
/* skip all stuff above 4GB if not AMD64 mode. */
if (GCPtr >= _4GB)
return 0;
#endif
unsigned offPage = GCPtr & PAGE_OFFSET_MASK;
unsigned iPage = 0;
while (iPage < pCur->cPages)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT
X86PDE Pde = pPDSrc->a[GCPtr >> X86_PD_SHIFT];
#elif PGM_GST_TYPE == PGM_TYPE_PAE
X86PDEPAE Pde = pgmGstGetPaePDE(&pState->pVM->pgm.s, GCPtr);
#elif PGM_GST_TYPE == PGM_TYPE_AMD64
X86PDEPAE Pde = pgmGstGetLongModePDE(&pState->pVM->pgm.s, GCPtr);
#endif
if (Pde.n.u1Present)
{
if ( !Pde.b.u1Size
# if PGM_GST_TYPE != PGM_TYPE_AMD64
|| !(pState->cr4 & X86_CR4_PSE)
# endif
)
{
/*
* Normal page table.
*/
PGSTPT pPT;
int rc = PGM_GCPHYS_2_PTR(pState->pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
if (RT_SUCCESS(rc))
{
for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
iPTE++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
{
GSTPTE Pte = pPT->a[iPTE];
RTGCPHYS GCPhysNew;
if (Pte.n.u1Present)
GCPhysNew = (RTGCPHYS)(pPT->a[iPTE].u & GST_PTE_PG_MASK) + offPage;
else
GCPhysNew = NIL_RTGCPHYS;
if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
{
if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%RGp\n",
pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
#endif
pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
}
}
}
else
{
/* The page table could not be mapped; treat its pages as not present. */
offPage = 0;
AssertRC(rc);
for (unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
iPTE < RT_ELEMENTS(pPT->a) && iPage < pCur->cPages;
iPTE++, iPage++, GCPtr += PAGE_SIZE)
{
if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
{
pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias));
#endif
pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
}
}
}
}
else
{
/*
* 2/4MB page.
*/
RTGCPHYS GCPhys = (RTGCPHYS)(Pde.u & GST_PDE_BIG_PG_MASK); /* Note: 32-bit PSE-36 high address bits are not handled here. */
for (unsigned i4KB = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
i4KB < PAGE_SIZE / sizeof(GSTPDE) && iPage < pCur->cPages;
i4KB++, iPage++, GCPtr += PAGE_SIZE, offPage = 0)
{
RTGCPHYS GCPhysNew = GCPhys + (i4KB << PAGE_SHIFT) + offPage;
if (pCur->aPhysToVirt[iPage].Core.Key != GCPhysNew)
{
if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias,
("{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%RGp\n",
pCur->aPhysToVirt[iPage].Core.Key, pCur->aPhysToVirt[iPage].Core.KeyLast,
pCur->aPhysToVirt[iPage].offVirtHandler, pCur->aPhysToVirt[iPage].offNextAlias, GCPhysNew));
#endif
pCur->aPhysToVirt[iPage].Core.Key = GCPhysNew;
pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
}
}
} /* pde type */
}
else
{
/* not-present. */
for (unsigned cPages = (GST_PT_MASK + 1) - ((GCPtr >> GST_PT_SHIFT) & GST_PT_MASK);
cPages && iPage < pCur->cPages;
cPages--, iPage++, GCPtr += PAGE_SIZE)
{
if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
{
pgmHandlerVirtualClearPage(&pState->pVM->pgm.s, pCur, iPage);
pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS;
pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
}
}
offPage = 0;
}
} /* for pages in virtual mapping. */
return 0;
}
#endif /* 32BIT, PAE and AMD64 */
/**
* Updates the virtual page access handlers.
*
* @returns true if bits were flushed.
* @returns false if bits weren't flushed.
* @param pVM VM handle.
* @param cr4 The cr4 register value.
*/
PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
|| PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64
/** @todo
* In theory this is not sufficient: the guest can change a single page in a range with invlpg
*/
/*
* Resolve any virtual address based access handlers to GC physical addresses.
* This should be fairly quick.
*/
PGMHVUSTATE State;
pgmLock(pVM);
STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualUpdate), a);
State.pVM = pVM;
State.fTodo = pVM->pgm.s.fSyncFlags;
State.cr4 = cr4;
RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, PGM_GST_NAME(VirtHandlerUpdateOne), &State);
STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualUpdate), a);
/*
* Set / reset bits?
*/
if (State.fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL)
{
STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualReset), b);
Log(("pgmR3VirtualHandlersUpdate: resets bits\n"));
RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualResetOne, pVM);
pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3HandlerVirtualReset), b);
}
pgmUnlock(pVM);
return !!(State.fTodo & PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL);
#else /* real / protected */
return false;
#endif
}
#if PGM_GST_TYPE == PGM_TYPE_32BIT && !defined(IN_RING3)
/**
* Write access handler for the Guest CR3 page in 32-bit mode.
*
* This will try to interpret the instruction; on failure it falls back to the
* recompiler. It checks whether the changed PDEs are marked present and
* conflict with our mappings; on a conflict we switch to the host context
* and resolve it there.
*
* @returns VBox status code (appropriate for trap handling and GC return).
* @param pVM VM Handle.
* @param uErrorCode CPU Error code.
* @param pRegFrame Trap register frame.
* @param pvFault The fault address (cr2).
* @param GCPhysFault The GC physical address corresponding to pvFault.
* @param pvUser User argument.
*/
PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
/*
* Try interpret the instruction.
*/
uint32_t cb;
int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
if (RT_SUCCESS(rc) && cb)
{
/*
* Check if the modified PDEs are present and conflict with our mappings.
* A single write may straddle two PDEs (cb <= 8), hence iPD1 and iPD2.
*/
const RTGCPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;
const unsigned iPD1 = offPD / sizeof(X86PDE);
const unsigned iPD2 = (offPD + cb - 1) / sizeof(X86PDE);
Assert(cb > 0 && cb <= 8);
Assert(iPD1 < X86_PG_ENTRIES);
Assert(iPD2 < X86_PG_ENTRIES);
#ifdef DEBUG
Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%x\n", iPD1, iPD1 << X86_PD_SHIFT));
if (iPD1 != iPD2)
Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%x\n", iPD2, iPD2 << X86_PD_SHIFT));
#endif
if (!pVM->pgm.s.fMappingsFixed)
{
PX86PD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
if ( ( pPDSrc->a[iPD1].n.u1Present
&& pgmGetMapping(pVM, (RTGCPTR)(iPD1 << X86_PD_SHIFT)) )
|| ( iPD1 != iPD2
&& pPDSrc->a[iPD2].n.u1Present
&& pgmGetMapping(pVM, (RTGCPTR)(iPD2 << X86_PD_SHIFT)) )
)
{
STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteConflict);
VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
if (rc == VINF_SUCCESS)
rc = VINF_PGM_SYNC_CR3;
Log(("pgmXXGst32BitWriteHandlerCR3: detected conflict iPD1=%#x iPD2=%#x - returns %Rrc\n", iPD1, iPD2, rc));
return rc;
}
}
STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
}
else
{
Assert(RT_FAILURE(rc));
if (rc == VERR_EM_INTERPRETER)
rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
Log(("pgmXXGst32BitWriteHandlerCR3: returns %Rrc\n", rc));
STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
}
return rc;
}
#endif /* PGM_TYPE_32BIT && !IN_RING3 */
#if PGM_GST_TYPE == PGM_TYPE_PAE && !defined(IN_RING3)
/**
* Write access handler for the Guest CR3 page in PAE mode.
*
* This will try to interpret the instruction; on failure it falls back to the
* recompiler. It checks whether the changed PDEs are marked present and
* conflict with our mappings; on a conflict we switch to the host context
* and resolve it there.
*
* @returns VBox status code (appropriate for trap handling and GC return).
* @param pVM VM Handle.
* @param uErrorCode CPU Error code.
* @param pRegFrame Trap register frame.
* @param pvFault The fault address (cr2).
* @param GCPhysFault The GC physical address corresponding to pvFault.
* @param pvUser User argument.
*/
PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
/*
* Try interpret the instruction.
*/
uint32_t cb;
int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
if (RT_SUCCESS(rc) && cb)
{
/*
* Check if any of the PDs have changed.
* We'll simply check all of them instead of figuring out which one/two to check.
*/
PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
{
if ( pGuestPDPT->a[i].n.u1Present
&& (pGuestPDPT->a[i].u & X86_PDPE_PG_MASK)
!= pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
{
/*
* The PDPE has changed.
* We will schedule a monitoring update for the next TLB Flush,
* InvalidatePage or SyncCR3.
*
* This isn't perfect, because a lazy page sync might be dealing with a
* half-updated PDPE. However, we assume that the guest OS is disabling interrupts
* and being extremely careful (cmpxchg8b) when updating a PDPE where it's
* executing.
*/
pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
Log(("pgmXXGstPaeWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%RGp\n",
i, pGuestPDPT->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
}
}
STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
}
else
{
Assert(RT_FAILURE(rc));
STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
if (rc == VERR_EM_INTERPRETER)
rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
}
Log(("pgmXXGstPaeWriteHandlerCR3: returns %Rrc\n", rc));
return rc;
}
/**
* Write access handler for the Guest PDs in PAE mode.
*
* This will try to interpret the instruction; on failure it falls back to the
* recompiler. It checks whether the changed PDEs are marked present and
* conflict with our mappings; on a conflict we switch to the host context
* and resolve it there.
*
* @returns VBox status code (appropriate for trap handling and GC return).
* @param pVM VM Handle.
* @param uErrorCode CPU Error code.
* @param pRegFrame Trap register frame.
* @param pvFault The fault address (cr2).
* @param GCPhysFault The GC physical address corresponding to pvFault.
* @param pvUser User argument.
*/
PGM_GST_DECL(int, WriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
/*
* Try interpret the instruction.
*/
uint32_t cb;
int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
if (RT_SUCCESS(rc) && cb)
{
/*
* Figure out which of the 4 PDs this is.
*/
RTGCPTR i;
PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
for (i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
if ((pGuestPDPT->a[i].u & X86_PDPE_PG_MASK) == (GCPhysFault & X86_PTE_PAE_PG_MASK))
{
PX86PDPAE pPDSrc = pgmGstGetPaePD(&pVM->pgm.s, i << X86_PDPT_SHIFT);
const RTGCPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;
const unsigned iPD1 = offPD / sizeof(X86PDEPAE);
const unsigned iPD2 = (offPD + cb - 1) / sizeof(X86PDEPAE);
Assert(cb > 0 && cb <= 8);
Assert(iPD1 < X86_PG_PAE_ENTRIES);
Assert(iPD2 < X86_PG_PAE_ENTRIES);
# ifdef LOG_ENABLED
Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD1=%#05x (%x)\n",
i, iPD1, (i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT)));
if (iPD1 != iPD2)
Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD2=%#05x (%x)\n",
i, iPD2, (i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT)));
# endif
if (!pVM->pgm.s.fMappingsFixed)
{
if ( ( pPDSrc->a[iPD1].n.u1Present
&& pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT))) )
|| ( iPD1 != iPD2
&& pPDSrc->a[iPD2].n.u1Present
&& pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT))) )
)
{
Log(("pgmXXGstPaeWriteHandlerPD: detected conflict iPD1=%#x iPD2=%#x\n", iPD1, iPD2));
STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteConflict);
VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
return VINF_PGM_SYNC_CR3;
}
}
break; /* ASSUMES no duplicate entries... */
}
Assert(i < 4);
STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
}
else
{
Assert(RT_FAILURE(rc));
if (rc == VERR_EM_INTERPRETER)
rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
else
Log(("pgmXXGstPaeWriteHandlerPD: returns %Rrc\n", rc));
STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
}
return rc;
}
#endif /* PGM_TYPE_PAE && !IN_RING3 */