PGMAllBth.h revision a5c11842b92303b7b5d0086ee5011a969e321b50
/** @file
 * VBox - Page Manager, Shadow+Guest Paging Template - All context code.
 *
 * @remarks The nested page tables on AMD make use of PGM_SHW_TYPE in
 *          {PGM_TYPE_AMD64, PGM_TYPE_PAE and PGM_TYPE_32BIT} and PGM_GST_TYPE
 *          set to PGM_TYPE_PROT.  Half of the code in this file is not
 *          exercised with PGM_SHW_TYPE set to PGM_TYPE_NESTED.
 *
 * @remarks Extended page tables (Intel) are built with PGM_GST_TYPE set to
 *          PGM_TYPE_PROT (and PGM_SHW_TYPE set to PGM_TYPE_EPT).
 *
 * @remarks This file is one big \#ifdef-orgy!
 */
/*
 * Copyright (C) 2006-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken);
PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
static int PGM_BTH_NAME(SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
static int PGM_BTH_NAME(CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage);
static int PGM_BTH_NAME(SyncPT)(PVMCPU pVCpu, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uErr);
PGM_BTH_DECL(int, PrefetchPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
PGM_BTH_DECL(int, SyncCR3)(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
/*
 * Filter out some illegal combinations of guest and shadow paging, so we can
 * remove redundant checks inside functions.
 */
#if PGM_GST_TYPE == PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
# error "Invalid combination; PAE guest implies PAE shadow"
#endif

#if    (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
    && !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT)
# error "Invalid combination; real or protected mode without paging implies 32 bits or PAE shadow paging."
#endif

#if    (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE) \
    && !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT)
# error "Invalid combination; 32 bits guest paging or PAE implies 32 bits or PAE shadow paging."
#endif

#if    (PGM_GST_TYPE == PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT) \
    || (PGM_SHW_TYPE == PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PROT)
# error "Invalid combination; AMD64 guest implies AMD64 shadow and vice versa"
#endif
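
/*
 * Illustration (not part of the original file): PGMAllBth.h is a template
 * that the PGM translation units include once per guest/shadow paging mode
 * pair, roughly as sketched below.  The exact macro names and the list of
 * instantiated pairs live in the PGM*.cpp files and PGMGstDefs.h; treat this
 * as an approximation of that pattern, not a verbatim copy.
 *
 *     #define PGM_GST_TYPE        PGM_TYPE_32BIT
 *     #define PGM_GST_NAME(name)  PGM_GST_NAME_32BIT(name)
 *     #define PGM_SHW_TYPE        PGM_TYPE_32BIT
 *     #define PGM_SHW_NAME(name)  PGM_SHW_NAME_32BIT(name)
 *     #define PGM_BTH_NAME(name)  PGM_BTH_NAME_32BIT_32BIT(name)
 *     #include "PGMAllBth.h"
 *     #undef PGM_BTH_NAME
 *     #undef PGM_SHW_TYPE
 *     #undef PGM_SHW_NAME
 *     #undef PGM_GST_TYPE
 *     #undef PGM_GST_NAME
 */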
/**
 * Deal with a guest page fault.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_EM_RAW_GUEST_TRAP
 * @retval  VINF_EM_RAW_EMULATE_INSTR
 *
 * @param   pVCpu       The current CPU.
 * @param   pGstWalk    The guest page table walk result.
 * @param   uErr        The error code.
 */
PGM_BTH_DECL(VBOXSTRICTRC, Trap0eHandlerGuestFault)(PVMCPU pVCpu, PGSTPTWALK pGstWalk, RTGCUINT uErr)
# if !defined(PGM_WITHOUT_MAPPINGS) && (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE)
    /*
     * Check for write conflicts with our hypervisor mapping.
     *
     * If the guest happens to access a non-present page, where our hypervisor
     * is currently mapped, then we'll create a #PF storm in the guest.
     */
    if (   (uErr & (X86_TRAP_PF_P | X86_TRAP_PF_RW)) == (X86_TRAP_PF_P | X86_TRAP_PF_RW)
        && MMHyperIsInsideArea(pVCpu->CTX_SUFF(pVM), pGstWalk->Core.GCPtr))
        /* Force a CR3 sync to check for conflicts and emulate the instruction. */
        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2GuestTrap; });
# endif

    /*
     * Calc the error code for the guest trap.
     */
        ? uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US | X86_TRAP_PF_ID)
LogFlow(("Guest trap; cr2=%RGv uErr=%RGv lvl=%d\n", pGstWalk->Core.GCPtr, uErr, pGstWalk->Core.uLevel));
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2GuestTrap; });
return VINF_EM_RAW_GUEST_TRAP;
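
/*
 * Illustration (not part of the original file): how the guest-visible #PF
 * error code above is assembled from the architectural X86_TRAP_PF_* bits.
 * Bit 0 (P) distinguishes protection violations from not-present pages,
 * bit 1 (RW) is set for writes, bit 2 (US) for user-mode accesses, bit 3
 * (RSVD) for reserved-bit violations and bit 4 (ID) for instruction fetches
 * (only meaningful when NX is active).  The helper name and its parameters
 * are hypothetical; the real code works on the PGSTPTWALK result instead.
 */
#if 0 /* illustration only */
static uint32_t pgmIllustrateGuestPfErrCode(uint32_t uErr, bool fNxActive, bool fPresent, bool fRsvdBits)
{
    uint32_t uNewErr = fNxActive
                     ? uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US | X86_TRAP_PF_ID)
                     : uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
    if (fRsvdBits)
        uNewErr |= X86_TRAP_PF_RSVD | X86_TRAP_PF_P;    /* Reserved-bit faults always report P. */
    else if (fPresent)
        uNewErr |= X86_TRAP_PF_P;                       /* Protection violation vs. not-present. */
    return uNewErr;
}
#endif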
static VBOXSTRICTRC PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame,
int rc;
if (pCur)
# ifdef PGM_SYNC_N_PAGES
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2OutOfSyncHndPhys; });
return rc;
# ifdef PGM_WITH_MMIO_OPTIMIZATIONS
&& !pGstWalk->Core.fEffectiveUS /** @todo Remove pGstWalk->Core.fEffectiveUS and X86_PTE_US further down in the sync code. */
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2OutOfSyncHndPhys; });
return rc;
if (uErr & X86_TRAP_PF_RSVD) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eHandlersPhysAllOpt);
# ifdef IN_RING0
*pfLockTaken = false;
# ifdef VBOX_WITH_STATISTICS
if (pCur)
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2HndPhys; });
return rc;
# ifdef PGM_SYNC_N_PAGES
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2OutOfSyncHndVirt; });
return rc;
* r=svl: true, but lookup on virtual address should remain as a fallback as phys & virt trees might be
PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault);
if (pCur)
# ifdef IN_RC
*pfLockTaken = false;
# ifdef VBOX_WITH_STATISTICS
if (pCur)
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2HndVirt; });
return rc;
unsigned iPage;
if ( pCur
Assert((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == (pGstWalk->Core.GCPhys & X86_PTE_PAE_PG_MASK));
# ifdef IN_RC
*pfLockTaken = false;
# ifdef VBOX_WITH_STATISTICS
pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, GCPtrStart);
if (pCur)
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2HndVirt; });
return rc;
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2OutOfSyncHndPhys; });
return rc;
/** @todo This particular case can cause quite a lot of overhead. E.g. early stage of kernel booting in Ubuntu 6.06
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2HndUnhandled; });
return rc;
PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken)
*pfLockTaken = false;
# if ( PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT \
int rc;
AssertMsg(GstWalk.Pml4e.u == GstWalk.pPml4e->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pml4e.u, (uint64_t)GstWalk.pPml4e->u));
AssertMsg(GstWalk.Pdpe.u == GstWalk.pPdpe->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pdpe.u, (uint64_t)GstWalk.pPdpe->u));
AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u));
AssertMsg(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u));
# ifdef VBOX_WITH_STATISTICS
("%RX64 %RX64 pPte=%p pPde=%p Pte=%RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u, GstWalk.pPte, GstWalk.pPde, (uint64_t)GstWalk.pPte->u));
GSTPDE const PdeSrcDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A}; /** @todo eliminate this */
*pfLockTaken = true;
# ifdef PGM_WITH_MMIO_OPTIMIZATIONS
return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage,
return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage,
pfLockTaken));
const unsigned iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK; /* pPDDst index, not used with the pool. */
rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, X86_PDPE_P, &pPDDst); /* RW, US and A are reserved in PAE mode. */
AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_4);
rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A,
AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_4);
AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_4);
return VINF_SUCCESS;
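
/*
 * Illustration (not part of the original file): how a faulting address is
 * split into the table indices used above.  The constants shown are the
 * plain 32-bit paging ones; SHW_PD_SHIFT/SHW_PD_MASK expand to the
 * equivalent values for the active shadow mode (32-bit, PAE and AMD64 use
 * different index widths).
 */
#if 0 /* illustration only */
static void pgmIllustrateSplit32BitAddress(uint32_t GCPtr)
{
    unsigned const iPd = (GCPtr >> 22) & 0x3ff;     /* bits 31:22 - page directory index. */
    unsigned const iPt = (GCPtr >> 12) & 0x3ff;     /* bits 21:12 - page table index. */
    unsigned const off =  GCPtr        & 0xfff;     /* bits 11:0  - byte offset within the page. */
    NOREF(iPd); NOREF(iPt); NOREF(off);
}
#endif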
AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u));
AssertMsg(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u));
if ( !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2SyncPT; });
LogFlow(("=>SyncPT %04x = %08RX64\n", (pvFault >> GST_PD_SHIFT) & GST_PD_MASK, (uint64_t)GstWalk.Pde.u));
return rc;
return VINF_PGM_SYNC_CR3;
while (iPT-- > 0)
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Mapping; });
return VINF_PGM_SYNC_CR3;
PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->HyperVirtHandlers, pvFault);
if ( pCur
# ifdef IN_RC
rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
AssertFailed();
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Mapping; });
return rc;
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Mapping; });
return VINF_EM_RAW_GUEST_TRAP;
} /* pgmAreMappingsEnabled(&pVM->pgm.s) */
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2InvalidPhys; });
return VINF_EM_RAW_EMULATE_INSTR;
return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage, pfLockTaken,
&GstWalk));
return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage, pfLockTaken));
PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault);
if (pCur)
("Unexpected trap for virtual handler: %08X (phys=%08x) %R[pgmpage] uErr=%X, enum=%d\n", pvFault, GCPhys, pPage, uErr, pCur->enmType));
# ifdef IN_RC
rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2HndVirt; });
return rc;
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Ballooned; });
return rc;
pvFault, pRegFrame->eip, GstWalk.Pde.n.u1User, fPageGst2, GCPhys2, CSAMDoesPageNeedScanning(pVM, pRegFrame->eip)));
/* Note: Can't check for X86_TRAP_PF_ID bit, because that requires execute disable support on the CPU. */
# ifdef CSAM_DETECT_NEW_CODE_PAGES
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2CSAM; });
return rc;
# ifdef CSAM_DETECT_NEW_CODE_PAGES
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2OutOfSync; });
return VINF_SUCCESS;
Log(("PGM #PF: Make writable: %RGp %R[pgmpage] pvFault=%RGp uErr=%#x\n", GCPhys, pPage, pvFault, uErr));
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2MakeWritable; });
return rc;
return VINF_EM_NO_MEMORY;
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2WPEmulation; });
return rc;
# ifdef VBOX_STRICT
LogFlow(("Obsolete physical monitor page out of sync %RGv - phys %RGp flags=%08llx\n", pvFault, GCPhys2, (uint64_t)fPageGst));
AssertMsg((RT_SUCCESS(rc) && (fPageShw & X86_PTE_RW)) || pVM->cCpus > 1 /* new monitor can be installed/page table flushed between the trap exit and PGMTrap0eHandler */,
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2OutOfSyncHndObs; });
return VINF_SUCCESS;
return rc;
return VERR_INTERNAL_ERROR;
* @todo Clean this up! Most of it is (or should be) no longer necessary as we catch all page table accesses.
int rc;
# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
return VINF_SUCCESS;
AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT, ("Unexpected rc=%Rrc\n", rc));
return VINF_SUCCESS;
return VINF_SUCCESS;
return VINF_SUCCESS;
unsigned iPDSrc = 0;
if (pPDSrc)
PdeSrc.u = 0;
# ifdef IN_RING3
&& fIsBigPage
return VINF_SUCCESS;
# ifndef PGM_WITHOUT_MAPPINGS
if (!fIsBigPage)
# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
/* This is very unlikely with caching/monitoring enabled. */
LogFlow(("InvalidatePage: Out-of-sync at %RGp PdeSrc=%RX64 PdeDst=%RX64 ShwGCPhys=%RGp iPDDst=%#x\n",
&& ( PdeSrc.b.u1Dirty /** @todo rainy day: What about read-only 4M pages? not very common, but still... */
LogFlow(("Skipping flush for big page containing %RGv (PD=%X .u=%RX64)-> nothing has changed!\n", GCPtrPage, iPDSrc, PdeSrc.u));
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVMCPU pVCpu, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys, uint16_t iPte)
LogFlow(("SyncPageWorkerTrackDeref: Damn HCPhys=%RHp pShwPage->idx=%#x!!!\n", HCPhys, pShwPage->idx));
pRam;
while (iPage-- > 0)
DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackAddref)(PVMCPU pVCpu, PPGMPOOLPAGE pShwPage, uint16_t u16, PPGMPAGE pPage, const unsigned iPTDst)
if (!u16)
Log2(("SyncPageWorkerTrackAddRef: u16=%#x->%#x iPTDst=%#x\n", u16, PGM_PAGE_GET_TRACKING(pPage), iPTDst));
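
/*
 * Illustrative note: the TrackDeref/TrackAddref workers above maintain the
 * shadow page pool's reverse-mapping information (PGM_PAGE_GET_TRACKING),
 * i.e. which shadow page table entries currently reference a given guest
 * physical page.  In essence this is what lets PGM locate and invalidate
 * every shadow mapping of a page when it is freed, remapped or put under an
 * access handler.
 */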
DECLINLINE(void) PGM_BTH_NAME(SyncHandlerPte)(PVM pVM, PCPGMPAGE pPage, uint32_t fPteSrc, PSHWPTE pPteDst)
/** @todo r=bird: Are we actually handling dirty and access bits for pages with access handlers correctly? No.
* Update: \#PF should deal with this before or after calling the handlers. It has all the info to do the job efficiently. */
(fPteSrc & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
#ifdef PGM_WITH_MMIO_OPTIMIZATIONS
|| (fPteSrc & (X86_PTE_RW | X86_PTE_US)) == X86_PTE_RW) /** @todo Remove X86_PTE_US here and pGstWalk->Core.fEffectiveUS before the sync page test. */
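
/*
 * Illustrative note: SyncHandlerPte builds the shadow PTE for a page that
 * has a physical access handler registered.  In essence, write-monitored
 * pages are mapped read-only and pages with an all-access handler are left
 * not present, so guest accesses trap into the #PF path and get routed to
 * the handler code.  With PGM_WITH_MMIO_OPTIMIZATIONS, MMIO pages are
 * encoded with reserved bits set in the shadow PTE, so such faults arrive
 * with X86_TRAP_PF_RSVD and can be recognised cheaply in the trap handler
 * above.
 */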
DECLINLINE(void) PGM_BTH_NAME(SyncPageWorker)(PVMCPU pVCpu, PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc,
# if defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT) \
#ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
# ifdef VBOX_WITH_PAGE_SHARING
&PteDst);
* If the page is not flagged as dirty and is writable, then make it read-only, so we can set the dirty bit
(PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
Log3(("SyncPageWorker: write-protecting %RGp pPage=%R[pgmpage]at iPTDst=%d\n", (RTGCPHYS)(PteSrc.u & X86_PTE_PAE_PG_MASK), pPage, iPTDst));
PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
Log2(("SyncPageWorker: deref! *pPteDst=%RX64 PteDst=%RX64\n", SHW_PTE_LOG64(*pPteDst), SHW_PTE_LOG64(PteDst)));
PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
static int PGM_BTH_NAME(SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr)
const bool fPdeValid = !fBigPage ? GST_IS_PDE_VALID(pVCpu, PdeSrc) : GST_IS_BIG_PDE_VALID(pVCpu, PdeSrc);
if (!fBigPage)
if ( fPdeValid
if (!fBigPage)
# ifdef PGM_SYNC_N_PAGES
const unsigned offPTSrc = 0;
iPTDst = 0;
RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(GST_PT_MASK << GST_PT_SHIFT)) | ((offPTSrc + iPTDst) << PAGE_SHIFT);
#ifndef IN_RING0
# ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
# ifdef VBOX_WITH_PAGE_SHARING
PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
return VINF_SUCCESS;
else if (fPdeValid)
/// @todo STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPagePDOutOfSyncAndInvalid));
return VINF_PGM_SYNCPAGE_MODIFIED_PDE;
&& !defined(IN_RC)
# ifdef PGM_SYNC_N_PAGES
return rc;
/* Can happen in the guest SMP case; other VCPU activated this PDE while we were blocking to handle the page fault. */
Log(("CPU%d: SyncPage: Pde (big:%RX64) at %RGv changed behind our back!\n", pVCpu->idCpu, PdeDst.u, GCPtrPage));
return VINF_SUCCESS;
iPTDst = 0;
RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
Log4(("%RGv iPTDst=%x pPTDst->a[iPTDst] %RX64\n", (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT), iPTDst, SHW_PTE_LOG64(pPTDst->a[iPTDst]) ));
RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
return VINF_SUCCESS;
return VERR_INTERNAL_ERROR;
DECLINLINE(int) PGM_BTH_NAME(CheckPageFaultReturnNP)(PVMCPU pVCpu, uint32_t uErr, RTGCPTR GCPtrPage, unsigned uPageFaultLevel)
return VINF_EM_RAW_GUEST_TRAP;
DECLINLINE(int) PGM_BTH_NAME(CheckPageFaultReturnRSVD)(PVMCPU pVCpu, uint32_t uErr, RTGCPTR GCPtrPage, unsigned uPageFaultLevel)
return VINF_EM_RAW_GUEST_TRAP;
DECLINLINE(int) PGM_BTH_NAME(CheckPageFaultReturnProt)(PVMCPU pVCpu, uint32_t uErr, RTGCPTR GCPtrPage, unsigned uPageFaultLevel)
return VINF_EM_RAW_GUEST_TRAP;
static int PGM_BTH_NAME(CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage)
/* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
# ifdef IN_RING0
if (pShwPage)
return VINF_PGM_NO_DIRTY_BIT_TRACKING;
return rc;
#ifndef IN_RING0
return VINF_PGM_NO_DIRTY_BIT_TRACKING;
if (pShwPage)
SHW_PTE_SET(PteDst, (SHW_PTE_GET_U(PteDst) | X86_PTE_D | X86_PTE_A) & ~(uint64_t)PGM_PTFLAGS_TRACK_DIRTY);
# ifdef IN_RING0
return VINF_PGM_NO_DIRTY_BIT_TRACKING;
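
/*
 * Illustration (not part of the original file): the dirty-bit tracking
 * scheme CheckDirtyPageFault implements, in a nutshell.  When a writable
 * page is mapped before the guest has set its D bit, the shadow PTE is made
 * read-only and tagged with PGM_PTFLAGS_TRACK_DIRTY.  The first guest write
 * then faults, the guest A/D bits are set, and the shadow PTE is made
 * writable again, as the SHW_PTE_SET statement above shows.  The helpers
 * below are hypothetical and merely restate those two steps with generic
 * x86 PTE flags.
 */
#if 0 /* illustration only */
/* Step 1, at page sync time: guest D bit clear -> write-protect the shadow PTE and mark it. */
static void pgmIllustrateDirtyTrackArm(uint64_t *puShwPte, uint64_t uGstPte)
{
    if ((uGstPte & X86_PTE_RW) && !(uGstPte & X86_PTE_D))
        *puShwPte = (*puShwPte & ~(uint64_t)X86_PTE_RW) | PGM_PTFLAGS_TRACK_DIRTY;
}

/* Step 2, on the resulting write fault: set the guest A/D bits and restore write access. */
static void pgmIllustrateDirtyTrackFault(uint64_t *puShwPte, uint64_t *puGstPte)
{
    *puGstPte |= X86_PTE_A | X86_PTE_D;
    *puShwPte  = (*puShwPte | X86_PTE_D | X86_PTE_A | X86_PTE_RW) & ~(uint64_t)PGM_PTFLAGS_TRACK_DIRTY;
}
#endif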
AssertMsg(iPDSrc == ((GCPtrPage >> GST_PD_SHIFT) & GST_PD_MASK), ("iPDSrc=%x GCPtrPage=%RGv\n", iPDSrc, GCPtrPage));
# ifndef PGM_WITHOUT_MAPPINGS
# ifndef IN_RING3
return VERR_ADDRESS_CONFLICT;
return rc;
if (fPageTable)
const bool fNoExecute = false;
rc = pgmPoolAllocEx(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, enmAccess, pShwPde->idx, iPDDst, &pShwPage);
if (fPageTable)
return VINF_SUCCESS;
return VINF_PGM_SYNC_CR3;
if (fPageTable)
# ifdef PGM_SYNC_N_PAGES
iPTDst = 0;
unsigned iPTDst = 0;
const unsigned offPTSrc = 0;
# ifndef IN_RING0
Log2(("SyncPT: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s dst.raw=%08llx iPTSrc=%x PdeSrc.u=%x physpte=%RGp\n",
SHW_PTE_IS_TRACK_DIRTY(pPTDst->a[iPTDst]) ? " Track-Dirty" : "", SHW_PTE_LOG64(pPTDst->a[iPTDst]), iPTSrc, PdeSrc.au32[0],
unsigned iPTDst = 0;
# ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
# ifdef VBOX_WITH_PAGE_SHARING
# ifndef IN_RING0
Log3(("SyncPT: write-protecting %RGp pPage=%R[pgmpage] at %RGv\n", GCPhys, pPage, (RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT))));
PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
(RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT)), SHW_PTE_IS_P(PteDst), SHW_PTE_IS_RW(PteDst), SHW_PTE_IS_US(PteDst), SHW_PTE_LOG64(PteDst),
iHCPage++;
iPTDst++;
else if (pRam)
iPTDst++;
return rc;
&& !defined(IN_RC)
return rc;
# if defined(PGM_WITH_LARGE_PAGES) && PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPde, PGM_PAGE_GET_TRACKING(pPage), pPage, iPDDst);
return VINF_SUCCESS;
rc = pgmPoolAlloc(pVM, GCPhys & ~(RT_BIT_64(SHW_PD_SHIFT) - 1), BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage);
return rc;
return VERR_INTERNAL_ERROR;
unsigned iPDSrc;
if (!pPDSrc)
unsigned iPDSrc;
if (!pPDSrc)
const unsigned iPDSrc = 0;
PdpeSrc.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
return rc;
return rc;
return rc;
AssertCompile(0);
PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fPage, unsigned uErr)
# ifndef IN_RING0
unsigned iPDSrc = 0;
return VINF_EM_RAW_GUEST_TRAP;
return VINF_EM_RAW_GUEST_TRAP;
const unsigned iPDSrc = 0;
PdpeSrc.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
return rc;
/* AMD-V nested paging: Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
return rc;
return rc;
return rc;
return VERR_INTERNAL_ERROR;
# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
return VINF_SUCCESS;
return VINF_SUCCESS;
#else /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT && PGM_SHW_TYPE != PGM_TYPE_AMD64 */
# ifndef PGM_WITHOUT_MAPPINGS
return VINF_PGM_SYNC_CR3;
return VINF_SUCCESS;
#endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT && PGM_SHW_TYPE != PGM_TYPE_AMD64 */
#ifdef VBOX_STRICT
#ifdef IN_RC
#ifdef IN_RING3
VMMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint32_t cr3, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp);
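
/*
 * Illustrative note: AssertCR3 below is a strict-build consistency check.
 * It walks the guest page tables and the corresponding shadow page tables in
 * parallel, verifies that physical addresses, present/write/user bits and
 * the dirty/accessed tracking markers agree, and returns the number of
 * mismatches found (cErrors); the caller asserts that the count is zero.
 */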
PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb)
unsigned cErrors = 0;
AssertFailed();
# ifndef IN_RING0
int rc;
AssertMsgReturn(HCPhys == HCPhysShw, ("HCPhys=%RHp HCPhysShw=%RHp (cr3)\n", HCPhys, HCPhysShw), false);
AssertMsgReturn((cr3 & GST_CR3_PAGE_MASK) == GCPhys, ("GCPhys=%RGp cr3=%RGp\n", GCPhys, (RTGCPHYS)cr3), false);
AssertMsgFailed(("Present bit doesn't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
cErrors++;
AssertMsgFailed(("Physical address doesn't match! iPml4 %d pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4, pPml4eDst->u, pPml4eSrc->u, pShwPdpt->GCPhys, GCPhysPdptSrc));
cErrors++;
AssertMsgFailed(("User/Write/NoExec bits don't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
cErrors++;
AssertMsgFailed(("Present bit doesn't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));
cErrors++;
AssertMsgFailed(("Physical address doesn't match! iPml4 %d iPdpt %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4, iPdpt, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
AssertMsgFailed(("Physical address doesn't match! iPdpt %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPdpt, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
cErrors++;
AssertMsgFailed(("User/Write/NoExec bits don't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));
cErrors++;
AssertMsgFailed(("Mapping shall only have PGM_PDFLAGS_MAPPING set! PdeDst.u=%#RX64\n", (uint64_t)PdeDst.u));
cErrors++;
if (!pPoolPage)
cErrors++;
AssertMsgFailed(("PDE flags PWT and/or PCD are set at %RGv! These flags are not virtualized! PdeDst=%#RX64\n",
cErrors++;
cErrors++;
cErrors++;
|| !fBigPagesSupported)
cErrors++;
!= (!PdeSrc.b.u1Size || !fBigPagesSupported ? BTH_PGMPOOLKIND_PT_FOR_PT : BTH_PGMPOOLKIND_PT_FOR_BIG))
cErrors++;
if (!pPhysPage)
cErrors++;
cErrors++;
|| !fBigPagesSupported)
AssertMsgFailed(("Cannot map/convert guest physical address %RGp in the PDE at %RGv! PdeSrc=%#RX64\n",
cErrors++;
/// @todo We get here a lot on out-of-sync CR3 entries. The access handler should zap them to avoid false alarms here!
cErrors++;
cErrors++;
const unsigned offPTSrc = 0;
if (!(SHW_PTE_GET_U(PteDst) & (X86_PTE_P | PGM_PTFLAGS_TRACK_DIRTY))) /** @todo deal with ALL handlers and CSAM !P pages! */
# ifdef IN_RING3
AssertMsgFailed(("Out of sync (!P) PTE at %RGv! PteSrc=%#RX64 PteDst=%#RX64 pPTSrc=%RGv iPTSrc=%x PdeSrc=%x physpte=%RGp\n",
cErrors++;
uint64_t fIgnoreFlags = GST_PTE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_G | X86_PTE_D | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_PAT;
# ifdef IN_RING3
cErrors++;
AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
if (!pPhysPage)
cErrors++;
AssertMsgFailed(("Invalid guest page at %RGv is writable! GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp pPhysPage:%R[pgmpage] GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("WRITE access flagged at %RGv but the page is writable! pPhysPage=%R[pgmpage] PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("ALL access flagged at %RGv but the page is present! pPhysPage=%R[pgmpage] PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
cErrors++;
cErrors++;
cErrors++;
AssertMsgFailed(("!DIRTY page at %RGv has a mismatching accessed bit! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("PGM_PTFLAGS_TRACK_DIRTY set at %RGv but no accessed bit emulation! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("!ACCESSED page at %RGv has the accessed bit set! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
# ifdef DEBUG_sandervl
AssertMsgFailed(("Flags mismatch at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
uint64_t fIgnoreFlags = X86_PDE_AVL_MASK | GST_PDE_PG_MASK | X86_PDE4M_G | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_PWT | X86_PDE4M_PCD;
cErrors++;
cErrors++;
AssertMsgFailed(("!DIRTY page at %RGv has a mismatching accessed bit! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("PGM_PDFLAGS_TRACK_DIRTY set at %RGv but no accessed bit emulation! PdeSrc=%#RX64 PdeDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("!ACCESSED page at %RGv has the accessed bit set! PdeSrc=%#RX64 PdeDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("Flags mismatch (B) at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PdeSrc=%#RX64 PdeDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("The PTE at %RGv emulating a 2/4M page is marked TRACK_DIRTY! PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
fIgnoreFlags = X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_PAT | X86_PTE_D | X86_PTE_A | X86_PTE_G | X86_PTE_PAE_NX;
# ifdef IN_RING3
cErrors++;
AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
if (!pPhysPage)
cErrors++;
AssertMsgFailed(("Invalid guest page at %RGv is writable! GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp pPhysPage=%R[pgmpage] GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("WRITE access flagged at %RGv but the page is writable! pPhysPage=%R[pgmpage] PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("ALL access flagged at %RGv but the page is present! pPhysPage=%R[pgmpage] PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
&& (PdeSrc.u & ~(fIgnoreFlags | X86_PTE_RW)) != (SHW_PTE_GET_U(PteDst) & ~fIgnoreFlags) /* lazy phys handler dereg. */
AssertMsgFailed(("Flags mismatch (BT) at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
# ifdef DEBUG
if (cErrors)
return cErrors;
int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPageCR3, GCPhysCR3 & GST_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
# ifdef IN_RC
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# ifdef IN_RC
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# ifdef IN_RC
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
rc = pgmPoolAlloc(pVM, GCPhysCR3 & GST_CR3_PAGE_MASK, BTH_PGMPOOLKIND_ROOT, SHW_POOL_ROOT_IDX, GCPhysCR3 >> PAGE_SHIFT, &pNewShwPageCR3, true /* lock page */);
# ifdef IN_RC
# ifdef IN_RING0
# ifndef PGM_WITHOUT_MAPPINGS
Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL) || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
# ifdef IN_RC
if ( pOldShwPageCR3
&& pOldShwPageCR3 != pNewShwPageCR3 /* @todo can happen due to incorrect syncing between REM & PGM; find the real cause */)
# ifndef PGM_WITHOUT_MAPPINGS
return rc;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# ifndef PGM_WITHOUT_MAPPINGS
# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
pgmPoolFreeByPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), pVCpu->pgm.s.iShwUser, pVCpu->pgm.s.iShwUserTable);
return rc;
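
/*
 * Illustrative note: the root shadow page that MapCR3 allocates with
 * pgmPoolAlloc (passing true for the lock-page argument) stays pinned in the
 * shadow page pool while it is the active shadow CR3; it is released again
 * via pgmPoolFreeByPage when the shadow CR3 is unmapped or replaced.
 */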