PGMAllBth.h revision dac32fabf3180155d9a9b09e778852e5624f7ccd
/* $Id$ */
/** @file
* VBox - Page Manager, Shadow+Guest Paging Template - All context code.
*
* This file is a big challenge!
*/
/*
* Copyright (C) 2006-2007 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage);
#ifdef VBOX_STRICT
PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
#endif
#ifdef PGMPOOL_WITH_USER_TRACKING
DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVMCPU pVCpu, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys);
#endif
/* Filter out some illegal combinations of guest and shadow paging, so we can remove redundant checks inside functions. */
#if PGM_GST_TYPE == PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT
# error "Invalid combination; PAE guest implies PAE shadow"
#endif
#if (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
 && !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT)
# error "Invalid combination; real or protected mode without paging implies 32 bits or PAE shadow paging."
#endif
#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE) \
 && !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT)
# error "Invalid combination; 32 bits guest paging or PAE implies 32 bits or PAE shadow paging."
#endif
#if (PGM_GST_TYPE == PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT) \
|| (PGM_SHW_TYPE == PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PROT)
# error "Invalid combination; AMD64 guest implies AMD64 shadow and vice versa"
#endif
#ifdef IN_RING0 /* no mappings in VT-x and AMD-V mode */
# define PGM_WITHOUT_MAPPINGS
#endif
#ifndef IN_RING3
/**
* #PF Handler for raw-mode guest execution.
*
* @returns VBox status code (appropriate for trap handling and GC return).
*
* @param pVCpu VMCPU Handle.
* @param uErr The trap error code.
* @param pRegFrame Trap register frame.
* @param pvFault The fault address.
*/
PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
# if defined(IN_RC) && defined(VBOX_STRICT)
# endif
# if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
&& PGM_SHW_TYPE != PGM_TYPE_NESTED \
&& (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
/*
* Hide the instruction fetch trap indicator for now.
*/
/** @todo NXE will change this and we must fix NXE in the switcher too! */
if (uErr & X86_TRAP_PF_ID)
{
uErr &= ~X86_TRAP_PF_ID;
}
# endif
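/*
 * Illustrative sketch (not part of the template): how the architectural #PF
 * error code bits consulted throughout this handler decode. The helper name
 * is hypothetical; the bit meanings are taken from the x86 manuals.
 */
#if 0 /* illustrative only */
static void pgmBthSketchDecodePfErr(uint32_t uErr)
{
    bool const fPresent    = !!(uErr & X86_TRAP_PF_P);    /* bit 0: protection violation if set, page not present if clear. */
    bool const fWrite      = !!(uErr & X86_TRAP_PF_RW);   /* bit 1: write access if set, read if clear. */
    bool const fUser       = !!(uErr & X86_TRAP_PF_US);   /* bit 2: user-mode access if set, supervisor if clear. */
    bool const fRsvdBits   = !!(uErr & X86_TRAP_PF_RSVD); /* bit 3: reserved bit set in a paging structure. */
    bool const fInstrFetch = !!(uErr & X86_TRAP_PF_ID);   /* bit 4: instruction fetch (relevant with NX). */
    NOREF(fPresent); NOREF(fWrite); NOREF(fUser); NOREF(fRsvdBits); NOREF(fInstrFetch);
}
#endif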
/*
* Get PDs.
*/
int rc;
# if PGM_GST_TYPE == PGM_TYPE_32BIT
# if PGM_GST_TYPE == PGM_TYPE_PAE
unsigned iPDSrc = 0; /* initialized to shut up gcc */
unsigned iPDSrc = 0; /* initialized to shut up gcc */
# endif
/* Quick check for a valid guest trap. (PAE & AMD64) */
if (!pPDSrc)
{
LogFlow(("Trap0eHandler: guest PML4 %d not present CR3=%RGp\n", (int)((pvFault >> X86_PML4_SHIFT) & X86_PML4_MASK), CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK));
# else
LogFlow(("Trap0eHandler: guest iPDSrc=%u not present CR3=%RGp\n", iPDSrc, CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK));
# endif
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
return VINF_EM_RAW_GUEST_TRAP;
}
# endif
# else /* !PGM_WITH_PAGING */
const unsigned iPDSrc = 0;
# endif /* !PGM_WITH_PAGING */
/* Fetch the guest PDE */
# else
# endif
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
const unsigned iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK; /* pPDDst index, not used with the pool. */
# if PGM_GST_TYPE != PGM_TYPE_PAE
/* Fake PDPT entry; access control handled on the page table level, so allow everything. */
PdpeSrc.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
# endif
if (rc != VINF_SUCCESS)
{
return rc;
}
# if PGM_GST_TYPE == PGM_TYPE_PROT
/* AMD-V nested paging */
/* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
# endif
if (rc != VINF_SUCCESS)
{
return rc;
}
if (rc != VINF_SUCCESS)
{
return rc;
}
# endif
/*
* If we successfully correct the write protection fault due to dirty bit
* tracking, or this page fault is a genuine one, then return immediately.
*/
if ( rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT
|| rc == VINF_EM_RAW_GUEST_TRAP)
{
= rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVCpu->pgm.s.StatRZTrap0eTime2DirtyAndAccessed : &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
LogBird(("Trap0eHandler: returns %s\n", rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? "VINF_SUCCESS" : "VINF_EM_RAW_GUEST_TRAP"));
}
# if 0 /* rarely useful; leave for debugging. */
# endif
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
/*
* A common case is the not-present error caused by lazy page table syncing.
*
* It is IMPORTANT that we weed out any access to non-present shadow PDEs here
* so we can safely assume that the shadow PT is present when calling SyncPage later.
*
* On failure, we ASSUME that SyncPT is out of memory or detected some kind
* of mapping conflict and defer to SyncCR3 in R3.
* (Again, we do NOT support access handlers for non-present guest pages.)
*
*/
if ( !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
)
{
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2SyncPT; });
if (RT_SUCCESS(rc))
{
return rc;
}
return VINF_PGM_SYNC_CR3;
}
/*
* Check if this address is within any of our mappings.
*
* This is *very* fast and it's gonna save us a bit of effort below and prevent
* us from screwing ourselves with MMIO2 pages which have a GC Mapping (VRam).
* (BTW, it's impossible to have physical access handlers in a mapping.)
*/
{
{
break;
{
/*
* The first thing we check is if we've got an undetected conflict.
*/
{
while (iPT-- > 0)
{
return VINF_PGM_SYNC_CR3;
}
}
/*
* Check if the fault address is in a virtual page access handler range.
*/
PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->HyperVirtHandlers, pvFault);
if ( pCur
&& uErr & X86_TRAP_PF_RW)
{
# ifdef IN_RC
rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
# else
AssertFailed();
# endif
return rc;
}
/*
* Pretend we're not here and let the guest handle the trap.
*/
LogFlow(("PGM: Mapping access -> route trap to recompiler!\n"));
return VINF_EM_RAW_GUEST_TRAP;
}
}
} /* pgmAreMappingsEnabled(&pVM->pgm.s) */
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
/*
* Check if this fault address is flagged for special treatment,
* which means we'll have to figure out the physical address and
* check flags associated with it.
*
* ASSUME that we can limit any special access handling to pages
* in page tables which the guest believes to be present.
*/
{
# if PGM_GST_TYPE == PGM_TYPE_AMD64
bool fBigPagesSupported = true;
# else
# endif
else
{
if (RT_SUCCESS(rc))
{
}
}
# else
/* No paging so the fault address is the physical address */
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
/*
* If we have a GC address we'll check if it has any flags set.
*/
if (GCPhys != NIL_RTGCPHYS)
{
if (RT_SUCCESS(rc)) /** just handle the failure immediately (it returns) and make things easier to read. */
{
{
{
/*
* Physical page access handler.
*/
PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysFault);
if (pCur)
{
# ifdef PGM_SYNC_N_PAGES
/*
* If the region is write protected and we got a page not present fault, then sync
* the pages. If the fault was caused by a read, then restart the instruction.
* In case of write access continue to the GC write handler.
*
* ASSUMES that there is only one handler per page or that they have similar write properties.
*/
&& !(uErr & X86_TRAP_PF_P))
{
if ( RT_FAILURE(rc)
|| !(uErr & X86_TRAP_PF_RW)
|| rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
{
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndPhys; });
return rc;
}
}
# endif
("Unexpected trap for physical handler: %08X (phys=%08x) pPage=%R[pgmpage] uErr=%X, enum=%d\n", pvFault, GCPhys, pPage, uErr, pCur->enmType));
{
# ifdef IN_RING0
# else
# endif
if (fLeaveLock)
if (fLeaveLock)
# ifdef VBOX_WITH_STATISTICS
pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysFault);
if (pCur)
# else
# endif
}
else
# endif
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndPhys; });
return rc;
}
}
else
{
# ifdef PGM_SYNC_N_PAGES
/*
* If the region is write protected and we got a page not present fault, then sync
* the pages. If the fault was caused by a read, then restart the instruction.
* In case of write access continue to the GC write handler.
*/
&& !(uErr & X86_TRAP_PF_P))
{
if ( RT_FAILURE(rc)
|| !(uErr & X86_TRAP_PF_RW))
{
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndVirt; });
return rc;
}
}
# endif
/*
* Ok, it's a virtual page access handler.
*
* Since it's faster to search by address, we'll do that first
* and then retry by GCPhys if that fails.
*/
/** @todo r=bird: perhaps we should consider looking up by physical address directly now? */
/** @note r=svl: true, but lookup on virtual address should remain as a fallback as phys & virt trees might be out of sync, because the
* page was changed without us noticing it (not-present -> present without invlpg or mov cr3, xxx)
*/
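/*
 * Illustrative sketch (not part of the template): the lookup order described
 * above - search the virtual handler tree by address first, and fall back to
 * a physical-address based lookup if the trees are momentarily out of sync.
 * The fallback helper named here is hypothetical.
 */
#if 0 /* illustrative only */
static PPGMVIRTHANDLER pgmBthSketchFindVirtHandler(PVM pVM, RTGCPTR GCPtrFault, RTGCPHYS GCPhysFault)
{
    PPGMVIRTHANDLER pHandler = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, GCPtrFault);
    if (pHandler)
        return pHandler;                                        /* fast path: hit by virtual address. */
    return pgmBthSketchFindVirtHandlerByPhys(pVM, GCPhysFault); /* hypothetical physical-address fallback. */
}
#endif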
PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault);
if (pCur)
{
|| !(uErr & X86_TRAP_PF_P)
("Unexpected trap for virtual handler: %RGv (phys=%RGp) pPage=%R[pgmpage] uErr=%X, enum=%d\n", pvFault, GCPhys, pPage, uErr, pCur->enmType));
&& ( uErr & X86_TRAP_PF_RW
{
# ifdef IN_RC
rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
# else
# endif
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndVirt; });
return rc;
}
/* Unhandled part of a monitored page */
}
else
{
/* Check by physical address. */
unsigned iPage;
if ( pCur
&& ( uErr & X86_TRAP_PF_RW
{
# ifdef IN_RC
RTGCPTR off = (iPage << PAGE_SHIFT) + (pvFault & PAGE_OFFSET_MASK) - (pCur->Core.Key & PAGE_OFFSET_MASK);
# else
# endif
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndVirt; });
return rc;
}
}
}
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
/*
* There is a handled area of the page, but this fault doesn't belong to it.
* We must emulate the instruction.
*
* To avoid a (non-fatal) crash in the interpreter and going back to the recompiler,
* we first check if this was a page-not-present fault for a page with only
* write access handlers. Restart the instruction if it wasn't a write access.
*/
&& !(uErr & X86_TRAP_PF_P))
{
if ( RT_FAILURE(rc)
|| !(uErr & X86_TRAP_PF_RW))
{
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndPhys; });
return rc;
}
}
/** @todo This particular case can cause quite a lot of overhead. E.g. early stage of kernel booting in Ubuntu 6.06
* It's writing to an unhandled part of the LDT page several million times.
*/
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndUnhandled; });
return rc;
} /* if any kind of handler */
if (uErr & X86_TRAP_PF_P)
{
/*
* The page isn't marked, but it might still be monitored by a virtual page access handler.
* (ASSUMES no temporary disabling of virtual handlers.)
*/
/** @todo r=bird: Since the purpose is to catch out of sync pages with virtual handler(s) here,
* we should correct both the shadow page table and physical memory flags, and not only check for
* accesses within the handler region but for access to pages with virtual handlers. */
PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault);
if (pCur)
{
|| !(uErr & X86_TRAP_PF_P)
("Unexpected trap for virtual handler: %08X (phys=%08x) %R[pgmpage] uErr=%X, enum=%d\n", pvFault, GCPhys, pPage, uErr, pCur->enmType));
&& ( uErr & X86_TRAP_PF_RW
{
# ifdef IN_RC
rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
# else
# endif
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndVirt; });
return rc;
}
}
}
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
}
else
{
/*
* When the guest accesses invalid physical memory (e.g. probing
* of RAM or accessing a remapped MMIO range), then we'll fall
* back to the recompiler to emulate the instruction.
*/
return VINF_EM_RAW_EMULATE_INSTR;
}
# ifdef PGM_OUT_OF_SYNC_IN_GC /** @todo remove this bugger. */
/*
* We are here only if the page is present in the guest page tables and
* the trap is not handled by our handlers.
*
* Check it for page out-of-sync situation.
*/
if (!(uErr & X86_TRAP_PF_P))
{
/*
* Page is not present in our page tables.
* Try to sync it!
* BTW, fPageShw is invalid in this branch!
*/
if (uErr & X86_TRAP_PF_US)
else /* supervisor */
# if defined(LOG_ENABLED) && !defined(IN_RING0)
Log(("Page out of sync: %RGv eip=%08x PdeSrc.n.u1User=%d fPageGst=%08llx GCPhys=%RGp scan=%d\n",
pvFault, pRegFrame->eip, PdeSrc.n.u1User, fPageGst, GCPhys, CSAMDoesPageNeedScanning(pVM, (RTRCPTR)pRegFrame->eip)));
# endif /* LOG_ENABLED */
{
if ( RT_SUCCESS(rc)
&& !(fPageGst & X86_PTE_US))
{
/* Note: can't check for X86_TRAP_ID bit, because that requires execute disable support on the CPU */
# ifdef CSAM_DETECT_NEW_CODE_PAGES
# endif /* CSAM_DETECT_NEW_CODE_PAGES */
)
{
if (rc != VINF_SUCCESS)
{
/*
* CSAM needs to perform a job in ring 3.
*
* Sync the page before going to the host context; otherwise we'll end up in a loop if
* CSAM fails (e.g. instruction crosses a page boundary and the next page is not present)
*/
LogFlow(("CSAM ring 3 job\n"));
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2CSAM; });
return rc;
}
}
# ifdef CSAM_DETECT_NEW_CODE_PAGES
else if ( uErr == X86_TRAP_PF_RW
{
/* In case of a write to a non-present supervisor shadow page, we'll take special precautions
* to detect loading of new code pages.
*/
/*
* Decode the instruction.
*/
rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &PC);
if (rc == VINF_SUCCESS)
{
/* For now we'll restrict this to rep movsw/d instructions */
if ( rc == VINF_SUCCESS
{
}
}
}
# endif /* CSAM_DETECT_NEW_CODE_PAGES */
/*
* Mark this page as safe.
*/
/** @todo not correct for pages that contain both code and data!! */
}
}
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0) */
if (RT_SUCCESS(rc))
{
/* The page was successfully synced, return to the guest. */
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSync; });
return VINF_SUCCESS;
}
}
else /* uErr & X86_TRAP_PF_P: */
{
/*
* Write protected pages are made writable when the guest makes the first
* write to them. This happens for pages that are shared, write monitored
* and not yet allocated.
*
* Also, a side effect of not flushing global PDEs is out-of-sync pages due
* to physically monitored regions that are no longer valid.
*/
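/*
 * Illustrative sketch (not part of the template): a page may only be mapped
 * writable once it is in the ALLOCATED state; zero, shared and write-monitored
 * pages must be dealt with on the first write, as described above.
 */
#if 0 /* illustrative only */
static bool pgmBthSketchNeedsMakeWritable(PPGMPAGE pPage)
{
    return PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED;
}
#endif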
{
{
Log(("PGM #PF: Make writable: %RGp %R[pgmpage] pvFault=%RGp uErr=%#x\n",
if (rc != VINF_SUCCESS)
{
return rc;
}
return VINF_EM_NO_MEMORY;
}
/* Check to see if we need to emulate the instruction as X86_CR0_WP has been cleared. */
{
if ( RT_SUCCESS(rc)
&& !(fPageGst & X86_PTE_RW))
{
if (RT_SUCCESS(rc))
else
return rc;
}
AssertMsg(RT_SUCCESS(rc), ("Unexpected r/w page %RGv flag=%x rc=%Rrc\n", pvFault, (uint32_t)fPageGst, rc));
}
# endif
/// @todo count the above case; else
if (uErr & X86_TRAP_PF_US)
else /* supervisor */
/*
* Note: Do NOT use PGM_SYNC_NR_PAGES here. That only works if the
* page is not present, which is not true in this case.
*/
if (RT_SUCCESS(rc))
{
/*
* Page was successfully synced, return to guest.
* First invalidate the page as it might be in the TLB.
*/
# if PGM_SHW_TYPE == PGM_TYPE_EPT
# else
# endif
# ifdef VBOX_STRICT
if (!HWACCMIsNestedPagingActive(pVM))
{
LogFlow(("Obsolete physical monitor page out of sync %RGv - phys %RGp flags=%08llx\n", pvFault, GCPhys, (uint64_t)fPageGst));
}
AssertMsg((RT_SUCCESS(rc) && (fPageShw & X86_PTE_RW)) || pVM->cCpus > 1 /* new monitor can be installed/page table flushed between the trap exit and PGMTrap0eHandler */, ("rc=%Rrc fPageShw=%RX64\n", rc, fPageShw));
# endif /* VBOX_STRICT */
STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndObs; });
return VINF_SUCCESS;
}
}
# ifdef VBOX_STRICT
/*
* Check for VMM page flags vs. Guest page flags consistency.
* Currently only for debug purposes.
*/
if (RT_SUCCESS(rc))
{
/* Get guest page flags. */
if (RT_SUCCESS(rc))
{
/*
* Compare page flags.
* Note: we have AVL, A, D bits desynched.
*/
AssertMsg((fPageShw & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)) == (fPageGst & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)),
("Page flags mismatch! pvFault=%RGv uErr=%x GCPhys=%RGp fPageShw=%RX64 fPageGst=%RX64\n", pvFault, (uint32_t)uErr, GCPhys, fPageShw, fPageGst));
}
else
}
else
# endif /* VBOX_STRICT */
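/*
 * Illustrative sketch (not part of the template): the strict check above
 * compares shadow and guest page flags while ignoring the accessed, dirty
 * and AVL bits, which are deliberately allowed to be out of sync.
 */
#if 0 /* illustrative only */
static bool pgmBthSketchPageFlagsMatch(uint64_t fPageShw, uint64_t fPageGst)
{
    uint64_t const fIgnore = X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK;
    return (fPageShw & ~fIgnore) == (fPageGst & ~fIgnore);
}
#endif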
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
}
# endif /* PGM_OUT_OF_SYNC_IN_GC */
}
else /* GCPhys == NIL_RTGCPHYS */
{
/*
* Page not present in Guest OS or invalid page table address.
* This is potential virtual page access handler food.
*
* For the present we'll say that our access handlers don't
* work for this case - we've already discarded the page table
* not present case which is identical to this.
*
* When we perchance find we need this, we will probably have AVL
* trees (offset based) to operate on and we can measure their speed
* against mapping a page table and probably rearrange this handling
* a bit. (Like, searching virtual ranges before checking the
* physical address.)
*/
}
}
/* else: !present (guest) */
/*
* Conclusion, this is a guest trap.
*/
LogFlow(("PGM: Unhandled #PF -> route trap to recompiler!\n"));
return VINF_EM_RAW_GUEST_TRAP;
# else
/* present, but not a monitored page; perhaps the guest is probing physical memory */
return VINF_EM_RAW_EMULATE_INSTR;
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
# else /* PGM_GST_TYPE != PGM_TYPE_32BIT */
return VERR_INTERNAL_ERROR;
# endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */
}
#endif /* !IN_RING3 */
/**
* Emulation of the invlpg instruction.
*
*
* @returns VBox status code.
*
* @param pVCpu The VMCPU handle.
* @param GCPtrPage Page to invalidate.
*
* @remark ASSUMES that the guest is updating before invalidating. This order
* isn't required by the CPU, so this is speculative and could cause
* trouble.
* @remark No TLB shootdown is done on any other VCPU as we assume that
* invlpg emulation is the *only* reason for calling this function.
* (The guest has to shoot down TLB entries on other CPUs itself)
* Currently true, but keep in mind!
*
* @todo Flush page or page directory only if necessary!
* @todo Add a #define for simply invalidating the page.
*/
{
&& PGM_SHW_TYPE != PGM_TYPE_NESTED \
&& PGM_SHW_TYPE != PGM_TYPE_EPT
int rc;
# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
if (pPool->cDirtyPages)
# endif
/*
* Get the shadow PD entry and skip out if this PD isn't present.
* (Guessing that it is frequent for a shadow PDE to not be present, do this first.)
*/
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
/* Fetch the pgm pool shadow descriptor. */
/* If the shadow PDPE isn't present, then skip the invalidate. */
{
return VINF_SUCCESS;
}
/* Fetch the pgm pool shadow descriptor. */
# else /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */
/* PML4 */
if (rc != VINF_SUCCESS)
{
AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT, ("Unexpected rc=%Rrc\n", rc));
return VINF_SUCCESS;
}
{
return VINF_SUCCESS;
}
# endif /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */
{
return VINF_SUCCESS;
}
# if defined(IN_RC)
/* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
# endif
/*
* Get the guest PD entry and calc big page.
*/
# if PGM_GST_TYPE == PGM_TYPE_32BIT
# else /* PGM_GST_TYPE != PGM_TYPE_32BIT */
unsigned iPDSrc = 0;
# if PGM_GST_TYPE == PGM_TYPE_PAE
# else /* AMD64 */
# endif
if (pPDSrc)
else
PdeSrc.u = 0;
# endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */
# if PGM_GST_TYPE == PGM_TYPE_AMD64
# else
# endif
# ifdef IN_RING3
/*
* If a CR3 Sync is pending we may ignore the invalidate page operation
* depending on the kind of sync and if it's a global page or not.
*/
&& fIsBigPage
)
)
# else
# endif
{
return VINF_SUCCESS;
}
# endif /* IN_RING3 */
# if PGM_GST_TYPE == PGM_TYPE_AMD64
/* Fetch the pgm pool shadow descriptor. */
/* Fetch the pgm pool shadow descriptor. */
{
LogFlow(("InvalidatePage: Out-of-sync PML4E (P/GCPhys) at %RGv GCPhys=%RGp vs %RGp Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
return VINF_SUCCESS;
}
{
/*
* Mark not present so we can resync the PML4E when it's used.
*/
LogFlow(("InvalidatePage: Out-of-sync PML4E at %RGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
}
else if (!pPml4eSrc->n.u1Accessed)
{
/*
* Mark not present so we can set the accessed bit.
*/
LogFlow(("InvalidatePage: Out-of-sync PML4E (A) at %RGv Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
}
/* Check if the PDPT entry has changed. */
{
LogFlow(("InvalidatePage: Out-of-sync PDPE (P/GCPhys) at %RGv GCPhys=%RGp vs %RGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
return VINF_SUCCESS;
}
{
/*
* Mark not present so we can resync the PDPTE when it's used.
*/
LogFlow(("InvalidatePage: Out-of-sync PDPE at %RGv PdpeSrc=%RX64 PdpeDst=%RX64\n",
}
{
/*
* Mark not present so we can set the accessed bit.
*/
LogFlow(("InvalidatePage: Out-of-sync PDPE (A) at %RGv PdpeSrc=%RX64 PdpeDst=%RX64\n",
}
# endif /* PGM_GST_TYPE == PGM_TYPE_AMD64 */
/*
* Deal with the Guest PDE.
*/
rc = VINF_SUCCESS;
{
# ifndef PGM_WITHOUT_MAPPING
if (PdeDst.u & PGM_PDFLAGS_MAPPING)
{
/*
* Conflict - Let SyncPT deal with it to avoid duplicate code.
*/
}
else
# endif /* !PGM_WITHOUT_MAPPING */
{
/*
* Mark not present so we can resync the PDE when it's used.
*/
LogFlow(("InvalidatePage: Out-of-sync at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
}
else if (!PdeSrc.n.u1Accessed)
{
/*
* Mark not present so we can set the accessed bit.
*/
LogFlow(("InvalidatePage: Out-of-sync (A) at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
}
else if (!fIsBigPage)
{
/*
* 4KB - page.
*/
/* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
# endif
{
# if 0 /* likely cause of a major performance regression; must be SyncPageWorkerTrackDeref then */
{
# ifdef PGMPOOL_WITH_USER_TRACKING
/* This is very unlikely with caching/monitoring enabled. */
# endif
}
# else /* Syncing it here isn't 100% safe and it's probably not worth spending time syncing it. */
if (RT_SUCCESS(rc))
rc = VINF_SUCCESS;
# endif
}
else
{
/*
* The page table address changed.
*/
LogFlow(("InvalidatePage: Out-of-sync at %RGp PdeSrc=%RX64 PdeDst=%RX64 ShwGCPhys=%RGp iPDDst=%#x\n",
}
}
else
{
/*
* 2/4MB - page.
*/
/* Before freeing the page, check if anything really changed. */
/* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
# endif
{
/* ASSUMES that the given bits are identical for 4M and normal PDEs */
/** @todo PAT */
&& ( PdeSrc.b.u1Dirty /** @todo rainy day: What about read-only 4M pages? not very common, but still... */
|| (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)))
{
LogFlow(("Skipping flush for big page containing %RGv (PD=%X .u=%RX64)-> nothing has changed!\n", GCPtrPage, iPDSrc, PdeSrc.u));
# if defined(IN_RC)
/* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
# endif
return VINF_SUCCESS;
}
}
/*
* Ok, the page table is present and it's been changed in the guest.
* If we're in host context, we'll just mark it as not present taking the lazy approach.
* We could do this for some flushes in GC too, but we need an algorithm for
* deciding which 4MB pages contain code likely to be executed very soon.
*/
LogFlow(("InvalidatePage: Out-of-sync PD at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
}
}
else
{
/*
* Page directory is not present, mark shadow PDE not present.
*/
if (!(PdeDst.u & PGM_PDFLAGS_MAPPING))
{
}
else
{
}
}
# if defined(IN_RC)
/* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
# endif
return rc;
#else /* guest real and protected mode */
/* There's no such thing as InvalidatePage when paging is disabled, so just ignore. */
return VINF_SUCCESS;
#endif
}
#ifdef PGMPOOL_WITH_USER_TRACKING
/**
* Update the tracking of shadowed pages.
*
* @param pVCpu The VMCPU handle.
* @param pShwPage The shadow page.
* @param HCPhys The physical page that is being dereferenced.
*/
DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVMCPU pVCpu, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys)
{
# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
LogFlow(("SyncPageWorkerTrackDeref: Damn HCPhys=%RHp pShwPage->idx=%#x!!!\n", HCPhys, pShwPage->idx));
/** @todo If this turns out to be a bottleneck (*very* likely) two things can be done:
* 1. have a medium sized HCPhys -> GCPhys TLB (hash?)
* 2. write protect all shadowed pages. I.e. implement caching.
*/
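/*
 * Illustrative sketch (not part of the template) of item 1 above: a small
 * direct-mapped HCPhys -> GCPhys translation cache consulted before scanning
 * the ram ranges. All names and sizes here are hypothetical.
 */
#if 0 /* illustrative only */
#define SKETCH_TLB_ENTRIES 64
typedef struct SKETCHTLBE { RTHCPHYS HCPhys; RTGCPHYS GCPhys; } SKETCHTLBE;
static SKETCHTLBE g_aSketchTlb[SKETCH_TLB_ENTRIES];

static bool pgmBthSketchTlbLookup(RTHCPHYS HCPhys, RTGCPHYS *pGCPhys)
{
    SKETCHTLBE *pEntry = &g_aSketchTlb[(HCPhys >> PAGE_SHIFT) & (SKETCH_TLB_ENTRIES - 1)];
    if (pEntry->HCPhys == HCPhys)
    {
        *pGCPhys = pEntry->GCPhys;  /* hit: skip the linear ram range walk below. */
        return true;
    }
    return false;                   /* miss: fall back to walking the ram ranges. */
}
#endif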
/*
* Find the guest address.
*/
pRam;
{
while (iPage-- > 0)
{
{
return;
}
}
}
for (;;)
# else /* !PGMPOOL_WITH_GCPHYS_TRACKING */
# endif /* !PGMPOOL_WITH_GCPHYS_TRACKING */
}
/**
* Update the tracking of shadowed pages.
*
* @param pVCpu The VMCPU handle.
* @param pShwPage The shadow page.
* @param u16 The top 16 bits of pPage->HCPhys.
* @param pPage Pointer to the guest page. This will be modified.
* @param iPTDst The index into the shadow table.
*/
DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackAddref)(PVMCPU pVCpu, PPGMPOOLPAGE pShwPage, uint16_t u16, PPGMPAGE pPage, const unsigned iPTDst)
{
# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
/*
* Just deal with the simple first time here.
*/
if (!u16)
{
}
else
/* write back */
Log2(("SyncPageWorkerTrackAddRef: u16=%#x->%#x iPTDst=%#x\n", u16, PGM_PAGE_GET_TRACKING(pPage), iPTDst));
# endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
/* update statistics. */
}
#endif /* PGMPOOL_WITH_USER_TRACKING */
/**
* Creates a 4K shadow page for a guest page.
*
* For 4M pages the caller must convert the PDE4M to a PTE; this includes adjusting the
* physical address. Of the PdeSrc argument only the flags are used. No page structures
* will be mapped in this function.
*
* @param pVCpu The VMCPU handle.
* @param pPteDst Destination page table entry.
* @param PdeSrc Source page directory entry (i.e. Guest OS page directory entry).
* Can safely assume that only the flags are being used.
* @param PteSrc Source page table entry (i.e. Guest OS page table entry).
* @param pShwPage Pointer to the shadow page.
* @param iPTDst The index into the shadow table.
*
* @remark Not used for 2/4MB pages!
*/
DECLINLINE(void) PGM_BTH_NAME(SyncPageWorker)(PVMCPU pVCpu, PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc, PPGMPOOLPAGE pShwPage, unsigned iPTDst)
{
{
# if defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT) \
{
}
# endif
/*
* Find the ram range.
*/
if (RT_SUCCESS(rc))
{
#ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
/* Try make the page writable if necessary. */
{
}
#endif
/** @todo investigate PWT, PCD and PAT. */
/*
* Make page table entry.
*/
{
/** @todo r=bird: Are we actually handling dirty and access bits for pages with access handlers correctly? No. */
{
#if PGM_SHW_TYPE == PGM_TYPE_EPT
/* PteDst.n.u1Write = 0 && PteDst.n.u1Size = 0 */
#else
PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
#endif
}
else
{
LogFlow(("SyncPageWorker: monitored page (%RHp) -> mark not present\n", PGM_PAGE_GET_HCPHYS(pPage)));
PteDst.u = 0;
}
/** @todo count these two kinds. */
}
else
{
/*
* If the page or page directory entry is not marked accessed,
* we mark the page not present.
*/
{
LogFlow(("SyncPageWorker: page and or page directory not accessed -> mark not present\n"));
PteDst.u = 0;
}
else
/*
* If the page is not flagged as dirty and is writable, then make it read-only, so we can set the dirty bit
* when the page is modified.
*/
{
PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
}
else
#endif
{
#if PGM_SHW_TYPE == PGM_TYPE_EPT
/* PteDst.n.u1Size = 0 */
#else
PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
#endif
}
}
/*
* Make sure only allocated pages are mapped writable.
*/
{
Log3(("SyncPageWorker: write-protecting %RGp pPage=%R[pgmpage]at iPTDst=%d\n", (RTGCPHYS)(PteSrc.u & X86_PTE_PAE_PG_MASK), pPage, iPTDst));
}
#ifdef PGMPOOL_WITH_USER_TRACKING
/*
* Keep user track up to date.
*/
{
PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
{
Log2(("SyncPageWorker: deref! *pPteDst=%RX64 PteDst=%RX64\n", (uint64_t)pPteDst->u, (uint64_t)PteDst.u));
PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
}
}
{
}
#endif /* PGMPOOL_WITH_USER_TRACKING */
/*
* Update statistics and commit the entry.
*/
pShwPage->fSeenNonGlobal = true;
#endif
}
/* else MMIO or invalid page, we must handle them manually in the #PF handler. */
/** @todo count these. */
}
else
{
/*
* Page not-present.
*/
Log2(("SyncPageWorker: page not present in Pte\n"));
#ifdef PGMPOOL_WITH_USER_TRACKING
/* Keep user track up to date. */
{
}
#endif /* PGMPOOL_WITH_USER_TRACKING */
/** @todo count these. */
}
}
/**
* Syncs a guest OS page.
*
* There are no conflicts at this point, neither is there any need for
* page table allocations.
*
* @returns VBox status code.
* @returns VINF_PGM_SYNCPAGE_MODIFIED_PDE if it modifies the PDE in any way.
* @param pVCpu The VMCPU handle.
* @param PdeSrc Page directory entry of the guest.
* @param GCPtrPage Guest context page address.
* @param cPages Number of pages to sync (PGM_SYNC_N_PAGES) (default=1).
* @param uErr Fault error (X86_TRAP_PF_*).
*/
PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr)
{
#if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
|| PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64) \
&& PGM_SHW_TYPE != PGM_TYPE_NESTED \
&& PGM_SHW_TYPE != PGM_TYPE_EPT
# endif
/*
* Assert preconditions.
*/
# if 0 /* rarely useful; leave for debugging. */
# endif
/*
* Get the shadow PDE, find the shadow page table in the pool.
*/
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
/* Fetch the pgm pool shadow descriptor. */
/* Fetch the pgm pool shadow descriptor. */
# endif
{
return VINF_SUCCESS; /* force the instruction to be executed again. */
}
# if PGM_GST_TYPE == PGM_TYPE_AMD64
/* Fetch the pgm pool shadow descriptor. */
# endif
# if defined(IN_RC)
/* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
# endif
/*
* Check that the page is present and that the shadow PDE isn't out of sync.
*/
# if PGM_GST_TYPE == PGM_TYPE_AMD64
# else
# endif
if (!fBigPage)
{
/* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
# endif
}
else
{
/* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
# endif
}
# endif
)
{
/*
* Check that the PDE is marked accessed already.
* Since we set the accessed bit *before* getting here on a #PF, this
* check is only meant for dealing with non-#PF'ing paths.
*/
if (PdeSrc.n.u1Accessed)
{
if (!fBigPage)
{
/*
* 4KB Page - Map the guest page table.
*/
if (RT_SUCCESS(rc))
{
# ifdef PGM_SYNC_N_PAGES
if ( cPages > 1
&& !(uErr & X86_TRAP_PF_P)
{
/*
* This code path is currently only taken when the caller is PGMTrap0eHandler
* for non-present pages!
*
* We're setting PGM_SYNC_NR_PAGES pages around the faulting page to sync it and
* deal with locality.
*/
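/*
 * Illustrative sketch (not part of the template): clamping the window of
 * PGM_SYNC_NR_PAGES entries around the faulting PTE index so it stays within
 * the page table. Helper and parameter names are hypothetical.
 */
#if 0 /* illustrative only */
static void pgmBthSketchSyncWindow(unsigned iPTFault, unsigned cPTEntries, unsigned *piFirst, unsigned *piEnd)
{
    *piEnd   = RT_MIN(iPTFault + PGM_SYNC_NR_PAGES / 2, cPTEntries);                 /* don't run off the end of the PT. */
    *piFirst = iPTFault < PGM_SYNC_NR_PAGES / 2 ? 0 : iPTFault - PGM_SYNC_NR_PAGES / 2;
}
#endif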
/* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
# else
const unsigned offPTSrc = 0;
# endif
iPTDst = 0;
else
{
{
RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(GST_PT_MASK << GST_PT_SHIFT)) | ((offPTSrc + iPTDst) << PAGE_SHIFT);
#ifndef IN_RING0
/*
* Assuming kernel code will be marked as supervisor - and not as user level
* and executed using a conforming code selector - and marked as read-only.
* Also assume that if we're monitoring a page, it's of no interest to CSAM.
*/
)
#endif /* else: CSAM not active */
Log2(("SyncPage: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx%s\n",
}
}
}
else
# endif /* PGM_SYNC_N_PAGES */
{
Log2(("SyncPage: 4K %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx %s\n",
}
}
else /* MMIO or invalid page: emulated in #PF handler. */
{
}
}
else
{
/*
* 4/2MB page - lazy syncing shadow 4K pages.
* (There are many causes of getting here, it's no longer only CSAM.)
*/
/* Calculate the GC physical address of this 4KB shadow page. */
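/*
 * Illustrative sketch (not part of the template): assuming a 32-bit guest 4MB
 * page, the physical address backing one 4KB shadow page is the big page base
 * plus the 4K-aligned offset of the faulting address within the big page.
 */
#if 0 /* illustrative only */
static RTGCPHYS pgmBthSketchBigPageSubPage(RTGCPHYS GCPhysBigPage, RTGCPTR GCPtrPage)
{
    return GCPhysBigPage + (GCPtrPage & (_4M - 1) & ~(RTGCPTR)PAGE_OFFSET_MASK);
}
#endif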
/* Find ram range. */
if (RT_SUCCESS(rc))
{
# ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
/* Try make the page writable if necessary. */
{
}
# endif
/*
* Make shadow PTE entry.
*/
PteDst.u = (PdeSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
{
else
PteDst.u = 0;
}
# ifdef PGMPOOL_WITH_USER_TRACKING
PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
# endif
/* Make sure only allocated pages are mapped writable. */
{
}
/*
* If the page is not flagged as dirty and is writable, then make it read-only
* at PD level, so we can set the dirty bit when the page is modified.
*
* ASSUMES that page access handlers are implemented on page table entry level.
* Thus we will first catch the dirty access and set PDE.D and restart. If
* there is an access handler, we'll trap again and let it work on the problem.
*/
/** @todo r=bird: figure out why we need this here, SyncPT should've taken care of this already.
* As for invlpg, it simply frees the whole shadow PT.
* ...It's possibly because the guest clears it and the guest doesn't really tell us... */
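/*
 * Illustrative sketch (not part of the template) of the dirty-bit tracking
 * described above: a clean but writable guest page is shadowed read-only so
 * that the first write faults, letting us set the dirty bit and restore
 * write access. The helper name is hypothetical.
 */
#if 0 /* illustrative only */
static uint64_t pgmBthSketchDirtyTrackPte(uint64_t fGstPte)
{
    uint64_t fShwPte = fGstPte;
    if ((fGstPte & (X86_PTE_RW | X86_PTE_D)) == X86_PTE_RW)  /* writable but not yet dirty? */
        fShwPte &= ~(uint64_t)X86_PTE_RW;                    /* shadow it read-only; the write fault sets D and restores RW. */
    return fShwPte;
}
#endif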
{
}
else
{
}
Log2(("SyncPage: BIG %RGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx} GCPhys=%RGp%s\n",
}
else
}
# if defined(IN_RC)
/* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
# endif
return VINF_SUCCESS;
}
}
else
{
Log2(("SyncPage: Out-Of-Sync PDE at %RGp PdeSrc=%RX64 PdeDst=%RX64 (GCPhys %RGp vs %RGp)\n",
}
/*
* Mark the PDE not present. Restart the instruction and let #PF call SyncPT.
* Yea, I'm lazy.
*/
# if defined(IN_RC)
/* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
# endif
return VINF_PGM_SYNCPAGE_MODIFIED_PDE;
&& PGM_SHW_TYPE != PGM_TYPE_NESTED \
&& !defined(IN_RC)
# ifdef PGM_SYNC_N_PAGES
/*
* Get the shadow PDE, find the shadow page table in the pool.
*/
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
if (rc != VINF_SUCCESS)
{
return rc;
}
# endif
if ( cPages > 1
&& !(uErr & X86_TRAP_PF_P)
{
/*
* This code path is currently only taken when the caller is PGMTrap0eHandler
* for non-present pages!
*
* We're setting PGM_SYNC_NR_PAGES pages around the faulting page to sync it and
* deal with locality.
*/
iPTDst = 0;
else
{
{
RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
/* Fake the page table entry */
PteSrc.u = GCPtrCurPage;
Log2(("SyncPage: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx%s\n",
break;
}
else
Log4(("%RGv iPTDst=%x pPTDst->a[iPTDst] %RX64\n", (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT), iPTDst, pPTDst->a[iPTDst].u));
}
}
else
# endif /* PGM_SYNC_N_PAGES */
{
RTGCPTR GCPtrCurPage = (GCPtrPage & ~(RTGCPTR)(SHW_PT_MASK << SHW_PT_SHIFT)) | (iPTDst << PAGE_SHIFT);
/* Fake the page table entry */
PteSrc.u = GCPtrCurPage;
Log2(("SyncPage: 4K %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}PteDst=%08llx%s\n",
}
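/*
 * Illustrative sketch (not part of the template): with no guest paging
 * (real/protected mode or nested paging) the "guest" PTE above is faked as a
 * present, writable, accessed and dirty identity mapping of the address.
 */
#if 0 /* illustrative only */
static uint64_t pgmBthSketchFakeIdentityPte(RTGCPTR GCPtrCurPage)
{
    return (GCPtrCurPage & X86_PTE_PAE_PG_MASK)
         | X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_A | X86_PTE_D;
}
#endif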
return VINF_SUCCESS;
#else
return VERR_INTERNAL_ERROR;
#endif
}
/**
* Investigate page fault and handle write protection page faults caused by
* dirty bit tracking.
*
* @returns VBox status code.
* @param pVCpu The VMCPU handle.
* @param uErr Page fault error code.
* @param pPdeDst Shadow page directory entry.
* @param pPdeSrc Guest page directory entry.
* @param GCPtrPage Guest context page address.
*/
PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage)
{
# if PGM_GST_TYPE == PGM_TYPE_AMD64
bool fBigPagesSupported = true;
# else
# endif
# endif
unsigned uPageFaultLevel;
int rc;
# if PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64
# if PGM_GST_TYPE == PGM_TYPE_AMD64
/*
* Real page fault? (PML4E level)
*/
if ( (uErr & X86_TRAP_PF_RSVD)
)
{
uPageFaultLevel = 0;
goto l_UpperLevelPageFault;
}
# else /* PAE */
# endif /* PAE */
/*
* Real page fault? (PDPE level)
*/
if ( (uErr & X86_TRAP_PF_RSVD)
# endif
)
{
uPageFaultLevel = 1;
goto l_UpperLevelPageFault;
}
# endif
/*
* Real page fault? (PDE level)
*/
if ( (uErr & X86_TRAP_PF_RSVD)
# endif
{
uPageFaultLevel = 2;
goto l_UpperLevelPageFault;
}
/*
* First check the easy case where the page directory has been marked read-only to track
* the dirty bit of an emulated BIG page
*/
{
/* Mark guest page directory as accessed */
# if PGM_GST_TYPE == PGM_TYPE_AMD64
# endif
/*
* Only write protection page faults are relevant here.
*/
if (fWriteFault)
{
/* Mark guest page directory as dirty (BIG page only). */
{
if (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY)
{
/* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
* fault again and take this path to only invalidate the entry.
*/
return VINF_PGM_HANDLED_DIRTY_BIT_FAULT; /* restarts the instruction. */
}
# ifdef IN_RING0
else
/* Check for stale TLB entry; only applies to the SMP guest case. */
&& pPdeDst->n.u1Accessed)
{
if (pShwPage)
{
{
/* Stale TLB entry. */
return VINF_PGM_HANDLED_DIRTY_BIT_FAULT; /* restarts the instruction. */
}
}
}
# endif /* IN_RING0 */
}
}
return VINF_PGM_NO_DIRTY_BIT_TRACKING;
}
/* else: 4KB page table */
/*
* Map the guest page table.
*/
if (RT_SUCCESS(rc))
{
/*
* Real page fault?
*/
# endif
)
{
/* Check the present bit as the shadow tables can cause different error codes by being out of sync.
* See the 2nd case above as well.
*/
return VINF_EM_RAW_GUEST_TRAP;
}
/*
* Set the accessed bits in the page directory and the page table.
*/
# if PGM_GST_TYPE == PGM_TYPE_AMD64
# endif
/*
* Only write protection page faults are relevant here.
*/
if (fWriteFault)
{
/* Write access, so mark guest entry as dirty. */
# ifdef VBOX_WITH_STATISTICS
else
# endif
{
#ifndef IN_RING0
/* Bail out here as pgmPoolGetPageByHCPhys will return NULL and we'll crash below.
* Our individual shadow handlers will provide more information and force a fatal exit.
*/
{
return VINF_SUCCESS;
}
#endif
/*
* Map shadow page table.
*/
if (pShwPage)
{
{
if (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY)
{
/* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
* fault again and take this path to only invalidate the entry.
*/
if ( pPage
{
/* Assuming write handlers here as the PTE is present (otherwise we wouldn't be here). */
}
else
return VINF_PGM_HANDLED_DIRTY_BIT_FAULT; /* restarts the instruction. */
}
# ifdef IN_RING0
else
/* Check for stale TLB entry; only applies to the SMP guest case. */
{
/* Stale TLB entry. */
return VINF_PGM_HANDLED_DIRTY_BIT_FAULT; /* restarts the instruction. */
}
# endif
}
}
else
}
}
/** @todo Optimize accessed bit emulation? */
# ifdef VBOX_STRICT
/*
* Sanity check.
*/
{
}
# endif /* VBOX_STRICT */
return VINF_PGM_NO_DIRTY_BIT_TRACKING;
}
return rc;
/*
* Pagefault detected while checking the PML4E, PDPE or PDE.
* Single exit handler to get rid of duplicate code paths.
*/
if (
# if PGM_GST_TYPE == PGM_TYPE_AMD64
# endif
# endif
{
/* Check the present bit as the shadow tables can cause different error codes by being out of sync. */
{
}
else
{
/*
* Map the guest page table.
*/
if (RT_SUCCESS(rc))
{
}
}
}
return VINF_EM_RAW_GUEST_TRAP;
}
#endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
/**
* Sync a shadow page table.
*
* The shadow page table is not present. This includes the case where
* there is a conflict with a mapping.
*
* @returns VBox status code.
* @param pVCpu The VMCPU handle.
* @param iPD Page directory index.
* @param pPDSrc Source page directory (i.e. Guest OS page directory).
* Assume this is a temporary mapping.
* @param GCPtrPage GC Pointer of the page that caused the fault
*/
{
#if 0 /* rarely useful; leave for debugging. */
#endif
#if ( PGM_GST_TYPE == PGM_TYPE_32BIT \
|| PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64) \
&& PGM_SHW_TYPE != PGM_TYPE_NESTED \
&& PGM_SHW_TYPE != PGM_TYPE_EPT
int rc = VINF_SUCCESS;
/*
* Validate input a little bit.
*/
AssertMsg(iPDSrc == ((GCPtrPage >> GST_PD_SHIFT) & GST_PD_MASK), ("iPDSrc=%x GCPtrPage=%RGv\n", iPDSrc, GCPtrPage));
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
/* Fetch the pgm pool shadow descriptor. */
/* Fetch the pgm pool shadow descriptor. */
# endif
# if PGM_GST_TYPE == PGM_TYPE_AMD64
/* Fetch the pgm pool shadow descriptor. */
# endif
# ifndef PGM_WITHOUT_MAPPINGS
/*
* Check for conflicts.
* GC: In case of a conflict we'll go to Ring-3 and do a full SyncCR3.
* HC: Simply resolve the conflict.
*/
if (PdeDst.u & PGM_PDFLAGS_MAPPING)
{
# ifndef IN_RING3
return VERR_ADDRESS_CONFLICT;
# else
# if PGM_GST_TYPE == PGM_TYPE_32BIT
int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT));
# else
AssertFailed(); /* can't happen for amd64 */
# endif
if (RT_FAILURE(rc))
{
return rc;
}
# endif
}
# else /* PGM_WITHOUT_MAPPINGS */
# endif /* PGM_WITHOUT_MAPPINGS */
# if defined(IN_RC)
/* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
# endif
/*
* Sync page directory entry.
*/
{
/*
* Allocate & map the page table.
*/
# if PGM_GST_TYPE == PGM_TYPE_AMD64
# else
# endif
if (fPageTable)
{
/* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
# endif
}
else
{
# else
const bool fNoExecute = false;
# endif
/* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
# endif
/* Determine the right kind of large page to avoid incorrect cached entry reuse. */
{
else
}
else
{
else
}
rc = pgmPoolAllocEx(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, enmAccess, pShwPde->idx, iPDDst, &pShwPage);
}
if (rc == VINF_SUCCESS)
else if (rc == VINF_PGM_CACHED_PAGE)
{
/*
* The PT was cached, just hook it up.
*/
if (fPageTable)
| (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
else
{
| (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
/* (see explanation and assumptions further down.) */
{
}
}
# if defined(IN_RC)
# endif
return VINF_SUCCESS;
}
else if (rc == VERR_PGM_POOL_FLUSHED)
{
# if defined(IN_RC)
# endif
return VINF_PGM_SYNC_CR3;
}
else
PdeDst.u &= X86_PDE_AVL_MASK;
/*
* Page directory has been accessed (this is a fault situation, remember).
*/
if (fPageTable)
{
/*
* Page table - 4KB.
*
* Sync all or just a few entries depending on PGM_SYNC_N_PAGES.
*/
Log2(("SyncPT: 4K %RGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx}\n",
if (RT_SUCCESS(rc))
{
/*
* Start by syncing the page directory entry so CSAM's TLB trick works.
*/
| (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
# if defined(IN_RC)
# endif
/*
*
* Directory Page Combined
* U/S U/S U/S
* 0 0 0
* 0 1 0
* 1 0 0
* 1 1 1
*
* Simple AND operation. Table listed for completeness.
*
*/
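/*
 * Illustrative sketch (not part of the template): the combined user/supervisor
 * permission is the AND of the directory and table entry bits, per the table above.
 */
#if 0 /* illustrative only */
static uint32_t pgmBthSketchCombinedUs(uint32_t fPdeUs, uint32_t fPteUs)
{
    return fPdeUs & fPteUs;   /* user access only when both the PDE and the PTE allow it. */
}
#endif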
# ifdef PGM_SYNC_N_PAGES
iPTDst = 0;
else
# else /* !PGM_SYNC_N_PAGES */
unsigned iPTDst = 0;
# endif /* !PGM_SYNC_N_PAGES */
/* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
# else
const unsigned offPTSrc = 0;
# endif
{
{
# ifndef IN_RING0
/*
* Assuming kernel code will be marked as supervisor - and not as user level
* and executed using a conforming code selector - and marked as read-only.
* Also assume that if we're monitoring a page, it's of no interest to CSAM.
*/
)
# endif
Log2(("SyncPT: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s dst.raw=%08llx iPTSrc=%x PdeSrc.u=%x physpte=%RGp\n",
pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : "", pPTDst->a[iPTDst].u, iPTSrc, PdeSrc.au32[0],
}
} /* for PTEs */
}
}
else
{
/*
* Big page - 2/4MB.
*
* We'll walk the ram range list in parallel and optimize lookups.
* We will only sync one shadow page table at a time.
*/
/**
* @todo It might be more efficient to sync only a part of the 4MB page (similar to what we do for 4kb PDs).
*/
/*
* Start by syncing the page directory entry.
*/
| (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
/*
* If the page is not flagged as dirty and is writable, then make it read-only
* at PD level, so we can set the dirty bit when the page is modified.
*
* ASSUMES that page access handlers are implemented on page table entry level.
* Thus we will first catch the dirty access and set PDE.D and restart. If
* there is an access handler, we'll trap again and let it work on the problem.
*/
/** @todo move the above stuff to a section in the PGM documentation. */
{
}
# if defined(IN_RC)
# endif
/*
* Fill the shadow page table.
*/
/* Get address and flags from the source PDE. */
PteDstBase.u = PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT);
/* Loop thru the entries in the shadow PT. */
Log2(("SyncPT: BIG %RGv PdeSrc:{P=%d RW=%d U=%d raw=%08llx} Shw=%RGv GCPhys=%RGp %s\n",
unsigned iPTDst = 0;
{
/* Advance ram range list. */
{
do
{
/* Make shadow PTE. */
# ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
/* Try make the page writable if necessary. */
if ( PteDstBase.n.u1Write
{
break;
}
# endif
{
{
}
else
PteDst.u = 0;
}
# ifndef IN_RING0
/*
* Assuming kernel code will be marked as supervisor and not as user level and executed
* using a conforming code selector. Don't check for readonly, as that implies the whole
* 4MB can be code or readonly data. Linux enables write access for its large pages.
*/
PteDst.u = 0;
# endif
else
/* Only map writable pages writable. */
{
Log3(("SyncPT: write-protecting %RGp pPage=%R[pgmpage] at %RGv\n", GCPhys, pPage, (RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT))));
}
# ifdef PGMPOOL_WITH_USER_TRACKING
PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
# endif
/* commit it */
Log4(("SyncPT: BIG %RGv PteDst:{P=%d RW=%d U=%d raw=%08llx}%s\n",
(RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT)), PteDst.n.u1Present, PteDst.n.u1Write, PteDst.n.u1User, (uint64_t)PteDst.u,
/* advance */
iHCPage++;
iPTDst++;
}
else if (pRam)
{
do
{
iPTDst++;
}
else
{
}
} /* while more PTEs */
} /* 4KB / 4MB */
}
else
if (RT_FAILURE(rc))
return rc;
&& PGM_SHW_TYPE != PGM_TYPE_NESTED \
&& !defined(IN_RC)
/*
* Validate input a little bit.
*/
int rc = VINF_SUCCESS;
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
/* Fetch the pgm pool shadow descriptor. */
/* Fetch the pgm pool shadow descriptor. */
/* Fetch the pgm pool shadow descriptor. */
if (rc != VINF_SUCCESS)
{
return rc;
}
/* Fetch the pgm pool shadow descriptor. */
# endif
/*
* Allocate & map the page table.
*/
/* Virtual address = physical address */
rc = pgmPoolAlloc(pVM, GCPhys & ~(RT_BIT_64(SHW_PD_SHIFT) - 1), BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage);
if ( rc == VINF_SUCCESS
|| rc == VINF_PGM_CACHED_PAGE)
else
PdeDst.u &= X86_PDE_AVL_MASK;
# if PGM_SHW_TYPE == PGM_TYPE_EPT
# else
# endif
return rc;
#else
return VERR_INTERNAL_ERROR;
#endif
}
/**
* Prefetch a page/set of pages.
*
* Typically used to sync commonly used pages before entering raw mode
* after a CR3 reload.
*
* @returns VBox status code.
* @param pVCpu The VMCPU handle.
* @param GCPtrPage Page to prefetch.
*/
{
#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
/*
* Check that all Guest levels thru the PDE are present, getting the
* PD and PDE in the processes.
*/
int rc = VINF_SUCCESS;
# if PGM_GST_TYPE == PGM_TYPE_32BIT
unsigned iPDSrc;
if (!pPDSrc)
return VINF_SUCCESS; /* not present */
unsigned iPDSrc;
if (!pPDSrc)
return VINF_SUCCESS; /* not present */
# endif
# else
const unsigned iPDSrc = 0;
# endif
{
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
# if PGM_GST_TYPE != PGM_TYPE_PAE
/* Fake PDPT entry; access control handled on the page table level, so allow everything. */
PdpeSrc.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
# endif
if (rc != VINF_SUCCESS)
{
return rc;
}
# if PGM_GST_TYPE == PGM_TYPE_PROT
/* AMD-V nested paging */
/* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
# endif
if (rc != VINF_SUCCESS)
{
return rc;
}
# endif
if (!(PdeDst.u & PGM_PDFLAGS_MAPPING))
{
{
/** r=bird: This guy will set the A bit on the PDE, probably harmless. */
}
else
{
/** @note We used to sync PGM_SYNC_NR_PAGES pages, which triggered assertions in CSAM, because
* R/W attributes of nearby pages were reset. Not sure how that could happen. Anyway, it
* makes no sense to prefetch more than one page.
*/
if (RT_SUCCESS(rc))
rc = VINF_SUCCESS;
}
}
}
return rc;
return VINF_SUCCESS; /* ignore */
#endif
}
/**
* Syncs a page during a PGMVerifyAccess() call.
*
* @returns VBox status code (informational included).
* @param pVCpu The VMCPU handle.
* @param GCPtrPage The address of the page to sync.
* @param fPage The effective guest page flags.
* @param uErr The trap error code.
*/
PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fPage, unsigned uErr)
{
#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
# ifndef IN_RING0
if (!(fPage & X86_PTE_US))
{
/*
* Mark this page as safe.
*/
/** @todo not correct for pages that contain both code and data!! */
}
# endif
/*
* Get guest PD and index.
*/
# if PGM_GST_TYPE == PGM_TYPE_32BIT
unsigned iPDSrc = 0;
if (pPDSrc)
{
return VINF_EM_RAW_GUEST_TRAP;
}
unsigned iPDSrc;
if (!pPDSrc)
{
return VINF_EM_RAW_GUEST_TRAP;
}
# endif
# else
const unsigned iPDSrc = 0;
# endif
int rc = VINF_SUCCESS;
/*
* First check if the shadow pd is present.
*/
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
# if PGM_GST_TYPE != PGM_TYPE_PAE
/* Fake PDPT entry; access control handled on the page table level, so allow everything. */
PdpeSrc.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
# endif
if (rc != VINF_SUCCESS)
{
return rc;
}
# if PGM_GST_TYPE == PGM_TYPE_PROT
/* AMD-V nested paging */
/* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
# endif
if (rc != VINF_SUCCESS)
{
return rc;
}
# endif
# if defined(IN_RC)
/* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
# endif
{
if (rc != VINF_SUCCESS)
{
# if defined(IN_RC)
/* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
# endif
return rc;
}
}
/* Check for dirty bit fault */
if (rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT)
Log(("PGMVerifyAccess: success (dirty)\n"));
else
{
# else
{
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
if (uErr & X86_TRAP_PF_US)
else /* supervisor */
if (RT_SUCCESS(rc))
{
/* Page was successfully synced */
Log2(("PGMVerifyAccess: success (sync)\n"));
rc = VINF_SUCCESS;
}
else
{
}
}
# if defined(IN_RC)
/* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
# endif
return rc;
#else /* PGM_GST_TYPE != PGM_TYPE_32BIT */
return VERR_INTERNAL_ERROR;
#endif /* PGM_GST_TYPE != PGM_TYPE_32BIT */
}
#define MY_STAM_COUNTER_INC(a) do { } while (0)
/**
* Syncs the paging hierarchy starting at CR3.
*
* @returns VBox status code, no specials.
* @param pVCpu The VMCPU handle.
* @param cr0 Guest context CR0 register
* @param cr3 Guest context CR3 register
* @param cr4 Guest context CR4 register
* @param fGlobal Including global page directories or not
*/
{
fGlobal = true; /* Change this CR3 reload to be a global one. */
# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
if (pPool->cDirtyPages)
# endif
/*
* Update page access handlers.
* The virtual handlers are always flushed, while the physical ones are only flushed on demand.
* WARNING: We are incorrectly not doing global flushing on Virtual Handler updates. We'll
* have to look into that later because it will have a bad influence on the performance.
* @note SvL: There's no need for that. Just invalidate the virtual range(s).
* bird: Yes, but that won't work for aliases.
*/
/** @todo this MUST go away. See #1557. */
#endif
/*
* Nested / EPT - almost no work.
*/
/** @todo check if this is really necessary; the call does it as well... */
return VINF_SUCCESS;
/*
* AMD64 (Shw & Gst) - No need to check all paging levels; we zero
* out the shadow parts when the guest modifies its tables.
*/
return VINF_SUCCESS;
#else /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT && PGM_SHW_TYPE != PGM_TYPE_AMD64 */
# ifdef PGM_WITHOUT_MAPPINGS
return VINF_SUCCESS;
# else
/* Nothing to do when mappings are fixed. */
return VINF_SUCCESS;
if (rc == VINF_PGM_SYNC_CR3)
{
LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
return VINF_PGM_SYNC_CR3;
}
# endif
return VINF_SUCCESS;
#endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT && PGM_SHW_TYPE != PGM_TYPE_AMD64 */
}
#ifdef VBOX_STRICT
#ifdef IN_RC
# define AssertMsgFailed Log
#endif
#ifdef IN_RING3
/**
* Dumps a page table hierarchy, using only physical addresses and cr4/lm flags.
*
* @returns VBox status code (VINF_SUCCESS).
* @param cr3 The root of the hierarchy.
* @param cr4 The cr4; only PAE and PSE are currently used.
* @param fLongMode Set if long mode, false if not long mode.
* @param cMaxDepth Number of levels to dump.
* @param pHlp Pointer to the output functions.
*/
VMMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint32_t cr3, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp);
#endif
/**
* Checks that the shadow page table is in sync with the guest one.
*
* @returns The number of errors.
* @param pVM The virtual machine.
* @param pVCpu The VMCPU handle.
* @param cr3 Guest context CR3 register
* @param cr4 Guest context CR4 register
* @param GCPtr Where to start. Defaults to 0.
* @param cb How much to check. Defaults to everything.
*/
PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb)
{
return 0;
#else
unsigned cErrors = 0;
#if PGM_GST_TYPE == PGM_TYPE_PAE
/** @todo currently broken; crashes below somewhere */
AssertFailed();
#endif
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
|| PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64
# if PGM_GST_TYPE == PGM_TYPE_AMD64
bool fBigPagesSupported = true;
# else
# endif
# ifndef IN_RING0
# endif
int rc;
/*
* Check that the Guest CR3 and all its mappings are correct.
*/
false);
# if PGM_GST_TYPE == PGM_TYPE_32BIT
# else
# endif
AssertMsgReturn(HCPhys == HCPhysShw, ("HCPhys=%RHp HCPhysShw=%RHp (cr3)\n", HCPhys, HCPhysShw), false);
AssertMsgReturn((cr3 & GST_CR3_PAGE_MASK) == GCPhys, ("GCPhys=%RGp cr3=%RGp\n", GCPhys, (RTGCPHYS)cr3), false);
# endif
# endif /* !IN_RING0 */
/*
* Get and check the Shadow CR3.
*/
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
unsigned cPDEs = X86_PG_ENTRIES;
# if PGM_GST_TYPE == PGM_TYPE_32BIT
# else
unsigned cPDEs = X86_PG_PAE_ENTRIES;
# endif
unsigned cPDEs = X86_PG_PAE_ENTRIES;
# endif
/** @todo call the other two PGMAssert*() functions. */
# if PGM_GST_TYPE == PGM_TYPE_AMD64
{
/* Fetch the pgm pool shadow descriptor if the shadow pml4e is present. */
{
continue;
}
{
AssertMsgFailed(("Present bit doesn't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
cErrors++;
continue;
}
{
AssertMsgFailed(("Physical address doesn't match! iPml4 %d pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4, pPml4eDst->u, pPml4eSrc->u, pShwPdpt->GCPhys, GCPhysPdptSrc));
cErrors++;
continue;
}
{
AssertMsgFailed(("User/Write/NoExec bits don't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
cErrors++;
continue;
}
# else /* PGM_GST_TYPE != PGM_TYPE_AMD64 */
{
# endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 */
/*
* Check the PDPTEs too.
*/
{
unsigned iPDSrc = 0; /* initialized to shut up gcc */
# if PGM_GST_TYPE == PGM_TYPE_PAE
# else
if (rc != VINF_SUCCESS)
{
continue; /* next PDPTE */
}
# endif
{
continue; /* next PDPTE */
}
{
AssertMsgFailed(("Present bit doesn't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));
cErrors++;
continue;
}
{
# if PGM_GST_TYPE == PGM_TYPE_AMD64
AssertMsgFailed(("Physical address doesn't match! iPml4 %d iPdpt %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4, iPdpt, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
# else
AssertMsgFailed(("Physical address doesn't match! iPdpt %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPdpt, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
# endif
cErrors++;
continue;
}
# if PGM_GST_TYPE == PGM_TYPE_AMD64
{
AssertMsgFailed(("User/Write/NoExec bits don't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));
cErrors++;
continue;
}
# endif
# else /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
{
# endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
# endif
# endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */
/*
* Iterate the shadow page directory.
*/
for (;
{
# if PGM_SHW_TYPE == PGM_TYPE_PAE
# else
# endif
if (PdeDst.u & PGM_PDFLAGS_MAPPING)
{
{
AssertMsgFailed(("Mapping shall only have PGM_PDFLAGS_MAPPING set! PdeDst.u=%#RX64\n", (uint64_t)PdeDst.u));
cErrors++;
continue;
}
}
)
{
if (!pPoolPage)
{
AssertMsgFailed(("Invalid page table address %RHp at %RGv! PdeDst=%#RX64\n",
cErrors++;
continue;
}
{
AssertMsgFailed(("PDE flags PWT and/or PCD is set at %RGv! These flags are not virtualized! PdeDst=%#RX64\n",
cErrors++;
}
{
AssertMsgFailed(("4K PDE reserved flags at %RGv! PdeDst=%#RX64\n",
cErrors++;
}
{
AssertMsgFailed(("Guest PDE at %RGv is not present! PdeDst=%#RX64 PdeSrc=%#RX64\n",
cErrors++;
continue;
}
|| !fBigPagesSupported)
{
# endif
}
else
{
# if PGM_GST_TYPE == PGM_TYPE_32BIT
if (PdeSrc.u & X86_PDE4M_PG_HIGH_MASK)
{
AssertMsgFailed(("Guest PDE at %RGv is using PSE36 or similar! PdeSrc=%#RX64\n",
cErrors++;
continue;
}
# endif
# endif
}
!= (!PdeSrc.b.u1Size || !fBigPagesSupported ? BTH_PGMPOOLKIND_PT_FOR_PT : BTH_PGMPOOLKIND_PT_FOR_BIG))
{
AssertMsgFailed(("Invalid shadow page table kind %d at %RGv! PdeSrc=%#RX64\n",
cErrors++;
}
if (!pPhysPage)
{
AssertMsgFailed(("Cannot find guest physical address %RGp in the PDE at %RGv! PdeSrc=%#RX64\n",
cErrors++;
continue;
}
{
AssertMsgFailed(("GCPhysGst=%RGp != pPage->GCPhys=%RGp at %RGv\n",
cErrors++;
continue;
}
|| !fBigPagesSupported)
{
/*
* Page Table.
*/
if (RT_FAILURE(rc))
{
AssertMsgFailed(("Cannot map/convert guest physical address %RGp in the PDE at %RGv! PdeSrc=%#RX64\n",
cErrors++;
continue;
}
{
/// @todo We get here a lot on out-of-sync CR3 entries. The access handler should zap them to avoid false alarms here!
AssertMsgFailed(("4K PDE flags mismatch at %RGv! PdeSrc=%#RX64 PdeDst=%#RX64\n",
cErrors++;
continue;
}
if (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)
{
AssertMsgFailed(("4K PDEs cannot have PGM_PDFLAGS_TRACK_DIRTY set! GCPtr=%RGv PdeDst=%#RX64\n",
cErrors++;
continue;
}
/* iterate the page table. */
/* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
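/* A 32-bit guest page table has 1024 entries covering 4 MB, while a PAE shadow
   page table has 512 entries covering 2 MB, so each guest PT is shadowed by two
   PAE PTs; offPTSrc selects the half of the guest PT that matches the shadow PT
   being checked here. */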
# else
const unsigned offPTSrc = 0;
# endif
{
/* skip not-present entries. */
if (!(PteDst.u & (X86_PTE_P | PGM_PTFLAGS_TRACK_DIRTY))) /** @todo deal with ALL handlers and CSAM !P pages! */
continue;
{
# ifdef IN_RING3
# endif
AssertMsgFailed(("Out of sync (!P) PTE at %RGv! PteSrc=%#RX64 PteDst=%#RX64 pPTSrc=%RGv iPTSrc=%x PdeSrc=%x physpte=%RGp\n",
cErrors++;
continue;
}
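/* Bits deliberately excluded from the guest vs. shadow comparison: the physical
   address (the shadow entry points at the HC page), the AVL software bits, and
   G/D/PWT/PCD/PAT which PGM does not keep strictly in sync. */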
uint64_t fIgnoreFlags = GST_PTE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_G | X86_PTE_D | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_PAT;
# if 1 /** @todo sync accessed bit properly... */
# endif
/* match the physical addresses */
# ifdef IN_RING3
if (RT_FAILURE(rc))
{
{
AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
continue;
}
}
{
AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
continue;
}
# endif
if (!pPhysPage)
{
# ifdef IN_RING3 /** @todo make MMR3PageDummyHCPhys an 'All' function! */
{
AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
continue;
}
# endif
{
AssertMsgFailed(("Invalid guest page at %RGv is writable! GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
}
}
{
AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp pPhysPage:%R[pgmpage] GCPhysGst=%RGp PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
continue;
}
/* flags */
{
{
{
AssertMsgFailed(("WRITE access flagged at %RGv but the page is writable! pPhysPage=%R[pgmpage] PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
continue;
}
}
else
{
{
AssertMsgFailed(("ALL access flagged at %RGv but the page is present! pPhysPage=%R[pgmpage] PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
continue;
}
}
}
else
{
{
{
AssertMsgFailed(("!DIRTY page at %RGv is writable! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
continue;
}
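/* Dirty-bit emulation: a guest page that is writable but not yet dirty gets a
   read-only shadow PTE tagged PGM_PTFLAGS_TRACK_DIRTY, so the first write traps
   and the guest D bit can be set before the shadow entry is made writable. */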
if (!(PteDst.u & PGM_PTFLAGS_TRACK_DIRTY))
{
AssertMsgFailed(("!DIRTY page at %RGv is not marked TRACK_DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
continue;
}
{
AssertMsgFailed(("!DIRTY page at %RGv is marked DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
}
# if 0 /** @todo sync access bit properly... */
{
AssertMsgFailed(("!DIRTY page at %RGv is has mismatching accessed bit! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
}
# else
# endif
}
else if (PteDst.u & PGM_PTFLAGS_TRACK_DIRTY)
{
/* access bit emulation (not implemented). */
{
AssertMsgFailed(("PGM_PTFLAGS_TRACK_DIRTY set at %RGv but no accessed bit emulation! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
continue;
}
if (!PteDst.n.u1Accessed)
{
AssertMsgFailed(("!ACCESSED page at %RGv is has the accessed bit set! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
}
}
# ifdef DEBUG_sandervl
# endif
}
)
{
AssertMsgFailed(("Flags mismatch at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
continue;
}
} /* foreach PTE */
}
else
{
/*
* Big Page.
*/
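/* Same idea as the 4K case: exclude the physical address, the AVL bits and the
   G/D/PS/PWT/PCD attributes from the 2/4 MB PDE comparison. */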
uint64_t fIgnoreFlags = X86_PDE_AVL_MASK | GST_PDE_PG_MASK | X86_PDE4M_G | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_PWT | X86_PDE4M_PCD;
{
{
AssertMsgFailed(("!DIRTY page at %RGv is writable! PdeSrc=%#RX64 PdeDst=%#RX64\n",
cErrors++;
continue;
}
if (!(PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY))
{
AssertMsgFailed(("!DIRTY page at %RGv is not marked TRACK_DIRTY! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
continue;
}
# if 0 /** @todo sync access bit properly... */
{
AssertMsgFailed(("!DIRTY page at %RGv is has mismatching accessed bit! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
}
# else
# endif
}
else if (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)
{
/* access bit emulation (not implemented). */
{
AssertMsgFailed(("PGM_PDFLAGS_TRACK_DIRTY set at %RGv but no accessed bit emulation! PdeSrc=%#RX64 PdeDst=%#RX64\n",
cErrors++;
continue;
}
if (!PdeDst.n.u1Accessed)
{
AssertMsgFailed(("!ACCESSED page at %RGv is has the accessed bit set! PdeSrc=%#RX64 PdeDst=%#RX64\n",
cErrors++;
}
}
{
AssertMsgFailed(("Flags mismatch (B) at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PdeSrc=%#RX64 PdeDst=%#RX64\n",
cErrors++;
}
/* iterate the page table. */
{
if (PteDst.u & PGM_PTFLAGS_TRACK_DIRTY)
{
AssertMsgFailed(("The PTE at %RGv emulating a 2/4M page is marked TRACK_DIRTY! PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
}
/* skip not-present entries. */
continue;
fIgnoreFlags = X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_PAT | X86_PTE_D | X86_PTE_A | X86_PTE_G | X86_PTE_PAE_NX;
/* match the physical addresses */
# ifdef IN_RING3
if (RT_FAILURE(rc))
{
{
AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
}
}
{
AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp HCPhys=%RHp GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
continue;
}
# endif
if (!pPhysPage)
{
# ifdef IN_RING3 /** @todo make MMR3PageDummyHCPhys an 'All' function! */
{
AssertMsgFailed(("Cannot find guest physical address %RGp at %RGv! PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
continue;
}
# endif
{
AssertMsgFailed(("Invalid guest page at %RGv is writable! GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
}
}
{
AssertMsgFailed(("Out of sync (phys) at %RGv! HCPhysShw=%RHp pPhysPage=%R[pgmpage] GCPhysGst=%RGp PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
continue;
}
/* flags */
{
{
{
{
AssertMsgFailed(("WRITE access flagged at %RGv but the page is writable! pPhysPage=%R[pgmpage] PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
continue;
}
}
}
else
{
{
AssertMsgFailed(("ALL access flagged at %RGv but the page is present! pPhysPage=%R[pgmpage] PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
continue;
}
}
}
&& (PdeSrc.u & ~(fIgnoreFlags | X86_PTE_RW)) != (PteDst.u & ~fIgnoreFlags) /* lazy phys handler dereg. */
)
{
AssertMsgFailed(("Flags mismatch (BT) at %RGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
continue;
}
} /* for each PTE */
}
}
/* not present */
} /* for each PDE */
} /* for each PDPTE */
} /* for each PML4E */
# ifdef DEBUG
if (cErrors)
# endif
#endif /* GST == 32BIT, PAE or AMD64 */
return cErrors;
#endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT */
}
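/*
 * Illustrative only (not compiled): a sketch of how a strict build might drive the
 * checker above after a full resync. The call site and the use of the CPUM guest
 * register getters are assumptions; only the AssertCR3 signature is taken from this
 * file.
 */
# if 0
{
    uint64_t const cr3     = CPUMGetGuestCR3(pVCpu);
    uint64_t const cr4     = CPUMGetGuestCR4(pVCpu);
    unsigned const cErrors = PGM_BTH_NAME(AssertCR3)(pVCpu, cr3, cr4, 0 /* GCPtr */, ~(RTGCPTR)0 /* cb */);
    AssertMsg(!cErrors, ("Shadow/guest page table mismatch: %u error(s)\n", cErrors));
}
# endif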
#endif /* VBOX_STRICT */
/**
* Sets up the CR3 for shadow paging.
*
* @returns Strict VBox status code.
* @retval VINF_SUCCESS.
*
* @param pVCpu The VMCPU handle.
* @param GCPhysCR3 The physical address in the CR3 register.
*/
{
/* Update guest paging info. */
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
|| PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64
/*
* Map the page CR3 points at.
*/
/** @todo this needs some reworking wrt. locking. */
# if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
int rc = VINF_SUCCESS;
# else
int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3 & GST_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
# endif
if (RT_SUCCESS(rc))
{
if (RT_SUCCESS(rc))
{
# ifdef IN_RC
# endif
# if PGM_GST_TYPE == PGM_TYPE_32BIT
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# endif
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# endif
pVCpu->pgm.s.pGstPaePdptRC = (RCPTRTYPE(PX86PDPT))((RCPTRTYPE(uint8_t *))pVM->pgm.s.GCPtrCR3Mapping + off);
/*
* Map the 4 PDs too.
*/
{
if (pGuestPDPT->a[i].n.u1Present)
{
# if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
HCPtr = NIL_RTHCPTR;
int rc2 = VINF_SUCCESS;
# else
# endif
if (RT_SUCCESS(rc2))
{
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# endif
# ifdef IN_RC
# endif
continue;
}
}
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# endif
# ifdef IN_RC
# endif
}
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# endif
# endif
}
else
}
else
int rc = VINF_SUCCESS;
#endif
/* Update shadow paging info for guest modes with paging (32, pae, 64). */
# if ( ( PGM_SHW_TYPE == PGM_TYPE_32BIT \
|| PGM_SHW_TYPE == PGM_TYPE_PAE \
|| PGM_SHW_TYPE == PGM_TYPE_AMD64) \
&& ( PGM_GST_TYPE != PGM_TYPE_REAL \
&& PGM_GST_TYPE != PGM_TYPE_PROT))
/*
* Update the shadow root page as well since that's not fixed.
*/
# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
if (pPool->cDirtyPages)
# endif
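/* Allocate (and lock) the pool page backing the new shadow CR3 root, keyed on the
   guest CR3 physical address. */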
rc = pgmPoolAlloc(pVM, GCPhysCR3 & GST_CR3_PAGE_MASK, BTH_PGMPOOLKIND_ROOT, SHW_POOL_ROOT_IDX, GCPhysCR3 >> PAGE_SHIFT, &pNewShwPageCR3, true /* lock page */);
rc = VINF_SUCCESS;
# ifdef IN_RC
/*
* WARNING! We can't deal with jumps to ring 3 in the code below as the
* state will be inconsistent! Flush important things now while
* we still can and then make sure there are no ring-3 calls.
*/
# endif
# ifdef IN_RING0
# else
# endif
# ifndef PGM_WITHOUT_MAPPINGS
/*
* Apply all hypervisor mappings to the new CR3.
* Note that SyncCR3 will be executed whenever CR3 is changed while the guest is in a paging mode;
* this makes sure we check for conflicts in the new CR3 root.
*/
Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL) || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
# endif
# endif
/* Set the current hypervisor CR3. */
# ifdef IN_RC
/* NOTE: The state is consistent again. */
# endif
/* Clean up the old CR3 root. */
if ( pOldShwPageCR3
&& pOldShwPageCR3 != pNewShwPageCR3 /* @todo can happen due to incorrect syncing between REM & PGM; find the real cause */)
{
# ifndef PGM_WITHOUT_MAPPINGS
/* Remove the hypervisor mappings from the shadow page table. */
# endif
/* Mark the page as unlocked; allow flushing again. */
}
# endif
return rc;
}
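/*
 * Illustrative only (not compiled): the shadow-root switch pattern the function above
 * follows - allocate and lock the pool page backing the new guest CR3, make it the
 * current shadow root, then release the old one. The helper name is hypothetical and
 * the user-index bookkeeping is simplified; the real code above also handles the
 * hypervisor mappings and the ring-0/RC specifics.
 */
# if 0
static int pgmBthSketchSwitchShwCR3(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, RTGCPHYS GCPhysCR3)
{
    PPGMPOOLPAGE pNewShwPageCR3;
    int rc = pgmPoolAlloc(pVM, GCPhysCR3 & GST_CR3_PAGE_MASK, BTH_PGMPOOLKIND_ROOT, SHW_POOL_ROOT_IDX,
                          GCPhysCR3 >> PAGE_SHIFT, &pNewShwPageCR3, true /* lock page */);
    if (RT_SUCCESS(rc))
    {
        PPGMPOOLPAGE pOldShwPageCR3 = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
        pVCpu->pgm.s.CTX_SUFF(pShwPageCR3) = pNewShwPageCR3;    /* switch to the new root */
        if (pOldShwPageCR3 && pOldShwPageCR3 != pNewShwPageCR3)
            /* Simplified: the real code remembers the old root's user indices before switching. */
            pgmPoolFreeByPage(pPool, pOldShwPageCR3, pVCpu->pgm.s.iShwUser, pVCpu->pgm.s.iShwUserTable);
    }
    return rc;
}
# endif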
/**
* Unmaps the shadow CR3.
*
* @returns VBox status, no specials.
* @param pVCpu The VMCPU handle.
*/
{
LogFlow(("UnmapCR3\n"));
int rc = VINF_SUCCESS;
/*
* Update guest paging info.
*/
#if PGM_GST_TYPE == PGM_TYPE_32BIT
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# endif
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# endif
for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
{
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# endif
}
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# endif
/* nothing to do */
#endif
#if !defined(IN_RC) /* In RC we rely on MapCR3 to do the shadow part for us at a safe time */
/*
* Update shadow paging info.
*/
# if ( ( PGM_SHW_TYPE == PGM_TYPE_32BIT \
|| PGM_SHW_TYPE == PGM_TYPE_PAE \
|| PGM_SHW_TYPE == PGM_TYPE_AMD64))
# if PGM_GST_TYPE != PGM_TYPE_REAL
# endif
# ifndef PGM_WITHOUT_MAPPINGS
/* Remove the hypervisor mappings from the shadow page table. */
# endif
{
/* Mark the page as unlocked; allow flushing again. */
pgmPoolFreeByPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), pVCpu->pgm.s.iShwUser, pVCpu->pgm.s.iShwUserTable);
}
# endif
#endif /* !IN_RC*/
return rc;
}