PGMAllBth.h revision 21ef63137974c64ee16b2458738ac331dc582d31
/** @file
 * VBox - Page Manager, Shadow+Guest Paging Template - All context code.
 *
 * This file is a big challenge!
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
PGM_BTH_DECL(int, InvalidatePage)(PVM pVM, RTGCUINTPTR GCPtrPage);
PGM_BTH_DECL(int, SyncPage)(PVM pVM, GSTPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uErr);
PGM_BTH_DECL(int, CheckPageFault)(PVM pVM, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCUINTPTR GCPtrPage);
PGM_BTH_DECL(int, SyncPT)(PVM pVM, unsigned iPD, PGSTPD pPDSrc, RTGCUINTPTR GCPtrPage);
PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVM pVM, RTGCUINTPTR Addr, unsigned fPage, unsigned uErr);
PGM_BTH_DECL(int, PrefetchPage)(PVM pVM, RTGCUINTPTR GCPtrPage);
PGM_BTH_DECL(int, SyncCR3)(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
PGM_BTH_DECL(unsigned, AssertCR3)(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCUINTPTR GCPtr = 0, RTGCUINTPTR cb = ~(RTGCUINTPTR)0);
DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVM pVM, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys);

/* Filter out some illegal combinations of guest and shadow paging, so we can remove redundant checks inside functions. */
#if PGM_GST_TYPE == PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_NESTED
# error "Invalid combination; PAE guest implies PAE shadow"
#endif

#if     (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
    &&  !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED)
# error "Invalid combination; real or protected mode without paging implies 32 bits or PAE shadow paging."
#endif

#if     (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE) \
    &&  !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED)
# error "Invalid combination; 32 bits guest paging or PAE implies 32 bits or PAE shadow paging."
#endif

#if     (PGM_GST_TYPE == PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_NESTED) \
    ||  (PGM_SHW_TYPE == PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PROT)
# error "Invalid combination; AMD64 guest implies AMD64 shadow and vice versa"
#endif

#ifdef IN_RING0 /* no mappings in VT-x and AMD-V mode */
# define PGM_WITHOUT_MAPPINGS
#endif
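
/*
 * Illustrative sketch, not part of the original file: PGMAllBth.h is a
 * "both modes" template that gets #included once per guest/shadow paging
 * mode pair, with PGM_GST_TYPE and PGM_SHW_TYPE set by the includer and a
 * name-mangling macro giving each instantiation unique symbols. The
 * stand-alone model below uses hypothetical names (TMPL_NAME, MODE,
 * template.h) to show the pattern; it is a sketch, not the real PGM code.
 */
#if 0
/* template.h -- compiled once per inclusion, names mangled per mode: */
int TMPL_NAME(GetMode)(void)
{
    return MODE; /* expands differently for every instantiation */
}

/* includer.c -- instantiate the template for two different modes: */
#define MODE 32
#define TMPL_NAME(name) Bth32Bit##name
#include "template.h"
#undef MODE
#undef TMPL_NAME

#define MODE 64
#define TMPL_NAME(name) BthAmd64##name
#include "template.h"
#undef MODE
#undef TMPL_NAME
#endif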

/**
 * #PF Handler for raw-mode guest execution.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErr        The trap error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address.
 */
PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
    && PGM_SHW_TYPE != PGM_TYPE_NESTED
# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE != PGM_TYPE_PAE
    /*
     * Hide the instruction fetch trap indicator for now.
     */
    /** @todo NXE will change this and we must fix NXE in the switcher too! */
    const unsigned  iPDSrc = (RTGCUINTPTR)pvFault >> GST_PD_SHIFT;
    PGSTPD          pPDSrc = CTXSUFF(pVM->pgm.s.pGuestPD);
# elif PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
    PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, (RTGCUINTPTR)pvFault, &iPDSrc);
    pPDSrc = pgmGstGetLongModePDPtr(&pVM->pgm.s, pvFault, &pPml4eSrc, &PdpeSrc, &iPDSrc);
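
/*
 * Illustrative sketch, not part of the original file: how the page
 * directory index above is carved out of the faulting linear address.
 * For 32-bit guest paging GST_PD_SHIFT is 22; PAE/AMD64 use 21-bit PD
 * shifts plus the higher-level indexes. Stand-alone example:
 */
#if 0
#include <stdint.h>

typedef struct ADDRPARTS
{
    unsigned iPD;   /* page directory index (bits 31:22), 1024 entries */
    unsigned iPT;   /* page table index     (bits 21:12), 1024 entries */
    unsigned off;   /* byte offset within the 4KB page  (bits 11:0)    */
} ADDRPARTS;

static ADDRPARTS DecomposeLinear32(uint32_t uVa)
{
    ADDRPARTS Parts;
    Parts.iPD = uVa >> 22;
    Parts.iPT = (uVa >> 12) & 0x3ff;
    Parts.off = uVa & 0xfff;
    return Parts;
}
#endif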
    /* Quick check for a valid guest trap. */
            LogFlow(("Trap0eHandler: guest PDPTR not present CR3=%VGp\n", (CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK)));
            STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eGuestTrap; });
    const unsigned  iPDSrc = 0;
    const unsigned  iPDDst = (RTGCUINTPTR)pvFault >> SHW_PD_SHIFT;
    PX86PD          pPDDst = pVM->pgm.s.CTXMID(p,32BitPD);
    const unsigned  iPDDst = (RTGCUINTPTR)pvFault >> SHW_PD_SHIFT;
    PX86PDPAE       pPDDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0];  /* We treat this as a PD with 2048 entries, so no need to and with SHW_PD_MASK to get iPDDst */

    /* Did we mark the PDPT as not present in SyncCR3? */
    unsigned iPDPTE = ((RTGCUINTPTR)pvFault >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
    if (!pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPDPTE].n.u1Present)
        pVM->pgm.s.CTXMID(p,PaePDPT)->a[iPDPTE].n.u1Present = 1;
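
/*
 * Illustrative sketch, not part of the original file: why the PAE shadow
 * code above can skip the SHW_PD_MASK. The four 512-entry PAE page
 * directories are laid out back to back, so bits 30:21 of the address form
 * one flat index into a 2048-entry array. Stand-alone example:
 */
#if 0
#include <stdint.h>

static unsigned PaeFlatPdIndex(uint32_t uVa)
{
    /* Bits 31:30 select the PDPT entry, bits 29:21 the PDE within it;
       together they are just uVa >> 21, in the range 0..2047. */
    return uVa >> 21;
}
#endif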
    const unsigned  iPDDst = (((RTGCUINTPTR)pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);

    /* AMD-V nested paging */
    /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
    Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_NX | X86_PML4E_A;
    PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_NX | X86_PDPE_A;
    rc = PGMShwSyncLongModePDPtr(pVM, (RTGCUINTPTR)pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
    AssertMsg(rc == VINF_PGM_SYNC_CR3, ("Unexpected rc=%Vrc\n", rc));
    /*
     * If we successfully correct the write protection fault due to dirty bit
     * tracking, or this page fault is a genuine one, then return immediately.
     */
    STAM_PROFILE_START(&pVM->pgm.s.StatCheckPageFault, e);
    rc = PGM_BTH_NAME(CheckPageFault)(pVM, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], (RTGCUINTPTR)pvFault);
    STAM_PROFILE_STOP(&pVM->pgm.s.StatCheckPageFault, e);
        STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution)
                     = rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVM->pgm.s.StatTrap0eDirtyAndAccessedBits : &pVM->pgm.s.StatTrap0eGuestTrap; });
        LogBird(("Trap0eHandler: returns %s\n", rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? "VINF_SUCCESS" : "VINF_EM_RAW_GUEST_TRAP"));
        return rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? VINF_SUCCESS : rc;
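
/*
 * Illustrative sketch, not part of the original file: the dirty-bit
 * tracking that CheckPageFault performs. Clean guest pages are mapped
 * read-only in the shadow page table; the first write traps, the handler
 * sets the guest A/D bits and lifts the artificial write protection, then
 * the instruction is restarted. Flags and names below are simplified
 * stand-ins for the real PGM structures:
 */
#if 0
#include <stdint.h>

#define MYPTE_P   0x01u  /* present  */
#define MYPTE_RW  0x02u  /* writable */
#define MYPTE_A   0x20u  /* accessed */
#define MYPTE_D   0x40u  /* dirty    */

/* Returns 1 if the write fault was only due to dirty-bit tracking and has
   been fixed up, 0 if it is a genuine guest fault to reflect. */
static int HandleDirtyBitWriteFault(uint32_t *puPteGst, uint32_t *puPteShw)
{
    if (   (*puPteGst & (MYPTE_P | MYPTE_RW)) == (MYPTE_P | MYPTE_RW)
        && !(*puPteGst & MYPTE_D))
    {
        *puPteGst |= MYPTE_A | MYPTE_D;  /* record the access in the guest PTE */
        *puPteShw |= MYPTE_RW;           /* page is dirty now; allow writes */
        return 1;
    }
    return 0;
}
#endif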
    STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0ePD[iPDSrc]);
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE) */

    /*
     * A common case is the not-present error caused by lazy page table syncing.
     *
     * It is IMPORTANT that we weed out any access to non-present shadow PDEs here
     * so we can safely assume that the shadow PT is present when calling SyncPage later.
     *
     * On failure, we ASSUME that SyncPT is out of memory or detected some kind
     * of mapping conflict and defer to SyncCR3 in R3.
     * (Again, we do NOT support access handlers for non-present guest pages.)
     */
    PdeSrc.au32[0] = 0; /* faked so we don't have to #ifdef everything */
    if (    !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
        STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eSyncPT; });
        STAM_PROFILE_START(&pVM->pgm.s.StatLazySyncPT, f);
        LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
        rc = PGM_BTH_NAME(SyncPT)(pVM, iPDSrc, pPDSrc, (RTGCUINTPTR)pvFault);
        STAM_PROFILE_STOP(&pVM->pgm.s.StatLazySyncPT, f);
        Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
        STAM_PROFILE_STOP(&pVM->pgm.s.StatLazySyncPT, f);

    /*
     * Check if this address is within any of our mappings.
     *
     * This is *very* fast and it's gonna save us a bit of effort below and prevent
     * us from screwing ourself with MMIO2 pages which have a GC Mapping (VRam).
     * (BTW, it's impossible to have physical access handlers in a mapping.)
     */
    STAM_PROFILE_START(&pVM->pgm.s.StatMapping, a);
    PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
    for ( ; pMapping; pMapping = CTXALLSUFF(pMapping->pNext))
        if ((RTGCUINTPTR)pvFault < (RTGCUINTPTR)pMapping->GCPtr)
        if ((RTGCUINTPTR)pvFault - (RTGCUINTPTR)pMapping->GCPtr < pMapping->cb)
            /*
             * The first thing we check is if we've got an undetected conflict.
             */
            while (iPT-- > 0)
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eConflicts);
                Log(("Trap0e: Detected Conflict %VGv-%VGv\n", pMapping->GCPtr, pMapping->GCPtrLast));
                VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
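
/*
 * Illustrative sketch, not part of the original file: the early-out walk
 * over the sorted mapping list used above. Because the list is ordered by
 * address, the search can stop as soon as the fault address is below the
 * current range. Simplified stand-in types:
 */
#if 0
#include <stddef.h>
#include <stdint.h>

typedef struct MYMAPPING
{
    uintptr_t         GCPtr;   /* start of the mapped range           */
    size_t            cb;      /* size of the range in bytes          */
    struct MYMAPPING *pNext;   /* next mapping, sorted by GCPtr       */
} MYMAPPING;

static MYMAPPING *FindMapping(MYMAPPING *pHead, uintptr_t uPtr)
{
    for (MYMAPPING *pCur = pHead; pCur; pCur = pCur->pNext)
    {
        if (uPtr < pCur->GCPtr)             /* sorted list: no later match possible */
            break;
        if (uPtr - pCur->GCPtr < pCur->cb)  /* inside [GCPtr, GCPtr + cb) */
            return pCur;
    }
    return NULL;
}
#endif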
            /*
             * Check if the fault address is in a virtual page access handler range.
             */
            PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&CTXSUFF(pVM->pgm.s.pTrees)->HyperVirtHandlers, pvFault);
                && (RTGCUINTPTR)pvFault - (RTGCUINTPTR)pCur->GCPtr < pCur->cb
                rc = CTXSUFF(pCur->pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->GCPtr, (RTGCUINTPTR)pvFault - (RTGCUINTPTR)pCur->GCPtr);
                rc = VINF_EM_RAW_EMULATE_INSTR; /* can't happen with VMX */
                STAM_COUNTER_INC(&pVM->pgm.s.StatTrap0eMapHandler);
                STAM_PROFILE_STOP(&pVM->pgm.s.StatMapping, a);

            /*
             * Pretend we're not here and let the guest handle the trap.
             */
            TRPMSetErrorCode(pVM, uErr & ~X86_TRAP_PF_P);
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eMap);
            LogFlow(("PGM: Mapping access -> route trap to recompiler!\n"));
            STAM_PROFILE_STOP(&pVM->pgm.s.StatMapping, a);
        STAM_PROFILE_STOP(&pVM->pgm.s.StatMapping, a);
    } /* pgmAreMappingsEnabled(&pVM->pgm.s) */
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE) */

    /*
     * Check if this fault address is flagged for special treatment,
     * which means we'll have to figure out the physical address and
     * check flags associated with it.
     *
     * ASSUME that we can limit any special access handling to pages
     * in page tables which the guest believes to be present.
     */
    bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
                 | ((RTGCPHYS)pvFault & (GST_BIG_PAGE_OFFSET_MASK ^ PAGE_OFFSET_MASK));
    rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
            unsigned iPTESrc = ((RTGCUINTPTR)pvFault >> GST_PT_SHIFT) & GST_PT_MASK;
            GCPhys = pPTSrc->a[iPTESrc].u & GST_PTE_PG_MASK;
    /* No paging so the fault address is the physical address */
    GCPhys = (RTGCPHYS)((RTGCUINTPTR)pvFault & ~PAGE_OFFSET_MASK);
# endif /* PGM_WITH_PAGING(PGM_GST_TYPE) */
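
/*
 * Illustrative sketch, not part of the original file: computing the guest
 * physical address of a fault inside a 4MB "big" page, as done above. The
 * PDE supplies the 4MB-aligned frame, and the expression
 * (GST_BIG_PAGE_OFFSET_MASK ^ PAGE_OFFSET_MASK) keeps only the page-frame
 * bits of the 22-bit offset, so the result stays 4KB-aligned:
 */
#if 0
#include <stdint.h>

static uint64_t BigPagePhysPageAddr(uint32_t uPde, uint32_t uVa)
{
    return (uint64_t)(uPde & 0xffc00000u)   /* 4MB-aligned base from the PDE */
         | (uVa & (0x003fffffu ^ 0xfffu));  /* bits 21:12 of the linear addr */
}
#endif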
    /*
     * If we have a GC address we'll check if it has any flags set.
     */
    STAM_PROFILE_START(&pVM->pgm.s.StatHandlers, b);
    rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
        /*
         * Physical page access handler.
         */
        const RTGCPHYS  GCPhysFault = GCPhys | ((RTGCUINTPTR)pvFault & PAGE_OFFSET_MASK);
        PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&CTXSUFF(pVM->pgm.s.pTrees)->PhysHandlers, GCPhysFault);
            /* If the region is write protected and we got a page not present fault, then sync
             * the pages. If the fault was caused by a read, then restart the instruction.
             * In case of write access continue to the GC write handler.
             *
             * ASSUMES that there is only one handler per page or that they have similar write properties.
             */
            if (    pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
                rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, (RTGCUINTPTR)pvFault, PGM_SYNC_NR_PAGES, uErr);
                STAM_COUNTER_INC(&pVM->pgm.s.StatHandlersOutOfSync);
                STAM_PROFILE_STOP(&pVM->pgm.s.StatHandlers, b);
                STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eOutOfSyncHndPhys; });
            AssertMsg(   pCur->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
                      || (pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE && (uErr & X86_TRAP_PF_RW)),
                      ("Unexpected trap for physical handler: %08X (phys=%08x) HCPhys=%X uErr=%X, enum=%d\n", pvFault, GCPhys, pPage->HCPhys, uErr, pCur->enmType));
            rc = pCur->CTXALLSUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, GCPhysFault, CTXALLSUFF(pCur->pvUser));
            STAM_COUNTER_INC(&pVM->pgm.s.StatHandlersPhysical);
            STAM_PROFILE_STOP(&pVM->pgm.s.StatHandlers, b);
            STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eHndPhys; });
            /* If the region is write protected and we got a page not present fault, then sync
             * the pages. If the fault was caused by a read, then restart the instruction.
             * In case of write access continue to the GC write handler.
             */
            if (    PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < PGM_PAGE_HNDL_PHYS_STATE_ALL
                rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, (RTGCUINTPTR)pvFault, PGM_SYNC_NR_PAGES, uErr);
                STAM_COUNTER_INC(&pVM->pgm.s.StatHandlersOutOfSync);
                STAM_PROFILE_STOP(&pVM->pgm.s.StatHandlers, b);
                STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eOutOfSyncHndVirt; });
            /*
             * Ok, it's a virtual page access handler.
             * Since it's faster to search by address, we'll do that first
             * and then retry by GCPhys if that fails.
             */
            /** @todo r=bird: perhaps we should consider looking up by physical address directly now? */
            /** @note r=svl: true, but lookup on virtual address should remain as a fallback as phys & virt trees might be out of sync, because the
             *        page was changed without us noticing it (not-present -> present without invlpg or mov cr3, xxx) */
            PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&CTXSUFF(pVM->pgm.s.pTrees)->VirtHandlers, pvFault);
            AssertMsg(!((RTGCUINTPTR)pvFault - (RTGCUINTPTR)pCur->GCPtr < pCur->cb)
                      || (pCur->enmType == PGMVIRTHANDLERTYPE_WRITE && (uErr & X86_TRAP_PF_RW))),
                      ("Unexpected trap for virtual handler: %VGv (phys=%VGp) HCPhys=%HGp uErr=%X, enum=%d\n", pvFault, GCPhys, pPage->HCPhys, uErr, pCur->enmType));
            if (    (RTGCUINTPTR)pvFault - (RTGCUINTPTR)pCur->GCPtr < pCur->cb
                    || pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
                rc = CTXSUFF(pCur->pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->GCPtr, (RTGCUINTPTR)pvFault - (RTGCUINTPTR)pCur->GCPtr);
                rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
                STAM_COUNTER_INC(&pVM->pgm.s.StatHandlersVirtual);
                STAM_PROFILE_STOP(&pVM->pgm.s.StatHandlers, b);
                STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eHndVirt; });
            /* Unhandled part of a monitored page */
        /* Check by physical address. */
        rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys + ((RTGCUINTPTR)pvFault & PAGE_OFFSET_MASK),
                || pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
            Assert((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == GCPhys);
            RTGCUINTPTR off = (iPage << PAGE_SHIFT) + ((RTGCUINTPTR)pvFault & PAGE_OFFSET_MASK) - ((RTGCUINTPTR)pCur->GCPtr & PAGE_OFFSET_MASK);
            rc = CTXSUFF(pCur->pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->GCPtr, off);
            rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
return rc;
STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eOutOfSyncHndPhys; });
return rc;
/** @todo This particular case can cause quite a lot of overhead. E.g. early stage of kernel booting in Ubuntu 6.06
return rc;
PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&CTXSUFF(pVM->pgm.s.pTrees)->VirtHandlers, pvFault);
if (pCur)
("Unexpected trap for virtual handler: %08X (phys=%08x) HCPhys=%X uErr=%X, enum=%d\n", pvFault, GCPhys, pPage->HCPhys, uErr, pCur->enmType));
# ifdef IN_GC
rc = CTXSUFF(pCur->pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->GCPtr, (RTGCUINTPTR)pvFault - (RTGCUINTPTR)pCur->GCPtr);
return rc;
/* When the guest accesses invalid physical memory (e.g. probing of RAM or accessing a remapped MMIO range), then we'll fall
return VINF_EM_RAW_EMULATE_INSTR;
# ifdef PGM_OUT_OF_SYNC_IN_GC
pvFault, pRegFrame->eip, PdeSrc.n.u1User, fPageGst, GCPhys, CSAMDoesPageNeedScanning(pVM, (RTRCPTR)pRegFrame->eip)));
/* Note: can't check for X86_TRAP_ID bit, because that requires execute disable support on the CPU */
# ifdef CSAM_DETECT_NEW_CODE_PAGES
return rc;
# ifdef CSAM_DETECT_NEW_CODE_PAGES
rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &PC);
return VINF_SUCCESS;
* Note: Do NOT use PGM_SYNC_NR_PAGES here. That only works if the page is not present, which is not true in this case.
# ifdef VBOX_STRICT
LogFlow(("Obsolete physical monitor page out of sync %VGv - phys %VGp flags=%08llx\n", pvFault, GCPhys, (uint64_t)fPageGst));
STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eOutOfSyncObsHnd; });
return VINF_SUCCESS;
return rc;
# ifdef VBOX_STRICT
AssertMsg((fPageShw & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)) == (fPageGst & ~(X86_PTE_A | X86_PTE_D | X86_PTE_AVL_MASK)),
("Page flags mismatch! pvFault=%VGv GCPhys=%VGp fPageShw=%08llx fPageGst=%08llx\n", pvFault, GCPhys, fPageShw, fPageGst));
return VINF_EM_RAW_GUEST_TRAP;
return VINF_EM_RAW_EMULATE_INSTR;
return VERR_INTERNAL_ERROR;
int rc;
const unsigned iPdPte = (GCPtrPage >> X86_PDPT_SHIFT); /* no mask; flat index into the 2048 entry array. */
AssertMsg(rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT, ("Unexpected rc=%Vrc\n", rc));
return VINF_SUCCESS;
return VINF_SUCCESS;
unsigned iPDSrc;
if (pPDSrc)
PdeSrc.u = 0;
# ifdef IN_RING3
&& fIsBigPage
return VINF_SUCCESS;
pPml4eDst->u = 0;
pPml4eDst->u = 0;
pPml4eDst->u = 0;
return VINF_SUCCESS;
LogFlow(("InvalidatePage: Out-of-sync PML4E (GCPhys) at %VGv %VGp vs %VGp Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
pPml4eDst->u = 0;
return VINF_SUCCESS;
pPdpeDst->u = 0;
pPdpeDst->u = 0;
pPdpeDst->u = 0;
return VINF_SUCCESS;
LogFlow(("InvalidatePage: Out-of-sync PDPE (GCPhys) at %VGv %VGp vs %VGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
pPdpeDst->u = 0;
return VINF_SUCCESS;
pPdeDst->u = 0;
pPdeDst->u = 0;
else if (!fIsBigPage)
# ifdef PGMPOOL_WITH_USER_TRACKING
/* This is very unlikely with caching/monitoring enabled. */
LogFlow(("InvalidatePage: Out-of-sync at %VGp PdeSrc=%RX64 PdeDst=%RX64 ShwGCPhys=%VGp iPDDst=%#x\n",
pPdeDst->u = 0;
&& ( PdeSrc.b.u1Dirty /** @todo rainy day: What about read-only 4M pages? not very common, but still... */
LogFlow(("Skipping flush for big page containing %VGv (PD=%X .u=%VX64)-> nothing has changed!\n", GCPtrPage, iPDSrc, PdeSrc.u));
return VINF_SUCCESS;
pPdeDst->u = 0;
pPdeDst->u = 0;
return rc;
return VINF_SUCCESS;
#ifdef PGMPOOL_WITH_USER_TRACKING
DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVM pVM, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys)
# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
LogFlow(("SyncPageWorkerTrackDeref: Damn HCPhys=%VHp pShwPage->idx=%#x!!!\n", HCPhys, pShwPage->idx));
pRam;
while (iPage-- > 0)
DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackAddref)(PVM pVM, PPGMPOOLPAGE pShwPage, uint16_t u16, PPGMPAGE pPage, const unsigned iPTDst)
# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
if (!u16)
u16, pPage->HCPhys, (pPage->HCPhys & MM_RAM_FLAGS_NO_REFS_MASK) | ((uint64_t)u16 << MM_RAM_FLAGS_CREFS_SHIFT), iPTDst));
DECLINLINE(void) PGM_BTH_NAME(SyncPageWorker)(PVM pVM, PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc, PPGMPOOLPAGE pShwPage, unsigned iPTDst)
/** @todo r=bird: Are we actually handling dirty and access bits for pages with access handlers correctly? No. */
PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
PteDst.u = 0;
PteDst.u = 0;
* If the page is not flagged as dirty and is writable, then make it read-only, so we can set the dirty bit
PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT | X86_PTE_RW))
PteDst.u = (PteSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
#ifdef PGMPOOL_WITH_USER_TRACKING
PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst);
Log2(("SyncPageWorker: deref! *pPteDst=%RX64 PteDst=%RX64\n", (uint64_t)pPteDst->u, (uint64_t)PteDst.u));
PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst);
#ifdef PGMPOOL_WITH_USER_TRACKING
pPteDst->u = 0;
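
/*
 * Illustrative sketch, not part of the original file: the core of what
 * SyncPageWorker computes above. A shadow PTE keeps the guest's low flag
 * bits but substitutes the host physical frame, and drops the write bit
 * while the guest dirty bit is clear so the first write can be trapped.
 * Simplified stand-in flags and names:
 */
#if 0
#include <stdint.h>

#define MYPTE_RW    0x02ull
#define MYPTE_D     0x40ull
#define MYPTE_FLAGS 0xfffull  /* low flag bits of a PTE */

static uint64_t MakeShadowPte(uint64_t uPteGst, uint64_t HCPhysFrame)
{
    uint64_t u = (uPteGst & MYPTE_FLAGS)        /* permissions from the guest  */
               | (HCPhysFrame & ~MYPTE_FLAGS);  /* host frame, not guest frame */
    if (!(uPteGst & MYPTE_D))
        u &= ~MYPTE_RW;                         /* write-protect until dirty   */
    return u;
}
#endif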
PGM_BTH_DECL(int, SyncPage)(PVM pVM, GSTPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uErr)
const unsigned iPdPte = (GCPtrPage >> X86_PDPT_SHIFT); /* no mask; flat index into the 2048 entry array. */
if (!fBigPage)
if (!fBigPage)
# ifdef PGM_SYNC_N_PAGES
const unsigned offPTSrc = 0;
iPTDst = 0;
RTGCUINTPTR GCPtrCurPage = ((RTGCUINTPTR)GCPtrPage & ~(RTGCUINTPTR)(GST_PT_MASK << GST_PT_SHIFT)) | ((offPTSrc + iPTDst) << PAGE_SHIFT);
#ifndef IN_RING0
RTGCPHYS GCPhys = (PdeSrc.u & GST_PDE_BIG_PG_MASK) | ((RTGCUINTPTR)GCPtrPage & GST_BIG_PAGE_OFFSET_MASK);
PteDst.u = (PdeSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
PteDst.u = 0;
# ifdef PGMPOOL_WITH_USER_TRACKING
PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst);
return VINF_SUCCESS;
return VINF_PGM_SYNCPAGE_MODIFIED_PDE;
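
/*
 * Illustrative sketch, not part of the original file: the PGM_SYNC_N_PAGES
 * idea used in SyncPage. Instead of syncing only the faulting PTE, a small
 * window of entries around the fault is synced, clipped to the bounds of
 * the page table. The exact windowing policy here is an assumption for
 * illustration, not the real PGM logic:
 */
#if 0
static void ComputeSyncWindow(unsigned iPteFault, unsigned cPtEntries, unsigned cWindow,
                              unsigned *piFirst, unsigned *piEnd)
{
    unsigned iFirst = iPteFault >= cWindow / 2 ? iPteFault - cWindow / 2 : 0;
    unsigned iEnd   = iFirst + cWindow;
    if (iEnd > cPtEntries)              /* clip to the page table and shift  */
    {                                   /* the window back down if possible  */
        iEnd   = cPtEntries;
        iFirst = iEnd > cWindow ? iEnd - cWindow : 0;
    }
    *piFirst = iFirst;
    *piEnd   = iEnd;
}
#endif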
# ifdef PGM_SYNC_N_PAGES
const unsigned offPTSrc = 0;
iPTDst = 0;
RTGCUINTPTR GCPtrCurPage = ((RTGCUINTPTR)GCPtrPage & ~(RTGCUINTPTR)(GST_PT_MASK << GST_PT_SHIFT)) | ((offPTSrc + iPTDst) << PAGE_SHIFT);
RTGCUINTPTR GCPtrCurPage = ((RTGCUINTPTR)GCPtrPage & ~(RTGCUINTPTR)(GST_PT_MASK << GST_PT_SHIFT)) | ((offPTSrc + iPTDst) << PAGE_SHIFT);
return VINF_SUCCESS;
return VERR_INTERNAL_ERROR;
PGM_BTH_DECL(int, CheckPageFault)(PVM pVM, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCUINTPTR GCPtrPage)
bool fBigPagesSupported = true;
unsigned uPageFaultLevel;
int rc;
uPageFaultLevel = 0;
goto UpperLevelPageFault;
PX86PDPE pPdpeSrc = &pVM->pgm.s.CTXSUFF(pGstPaePDPT)->a[(GCPtrPage >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
goto UpperLevelPageFault;
goto UpperLevelPageFault;
if (fWriteFault)
return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;
return VINF_PGM_NO_DIRTY_BIT_TRACKING;
# ifdef IN_GC
return VINF_EM_RAW_GUEST_TRAP;
if (fWriteFault)
return VINF_SUCCESS;
if (pShwPage)
# ifdef VBOX_STRICT
if (pPage)
("Unexpected dirty bit tracking on monitored page %VGv (phys %VGp)!!!!!!\n", GCPtrPage, pPteSrc->u & X86_PTE_PAE_PG_MASK));
return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;
# ifdef VBOX_STRICT
return VINF_PGM_NO_DIRTY_BIT_TRACKING;
return rc;
# ifdef IN_GC
/* Check the present bit as the shadow tables can cause different error codes by being out of sync. */
return VINF_EM_RAW_GUEST_TRAP;
AssertMsg(iPDSrc == ((GCPtrPage >> GST_PD_SHIFT) & GST_PD_MASK), ("iPDSrc=%x GCPtrPage=%VGv\n", iPDSrc, GCPtrPage));
return rc;
# ifndef PGM_WITHOUT_MAPPINGS
# ifndef IN_RING3
return VERR_ADDRESS_CONFLICT;
int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, GCPtrPage & (GST_PD_MASK << GST_PD_SHIFT));
return rc;
if (fPageTable)
if (fPageTable)
| (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
| (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
return VINF_SUCCESS;
return VINF_PGM_SYNC_CR3;
if (fPageTable)
| (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
# ifdef PGM_SYNC_N_PAGES
iPTDst = 0;
unsigned iPTDst = 0;
const unsigned offPTSrc = 0;
# ifndef IN_RING0
Log2(("SyncPT: 4K+ %VGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s dst.raw=%08llx iPTSrc=%x PdeSrc.u=%x physpte=%VGp\n",
pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : "", pPTDst->a[iPTDst].u, iPTSrc, PdeSrc.au32[0],
* @todo It might be more efficient to sync only a part of the 4MB page (similar to what we do for 4kb PDs).
| (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
PteDstBase.u = PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT);
unsigned iPTDst = 0;
# ifdef IN_RING3
return rc;
PteDst.u = 0;
# ifndef IN_RING0
PteDst.u = 0;
# ifdef PGMPOOL_WITH_USER_TRACKING
PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pShwPage, pPage->HCPhys >> MM_RAM_FLAGS_IDX_SHIFT, pPage, iPTDst); /** @todo PAGE FLAGS */
(RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT)), PteDst.n.u1Present, PteDst.n.u1Write, PteDst.n.u1User, (uint64_t)PteDst.u,
iHCPage++;
iPTDst++;
else if (pRam)
iPTDst++;
# ifdef IN_GC
return rc;
rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, (RTGCUINTPTR)GCPtrPage, PGM_SYNC_NR_PAGES, 0 /* page not present */);
return rc;
return VERR_INTERNAL_ERROR;
#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
    && PGM_SHW_TYPE != PGM_TYPE_NESTED
unsigned iPDSrc;
if (!pPDSrc)
unsigned iPDSrc;
if (!pPDSrc)
const unsigned iPDSrc = 0;
return rc;
return rc;
PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fPage, unsigned uErr)
#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
    && PGM_SHW_TYPE != PGM_TYPE_NESTED
# ifndef IN_RING0
unsigned iPDSrc;
if (pPDSrc)
return VINF_EM_RAW_GUEST_TRAP;
unsigned iPDSrc;
if (!pPDSrc)
return VINF_EM_RAW_GUEST_TRAP;
const unsigned iPDSrc = 0;
return rc;
return rc;
return VINF_EM_RAW_GUEST_TRAP;
return rc;
return VERR_INTERNAL_ERROR;
#if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
# if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
return BTH_PGMPOOLKIND_PT_FOR_PT;
return BTH_PGMPOOLKIND_PT_FOR_BIG;
#define MY_STAM_COUNTER_INC(a) do { } while (0)
return VINF_SUCCESS;
# ifdef PGMPOOL_WITH_MONITORING
# ifdef IN_RING3
return VINF_PGM_SYNC_CR3;
MY_STAM_COUNTER_INC(fGlobal ? &pVM->pgm.s.CTXMID(Stat,SyncCR3Global) : &pVM->pgm.s.CTXMID(Stat,SyncCR3NotGlobal));
# if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
bool fBigPagesSupported = true;
# ifndef IN_GC
unsigned iPdNoMapping;
pMapping = 0;
iPdNoMapping = ~0U;
LogFlow(("SyncCR3: Out-of-sync PML4E (GCPhys) GCPtr=%VGv %VGp vs %VGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
(uint64_t)iPml4e << X86_PML4_SHIFT, pShwPdpt->GCPhys, GCPhysPdptSrc, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
pPml4eDst->u = 0;
unsigned iPDSrc;
LogFlow(("SyncCR3: Out-of-sync PDPE (GCPhys) GCPtr=%VGv %VGp vs %VGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
((uint64_t)iPml4e << X86_PML4_SHIFT) + ((uint64_t)iPDPTE << X86_PDPT_SHIFT), pShwPde->GCPhys, GCPhysPdeSrc, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
LogFlow(("SyncCR3: Out-of-sync PDPE (GCPhys) GCPtr=%VGv %VGp vs %VGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
(uint64_t)iPDPTE << X86_PDPT_SHIFT, pShwPde->GCPhys, GCPhysPdeSrc, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
/* Mark it as not present if there's no hypervisor mapping present. (bit flipped at the top of Trap0eHandler) */
pPdpeDst->u = 0;
AssertMsg(&pVM->pgm.s.CTXMID(ap,PaePDs)[iPD * 2 / 512]->a[iPD * 2 % 512] == pPDEDst, ("%p vs %p\n", &pVM->pgm.s.CTXMID(ap,PaePDs)[iPD * 2 / 512]->a[iPD * 2 % 512], pPDEDst));
&& !defined(PGM_WITHOUT_MAPPINGS)
pPDEDst += cPTs + (PGM_GST_TYPE != PGM_SHW_TYPE) * cPTs; /* Only applies to the pae shadow and 32 bits guest case */
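
/*
 * Illustrative sketch, not part of the original file: why the stride above
 * doubles for a 32-bit guest on a PAE shadow, and why the loops below use
 * iPdShw = iPD * 2. A 32-bit guest PDE covers 4MB but a PAE shadow PDE
 * covers only 2MB, so every guest PDE corresponds to a pair of shadow PDEs:
 */
#if 0
static void GuestPdeToShadowPdes(unsigned iPDGst, unsigned aiPdShw[2])
{
    aiPdShw[0] = iPDGst * 2;      /* first 2MB half of the 4MB guest PDE */
    aiPdShw[1] = iPDGst * 2 + 1;  /* second 2MB half                     */
}
#endif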
# ifdef IN_RING3
int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPDPTE << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));
return rc;
return VINF_PGM_SYNC_CR3;
# else /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
# endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
|| !fBigPagesSupported)
|| ( !fGlobal
|| ( fBigPagesSupported
# ifdef VBOX_WITH_STATISTICS
if ( !fGlobal
/** @todo a replacement strategy isn't really needed unless we're using a very small pool < 512 pages.
* The whole ageing stuff should be put in yet another set of #ifdefs. For now, let's just skip it. */
pPDEDst->u = 0;
pPDEDst++;
for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDEDst->u & SHW_PDE_PG_MASK), pShwPde->idx, iPdShw);
pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDEDst->u & SHW_PDE_PG_MASK), SHW_POOL_ROOT_IDX, iPdShw);
pPDEDst->u = 0;
pPDEDst++;
&& !defined(PGM_WITHOUT_MAPPINGS)
iPdNoMapping = ~0U;
# ifdef IN_RING3
int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPDPTE << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));
return rc;
return VINF_PGM_SYNC_CR3;
if (pMapping)
pPDEDst += cPTs + (PGM_GST_TYPE != PGM_SHW_TYPE) * cPTs; /* Only applies to the pae shadow and 32 bits guest case */
# else /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
# endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
return VINF_SUCCESS;
return VINF_SUCCESS;
#ifdef VBOX_STRICT
#ifdef IN_GC
#ifdef IN_RING3
PGMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint32_t cr3, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp);
PGM_BTH_DECL(unsigned, AssertCR3)(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb)
unsigned cErrors = 0;
bool fBigPagesSupported = true;
# ifndef IN_RING0
int rc;
# ifndef IN_RING0
AssertMsgReturn(HCPhys == HCPhysShw, ("HCPhys=%VHp HCPhysShw=%VHp (cr3)\n", HCPhys, HCPhysShw), false);
AssertMsgReturn((cr3 & GST_CR3_PAGE_MASK) == GCPhys, ("GCPhys=%VGp cr3=%VGp\n", GCPhys, (RTGCPHYS)cr3), false);
AssertMsgFailed(("Present bit doesn't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
cErrors++;
AssertMsgFailed(("Physical address doesn't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64 Phys %RX64 vs RX64\n", pPml4eDst->u, pPml4eSrc->u, pShwPdpt->GCPhys, GCPhysPdptSrc));
cErrors++;
AssertMsgFailed(("User/Write/NoExec bits don't match! pPml4eDst.u=%#RX64 pPml4eSrc.u=%RX64\n", pPml4eDst->u, pPml4eSrc->u));
cErrors++;
unsigned iPDSrc;
AssertMsgFailed(("Present bit doesn't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));
cErrors++;
AssertMsgFailed(("Physical address doesn't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs RX64\n", pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
cErrors++;
AssertMsgFailed(("User/Write/NoExec bits don't match! pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64\n", pPdpeDst->u, PdpeSrc.u));
cErrors++;
const PX86PDPAE pPDDst = pVM->pgm.s.CTXMID(ap,PaePDs)[0]; /* We treat this as a PD with 2048 entries, so no need to and with SHW_PD_MASK to get iPDDst */
AssertMsgFailed(("Mapping shall only have PGM_PDFLAGS_MAPPING set! PdeDst.u=%#RX64\n", (uint64_t)PdeDst.u));
cErrors++;
if (!pPoolPage)
cErrors++;
AssertMsgFailed(("PDE flags PWT and/or PCD is set at %VGv! These flags are not virtualized! PdeDst=%#RX64\n",
cErrors++;
cErrors++;
cErrors++;
|| !fBigPagesSupported)
cErrors++;
!= (!PdeSrc.b.u1Size || !fBigPagesSupported ? BTH_PGMPOOLKIND_PT_FOR_PT : BTH_PGMPOOLKIND_PT_FOR_BIG))
cErrors++;
if (!pPhysPage)
cErrors++;
cErrors++;
|| !fBigPagesSupported)
AssertMsgFailed(("Cannot map/convert guest physical address %VGp in the PDE at %VGv! PdeSrc=%#RX64\n",
cErrors++;
/// @todo We get here a lot on out-of-sync CR3 entries. The access handler should zap them to avoid false alarms here!
cErrors++;
cErrors++;
const unsigned offPTSrc = 0;
if (!(PteDst.u & (X86_PTE_P | PGM_PTFLAGS_TRACK_DIRTY))) /** @todo deal with ALL handlers and CSAM !P pages! */
# ifdef IN_RING3
AssertMsgFailed(("Out of sync (!P) PTE at %VGv! PteSrc=%#RX64 PteDst=%#RX64 pPTSrc=%VGv iPTSrc=%x PdeSrc=%x physpte=%VGp\n",
cErrors++;
uint64_t fIgnoreFlags = GST_PTE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_G | X86_PTE_D | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_PAT;
# ifdef IN_RING3
cErrors++;
AssertMsgFailed(("Out of sync (phys) at %VGv! HCPhysShw=%VHp HCPhys=%VHp GCPhysGst=%VGp PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
if (!pPhysPage)
cErrors++;
AssertMsgFailed(("Invalid guest page at %VGv is writable! GCPhysGst=%VGp PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("Out of sync (phys) at %VGv! HCPhysShw=%VHp HCPhys=%VHp GCPhysGst=%VGp PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("WRITE access flagged at %VGv but the page is writable! HCPhys=%VGv PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("ALL access flagged at %VGv but the page is present! HCPhys=%VHp PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
cErrors++;
cErrors++;
cErrors++;
AssertMsgFailed(("!DIRTY page at %VGv is has mismatching accessed bit! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("PGM_PTFLAGS_TRACK_DIRTY set at %VGv but no accessed bit emulation! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("!ACCESSED page at %VGv is has the accessed bit set! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
# ifdef DEBUG_sandervl
AssertMsgFailed(("Flags mismatch at %VGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
uint64_t fIgnoreFlags = X86_PDE_AVL_MASK | GST_PDE_PG_MASK | X86_PDE4M_G | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_PWT | X86_PDE4M_PCD;
cErrors++;
cErrors++;
AssertMsgFailed(("!DIRTY page at %VGv is has mismatching accessed bit! PteSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("PGM_PDFLAGS_TRACK_DIRTY set at %VGv but no accessed bit emulation! PdeSrc=%#RX64 PdeDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("!ACCESSED page at %VGv is has the accessed bit set! PdeSrc=%#RX64 PdeDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("Flags mismatch (B) at %VGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PdeSrc=%#RX64 PdeDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("The PTE at %VGv emulating a 2/4M page is marked TRACK_DIRTY! PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
# ifdef IN_RING3
cErrors++;
AssertMsgFailed(("Out of sync (phys) at %VGv! HCPhysShw=%VHp HCPhys=%VHp GCPhysGst=%VGp PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
if (!pPhysPage)
cErrors++;
AssertMsgFailed(("Invalid guest page at %VGv is writable! GCPhysGst=%VGp PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("Out of sync (phys) at %VGv! HCPhysShw=%VHp HCPhys=%VHp GCPhysGst=%VGp PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("WRITE access flagged at %VGv but the page is writable! HCPhys=%VGv PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
AssertMsgFailed(("ALL access flagged at %VGv but the page is present! HCPhys=%VGv PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
&& (PdeSrc.u & ~(fIgnoreFlags | X86_PTE_RW)) != (PteDst.u & ~fIgnoreFlags) /* lazy phys handler dereg. */
AssertMsgFailed(("Flags mismatch (BT) at %VGv! %#RX64 != %#RX64 fIgnoreFlags=%#RX64 PdeSrc=%#RX64 PteDst=%#RX64\n",
cErrors++;
# ifdef DEBUG
if (cErrors)
return cErrors;