PGMAllPool.cpp revision 50e0e1bb8996934b3f38c117a456f825052bfb0c
 * PGM Shadow Page Pool.
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
DECLINLINE(unsigned) pgmPoolTrackGetShadowEntrySize(PGMPOOLKIND enmKind);
DECLINLINE(unsigned) pgmPoolTrackGetGuestEntrySize(PGMPOOLKIND enmKind);
static void pgmPoolTrackDeref(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
static void pgmPoolTracDerefGCPhysHint(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhysHint);
static int  pgmPoolTrackAddUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable);
static void pgmPoolMonitorModifiedRemove(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
DECLEXPORT(int) pgmPoolAccessHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
 * Checks if the specified page pool kind is for a 4MB or 2MB guest page.
 * @returns true if it's the shadow of a 4MB or 2MB guest page, otherwise false.
 * @param   enmKind     The page kind.
DECLINLINE(bool) pgmPoolIsBigPage(PGMPOOLKIND enmKind)
            return true;
    return false;
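/* Illustrative sketch (not part of the original source): a minimal pgmPoolIsBigPage
 * along the lines of the fragment above - a switch that answers true only for the
 * kinds shadowing 4MB/2MB guest pages.  The PGMPOOLKIND_* names used in the cases
 * are assumptions for illustration, not confirmed against the real enum. */
DECLINLINE(bool) examplePoolIsBigPage(PGMPOOLKIND enmKind)
{
    switch (enmKind)
    {
        case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:    /* assumed kind name */
        case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:      /* assumed kind name */
        case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:        /* assumed kind name */
            return true;
        default:
            return false;
    }
}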
#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
 * Maps a pool page into the current context.
 * @returns Pointer to the mapping.
 * @param   pPGM    Pointer to the PGM instance data.
 * @param   pPage   The page to map.
void *pgmPoolMapPageFallback(PPGM pPGM, PPGMPOOLPAGE pPage)
    /* General pages are taken care of by the inlined part; we only
       end up here in case of failure. */
    AssertReleaseReturn(pPage->idx < PGMPOOL_IDX_FIRST, NULL);
/** @todo make sure HCPhys is valid for *all* indexes. */
    /* special pages. */
    AssertReleaseMsgFailed(("Invalid index %d\n", pPage->idx));
# else  /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
            AssertReleaseMsgFailed(("PGMPOOL_IDX_PAE_PD is not usable in VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 context\n"));
            AssertReleaseMsgFailed(("Invalid index %d\n", pPage->idx));
    AssertMsg(HCPhys && HCPhys != NIL_RTHCPHYS && !(PAGE_OFFSET_MASK & HCPhys), ("%RHp\n", HCPhys));
    int rc = pgmR0DynMapHCPageInlined(pPGM, HCPhys, &pv);
# endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
 * Determine the size of a write instruction.
 * @returns number of bytes written.
 * @param   pDis    The disassembler state.
static unsigned pgmPoolDisasWriteSize(PDISCPUSTATE pDis)
     * This is very crude and possibly wrong for some opcodes,
     * but since it's not really supposed to be called we can
     * probably live with that.
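/* Illustrative sketch (not part of the original source): how a write size can be
 * derived from a decoded instruction, in the crude spirit admitted above.
 * ExampleDisState is a hypothetical stand-in for PDISCPUSTATE; its fields are
 * assumptions, not the real disassembler layout. */
typedef struct ExampleDisState
{
    unsigned cbOperand;   /* size of the destination operand, 0 if unknown */
    bool     f64BitMode;  /* instruction decoded in 64-bit mode */
} ExampleDisState;

static unsigned exampleDisasWriteSize(const ExampleDisState *pDis)
{
    if (pDis->cbOperand)               /* trust the operand size when the decoder knows it */
        return pDis->cbOperand;
    return pDis->f64BitMode ? 8 : 4;   /* otherwise fall back to the natural word size */
}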
 * Flushes a chain of pages sharing the same access monitor.
 * @returns VBox status code suitable for scheduling.
 * @param   pPool   The pool.
 * @param   pPage   A page in the chain.
int pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
    LogFlow(("pgmPoolMonitorChainFlush: Flush page %RGp type=%d\n", pPage->GCPhys, pPage->enmKind));
     * Find the list head.
     * Iterate the list flushing each shadow page.
        if (rc2 == VERR_PGM_POOL_CLEARED && rc == VINF_SUCCESS)
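/* Illustrative sketch (not part of the original source): walking a monitor chain that
 * is linked through pool page indices, as the "find the list head / iterate the list"
 * comments above describe.  examplePoolFlushPage is an assumed helper; the index
 * fields (iMonitoredPrev/iMonitoredNext, NIL_PGMPOOL_IDX) mirror those used later in
 * this file. */
static int examplePoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    int rc = VINF_SUCCESS;

    /* Find the list head. */
    uint16_t idx = pPage->idx;
    while (pPool->aPages[idx].iMonitoredPrev != NIL_PGMPOOL_IDX)
        idx = pPool->aPages[idx].iMonitoredPrev;

    /* Iterate the list, flushing each shadow page. */
    while (idx != NIL_PGMPOOL_IDX)
    {
        const uint16_t idxNext = pPool->aPages[idx].iMonitoredNext;
        int rc2 = examplePoolFlushPage(pPool, &pPool->aPages[idx]);   /* assumed helper */
        if (rc2 == VERR_PGM_POOL_CLEARED && rc == VINF_SUCCESS)
            rc = rc2;             /* remember that a light weight pool flush is pending */
        idx = idxNext;
    }
    return rc;
}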
 * Wrapper for getting the current context pointer to the entry being modified.
 * @returns Pointer to the current context mapping of the entry.
 * @param   pPool       The pool.
 * @param   pvFault     The fault virtual address.
 * @param   GCPhysFault The fault physical address.
 * @param   cbEntry     The entry size.
DECLINLINE(const void *) pgmPoolMonitorGCPtr2CCPtr(PPGMPOOL pPool, RTHCPTR pvFault, RTGCPHYS GCPhysFault, const unsigned cbEntry)
DECLINLINE(const void *) pgmPoolMonitorGCPtr2CCPtr(PPGMPOOL pPool, RTGCPTR pvFault, RTGCPHYS GCPhysFault, const unsigned cbEntry)
    return (const void *)((RTGCUINTPTR)pvFault & ~(RTGCUINTPTR)(cbEntry - 1));
    int rc = PGMDynMapGCPageOff(pPool->pVMR0, GCPhysFault & ~(RTGCPHYS)(cbEntry - 1), &pvRet);
    int rc = pgmRamGCPhys2HCPtr(&pPool->pVMR0->pgm.s, GCPhysFault & ~(RTGCPHYS)(cbEntry - 1), &pvRet);
    return (RTHCPTR)((uintptr_t)pvFault & ~(RTHCUINTPTR)(cbEntry - 1));
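/* Illustrative sketch (not part of the original source): the heart of the wrapper is
 * simply rounding the faulting address down to the start of the entry being written,
 * so the caller can inspect the whole, naturally aligned guest/shadow entry.  cbEntry
 * is a power of two (4 or 8), so masking is enough. */
static const void *exampleAlignFaultToEntry(uintptr_t uFault, unsigned cbEntry)
{
    return (const void *)(uFault & ~(uintptr_t)(cbEntry - 1));
}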
 * Process shadow entries before they are changed by the guest.
 * For PT entries we will clear them. For PD entries, we'll simply check
 * for mapping conflicts and set the SyncCR3 FF if found.
 * @param   pPool       The pool.
 * @param   pPage       The head page.
 * @param   GCPhysFault The guest physical fault address.
 * @param   uAddress    In R0 and GC this is the guest context fault address (flat).
 *                      In R3 this is the host context 'fault' address.
 * @param   pCpu        The disassembler state for figuring out the write size.
 *                      This need not be specified if the caller knows we won't do cross entry accesses.
void pgmPoolMonitorChainChanging(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, RTHCPTR pvAddress, PDISCPUSTATE pCpu)
void pgmPoolMonitorChainChanging(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, RTGCPTR pvAddress, PDISCPUSTATE pCpu)
    const unsigned off     = GCPhysFault & PAGE_OFFSET_MASK;
    const unsigned cbWrite = (pCpu) ? pgmPoolDisasWriteSize(pCpu) : 0;
    LogFlow(("pgmPoolMonitorChainChanging: %RGv phys=%RGp kind=%d cbWrite=%d\n", pvAddress, GCPhysFault, pPage->enmKind, cbWrite));
            uShw.pv = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
                PCX86PTE pGstPte = (PCX86PTE)pgmPoolMonitorGCPtr2CCPtr(pPool, pvAddress, GCPhysFault, sizeof(*pGstPte));
                Log4(("pgmPoolMonitorChainChanging 32_32: deref %016RX64 GCPhys %08RX32\n", uShw.pPT->a[iShw].u & X86_PTE_PAE_PG_MASK, pGstPte->u & X86_PTE_PG_MASK));
            /* page/2 sized */
            uShw.pv = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
                const unsigned iShw = (off / sizeof(X86PTE)) & (X86_PG_PAE_ENTRIES - 1);
                PCX86PTE pGstPte = (PCX86PTE)pgmPoolMonitorGCPtr2CCPtr(pPool, pvAddress, GCPhysFault, sizeof(*pGstPte));
                Log4(("pgmPoolMonitorChainChanging pae_32: deref %016RX64 GCPhys %08RX32\n", uShw.pPT->a[iShw].u & X86_PTE_PAE_PG_MASK, pGstPte->u & X86_PTE_PG_MASK));
            uShw.pv = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
                PCX86PTEPAE pGstPte = (PCX86PTEPAE)pgmPoolMonitorGCPtr2CCPtr(pPool, pvAddress, GCPhysFault, sizeof(*pGstPte));
                Log4(("pgmPoolMonitorChainChanging pae: deref %016RX64 GCPhys %016RX64\n", uShw.pPTPae->a[iShw].u & X86_PTE_PAE_PG_MASK, pGstPte->u & X86_PTE_PAE_PG_MASK));
            /* paranoia / a bit assumptive. */
                const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PTEPAE);
                AssertReturnVoid(iShw2 < RT_ELEMENTS(uShw.pPTPae->a));
                PCX86PTEPAE pGstPte = (PCX86PTEPAE)pgmPoolMonitorGCPtr2CCPtr(pPool, pvAddress, GCPhysFault, sizeof(*pGstPte));
                Log4(("pgmPoolMonitorChainChanging pae: deref %016RX64 GCPhys %016RX64\n", uShw.pPTPae->a[iShw2].u & X86_PTE_PAE_PG_MASK, pGstPte->u & X86_PTE_PAE_PG_MASK));
            uShw.pv = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
            const unsigned iShw = off / sizeof(X86PTE);   // ASSUMING 32-bit guest paging!
                Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s));
                VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
                LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
            /* paranoia / a bit assumptive. */
                const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PTE);
                Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s));
                VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
                LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
#if 0 /* useful when running PGMAssertCR3(), a bit too troublesome for general use (TLBs). */
                     && !VM_FF_ISSET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3))
                LogFlow(("pgmPoolMonitorChainChanging: iShw=%#x: %RX32 -> freeing it!\n", iShw, uShw.pPD->a[iShw].u));
# ifdef IN_RC       /* TLB load - we're pushing things a bit... */
                pgmPoolFree(pPool->CTX_SUFF(pVM), uShw.pPD->a[iShw].u & X86_PDE_PG_MASK, pPage->idx, iShw);
            unsigned iGst = off / sizeof(X86PDE);   // ASSUMING 32-bit guest paging!
            Assert(pPage2->idx == PGMPOOL_IDX_PAE_PD_0 + iShwPdpt);
            uShw.pv = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage2);
            if ((uShw.pPDPae->a[iShw].u & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == (PGM_PDFLAGS_MAPPING | X86_PDE_P))
                Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s));
                VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
                LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw=%#x!\n", iShwPdpt, iShw));
            /* paranoia / a bit assumptive. */
                if (    iShw2 < RT_ELEMENTS(uShw.pPDPae->a)  /** @todo was completely wrong, it's better now after #1865 but still wrong from cross PD. */
                    &&  (uShw.pPDPae->a[iShw2].u & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == (PGM_PDFLAGS_MAPPING | X86_PDE_P))
                    Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s));
                    VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
                    LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw2=%#x!\n", iShwPdpt, iShw2));
#if 0 /* useful when running PGMAssertCR3(), a bit too troublesome for general use (TLBs). */
                         && !VM_FF_ISSET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3))
                    LogFlow(("pgmPoolMonitorChainChanging: iShwPdpt=%#x iShw=%#x: %RX64 -> freeing it!\n", iShwPdpt, iShw, uShw.pPDPae->a[iShw].u));
# ifdef IN_RC       /* TLB load - we're pushing things a bit... */
                    pgmPoolFree(pPool->CTX_SUFF(pVM), uShw.pPDPae->a[iShw].u & X86_PDE_PAE_PG_MASK, pPage->idx, iShw + iShwPdpt * X86_PG_PAE_ENTRIES);
            uShw.pv = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
                Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s));
                VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
                LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
#ifdef PGMPOOL_INVALIDATE_UPPER_SHADOW_TABLE_ENTRIES
                 * Causes trouble when the guest uses a PDE to refer to the whole page table level
                 * structure. (Invalidate here; faults later on when it tries to change the page
                 * table entries -> recheck; probably only applies to the RC case.)
                LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPae->a[iShw].u));
                /* Note: hardcoded PAE implementation dependency */
                            (pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD) ? PGMPOOL_IDX_PAE_PD : pPage->idx,
                            (pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD) ? iShw + (pPage->idx - PGMPOOL_IDX_PAE_PD_0) * X86_PG_PAE_ENTRIES : iShw);
            /* paranoia / a bit assumptive. */
                const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PDEPAE);
                AssertReturnVoid(iShw2 < RT_ELEMENTS(uShw.pPDPae->a));
                    Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s));
                    VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
                    LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
#ifdef PGMPOOL_INVALIDATE_UPPER_SHADOW_TABLE_ENTRIES
                    LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPae->a[iShw2].u));
                    /* Note: hardcoded PAE implementation dependency */
                                (pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD) ? PGMPOOL_IDX_PAE_PD : pPage->idx,
                                (pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD) ? iShw2 + (pPage->idx - PGMPOOL_IDX_PAE_PD_0) * X86_PG_PAE_ENTRIES : iShw2);
             * Hopefully this doesn't happen very often:
             * - touching unused parts of the page
             * - messing with the bits of pd pointers without changing the physical address
            uShw.pv = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
            if (iShw < X86_PG_PAE_PDPE_ENTRIES)   /* don't use RT_ELEMENTS(uShw.pPDPT->a), because that's for long mode only */
                    Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s));
                    VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
                    LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
            /* paranoia / a bit assumptive. */
                const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PDPE);
                    Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s));
                    VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
                    LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
            Assert(pPage->enmKind == PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD);
            uShw.pv = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
                Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s));
                VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
                LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
                LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPae->a[iShw].u));
            /* paranoia / a bit assumptive. */
                const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PDEPAE);
                AssertReturnVoid(iShw2 < RT_ELEMENTS(uShw.pPDPae->a));
                    Assert(pgmMapAreMappingsEnabled(&pPool->CTX_SUFF(pVM)->pgm.s));
                    VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
                    LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
                    LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPae->a[iShw2].u));
             * Hopefully this doesn't happen very often:
             * - messing with the bits of pd pointers without changing the physical address
            if (!VM_FF_ISSET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3))
                uShw.pv = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
                    LogFlow(("pgmPoolMonitorChainChanging: pdpt iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPT->a[iShw].u));
                    pgmPoolFree(pPool->CTX_SUFF(pVM), uShw.pPDPT->a[iShw].u & X86_PDPE_PG_MASK, pPage->idx, iShw);
                /* paranoia / a bit assumptive. */
                    const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PDPE);
                        LogFlow(("pgmPoolMonitorChainChanging: pdpt iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPT->a[iShw2].u));
                        pgmPoolFree(pPool->CTX_SUFF(pVM), uShw.pPDPT->a[iShw2].u & X86_PDPE_PG_MASK, pPage->idx, iShw2);
             * Hopefully this doesn't happen very often:
             * - messing with the bits of pd pointers without changing the physical address
            if (!VM_FF_ISSET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3))
                uShw.pv = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
                    LogFlow(("pgmPoolMonitorChainChanging: pml4 iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPML4->a[iShw].u));
                    pgmPoolFree(pPool->CTX_SUFF(pVM), uShw.pPML4->a[iShw].u & X86_PML4E_PG_MASK, pPage->idx, iShw);
                /* paranoia / a bit assumptive. */
                    const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PML4E);
                        LogFlow(("pgmPoolMonitorChainChanging: pml4 iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPML4->a[iShw2].u));
                        pgmPoolFree(pPool->CTX_SUFF(pVM), uShw.pPML4->a[iShw2].u & X86_PML4E_PG_MASK, pPage->idx, iShw2);
#endif /* IN_RING0 */
            AssertFatalMsgFailed(("enmKind=%d\n", pPage->enmKind));
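/* Illustrative sketch (not part of the original source): the pattern each table kind
 * above follows - work out which shadow entry (or entries, when the write crosses an
 * entry boundary) is being written, then either flag a SyncCR3 when the entry carries
 * a hypervisor mapping or invalidate it.  The flat uint64_t table, the flag bit and
 * the bare "clear the entry" step are simplifications; the real code frees the
 * referenced shadow page via pgmPoolFree(). */
static void exampleMonitorOneKind(uint64_t *paShadow, unsigned cEntries, unsigned cbEntry,
                                  unsigned off, unsigned cbWrite, uint64_t fMappingFlag,
                                  bool *pfSetSyncCR3)
{
    const unsigned iShw  = off / cbEntry;                                   /* first entry hit */
    const unsigned iShw2 = cbWrite ? (off + cbWrite - 1) / cbEntry : iShw;  /* last entry hit  */
    for (unsigned i = iShw; i <= iShw2 && i < cEntries; i++)
    {
        if (paShadow[i] & fMappingFlag)
            *pfSetSyncCR3 = true;   /* mapping conflict: force a CR3 resync instead */
        else
            paShadow[i] = 0;        /* clear/free the shadow entry */
    }
}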
 * Checks if an access could be a fork operation in progress.
 * Meaning that the guest is setting up the parent process for Copy-On-Write.
 * @returns true if it's likely that we're forking, otherwise false.
 * @param   pPool       The pool.
 * @param   pCpu        The disassembled instruction.
 * @param   offFault    The access offset.
DECLINLINE(bool) pgmPoolMonitorIsForking(PPGMPOOL pPool, PDISCPUSTATE pCpu, unsigned offFault)
     * i386 linux is using btr to clear X86_PTE_RW.
     * The functions involved are (2.6.16 source inspection):
     *     clear_bit
     *     ptep_set_wrprotect
     *     copy_one_pte
     *     copy_pte_range
     *     copy_pmd_range
     *     copy_pud_range
     *     copy_page_range
     *     copy_process
        /** @todo Validate that the bit index is X86_PTE_RW. */
        STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,Fork));
        return true;
    return false;
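/* Illustrative sketch (not part of the original source): the fork heuristic amounts to
 * spotting a locked "btr" against the low dword of a PTE, which is how the i386 Linux
 * path listed above clears X86_PTE_RW when write-protecting PTEs for copy-on-write.
 * The PREFIX_LOCK name and the offset test are assumptions for illustration. */
static bool exampleMonitorIsForking(PDISCPUSTATE pCpu, unsigned offFault)
{
    if (   pCpu->pCurInstr->opcode == OP_BTR   /* bit test and reset */
        && !(offFault & 4)                     /* low dword of the (PAE) PTE - assumption */
        && (pCpu->prefix & PREFIX_LOCK))       /* the kernel uses "lock btr" */
    {
        /** @todo as above: validate that the bit index really is X86_PTE_RW. */
        return true;
    }
    return false;
}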
 * Determine whether the page is likely to have been reused.
 * @returns true if we consider the page as being reused for a different purpose.
 * @returns false if we consider it to still be a paging page.
 * @param   pVM         VM Handle.
 * @param   pPage       The page in question.
 * @param   pRegFrame   Trap register frame.
 * @param   pCpu        The disassembly info for the faulting instruction.
 * @param   pvFault     The fault address.
 * @remark  The REP prefix check is left to the caller because of STOSD/W.
DECLINLINE(bool) pgmPoolMonitorIsReused(PVM pVM, PPGMPOOLPAGE pPage, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, RTGCPTR pvFault)
    /** @todo could make this general, faulting close to rsp should be safe reuse heuristic. */
        /* Fault caused by stack writes while trying to inject an interrupt event. */
        Log(("pgmPoolMonitorIsReused: reused %RGv for interrupt stack (rsp=%RGv).\n", pvFault, pRegFrame->rsp));
        return true;
        /* call implies the actual push of the return address faulted */
            return true;
            return true;
            return true;
            return true;
            return true;
            return true;
        case OP_MOVNTDQ:    /* solaris - hwblkclr & hwblkpagecopy */
            return true;
            return true;
            return false;
        return true;
    //if (pPage->fCR3Mix)
    //    return false;
    return false;
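/* Illustrative sketch (not part of the original source): the reuse heuristic in
 * outline.  Writes that land on the current stack, or that come from string/SSE store
 * instructions typical of memset()/memcpy(), strongly suggest the page is no longer a
 * page table.  The stack window and the exact opcode set are assumptions; OP_MOVNTDQ
 * is taken from the fragment above. */
static bool exampleMonitorIsReused(PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, RTGCPTR pvFault)
{
    /* Fault caused by stack writes, e.g. while an interrupt event is being injected. */
    if (   (uint64_t)pvFault >= pRegFrame->rsp - 32      /* assumed window */
        && (uint64_t)pvFault <  pRegFrame->rsp + 256)
        return true;

    switch (pCpu->pCurInstr->opcode)
    {
        case OP_PUSH:       /* plain stack usage */
        case OP_CALL:       /* the push of the return address faulted */
        case OP_STOSWD:     /* memset-style stores */
        case OP_MOVNTDQ:    /* solaris - hwblkclr & hwblkpagecopy */
            return true;
        default:
            return false;
    }
}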
 * Flushes the page being accessed.
 * @returns VBox status code suitable for scheduling.
 * @param   pVM         The VM handle.
 * @param   pPool       The pool.
 * @param   pPage       The pool page (head).
 * @param   pCpu        The disassembly of the write instruction.
 * @param   pRegFrame   The trap register frame.
 * @param   GCPhysFault The fault address as guest physical address.
 * @param   pvFault     The fault address.
static int pgmPoolAccessHandlerFlush(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pCpu,
                                     PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault)
     * First, do the flushing.
     * Emulate the instruction (xp/w2k problem, requires pc/cr2/sp detection).
    int rc2 = EMInterpretInstructionCPU(pVM, pCpu, pRegFrame, pvFault, &cbWritten);
    if (PATMIsPatchGCAddr(pVM, (RTRCPTR)pRegFrame->eip))
        LogFlow(("pgmPoolAccessHandlerPTWorker: Interpretation failed for patch code %04x:%RGv, ignoring.\n",
        STAM_COUNTER_INC(&pPool->StatMonitorRZIntrFailPatch2);
        STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,EmulateInstr));
    /* See use in pgmPoolAccessHandlerSimple(). */
    LogFlow(("pgmPoolAccessHandlerPT: returns %Rrc (flushed)\n", rc));
 * Handles the STOSD write accesses.
 * @returns VBox status code suitable for scheduling.
 * @param   pVM         The VM handle.
 * @param   pPool       The pool.
 * @param   pPage       The pool page (head).
 * @param   pCpu        The disassembly of the write instruction.
 * @param   pRegFrame   The trap register frame.
 * @param   GCPhysFault The fault address as guest physical address.
 * @param   pvFault     The fault address.
DECLINLINE(int) pgmPoolAccessHandlerSTOSD(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pCpu,
                                          PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault)
     * Increment the modification counter and insert it into the list
     * of modified pages the first time.
     * Execute REP STOSD.
     * This ASSUMES that we're not invoked by Trap0e in an out-of-sync
     * write situation, meaning that it's safe to write here.
        pgmPoolMonitorChainChanging(pPool, pPage, GCPhysFault, (RTGCPTR)pu32, NULL);
        PGMPhysSimpleWriteGCPhys(pVM, GCPhysFault, &pRegFrame->eax, 4);
    /* See use in pgmPoolAccessHandlerSimple(). */
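/* Illustrative sketch (not part of the original source): the essence of handling a
 * REP STOSD against a monitored page is to replay the store loop ourselves, telling
 * the monitor about each dword before it is written, and then advancing the guest
 * registers past the instruction.  ExampleRegs and the two callbacks are hypothetical
 * stand-ins for the trap register frame and for pgmPoolMonitorChainChanging /
 * PGMPhysSimpleWriteGCPhys. */
typedef struct ExampleRegs { uint32_t eax, ecx, edi; uint64_t rip; } ExampleRegs;

static void exampleHandlerSTOSD(ExampleRegs *pRegs, uint64_t GCPhysFault, unsigned cbInstr,
                                void (*pfnChainChanging)(uint64_t GCPhys),
                                void (*pfnWritePhys)(uint64_t GCPhys, const void *pv, unsigned cb))
{
    while (pRegs->ecx)
    {
        pfnChainChanging(GCPhysFault);              /* let the monitor clear the shadow entry */
        pfnWritePhys(GCPhysFault, &pRegs->eax, 4);  /* perform the 4-byte guest store */
        GCPhysFault += 4;
        pRegs->edi  += 4;
        pRegs->ecx  -= 1;
    }
    pRegs->rip += cbInstr;                          /* skip past the REP STOSD instruction */
}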
 * Handles the simple write accesses.
 * @returns VBox status code suitable for scheduling.
 * @param   pVM         The VM handle.
 * @param   pPool       The pool.
 * @param   pPage       The pool page (head).
 * @param   pCpu        The disassembly of the write instruction.
 * @param   pRegFrame   The trap register frame.
 * @param   GCPhysFault The fault address as guest physical address.
 * @param   pvFault     The fault address.
DECLINLINE(int) pgmPoolAccessHandlerSimple(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pCpu,
                                           PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault)
     * Increment the modification counter and insert it into the list
     * of modified pages the first time.
     * Clear all the pages. ASSUMES that pvFault is readable.
    pgmPoolMonitorChainChanging(pPool, pPage, GCPhysFault, pvFault, pCpu);
     * Interpret the instruction.
    int rc = EMInterpretInstructionCPU(pVM, pCpu, pRegFrame, pvFault, &cb);
        LogFlow(("pgmPoolAccessHandlerPTWorker: Interpretation failed for %04x:%RGv - opcode=%d\n",
                 pRegFrame->cs, (RTGCPTR)pRegFrame->rip, pCpu->pCurInstr->opcode));
        STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,EmulateInstr));
     * Quick hack, with logging enabled we're getting stale
     * code TLBs but no data TLB for EIP and crash in EMInterpretDisasOne.
     * Flushing here is BAD and expensive, I think EMInterpretDisasOne will
     * have to be fixed to support this. But that'll have to wait till next week.
     * An alternative is to keep track of the changed PTEs together with the
     * GCPhys from the guest PT. This may prove expensive though.
     * At the moment, it's VITAL that it's done AFTER the instruction interpreting
     * because we need the stale TLBs in some cases (XP boot). This MUST be fixed properly!
    LogFlow(("pgmPoolAccessHandlerSimple: returns %Rrc cb=%d\n", rc, cb));
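/* Illustrative sketch (not part of the original source): the "simple" path is just
 * monitor-notify followed by instruction interpretation, so the guest write both
 * invalidates the shadow entry and actually lands in guest memory.  The callback
 * signatures are simplified assumptions standing in for pgmPoolMonitorChainChanging
 * and EMInterpretInstructionCPU. */
static int exampleHandlerSimple(uint64_t GCPhysFault, uint64_t uvFault,
                                void (*pfnChainChanging)(uint64_t GCPhys, uint64_t uvAddr),
                                int  (*pfnInterpret)(uint64_t uvFault, unsigned *pcbWritten))
{
    unsigned cb = 0;

    /* Process the shadow entries before the guest changes them. */
    pfnChainChanging(GCPhysFault, uvFault);

    /* Interpret the write; on failure the caller falls back to flushing the page. */
    return pfnInterpret(uvFault, &cb);
}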
 * \#PF Handler callback for PT write accesses.
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 *                      NULL on DMA and other non CPU access.
 * @param   pvFault     The fault address (cr2).
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      User argument.
DECLEXPORT(int) pgmPoolAccessHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
    STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), a);
    LogFlow(("pgmPoolAccessHandler: pvFault=%RGv pPage=%p:{.idx=%d} GCPhysFault=%RGp\n", pvFault, pPage, pPage->idx, GCPhysFault));
     * We should ALWAYS have the list head as user parameter. This
     * is because we use that page to record the changes.
     * Disassemble the faulting instruction.
    int rc = EMInterpretDisasOne(pVM, pRegFrame, &Cpu, NULL);
     * Check if it's worth dealing with.
    bool fReused = false;
    if (    (   pPage->cModifications < 48   /** @todo #define */ /** @todo need to check that it's not mapping EIP. */ /** @todo adjust this! */
             && !(fReused = pgmPoolMonitorIsReused(pVM, pPage, pRegFrame, &Cpu, pvFault))
             && !pgmPoolMonitorIsForking(pPool, &Cpu, GCPhysFault & PAGE_OFFSET_MASK))
         * Simple instructions, no REP prefix.
        rc = pgmPoolAccessHandlerSimple(pVM, pPool, pPage, &Cpu, pRegFrame, GCPhysFault, pvFault);
        STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,Handled), a);
     * Windows is frequently doing small memset() operations (netio test 4k+).
     * We have to deal with these or we'll kill the cache and performance.
             && pRegFrame->ecx * 4 <= PAGE_SIZE - ((uintptr_t)pvFault & PAGE_OFFSET_MASK)
             && (pRegFrame->eax == 0 || pRegFrame->eax == 0x80) /* the two values observed. */
        rc = pgmPoolAccessHandlerSTOSD(pVM, pPool, pPage, &Cpu, pRegFrame, GCPhysFault, pvFault);
        STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,RepStosd), a);
    /* REP prefix, don't bother. */
    STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,RepPrefix));
    Log4(("pgmPoolAccessHandler: eax=%#x ecx=%#x edi=%#x esi=%#x rip=%RGv opcode=%d prefix=%#x\n",
          pRegFrame->eax, pRegFrame->ecx, pRegFrame->edi, pRegFrame->esi, (RTGCPTR)pRegFrame->rip, Cpu.pCurInstr->opcode, Cpu.prefix));
     * Not worth it, so flush it.
     * If we considered it to be reused, don't go back to ring-3
     * to emulate failed instructions since we usually cannot
     * interpret them. This may be a bit risky, in which case
     * the reuse detection must be fixed.
    rc = pgmPoolAccessHandlerFlush(pVM, pPool, pPage, &Cpu, pRegFrame, GCPhysFault, pvFault);
    STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,FlushPage), a);
# endif /* !IN_RING3 */
#endif /* PGMPOOL_WITH_MONITORING */
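/* Illustrative sketch (not part of the original source): the decision the handler
 * above makes once the faulting instruction has been disassembled.  The
 * 48-modification threshold and the REP STOSD constraints (dword stores that stay
 * inside the page, eax being one of the observed fill values) come from the fragment;
 * the overall structure is a simplifying assumption. */
typedef enum { EXAMPLE_SIMPLE, EXAMPLE_REP_STOSD, EXAMPLE_FLUSH } ExampleAction;

static ExampleAction exampleChooseAction(unsigned cModifications, bool fReused, bool fForking,
                                         bool fRepPrefix, bool fRepStosdFitsPage)
{
    if (cModifications < 48 && !fReused && !fForking)
    {
        if (!fRepPrefix)
            return EXAMPLE_SIMPLE;      /* simple instruction: interpret in place */
        if (fRepStosdFitsPage)
            return EXAMPLE_REP_STOSD;   /* frequent small memset(); handle specially */
        /* other REP-prefixed writes: don't bother */
    }
    return EXAMPLE_FLUSH;               /* not worth it, flush the page (chain) */
}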
 * Inserts a page into the GCPhys hash table.
 * @param   pPool   The pool.
 * @param   pPage   The page.
DECLINLINE(void) pgmPoolHashInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
    Log3(("pgmPoolHashInsert: %RGp\n", pPage->GCPhys));
    Assert(pPage->GCPhys != NIL_RTGCPHYS); Assert(pPage->iNext == NIL_PGMPOOL_IDX);
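/* Illustrative sketch (not part of the original source): inserting a page into a
 * GCPhys hash made of singly linked index chains, using the iNext / NIL_PGMPOOL_IDX
 * convention visible in the assertion above.  The aiHash[] array and the hash-slot
 * computation are assumptions. */
typedef struct ExampleHashPage { uint16_t iNext; } ExampleHashPage;

static void exampleHashInsert(uint16_t *paiHash, ExampleHashPage *paPages,
                              uint16_t idxPage, unsigned iHashSlot)
{
    paPages[idxPage].iNext = paiHash[iHashSlot];    /* chain the old head behind us */
    paiHash[iHashSlot]     = idxPage;               /* and become the new head */
}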
 * Removes a page from the GCPhys hash table.
 * @param   pPool   The pool.
 * @param   pPage   The page.
DECLINLINE(void) pgmPoolHashRemove(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
    Log3(("pgmPoolHashRemove: %RGp\n", pPage->GCPhys));
    AssertReleaseMsgFailed(("GCPhys=%RGp idx=%#x\n", pPage->GCPhys, pPage->idx));
 * Frees up one cache page.
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_POOL_CLEARED if the deregistration of a physical handler will cause a light weight pool flush.
 * @param   pPool   The pool.
 * @param   iUser   The user index.
static int pgmPoolCacheFreeOne(PPGMPOOL pPool, uint16_t iUser)
    Assert(pPool->iAgeHead != pPool->iAgeTail); /* We shouldn't be here if there are < 2 cached entries! */
     * Select one page from the tail of the age list.
/* This is the alternative to the SyncCR3 pgmPoolCacheUsed calls.
    if (pPool->aPages[iToFree].iUserHead != NIL_PGMPOOL_USER_INDEX)
        uint16_t i = pPool->aPages[iToFree].iAgePrev;
        for (unsigned j = 0; j < 10 && i != NIL_PGMPOOL_USER_INDEX; j++, i = pPool->aPages[i].iAgePrev)
            if (pPool->aPages[iToFree].iUserHead == NIL_PGMPOOL_USER_INDEX)
                iToFree = i;
     * Reject any attempts at flushing the currently active shadow CR3 mapping
    if (PGMGetHyperCR3(pPool->CTX_SUFF(pVM)) == pPage->Core.Key)
        /* Refresh the cr3 mapping by putting it at the head of the age list. */
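/* Illustrative sketch (not part of the original source): choosing an eviction victim
 * from the tail of the age (LRU) list while refusing to evict the page backing the
 * currently active shadow CR3, as the fragment above does.  The index-linked fields
 * and Core.Key (the page's host physical address) mirror the usage above; the loop
 * shape is an assumption. */
static uint16_t exampleCachePickVictim(PPGMPOOL pPool, RTHCPHYS HCPhysShadowCR3)
{
    uint16_t iToFree = pPool->iAgeTail;
    while (   iToFree != NIL_PGMPOOL_IDX
           && pPool->aPages[iToFree].Core.Key == HCPhysShadowCR3)
        iToFree = pPool->aPages[iToFree].iAgePrev;   /* skip the active CR3 mapping */
    return iToFree;                                  /* NIL if nothing is evictable */
}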
 * Checks if a kind mismatch is really a page being reused
 * or if it's just normal remappings.
 * @returns true if reused and the cached page (enmKind1) should be flushed
 * @returns false if not reused.
 * @param   enmKind1    The kind of the cached page.
 * @param   enmKind2    The kind of the requested page.
static bool pgmPoolCacheReusedByKind(PGMPOOLKIND enmKind1, PGMPOOLKIND enmKind2)
             * Never reuse them. There is no remapping in non-paging mode.
            return true;
             * It's perfectly fine to reuse these, except for PAE and non-paging stuff.
                    return true;
                    return false;
             * It's perfectly fine to reuse these, except for PAE and non-paging stuff.
                    return true;
                    return false;
             * These cannot be flushed, and it's common to reuse the PDs as PTs.
            return false;
 * Attempts to satisfy a pgmPoolAlloc request from the cache.
 * @returns VBox status code.
 * @retval  VINF_PGM_CACHED_PAGE on success.
 * @retval  VERR_FILE_NOT_FOUND if not found.
 * @param   pPool       The pool.
 * @param   GCPhys      The GC physical address of the page we're gonna shadow.
 * @param   enmKind     The kind of mapping.
 * @param   iUser       The shadow page pool index of the user table.
 * @param   iUserTable  The index into the user table (shadowed).
 * @param   ppPage      Where to store the pointer to the page.
static int pgmPoolCacheAlloc(PPGMPOOL pPool, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage)
     * Look up the GCPhys in the hash.
    Log3(("pgmPoolCacheAlloc: %RGp kind %d iUser=%d iUserTable=%x SLOT=%d\n", GCPhys, enmKind, iUser, iUserTable, i));
                Log3(("pgmPoolCacheAlloc: slot %d found page %RGp\n", i, pPage->GCPhys));
                int rc = pgmPoolTrackAddUser(pPool, pPage, iUser, iUserTable);
                 * The kind is different. In some cases we should now flush the page
                 * as it has been reused, but in most cases this is normal remapping
                 * of PDs as PT or big pages using the GCPhys field in a slightly
                 * different way than the other kinds.
                if (pgmPoolCacheReusedByKind((PGMPOOLKIND)pPage->enmKind, enmKind))
                    pgmPoolFlushPage(pPool, pPage); /* ASSUMES that VERR_PGM_POOL_CLEARED will be returned by pgmPoolTracInsert. */
    } while (i != NIL_PGMPOOL_IDX);
    Log3(("pgmPoolCacheAlloc: Missed GCPhys=%RGp enmKind=%d\n", GCPhys, enmKind));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Inserts a page into the cache.
16e6bb956968b3b7caee5a07dc98ad0e4aa60d36vboxsync * @param pPool The pool.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPage The cached page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param fCanBeCached Set if the page is fit for caching from the caller's point of view.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncstatic void pgmPoolCacheInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fCanBeCached)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Insert into the GCPhys hash if the page is fit for that.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Log3(("pgmPoolCacheInsert: Caching %p:{.Core=%RHp, .idx=%d, .enmKind=%d, GCPhys=%RGp}\n",
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPage, pPage->Core.Key, pPage->idx, pPage->enmKind, pPage->GCPhys));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Log3(("pgmPoolCacheInsert: Not caching %p:{.Core=%RHp, .idx=%d, .enmKind=%d, GCPhys=%RGp}\n",
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPage, pPage->Core.Key, pPage->idx, pPage->enmKind, pPage->GCPhys));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Insert at the head of the age list.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPool->aPages[pPool->iAgeHead].iAgePrev = pPage->idx;
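/* Illustrative sketch (not part of the original source): inserting a page at the head
 * of the index-linked age list, completing the single neighbour update shown above
 * with the head/tail bookkeeping. */
static void exampleAgeListInsertHead(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    pPage->iAgePrev = NIL_PGMPOOL_IDX;
    pPage->iAgeNext = pPool->iAgeHead;
    if (pPool->iAgeHead != NIL_PGMPOOL_IDX)
        pPool->aPages[pPool->iAgeHead].iAgePrev = pPage->idx;
    else
        pPool->iAgeTail = pPage->idx;               /* list was empty */
    pPool->iAgeHead = pPage->idx;
}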
 * Flushes a cached page.
 * @param   pPool   The pool.
 * @param   pPage   The cached page.
static void pgmPoolCacheFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
    Log3(("pgmPoolCacheFlushPage: %RGp\n", pPage->GCPhys));
     * Remove the page from the hash.
     * Remove it from the age list.
        pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
        pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
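/* Illustrative sketch (not part of the original source): unlinking a page from the
 * index-linked age list, generalising the two neighbour updates above to also handle
 * the head and tail cases. */
static void exampleAgeListRemove(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
        pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
    else
        pPool->iAgeTail = pPage->iAgePrev;
    if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
        pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
    else
        pPool->iAgeHead = pPage->iAgeNext;
    pPage->iAgeNext = pPage->iAgePrev = NIL_PGMPOOL_IDX;
}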
#endif /* PGMPOOL_WITH_CACHE */
 * Looks for pages sharing the monitor.
 * @returns Pointer to the head page.
 * @returns NULL if not found.
 * @param   pPool       The Pool
 * @param   pNewPage    The page which is going to be monitored.
static PPGMPOOLPAGE pgmPoolMonitorGetPageByGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pNewPage)
     * Look up the GCPhys in the hash.
    RTGCPHYS GCPhys = pNewPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1);
                /* find the head */
                /* ignore, no monitoring. */
                AssertFatalMsgFailed(("enmKind=%d idx=%d\n", pPage->enmKind, pPage->idx));
    } while (i != NIL_PGMPOOL_IDX);
 * Enables write monitoring of a guest page.
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_POOL_CLEARED if the registration of the physical handler will cause a light weight pool flush.
 * @param   pPool   The pool.
 * @param   pPage   The cached page.
static int pgmPoolMonitorInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
    LogFlow(("pgmPoolMonitorInsert %RGp\n", pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1)));
     * Filter out the relevant kinds.
            /* Nothing to monitor here. */
            AssertFatalMsgFailed(("This can't happen! enmKind=%d\n", pPage->enmKind));
     * Install handler.
        PPGMPOOLPAGE pPageHead = pgmPoolMonitorGetPageByGCPhys(pPool, pPage);
        Assert(pPageHead != pPage); Assert(pPageHead->iMonitoredNext != pPage->idx);
            pPool->aPages[pPageHead->iMonitoredNext].iMonitoredPrev = pPage->idx;
        Assert(pPage->iMonitoredNext == NIL_PGMPOOL_IDX); Assert(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
        const RTGCPHYS GCPhysPage = pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1);
        rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
                                          pPool->pfnAccessHandlerR3, MMHyperCCToR3(pVM, pPage),
                                          pPool->pfnAccessHandlerR0, MMHyperCCToR0(pVM, pPage),
                                          pPool->pfnAccessHandlerRC, MMHyperCCToRC(pVM, pPage),
        /** @todo we should probably deal with out-of-memory conditions here, but for now increasing
         *        the heap size should suffice. */
    if (pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Disables write monitoring of a guest page.
f4f1486a0ea478a9cf75ad985f1d25915fa1f3a4vboxsync * @returns VBox status code.
23631945c9cb3df68ca51c69ed0b77e90164b402vboxsync * @retval VINF_SUCCESS on success.
23631945c9cb3df68ca51c69ed0b77e90164b402vboxsync * @retval VERR_PGM_POOL_CLEARED if the deregistration of the physical handler will cause a light weight pool flush.
23631945c9cb3df68ca51c69ed0b77e90164b402vboxsync * @param pPool The pool.
23631945c9cb3df68ca51c69ed0b77e90164b402vboxsync * @param pPage The cached page.
2d8894b1c178c9f1199cac84059ca66aa5dee6b3vboxsyncstatic int pgmPoolMonitorFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
23631945c9cb3df68ca51c69ed0b77e90164b402vboxsync * Filter out the relevant kinds.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* Nothing to monitor here. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync AssertFatalMsgFailed(("This can't happen! enmKind=%d\n", pPage->enmKind));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Remove the page from the monitored list or uninstall it if last.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PPGMPOOLPAGE pNewHead = &pPool->aPages[pPage->iMonitoredNext];
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync rc = PGMHandlerPhysicalChangeCallbacks(pVM, pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1),
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPool->pfnAccessHandlerR3, MMHyperCCToR3(pVM, pNewHead),
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPool->pfnAccessHandlerR0, MMHyperCCToR0(pVM, pNewHead),
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPool->pfnAccessHandlerRC, MMHyperCCToRC(pVM, pNewHead),
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPool->aPages[pPage->iMonitoredPrev].iMonitoredNext = pPage->iMonitoredNext;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPool->aPages[pPage->iMonitoredNext].iMonitoredPrev = pPage->iMonitoredPrev;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync rc = PGMHandlerPhysicalDeregister(pVM, pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if (pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
0a3599702f0cfe19a23070ff9eddbcec0ae71298vboxsync * Remove it from the list of modified pages (if in it).
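/*
 * Illustrative sketch only (not part of the pool code): how a page is unlinked from an
 * index based, doubly linked monitoring chain with a NIL sentinel, as the flush above does.
 * The DemoPage/DemoPool types and DEMO_NIL are made-up stand-ins, not the real
 * PGMPOOL/PGMPOOLPAGE layout.
 */
#include <stdint.h>

#define DEMO_NIL UINT16_MAX

typedef struct DemoPage
{
    uint16_t iNext;  /* index of the next page in the chain, or DEMO_NIL */
    uint16_t iPrev;  /* index of the previous page, or DEMO_NIL */
} DemoPage;

typedef struct DemoPool
{
    DemoPage aPages[64];
    uint16_t iHead;  /* index of the chain head, or DEMO_NIL when empty */
} DemoPool;

/* Unlink aPages[idx]; if it was the head, the next page becomes the new head. */
static void demoChainUnlink(DemoPool *pPool, uint16_t idx)
{
    DemoPage *pPage = &pPool->aPages[idx];

    if (pPage->iPrev != DEMO_NIL)
        pPool->aPages[pPage->iPrev].iNext = pPage->iNext;
    else
        pPool->iHead = pPage->iNext;

    if (pPage->iNext != DEMO_NIL)
        pPool->aPages[pPage->iNext].iPrev = pPage->iPrev;

    pPage->iNext = DEMO_NIL;
    pPage->iPrev = DEMO_NIL;
}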
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Set or clear the fCR3Mix attribute in a chain of monitored pages.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPool The Pool.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPage A page in the chain.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param fCR3Mix The new fCR3Mix value.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncstatic void pgmPoolMonitorChainChangeCR3Mix(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fCR3Mix)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* current */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* before */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* after */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Installs or modifies monitoring of a CR3 page (special).
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * We're pretending the CR3 page is shadowed by the pool so we can use the
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * generic mechanisms in detecting chained monitoring. (This also gives us a
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * taste of what code changes are required to really pool CR3 shadow pages.)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @returns VBox status code.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPool The pool.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param idxRoot The CR3 (root) page index.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param GCPhysCR3 The (new) CR3 value.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncint pgmPoolMonitorMonitorCR3(PPGMPOOL pPool, uint16_t idxRoot, RTGCPHYS GCPhysCR3)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(idxRoot != NIL_PGMPOOL_IDX && idxRoot < PGMPOOL_IDX_FIRST);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync LogFlow(("pgmPoolMonitorMonitorCR3: idxRoot=%d pPage=%p:{.GCPhys=%RGp, .fMonitored=%d} GCPhysCR3=%RGp\n",
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync idxRoot, pPage, pPage->GCPhys, pPage->fMonitored, GCPhysCR3));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * The unlikely case where it already matches.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Flush the current monitoring and remove it from the hash.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmPoolMonitorChainChangeCR3Mix(pPool, pPage, false);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Monitor the page at the new location and insert it into the hash.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmPoolMonitorChainChangeCR3Mix(pPool, pPage, true);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Removes the monitoring of a CR3 page (special).
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @returns VBox status code.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPool The pool.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param idxRoot The CR3 (root) page index.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncint pgmPoolMonitorUnmonitorCR3(PPGMPOOL pPool, uint16_t idxRoot)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(idxRoot != NIL_PGMPOOL_IDX && idxRoot < PGMPOOL_IDX_FIRST);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync LogFlow(("pgmPoolMonitorUnmonitorCR3: idxRoot=%d pPage=%p:{.GCPhys=%RGp, .fMonitored=%d}\n",
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync idxRoot, pPage, pPage->GCPhys, pPage->fMonitored));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmPoolMonitorChainChangeCR3Mix(pPool, pPage, false);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Inserts the page into the list of modified pages.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPool The pool.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPage The page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncvoid pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Log3(("pgmPoolMonitorModifiedInsert: idx=%d\n", pPage->idx));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync ("Next=%d Prev=%d idx=%d cModifications=%d Head=%d cModifiedPages=%d\n",
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPage->iModifiedNext, pPage->iModifiedPrev, pPage->idx, pPage->cModifications,
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPool->aPages[pPool->iModifiedHead].iModifiedPrev = pPage->idx;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if (pPool->cModifiedPages > pPool->cModifiedPagesHigh)
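/*
 * Illustrative sketch only: inserting a page at the head of the modified list and bumping
 * the high-water statistic. The field names mirror the iModified*/cModifiedPages idea but
 * the Demo* structures are simplified stand-ins, not the real pool structures.
 */
#include <stdint.h>

#define DEMO_NIL UINT16_MAX

typedef struct DemoPage
{
    uint16_t iModifiedNext;
    uint16_t iModifiedPrev;
} DemoPage;

typedef struct DemoPool
{
    DemoPage aPages[64];
    uint16_t iModifiedHead;
    uint16_t cModifiedPages;
    uint16_t cModifiedPagesHigh;
} DemoPool;

static void demoModifiedInsert(DemoPool *pPool, uint16_t idx)
{
    DemoPage *pPage = &pPool->aPages[idx];

    /* Link in front of the current head (if any). */
    pPage->iModifiedNext = pPool->iModifiedHead;
    pPage->iModifiedPrev = DEMO_NIL;
    if (pPool->iModifiedHead != DEMO_NIL)
        pPool->aPages[pPool->iModifiedHead].iModifiedPrev = idx;
    pPool->iModifiedHead = idx;

    /* Keep the counters in sync. */
    pPool->cModifiedPages++;
    if (pPool->cModifiedPages > pPool->cModifiedPagesHigh)
        pPool->cModifiedPagesHigh = pPool->cModifiedPages;
}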
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Removes the page from the list of modified pages and resets the
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * modification counter.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPool The pool.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPage The page which is believed to be in the list of modified pages.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncstatic void pgmPoolMonitorModifiedRemove(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Log3(("pgmPoolMonitorModifiedRemove: idx=%d cModifications=%d\n", pPage->idx, pPage->cModifications));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPool->aPages[pPage->iModifiedNext].iModifiedPrev = NIL_PGMPOOL_IDX;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPool->aPages[pPage->iModifiedPrev].iModifiedNext = pPage->iModifiedNext;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPool->aPages[pPage->iModifiedNext].iModifiedPrev = pPage->iModifiedPrev;
0a3599702f0cfe19a23070ff9eddbcec0ae71298vboxsync * Zaps the list of modified pages, resetting their modification counters in the process.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pVM The VM handle.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync LogFlow(("pgmPoolMonitorModifiedClearAll: cModifiedPages=%d\n", pPool->cModifiedPages));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync AssertMsg(cPages == pPool->cModifiedPages, ("%d != %d\n", cPages, pPool->cModifiedPages));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Clear all shadow pages and clear all modification counters.
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * @param pVM The VM handle.
dba0e7f8f385de972564b6917e305b8f53ea3480vboxsync * @remark Should only be used when monitoring is available, thus placed in
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * the PGMPOOL_WITH_MONITORING #ifdef.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync LogFlow(("pgmPoolClearAll: cUsedPages=%d\n", pPool->cUsedPages));
4d4628e1fe67e333b01942cc6ac92818832fd0edvboxsync * Iterate all the pages until we've encountered all that are in use.
4d4628e1fe67e333b01942cc6ac92818832fd0edvboxsync * This is a simple but not quite optimal solution.
4d4628e1fe67e333b01942cc6ac92818832fd0edvboxsync unsigned cModifiedPages = 0; NOREF(cModifiedPages);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * We only care about shadow page tables.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* fall thru */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(!pPage->cModifications || ++cModifiedPages);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(pPage->iModifiedNext == NIL_PGMPOOL_IDX || pPage->cModifications);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(pPage->iModifiedPrev == NIL_PGMPOOL_IDX || pPage->cModifications);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* sweep the special pages too. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (iPage = PGMPOOL_IDX_FIRST_SPECIAL; iPage < PGMPOOL_IDX_FIRST; iPage++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(!pPage->cModifications || ++cModifiedPages);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(pPage->iModifiedNext == NIL_PGMPOOL_IDX || pPage->cModifications);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(pPage->iModifiedPrev == NIL_PGMPOOL_IDX || pPage->cModifications);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync AssertMsg(cModifiedPages == pPool->cModifiedPages, ("%d != %d\n", cModifiedPages, pPool->cModifiedPages));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Clear all the GCPhys links and rebuild the phys ext free list.
3649373f921ada8549bf86c6edb03b340f2d214avboxsync for (PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRanges);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync while (iPage-- > 0)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pRam->aPages[iPage].HCPhys &= MM_RAM_FLAGS_NO_REFS_MASK; /** @todo PAGE FLAGS */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned i = 0; i < cMaxPhysExts; i++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync paPhysExts[cMaxPhysExts - 1].iNext = NIL_PGMPOOL_PHYSEXT_INDEX;
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync#endif /* IN_RING3 */
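/*
 * Illustrative sketch only: rebuilding a free list over an array of extent records by
 * chaining each entry to the next and terminating the last one with a NIL index, as the
 * phys ext reset above does. DemoPhysExt and the constants are invented for the sketch.
 */
#include <stdint.h>

#define DEMO_NIL           UINT16_MAX
#define DEMO_CMAXPHYSEXTS  32

typedef struct DemoPhysExt
{
    uint16_t iNext;    /* next free extent, or DEMO_NIL */
    uint16_t aidx[3];  /* shadow page table indexes     */
} DemoPhysExt;

static void demoPhysExtRebuildFreeList(DemoPhysExt *paPhysExts, uint16_t *piPhysExtFreeHead)
{
    for (unsigned i = 0; i < DEMO_CMAXPHYSEXTS; i++)
    {
        paPhysExts[i].iNext = (uint16_t)(i + 1);
        paPhysExts[i].aidx[0] = DEMO_NIL;
        paPhysExts[i].aidx[1] = DEMO_NIL;
        paPhysExts[i].aidx[2] = DEMO_NIL;
    }
    paPhysExts[DEMO_CMAXPHYSEXTS - 1].iNext = DEMO_NIL;
    *piPhysExtFreeHead = 0;
}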
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * Handle SyncCR3 pool tasks
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @returns VBox status code.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @retval VINF_SUCCESS if successfully added.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @retval VINF_PGM_SYNC_CR3 if it needs to be deferred to ring 3 (GC only)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pVM The VM handle.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @remark Should only be used when monitoring is available, thus placed in
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * the PGMPOOL_WITH_MONITORING #ifdef.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * When monitoring shadowed pages, we reset the modification counters on CR3 sync.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Occasionally we will have to clear all the shadow page tables because we wanted
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * to monitor a page which was mapped by too many shadowed page tables. This operation
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * is sometimes referred to as a 'lightweight flush'.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync# ifdef IN_RING3 /* Don't flush in ring-0 or raw mode, it's taking too long. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync# else /* !IN_RING3 */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync LogFlow(("SyncCR3: PGM_SYNC_CLEAR_PGM_POOL is set -> VINF_PGM_SYNC_CR3\n"));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync# endif /* !IN_RING3 */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync#endif /* PGMPOOL_WITH_MONITORING */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Frees up at least one user entry.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @returns VBox status code.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @retval VINF_SUCCESS if successfully added.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPool The pool.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param iUser The user index.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncstatic int pgmPoolTrackFreeOneUser(PPGMPOOL pPool, uint16_t iUser)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Just free cached pages in a braindead fashion.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /** @todo walk the age list backwards and free the first with usage. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync } while (pPool->iUserFreeHead == NIL_PGMPOOL_USER_INDEX);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Lazy approach.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* @todo incompatible with long mode paging (cr3 root will be flushed) */
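/*
 * Illustrative sketch only: the "free at least one user entry" idea above -- keep evicting
 * cached pages until the user-node free list is no longer empty. Everything here (the toy
 * age "list" as a plain index, demoCacheFreeOne, the Demo* names) is a hypothetical
 * simplification, not the real cache eviction code.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_NIL   UINT16_MAX
#define DEMO_PAGES 8

typedef struct DemoPool
{
    uint16_t aiPageUser[DEMO_PAGES]; /* user node owned by each cached page, or DEMO_NIL */
    uint16_t aiUserNext[DEMO_PAGES]; /* free list links for the user nodes */
    uint16_t iUserFreeHead;          /* head of the user-node free list, or DEMO_NIL */
    uint16_t iAgeTail;               /* least recently used cached page, or DEMO_NIL */
} DemoPool;

/* Hypothetical eviction: drop the LRU page and return its user node to the free list. */
static bool demoCacheFreeOne(DemoPool *pPool)
{
    if (pPool->iAgeTail == DEMO_NIL)
        return false;

    uint16_t iPage = pPool->iAgeTail;
    uint16_t iUser = pPool->aiPageUser[iPage];
    pPool->aiPageUser[iPage] = DEMO_NIL;
    pPool->iAgeTail = (uint16_t)(iPage == 0 ? DEMO_NIL : iPage - 1); /* toy age list */

    if (iUser != DEMO_NIL)
    {
        pPool->aiUserNext[iUser] = pPool->iUserFreeHead;
        pPool->iUserFreeHead = iUser;
    }
    return true;
}

/* Keep evicting until at least one user node is free. */
static int demoTrackFreeOneUser(DemoPool *pPool)
{
    do
    {
        if (!demoCacheFreeOne(pPool))
            return -1; /* nothing left to evict; the real code would flush the pool */
    } while (pPool->iUserFreeHead == DEMO_NIL);
    return 0;
}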
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Inserts a page into the cache.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * This will create a user node for the page, insert it into the GCPhys
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * hash, and insert it into the age list.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @returns VBox status code.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @retval VINF_SUCCESS if successfully added.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @retval VERR_PGM_POOL_CLEARED if the deregistration of the physical handler will cause a light weight pool flush.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @param pPool The pool.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @param pPage The cached page.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @param GCPhys The GC physical address of the page we're going to shadow.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @param iUser The user index.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @param iUserTable The user table index.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsyncDECLINLINE(int) pgmPoolTrackInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhys, uint16_t iUser, uint32_t iUserTable)
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync LogFlow(("pgmPoolTrackInsert iUser %d iUserTable %d\n", iUser, iUserTable));
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * Find a free user node.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * Unlink the user node from the free list,
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * initialize and insert it into the user list.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * Insert into cache and enable monitoring of the guest page if enabled.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * Until we implement caching of all levels, including the CR3 one, we'll
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * have to make sure we don't try monitor & cache any recursive reuse of
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * a monitored CR3 page. Because all Windows versions do this, we'll
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * have to be able to do combined access monitoring, CR3 + PT and
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * PD + PT (guest PAE).
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * We're now cooperating with the CR3 monitor if an uncacheable page is found.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync#if defined(PGMPOOL_WITH_MONITORING) || defined(PGMPOOL_WITH_CACHE)
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync const bool fCanBeMonitored = true;
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync bool fCanBeMonitored = pPool->CTX_SUFF(pVM)->pgm.s.GCPhysGstCR3Monitored == NIL_RTGCPHYS
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync || (GCPhys & X86_PTE_PAE_PG_MASK) != (pPool->CTX_SUFF(pVM)->pgm.s.GCPhysGstCR3Monitored & X86_PTE_PAE_PG_MASK)
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync pgmPoolCacheInsert(pPool, pPage, fCanBeMonitored); /* This can be expanded. */
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync /* 'Failed' - free the usage, and keep it in the cache (if enabled). */
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync#endif /* PGMPOOL_WITH_MONITORING */
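/*
 * Illustrative sketch only: taking a user node off the free list, initializing it and
 * making it the first user of a pool page, as pgmPoolTrackInsert does. The Demo* types
 * are simplified stand-ins; the real code additionally inserts the page into the GCPhys
 * hash and age list and handles the out-of-nodes case by freeing or flushing.
 */
#include <stdint.h>

#define DEMO_NIL UINT16_MAX

typedef struct DemoUser
{
    uint16_t iNext;       /* next user node, or DEMO_NIL */
    uint16_t iUser;       /* owning (shadow) table index */
    uint32_t iUserTable;  /* entry within that table     */
} DemoUser;

typedef struct DemoPage
{
    uint16_t iUserHead;   /* first user node, or DEMO_NIL */
} DemoPage;

typedef struct DemoPool
{
    DemoUser aUsers[128];
    uint16_t iUserFreeHead;
} DemoPool;

static int demoTrackInsert(DemoPool *pPool, DemoPage *pPage, uint16_t iUser, uint32_t iUserTable)
{
    /* Find a free user node. */
    uint16_t i = pPool->iUserFreeHead;
    if (i == DEMO_NIL)
        return -1; /* the real code would free one up or flush the pool */

    /* Unlink it from the free list, initialize it and attach it to the page. */
    pPool->iUserFreeHead = pPool->aUsers[i].iNext;
    pPool->aUsers[i].iUser      = iUser;
    pPool->aUsers[i].iUserTable = iUserTable;
    pPool->aUsers[i].iNext      = DEMO_NIL;
    pPage->iUserHead = i;
    return 0;
}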
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync# ifdef PGMPOOL_WITH_CACHE /* (only used when the cache is enabled.) */
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * Adds a user reference to a page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * This will move the page to the head of the cache age list.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @returns VBox status code.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @retval VINF_SUCCESS if successfully added.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPool The pool.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPage The cached page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param iUser The user index.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param iUserTable The user table.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncstatic int pgmPoolTrackAddUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync LogFlow(("pgmPoolTrackAddUser iUser %d iUserTable %d\n", iUser, iUserTable));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Check that the entry doesn't already exist.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync AssertMsg(paUsers[i].iUser != iUser || paUsers[i].iUserTable != iUserTable, ("%x %x vs new %x %x\n", paUsers[i].iUser, paUsers[i].iUserTable, iUser, iUserTable));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Allocate a user node.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Initialize the user node and insert it.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Tell the cache to update its replacement stats for this page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync# endif /* PGMPOOL_WITH_CACHE */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Frees a user record associated with a page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * This does not clear the entry in the user table, it simply returns the
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * user record to the chain of free records.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPool The pool.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @param pPage The shadow page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param iUser The shadow page pool index of the user table.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param iUserTable The index into the user table (shadowed).
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncstatic void pgmPoolTrackFreeUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Unlink and free the specified user entry.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* Special: For PAE and 32-bit paging, there is usually no more than one user. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* General: Linear search. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* Fatal: didn't find it */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync AssertFatalMsgFailed(("Didn't find the user entry! iUser=%#x iUserTable=%#x GCPhys=%RGp\n",
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Gets the entry size of a shadow table.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param enmKind The kind of page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @returns The size of the entry in bytes. That is, 4 or 8.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @returns If the kind is not for a table, an assertion is raised and 0 is
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * returned.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsyncDECLINLINE(unsigned) pgmPoolTrackGetShadowEntrySize(PGMPOOLKIND enmKind)
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * Gets the entry size of a guest table.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @param enmKind The kind of page.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @returns The size of the entry in bytes. That is, 0, 4 or 8.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @returns If the kind is not for a table, an assertion is raised and 0 is
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * returned.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsyncDECLINLINE(unsigned) pgmPoolTrackGetGuestEntrySize(PGMPOOLKIND enmKind)
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync /** @todo can we return 0? (nobody is calling this...) */
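/*
 * Illustrative sketch only: what an entry-size lookup amounts to. The DEMOPOOLKIND
 * enumerators are made-up placeholders; the real PGMPOOLKIND set is much larger, but each
 * kind resolves to a 4 byte (32-bit) or 8 byte (PAE/long mode/EPT) table entry the same way.
 */
#include <assert.h>

typedef enum DEMOPOOLKIND
{
    DEMOPOOLKIND_32BIT_PT,
    DEMOPOOLKIND_32BIT_PD,
    DEMOPOOLKIND_PAE_PT,
    DEMOPOOLKIND_PAE_PD,
    DEMOPOOLKIND_64BIT_PML4,
    DEMOPOOLKIND_EPT_PT
} DEMOPOOLKIND;

static unsigned demoTrackGetShadowEntrySize(DEMOPOOLKIND enmKind)
{
    switch (enmKind)
    {
        case DEMOPOOLKIND_32BIT_PT:
        case DEMOPOOLKIND_32BIT_PD:
            return 4;

        case DEMOPOOLKIND_PAE_PT:
        case DEMOPOOLKIND_PAE_PD:
        case DEMOPOOLKIND_64BIT_PML4:
        case DEMOPOOLKIND_EPT_PT:
            return 8;

        default:
            assert(0 && "not a table kind");
            return 0;
    }
}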
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Scans one shadow page table for mappings of a physical page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pVM The VM handle.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPhysPage The guest page in question.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param iShw The shadow page table.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param cRefs The number of references made in that PT.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncstatic void pgmPoolTrackFlushGCPhysPTInt(PVM pVM, PCPGMPAGE pPhysPage, uint16_t iShw, uint16_t cRefs)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync LogFlow(("pgmPoolTrackFlushGCPhysPT: HCPhys=%RHp iShw=%d cRefs=%d\n", pPhysPage->HCPhys, iShw, cRefs));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Assert sanity.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync AssertFatalMsg(iShw < pPool->cCurPages && iShw != NIL_PGMPOOL_IDX, ("iShw=%d\n", iShw));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Then, clear the actual mappings to the page in the shadow PT.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync const uint32_t u32 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PX86PT pPT = (PX86PT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if ((pPT->a[i].u & (X86_PTE_PG_MASK | X86_PTE_P)) == u32)
0d18f5b89ac6eb5d44c3e3d5453e55ab8cd7e804vboxsync Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pte=%RX32 cRefs=%#x\n", i, pPT->a[i], cRefs));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPT->a[i].u = 0;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync RTLogPrintf("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if ((pPT->a[i].u & (X86_PTE_PG_MASK | X86_PTE_P)) == u32)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPT->a[i].u = 0;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync AssertFatalMsgFailed(("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PX86PTPAE pPT = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pVM, pPage);
8b90eb0585fa16024709ca374c69f1eb5d5a5a7cvboxsync for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if ((pPT->a[i].u & (X86_PTE_PAE_PG_MASK | X86_PTE_P)) == u64)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pte=%RX64 cRefs=%#x\n", i, pPT->a[i], cRefs));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPT->a[i].u = 0;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync RTLogPrintf("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if ((pPT->a[i].u & (X86_PTE_PAE_PG_MASK | X86_PTE_P)) == u64)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPT->a[i].u = 0;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync AssertFatalMsgFailed(("cRefs=%d iFirstPresent=%d cPresent=%d u64=%RX64\n", cRefs, pPage->iFirstPresent, pPage->cPresent, u64));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PEPTPT pPT = (PEPTPT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if ((pPT->a[i].u & (EPT_PTE_PG_MASK | X86_PTE_P)) == u64)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pte=%RX64 cRefs=%#x\n", i, pPT->a[i], cRefs));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPT->a[i].u = 0;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync RTLogPrintf("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if ((pPT->a[i].u & (EPT_PTE_PG_MASK | X86_PTE_P)) == u64)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPT->a[i].u = 0;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync AssertFatalMsgFailed(("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent));
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync AssertFatalMsgFailed(("enmKind=%d iShw=%d\n", pPage->enmKind, iShw));
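/*
 * Illustrative sketch only: scanning one shadow page table for entries that map a given
 * host physical page and clearing them, stopping once the expected number of references
 * has been cleared. Plain uint32_t PTEs and invented DEMO_* constants stand in for the
 * real PT structures and X86_PTE_* masks.
 */
#include <stdint.h>

#define DEMO_PTE_P       0x1u          /* present bit        */
#define DEMO_PTE_PG_MASK 0xfffff000u   /* page frame address */
#define DEMO_PT_ENTRIES  1024u

static unsigned demoFlushPhysPageFromPT(uint32_t *paPte, unsigned iFirstPresent,
                                        uint32_t uHCPhysPage, unsigned cRefs)
{
    /* Only present entries pointing at exactly this frame count as references. */
    const uint32_t uMatch = (uHCPhysPage & DEMO_PTE_PG_MASK) | DEMO_PTE_P;
    unsigned cCleared = 0;

    for (unsigned i = iFirstPresent; i < DEMO_PT_ENTRIES && cRefs > 0; i++)
        if ((paPte[i] & (DEMO_PTE_PG_MASK | DEMO_PTE_P)) == uMatch)
        {
            paPte[i] = 0;
            cCleared++;
            cRefs--;
        }
    return cCleared;
}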
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * Scans one shadow page table for mappings of a physical page.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @param pVM The VM handle.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @param pPhysPage The guest page in question.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @param iShw The shadow page table.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @param cRefs The number of references made in that PT.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsyncvoid pgmPoolTrackFlushGCPhysPT(PVM pVM, PPGMPAGE pPhysPage, uint16_t iShw, uint16_t cRefs)
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); NOREF(pPool);
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync LogFlow(("pgmPoolTrackFlushGCPhysPT: HCPhys=%RHp iShw=%d cRefs=%d\n", pPhysPage->HCPhys, iShw, cRefs));
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync STAM_PROFILE_START(&pPool->StatTrackFlushGCPhysPT, f);
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync pgmPoolTrackFlushGCPhysPTInt(pVM, pPhysPage, iShw, cRefs);
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync pPhysPage->HCPhys &= MM_RAM_FLAGS_NO_REFS_MASK; /** @todo PAGE FLAGS */
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPT, f);
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * Flushes a list of shadow page tables mapping the same physical page.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @param pVM The VM handle.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @param pPhysPage The guest page in question.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync * @param iPhysExt The physical cross reference extent list to flush.
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsyncvoid pgmPoolTrackFlushGCPhysPTs(PVM pVM, PPGMPAGE pPhysPage, uint16_t iPhysExt)
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync STAM_PROFILE_START(&pPool->StatTrackFlushGCPhysPTs, f);
3e4bc3e32bc6ac59335fae7115d09f7a2ca9dc4bvboxsync LogFlow(("pgmPoolTrackFlushGCPhysPTs: HCPhys=%RHp iPhysExt=%d\n", pPhysPage->HCPhys, iPhysExt));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pPhysExt->aidx); i++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmPoolTrackFlushGCPhysPTInt(pVM, pPhysPage, pPhysExt->aidx[i], 1);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* insert the list into the free list and clear the ram range entry. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPhysPage->HCPhys &= MM_RAM_FLAGS_NO_REFS_MASK; /** @todo PAGE FLAGS */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPTs, f);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync#endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
ee4d840f54fd2dcea8a73b1b86d5ec0db370b05dvboxsync * Scans all shadow page tables for mappings of a physical page.
0db6a029780d9f9b347500e117320a8d5661efe5vboxsync * This may be slow, but it's most likely more efficient than cleaning
ee4d840f54fd2dcea8a73b1b86d5ec0db370b05dvboxsync * out the entire page pool / cache.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @returns VBox status code.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @retval VINF_SUCCESS if all references have been successfully cleared.
ee4d840f54fd2dcea8a73b1b86d5ec0db370b05dvboxsync * @retval VINF_PGM_GCPHYS_ALIASED if we're better off with a CR3 sync and
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * a page pool cleaning.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pVM The VM handle.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPhysPage The guest page in question.
da3503c04ce76e653401396fe2795a9bc2427a1dvboxsyncint pgmPoolTrackFlushGCPhysPTsSlow(PVM pVM, PPGMPAGE pPhysPage)
ee4d840f54fd2dcea8a73b1b86d5ec0db370b05dvboxsync STAM_PROFILE_START(&pPool->StatTrackFlushGCPhysPTsSlow, s);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync LogFlow(("pgmPoolTrackFlushGCPhysPTsSlow: cUsedPages=%d cPresent=%d HCPhys=%RHp\n",
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPool->cUsedPages, pPool->cPresent, pPhysPage->HCPhys));
cab115cfa31c584def7069312a1e23c3fc88533bvboxsync * There is a limit to what makes sense.
cab115cfa31c584def7069312a1e23c3fc88533bvboxsync LogFlow(("pgmPoolTrackFlushGCPhysPTsSlow: giving up... (cPresent=%d)\n", pPool->cPresent));
cab115cfa31c584def7069312a1e23c3fc88533bvboxsync STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPTsSlow, s);
cab115cfa31c584def7069312a1e23c3fc88533bvboxsync * Iterate all the pages until we've encountered all that are in use.
cab115cfa31c584def7069312a1e23c3fc88533bvboxsync * This is a simple but not quite optimal solution.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync const uint32_t u32 = u64;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * We only care about shadow page tables.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PX86PT pPT = (PX86PT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if ((pPT->a[i].u & (X86_PTE_PG_MASK | X86_PTE_P)) == u32)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync //Log4(("pgmPoolTrackFlushGCPhysPTsSlow: idx=%d i=%d pte=%RX32\n", iPage, i, pPT->a[i]));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPT->a[i].u = 0;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PX86PTPAE pPT = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pVM, pPage);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if ((pPT->a[i].u & (X86_PTE_PAE_PG_MASK | X86_PTE_P)) == u64)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync //Log4(("pgmPoolTrackFlushGCPhysPTsSlow: idx=%d i=%d pte=%RX64\n", iPage, i, pPT->a[i]));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPT->a[i].u = 0;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPhysPage->HCPhys &= MM_RAM_FLAGS_NO_REFS_MASK; /** @todo PAGE FLAGS */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPTsSlow, s);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Clears the user entry in a user table.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * This is used to remove all references to a page when flushing it.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncstatic void pgmPoolTrackClearPageUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PCPGMPOOLUSER pUser)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Map the user page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PPGMPOOLPAGE pUserPage = &pPool->aPages[pUser->iUser];
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* Must translate the fake 2048 entry PD to a 512 PD one since the R0 mapping is not linear. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pUserPage = &pPool->aPages[PGMPOOL_IDX_PAE_PD_0 + iPdpt];
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(pUserPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync u.pau64 = (uint64_t *)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pUserPage);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* Safety precaution in case we change the paging for other modes too in the future. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(PGMGetHyperCR3(pPool->CTX_SUFF(pVM)) != pPage->Core.Key);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Some sanity checks.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(!(u.pau32[iUserTable] & PGM_PDFLAGS_MAPPING));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(iUserTable < 2048 && pUser->iUser == PGMPOOL_IDX_PAE_PD);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync AssertMsg(!(u.pau64[iUserTable] & PGM_PDFLAGS_MAPPING), ("%llx %d\n", u.pau64[iUserTable], iUserTable));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(!(u.pau64[iUserTable] & PGM_PLXFLAGS_PERMANENT));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(!(u.pau64[iUserTable] & PGM_PDFLAGS_MAPPING));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(!(u.pau64[iUserTable] & PGM_PLXFLAGS_PERMANENT));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(!(u.pau64[iUserTable] & PGM_PLXFLAGS_PERMANENT));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* GCPhys >> PAGE_SHIFT is the index here */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync AssertMsgFailed(("enmKind=%d\n", pUserPage->enmKind));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync#endif /* VBOX_STRICT */
9d8f91b1f808d2e9d0594c41b6d89e18e05ecdcevboxsync * Clear the entry in the user page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* 32-bit entries */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* 64-bit entries */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync AssertFatalMsgFailed(("enmKind=%d iUser=%#x iUserTable=%#x\n", pUserPage->enmKind, pUser->iUser, pUser->iUserTable));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Clears all users of a page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncstatic void pgmPoolTrackClearPageUsers(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Free all the user records.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* Clear the entry in the user table. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmPoolTrackClearPageUser(pPool, pPage, &paUsers[i]);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* Free it. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* Next. */
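/*
 * Illustrative sketch only: walking a page's user chain, clearing the referencing entry in
 * each user table and returning every node to the free list, as the function above does.
 * demoClearUserEntry is a do-nothing placeholder for pgmPoolTrackClearPageUser and the
 * Demo* types are simplified stand-ins.
 */
#include <stdint.h>

#define DEMO_NIL UINT16_MAX

typedef struct DemoUser
{
    uint16_t iNext;
    uint16_t iUser;
    uint32_t iUserTable;
} DemoUser;

typedef struct DemoPool
{
    DemoUser aUsers[128];
    uint16_t iUserFreeHead;
} DemoPool;

/* Placeholder: a real version would zero the PD/PDPT/PML4 entry named by iUser/iUserTable. */
static void demoClearUserEntry(DemoPool *pPool, const DemoUser *pUser)
{
    (void)pPool; (void)pUser;
}

static void demoTrackClearPageUsers(DemoPool *pPool, uint16_t *piUserHead)
{
    uint16_t i = *piUserHead;
    while (i != DEMO_NIL)
    {
        /* Clear the entry in the user table. */
        demoClearUserEntry(pPool, &pPool->aUsers[i]);

        /* Free the node. */
        uint16_t iNext = pPool->aUsers[i].iNext;
        pPool->aUsers[i].iNext = pPool->iUserFreeHead;
        pPool->iUserFreeHead = i;

        /* Next. */
        i = iNext;
    }
    *piUserHead = DEMO_NIL;
}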
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Allocates a new physical cross reference extent.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @returns Pointer to the allocated extent on success. NULL if we're out of them.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pVM The VM handle.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param piPhysExt Where to store the phys ext index.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncPPGMPOOLPHYSEXT pgmPoolTrackPhysExtAlloc(PVM pVM, uint16_t *piPhysExt)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync STAM_COUNTER_INC(&pPool->StamTrackPhysExtAllocFailures);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PPGMPOOLPHYSEXT pPhysExt = &pPool->CTX_SUFF(paPhysExts)[iPhysExt];
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Frees a physical cross reference extent.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pVM The VM handle.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param iPhysExt The extent to free.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncvoid pgmPoolTrackPhysExtFree(PVM pVM, uint16_t iPhysExt)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PPGMPOOLPHYSEXT pPhysExt = &pPool->CTX_SUFF(paPhysExts)[iPhysExt];
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pPhysExt->aidx); i++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Frees a physical cross reference extent.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pVM The VM handle.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param iPhysExt The extent to free.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncvoid pgmPoolTrackPhysExtFreeList(PVM pVM, uint16_t iPhysExt)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pPhysExt->aidx); i++)
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * Insert a reference into a list of physical cross reference extents.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @returns The new ram range flags (top 16-bits).
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pVM The VM handle.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * @param iPhysExt The physical extent index of the list head.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * @param iShwPT The shadow page table index.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncstatic uint16_t pgmPoolTrackPhysExtInsert(PVM pVM, uint16_t iPhysExt, uint16_t iShwPT)
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync /* special common case. */
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync if (paPhysExts[iPhysExt].aidx[2] == NIL_PGMPOOL_IDX)
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync STAM_COUNTER_INC(&pVM->pgm.s.StatTrackAliasedMany);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync LogFlow(("pgmPoolTrackPhysExtAddref: %d:{,,%d}\n", iPhysExt, iShwPT));
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync return iPhysExt | (MM_RAM_FLAGS_CREFS_PHYSEXT << (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT));
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync /* general treatment. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned i = 0; i < RT_ELEMENTS(paPhysExts[iPhysExt].aidx); i++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if (paPhysExts[iPhysExt].aidx[i] == NIL_PGMPOOL_IDX)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync STAM_COUNTER_INC(&pVM->pgm.s.StatTrackAliasedMany);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync LogFlow(("pgmPoolTrackPhysExtAddref: %d:{%d} i=%d cMax=%d\n", iPhysExt, iShwPT, i, cMax));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync return iPhysExtStart | (MM_RAM_FLAGS_CREFS_PHYSEXT << (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync LogFlow(("pgmPoolTrackPhysExtAddref: overflow (1) iShwPT=%d\n", iShwPT));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync return MM_RAM_FLAGS_IDX_OVERFLOWED | (MM_RAM_FLAGS_CREFS_PHYSEXT << (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* add another extent to the list. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PPGMPOOLPHYSEXT pNew = pgmPoolTrackPhysExtAlloc(pVM, &iPhysExt);
996f6011d1bafd9dd0ebfd07bf8821eff20491dfvboxsync return MM_RAM_FLAGS_IDX_OVERFLOWED | (MM_RAM_FLAGS_CREFS_PHYSEXT << (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT));
996f6011d1bafd9dd0ebfd07bf8821eff20491dfvboxsync LogFlow(("pgmPoolTrackPhysExtAddref: added new extent %d:{%d}->%d\n", iPhysExt, iShwPT, iPhysExtStart));
996f6011d1bafd9dd0ebfd07bf8821eff20491dfvboxsync return iPhysExt | (MM_RAM_FLAGS_CREFS_PHYSEXT << (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT));
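/*
 * Illustrative sketch only: how a 16-bit tracking word can pack a small reference-count
 * field on top of an index field, with one special count value meaning "the index refers
 * to a physical extent list" and one special index meaning "overflowed". The DEMO_* shift
 * and mask values are invented for this sketch and are not the real MM_RAM_FLAGS_* layout.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_IDX_BITS       12u
#define DEMO_IDX_MASK       ((1u << DEMO_IDX_BITS) - 1u)   /* low 12 bits: index     */
#define DEMO_CREFS_SHIFT    DEMO_IDX_BITS                   /* high 4 bits: ref count */
#define DEMO_CREFS_PHYSEXT  0xFu                            /* "index is an extent"   */
#define DEMO_IDX_OVERFLOWED DEMO_IDX_MASK                   /* "too many extents"     */

static uint16_t demoPackPhysExt(uint16_t iPhysExt)
{
    return (uint16_t)(iPhysExt | (DEMO_CREFS_PHYSEXT << DEMO_CREFS_SHIFT));
}

int main(void)
{
    uint16_t u16 = demoPackPhysExt(7);
    printf("packed=%#x crefs=%u idx=%u\n",
           (unsigned)u16, (unsigned)(u16 >> DEMO_CREFS_SHIFT), (unsigned)(u16 & DEMO_IDX_MASK));
    printf("overflowed=%#x\n", (unsigned)demoPackPhysExt(DEMO_IDX_OVERFLOWED));
    return 0;
}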
996f6011d1bafd9dd0ebfd07bf8821eff20491dfvboxsync * Add a reference to a guest physical page where extents are in use.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @returns The new ram range flags (top 16-bits).
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pVM The VM handle.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param u16 The ram range flags (top 16-bits).
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param iShwPT The shadow page table index.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncuint16_t pgmPoolTrackPhysExtAddref(PVM pVM, uint16_t u16, uint16_t iShwPT)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if ((u16 >> (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT)) != MM_RAM_FLAGS_CREFS_PHYSEXT)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Convert to extent list.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert((u16 >> (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT)) == 1);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PPGMPOOLPHYSEXT pPhysExt = pgmPoolTrackPhysExtAlloc(pVM, &iPhysExt);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync LogFlow(("pgmPoolTrackPhysExtAddref: new extent: %d:{%d, %d}\n", iPhysExt, u16 & MM_RAM_FLAGS_IDX_MASK, iShwPT));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync u16 = iPhysExt | (MM_RAM_FLAGS_CREFS_PHYSEXT << (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync u16 = MM_RAM_FLAGS_IDX_OVERFLOWED | (MM_RAM_FLAGS_CREFS_PHYSEXT << (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync else if (u16 != (MM_RAM_FLAGS_IDX_OVERFLOWED | (MM_RAM_FLAGS_CREFS_PHYSEXT << (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT))))
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Insert into the extent list.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync u16 = pgmPoolTrackPhysExtInsert(pVM, u16 & MM_RAM_FLAGS_IDX_MASK, iShwPT);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync STAM_COUNTER_INC(&pVM->pgm.s.StatTrackAliasedLots);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Clear references to guest physical memory.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPool The pool.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPage The page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPhysPage Pointer to the aPages entry in the ram range.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncvoid pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMPAGE pPhysPage)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync const unsigned cRefs = pPhysPage->HCPhys >> MM_RAM_FLAGS_CREFS_SHIFT; /** @todo PAGE FLAGS */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync AssertFatalMsg(cRefs == MM_RAM_FLAGS_CREFS_PHYSEXT, ("cRefs=%d HCPhys=%RHp pPage=%p:{.idx=%d}\n", cRefs, pPhysPage->HCPhys, pPage, pPage->idx));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync uint16_t iPhysExt = (pPhysPage->HCPhys >> MM_RAM_FLAGS_IDX_SHIFT) & MM_RAM_FLAGS_IDX_MASK;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Look for the shadow page and check if it's all freed.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned i = 0; i < RT_ELEMENTS(paPhysExts[iPhysExt].aidx); i++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (i = 0; i < RT_ELEMENTS(paPhysExts[iPhysExt].aidx); i++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if (paPhysExts[iPhysExt].aidx[i] != NIL_PGMPOOL_IDX)
996f6011d1bafd9dd0ebfd07bf8821eff20491dfvboxsync LogFlow(("pgmPoolTrackPhysExtDerefGCPhys: HCPhys=%RX64 idx=%d\n", pPhysPage->HCPhys, pPage->idx));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* we can free the node. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync const uint16_t iPhysExtNext = paPhysExts[iPhysExt].iNext;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* lonely node */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync LogFlow(("pgmPoolTrackPhysExtDerefGCPhys: HCPhys=%RX64 idx=%d lonely\n", pPhysPage->HCPhys, pPage->idx));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPhysPage->HCPhys &= MM_RAM_FLAGS_NO_REFS_MASK; /** @todo PAGE FLAGS */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync else if (iPhysExtPrev == NIL_PGMPOOL_PHYSEXT_INDEX)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync LogFlow(("pgmPoolTrackPhysExtDerefGCPhys: HCPhys=%RX64 idx=%d head\n", pPhysPage->HCPhys, pPage->idx));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPhysPage->HCPhys = (pPhysPage->HCPhys & MM_RAM_FLAGS_NO_REFS_MASK) /** @todo PAGE FLAGS */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync | ((uint64_t)MM_RAM_FLAGS_CREFS_PHYSEXT << MM_RAM_FLAGS_CREFS_SHIFT)
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync | ((uint64_t)iPhysExtNext << MM_RAM_FLAGS_IDX_SHIFT);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* in list */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync LogFlow(("pgmPoolTrackPhysExtDerefGCPhys: HCPhys=%RX64 idx=%d\n", pPhysPage->HCPhys, pPage->idx));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync AssertFatalMsgFailed(("not-found! cRefs=%d HCPhys=%RHp pPage=%p:{.idx=%d}\n", cRefs, pPhysPage->HCPhys, pPage, pPage->idx));
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync else /* nothing to do */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync LogFlow(("pgmPoolTrackPhysExtDerefGCPhys: HCPhys=%RX64\n", pPhysPage->HCPhys));
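/*
 * Illustrative sketch only: dereferencing a shadow page from a chain of extent records --
 * find the matching slot, clear it, and if that left the node empty, unlink the node from
 * the chain. The Demo* types are simplified stand-ins; the real code also rewrites the
 * tracking bits kept in the per-page flags and returns the empty node to the free list.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_NIL   UINT16_MAX
#define DEMO_SLOTS 3

typedef struct DemoPhysExt
{
    uint16_t iNext;              /* next extent in the chain, or DEMO_NIL */
    uint16_t aidx[DEMO_SLOTS];   /* shadow page indexes, DEMO_NIL if free */
} DemoPhysExt;

static bool demoPhysExtDeref(DemoPhysExt *paPhysExts, uint16_t *piHead, uint16_t idxShw)
{
    uint16_t iPrev = DEMO_NIL;
    for (uint16_t iExt = *piHead; iExt != DEMO_NIL; iPrev = iExt, iExt = paPhysExts[iExt].iNext)
        for (unsigned i = 0; i < DEMO_SLOTS; i++)
            if (paPhysExts[iExt].aidx[i] == idxShw)
            {
                paPhysExts[iExt].aidx[i] = DEMO_NIL;

                /* If the whole node is now empty, unlink it from the chain. */
                bool fEmpty = true;
                for (unsigned j = 0; j < DEMO_SLOTS; j++)
                    if (paPhysExts[iExt].aidx[j] != DEMO_NIL)
                        fEmpty = false;
                if (fEmpty)
                {
                    if (iPrev == DEMO_NIL)
                        *piHead = paPhysExts[iExt].iNext;
                    else
                        paPhysExts[iPrev].iNext = paPhysExts[iExt].iNext;
                }
                return true;
            }
    return false; /* not found */
}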
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Clear references to guest physical memory.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * This is the same as pgmPoolTracDerefGCPhys except that the guest physical address
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * is assumed to be correct, so the linear search can be skipped and we can assert
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * at an earlier point.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * @param pPool The pool.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPage The page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param HCPhys The host physical address corresponding to the guest page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param GCPhys The guest physical address corresponding to HCPhys.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncstatic void pgmPoolTracDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhys)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Walk range list.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRanges);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* does it match? */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncRTHCPHYS HCPhysPage = PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncLog(("pgmPoolTracDerefGCPhys %RHp vs %RHp\n", HCPhysPage, HCPhys));
75ef08b33f9c67a8dd50748ece1117aed8098d51vboxsync if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmTrackDerefGCPhys(pPool, pPage, &pRam->aPages[iPage]);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync AssertFatalMsgFailed(("HCPhys=%RHp GCPhys=%RGp\n", HCPhys, GCPhys));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Clear references to guest physical memory.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPool The pool.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPage The page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param HCPhys The host physical address corresponding to the guest page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param GCPhysHint The guest physical address which may correspond to HCPhys.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncstatic void pgmPoolTracDerefGCPhysHint(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhysHint)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Walk range list.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRanges);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* does it match? */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmTrackDerefGCPhys(pPool, pPage, &pRam->aPages[iPage]);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Damn, the hint didn't work. We'll have to do an expensive linear search.
4946f90c5c7016131555f0c925091d4ede6bdde0vboxsync STAM_COUNTER_INC(&pPool->StatTrackLinearRamSearches);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRanges);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync while (iPage-- > 0)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Log4(("pgmPoolTracDerefGCPhysHint: Linear HCPhys=%RHp GCPhysHint=%RGp GCPhysReal=%RGp\n",
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync HCPhys, GCPhysHint, pRam->GCPhys + (iPage << PAGE_SHIFT)));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmTrackDerefGCPhys(pPool, pPage, &pRam->aPages[iPage]);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync AssertFatalMsgFailed(("HCPhys=%RHp GCPhysHint=%RGp\n", HCPhys, GCPhysHint));
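/*
 * Illustrative sketch only: the "try the hint first, then fall back to a linear search"
 * pattern used above when clearing a reference by host physical address. DemoRamPage and
 * demoFindByHCPhys are hypothetical stand-ins for the RAM range walk; the real code
 * iterates PGMRAMRANGE structures and asserts fatally when nothing matches.
 */
#include <stddef.h>
#include <stdint.h>

typedef struct DemoRamPage
{
    uint64_t HCPhys;   /* host physical address backing this guest page */
    uint64_t GCPhys;   /* guest physical address of this page           */
} DemoRamPage;

/* Return the matching page, checking the hinted entry before scanning everything. */
static DemoRamPage *demoFindByHCPhys(DemoRamPage *paPages, size_t cPages,
                                     uint64_t HCPhys, size_t iHint)
{
    /* Fast path: the hint is usually right. */
    if (iHint < cPages && paPages[iHint].HCPhys == HCPhys)
        return &paPages[iHint];

    /* The hint didn't work -- do the expensive linear search. */
    for (size_t i = 0; i < cPages; i++)
        if (paPages[i].HCPhys == HCPhys)
            return &paPages[i];

    return NULL;
}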
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Clear references to guest physical memory in a 32-bit / 32-bit page table.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPool The pool.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPage The page.
4946f90c5c7016131555f0c925091d4ede6bdde0vboxsync * @param pShwPT The shadow page table (mapping of the page).
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pGstPT The guest page table.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncDECLINLINE(void) pgmPoolTrackDerefPT32Bit32Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PT pShwPT, PCX86PT pGstPT)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Log4(("pgmPoolTrackDerefPT32Bit32Bit: i=%d pte=%RX32 hint=%RX32\n",
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync i, pShwPT->a[i].u & X86_PTE_PG_MASK, pGstPT->a[i].u & X86_PTE_PG_MASK));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmPoolTracDerefGCPhysHint(pPool, pPage, pShwPT->a[i].u & X86_PTE_PG_MASK, pGstPT->a[i].u & X86_PTE_PG_MASK);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Clear references to guest physical memory in a PAE / 32-bit page table.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPool The pool.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPage The page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pShwPT The shadow page table (mapping of the page).
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pGstPT The guest page table (just a half one).
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncDECLINLINE(void) pgmPoolTrackDerefPTPae32Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT, PCX86PT pGstPT)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pShwPT->a); i++)
4946f90c5c7016131555f0c925091d4ede6bdde0vboxsync Log4(("pgmPoolTrackDerefPTPae32Bit: i=%d pte=%RX64 hint=%RX32\n",
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync i, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pGstPT->a[i].u & X86_PTE_PG_MASK));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmPoolTracDerefGCPhysHint(pPool, pPage, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pGstPT->a[i].u & X86_PTE_PG_MASK);
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * Clear references to guest physical memory in a PAE / PAE page table.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pPool The pool.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pPage The page.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pShwPT The shadow page table (mapping of the page).
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pGstPT The guest page table.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsyncDECLINLINE(void) pgmPoolTrackDerefPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT, PCX86PTPAE pGstPT)
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pShwPT->a); i++)
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync Log4(("pgmPoolTrackDerefPTPaePae: i=%d pte=%RX64 hint=%RX64\n",
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync i, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pGstPT->a[i].u & X86_PTE_PAE_PG_MASK));
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync pgmPoolTracDerefGCPhysHint(pPool, pPage, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pGstPT->a[i].u & X86_PTE_PAE_PG_MASK);
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * Clear references to guest physical memory in a 32-bit / 4MB page table.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pPool The pool.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pPage The page.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pShwPT The shadow page table (mapping of the page).
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsyncDECLINLINE(void) pgmPoolTrackDerefPT32Bit4MB(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PT pShwPT)
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pShwPT->a); i++, GCPhys += PAGE_SIZE)
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync Log4(("pgmPoolTrackDerefPT32Bit4MB: i=%d pte=%RX32 GCPhys=%RGp\n",
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync pgmPoolTracDerefGCPhys(pPool, pPage, pShwPT->a[i].u & X86_PTE_PG_MASK, GCPhys);
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * Clear references to guest physical memory in a PAE / 2/4MB page table.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pPool The pool.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pPage The page.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pShwPT The shadow page table (mapping of the page).
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsyncDECLINLINE(void) pgmPoolTrackDerefPTPaeBig(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT)
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pShwPT->a); i++, GCPhys += PAGE_SIZE)
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync Log4(("pgmPoolTrackDerefPTPaeBig: i=%d pte=%RX64 hint=%RGp\n",
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync pgmPoolTracDerefGCPhys(pPool, pPage, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, GCPhys);
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync#endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * Clear references to shadowed pages in a PAE (legacy or 64 bits) page directory.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pPool The pool.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pPage The page.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pShwPD The shadow page directory (mapping of the page).
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsyncDECLINLINE(void) pgmPoolTrackDerefPDPae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PDPAE pShwPD)
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & X86_PDE_PAE_PG_MASK);
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync AssertFatalMsgFailed(("%RX64\n", pShwPD->a[i].u & X86_PDE_PAE_PG_MASK));
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * Clear references to shadowed pages in a 64-bit page directory pointer table.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pPool The pool.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pPage The page.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pShwPDPT The shadow page directory pointer table (mapping of the page).
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsyncDECLINLINE(void) pgmPoolTrackDerefPDPT64Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PDPT pShwPDPT)
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pShwPDPT->a); i++)
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPDPT->a[i].u & X86_PDPE_PG_MASK);
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync AssertFatalMsgFailed(("%RX64\n", pShwPDPT->a[i].u & X86_PDPE_PG_MASK));
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * Clear references to shadowed pages in a 64-bit level 4 page table.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pPool The pool.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pPage The page.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pShwPML4 The shadow PML4 (mapping of the page).
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncDECLINLINE(void) pgmPoolTrackDerefPML464Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PML4 pShwPML4)
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pShwPML4->a); i++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPML4->a[i].u & X86_PML4E_PG_MASK);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync AssertFatalMsgFailed(("%RX64\n", pShwPML4->a[i].u & X86_PML4E_PG_MASK));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * Clear references to shadowed pages in an EPT page table.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pPool The pool.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPage The page.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pShwPT The shadow page table (mapping of the page).
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsyncDECLINLINE(void) pgmPoolTrackDerefPTEPT(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PEPTPT pShwPT)
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pShwPT->a); i++, GCPhys += PAGE_SIZE)
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync Log4(("pgmPoolTrackDerefPTEPT: i=%d pte=%RX64 GCPhys=%RX64\n",
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync i, pShwPT->a[i].u & EPT_PTE_PG_MASK, pPage->GCPhys));
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync pgmPoolTracDerefGCPhys(pPool, pPage, pShwPT->a[i].u & EPT_PTE_PG_MASK, GCPhys);
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * Clear references to shadowed pages in an EPT page directory.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pPool The pool.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pPage The page.
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync * @param pShwPD The shadow page directory (mapping of the page).
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsyncDECLINLINE(void) pgmPoolTrackDerefPDEPT(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PEPTPD pShwPD)
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & EPT_PDE_PG_MASK);
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync AssertFatalMsgFailed(("%RX64\n", pShwPD->a[i].u & EPT_PDE_PG_MASK));
2255e4c50ad0baa1a293a35a61e893633b7f7727vboxsync /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Clear references to shadowed pages in an EPT page directory pointer table.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPool The pool.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * @param pPage The page.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * @param pShwPDPT The shadow page directory pointer table (mapping of the page).
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsyncDECLINLINE(void) pgmPoolTrackDerefPDPTEPT(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PEPTPDPT pShwPDPT)
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pShwPDPT->a); i++)
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPDPT->a[i].u & EPT_PDPTE_PG_MASK);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync AssertFatalMsgFailed(("%RX64\n", pShwPDPT->a[i].u & EPT_PDPTE_PG_MASK));
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * Clears all references made by this page.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * This includes other shadow pages and GC physical addresses.
ad27e1d5e48ca41245120c331cc88b50464813cevboxsync * @param pPool The pool.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * @param pPage The page.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsyncstatic void pgmPoolTrackDeref(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * Map the shadow page and take action according to the page kind.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync pgmPoolTrackDerefPT32Bit32Bit(pPool, pPage, (PX86PT)pvShw, (PCX86PT)pvGst);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync int rc = PGM_GCPHYS_2_PTR_EX(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync pgmPoolTrackDerefPTPae32Bit(pPool, pPage, (PX86PTPAE)pvShw, (PCX86PT)pvGst);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync pgmPoolTrackDerefPTPaePae(pPool, pPage, (PX86PTPAE)pvShw, (PCX86PTPAE)pvGst);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync case PGMPOOLKIND_32BIT_PT_FOR_PHYS: /* treat it like a 4 MB page */
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync pgmPoolTrackDerefPT32Bit4MB(pPool, pPage, (PX86PT)pvShw);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync case PGMPOOLKIND_PAE_PT_FOR_PHYS: /* treat it like a 2 MB page */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmPoolTrackDerefPTPaeBig(pPool, pPage, (PX86PTPAE)pvShw);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync#else /* !PGMPOOL_WITH_GCPHYS_TRACKING */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync#endif /* !PGMPOOL_WITH_GCPHYS_TRACKING */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmPoolTrackDerefPDPae(pPool, pPage, (PX86PDPAE)pvShw);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmPoolTrackDerefPDPT64Bit(pPool, pPage, (PX86PDPT)pvShw);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmPoolTrackDerefPML464Bit(pPool, pPage, (PX86PML4)pvShw);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmPoolTrackDerefPTEPT(pPool, pPage, (PEPTPT)pvShw);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmPoolTrackDerefPDEPT(pPool, pPage, (PEPTPD)pvShw);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmPoolTrackDerefPDPTEPT(pPool, pPage, (PEPTPDPT)pvShw);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync AssertFatalMsgFailed(("enmKind=%d\n", pPage->enmKind));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* Paranoia: clear the shadow page. Remove this later (i.e. let Alloc and ClearAll do it). */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync#endif /* PGMPOOL_WITH_USER_TRACKING */
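/*
 * Minimal sketch of how the deref dispatch in pgmPoolTrackDeref is structured:
 * a switch on pPage->enmKind routing each shadow page kind to its deref helper.
 * Only kinds and helpers that appear above are used; the case list is
 * deliberately incomplete and the fragment assumes the local pvShw mapping.
 */
#if 0 /* illustrative sketch */
    switch (pPage->enmKind)
    {
        case PGMPOOLKIND_32BIT_PT_FOR_PHYS:     /* treated like a 4 MB page */
            pgmPoolTrackDerefPT32Bit4MB(pPool, pPage, (PX86PT)pvShw);
            break;
        case PGMPOOLKIND_PAE_PT_FOR_PHYS:       /* treated like a 2 MB page */
            pgmPoolTrackDerefPTPaeBig(pPool, pPage, (PX86PTPAE)pvShw);
            break;
        /* ... PD, PDPT, PML4 and the EPT kinds dispatch the same way ... */
        default:
            AssertFatalMsgFailed(("enmKind=%d\n", pPage->enmKind));
    }
#endif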
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Flushes all the special root pages as part of a pgmPoolFlushAllInt operation.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPool The pool.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncstatic void pgmPoolFlushAllSpecialRoots(PPGMPOOL pPool)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * These special pages are all mapped into the indexes 1..PGMPOOL_IDX_FIRST.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Get the page address.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Mark stuff not present.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync u.pau64 = (uint64_t *)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned iPage = 0; iPage < X86_PG_ENTRIES; iPage++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if ((u.pau32[iPage] & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == X86_PDE_P)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync u.pau64 = (uint64_t *)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned iPage = 0; iPage < X86_PG_PAE_ENTRIES; iPage++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync if ((u.pau64[iPage] & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == X86_PDE_P)
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync /* Not root of shadowed pages currently, ignore it. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync u.pau64 = (uint64_t *)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Paranoia (to be removed), flag a global CR3 sync.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
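/*
 * Minimal sketch of the access pattern used when marking the special root
 * pages not present: a small union gives 32-bit and 64-bit views of the same
 * mapping, hypervisor mapping entries (PGM_PDFLAGS_MAPPING) are left alone and
 * everything else is zeroed.  The union declaration and the zero assignment are
 * assumptions; the loop bound and the flag test mirror the code above.
 */
#if 0 /* illustrative sketch */
    union
    {
        uint32_t *pau32;
        uint64_t *pau64;
    } u;
    u.pau64 = (uint64_t *)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
    for (unsigned iPage = 0; iPage < X86_PG_PAE_ENTRIES; iPage++)
        if ((u.pau64[iPage] & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == X86_PDE_P)
            u.pau64[iPage] = 0;                 /* not a hypervisor mapping: clear it */
#endif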
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Flushes the entire cache.
4946f90c5c7016131555f0c925091d4ede6bdde0vboxsync * It will assert a global CR3 flush (FF) and assumes the caller is aware of this
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * and will execute the CR3 flush.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pPool The pool.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * If there are no pages in the pool, there is nothing to do.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* Start a subset so we won't run out of mapping space. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Nuke the free list and reinsert all pages into it.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned i = pPool->cCurPages - 1; i >= PGMPOOL_IDX_FIRST; i--)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(pPage->Core.Key == MMPage2Phys(pPool->pVMR3, pPage->pvPageR3));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPage->fZeroed = false; /* This could probably be optimized, but better safe than sorry. */
4946f90c5c7016131555f0c925091d4ede6bdde0vboxsync pPool->aPages[pPool->cCurPages - 1].iNext = NIL_PGMPOOL_IDX;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Zap and reinitialize the user records.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned i = 0; i < cMaxUsers; i++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync paUsers[cMaxUsers - 1].iNext = NIL_PGMPOOL_USER_INDEX;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Clear all the GCPhys links and rebuild the phys ext free list.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRanges);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync while (iPage-- > 0)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pRam->aPages[iPage].HCPhys &= MM_RAM_FLAGS_NO_REFS_MASK; /** @todo PAGE FLAGS */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned i = 0; i < cMaxPhysExts; i++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync paPhysExts[cMaxPhysExts - 1].iNext = NIL_PGMPOOL_PHYSEXT_INDEX;
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Just zap the modified list.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Clear the GCPhys hash and the age list.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pPool->aiHash); i++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Flush all the special root pages.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Reinsert active pages into the hash and ensure monitoring chains are correct.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync for (unsigned i = PGMPOOL_IDX_FIRST_SPECIAL; i < PGMPOOL_IDX_FIRST; i++)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* ASSUMES that we're not sharing with any of the other special pages (safe for now). */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync int rc = PGMHandlerPhysicalChangeCallbacks(pVM, pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1),
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPool->pfnAccessHandlerR3, MMHyperCCToR3(pVM, pPage),
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPool->pfnAccessHandlerR0, MMHyperCCToR0(pVM, pPage),
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPool->pfnAccessHandlerRC, MMHyperCCToRC(pVM, pPage),
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync Assert(pPage->iUserHead == NIL_PGMPOOL_USER_INDEX); /* for now */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /* Pop the subset. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Finally, assert the FF.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync VM_FF_SET(pPool->CTX_SUFF(pVM), VM_FF_PGM_SYNC_CR3);
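/*
 * Minimal sketch of how the free list and the user records get rebuilt during
 * a full flush, assuming the usual chain-everything-then-terminate pattern
 * suggested by the NIL terminator assignments above.  iFreeHead, iFreeUserHead
 * and the exact per-page resets are assumptions.
 */
#if 0 /* illustrative sketch */
    /* Rebuild the page free list. */
    pPool->iFreeHead = PGMPOOL_IDX_FIRST;
    for (unsigned i = PGMPOOL_IDX_FIRST; i < pPool->cCurPages; i++)
        pPool->aPages[i].iNext = i + 1;
    pPool->aPages[pPool->cCurPages - 1].iNext = NIL_PGMPOOL_IDX;

    /* Rebuild the user record free list the same way. */
    PPGMPOOLUSER   paUsers   = pPool->CTX_SUFF(paUsers);
    const unsigned cMaxUsers = pPool->cMaxUsers;
    pPool->iUserFreeHead = 0;
    for (unsigned i = 0; i < cMaxUsers; i++)
        paUsers[i].iNext = i + 1;
    paUsers[cMaxUsers - 1].iNext = NIL_PGMPOOL_USER_INDEX;
#endif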
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Flushes a pool page.
4946f90c5c7016131555f0c925091d4ede6bdde0vboxsync * This moves the page to the free list after removing all user references to it.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * In GC this will cause a CR3 reload if the page is traced back to an active root page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @returns VBox status code.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @retval VINF_SUCCESS on success.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @retval VERR_PGM_POOL_CLEARED if the deregistration of the physical handler will cause a light weight pool flush.
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * @param pPool The pool.
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * @param pPage The shadow page pool page to flush.
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsyncint pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync LogFlow(("pgmPoolFlushPage: pPage=%p:{.Key=%RHp, .idx=%d, .enmKind=%d, .GCPhys=%RGp}\n",
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync pPage, pPage->Core.Key, pPage->idx, pPage->enmKind, pPage->GCPhys));
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * Quietly reject any attempts at flushing any of the special root pages.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync Log(("pgmPoolFlushPage: special root page, rejected. enmKind=%d idx=%d\n", pPage->enmKind, pPage->idx));
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * Quietly reject any attempts at flushing the currently active shadow CR3 mapping
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync if (PGMGetHyperCR3(pPool->CTX_SUFF(pVM)) == pPage->Core.Key)
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync AssertMsg(pPage->enmKind == PGMPOOLKIND_64BIT_PML4_FOR_64BIT_PML4,
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(pPool->CTX_SUFF(pVM)), pPage->Core.Key, pPage->enmKind));
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync Log(("pgmPoolFlushPage: current active shadow CR3, rejected. enmKind=%d idx=%d\n", pPage->enmKind, pPage->idx));
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync /* Start a subset so we won't run out of mapping space. */
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * Mark the page as being in need of an ASMMemZeroPage().
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * Clear the page.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * Flush it from the cache.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync#endif /* PGMPOOL_WITH_CACHE */
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync /* Heavy stuff done. */
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * Deregister the monitoring.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * Free the page.
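/*
 * Minimal sketch of the order of operations suggested by the phase comments in
 * pgmPoolFlushPage: drop references, flush the cache entry, deregister the
 * monitoring, then return the page to the free list.  The helper names other
 * than pgmPoolTrackDeref, and the exact bookkeeping, are assumptions.
 */
#if 0 /* illustrative sketch */
    pPage->fZeroed = false;                      /* needs ASMMemZeroPage() before reuse */
    pgmPoolTrackDeref(pPool, pPage);             /* drop references held by this page */
    pgmPoolCacheFlushPage(pPool, pPage);         /* remove it from the GCPhys cache */
    int rc = pgmPoolMonitorFlush(pPool, pPage);  /* deregister the write monitoring */
    pPage->iNext = pPool->iFreeHead;             /* finally, push it onto the free list */
    pPool->iFreeHead = pPage->idx;
    pPool->cUsedPages--;
#endif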
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * Frees a usage of a pool page.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * The caller is responsible for updating the user table so that it no longer
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * references the shadow page.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * @param pPool The pool.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * @param pPage The shadow page pool page whose usage is being freed.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * @param iUser The shadow page pool index of the user table.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * @param iUserTable The index into the user table (shadowed).
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsyncvoid pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync LogFlow(("pgmPoolFreeByPage: pPage=%p:{.Key=%RHp, .idx=%d, enmKind=%d} iUser=%#x iUserTable=%#x\n",
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync pPage, pPage->Core.Key, pPage->idx, pPage->enmKind, iUser, iUserTable));
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync pgmPoolTrackFreeUser(pPool, pPage, iUser, iUserTable);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync pgmPoolFlushPage(pPool, pPage); /* ASSUMES that VERR_PGM_POOL_CLEARED can be ignored here. */
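/*
 * Hypothetical caller sketch: a shadow paging path that stops using a shadow
 * PT first clears its own PDE (so the user table no longer references the pool
 * page, as required above) and then frees the usage.  The variable names and
 * the surrounding context are illustrative only.
 */
#if 0 /* illustrative sketch */
    PPGMPOOLPAGE pShwPage = pgmPoolGetPageByHCPhys(pVM, pShwPde->u & X86_PDE_PAE_PG_MASK);
    pShwPde->u = 0;                                           /* caller updates the user table... */
    pgmPoolFreeByPage(pPool, pShwPage, pUserPage->idx, iPd);  /* ...then drops the usage */
#endif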
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * Makes one or more pages free.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * @returns VBox status code.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * @retval VINF_SUCCESS on success.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * @param pPool The pool.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * @param iUser The user of the page.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsyncstatic int pgmPoolMakeMoreFreePages(PPGMPOOL pPool, uint16_t iUser)
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync LogFlow(("pgmPoolMakeMoreFreePages: iUser=%#x\n", iUser));
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * If the pool isn't full grown yet, expand it.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync int rc = CTXALLMID(VMM, CallHost)(pPool->CTX_SUFF(pVM), VMMCALLHOST_PGM_POOL_GROW, 0);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * Free one cached page.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * Flush the pool.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * If we have tracking enabled, it should be possible to come up with
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * a cheap replacement strategy...
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync /** @todo Incompatible with long mode paging (the CR3 root will be flushed). */
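/*
 * Minimal sketch of the decision cascade described above: grow the pool while
 * it can still grow, otherwise free a cached page, and as a last resort flush
 * the whole pool.  pgmPoolCacheFreeOne is an assumed cache helper; the rest
 * reuses names visible in this function.
 */
#if 0 /* illustrative sketch */
    if (pPool->cCurPages < pPool->cMaxPages)
        return CTXALLMID(VMM, CallHost)(pPool->CTX_SUFF(pVM), VMMCALLHOST_PGM_POOL_GROW, 0);
# ifdef PGMPOOL_WITH_CACHE
    return pgmPoolCacheFreeOne(pPool, iUser);       /* evict one cached page */
# else
    pgmPoolFlushAllInt(pPool);                      /* nuke everything... */
    return VERR_PGM_POOL_FLUSHED;                   /* ...and tell the caller to restart */
# endif
#endif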
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * Allocates a page from the pool.
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * This page may actually be a cached page and not in need of any processing
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * on the caller's part.
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * @returns VBox status code.
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * @retval VINF_SUCCESS if a NEW page was allocated.
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * @retval VINF_PGM_CACHED_PAGE if a CACHED page was returned.
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * @param pVM The VM handle.
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * @param GCPhys The GC physical address of the page we're going to shadow.
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * For 4MB and 2MB PD entries, it's the first address the
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * shadow PT is covering.
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * @param enmKind The kind of mapping.
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * @param iUser The shadow page pool index of the user table.
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * @param iUserTable The index into the user table (shadowed).
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync * @param ppPage Where to store the pointer to the page. NULL is stored here on failure.
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsyncint pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage)
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync LogFlow(("pgmPoolAlloc: GCPhys=%RGp enmKind=%d iUser=%#x iUserTable=%#x\n", GCPhys, enmKind, iUser, iUserTable));
152d786a21a506f9e2a2e16ba8efdc2bcae133abvboxsync Assert(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync int rc2 = pgmPoolCacheAlloc(pPool, GCPhys, enmKind, iUser, iUserTable, ppPage);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync LogFlow(("pgmPoolAlloc: cached returns %Rrc *ppPage=%p:{.Key=%RHp, .idx=%d}\n", rc2, *ppPage, (*ppPage)->Core.Key, (*ppPage)->idx));
ad27e1d5e48ca41245120c331cc88b50464813cevboxsync * Allocate a new one.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync Log(("pgmPoolMakeMoreFreePages failed with %Rrc -> return VERR_PGM_POOL_FLUSHED\n", rc));
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync AssertReleaseReturn(iNew != NIL_PGMPOOL_IDX, VERR_INTERNAL_ERROR);
4946f90c5c7016131555f0c925091d4ede6bdde0vboxsync /* unlink the free head */
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * Initialize it.
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync pPool->cUsedPages++; /* physical handler registration / pgmPoolTrackFlushGCPhysPTsSlow requirement. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pPage->fSeenNonGlobal = false; /* Set this to 'true' to disable this feature. */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Insert into the tracking and cache. If this fails, free the page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync int rc3 = pgmPoolTrackInsert(pPool, pPage, GCPhys, iUser, iUserTable);
4946f90c5c7016131555f0c925091d4ede6bdde0vboxsync Log(("pgmPoolAlloc: returns %Rrc (Insert)\n", rc3));
4946f90c5c7016131555f0c925091d4ede6bdde0vboxsync Log(("pgmPoolTrackInsert failed with %Rrc -> return VERR_PGM_POOL_FLUSHED\n", rc3));
4946f90c5c7016131555f0c925091d4ede6bdde0vboxsync#endif /* PGMPOOL_WITH_USER_TRACKING */
4946f90c5c7016131555f0c925091d4ede6bdde0vboxsync * Commit the allocation, clear the page and return.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync LogFlow(("pgmPoolAlloc: returns %Rrc *ppPage=%p:{.Key=%RHp, .idx=%d, .fCached=%RTbool, .fMonitored=%RTbool}\n",
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync rc, pPage, pPage->Core.Key, pPage->idx, pPage->fCached, pPage->fMonitored));
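/*
 * Hypothetical caller sketch: a shadow PD sync path allocating a shadow PT for
 * a guest PT and wiring it into its shadow PDE.  The pool kind, the PDE flags
 * and the variable names are illustrative only; VINF_PGM_CACHED_PAGE just
 * means an existing pool page was reused.
 */
#if 0 /* illustrative sketch */
    PPGMPOOLPAGE pShwPage;
    int rc = pgmPoolAlloc(pVM, GCPhysPT, PGMPOOLKIND_PAE_PT_FOR_PAE_PT,
                          pPdPage->idx, iPdShw, &pShwPage);
    if (rc == VINF_SUCCESS || rc == VINF_PGM_CACHED_PAGE)
        pPdeDst->u = (pShwPage->Core.Key & X86_PDE_PAE_PG_MASK)
                   | X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_A;
#endif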
4946f90c5c7016131555f0c925091d4ede6bdde0vboxsync * Frees a usage of a pool page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pVM The VM handle.
4946f90c5c7016131555f0c925091d4ede6bdde0vboxsync * @param HCPhys The HC physical address of the shadow page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param iUser The shadow page pool index of the user table.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param iUserTable The index into the user table (shadowed).
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncvoid pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync LogFlow(("pgmPoolFree: HCPhys=%RHp iUser=%#x iUserTable=%#x\n", HCPhys, iUser, iUserTable));
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, HCPhys), iUser, iUserTable);
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * Gets an in-use page in the pool by its physical address.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @returns Pointer to the page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param pVM The VM handle.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @param HCPhys The HC physical address of the shadow page.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync * @remark This function will NEVER return NULL. It will assert if HCPhys is invalid.
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsyncPPGMPOOLPAGE pgmPoolGetPageByHCPhys(PVM pVM, RTHCPHYS HCPhys)
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync /** @todo profile this! */
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync PPGMPOOLPAGE pPage = pgmPoolGetPage(pPool, HCPhys);
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync Log3(("pgmPoolGetPageByHCPhys: HCPhys=%RHp -> %p:{.idx=%d .GCPhys=%RGp .enmKind=%d}\n",
9dca051a5f8ff457ef1692990f6ecfa280daf265vboxsync HCPhys, pPage, pPage->idx, pPage->GCPhys, pPage->enmKind));
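/*
 * Minimal sketch of the lookup this wraps, assuming pgmPoolGetPage is a simple
 * search of the pool's HCPhysTree keyed on the page-aligned HC physical
 * address; the masking and the assertion are assumptions, which is why an
 * invalid HCPhys is expected to assert rather than return NULL.
 */
#if 0 /* illustrative sketch */
DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageSketch(PPGMPOOL pPool, RTHCPHYS HCPhys)
{
    PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
    AssertFatalMsg(pPage, ("HCPhys=%RHp\n", HCPhys));
    return pPage;
}
#endif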
46a78ba0ce1d037aaed54f3df16ebd9c0b70ed39vboxsync * Flushes the entire cache.
dba0e7f8f385de972564b6917e305b8f53ea3480vboxsync * It will assert a global CR3 flush (FF) and assumes the caller is aware of this
dba0e7f8f385de972564b6917e305b8f53ea3480vboxsync * and will execute the CR3 flush.
dba0e7f8f385de972564b6917e305b8f53ea3480vboxsync * @param pPool The pool.