/** @file
 * PGM - Internal header file.
 * Revision 5cb0f7646b848b0466664f25d55b5a88cae297ea.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */
/** @defgroup grp_pgm_int Internals
 * @ingroup grp_pgm
 * @internal
 */
/** @name PGM Compile Time Config
 */

/**
 * Indicates that there are no guest mappings in the shadow tables.
 *
 * Note! In ring-3 the macro is also used to exclude the management of the
 * intermediate context page tables. On 32-bit systems we use the intermediate
 * context to support 64-bit guest execution. Thus, we cannot fully make it
 * without mappings there even when VBOX_WITH_RAW_MODE is not defined.
 *
 * In raw-mode context there are by design always guest mappings (the code is
 * executed from one), while in ring-0 there are none at all. Neither context
 * manages the page tables for the intermediate switcher context; that's all
 * done in ring-3.
 *
 * On 32-bit darwin (hybrid kernel) we do 64-bit guest support differently, so
 * there we can safely work without mappings if we don't compile in raw-mode.
 */
#if defined(IN_RING0) \
 || (   !defined(VBOX_WITH_RAW_MODE) \
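/* The #if condition above is truncated in this excerpt.  A rough sketch of a
 * plausible completion, assuming this block is what defines PGM_WITHOUT_MAPPINGS
 * (tested with #ifndef near the end of this file) and that the 32-bit-host /
 * 64-bit-guest case is expressed via HC_ARCH_BITS and VBOX_WITH_64_BITS_GUESTS
 * (both assumptions, not confirmed by this excerpt):
 *
 *   #if defined(IN_RING0) \
 *    || (   !defined(VBOX_WITH_RAW_MODE) \
 *        && (   HC_ARCH_BITS != 32 \
 *            || !defined(VBOX_WITH_64_BITS_GUESTS) ) )
 *   # define PGM_WITHOUT_MAPPINGS
 *   #endif
 */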
/** Check and skip global PDEs for non-global flushes. */

/** Optimization for PAE page tables that are modified often. */
//#if 0 /* disabled again while debugging */

/** Large page support enabled only on 64-bit hosts; applies to nested paging only. */

/** Enables optimizations for MMIO handlers that exploit X86_TRAP_PF_RSVD and
 * VMX_EXIT_EPT_MISCONFIG. */
/** Sync N pages instead of a whole page table. */

/** Number of pages to sync during a page fault.
 *
 * When PGMPOOL_WITH_GCPHYS_TRACKING is enabled, using high values here causes
 * a lot of unnecessary extents and also is slower than taking more \#PFs.
 * Note that \#PFs are much more expensive in the VT-x/AMD-V case due to
 * world switch overhead, so let's sync more.
 *
 * Chose 32 based on the compile test in @bugref{4219}; 64 shows worse stats.
 * 32 again shows better results than 16; slightly more overhead in the \#PF
 * handler, but ~5% fewer faults. */
/** Number of PGMPhysRead/Write cache entries (must be <= sizeof(uint64_t)). */
#define PGM_MAX_PHYSCACHE_ENTRIES_MASK  (PGM_MAX_PHYSCACHE_ENTRIES-1)
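/* The PGM_MAX_PHYSCACHE_ENTRIES define itself is not shown in this excerpt.  A
 * minimal sketch consistent with the "must be <= sizeof(uint64_t)" note above,
 * i.e. at most 8 entries; the concrete value 8 is an assumption: */
#ifndef PGM_MAX_PHYSCACHE_ENTRIES
# define PGM_MAX_PHYSCACHE_ENTRIES      8
#endif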
/** @def PGMPOOL_CFG_MAX_GROW
 * The maximum number of pages to add to the pool in one go. */

/** @def VBOX_STRICT_PGM_HANDLER_VIRTUAL
 * Enables some extra assertions for virtual handlers (mainly phys2virt related). */

/** @def VBOX_WITH_NEW_LAZY_PAGE_ALLOC
 * Enables the experimental lazy page allocation code. */
/*#define VBOX_WITH_NEW_LAZY_PAGE_ALLOC */

/** @def VBOX_WITH_REAL_WRITE_MONITORED_PAGES
 * Enables real write monitoring of pages, i.e. mapping them read-only and
 * only making them writable when getting a write access \#PF. */
/** @name PDPT and PML4 flags.
 * These are placed in the three bits available for system programs in
 * the PDPT and PML4 entries.
 */
/** The entry is a permanent one and it must always be present.
 * Never free such an entry. */
/** Mapping (hypervisor allocated pagetable). */
/** @name Page directory flags.
 * These are placed in the three bits available for system programs in
 * the page directory entries.
 */
/** Mapping (hypervisor allocated pagetable). */
/** Made read-only to facilitate dirty bit tracking. */

/** @name Page flags.
 * These are placed in the three bits available for system programs in
 * the page entries.
 */
/** Made read-only to facilitate dirty bit tracking. */
/** Scanned and approved by CSAM (tm).
 * NOTE: Must be identical to the one defined in CSAMInternal.h!!
 * @todo Move PGM_PTFLAGS_* and PGM_PDFLAGS_* to VBox/vmm/pgm.h. */
/** @name Defines used to indicate the shadow and guest paging in the templates.
 */

/** Macro for checking if the guest is using paging.
 * @param   uGstType    PGM_TYPE_*
 * @param   uShwType    PGM_TYPE_*
 * @remark  ASSUMES certain order of the PGM_TYPE_* values.
 */

/** Macro for checking if the guest supports the NX bit.
 * @param   uGstType    PGM_TYPE_*
 * @param   uShwType    PGM_TYPE_*
 * @remark  ASSUMES certain order of the PGM_TYPE_* values.
 */
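/* The two macros documented above are elided in this excerpt.  A minimal sketch
 * of what they plausibly look like, assuming the PGM_TYPE_* values are ordered
 * REAL < PROT < 32BIT < PAE < AMD64 < NESTED < EPT; the macro names
 * PGM_WITH_PAGING and PGM_WITH_NX are assumptions as well: */
#if 0 /* illustrative sketch only */
# define PGM_WITH_PAGING(uGstType, uShwType) \
    (   (uGstType) >= PGM_TYPE_32BIT \
     && (uShwType) != PGM_TYPE_NESTED \
     && (uShwType) != PGM_TYPE_EPT)
# define PGM_WITH_NX(uGstType, uShwType) \
    (   (uGstType) >= PGM_TYPE_PAE \
     && (uShwType) != PGM_TYPE_NESTED \
     && (uShwType) != PGM_TYPE_EPT)
#endif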
#define PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhys, ppv) PGM_GCPHYS_2_PTR_V2((pVCpu)->CTX_SUFF(pVM), pVCpu, GCPhys, ppv)
# ifdef LOG_ENABLED
#ifdef IN_RC
#ifdef IN_RC
#ifdef IN_RC
#ifdef IN_RC
#ifdef IN_RC
typedef union PGMSHWPTEPAE
} PGMSHWPTEPAE;
# define PGMSHWPTEPAE_IS_P(Pte) ( ((Pte).uCareful & (X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == X86_PTE_P )
# define PGMSHWPTEPAE_IS_P_RW(Pte) ( ((Pte).uCareful & (X86_PTE_P | X86_PTE_RW | X86_PTE_PAE_MBZ_MASK_NX)) == (X86_PTE_P | X86_PTE_RW) )
# define PGMSHWPTEPAE_ATOMIC_SET(Pte, uVal) do { ASMAtomicWriteU64(&(Pte).uCareful, (uVal)); } while (0)
# define PGMSHWPTEPAE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU64(&(Pte).uCareful, (Pte2).uCareful); } while (0)
typedef struct PGMSHWPTPAE
} PGMSHWPTPAE;
# define PGMSHWPTEPAE_IS_P_RW(Pte) ( ((Pte).u & (X86_PTE_P | X86_PTE_RW)) == (X86_PTE_P | X86_PTE_RW) )
# define PGMSHWPTEPAE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU64(&(Pte).u, (Pte2).u); } while (0)
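/* The PGMSHWPTEPAE and PGMSHWPTPAE bodies are elided in this excerpt.  Given the
 * accessors above (a .uCareful view in the strict variant, a plain .u view in the
 * other, both written with ASMAtomicWriteU64), a plausible shape is sketched
 * below; the 512-entry table size (one PAE page table) is an assumption: */
#if 0 /* illustrative sketch only */
typedef union PGMSHWPTEPAE
{
    /** Unsigned 64-bit view used by the PGMSHWPTEPAE_* accessors. */
    uint64_t        uCareful;
} PGMSHWPTEPAE;

typedef struct PGMSHWPTPAE
{
    /** One shadow PTE per PAE page table entry. */
    PGMSHWPTEPAE    a[512];
} PGMSHWPTPAE;
#endif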
typedef struct PGMMAPPING
bool fFinalized;
} PGMMAPPING;
typedef struct PGMPHYSHANDLER
#ifdef VBOX_WITH_STATISTICS
typedef struct PGMPHYS2VIRTHANDLER
typedef struct PGMVIRTHANDLER
#ifdef VBOX_WITH_STATISTICS
typedef union PGMPAGE
} PGMPAGE;
PGM_PAGE_INIT((a_pPage), (a_pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (a_uType), PGM_PAGE_STATE_ZERO)
#define PGM_PAGE_STATE_ZERO 0
# define PGM_PAGE_GET_STATE(a_pPage) __extension__ ({ PGM_PAGE_ASSERT_LOCK(pVM); PGM_PAGE_GET_STATE_NA(a_pPage); })
# define PGM_PAGE_GET_HCPHYS(a_pPage) __extension__ ({ PGM_PAGE_ASSERT_LOCK(pVM); PGM_PAGE_GET_HCPHYS_NA(a_pPage); })
# define PGM_PAGE_GET_TYPE(a_pPage) __extension__ ({ PGM_PAGE_ASSERT_LOCK(pVM); PGM_PAGE_GET_TYPE_NA(a_pPage); })
#define PGM_PAGE_PDE_TYPE_DONTCARE 0
/** Can use a page directory entry to map the contiguous range - temporarily disabled (by page monitoring). */
#define PGM_PAGE_HNDL_PHYS_STATE_NONE 0
#define PGM_PAGE_HNDL_VIRT_STATE_NONE 0
# define PGM_PAGE_GET_TRACKING(a_pPage) __extension__ ({ PGM_PAGE_ASSERT_LOCK(pVM); PGM_PAGE_GET_TRACKING_NA(a_pPage); })
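/* The lock-asserting getters above pair with "_NA" (no-assert) variants that read
 * the PGMPAGE bit-fields directly without checking the PGM lock.  A minimal sketch
 * of the pattern; the field name uStateY is a hypothetical placeholder since the
 * PGMPAGE layout is elided in this excerpt: */
#if 0 /* illustrative sketch only */
# define PGM_PAGE_GET_STATE_NA(a_pPage)     ( (a_pPage)->s.uStateY )
#endif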
# define PGMLIVESAVERAMPAGE_WITH_CRC32
typedef struct PGMLIVESAVERAMPAGE
typedef struct PGMRAMRANGE
} PGMRAMRANGE;
(!!( (pRam)->fFlags & (PGM_RAM_RANGE_FLAGS_AD_HOC_ROM | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2) ) )
typedef struct PGMROMPAGE
bool fWrittenTo;
bool fDirty;
bool fDirtiedRecently;
} LiveSave;
} PGMROMPAGE;
typedef struct PGMROMRANGE
} PGMROMRANGE;
typedef struct PGMLIVESAVEMMIO2PAGE
bool fDirty;
bool fZero;
bool fReserved;
typedef struct PGMMMIO2RANGE
bool fMapped;
bool fOverlapping;
* PGMPhysRead/Write cache entry
typedef struct PGMPHYSCACHEENTRY
* PGMPhysRead/Write cache to reduce REM memory access overhead
typedef struct PGMPHYSCACHE
} PGMPHYSCACHE;
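/* The cache structures are elided in this excerpt.  A minimal sketch, assuming an
 * entry caches a guest-physical address together with its ring-3 mapping, and the
 * cache keeps its valid-entry bits in a single uint64_t (member names are
 * assumptions): */
#if 0 /* illustrative sketch only */
typedef struct PGMPHYSCACHEENTRY
{
    /** Ring-3 pointer to the cached page. */
    R3PTRTYPE(uint8_t *)        pbR3;
    /** Guest physical address of the cached page. */
    RTGCPHYS                    GCPhys;
} PGMPHYSCACHEENTRY;

typedef struct PGMPHYSCACHE
{
    /** Bitmap of valid cache entries. */
    uint64_t                    aEntries;
    /** The cache entries. */
    PGMPHYSCACHEENTRY           Entry[PGM_MAX_PHYSCACHE_ENTRIES];
} PGMPHYSCACHE;
#endif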
typedef struct PGMCHUNKR3MAP
void *pv;
typedef struct PGMCHUNKR3MAPTLBE
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
typedef struct PGMCHUNKR3MAPTLB
typedef struct PGMPAGER3MAPTLBE
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
typedef struct PGMPAGER3MAPTLB
typedef struct PGMRCDYNMAPENTRY
struct PGMRCDYNMAPENTRY_PPTE
} uPte;
* paDynPageMap* PGM members. However, it has to be defined in PGMInternal.h
typedef struct PGMRCDYNMAP
} PGMRCDYNMAP;
typedef struct PGMMAPSETENTRY
#ifndef IN_RC
typedef struct PGMMAPSET
} PGMMAPSET;
#ifdef IN_RC
typedef void * PPGMPAGEMAP;
typedef void ** PPPGMPAGEMAP;
#define NIL_PGMPOOL_IDX 0
typedef struct PGMPOOLUSER
#pragma pack()
typedef struct PGMPOOLPHYSEXT
#pragma pack()
typedef enum PGMPOOLKIND
PGMPOOLKIND_INVALID = 0,
} PGMPOOLKIND;
typedef struct PGMPOOLPAGE
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
bool fPadding2;
/** Used to indicate that this page can't be flushed. Important for CR3 root pages or shadow PAE PD pages. */
# ifdef VBOX_STRICT
} PGMPOOLPAGE;
typedef struct PGMPOOL
bool fCacheEnabled;
#ifdef VBOX_WITH_STATISTICS
#ifdef VBOX_WITH_STATISTICS
# define PGMPOOL_PAGE_2_PTR(a_pVM, a_pPage) pgmPoolMapPageInlined((a_pVM), (a_pPage) RTLOG_COMMA_SRC_POS)
AssertReleaseMsg(RT_VALID_PTR(a_pPage->pvPageR3), ("enmKind=%d idx=%#x HCPhys=%RHp GCPhys=%RGp caller=%s\n", a_pPage->enmKind, a_pPage->idx, a_pPage->Core.Key, a_pPage->GCPhys, pszCaller));
# define PGMPOOL_PAGE_2_PTR_V2(a_pVM, a_pVCpu, a_pPage) pgmPoolMapPageV2Inlined((a_pVM), (a_pVCpu), (a_pPage) RTLOG_COMMA_SRC_POS)
#define PGMPOOL_TD_IDX_SHIFT 0
#define PGM_WITH_A20
#ifdef PGM_WITH_A20
# define PGM_A20_ASSERT_MASKED(pVCpu, a_GCPhys) Assert(PGM_A20_APPLY(pVCpu, a_GCPhys) == (a_GCPhys))
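/* PGM_A20_APPLY and PGM_A20_IS_ENABLED are elided in this excerpt.  A minimal
 * sketch of definitions that would back the assertion above, assuming a per-VCPU
 * A20 gate mask field (the name GCPhysA20Mask is an assumption; fA20Enabled is a
 * PGMCPU member further down): */
#if 0 /* illustrative sketch only */
# define PGM_A20_IS_ENABLED(a_pVCpu)        ( (a_pVCpu)->pgm.s.fA20Enabled )
# define PGM_A20_APPLY(a_pVCpu, a_GCPhys)   ( (a_GCPhys) & (a_pVCpu)->pgm.s.GCPhysA20Mask )
#endif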
typedef struct PGMTREES
} PGMTREES;
typedef struct PGMPTWALKCORE
bool fSucceeded;
bool fNotPresent;
bool fBadPhysAddr;
bool fRsvdError;
bool fBigPage;
bool fGigantPage;
bool fEffectiveUS;
bool fEffectiveRW;
bool fEffectiveNX;
typedef struct PGMPTWALKGSTAMD64
typedef struct PGMPTWALKGSTPAE
typedef struct PGMPTWALKGST32BIT
typedef enum PGMPTWALKGSTTYPE
typedef struct PGMPTWALKGST
} PGMPTWALKGST;
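/* The guest page-table walk structures are elided in this excerpt.  PGMPTWALKGST
 * is presumably a tagged union over the mode-specific walk results declared above;
 * a minimal sketch (member names are assumptions): */
#if 0 /* illustrative sketch only */
typedef struct PGMPTWALKGST
{
    union
    {
        /** The walk core, valid for every guest paging mode. */
        PGMPTWALKCORE       Core;
        PGMPTWALKGSTAMD64   Amd64;
        PGMPTWALKGSTPAE     Pae;
        PGMPTWALKGST32BIT   Legacy;
    } u;
    /** Indicates which member of the union is valid. */
    PGMPTWALKGSTTYPE        enmType;
} PGMPTWALKGST;
#endif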
#ifdef IN_RC
# ifdef IN_RING3
typedef struct PGMMODEDATA
DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));
DECLRCCALLBACKMEMBER(int, pfnRCShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
DECLRCCALLBACKMEMBER(int, pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));
DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));
DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
DECLRCCALLBACKMEMBER(int, pfnRCGstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
DECLRCCALLBACKMEMBER(int, pfnRCGstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
DECLRCCALLBACKMEMBER(int, pfnRCBthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
DECLRCCALLBACKMEMBER(int, pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
DECLRCCALLBACKMEMBER(int, pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
DECLRCCALLBACKMEMBER(unsigned, pfnRCBthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
#ifdef VBOX_WITH_STATISTICS
typedef struct PGMSTATS
STAMCOUNTER StatR3DetectedConflicts; /**< R3: Number of times PGMR3MapHasConflicts() detected a conflict. */
STAMPROFILE StatR3ResolveConflict; /**< R3: pgmR3SyncPTResolveConflict() profiling (includes the entire relocation). */
STAMPROFILE StatRZSyncCR3HandlerVirtualReset; /**< RC/R0: Profiling of the virtual handler resets. */
STAMPROFILE StatRZSyncCR3HandlerVirtualUpdate; /**< RC/R0: Profiling of the virtual handler updates. */
STAMPROFILE StatR3SyncCR3HandlerVirtualUpdate; /**< R3: Profiling of the virtual handler updates. */
STAMCOUNTER StatR3PhysHandlerReset; /**< R3: The number of times PGMHandlerPhysicalReset is called. */
STAMCOUNTER StatRZPhysHandlerReset; /**< RC/R0: The number of times PGMHandlerPhysicalReset is called. */
STAMCOUNTER StatR3PhysHandlerLookupHits; /**< R3: Number of cache hits when looking up physical handlers. */
STAMCOUNTER StatR3PhysHandlerLookupMisses; /**< R3: Number of cache misses when looking up physical handlers. */
STAMCOUNTER StatRZPhysHandlerLookupHits; /**< RC/R0: Number of cache hits when looking up physical handlers. */
STAMCOUNTER StatRZPhysHandlerLookupMisses; /**< RC/R0: Number of cache misses when looking up physical handlers. */
STAMPROFILE StatRZVirtHandlerSearchByPhys; /**< RC/R0: Profiling of pgmHandlerVirtualFindByPhysAddr. */
STAMPROFILE StatR3VirtHandlerSearchByPhys; /**< R3: Profiling of pgmHandlerVirtualFindByPhysAddr. */
STAMCOUNTER StatRZPageReplaceShared; /**< RC/R0: Times a shared page has been replaced by a private one. */
STAMCOUNTER StatRZPageReplaceZero; /**< RC/R0: Times the zero page has been replaced by a private one. */
/// @todo STAMCOUNTER StatRZPageHandyAllocs; /**< RC/R0: The number of times we've executed GMMR3AllocateHandyPages. */
STAMCOUNTER StatR3PageReplaceShared; /**< R3: Times a shared page has been replaced by a private one. */
STAMCOUNTER StatR3PageReplaceZero; /**< R3: Times the zero page has been replaced by a private one. */
/// @todo STAMCOUNTER StatR3PageHandyAllocs; /**< R3: The number of times we've executed GMMR3AllocateHandyPages. */
STAMCOUNTER StatRCInvlPgConflict; /**< RC: Number of times PGMInvalidatePage() detected a mapping conflict. */
STAMCOUNTER StatRCInvlPgSyncMonCR3; /**< RC: Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3. */
STAMCOUNTER StatTrackAliased; /**< The number of times switching to cRef2, i.e. the page is being shadowed by two PTs. */
STAMCOUNTER StatTrackAliasedLots; /**< The number of times we're hitting pages which have overflowed cRef2. */
} PGMSTATS;
typedef struct PGM
bool fRamPreAlloc;
bool fNestedPaging;
bool fNoMorePhysWrites;
bool fPciPassthrough;
bool fFinalizedMappings;
bool fMappingsFixed;
bool fMappingsFixedRestored;
* @todo The plan of keeping PGMRCDYNMAP private to PGMRZDynMap.cpp didn't
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
uint32_t c;
* @cfgm PGM/MaxRing3Chunks */
} ChunkR3Map;
} Rom,
Ram;
bool fActive;
} LiveSave;
bool volatile fErrInjHandyPages;
uint32_t cAllPages; /**< The total number of pages. (Should be Private + Shared + Zero + Pure MMIO.) */
#ifdef VBOX_WITH_STATISTICS
} PGM;
typedef struct PGMCPUSTATS
STAMCOUNTER StatR0NpMiscfgSyncPage; /**< R0: SyncPage calls from PGMR0Trap0eHandlerNPMisconfig(). */
STAMPROFILE StatRZTrap0eTime2Ballooned; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is read access to a ballooned page. */
STAMPROFILE StatRZTrap0eTime2CSAM; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CSAM. */
STAMPROFILE StatRZTrap0eTime2DirtyAndAccessed; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation. */
STAMPROFILE StatRZTrap0eTime2GuestTrap; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a guest trap. */
STAMPROFILE StatRZTrap0eTime2HndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a physical handler. */
STAMPROFILE StatRZTrap0eTime2HndVirt; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a virtual handler. */
STAMPROFILE StatRZTrap0eTime2HndUnhandled; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page. */
STAMPROFILE StatRZTrap0eTime2InvalidPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is access to an invalid physical guest address. */
STAMPROFILE StatRZTrap0eTime2MakeWritable; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a page that needed to be made writable. */
STAMPROFILE StatRZTrap0eTime2Mapping; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is the guest mappings. */
STAMPROFILE StatRZTrap0eTime2Misc; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is not known. */
STAMPROFILE StatRZTrap0eTime2OutOfSync; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync page. */
STAMPROFILE StatRZTrap0eTime2OutOfSyncHndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page. */
STAMPROFILE StatRZTrap0eTime2OutOfSyncHndVirt; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page. */
STAMPROFILE StatRZTrap0eTime2OutOfSyncHndObs; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an obsolete handler page. */
STAMPROFILE StatRZTrap0eTime2SyncPT; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT. */
STAMPROFILE StatRZTrap0eTime2WPEmulation; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CR0.WP emulation. */
STAMPROFILE StatRZTrap0eTime2Wp0RoUsHack; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CR0.WP and the netware hack needs to be enabled. */
STAMPROFILE StatRZTrap0eTime2Wp0RoUsUnhack; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CR0.WP and the netware hack needs to be disabled. */
STAMCOUNTER StatRZTrap0eConflicts; /**< RC/R0: The number of times \#PF was caused by an undetected conflict. */
STAMCOUNTER StatRZTrap0eHandlersMapping; /**< RC/R0: Number of traps due to access handlers in mappings. */
STAMCOUNTER StatRZTrap0eHandlersPhysAll; /**< RC/R0: Number of traps due to physical all-access handlers. */
STAMCOUNTER StatRZTrap0eHandlersPhysAllOpt; /**< RC/R0: Number of the physical all-access handler traps using the optimization. */
STAMCOUNTER StatRZTrap0eHandlersPhysWrite; /**< RC/R0: Number of traps due to write-physical access handlers. */
STAMCOUNTER StatRZTrap0eHandlersVirtual; /**< RC/R0: Number of traps due to virtual access handlers. */
STAMCOUNTER StatRZTrap0eHandlersVirtualByPhys; /**< RC/R0: Number of traps due to virtual access handlers found by physical address. */
STAMCOUNTER StatRZTrap0eHandlersVirtualUnmarked;/**< RC/R0: Number of traps due to virtual access handlers found by virtual address (without proper physical flags). */
STAMCOUNTER StatRZTrap0eHandlersUnhandled; /**< RC/R0: Number of traps due to access outside range of monitored page(s). */
STAMCOUNTER StatRZTrap0eHandlersInvalid; /**< RC/R0: Number of traps due to access to invalid physical memory. */
STAMCOUNTER StatRZGuestCR3WriteHandled; /**< RC/R0: The number of times WriteHandlerCR3() was successfully called. */
STAMCOUNTER StatRZGuestCR3WriteUnhandled; /**< RC/R0: The number of times WriteHandlerCR3() was called and we had to fall back to the recompiler. */
STAMCOUNTER StatRZGuestCR3WriteConflict; /**< RC/R0: The number of times WriteHandlerCR3() was called and a conflict was detected. */
STAMCOUNTER StatRZGuestROMWriteHandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was successfully called. */
STAMCOUNTER StatRZGuestROMWriteUnhandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was called and we had to fall back to the recompiler */
STAMCOUNTER StatRZDynMapPageSlowLoopMisses; /**< RZ: Misses in the pgmR0DynMapPageSlow search loop. */
STAMPROFILE StatRZSyncCR3Handlers; /**< RC/R0: Profiling of the PGMSyncCR3() update handler section. */
STAMCOUNTER StatRZSyncCR3DstCacheHit; /**< RC/R0: The number of times we got some kind of cache hit on a page table. */
STAMCOUNTER StatRZSyncCR3DstFreed; /**< RC/R0: The number of times we've had to free a shadow entry. */
STAMCOUNTER StatRZSyncCR3DstFreedSrcNP; /**< RC/R0: The number of times we've had to free a shadow entry for which the source entry was not present. */
STAMCOUNTER StatRZSyncCR3DstNotPresent; /**< RC/R0: The number of times we've encountered a not present shadow entry for a present guest entry. */
STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPD; /**< RC/R0: The number of times a global page directory wasn't flushed. */
STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPT; /**< RC/R0: The number of times a page table with only global entries wasn't flushed. */
STAMCOUNTER StatRZSyncPagePDNAs; /**< RC/R0: The number of times we've marked a PD not present from SyncPage to virtualize the accessed bit. */
STAMCOUNTER StatRZSyncPagePDOutOfSync; /**< RC/R0: The number of times we've encountered an out-of-sync PD in SyncPage. */
STAMCOUNTER StatRZAccessedPage; /**< RC/R0: The number of pages marked not present for accessed bit emulation. */
STAMPROFILE StatRZDirtyBitTracking; /**< RC/R0: Profiling the dirty bit tracking in CheckPageFault(). */
STAMCOUNTER StatRZDirtyPage; /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
STAMCOUNTER StatRZDirtyPageBig; /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
STAMCOUNTER StatRZDirtyPageTrap; /**< RC/R0: The number of traps generated for dirty bit tracking. */
STAMCOUNTER StatRZDirtyPageStale; /**< RC/R0: The number of traps generated for dirty bit tracking (stale TLB entries). */
STAMCOUNTER StatRZDirtyTrackRealPF; /**< RC/R0: The number of real page faults during dirty bit tracking. */
STAMCOUNTER StatRZDirtiedPage; /**< RC/R0: The number of pages marked dirty because of write accesses. */
STAMCOUNTER StatRZPageAlreadyDirty; /**< RC/R0: The number of pages already marked dirty because of write accesses. */
STAMCOUNTER StatRZInvalidatePage4KBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4KB page. */
STAMCOUNTER StatRZInvalidatePage4MBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4MB page. */
STAMCOUNTER StatRZInvalidatePage4MBPagesSkip; /**< RC/R0: The number of times PGMInvalidatePage() skipped a 4MB page. */
STAMCOUNTER StatRZInvalidatePagePDMappings; /**< RC/R0: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
STAMCOUNTER StatRZInvalidatePagePDNAs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
STAMCOUNTER StatRZInvalidatePagePDNPs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not present page directory. */
STAMCOUNTER StatRZInvalidatePagePDOutOfSync; /**< RC/R0: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
STAMCOUNTER StatRZInvalidatePageSkipped; /**< RC/R0: The number of times PGMInvalidatePage() was skipped due to a not-present shadow entry or a pending SyncCR3. */
STAMCOUNTER StatRZPageOutOfSyncUser; /**< RC/R0: The number of times an out-of-sync user page was detected in \#PF or VerifyAccessSyncPage. */
STAMCOUNTER StatRZPageOutOfSyncSupervisor; /**< RC/R0: The number of times an out-of-sync supervisor page was detected in \#PF or VerifyAccessSyncPage. */
STAMCOUNTER StatRZPageOutOfSyncUserWrite; /**< RC/R0: The number of times an out-of-sync user page was detected in \#PF. */
STAMCOUNTER StatRZPageOutOfSyncSupervisorWrite; /**< RC/R0: The number of times an out-of-sync supervisor page was detected in \#PF. */
STAMCOUNTER StatRZPageOutOfSyncBallloon; /**< RC/R0: The number of times a ballooned page was accessed (read). */
STAMCOUNTER StatRZFlushTLBNewCR3; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
STAMCOUNTER StatRZFlushTLBNewCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
STAMCOUNTER StatRZFlushTLBSameCR3; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
STAMCOUNTER StatRZFlushTLBSameCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
STAMPROFILE StatR3SyncCR3Handlers; /**< R3: Profiling of the PGMSyncCR3() update handler section. */
STAMCOUNTER StatR3SyncCR3DstFreed; /**< R3: The number of times we've had to free a shadow entry. */
STAMCOUNTER StatR3SyncCR3DstFreedSrcNP; /**< R3: The number of times we've had to free a shadow entry for which the source entry was not present. */
STAMCOUNTER StatR3SyncCR3DstNotPresent; /**< R3: The number of times we've encountered a not present shadow entry for a present guest entry. */
STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPD; /**< R3: The number of times a global page directory wasn't flushed. */
STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPT; /**< R3: The number of times a page table with only global entries wasn't flushed. */
STAMCOUNTER StatR3SyncCR3DstCacheHit; /**< R3: The number of times we got some kind of cache hit on a page table. */
STAMCOUNTER StatR3SyncPagePDNAs; /**< R3: The number of times we've marked a PD not present from SyncPage to virtualize the accessed bit. */
STAMCOUNTER StatR3SyncPagePDOutOfSync; /**< R3: The number of times we've encountered an out-of-sync PD in SyncPage. */
STAMCOUNTER StatR3AccessedPage; /**< R3: The number of pages marked not present for accessed bit emulation. */
STAMPROFILE StatR3DirtyBitTracking; /**< R3: Profiling the dirty bit tracking in CheckPageFault(). */
STAMCOUNTER StatR3DirtyPage; /**< R3: The number of pages marked read-only for dirty bit tracking. */
STAMCOUNTER StatR3DirtyPageBig; /**< R3: The number of pages marked read-only for dirty bit tracking. */
STAMCOUNTER StatR3DirtyTrackRealPF; /**< R3: The number of real page faults during dirty bit tracking. */
STAMCOUNTER StatR3DirtiedPage; /**< R3: The number of pages marked dirty because of write accesses. */
STAMCOUNTER StatR3PageAlreadyDirty; /**< R3: The number of pages already marked dirty because of write accesses. */
STAMCOUNTER StatR3InvalidatePage4KBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4KB page. */
STAMCOUNTER StatR3InvalidatePage4MBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4MB page. */
STAMCOUNTER StatR3InvalidatePage4MBPagesSkip; /**< R3: The number of times PGMInvalidatePage() skipped a 4MB page. */
STAMCOUNTER StatR3InvalidatePagePDNAs; /**< R3: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
STAMCOUNTER StatR3InvalidatePagePDNPs; /**< R3: The number of times PGMInvalidatePage() was called for a not present page directory. */
STAMCOUNTER StatR3InvalidatePagePDMappings; /**< R3: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
STAMCOUNTER StatR3InvalidatePagePDOutOfSync; /**< R3: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
STAMCOUNTER StatR3InvalidatePageSkipped; /**< R3: The number of times PGMInvalidatePage() was skipped due to a not-present shadow entry or a pending SyncCR3. */
STAMCOUNTER StatR3PageOutOfSyncUser; /**< R3: The number of times an out-of-sync user page was detected in \#PF or VerifyAccessSyncPage. */
STAMCOUNTER StatR3PageOutOfSyncSupervisor; /**< R3: The number of times an out-of-sync supervisor page was detected in \#PF or VerifyAccessSyncPage. */
STAMCOUNTER StatR3PageOutOfSyncUserWrite; /**< R3: The number of times an out-of-sync user page was detected in \#PF. */
STAMCOUNTER StatR3PageOutOfSyncSupervisorWrite; /**< R3: The number of times an out-of-sync supervisor page was detected in \#PF. */
STAMCOUNTER StatR3PageOutOfSyncBallloon; /**< R3: The number of times a ballooned page was accessed (read). */
STAMCOUNTER StatR3FlushTLBNewCR3; /**< R3: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
STAMCOUNTER StatR3FlushTLBNewCR3Global; /**< R3: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
STAMCOUNTER StatR3FlushTLBSameCR3; /**< R3: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
STAMCOUNTER StatR3FlushTLBSameCR3Global; /**< R3: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
} PGMCPUSTATS;
typedef struct PGMCPU
bool fA20Enabled;
bool fNoExecuteEnabled;
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
/** The physical addresses of the guest page directories (PAE) pointed to by apGstPagePDsHC/GC.
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));
DECLRCCALLBACKMEMBER(int, pfnRCShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
DECLRCCALLBACKMEMBER(int, pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));
DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));
DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
DECLRCCALLBACKMEMBER(int, pfnRCGstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
DECLRCCALLBACKMEMBER(int, pfnRCGstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
DECLRCCALLBACKMEMBER(int, pfnRCBthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
DECLRCCALLBACKMEMBER(int, pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
DECLRCCALLBACKMEMBER(int, pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
DECLRCCALLBACKMEMBER(unsigned, pfnRCBthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
} PGMCPU;
#define PGM_LOCK_ASSERT_OWNER_EX(a_pVM, a_pVCpu) Assert(PDMCritSectIsOwnerEx(&(a_pVM)->pgm.s.CritSectX, (a_pVCpu)))
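/* A minimal sketch of the single-owner sibling implied by the _EX form above; the
 * name PGM_LOCK_ASSERT_OWNER is an assumption: */
#if 0 /* illustrative sketch only */
# define PGM_LOCK_ASSERT_OWNER(a_pVM)   Assert(PDMCritSectIsOwner(&(a_pVM)->pgm.s.CritSectX))
#endif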
#ifndef PGM_WITHOUT_MAPPINGS
int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping);
void pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, bool fDoAccounting);
int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage);
# define pgmHandlerVirtualDumpPhysPages(a) do { } while (0)
int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock);
int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock);
VMMDECL(int) pgmPhysHandlerRedirectToHC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys);
#ifdef IN_RING3
DECLCALLBACK(VBOXSTRICTRC) pgmR3PoolClearAllRendezvous(PVM pVM, PVMCPU pVCpu, void *fpvFlushRemTbl);
int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL);
# ifdef LOG_ENABLED
int pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, bool fA20Enabled,
int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fFlush = true /* DO NOT USE false UNLESS YOU KNOW WHAT YOU'RE DOING!! */);
int pgmPoolTrackUpdateGCPhys(PVM pVM, RTGCPHYS GCPhysPage, PPGMPAGE pPhysPage, bool fFlushPTEs, bool *pfFlushTLBs);
void pgmPoolTracDerefGCPhysHint(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhysHint, uint16_t iPte);
uint16_t pgmPoolTrackPhysExtAddref(PVM pVM, PPGMPAGE pPhysPage, uint16_t u16, uint16_t iShwPT, uint16_t iPte);
void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte);
void pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, unsigned cbWrite);
void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3);
int pgmShwMakePageSupervisorAndWritable(PVMCPU pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags);