PGM.cpp revision 4ca9552b3be9ce15c5c33f07c0146b287411d306
/* $Id$ */
/** @file
* PGM - Page Manager and Monitor. (Mixing stuff here, not good?)
*/
/*
* Copyright (C) 2006 InnoTek Systemberatung GmbH
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License as published by the Free Software Foundation,
* in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
* distribution. VirtualBox OSE is distributed in the hope that it will
* be useful, but WITHOUT ANY WARRANTY of any kind.
*
* If you received this file as part of a commercial VirtualBox
* distribution, then only the terms of your commercial VirtualBox
* license agreement apply instead of the previous paragraph.
*/
/** @page pg_pgm PGM - The Page Manager and Monitor
*
*
*
* @section sec_pg_modes Paging Modes
*
* There are three memory contexts: Host Context (HC), Guest Context (GC)
* and intermediate context. When talking about paging HC can also be referred to
* as "host paging", and GC referred to as "shadow paging".
*
* We define three basic paging modes: 32-bit, PAE and AMD64. The host paging mode
* is defined by the host operating system. The mode used in the shadow paging mode
* depends on the host paging mode and what the mode the guest is currently in. The
* following relation between the two is defined:
*
* @verbatim
Host > 32-bit | PAE | AMD64 |
Guest | | | |
==v================================
32-bit 32-bit PAE PAE
-------|--------|--------|--------|
PAE PAE PAE PAE
-------|--------|--------|--------|
AMD64 AMD64 AMD64 AMD64
-------|--------|--------|--------| @endverbatim
*
* All configuration except those in the diagonal (upper left) are expected to
* require special effort from the switcher (i.e. a bit slower).
*
*
*
*
* @section sec_pg_shw The Shadow Memory Context
*
*
* [..]
*
* Because guest context mappings require PDPTR and PML4 entries to allow
* writing on AMD64, the two upper levels will have fixed flags whatever the
* guest is thinking of using there. So, when shadowing the PD level we will
* calculate the effective flags of PD and all the higher levels. In legacy
* PAE mode this only applies to the PWT and PCD bits (the rest are reserved).
*
*
*
* @section sec_pg_int The Intermediate Memory Context
*
* The world switch goes through an intermediate memory context whose purpose is
* to provide different mappings of the switcher code. All guest mappings are also
* present in this context.
*
* The switcher code is mapped at the same location as on the host, at an
* identity mapped location (physical equals virtual address), and at the
* hypervisor location.
*
* PGM maintain page tables for 32-bit, PAE and AMD64 paging modes. This
* simplifies switching guest CPU mode and consistency at the cost of more
* code to do the work. All memory use for those page tables is located below
* 4GB (this includes page tables for guest context mappings).
*
*
* @subsection subsec_pg_int_gc Guest Context Mappings
*
* During assignment and relocation of a guest context mapping the intermediate
* memory context is used to verify the new location.
*
* Guest context mappings are currently restricted to below 4GB, for reasons
* of simplicity. This may change when we implement AMD64 support.
*
*
*
*
* @section sec_pg_misc Misc
*
* @subsection subsec_pg_misc_diff Differences Between Legacy PAE and Long Mode PAE
*
* The differences between legacy PAE and long mode PAE are:
* -# PDPE bits 1, 2, 5 and 6 are defined differently. In legacy mode they are
* all marked down as must-be-zero, while in long mode 1, 2 and 5 have the
* usual meanings while 6 is ignored (AMD). This means that upon switching to
* legacy PAE mode we'll have to clear these bits and when going to long mode
* they must be set. This applies to both intermediate and shadow contexts,
* however we don't need to do it for the intermediate one since we're
* executing with CR0.WP at that time.
* -# CR3 allows a 32-byte aligned address in legacy mode, while in long mode
* a page aligned one is required.
*/
/** Saved state data unit version. */
#define PGM_SAVED_STATE_VERSION 5
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include "PGMInternal.h"
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
static PGMMODE pgmR3CalcShadowMode(PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode, VMMSWITCHER *penmSwitcher);
#ifdef VBOX_WITH_STATISTICS
#endif
#ifdef VBOX_WITH_DEBUGGER
/** @todo all but the two last commands must be converted to 'info'. */
static DECLCALLBACK(int) pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
static DECLCALLBACK(int) pgmR3CmdMap(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
#endif
/*******************************************************************************
* Global Variables *
*******************************************************************************/
#ifdef VBOX_WITH_DEBUGGER
/** Command descriptors. */
{
/* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, pResultDesc, fFlags, pfnHandler pszSyntax, ....pszDescription */
{ "pgmsyncalways", 0, 0, NULL, 0, NULL, 0, pgmR3CmdSyncAlways, "", "Toggle permanent CR3 syncing." },
};
#endif
#if 1/// @todo ndef __AMD64__
/*
* Shadow - 32-bit mode
*/
#define PGM_SHW_TYPE PGM_TYPE_32BIT
#include "PGMShw.h"
/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#include "PGMGst.h"
#include "PGMBth.h"
/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#include "PGMGst.h"
#include "PGMBth.h"
/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#include "PGMGst.h"
#include "PGMBth.h"
#endif /* !__AMD64__ */
/*
* Shadow - PAE mode
*/
#define PGM_SHW_TYPE PGM_TYPE_PAE
#include "PGMShw.h"
/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#include "PGMBth.h"
/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#include "PGMBth.h"
/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#include "PGMBth.h"
/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#include "PGMGst.h"
#include "PGMBth.h"
/*
* Shadow - AMD64 mode
*/
#define PGM_SHW_TYPE PGM_TYPE_AMD64
#include "PGMShw.h"
/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#include "PGMBth.h"
/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#include "PGMBth.h"
/* Guest - AMD64 mode */
#define PGM_GST_TYPE PGM_TYPE_AMD64
#include "PGMGst.h"
#include "PGMBth.h"
/**
* Initiates the paging of VM.
*
* @returns VBox status code.
* @param pVM Pointer to VM structure.
*/
{
LogFlow(("PGMR3Init:\n"));
/*
* Assert alignment and sizes.
*/
/*
* Init the structure.
*/
{
}
/*
* Get the configured RAM size - to estimate saved state size.
*/
if (rc == VERR_CFGM_VALUE_NOT_FOUND)
else if (VBOX_SUCCESS(rc))
{
cbRam = 0;
}
else
{
return rc;
}
/*
* Register saved state data unit.
*/
if (VBOX_FAILURE(rc))
return rc;
/* Initialise PGM critical section. */
/*
* Trees
*/
if (VBOX_SUCCESS(rc))
{
/*
* Init the paging.
*/
}
if (VBOX_SUCCESS(rc))
{
/*
* Init the page pool.
*/
}
if (VBOX_SUCCESS(rc))
{
/*
* Info & statistics
*/
"Shows the current paging mode. "
"Recognizes 'all', 'guest', 'shadow' and 'host' as arguments, defaulting to 'all' if nothing's given.",
"Dumps all the entries in the top level paging table. No arguments.",
"Dumps all the physical address ranges. No arguments.",
"Dumps physical and virtual handlers. "
"Pass 'phys' or 'virt' as argument if only one kind is wanted.",
STAM_REL_REG(pVM, &pVM->pgm.s.cGuestModeChanges, STAMTYPE_COUNTER, "/PGM/cGuestModeChanges", STAMUNIT_OCCURENCES, "Number of guest mode changes.");
#ifdef VBOX_WITH_STATISTICS
#endif
#ifdef VBOX_WITH_DEBUGGER
/*
* Debugger commands.
*/
static bool fRegisteredCmds = false;
if (!fRegisteredCmds)
{
if (VBOX_SUCCESS(rc))
fRegisteredCmds = true;
}
#endif
return VINF_SUCCESS;
}
/* No cleanup necessary, MM frees all memory. */
return rc;
}
/**
* Init paging.
*
* Since we need to check what mode the host is operating in before we can choose
* the right paging functions for the host we have to delay this until R0 has
* been initialized.
*
* @returns VBox status code.
* @param pVM VM handle.
*/
{
/*
* Force a recalculation of modes and switcher so everyone gets notified.
*/
/*
* Allocate static mapping space for whatever the cr3 register
* points to and in the case of PAE mode to the 4 PDs.
*/
if (VBOX_FAILURE(rc))
{
return rc;
}
/*
* Allocate pages for the three possible intermediate contexts
* (AMD64, PAE and plain 32-Bit). We maintain all three contexts
* for the sake of simplicity. The AMD64 uses the PAE for the
* lower levels, making the total number of pages 11 (3 + 7 + 1).
*
* We assume that two page tables will be enought for the core code
* mappings (HC virtual and identity).
*/
{
AssertMsgFailed(("Failed to allocate pages for the intermediate context!\n"));
return VERR_NO_PAGE_MEMORY;
}
AssertRelease(pVM->pgm.s.HCPhysInterPD != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPD & PAGE_OFFSET_MASK));
AssertRelease(pVM->pgm.s.HCPhysInterPaePDPTR != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePDPTR & PAGE_OFFSET_MASK));
AssertRelease(pVM->pgm.s.HCPhysInterPaePML4 != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePML4 & PAGE_OFFSET_MASK));
/*
* Initialize the pages, setting up the PML4 and PDPTR for repetitive 4GB action.
*/
{
}
{
pVM->pgm.s.pInterPaePDPTR64->a[i].u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A | PGM_PLXFLAGS_PERMANENT
}
pVM->pgm.s.pInterPaePML4->a[i].u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A | PGM_PLXFLAGS_PERMANENT
/*
* Allocate pages for the three possible guest contexts (AMD64, PAE and plain 32-Bit).
* We allocate pages for all three posibilities to in order to simplify mappings and
* avoid resource failure during mode switches. So, we need to cover all levels of the
* of the first 4GB down to PD level.
* As with the intermediate context, AMD64 uses the PAE PDPTR and PDs.
*/
AssertRelease((uintptr_t)pVM->pgm.s.apHCPaePDs[0] + PAGE_SIZE == (uintptr_t)pVM->pgm.s.apHCPaePDs[1]);
AssertRelease((uintptr_t)pVM->pgm.s.apHCPaePDs[1] + PAGE_SIZE == (uintptr_t)pVM->pgm.s.apHCPaePDs[2]);
AssertRelease((uintptr_t)pVM->pgm.s.apHCPaePDs[2] + PAGE_SIZE == (uintptr_t)pVM->pgm.s.apHCPaePDs[3]);
{
AssertMsgFailed(("Failed to allocate pages for the intermediate context!\n"));
return VERR_NO_PAGE_MEMORY;
}
/* get physical addresses. */
/*
* Initialize the pages, setting up the PML4 and PDPTR for action below 4GB.
*/
{
/* The flags will be corrected when entering and leaving long mode. */
}
/*
* Initialize paging workers and mode from current host mode
* and the guest running in real mode.
*/
{
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_GLOBAL:
case SUPPAGINGMODE_PAE_NX:
break;
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
#ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL
if (ARCH_BITS != 64)
{
AssertMsgFailed(("Host mode %d (64-bit) is not supported by non-64bit builds\n", pVM->pgm.s.enmHostMode));
}
#endif
break;
default:
}
if (VBOX_SUCCESS(rc))
if (VBOX_SUCCESS(rc))
{
LogFlow(("pgmR3InitPaging: returns successfully\n"));
#if HC_ARCH_BITS == 64
LogRel(("Debug: HCPhys32BitPD=%VHp aHCPhysPaePDs={%VHp,%VHp,%VHp,%VHp} HCPhysPaePDPTR=%VHp HCPhysPaePML4=%VHp\n",
pVM->pgm.s.HCPhys32BitPD, pVM->pgm.s.aHCPhysPaePDs[0], pVM->pgm.s.aHCPhysPaePDs[1], pVM->pgm.s.aHCPhysPaePDs[2], pVM->pgm.s.aHCPhysPaePDs[3],
LogRel(("Debug: HCPhysInterPD=%VHp HCPhysInterPaePDPTR=%VHp HCPhysInterPaePML4=%VHp\n",
LogRel(("Debug: apInterPTs={%VHp,%VHp} apInterPaePTs={%VHp,%VHp} apInterPaePDs={%VHp,%VHp,%VHp,%VHp} pInterPaePDPTR64=%VHp\n",
MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[0]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[1]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[2]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[3]),
#endif
return VINF_SUCCESS;
}
return rc;
}
#ifdef VBOX_WITH_STATISTICS
/**
* Init statistics
*/
{
STAM_REG(pVM, &pPGM->StatGCInvalidatePage, STAMTYPE_PROFILE, "/PGM/GC/InvalidatePage", STAMUNIT_TICKS_PER_CALL, "PGMGCInvalidatePage() profiling.");
STAM_REG(pVM, &pPGM->StatGCInvalidatePage4KBPages, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/4KBPages", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for a 4KB page.");
STAM_REG(pVM, &pPGM->StatGCInvalidatePage4MBPages, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/4MBPages", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for a 4MB page.");
STAM_REG(pVM, &pPGM->StatGCInvalidatePage4MBPagesSkip, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/4MBPagesSkip",STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() skipped a 4MB page.");
STAM_REG(pVM, &pPGM->StatGCInvalidatePagePDMappings, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/PDMappings", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for a page directory containing mappings (no conflict).");
STAM_REG(pVM, &pPGM->StatGCInvalidatePagePDNAs, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/PDNAs", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for a not accessed page directory.");
STAM_REG(pVM, &pPGM->StatGCInvalidatePagePDNPs, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/PDNPs", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for a not present page directory.");
STAM_REG(pVM, &pPGM->StatGCInvalidatePagePDOutOfSync, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/PDOutOfSync", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for an out of sync page directory.");
STAM_REG(pVM, &pPGM->StatGCInvalidatePageSkipped, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/Skipped", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was skipped due to not present shw or pending pending SyncCR3.");
STAM_REG(pVM, &pPGM->StatGCSyncPT, STAMTYPE_PROFILE, "/PGM/GC/SyncPT", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMGCSyncPT() body.");
STAM_REG(pVM, &pPGM->StatGCAccessedPage, STAMTYPE_COUNTER, "/PGM/GC/AccessedPage", STAMUNIT_OCCURENCES, "The number of pages marked not present for accessed bit emulation.");
STAM_REG(pVM, &pPGM->StatGCDirtyPage, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/Mark", STAMUNIT_OCCURENCES, "The number of pages marked read-only for dirty bit tracking.");
STAM_REG(pVM, &pPGM->StatGCDirtyPageBig, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/MarkBig", STAMUNIT_OCCURENCES, "The number of 4MB pages marked read-only for dirty bit tracking.");
STAM_REG(pVM, &pPGM->StatGCDirtyPageTrap, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/Trap", STAMUNIT_OCCURENCES, "The number of traps generated for dirty bit tracking.");
STAM_REG(pVM, &pPGM->StatGCDirtyPageSkipped, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/Skipped", STAMUNIT_OCCURENCES, "The number of pages already dirty or readonly.");
STAM_REG(pVM, &pPGM->StatGCDirtiedPage, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/SetDirty", STAMUNIT_OCCURENCES, "The number of pages marked dirty because of write accesses.");
STAM_REG(pVM, &pPGM->StatGCDirtyTrackRealPF, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/RealPF", STAMUNIT_OCCURENCES, "The number of real pages faults during dirty bit tracking.");
STAM_REG(pVM, &pPGM->StatGCPageAlreadyDirty, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/AlreadySet", STAMUNIT_OCCURENCES, "The number of pages already marked dirty because of write accesses.");
STAM_REG(pVM, &pPGM->StatGCDirtyBitTracking, STAMTYPE_PROFILE, "/PGM/GC/DirtyPage", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMTrackDirtyBit() body.");
STAM_REG(pVM, &pPGM->StatGCSyncPTAlloc, STAMTYPE_COUNTER, "/PGM/GC/SyncPT/Alloc", STAMUNIT_OCCURENCES, "The number of times PGMGCSyncPT() needed to allocate page tables.");
STAM_REG(pVM, &pPGM->StatGCSyncPTConflict, STAMTYPE_COUNTER, "/PGM/GC/SyncPT/Conflicts", STAMUNIT_OCCURENCES, "The number of times PGMGCSyncPT() detected conflicts.");
STAM_REG(pVM, &pPGM->StatGCSyncPTFailed, STAMTYPE_COUNTER, "/PGM/GC/SyncPT/Failed", STAMUNIT_OCCURENCES, "The number of times PGMGCSyncPT() failed.");
STAM_REG(pVM, &pPGM->StatGCTrap0e, STAMTYPE_PROFILE, "/PGM/GC/Trap0e", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMGCTrap0eHandler() body.");
STAM_REG(pVM, &pPGM->StatCheckPageFault, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/CheckPageFault", STAMUNIT_TICKS_PER_CALL, "Profiling of checking for dirty/access emulation faults.");
STAM_REG(pVM, &pPGM->StatLazySyncPT, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/SyncPT", STAMUNIT_TICKS_PER_CALL, "Profiling of lazy page table syncing.");
STAM_REG(pVM, &pPGM->StatMapping, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/Mapping", STAMUNIT_TICKS_PER_CALL, "Profiling of checking virtual mappings.");
STAM_REG(pVM, &pPGM->StatOutOfSync, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/OutOfSync", STAMUNIT_TICKS_PER_CALL, "Profiling of out of sync page handling.");
STAM_REG(pVM, &pPGM->StatHandlers, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/Handlers", STAMUNIT_TICKS_PER_CALL, "Profiling of checking handlers.");
STAM_REG(pVM, &pPGM->StatEIPHandlers, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/EIPHandlers", STAMUNIT_TICKS_PER_CALL, "Profiling of checking eip handlers.");
STAM_REG(pVM, &pPGM->StatTrap0eCSAM, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/CSAM", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is CSAM.");
STAM_REG(pVM, &pPGM->StatTrap0eDirtyAndAccessedBits, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/DirtyAndAccessedBits", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation.");
STAM_REG(pVM, &pPGM->StatTrap0eGuestTrap, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/GuestTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is a guest trap.");
STAM_REG(pVM, &pPGM->StatTrap0eHndPhys, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/HandlerPhysical", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is a physical handler.");
STAM_REG(pVM, &pPGM->StatTrap0eHndVirt, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/HandlerVirtual",STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is a virtual handler.");
STAM_REG(pVM, &pPGM->StatTrap0eHndUnhandled, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/HandlerUnhandled", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page.");
STAM_REG(pVM, &pPGM->StatTrap0eMisc, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/Misc", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is not known.");
STAM_REG(pVM, &pPGM->StatTrap0eOutOfSync, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/OutOfSync", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is an out-of-sync page.");
STAM_REG(pVM, &pPGM->StatTrap0eOutOfSyncHndPhys, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/OutOfSyncHndPhys", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page.");
STAM_REG(pVM, &pPGM->StatTrap0eOutOfSyncHndVirt, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/OutOfSyncHndVirt", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page.");
STAM_REG(pVM, &pPGM->StatTrap0eOutOfSyncObsHnd, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/OutOfSyncObsHnd", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is an obsolete handler page.");
STAM_REG(pVM, &pPGM->StatTrap0eSyncPT, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/SyncPT", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT.");
STAM_REG(pVM, &pPGM->StatTrap0eMapHandler, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/Mapping", STAMUNIT_OCCURENCES, "Number of traps due to access handlers in mappings.");
STAM_REG(pVM, &pPGM->StatHandlersOutOfSync, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/OutOfSync", STAMUNIT_OCCURENCES, "Number of traps due to out-of-sync handled pages.");
STAM_REG(pVM, &pPGM->StatHandlersPhysical, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/Physical", STAMUNIT_OCCURENCES, "Number of traps due to physical access handlers.");
STAM_REG(pVM, &pPGM->StatHandlersVirtual, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/Virtual", STAMUNIT_OCCURENCES, "Number of traps due to virtual access handlers.");
STAM_REG(pVM, &pPGM->StatHandlersVirtualByPhys, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/VirtualByPhys", STAMUNIT_OCCURENCES, "Number of traps due to virtual access handlers by physical address.");
STAM_REG(pVM, &pPGM->StatHandlersVirtualUnmarked, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/VirtualUnmarked", STAMUNIT_OCCURENCES,"Number of traps due to virtual access handlers by virtual address (without proper physical flags).");
STAM_REG(pVM, &pPGM->StatHandlersUnhandled, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/Unhandled", STAMUNIT_OCCURENCES, "Number of traps due to access outside range of monitored page(s).");
STAM_REG(pVM, &pPGM->StatGCTrap0eConflicts, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Conflicts", STAMUNIT_OCCURENCES, "The number of times #PF was caused by an undetected conflict.");
STAM_REG(pVM, &pPGM->StatGCTrap0eUSNotPresentRead, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/User/NPRead", STAMUNIT_OCCURENCES, "Number of user mode not present read page faults.");
STAM_REG(pVM, &pPGM->StatGCTrap0eUSNotPresentWrite, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/User/NPWrite", STAMUNIT_OCCURENCES, "Number of user mode not present write page faults.");
STAM_REG(pVM, &pPGM->StatGCTrap0eUSWrite, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/User/Write", STAMUNIT_OCCURENCES, "Number of user mode write page faults.");
STAM_REG(pVM, &pPGM->StatGCTrap0eUSReserved, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/User/Reserved", STAMUNIT_OCCURENCES, "Number of user mode reserved bit page faults.");
STAM_REG(pVM, &pPGM->StatGCTrap0eUSRead, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/User/Read", STAMUNIT_OCCURENCES, "Number of user mode read page faults.");
STAM_REG(pVM, &pPGM->StatGCTrap0eSVNotPresentRead, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Supervisor/NPRead", STAMUNIT_OCCURENCES, "Number of supervisor mode not present read page faults.");
STAM_REG(pVM, &pPGM->StatGCTrap0eSVNotPresentWrite, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Supervisor/NPWrite", STAMUNIT_OCCURENCES, "Number of supervisor mode not present write page faults.");
STAM_REG(pVM, &pPGM->StatGCTrap0eSVWrite, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Supervisor/Write", STAMUNIT_OCCURENCES, "Number of supervisor mode write page faults.");
STAM_REG(pVM, &pPGM->StatGCTrap0eSVReserved, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Supervisor/Reserved", STAMUNIT_OCCURENCES, "Number of supervisor mode reserved bit page faults.");
STAM_REG(pVM, &pPGM->StatGCTrap0eUnhandled, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/GuestPF/Unhandled", STAMUNIT_OCCURENCES, "Number of guest real page faults.");
STAM_REG(pVM, &pPGM->StatGCTrap0eMap, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/GuestPF/Map", STAMUNIT_OCCURENCES, "Number of guest page faults due to map accesses.");
STAM_REG(pVM, &pPGM->StatGCGuestCR3WriteHandled, STAMTYPE_COUNTER, "/PGM/GC/CR3WriteInt", STAMUNIT_OCCURENCES, "The number of times the Guest CR3 change was successfully handled.");
STAM_REG(pVM, &pPGM->StatGCGuestCR3WriteUnhandled, STAMTYPE_COUNTER, "/PGM/GC/CR3WriteEmu", STAMUNIT_OCCURENCES, "The number of times the Guest CR3 change was passed back to the recompiler.");
STAM_REG(pVM, &pPGM->StatGCGuestCR3WriteConflict, STAMTYPE_COUNTER, "/PGM/GC/CR3WriteConflict", STAMUNIT_OCCURENCES, "The number of times the Guest CR3 monitoring detected a conflict.");
STAM_REG(pVM, &pPGM->StatGCPageOutOfSyncSupervisor, STAMTYPE_COUNTER, "/PGM/GC/OutOfSync/SuperVisor", STAMUNIT_OCCURENCES, "Number of traps due to pages out of sync.");
STAM_REG(pVM, &pPGM->StatGCPageOutOfSyncUser, STAMTYPE_COUNTER, "/PGM/GC/OutOfSync/User", STAMUNIT_OCCURENCES, "Number of traps due to pages out of sync.");
STAM_REG(pVM, &pPGM->StatGCGuestROMWriteHandled, STAMTYPE_COUNTER, "/PGM/GC/ROMWriteInt", STAMUNIT_OCCURENCES, "The number of times the Guest ROM change was successfully handled.");
STAM_REG(pVM, &pPGM->StatGCGuestROMWriteUnhandled, STAMTYPE_COUNTER, "/PGM/GC/ROMWriteEmu", STAMUNIT_OCCURENCES, "The number of times the Guest ROM change was passed back to the recompiler.");
STAM_REG(pVM, &pPGM->StatDynMapCacheHits, STAMTYPE_COUNTER, "/PGM/GC/DynMapCache/Hits" , STAMUNIT_OCCURENCES, "Number of dynamic page mapping cache hits.");
STAM_REG(pVM, &pPGM->StatDynMapCacheMisses, STAMTYPE_COUNTER, "/PGM/GC/DynMapCache/Misses" , STAMUNIT_OCCURENCES, "Number of dynamic page mapping cache misses.");
STAM_REG(pVM, &pPGM->StatHCDetectedConflicts, STAMTYPE_COUNTER, "/PGM/HC/DetectedConflicts", STAMUNIT_OCCURENCES, "The number of times PGMR3CheckMappingConflicts() detected a conflict.");
STAM_REG(pVM, &pPGM->StatHCGuestPDWrite, STAMTYPE_COUNTER, "/PGM/HC/PDWrite", STAMUNIT_OCCURENCES, "The total number of times pgmHCGuestPDWriteHandler() was called.");
STAM_REG(pVM, &pPGM->StatHCGuestPDWriteConflict, STAMTYPE_COUNTER, "/PGM/HC/PDWriteConflict", STAMUNIT_OCCURENCES, "The number of times pgmHCGuestPDWriteHandler() detected a conflict.");
STAM_REG(pVM, &pPGM->StatHCInvalidatePage, STAMTYPE_PROFILE, "/PGM/HC/InvalidatePage", STAMUNIT_TICKS_PER_CALL, "PGMHCInvalidatePage() profiling.");
STAM_REG(pVM, &pPGM->StatHCInvalidatePage4KBPages, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/4KBPages", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was called for a 4KB page.");
STAM_REG(pVM, &pPGM->StatHCInvalidatePage4MBPages, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/4MBPages", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was called for a 4MB page.");
STAM_REG(pVM, &pPGM->StatHCInvalidatePage4MBPagesSkip, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/4MBPagesSkip",STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() skipped a 4MB page.");
STAM_REG(pVM, &pPGM->StatHCInvalidatePagePDMappings, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/PDMappings", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was called for a page directory containing mappings (no conflict).");
STAM_REG(pVM, &pPGM->StatHCInvalidatePagePDNAs, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/PDNAs", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was called for a not accessed page directory.");
STAM_REG(pVM, &pPGM->StatHCInvalidatePagePDNPs, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/PDNPs", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was called for a not present page directory.");
STAM_REG(pVM, &pPGM->StatHCInvalidatePagePDOutOfSync, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/PDOutOfSync", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for an out of sync page directory.");
STAM_REG(pVM, &pPGM->StatHCInvalidatePageSkipped, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/Skipped", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was skipped due to not present shw or pending pending SyncCR3.");
STAM_REG(pVM, &pPGM->StatHCResolveConflict, STAMTYPE_PROFILE, "/PGM/HC/ResolveConflict", STAMUNIT_TICKS_PER_CALL, "pgmR3SyncPTResolveConflict() profiling (includes the entire relocation).");
STAM_REG(pVM, &pPGM->StatHCPrefetch, STAMTYPE_PROFILE, "/PGM/HC/Prefetch", STAMUNIT_TICKS_PER_CALL, "PGMR3PrefetchPage profiling.");
STAM_REG(pVM, &pPGM->StatHCSyncPT, STAMTYPE_PROFILE, "/PGM/HC/SyncPT", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMR3SyncPT() body.");
STAM_REG(pVM, &pPGM->StatHCAccessedPage, STAMTYPE_COUNTER, "/PGM/HC/AccessedPage", STAMUNIT_OCCURENCES, "The number of pages marked not present for accessed bit emulation.");
STAM_REG(pVM, &pPGM->StatHCDirtyPage, STAMTYPE_COUNTER, "/PGM/HC/DirtyPage/Mark", STAMUNIT_OCCURENCES, "The number of pages marked read-only for dirty bit tracking.");
STAM_REG(pVM, &pPGM->StatHCDirtyPageBig, STAMTYPE_COUNTER, "/PGM/HC/DirtyPage/MarkBig", STAMUNIT_OCCURENCES, "The number of 4MB pages marked read-only for dirty bit tracking.");
STAM_REG(pVM, &pPGM->StatHCDirtyPageTrap, STAMTYPE_COUNTER, "/PGM/HC/DirtyPage/Trap", STAMUNIT_OCCURENCES, "The number of traps generated for dirty bit tracking.");
STAM_REG(pVM, &pPGM->StatHCDirtyPageSkipped, STAMTYPE_COUNTER, "/PGM/HC/DirtyPage/Skipped", STAMUNIT_OCCURENCES, "The number of pages already dirty or readonly.");
STAM_REG(pVM, &pPGM->StatHCDirtyBitTracking, STAMTYPE_PROFILE, "/PGM/HC/DirtyPage", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMTrackDirtyBit() body.");
STAM_REG(pVM, &pPGM->StatGCSyncPagePDNAs, STAMTYPE_COUNTER, "/PGM/GC/SyncPagePDNAs", STAMUNIT_OCCURENCES, "The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit.");
STAM_REG(pVM, &pPGM->StatGCSyncPagePDOutOfSync, STAMTYPE_COUNTER, "/PGM/GC/SyncPagePDOutOfSync", STAMUNIT_OCCURENCES, "The number of time we've encountered an out-of-sync PD in SyncPage.");
STAM_REG(pVM, &pPGM->StatHCSyncPagePDNAs, STAMTYPE_COUNTER, "/PGM/HC/SyncPagePDNAs", STAMUNIT_OCCURENCES, "The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit.");
STAM_REG(pVM, &pPGM->StatHCSyncPagePDOutOfSync, STAMTYPE_COUNTER, "/PGM/HC/SyncPagePDOutOfSync", STAMUNIT_OCCURENCES, "The number of time we've encountered an out-of-sync PD in SyncPage.");
STAM_REG(pVM, &pPGM->StatFlushTLB, STAMTYPE_PROFILE, "/PGM/FlushTLB", STAMUNIT_OCCURENCES, "Profiling of the PGMFlushTLB() body.");
STAM_REG(pVM, &pPGM->StatFlushTLBNewCR3, STAMTYPE_COUNTER, "/PGM/FlushTLB/NewCR3", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with a new CR3, non-global. (switch)");
STAM_REG(pVM, &pPGM->StatFlushTLBNewCR3Global, STAMTYPE_COUNTER, "/PGM/FlushTLB/NewCR3Global", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with a new CR3, global. (switch)");
STAM_REG(pVM, &pPGM->StatFlushTLBSameCR3, STAMTYPE_COUNTER, "/PGM/FlushTLB/SameCR3", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with the same CR3, non-global. (flush)");
STAM_REG(pVM, &pPGM->StatFlushTLBSameCR3Global, STAMTYPE_COUNTER, "/PGM/FlushTLB/SameCR3Global", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with the same CR3, global. (flush)");
STAM_REG(pVM, &pPGM->StatGCSyncCR3, STAMTYPE_PROFILE, "/PGM/GC/SyncCR3", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMSyncCR3() body.");
STAM_REG(pVM, &pPGM->StatGCSyncCR3Handlers, STAMTYPE_PROFILE, "/PGM/GC/SyncCR3/Handlers", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMSyncCR3() update handler section.");
STAM_REG(pVM, &pPGM->StatGCSyncCR3HandlerVirtualUpdate, STAMTYPE_PROFILE, "/PGM/GC/SyncCR3/Handlers/VirtualUpdate",STAMUNIT_TICKS_PER_CALL, "Profiling of the virtual handler updates.");
STAM_REG(pVM, &pPGM->StatGCSyncCR3HandlerVirtualReset, STAMTYPE_PROFILE, "/PGM/GC/SyncCR3/Handlers/VirtualReset", STAMUNIT_TICKS_PER_CALL, "Profiling of the virtual handler resets.");
STAM_REG(pVM, &pPGM->StatGCSyncCR3Global, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/Global", STAMUNIT_OCCURENCES, "The number of global CR3 syncs.");
STAM_REG(pVM, &pPGM->StatGCSyncCR3NotGlobal, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/NotGlobal", STAMUNIT_OCCURENCES, "The number of non-global CR3 syncs.");
STAM_REG(pVM, &pPGM->StatGCSyncCR3DstCacheHit, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstChacheHit", STAMUNIT_OCCURENCES, "The number of times we got some kind of a cache hit.");
STAM_REG(pVM, &pPGM->StatGCSyncCR3DstFreed, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstFreed", STAMUNIT_OCCURENCES, "The number of times we've had to free a shadow entry.");
STAM_REG(pVM, &pPGM->StatGCSyncCR3DstFreedSrcNP, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstFreedSrcNP", STAMUNIT_OCCURENCES, "The number of times we've had to free a shadow entry for which the source entry was not present.");
STAM_REG(pVM, &pPGM->StatGCSyncCR3DstNotPresent, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstNotPresent", STAMUNIT_OCCURENCES, "The number of times we've encountered a not present shadow entry for a present guest entry.");
STAM_REG(pVM, &pPGM->StatGCSyncCR3DstSkippedGlobalPD, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstSkippedGlobalPD", STAMUNIT_OCCURENCES, "The number of times a global page directory wasn't flushed.");
STAM_REG(pVM, &pPGM->StatGCSyncCR3DstSkippedGlobalPT, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstSkippedGlobalPT", STAMUNIT_OCCURENCES, "The number of times a page table with only global entries wasn't flushed.");
STAM_REG(pVM, &pPGM->StatHCSyncCR3, STAMTYPE_PROFILE, "/PGM/HC/SyncCR3", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMSyncCR3() body.");
STAM_REG(pVM, &pPGM->StatHCSyncCR3Handlers, STAMTYPE_PROFILE, "/PGM/HC/SyncCR3/Handlers", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMSyncCR3() update handler section.");
STAM_REG(pVM, &pPGM->StatHCSyncCR3HandlerVirtualUpdate, STAMTYPE_PROFILE, "/PGM/HC/SyncCR3/Handlers/VirtualUpdate",STAMUNIT_TICKS_PER_CALL, "Profiling of the virtual handler updates.");
STAM_REG(pVM, &pPGM->StatHCSyncCR3HandlerVirtualReset, STAMTYPE_PROFILE, "/PGM/HC/SyncCR3/Handlers/VirtualReset", STAMUNIT_TICKS_PER_CALL, "Profiling of the virtual handler resets.");
STAM_REG(pVM, &pPGM->StatHCSyncCR3Global, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/Global", STAMUNIT_OCCURENCES, "The number of global CR3 syncs.");
STAM_REG(pVM, &pPGM->StatHCSyncCR3NotGlobal, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/NotGlobal", STAMUNIT_OCCURENCES, "The number of non-global CR3 syncs.");
STAM_REG(pVM, &pPGM->StatHCSyncCR3DstCacheHit, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstChacheHit", STAMUNIT_OCCURENCES, "The number of times we got some kind of a cache hit.");
STAM_REG(pVM, &pPGM->StatHCSyncCR3DstFreed, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstFreed", STAMUNIT_OCCURENCES, "The number of times we've had to free a shadow entry.");
STAM_REG(pVM, &pPGM->StatHCSyncCR3DstFreedSrcNP, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstFreedSrcNP", STAMUNIT_OCCURENCES, "The number of times we've had to free a shadow entry for which the source entry was not present.");
STAM_REG(pVM, &pPGM->StatHCSyncCR3DstNotPresent, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstNotPresent", STAMUNIT_OCCURENCES, "The number of times we've encountered a not present shadow entry for a present guest entry.");
STAM_REG(pVM, &pPGM->StatHCSyncCR3DstSkippedGlobalPD, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstSkippedGlobalPD", STAMUNIT_OCCURENCES, "The number of times a global page directory wasn't flushed.");
STAM_REG(pVM, &pPGM->StatHCSyncCR3DstSkippedGlobalPT, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstSkippedGlobalPT", STAMUNIT_OCCURENCES, "The number of times a page table with only global entries wasn't flushed.");
STAM_REG(pVM, &pPGM->StatVirtHandleSearchByPhysGC, STAMTYPE_PROFILE, "/PGM/VirtHandler/SearchByPhys/GC", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmHandlerVirtualFindByPhysAddr in GC.");
STAM_REG(pVM, &pPGM->StatVirtHandleSearchByPhysHC, STAMTYPE_PROFILE, "/PGM/VirtHandler/SearchByPhys/HC", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmHandlerVirtualFindByPhysAddr in HC.");
STAM_REG(pVM, &pPGM->StatHandlePhysicalReset, STAMTYPE_COUNTER, "/PGM/HC/HandlerPhysicalReset", STAMUNIT_OCCURENCES, "The number of times PGMR3HandlerPhysicalReset is called.");
STAM_REG(pVM, &pPGM->StatHCGstModifyPage, STAMTYPE_PROFILE, "/PGM/HC/GstModifyPage", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMGstModifyPage() body.");
STAM_REG(pVM, &pPGM->StatGCGstModifyPage, STAMTYPE_PROFILE, "/PGM/GC/GstModifyPage", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMGstModifyPage() body.");
STAM_REG(pVM, &pPGM->StatSynPT4kGC, STAMTYPE_COUNTER, "/PGM/GC/SyncPT/4k", STAMUNIT_OCCURENCES, "Nr of 4k PT syncs");
STAM_REG(pVM, &pPGM->StatSynPT4kHC, STAMTYPE_COUNTER, "/PGM/HC/SyncPT/4k", STAMUNIT_OCCURENCES, "Nr of 4k PT syncs");
STAM_REG(pVM, &pPGM->StatSynPT4MGC, STAMTYPE_COUNTER, "/PGM/GC/SyncPT/4M", STAMUNIT_OCCURENCES, "Nr of 4M PT syncs");
STAM_REG(pVM, &pPGM->StatSynPT4MHC, STAMTYPE_COUNTER, "/PGM/HC/SyncPT/4M", STAMUNIT_OCCURENCES, "Nr of 4M PT syncs");
STAM_REG(pVM, &pPGM->StatDynRamTotal, STAMTYPE_COUNTER, "/PGM/RAM/TotalAlloc", STAMUNIT_MEGABYTES, "Allocated mbs of guest ram.");
STAM_REG(pVM, &pPGM->StatDynRamGrow, STAMTYPE_COUNTER, "/PGM/RAM/Grow", STAMUNIT_OCCURENCES, "Nr of pgmr3PhysGrowRange calls.");
#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
STAM_REG(pVM, &pPGM->StatTrackVirgin, STAMTYPE_COUNTER, "/PGM/Track/Virgin", STAMUNIT_OCCURENCES, "The number of first time shadowings");
STAM_REG(pVM, &pPGM->StatTrackAliased, STAMTYPE_COUNTER, "/PGM/Track/Aliased", STAMUNIT_OCCURENCES, "The number of times switching to cRef2, i.e. the page is being shadowed by two PTs.");
STAM_REG(pVM, &pPGM->StatTrackAliasedMany, STAMTYPE_COUNTER, "/PGM/Track/AliasedMany", STAMUNIT_OCCURENCES, "The number of times we're tracking using cRef2.");
STAM_REG(pVM, &pPGM->StatTrackAliasedLots, STAMTYPE_COUNTER, "/PGM/Track/AliasedLots", STAMUNIT_OCCURENCES, "The number of times we're hitting pages which has overflowed cRef2");
STAM_REG(pVM, &pPGM->StatTrackOverflows, STAMTYPE_COUNTER, "/PGM/Track/Overflows", STAMUNIT_OCCURENCES, "The number of times the extent list grows to long.");
STAM_REG(pVM, &pPGM->StatTrackDeref, STAMTYPE_PROFILE, "/PGM/Track/Deref", STAMUNIT_OCCURENCES, "Profiling of SyncPageWorkerTrackDeref (expensive).");
#endif
for (unsigned i = 0; i < PAGE_ENTRIES; i++)
{
/** @todo r=bird: We need a STAMR3RegisterF()! */
char szName[32];
int rc = STAMR3Register(pVM, &pPGM->StatGCTrap0ePD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "The number of traps in page directory n.");
rc = STAMR3Register(pVM, &pPGM->StatGCSyncPtPD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "The number of syncs per PD n.");
rc = STAMR3Register(pVM, &pPGM->StatGCSyncPagePD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "The number of out of sync pages per page directory n.");
}
}
#endif /* VBOX_WITH_STATISTICS */
/**
* Init the PGM bits that rely on VMMR0 and MM to be fully initialized.
*
* The dynamic mapping area will also be allocated and initialized at this
* time. We could allocate it during PGMR3Init of course, but the mapping
* wouldn't be allocated at that time preventing us from setting up the
* page table entries with the dummy page.
*
* @returns VBox status code.
* @param pVM VM handle.
*/
{
/*
* Reserve space for mapping the paging pages into guest context.
*/
int rc = MMR3HyperReserve(pVM, PAGE_SIZE * (2 + ELEMENTS(pVM->pgm.s.apHCPaePDs) + 1 + 2 + 2), "Paging", &pVM->pgm.s.pGC32BitPD);
/*
* Reserve space for the dynamic mappings.
*/
/** @todo r=bird: Need to verify that the checks for crossing PTs are correct here. They seems to be assuming 4MB PTs.. */
rc = MMR3HyperReserve(pVM, MM_HYPER_DYNAMIC_SIZE, "Dynamic mapping", &pVM->pgm.s.pbDynPageMapBaseGC);
/* If the first reservation straddles a page directory boundary, reserve a
second range; the dynamic mapping area must fit inside a single PD (this
is what the AssertRelease below enforces). */
if (    VBOX_SUCCESS(rc)
&& (pVM->pgm.s.pbDynPageMapBaseGC >> PGDIR_SHIFT) != ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> PGDIR_SHIFT))
rc = MMR3HyperReserve(pVM, MM_HYPER_DYNAMIC_SIZE, "Dynamic mapping not crossing", &pVM->pgm.s.pbDynPageMapBaseGC);
if (VBOX_SUCCESS(rc))
{
AssertRelease((pVM->pgm.s.pbDynPageMapBaseGC >> PGDIR_SHIFT) == ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> PGDIR_SHIFT));
}
return rc;
}
/**
* Ring-3 init finalizing.
*
* @returns VBox status code.
* @param pVM The VM handle.
*/
{
/*
* Map the paging pages into the guest context.
*/
{
}
/* A bit of paranoia is justified: the four PAE page directories must sit
at consecutive GC pages for indexed access to work. */
AssertRelease((RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[0] + PAGE_SIZE == (RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[1]);
AssertRelease((RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[1] + PAGE_SIZE == (RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[2]);
AssertRelease((RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[2] + PAGE_SIZE == (RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[3]);
/*
* Reserve space for the dynamic mappings.
* Initialize the dynamic mapping pages with dummy pages to simplify the cache.
*/
/* get the pointer to the page table entries. */
pVM->pgm.s.paDynPageMap32BitPTEsGC = pMapping->aPTs[iPT].pPTGC + iPG * sizeof(pMapping->aPTs[0].pPTHC->a[0]);
pVM->pgm.s.paDynPageMapPaePTEsGC = pMapping->aPTs[iPT].paPaePTsGC + iPG * sizeof(pMapping->aPTs[0].paPaePTsHC->a[0]);
/* init cache */
for (unsigned i = 0; i < MM_HYPER_DYNAMIC_SIZE; i += PAGE_SIZE)
{
}
return rc;
}
/**
* Applies relocations to data and code managed by this
* component. This function will be called at init and
* whenever the VMM need to relocate it self inside the GC.
*
* @param pVM The VM.
* @param offDelta Relocation delta relative to old location.
*/
{
LogFlow(("PGMR3Relocate\n"));
/*
* Paging stuff.
*/
/** @todo move this into shadow and guest specific relocation functions. */
/*
* Trees.
*/
/*
* Ram ranges.
*/
{
{
if (pCur->pavHCChunkGC)
}
}
/*
* Update the two page directories with all page table mappings.
* (One or more of them have changed, that's why we're here.)
*/
/* Relocate GC addresses of Page Tables. */
{
{
}
}
/*
* Dynamic page mapping area.
*/
/*
* Physical and virtual handlers.
*/
RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesHC->PhysHandlers, true, pgmR3RelocatePhysHandler, &offDelta);
RTAvlroGCPtrDoWithAll(&pVM->pgm.s.pTreesHC->VirtHandlers, true, pgmR3RelocateVirtHandler, &offDelta);
/*
* The page pool.
*/
}
/**
* Callback function for relocating a physical access handler.
*
* @returns 0 (continue enum)
* @param pNode Pointer to a PGMPHYSHANDLER node.
* @param pvUser Pointer to the offDelta. This is a pointer to the delta since we're
* not certain the delta will fit in a void pointer for all possible configs.
*/
{
/* Returning 0 tells the AVL tree enumerator to keep going. */
return 0;
}
/**
* Callback function for relocating a virtual access handler.
*
* @returns 0 (continue enum)
* @param pNode Pointer to a PGMVIRTHANDLER node.
* @param pvUser Pointer to the offDelta. This is a pointer to the delta since we're
* not certain the delta will fit in a void pointer for all possible configs.
*/
{
/* Returning 0 tells the AVL tree enumerator to keep going. */
return 0;
}
/**
* The VM is being reset.
*
* For the PGM component this means that any PD write monitors
* needs to be removed.
*
* @param pVM VM handle.
*/
{
LogFlow(("PGMR3Reset:\n"));
/*
* Unfix any fixed mappings and disable CR3 monitoring.
*/
#ifdef DEBUG
#endif
/*
* Reset the shadow page pool.
*/
/*
* Re-init other members.
*/
/*
* Clear the FFs PGM owns.
*/
/*
* Zero memory.
*/
{
while (iPage-- > 0)
{
if (pRam->aHCPhys[iPage] & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2))
{
Log4(("PGMR3Reset: not clearing phys page %RGp due to flags %RHp\n", pRam->GCPhys + (iPage << PAGE_SHIFT), pRam->aHCPhys[iPage] & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO)));
continue;
}
{
ASMMemZero32((char *)pRam->pavHCChunkHC[iChunk] + ((iPage << PAGE_SHIFT) & PGM_DYNAMIC_CHUNK_OFFSET_MASK), PAGE_SIZE);
}
else
}
}
/*
* Switch mode back to real mode.
*/
}
/**
* Terminates the PGM.
*
* @returns VBox status code.
* @param pVM Pointer to VM structure.
*/
{
}
/**
* Execute state save operation.
*
* @returns VBox status code.
* @param pVM VM Handle.
* @param pSSM SSM operation handle.
*/
{
/*
* Save basic data (required / unaffected by relocation).
*/
#if 1
#else
#endif
/*
* The guest mappings.
*/
uint32_t i = 0;
{
SSMR3PutU32(pSSM, i);
/* flags are done by the mapping owners! */
}
/*
* Ram range flags and bits.
*/
i = 0;
{
/** @todo MMIO ranges may move (PCI reconfig), we currently assume they don't. */
SSMR3PutU32(pSSM, i);
/* Flags. */
/* any memory associated with the range. */
{
{
{
}
else
}
}
{
if (VBOX_FAILURE(rc))
{
return rc;
}
}
}
}
/**
* Execute state load operation.
*
* @returns VBox status code.
* @param pVM VM Handle.
* @param pSSM SSM operation handle.
* @param u32Version Data layout version.
*/
{
/*
* Validate version.
*/
if (u32Version != PGM_SAVED_STATE_VERSION)
{
Log(("pgmR3Load: Invalid version u32Version=%d (current %d)!\n", u32Version, PGM_SAVED_STATE_VERSION));
}
/*
* Call the reset function to make sure all the memory is cleared.
*/
/*
* Load basic data (required / unaffected by relocation).
*/
#if 1
#else
uint32_t u;
SSMR3GetU32(pSSM, &u);
pPGM->fMappingsFixed = u;
#endif
if (VBOX_FAILURE(rc))
return rc;
/* check separator. */
if (VBOX_FAILURE(rc))
return rc;
{
}
/*
* The guest mappings.
*/
uint32_t i = 0;
for (;; i++)
{
/* Check the sequence number / separator. */
if (VBOX_FAILURE(rc))
return rc;
if (u32Sep == ~0U)
break;
if (u32Sep != i)
{
}
/* get the mapping details. */
char szDesc[256];
szDesc[0] = '\0';
if (VBOX_FAILURE(rc))
return rc;
if (VBOX_FAILURE(rc))
return rc;
/* find matching range. */
break;
if (!pMapping)
{
LogRel(("Couldn't find mapping: cPTs=%#x szDesc=%s (GCPtr=%VGv)\n",
AssertFailed();
return VERR_SSM_LOAD_CONFIG_MISMATCH;
}
/* relocate it. */
{
#if HC_ARCH_BITS == 64
#endif
}
else
}
/*
* Ram range flags and bits.
*/
i = 0;
{
/** @todo MMIO ranges may move (PCI reconfig), we currently assume they don't. */
/* Check the sequence number / separator. */
if (VBOX_FAILURE(rc))
return rc;
if (u32Sep == ~0U)
break;
if (u32Sep != i)
{
}
/* Get the range details. */
if (VBOX_FAILURE(rc))
return rc;
if (fHaveBits & ~1)
{
}
/* Match it up with the current range. */
{
LogRel(("Ram range: %VGp-%VGp %VGp bytes %s\n"
"State : %VGp-%VGp %VGp bytes %s\n",
AssertFailed();
return VERR_SSM_LOAD_CONFIG_MISMATCH;
}
/* Flags. */
{
u16 &= PAGE_OFFSET_MASK & ~( MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_WRITE | MM_RAM_FLAGS_VIRTUAL_ALL
}
/* any memory associated with the range. */
{
{
if (VBOX_FAILURE(rc))
return rc;
if (fValidChunk > 1)
if (fValidChunk)
{
{
if (VBOX_FAILURE(rc))
return rc;
}
}
/* else nothing to do */
}
}
{
if (VBOX_FAILURE(rc))
{
return rc;
}
}
}
/*
* We require a full resync now.
*/
pPGM->fPhysCacheFlushPending = true;
/*
* Change the paging mode.
*/
}
/**
* Show paging mode.
*
* @param pVM VM Handle.
* @param pHlp The info helpers.
* @param pszArgs "all" (default), "guest", "shadow" or "host".
*/
{
/* digest argument. */
if (pszArgs)
else
{
/* No argument given: show all three paging contexts. */
fGuest = true;
fShadow = true;
fHost = true;
}
/* print info. */
if (fGuest)
if (fShadow)
if (fHost)
{
const char *psz;
{
/* Fallback for host paging modes not matched above. */
default: psz = "unknown"; break;
}
}
}
/**
* Dump registered RAM ranges to the log.
*
* @param pVM VM Handle.
* @param pHlp The info helpers.
* @param pszArgs Arguments, ignored.
*/
{
"RAM ranges (pVM=%p)\n"
"%.*s %.*s\n",
pVM,
"%VGp-%VGp %VHv\n",
}
/**
* Dump the page directory to the log.
*
* @param pVM VM Handle.
* @param pHlp The info helpers.
* @param pszArgs Arguments, ignored.
*/
{
/** @todo fix this! Convert the PGMR3DumpHierarchyHC functions to do guest stuff. */
/* Big pages supported? */
/* Global pages supported? */
/*
* Get page directory addresses.
*/
Assert(MMPhysGCPhys2HCVirt(pVM, (RTGCPHYS)(CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK), sizeof(*pPDSrc)) == pPDSrc);
/*
* Iterate the page directory.
*/
{
{
{
"%04X - %VGp P=%d U=%d RW=%d G=%d - BIG\n",
iPD,
PdeSrc.u & X86_PDE_PG_MASK,
}
else
{
"%04X - %VGp P=%d U=%d RW=%d [G=%d]\n",
iPD,
PdeSrc.u & X86_PDE4M_PG_MASK,
}
}
}
}
/**
* Service a VMMCALLHOST_PGM_LOCK call.
*
* @returns VBox status code.
* @param pVM The VM handle.
*/
{
}
/**
* Converts a PGMMODE value to a PGM_TYPE_* \#define.
*
* @returns PGM_TYPE_*.
* @param pgmMode The mode value to convert.
*/
{
switch (pgmMode)
{
case PGMMODE_REAL: return PGM_TYPE_REAL;
case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
/* The NX variants share the base paging type. */
case PGMMODE_PAE:
case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
default:
}
}
/**
* Gets the index into the paging mode data array of a SHW+GST mode.
*
* @returns PGM::paPagingData index.
* @param uShwType The shadow paging mode type.
* @param uGstType The guest paging mode type.
*/
{
/* The guest type is biased so PGM_TYPE_REAL becomes offset 0 within a shadow-mode row. */
+ (uGstType - PGM_TYPE_REAL);
}
/**
* Gets the index into the paging mode data array of a SHW+GST mode.
*
* @returns PGM::paPagingData index.
* @param enmShw The shadow paging mode.
* @param enmGst The guest paging mode.
*/
{
}
/**
* Calculates the max data index.
* @returns The number of entries in the paging data array.
*/
DECLINLINE(unsigned) pgmModeDataMaxIndex(void)
{
}
/**
* Initializes the paging mode data kept in PGM::paModeData.
*
* @returns VBox status code.
* @param pVM The VM handle.
* @param fResolveGCAndR0 Indicate whether or not GC and Ring-0 symbols can be resolved now.
* This is used early in the init process to avoid trouble with PDM
* not being initialized yet.
*/
{
int rc;
/*
* Allocate the array on the first call.
*/
{
pVM->pgm.s.paModeData = (PPGMMODEDATA)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMMODEDATA) * pgmModeDataMaxIndex());
}
/*
* Initialize the array entries.
*/
return VINF_SUCCESS;
}
/**
* Switch to different (or relocated in the relocate case) mode data.
*
* @param pVM The VM handle.
* @param enmShw The shadow paging mode.
* @param enmGst The guest paging mode.
*/
{
/* shadow */
/* guest */
/* both */
#ifdef VBOX_STRICT
#endif
#ifdef VBOX_STRICT
#endif
#ifdef VBOX_STRICT
#endif
}
#ifdef DEBUG_bird
#include <stdlib.h> /* getenv() remove me! */
#endif
/**
* Calculates the shadow paging mode.
*
* @returns The shadow paging mode.
* @param enmGuestMode The guest mode.
* @param enmHostMode The host mode.
* @param enmShadowMode The current shadow mode.
* @param penmSwitcher Where to store the switcher to use.
* VMMSWITCHER_INVALID means no change.
*/
static PGMMODE pgmR3CalcShadowMode(PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode, VMMSWITCHER *penmSwitcher)
{
switch (enmGuestMode)
{
/*
* When switching to real or protected mode we don't change
* anything since it's likely that we'll switch back pretty soon.
*
* During pgmR3InitPaging we'll end up here with PGMMODE_INVALID
* and is supposed to determine which shadow paging and switcher to
* use during init.
*/
case PGMMODE_REAL:
case PGMMODE_PROTECTED:
if (enmShadowMode != PGMMODE_INVALID)
break; /* (no change) */
switch (enmHostMode)
{
case SUPPAGINGMODE_32_BIT:
break;
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
#ifdef DEBUG_bird
if (getenv("VBOX_32BIT"))
{
}
#endif
break;
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
break;
}
break;
case PGMMODE_32_BIT:
switch (enmHostMode)
{
case SUPPAGINGMODE_32_BIT:
break;
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
#ifdef DEBUG_bird
if (getenv("VBOX_32BIT"))
{
}
#endif
break;
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
break;
}
break;
case PGMMODE_PAE:
case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
switch (enmHostMode)
{
case SUPPAGINGMODE_32_BIT:
break;
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
break;
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
break;
}
break;
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
switch (enmHostMode)
{
case SUPPAGINGMODE_32_BIT:
break;
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
break;
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
break;
}
break;
default:
return PGMMODE_INVALID;
}
return enmShadowMode;
}
/**
* Performs the actual mode change.
* This is called by PGMChangeMode and pgmR3InitPaging().
*
* @returns VBox status code.
* @param pVM VM handle.
* @param enmGuestMode The new guest mode. This is assumed to be different from
* the current mode.
*/
{
/*
* Calc the shadow mode and switcher.
*/
PGMMODE enmShadowMode = pgmR3CalcShadowMode(enmGuestMode, pVM->pgm.s.enmHostMode, pVM->pgm.s.enmShadowMode, &enmSwitcher);
if (enmSwitcher != VMMSWITCHER_INVALID)
{
/*
* Select new switcher.
*/
if (VBOX_FAILURE(rc))
{
return rc;
}
}
/*
* Exit old mode(s).
*/
/* shadow */
{
{
if (VBOX_FAILURE(rc))
{
return rc;
}
}
}
/* guest */
{
if (VBOX_FAILURE(rc))
{
return rc;
}
}
/*
* Load new paging mode data.
*/
/*
* Enter new shadow mode (if changed).
*/
{
int rc;
switch (enmShadowMode)
{
case PGMMODE_32_BIT:
break;
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
break;
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
break;
case PGMMODE_REAL:
case PGMMODE_PROTECTED:
default:
return VERR_INTERNAL_ERROR;
}
if (VBOX_FAILURE(rc))
{
return rc;
}
}
/*
* Enter the new guest and shadow+guest modes.
*/
int rc = -1;
int rc2 = -1;
switch (enmGuestMode)
{
case PGMMODE_REAL:
{
case PGMMODE_32_BIT:
break;
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
break;
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
break;
default: AssertFailed(); break;
}
break;
case PGMMODE_PROTECTED:
{
case PGMMODE_32_BIT:
break;
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
break;
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
break;
default: AssertFailed(); break;
}
break;
case PGMMODE_32_BIT:
{
case PGMMODE_32_BIT:
break;
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
break;
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
AssertMsgFailed(("Should use PAE shadow mode!\n"));
default: AssertFailed(); break;
}
break;
//case PGMMODE_PAE_NX:
case PGMMODE_PAE:
{
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
break;
case PGMMODE_32_BIT:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
AssertMsgFailed(("Should use PAE shadow mode!\n"));
default: AssertFailed(); break;
}
break;
//case PGMMODE_AMD64_NX:
case PGMMODE_AMD64:
GCPhysCR3 = CPUMGetGuestCR3(pVM) & 0xfffffffffffff000ULL; /** @todo define this mask and make CR3 64-bit in this case! */
{
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
break;
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
AssertMsgFailed(("Should use AMD64 shadow mode!\n"));
default: AssertFailed(); break;
}
break;
default:
break;
}
/* status codes. */
if (VBOX_SUCCESS(rc))
{
rc = VINF_SUCCESS;
}
/*
* Notify SELM so it can update the TSSes with correct CR3s.
*/
/* Notify HWACCM as well. */
return rc;
}
/**
* Dumps a PAE shadow page table.
*
* @returns VBox status code (VINF_SUCCESS).
* @param pVM The VM handle.
* @param pPT Pointer to the page table.
* @param u64Address The virtual address of the page table starts.
* @param fLongMode Set if this a long mode table; clear if it's a legacy mode table.
* @param cMaxDepth The maximum depth.
* @param pHlp Pointer to the output functions.
*/
static int pgmR3DumpHierarchyHCPaePT(PVM pVM, PX86PTPAE pPT, uint64_t u64Address, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
{
{
{
fLongMode /*P R S A D G WT CD AT NX 4M a p ? */
? "%016llx 3 | P %c %c %c %c %c %s %s %s %s 4K %c%c%c %016llx\n"
: "%08llx 2 | P %c %c %c %c %c %s %s %s %s 4K %c%c%c %016llx\n",
Pte.u & X86_PTE_PAE_PG_MASK);
}
}
return VINF_SUCCESS;
}
/**
* Dumps a PAE shadow page directory table.
*
* @returns VBox status code (VINF_SUCCESS).
* @param pVM The VM handle.
* @param HCPhys The physical address of the page directory table.
* @param u64Address The virtual address of the page table starts.
* @param cr4 The CR4, PSE is currently used.
* @param fLongMode Set if this a long mode table; clear if it's a legacy mode table.
* @param cMaxDepth The maximum depth.
* @param pHlp Pointer to the output functions.
*/
static int pgmR3DumpHierarchyHCPaePD(PVM pVM, RTHCPHYS HCPhys, uint64_t u64Address, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
{
if (!pPD)
{
pHlp->pfnPrintf(pHlp, "%0*llx error! Page directory at HCPhys=%#VHp was not found in the page pool!\n",
return VERR_INVALID_PARAMETER;
}
int rc = VINF_SUCCESS;
{
{
fLongMode /*P R S A D G WT CD AT NX 4M a p ? */
? "%016llx 2 | P %c %c %c %c %c %s %s %s %s 4M %c%c%c %016llx\n"
: "%08llx 1 | P %c %c %c %c %c %s %s %s %s 4M %c%c%c %016llx\n",
Pde.u & X86_PDE_PAE_PG_MASK);
else
{
fLongMode /*P R S A D G WT CD AT NX 4M a p ? */
? "%016llx 2 | P %c %c %c %c %c %s %s .. %s 4K %c%c%c %016llx\n"
: "%08llx 1 | P %c %c %c %c %c %s %s .. %s 4K %c%c%c %016llx\n",
Pde.u & X86_PDE_PAE_PG_MASK);
if (cMaxDepth >= 1)
{
/** @todo what about using the page pool for mapping PTs? */
if (!(Pde.u & PGM_PDFLAGS_MAPPING))
else
{
{
{
pHlp->pfnPrintf(pHlp, "%0*llx error! Mapping error! PT %d has HCPhysPT=%VHp not %VHp is in the PD.\n",
}
}
}
int rc2 = VERR_INVALID_PARAMETER;
if (pPT)
else
}
}
}
}
return rc;
}
/**
* Dumps a PAE shadow page directory pointer table.
*
* @returns VBox status code (VINF_SUCCESS).
* @param pVM The VM handle.
* @param HCPhys The physical address of the page directory pointer table.
* @param u64Address The virtual address of the page table starts.
* @param cr4 The CR4, PSE is currently used.
* @param fLongMode Set if this a long mode table; clear if it's a legacy mode table.
* @param cMaxDepth The maximum depth.
* @param pHlp Pointer to the output functions.
*/
static int pgmR3DumpHierarchyHCPaePDPTR(PVM pVM, RTHCPHYS HCPhys, uint64_t u64Address, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
{
if (!pPDPTR)
{
pHlp->pfnPrintf(pHlp, "%0*llx error! Page directory pointer table at HCPhys=%#VHp was not found in the page pool!\n",
return VERR_INVALID_PARAMETER;
}
int rc = VINF_SUCCESS;
for (unsigned i = 0; i < c; i++)
{
{
if (fLongMode)
"%016llx 1 | P %c %c %c %c %c %s %s %s %s .. %c%c%c %016llx\n",
Pdpe.u & X86_PDPE_PG_MASK);
else
"%08x 0 | P %c %c %c %c %c %s %s %s %s .. %c%c%c %016llx\n",
i << X86_PDPTR_SHIFT,
Pdpe.u & X86_PDPE_PG_MASK);
if (cMaxDepth >= 1)
{
int rc2 = pgmR3DumpHierarchyHCPaePD(pVM, Pdpe.u & X86_PDPE_PG_MASK, u64Address + ((uint64_t)i << X86_PDPTR_SHIFT),
}
}
}
return rc;
}
/**
* Dumps a PAE shadow page map level 4 table (PML4).
*
* @returns VBox status code (VINF_SUCCESS).
* @param pVM The VM handle.
* @param HCPhys The physical address of the table.
* @param cr4 The CR4, PSE is currently used.
* @param cMaxDepth The maximum depth.
* @param pHlp Pointer to the output functions.
*/
static int pgmR3DumpHierarchyHcPaePML4(PVM pVM, RTHCPHYS HCPhys, uint32_t cr4, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
{
if (!pPML4)
{
pHlp->pfnPrintf(pHlp, "Page map level 4 at HCPhys=%#VHp was not found in the page pool!\n", HCPhys);
return VERR_INVALID_PARAMETER;
}
int rc = VINF_SUCCESS;
{
{
uint64_t u64Address = ((uint64_t)i << X86_PML4_SHIFT) | (((uint64_t)i >> (X86_PML4_SHIFT - X86_PDPTR_SHIFT - 1)) * 0xffff000000000000ULL);
"%016llx 0 | P %c %c %c %c %c %s %s %s %s .. %c%c%c %016llx\n",
Pml4e.u & X86_PML4E_PG_MASK);
if (cMaxDepth >= 1)
{
int rc2 = pgmR3DumpHierarchyHCPaePDPTR(pVM, Pml4e.u & X86_PML4E_PG_MASK, u64Address, cr4, true, cMaxDepth - 1, pHlp);
}
}
}
return rc;
}
/**
* Dumps a 32-bit shadow page table.
*
* @returns VBox status code (VINF_SUCCESS).
* @param pVM The VM handle.
* @param pPT Pointer to the page table.
* @param u32Address The virtual address this table starts at.
* @param pHlp Pointer to the output functions.
*/
{
{
{
"%08x 1 | P %c %c %c %c %c %s %s %s .. 4K %c%c%c %08x\n",
u32Address + (i << X86_PT_SHIFT),
Pte.u & X86_PDE_PG_MASK);
}
}
return VINF_SUCCESS;
}
/**
* Dumps a 32-bit shadow page directory and page tables.
*
* @returns VBox status code (VINF_SUCCESS).
* @param pVM The VM handle.
* @param cr3 The root of the hierarchy.
* @param cr4 The CR4, PSE is currently used.
* @param cMaxDepth How deep into the hierarchy the dumper should go.
* @param pHlp Pointer to the output functions.
*/
int pgmR3DumpHierarchyHC32BitPD(PVM pVM, uint32_t cr3, uint32_t cr4, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
{
if (!pPD)
{
pHlp->pfnPrintf(pHlp, "Page directory at %#x was not found in the page pool!\n", cr3 & X86_CR3_PAGE_MASK);
return VERR_INVALID_PARAMETER;
}
int rc = VINF_SUCCESS;
{
{
"%08x 0 | P %c %c %c %c %c %s %s %s .. 4M %c%c%c %08x\n",
Pde.u & X86_PDE4M_PG_MASK);
else
{
"%08x 0 | P %c %c %c %c %c %s %s .. .. 4K %c%c%c %08x\n",
Pde.u & X86_PDE_PG_MASK);
if (cMaxDepth >= 1)
{
/** @todo what about using the page pool for mapping PTs? */
if (!(Pde.u & PGM_PDFLAGS_MAPPING))
else
{
{
pHlp->pfnPrintf(pHlp, "%08x error! Mapping error! PT %d has HCPhysPT=%VHp not %VHp is in the PD.\n",
}
}
int rc2 = VERR_INVALID_PARAMETER;
if (pPT)
else
pHlp->pfnPrintf(pHlp, "%08x error! Page table at %#x was not found in the page pool!\n", u32Address, HCPhys);
}
}
}
}
return rc;
}
/**
* Dumps a 32-bit guest page table.
*
* @returns VBox status code (VINF_SUCCESS).
* @param pVM The VM handle.
* @param pPT Pointer to the page table.
* @param u32Address The virtual address this table starts at.
* @param PhysSearch Address to search for.
*/
{
{
{
Log(( /*P R S A D G WT CD AT NX 4M a m d */
"%08x 1 | P %c %c %c %c %c %s %s %s .. 4K %c%c%c %08x\n",
u32Address + (i << X86_PT_SHIFT),
Pte.u & X86_PDE_PG_MASK));
{
Log(("Found %VGp at %VGv -> flags=%llx\n", PhysSearch, (RTGCPTR)(u32Address + (i << X86_PT_SHIFT)), fPageShw));
}
}
}
return VINF_SUCCESS;
}
/**
* Dumps a 32-bit guest page directory and page tables.
*
* @returns VBox status code (VINF_SUCCESS).
* @param pVM The VM handle.
* @param cr3 The root of the hierarchy.
* @param cr4 The CR4, PSE is currently used.
* @param PhysSearch Address to search for.
*/
{
bool fLongMode = false;
{
return VERR_INVALID_PARAMETER;
}
Log(("cr3=%08x cr4=%08x%s\n"
"%-*s P - Present\n"
"%-*s | R/W - Read (0) / Write (1)\n"
"%-*s | | U/S - User (1) / Supervisor (0)\n"
"%-*s | | | A - Accessed\n"
"%-*s | | | | D - Dirty\n"
"%-*s | | | | | G - Global\n"
"%-*s | | | | | | WT - Write thru\n"
"%-*s | | | | | | | CD - Cache disable\n"
"%-*s | | | | | | | | AT - Attribute table (PAT)\n"
"%-*s | | | | | | | | | NX - No execute (K8)\n"
"%-*s | | | | | | | | | | 4K/4M/2M - Page size.\n"
"%-*s | | | | | | | | | | | AVL - a=allocated; m=mapping; d=track dirty;\n"
"%-*s | | | | | | | | | | | | p=permanent; v=validated;\n"
"%-*s Level | | | | | | | | | | | | Page\n"
/* xxxx n **** P R S A D G WT CD AT NX 4M AVL xxxxxxxxxxxxx
- W U - - - -- -- -- -- -- 010 */
{
{
Log(( /*P R S A D G WT CD AT NX 4M a m d */
"%08x 0 | P %c %c %c %c %c %s %s %s .. 4M %c%c%c %08x\n",
Pde.u & X86_PDE4M_PG_MASK));
/** @todo PhysSearch */
else
{
Log(( /*P R S A D G WT CD AT NX 4M a m d */
"%08x 0 | P %c %c %c %c %c %s %s .. .. 4K %c%c%c %08x\n",
Pde.u & X86_PDE_PG_MASK));
////if (cMaxDepth >= 1)
{
/** @todo what about using the page pool for mapping PTs? */
int rc2 = VERR_INVALID_PARAMETER;
if (pPT)
else
}
}
}
}
return rc;
}
/**
*
* @returns VBox status code (VINF_SUCCESS).
* @param pVM The VM handle.
* @param cr3 The root of the hierarchy.
* @param cr4 The cr4, only PAE and PSE is currently used.
* @param fLongMode Set if long mode, false if not long mode.
* @param cMaxDepth Number of levels to dump.
* @param pHlp Pointer to the output functions.
*/
PGMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint32_t cr3, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
{
if (!pHlp)
pHlp = DBGFR3InfoLogHlp();
if (!cMaxDepth)
return VINF_SUCCESS;
"cr3=%08x cr4=%08x%s\n"
"%-*s P - Present\n"
"%-*s | R/W - Read (0) / Write (1)\n"
"%-*s | | U/S - User (1) / Supervisor (0)\n"
"%-*s | | | A - Accessed\n"
"%-*s | | | | D - Dirty\n"
"%-*s | | | | | G - Global\n"
"%-*s | | | | | | WT - Write thru\n"
"%-*s | | | | | | | CD - Cache disable\n"
"%-*s | | | | | | | | AT - Attribute table (PAT)\n"
"%-*s | | | | | | | | | NX - No execute (K8)\n"
"%-*s | | | | | | | | | | 4K/4M/2M - Page size.\n"
"%-*s | | | | | | | | | | | AVL - a=allocated; m=mapping; d=track dirty;\n"
"%-*s | | | | | | | | | | | | p=permanent; v=validated;\n"
"%-*s Level | | | | | | | | | | | | Page\n"
/* xxxx n **** P R S A D G WT CD AT NX 4M AVL xxxxxxxxxxxxx
- W U - - - -- -- -- -- -- 010 */
if (cr4 & X86_CR4_PAE)
{
if (fLongMode)
return pgmR3DumpHierarchyHCPaePDPTR(pVM, cr3 & X86_CR3_PAE_PAGE_MASK, 0, cr4, false, cMaxDepth, pHlp);
}
}
#ifdef VBOX_WITH_DEBUGGER
/**
* The '.pgmram' command.
*
* @returns VBox status.
* @param pCmd Pointer to the command descriptor (as registered).
* @param pCmdHlp Pointer to command helper functions.
* @param pVM Pointer to the current VM (if any).
* @param paArgs Pointer to (readonly) array of arguments.
* @param cArgs Number of arguments in the array.
*/
static DECLCALLBACK(int) pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
/*
* Validate input.
*/
if (!pVM)
/*
* Dump the ranges.
*/
{
"%VGp - %VGp %p\n",
if (VBOX_FAILURE(rc))
return rc;
}
return VINF_SUCCESS;
}
/**
 * The '.pgmmap' command: dumps the guest-context mappings.
 *
 * @returns VBox status.
 * @param pCmd Pointer to the command descriptor (as registered).
 * @param pCmdHlp Pointer to command helper functions.
 * @param pVM Pointer to the current VM (if any).
 * @param paArgs Pointer to (readonly) array of arguments.
 * @param cArgs Number of arguments in the array.
 *
 * NOTE(review): statements are missing from this fragment -- the `if (!pVM)`
 * guard has no visible body, and the range format string is detached from
 * its print call.  Code left byte-identical.
 */
static DECLCALLBACK(int) pgmR3CmdMap(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
/*
* Validate input.
*/
if (!pVM)
/*
* Print message about the fixedness of the mappings.
*/
int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, pVM->pgm.s.fMappingsFixed ? "The mappings are FIXED.\n" : "The mappings are FLOATING.\n");
if (VBOX_FAILURE(rc))
return rc;
/*
* Dump the ranges.
*/
{
/* NOTE(review): presumably an argument of a pCmdHlp->pfnPrintf() call
 * printing one mapping per iteration; the surrounding statement and
 * loop were lost in this fragment. */
"%08x - %08x %s\n",
if (VBOX_FAILURE(rc))
return rc;
}
return VINF_SUCCESS;
}
/**
 * The '.pgmsync' command: forces a page directory sync.
 *
 * @returns VBox status.
 * @param pCmd Pointer to the command descriptor (as registered).
 * @param pCmdHlp Pointer to command helper functions.
 * @param pVM Pointer to the current VM (if any).
 * @param paArgs Pointer to (readonly) array of arguments.
 * @param cArgs Number of arguments in the array.
 *
 * NOTE(review): statements are missing from this fragment -- the `if (!pVM)`
 * guard has no visible body, and the statement that declares/assigns `rc`
 * (presumably the sync-forcing call itself) is not in view.  Code left
 * byte-identical.
 */
static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
/*
* Validate input.
*/
if (!pVM)
/*
* Force page directory sync.
*/
if (VBOX_FAILURE(rc))
return rc;
return VINF_SUCCESS;
}
/**
 * The '.pgmsyncalways' command: toggles permanent page directory syncing.
 *
 * @returns VBox status.
 * @param pCmd Pointer to the command descriptor (as registered).
 * @param pCmdHlp Pointer to command helper functions.
 * @param pVM Pointer to the current VM (if any).
 * @param paArgs Pointer to (readonly) array of arguments.
 * @param cArgs Number of arguments in the array.
 *
 * NOTE(review): this fragment is hollowed out -- the `if (!pVM)` guard has
 * no visible body, the condition selecting between the two branches is
 * missing, both branch bodies are empty, and the non-void function falls
 * off the end without a return.  Code left byte-identical.
 */
static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
/*
* Validate input.
*/
if (!pVM)
/*
* Force page directory sync.
*/
{
/* NOTE(review): branch body lost in this fragment. */
}
else
{
/* NOTE(review): branch body lost in this fragment. */
}
}
#endif
/**
 * pvUser argument of the pgmR3CheckIntegrity*Node callbacks.
 *
 * NOTE(review): the struct is truncated in this fragment -- its closing
 * brace and typedef name are missing, as are the fields the callbacks below
 * reference (pPrevPhys, pPrevVirt, pPrevPhys2Virt; verify against the
 * original revision).
 */
typedef struct PGMCHECKINTARGS
{
bool fLeftToRight; /**< true: left-to-right; false: right-to-left. */
/**
 * Validate a node in the physical handler tree.
 *
 * @returns 0 if ok, otherwise 1.
 * @param pNode The handler node.
 * @param pvUser pVM.
 *
 * NOTE(review): the declarations of `pCur`/`pArgs` and the opening of the
 * second AssertReleaseMsg (including its condition's first operand) are
 * missing from this fragment.  Code left byte-identical.
 */
static DECLCALLBACK(int) pgmR3CheckIntegrityPhysHandlerNode(PAVLROGCPHYSNODECORE pNode, void *pvUser)
{
/* A handler range must not be inverted. */
AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %VGp-%VGp %s\n", pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
/* Ordering check against the previously visited node; the comparison
 * direction follows the traversal direction (fLeftToRight). */
|| (pArgs->fLeftToRight ? pArgs->pPrevPhys->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys->Core.KeyLast > pCur->Core.Key),
("pPrevPhys=%p %VGp-%VGp %s\n"
" pCur=%p %VGp-%VGp %s\n",
pArgs->pPrevPhys, pArgs->pPrevPhys->Core.Key, pArgs->pPrevPhys->Core.KeyLast, pArgs->pPrevPhys->pszDesc,
return 0;
}
/**
 * Validate a node in the virtual handler tree.
 *
 * @returns 0 if ok, otherwise 1.
 * @param pNode The handler node.
 * @param pvUser pVM.
 *
 * NOTE(review): the declarations of `pCur`/`pArgs`, parts of the assert
 * argument lists, and the loop header over aPhysToVirt pages are missing
 * from this fragment.  Code left byte-identical.
 */
static DECLCALLBACK(int) pgmR3CheckIntegrityVirtHandlerNode(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
/* A handler range must not be inverted. */
AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %VGv-%VGv %s\n", pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
/* Ordering check against the previously visited node; the comparison
 * direction follows the traversal direction (fLeftToRight). */
|| (pArgs->fLeftToRight ? pArgs->pPrevVirt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevVirt->Core.KeyLast > pCur->Core.Key),
("pPrevVirt=%p %VGv-%VGv %s\n"
" pCur=%p %VGv-%VGv %s\n",
pArgs->pPrevVirt, pArgs->pPrevVirt->Core.Key, pArgs->pPrevVirt->Core.KeyLast, pArgs->pPrevVirt->pszDesc,
{
/* Each phys-to-virt entry must back-link to its containing handler
 * via the negated member offset. */
AssertReleaseMsg(pCur->aPhysToVirt[iPage].offVirtHandler == -RT_OFFSETOF(PGMVIRTHANDLER, aPhysToVirt[iPage]),
("pCur=%p %VGv-%VGv %s\n"
"iPage=%d offVirtHandle=%#x expected %#x\n",
}
return 0;
}
/**
 * Validate a node in the physical-to-virtual handler tree.
 *
 * @returns 0 if ok, otherwise 1.
 * @param pNode The handler node.
 * @param pvUser pVM.
 *
 * NOTE(review): the declarations of `pCur`/`pArgs`/`pCur2`, several assert
 * openings, and most assert argument lists are missing from this fragment;
 * the ordering-check fragment also appears duplicated (presumably extraction
 * damage, not two real checks).  Code left byte-identical.
 */
static DECLCALLBACK(int) pgmR3CheckIntegrityPhysToVirtHandlerNode(PAVLROGCPHYSNODECORE pNode, void *pvUser)
{
/* A handler range must not be inverted. */
AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %VGp-%VGp\n", pCur, pCur->Core.Key, pCur->Core.KeyLast));
/* Ordering check against the previously visited node (direction-sensitive). */
|| (pArgs->fLeftToRight ? pArgs->pPrevPhys2Virt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys2Virt->Core.KeyLast > pCur->Core.Key),
("pPrevPhys2Virt=%p %VGp-%VGp\n"
" pCur=%p %VGp-%VGp\n",
/* NOTE(review): apparent duplicate of the ordering check above. */
|| (pArgs->fLeftToRight ? pArgs->pPrevPhys2Virt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys2Virt->Core.KeyLast > pCur->Core.Key),
("pPrevPhys2Virt=%p %VGp-%VGp\n"
" pCur=%p %VGp-%VGp\n",
/* A tree node must be flagged as both in-tree and alias-list head. */
AssertReleaseMsg((pCur->offNextAlias & (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD)) == (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD),
("pCur=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
{
/* Walk the alias chain hanging off this head node. */
for (;;)
{
/* offNextAlias is a self-relative offset to the next alias entry. */
pCur2 = (PPGMPHYS2VIRTHANDLER)((intptr_t)pCur + (pCur->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
(" pCur=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
/* Alias entries must be in-tree but never heads. */
AssertReleaseMsg((pCur2->offNextAlias & (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD)) == PGMPHYS2VIRTHANDLER_IN_TREE,
(" pCur=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
"pCur2=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
(" pCur=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
"pCur2=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
(" pCur=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
"pCur2=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
break;
}
}
return 0;
}
/**
 * Perform an integrity check on the PGM component.
 *
 * @returns VINF_SUCCESS if everything is fine.
 * @returns VBox error status after asserting on integrity breach.
 * @param pVM The VM handle.
 *
 * NOTE(review): the function signature line is missing from this fragment
 * (presumably PGMR3DECL(int) PGMR3CheckIntegrity(PVM pVM) -- confirm against
 * the original revision), as are the declaration/initialization of `Args`
 * and the final return statement that converts cErrors into a status code.
 * Code left byte-identical.
 */
{
/*
* Check the trees.
*/
int cErrors = 0;
/* Walk each AVL tree in both directions, accumulating any violations
 * reported by the per-node callbacks above.  Args.fLeftToRight tells the
 * callback which ordering to expect. */
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesHC->PhysHandlers, true, pgmR3CheckIntegrityPhysHandlerNode, &Args);
Args.fLeftToRight = false;
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesHC->PhysHandlers, false, pgmR3CheckIntegrityPhysHandlerNode, &Args);
Args.fLeftToRight = true;
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesHC->VirtHandlers, true, pgmR3CheckIntegrityVirtHandlerNode, &Args);
Args.fLeftToRight = false;
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesHC->VirtHandlers, false, pgmR3CheckIntegrityVirtHandlerNode, &Args);
Args.fLeftToRight = true;
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesHC->PhysToVirtHandlers, true, pgmR3CheckIntegrityPhysToVirtHandlerNode, &Args);
Args.fLeftToRight = false;
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesHC->PhysToVirtHandlers, false, pgmR3CheckIntegrityPhysToVirtHandlerNode, &Args);
}
/**
 * Inform PGM if we want all mappings to be put into the shadow page table. (necessary for e.g. VMX)
 *
 * @returns VBox status code.
 * @param pVM VM handle.
 * @param fEnable Enable or disable shadow mappings
 *
 * NOTE(review): the function signature line is missing from this fragment,
 * as is the statement that records fEnable and re-fixes the mappings; only
 * the original comment and the success return remain.  Code left
 * byte-identical; confirm the signature against the original revision.
 */
{
/* Pretend the mappings are now fixed; to force a refresh of the reserved PDEs. */
return VINF_SUCCESS;
}