/* PGMPool.cpp, revision f18060397202298d86552c3c16a2f7473c852e5c */
/*
 * PGM Shadow Page Pool.
 *
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/** @page pg_pgm_pool       PGM Shadow Page Pool
 *
 * Motivations:
 *      -# Relationship between shadow page tables and physical guest pages. This
 *         should allow us to skip most of the global flushes now following access
 *         handler changes. The main expense is flushing shadow pages.
 *      -# Limit the pool size if necessary (default is kind of limitless).
 *      -# Allocate shadow pages from RC. We used to only do this in SyncCR3.
 *      -# Required for 64-bit guests.
 *      -# Combining the PD cache and page pool in order to simplify caching.
 *
 *
 * @section sec_pgm_pool_outline    Design Outline
 *
 * The shadow page pool tracks pages used for shadowing paging structures (i.e.
 * page tables, page directory, page directory pointer table and page map
 * level-4). Each page in the pool has a unique identifier. This identifier is
 * used to link a guest physical page to a shadow PT. The identifier is a
 * non-zero value and has a relatively low max value - say 14 bits. This makes it
 * possible to fit it into the upper bits of the aHCPhys entries in the
 * ram range.
 *
 * By restricting host physical memory to the first 48 bits (which is the
 * announced physical memory range of the K8L chip (scheduled for 2008)), we
 * can safely use the upper 16 bits for shadow page ID and reference counting.
 *
 * Update: The 48 bit assumption will be lifted with the new physical memory
 * management (PGMPAGE), so we won't have any trouble when someone stuffs 2TB
 * into a box in some years.
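 *
 * As an illustration only (the actual aHCPhys/PGMPAGE layout is defined by the
 * physical memory management code, so the helper name and bit positions below
 * are assumptions), packing a 14-bit shadow page ID and a small reference count
 * into the upper 16 bits of a 64-bit entry could look like this:
 * @code
 *      // Hypothetical packing sketch: low 48 bits hold the host physical
 *      // address, bits 48..61 a 14-bit pool page index, bits 62..63 a tiny
 *      // reference state.
 *      static uint64_t pgmPoolExamplePackEntry(uint64_t HCPhys, uint16_t idxPoolPage, uint8_t cRefs)
 *      {
 *          uint64_t u64 = HCPhys & UINT64_C(0x0000ffffffffffff); // low 48 bits: host physical address
 *          u64 |= (uint64_t)(idxPoolPage & 0x3fff) << 48;        // 14-bit shadow page pool index
 *          u64 |= (uint64_t)(cRefs       & 0x3)    << 62;        // small reference state in the top bits
 *          return u64;
 *      }
 * @endcode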
 *
 * Now, it's possible for a page to be aliased, i.e. mapped by more than one PT
 * or PD. This is solved by creating a list of physical cross reference extents
 * whenever this happens. Each node in the list (extent) can contain 3 page
 * pool indexes. The list itself is chained using indexes into the paPhysExt
 * array.
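 *
 * As a rough sketch of such an extent node (the real structure lives in the
 * PGM internals; field names and padding here are assumptions):
 * @code
 *      // Hypothetical layout: three pool page indexes plus a link to the
 *      // next extent in the paPhysExt array (a NIL index terminates the list).
 *      typedef struct EXAMPLEPHYSEXT
 *      {
 *          uint16_t    iNext;      // index of the next extent, NIL ends the chain
 *          uint16_t    aidx[3];    // up to three shadow page pool indexes
 *      } EXAMPLEPHYSEXT;
 * @endcode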
 *
 *
 * @section sec_pgm_pool_life       Life Cycle of a Shadow Page
 *
 * -# The SyncPT function requests a page from the pool (see the sketch after
 *    this list).
 *    The request includes the kind of page it is (PT/PD, PAE/legacy), the
 *    address of the page it's shadowing, and more.
 * -# The pool responds to the request by allocating a new page.
 *    When the cache is enabled, it will first check if it's in the cache.
 *    Should the pool be exhausted, one of two things can be done:
 *      -# Flush the whole pool and the current CR3.
 *      -# Use the cache to find a page which can be flushed (~age).
 * -# The SyncPT function will sync one or more pages and insert them into the
 *    shadow PD.
 * -# The SyncPage function may sync more pages on later \#PFs.
 * -# The page is freed / flushed in SyncCR3 (perhaps) and some other cases.
 *    When caching is enabled, the page isn't flushed but remains in the cache.
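 *
 * For orientation, a minimal sketch of that request/free pair as SyncPT might
 * drive it. The parameter lists are illustrative only; the real pgmPoolAlloc
 * and pgmPoolFree signatures in this component may take additional or
 * differently ordered user-tracking arguments:
 * @code
 *      // Ask the pool for a shadow page for the guest structure at GCPhys.
 *      PPGMPOOLPAGE pShwPage;
 *      int rc = pgmPoolAlloc(pVM, GCPhys, enmKind, iUser, iUserTable, &pShwPage);
 *      // ... map it, sync the entries, hook it into the shadow PD ...
 *      // Later, when the shadow PD entry referencing it is torn down:
 *      pgmPoolFree(pVM, pShwPage->Core.Key, iUser, iUserTable);
 * @endcode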
 *
 *
 * @section sec_pgm_pool_monitoring Monitoring
 *
 * We always monitor PAGE_SIZE chunks of memory. When we've got multiple shadow
 * pages for the same PAGE_SIZE of guest memory (PAE and mixed PD/PT) the pages
 * sharing the monitor get linked using the iMonitoredNext/Prev. The head page
 * is passed as the pvUser to the access handlers.
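 *
 * As a sketch of walking such a chain from the access handler (the field and
 * constant names follow the description above; the exact types live in the PGM
 * internals, so treat this as an illustration):
 * @code
 *      static void pgmPoolExampleWalkMonitorChain(PPGMPOOL pPool, void *pvUser)
 *      {
 *          PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)pvUser;   // head page of the monitor chain
 *          for (uint16_t i = pPage->idx; i != NIL_PGMPOOL_IDX; i = pPool->aPages[i].iMonitoredNext)
 *          {
 *              // pPool->aPages[i] shadows (part of) the same guest PAGE_SIZE chunk.
 *          }
 *      }
 * @endcode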
 *
 *
 * @section sec_pgm_pool_impl       Implementation
 *
 * The pool will take pages from the MM page pool. The tracking data
 * (attributes, bitmaps and so on) are allocated from the hypervisor heap. The
 * pool content can be accessed both by using the page id and the physical
 * address (HC). The former is managed by means of an array, the latter by an
 * offset based AVL tree.
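 *
 * A sketch of the two lookup paths (the aPages array appears elsewhere in this
 * file; the tree member name and the IPRT AVL accessor used here are
 * assumptions for illustration):
 * @code
 *      // By page id: direct array indexing.
 *      PPGMPOOLPAGE pByIdx  = &pPool->aPages[idx];
 *      // By host physical address: offset-based AVL tree keyed on HCPhys.
 *      PPGMPOOLPAGE pByPhys = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys);
 * @endcode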
 *
 * Flushing of a pool page means that we iterate the content (we know what kind
 * it is) and update the link information in the ram range.
 *
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
#ifdef PGMPOOL_WITH_MONITORING
static DECLCALLBACK(int) pgmR3PoolAccessHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
#endif /* PGMPOOL_WITH_MONITORING */

/**
 * Initializes the pool.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
    /*
     * Query Pool config.
     */
    PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/PGM/Pool");

    /** @cfgm{/PGM/Pool/MaxPages, uint16_t, #pages, 16, 0x3fff, 1024}
     * The max size of the shadow page pool in pages. The pool will grow dynamically
     * up to this limit. */
    uint16_t cMaxPages;
    int rc = CFGMR3QueryU16Def(pCfg, "MaxPages", &cMaxPages, 4*_1M >> PAGE_SHIFT);
    AssertLogRelMsgReturn(cMaxPages <= PGMPOOL_IDX_LAST && cMaxPages >= RT_ALIGN(PGMPOOL_IDX_FIRST, 16),
                          ("cMaxPages=%u (%#x)\n", cMaxPages, cMaxPages), VERR_INVALID_PARAMETER);
    /** @cfgm{/PGM/Pool/MaxPhysExts, uint16_t, #extents, 16, MaxPages * 2, MAX(MaxPages*2,0x3fff)}
     * The max number of physical cross reference extent tracking structures. */
    uint16_t cMaxPhysExts;
    rc = CFGMR3QueryU16Def(pCfg, "MaxPhysExts", &cMaxPhysExts, RT_MAX(cMaxPages * 2, PGMPOOL_IDX_LAST));
    /** @cfgm{/PGM/Pool/CacheEnabled, bool, true}
     * Enables or disables caching of shadow pages. */
    bool fCacheEnabled;
    rc = CFGMR3QueryBoolDef(pCfg, "CacheEnabled", &fCacheEnabled, true);
#ifdef PGMPOOL_WITH_USER_TRACKING
#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
return rc;
#ifdef PGMPOOL_WITH_USER_TRACKING
for (unsigned i = 0; i < cMaxUsers; i++)
#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
for (unsigned i = 0; i < cMaxPhysExts; i++)
#ifdef PGMPOOL_WITH_CACHE
#ifdef PGMPOOL_WITH_MONITORING
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
pPool->aPages[PGMPOOL_IDX_AMD64_CR3].pvPageR3 = pVM->pgm.s.pShwPaePdptR3; /* not used - isn't it wrong as well? */
#ifdef PGMPOOL_WITH_USER_TRACKING
#ifdef PGMPOOL_WITH_MONITORING
#ifdef PGMPOOL_WITH_CACHE
#ifdef VBOX_WITH_STATISTICS
STAM_REG(pVM, &pPool->cCurPages, STAMTYPE_U16, "/PGM/Pool/cCurPages", STAMUNIT_PAGES, "Current pool size.");
STAM_REG(pVM, &pPool->cMaxPages, STAMTYPE_U16, "/PGM/Pool/cMaxPages", STAMUNIT_PAGES, "Max pool size.");
STAM_REG(pVM, &pPool->cUsedPages, STAMTYPE_U16, "/PGM/Pool/cUsedPages", STAMUNIT_PAGES, "The number of pages currently in use.");
STAM_REG(pVM, &pPool->cUsedPagesHigh, STAMTYPE_U16_RESET, "/PGM/Pool/cUsedPagesHigh", STAMUNIT_PAGES, "The high watermark for cUsedPages.");
STAM_REG(pVM, &pPool->StatAlloc, STAMTYPE_PROFILE_ADV, "/PGM/Pool/Alloc", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolAlloc.");
STAM_REG(pVM, &pPool->StatClearAll, STAMTYPE_PROFILE, "/PGM/Pool/ClearAll", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolClearAll.");
STAM_REG(pVM, &pPool->StatFlushAllInt, STAMTYPE_PROFILE, "/PGM/Pool/FlushAllInt", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolFlushAllInt.");
STAM_REG(pVM, &pPool->StatFlushPage, STAMTYPE_PROFILE, "/PGM/Pool/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolFlushPage.");
STAM_REG(pVM, &pPool->StatFree, STAMTYPE_PROFILE, "/PGM/Pool/Free", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolFree.");
STAM_REG(pVM, &pPool->StatZeroPage, STAMTYPE_PROFILE, "/PGM/Pool/ZeroPage", STAMUNIT_TICKS_PER_CALL, "Profiling time spent zeroing pages. Overlaps with Alloc.");
# ifdef PGMPOOL_WITH_USER_TRACKING
STAM_REG(pVM, &pPool->cMaxUsers, STAMTYPE_U16, "/PGM/Pool/Track/cMaxUsers", STAMUNIT_COUNT, "Max user tracking records.");
STAM_REG(pVM, &pPool->cPresent, STAMTYPE_U32, "/PGM/Pool/Track/cPresent", STAMUNIT_COUNT, "Number of present page table entries.");
STAM_REG(pVM, &pPool->StatTrackDeref, STAMTYPE_PROFILE, "/PGM/Pool/Track/Deref", STAMUNIT_OCCURENCES, "Profiling of pgmPoolTrackDeref.");
STAM_REG(pVM, &pPool->StatTrackFlushGCPhysPT, STAMTYPE_PROFILE, "/PGM/Pool/Track/FlushGCPhysPT", STAMUNIT_OCCURENCES, "Profiling of pgmPoolTrackFlushGCPhysPT.");
STAM_REG(pVM, &pPool->StatTrackFlushGCPhysPTs, STAMTYPE_PROFILE, "/PGM/Pool/Track/FlushGCPhysPTs", STAMUNIT_OCCURENCES, "Profiling of pgmPoolTrackFlushGCPhysPTs.");
STAM_REG(pVM, &pPool->StatTrackFlushGCPhysPTsSlow, STAMTYPE_PROFILE, "/PGM/Pool/Track/FlushGCPhysPTsSlow", STAMUNIT_OCCURENCES, "Profiling of pgmPoolTrackFlushGCPhysPTsSlow.");
STAM_REG(pVM, &pPool->StatTrackFreeUpOneUser, STAMTYPE_COUNTER, "/PGM/Pool/Track/FreeUpOneUser", STAMUNIT_OCCURENCES, "The number of times we were out of user tracking records.");
# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
STAM_REG(pVM, &pPool->StatTrackDerefGCPhys, STAMTYPE_PROFILE, "/PGM/Pool/Track/DrefGCPhys", STAMUNIT_OCCURENCES, "Profiling deref activity related to tracking GC physical pages.");
STAM_REG(pVM, &pPool->StatTrackLinearRamSearches, STAMTYPE_COUNTER, "/PGM/Pool/Track/LinearRamSearches", STAMUNIT_OCCURENCES, "The number of times we had to do linear ram searches.");
STAM_REG(pVM, &pPool->StamTrackPhysExtAllocFailures,STAMTYPE_COUNTER, "/PGM/Pool/Track/PhysExtAllocFailures", STAMUNIT_OCCURENCES, "The number of failing pgmPoolTrackPhysExtAlloc calls.");
# ifdef PGMPOOL_WITH_MONITORING
STAM_REG(pVM, &pPool->StatMonitorRZ, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ", STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 access handler.");
STAM_REG(pVM, &pPool->StatMonitorRZEmulateInstr, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/EmulateInstr", STAMUNIT_OCCURENCES, "Times we've failed interpreting the instruction.");
STAM_REG(pVM, &pPool->StatMonitorRZFlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler.");
STAM_REG(pVM, &pPool->StatMonitorRZFork, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fork", STAMUNIT_OCCURENCES, "Times we've detected fork().");
STAM_REG(pVM, &pPool->StatMonitorRZHandled, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/Handled", STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 access we've handled (except REP STOSD).");
STAM_REG(pVM, &pPool->StatMonitorRZIntrFailPatch1, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IntrFailPatch1", STAMUNIT_OCCURENCES, "Times we've failed interpreting a patch code instruction.");
STAM_REG(pVM, &pPool->StatMonitorRZIntrFailPatch2, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IntrFailPatch2", STAMUNIT_OCCURENCES, "Times we've failed interpreting a patch code instruction during flushing.");
STAM_REG(pVM, &pPool->StatMonitorRZRepPrefix, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/RepPrefix", STAMUNIT_OCCURENCES, "The number of times we've seen rep prefixes we can't handle.");
STAM_REG(pVM, &pPool->StatMonitorRZRepStosd, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/RepStosd", STAMUNIT_TICKS_PER_CALL, "Profiling the REP STOSD cases we've handled.");
STAM_REG(pVM, &pPool->StatMonitorR3, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3", STAMUNIT_TICKS_PER_CALL, "Profiling the R3 access handler.");
STAM_REG(pVM, &pPool->StatMonitorR3EmulateInstr, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/EmulateInstr", STAMUNIT_OCCURENCES, "Times we've failed interpreting the instruction.");
STAM_REG(pVM, &pPool->StatMonitorR3FlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the R3 access handler.");
STAM_REG(pVM, &pPool->StatMonitorR3Fork, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fork", STAMUNIT_OCCURENCES, "Times we've detected fork().");
STAM_REG(pVM, &pPool->StatMonitorR3Handled, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Handled", STAMUNIT_TICKS_PER_CALL, "Profiling the R3 access we've handled (except REP STOSD).");
STAM_REG(pVM, &pPool->StatMonitorR3RepPrefix, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/RepPrefix", STAMUNIT_OCCURENCES, "The number of times we've seen rep prefixes we can't handle.");
STAM_REG(pVM, &pPool->StatMonitorR3RepStosd, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/RepStosd", STAMUNIT_TICKS_PER_CALL, "Profiling the REP STOSD cases we've handled.");
STAM_REG(pVM, &pPool->StatMonitorR3Async, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Async", STAMUNIT_OCCURENCES, "Times we're called in an async thread and need to flush.");
STAM_REG(pVM, &pPool->cModifiedPages, STAMTYPE_U16, "/PGM/Pool/Monitor/cModifiedPages", STAMUNIT_PAGES, "The current cModifiedPages value.");
STAM_REG(pVM, &pPool->cModifiedPagesHigh, STAMTYPE_U16_RESET, "/PGM/Pool/Monitor/cModifiedPagesHigh", STAMUNIT_PAGES, "The high watermark for cModifiedPages.");
# ifdef PGMPOOL_WITH_CACHE
STAM_REG(pVM, &pPool->StatCacheHits, STAMTYPE_COUNTER, "/PGM/Pool/Cache/Hits", STAMUNIT_OCCURENCES, "The number of pgmPoolAlloc calls satisfied by the cache.");
STAM_REG(pVM, &pPool->StatCacheMisses, STAMTYPE_COUNTER, "/PGM/Pool/Cache/Misses", STAMUNIT_OCCURENCES, "The number of pgmPoolAlloc calls not satisfied by the cache.");
STAM_REG(pVM, &pPool->StatCacheKindMismatches, STAMTYPE_COUNTER, "/PGM/Pool/Cache/KindMismatches", STAMUNIT_OCCURENCES, "The number of shadow page kind mismatches. (Better be low, preferably 0!)");
STAM_REG(pVM, &pPool->StatCacheFreeUpOne, STAMTYPE_COUNTER, "/PGM/Pool/Cache/FreeUpOne", STAMUNIT_OCCURENCES, "The number of times the cache was asked to free up a page.");
STAM_REG(pVM, &pPool->StatCacheCacheable, STAMTYPE_COUNTER, "/PGM/Pool/Cache/Cacheable", STAMUNIT_OCCURENCES, "The number of cacheable allocations.");
STAM_REG(pVM, &pPool->StatCacheUncacheable, STAMTYPE_COUNTER, "/PGM/Pool/Cache/Uncacheable", STAMUNIT_OCCURENCES, "The number of uncacheable allocations.");
return VINF_SUCCESS;
#ifdef PGMPOOL_WITH_USER_TRACKING
#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
#ifdef PGMPOOL_WITH_MONITORING
int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "pgmPoolAccessHandler", &pVM->pgm.s.pPoolR3->pfnAccessHandlerRC);
rc = PDMR3LdrGetSymbolR0(pVM, NULL, "pgmPoolAccessHandler", &pVM->pgm.s.pPoolR3->pfnAccessHandlerR0);
#ifdef PGMPOOL_WITH_USER_TRACKING
#ifdef PGMPOOL_WITH_MONITORING
#ifdef PGMPOOL_WITH_CACHE
return VINF_SUCCESS;
#ifdef PGMPOOL_WITH_MONITORING
static DECLCALLBACK(int) pgmR3PoolAccessHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
LogFlow(("pgmR3PoolAccessHandler: GCPhys=%RGp %p:{.Core=%RHp, .idx=%d, .GCPhys=%RGp, .enmType=%d}\n",
Log(("pgmR3PoolAccessHandler: async thread, requesting EMT to flush the page: %p:{.Core=%RHp, .idx=%d, .GCPhys=%RGp, .enmType=%d}\n",
int rc = VMR3ReqCallEx(pPool->pVMR3, VMREQDEST_ANY, NULL, 0, VMREQFLAGS_NO_WAIT | VMREQFLAGS_VOID, (PFNRT)pgmR3PoolFlushReusedPage, 2, pPool, pPage);
pgmPoolMonitorChainFlush(pPool, pPage); /* ASSUME that VERR_PGM_POOL_CLEARED can be ignored here and that FFs will deal with it in due time. */
return VINF_PGM_HANDLER_DO_DEFAULT;