/* $Id$ */
/** @file
* PGM - Page Manager and Monitor, ring-0 dynamic mapping cache.
*/
/*
* Copyright (C) 2008 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include "../PGMInternal.h"
#include "../PGMInline.h"
#include <iprt/asm-amd64-x86.h>
#include <iprt/semaphore.h>
#include <iprt/spinlock.h>
/*******************************************************************************
* Defined Constants And Macros *
*******************************************************************************/
/** The max size of the mapping cache (in pages). */
/** The small segment size that is adopted on out-of-memory conditions with a
* single big segment. */
#define PGMR0DYNMAP_SMALL_SEG_PAGES 128
/** The number of pages we reserve per CPU. */
#define PGMR0DYNMAP_PAGES_PER_CPU 256
/** The minimum number of pages we reserve per CPU.
* This must be equal or larger than the autoset size. */
#define PGMR0DYNMAP_PAGES_PER_CPU_MIN 64
/** The number of guard pages.
* @remarks Never do tuning of the hashing or whatnot with a strict build! */
#if defined(VBOX_STRICT)
# define PGMR0DYNMAP_GUARD_PAGES 1
#else
# define PGMR0DYNMAP_GUARD_PAGES 0
#endif
/** The dummy physical address of guard pages. */
/** The dummy reference count of guard pages. (Must be non-zero.) */
#if 0
/** Define this to just clear the present bit on guard pages.
 * The alternative is to replace the entire PTE with a bad not-present
* PTE. Either way, XNU will screw us. :-/ */
#define PGMR0DYNMAP_GUARD_NP
#endif
/** The dummy PTE value for a legacy (32-bit) page. */
/** The dummy PTE value for a PAE page. */
/** Calculates the overload threshold. Currently set at 50%. */
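/* The macro itself is not shown in this excerpt. Purely as an illustrative
 * assumption (hypothetical name and operands), a 50% threshold over the
 * usable, non-guard pages could be expressed like this: */
#if 0 /* illustrative sketch only, not the real definition */
# define PGMR0DYNMAP_CALC_OVERLOAD(pThis)   (((pThis)->cPages - (pThis)->cGuardPages) / 2)
#endif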
#if 0
/* Assertions cause panics if preemption is disabled; this can be used to work around that. */
//#define RTSpinlockAcquire(a,b) do {} while (0)
//#define RTSpinlockRelease(a,b) do {} while (0)
#endif
/*******************************************************************************
* Structures and Typedefs *
*******************************************************************************/
/**
* Ring-0 dynamic mapping cache segment.
*
* The dynamic mapping cache can be extended with additional segments if the
 * load is found to be too high. This is done the next time a VM is created,
 * under the protection of the init mutex. The arrays are reallocated and the
 * new segment is added to the end of them. Nothing is rehashed of course, as
 * the indexes / addresses must remain unchanged.
* indexes / addresses must remain unchanged.
*
* This structure is only modified while owning the init mutex or during module
* init / term.
*/
typedef struct PGMR0DYNMAPSEG
{
/** Pointer to the next segment. */
struct PGMR0DYNMAPSEG *pNext;
/** The memory object for the virtual address range that we're abusing. */
/** The start page in the cache. (I.e. index into the arrays.) */
/** The number of pages this segment contributes. */
/** The number of page tables. */
/** The memory objects for the page tables. */
/** Pointer to a ring-0 dynamic mapping cache segment. */
typedef PGMR0DYNMAPSEG *PPGMR0DYNMAPSEG;
/**
* Ring-0 dynamic mapping cache entry.
*
 * This structure tracks a single page in the mapping cache: its physical
 * address, its current mapping, the PTE backing it and the reference count.
*/
typedef struct PGMR0DYNMAPENTRY
{
/** The physical address of the currently mapped page.
 * This is duplicated for three reasons: cache locality, cache policy of the PT
* mappings and sanity checks. */
/** Pointer to the page. */
void *pvPage;
/** The number of references. */
/** PTE pointer union. */
union PGMR0DYNMAPENTRY_PPTE
{
/** PTE pointer, 32-bit legacy version. */
/** PTE pointer, PAE version. */
/** PTE pointer, the void version. */
void *pv;
} uPte;
/** CPUs that haven't invalidated this entry after its last update. */
/** Pointer to a ring-0 dynamic mapping cache entry. */
typedef PGMR0DYNMAPENTRY *PPGMR0DYNMAPENTRY;
/**
* Ring-0 dynamic mapping cache.
*
* This is initialized during VMMR0 module init but no segments are allocated at
* that time. Segments will be added when the first VM is started and removed
 * again when the last VM shuts down, thus avoiding memory consumption while dormant.
* At module termination, the remaining bits will be freed up.
*/
typedef struct PGMR0DYNMAP
{
/** The usual magic number / eye catcher (PGMR0DYNMAP_MAGIC). */
/** Spinlock serializing the normal operation of the cache. */
/** Array for tracking and managing the pages. */
/** The cache size given as a number of pages. */
bool fLegacyMode;
/** The current load.
* This does not include guard pages. */
/** The max load ever.
 * This is maintained to trigger the addition of more mapping space. */
/** Initialization / termination lock. */
/** The number of guard pages. */
/** The number of users (protected by hInitLock). */
/** Array containing a copy of the original page tables.
* The entries are either X86PTE or X86PTEPAE according to fLegacyMode. */
void *pvSavedPTEs;
/** List of segments. */
/** The paging mode. */
} PGMR0DYNMAP;
/** Pointer to the ring-0 dynamic mapping cache */
typedef PGMR0DYNMAP *PPGMR0DYNMAP;
/** PGMR0DYNMAP::u32Magic. (Jens Christian Bugge Wesseltoft) */
#define PGMR0DYNMAP_MAGIC 0x19640201
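/* As described at the PGMR0DYNMAPSEG definition above, segments are only
 * linked in while holding the init mutex and existing page indexes stay put.
 * A minimal sketch of that linking step follows; the list-head member name
 * (pSegHead) is an assumption, since it is not spelled out in this excerpt. */
#if 0 /* illustrative sketch only */
static void pgmR0DynMapLinkSegSketch(PPGMR0DYNMAP pThis, PPGMR0DYNMAPSEG pSeg)
{
    /* Hook the new segment into the list; nothing is rehashed and no existing
       index or address changes. */
    pSeg->pNext     = pThis->pSegHead;
    pThis->pSegHead = pSeg;
}
#endif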
/**
* Paging level data.
*/
typedef struct PGMR0DYNMAPPGLVL
{
struct
{
union
{
void *pv; /**< hMapObj address. */
} u;
} a[4];
/** Pointer to paging level data. */
typedef PGMR0DYNMAPPGLVL *PPGMR0DYNMAPPGLVL;
/*******************************************************************************
* Global Variables *
*******************************************************************************/
/** Pointer to the ring-0 dynamic mapping cache. */
static PPGMR0DYNMAP g_pPGMR0DynMap;
/** For overflow testing. */
static bool g_fPGMR0DynMapTestRunning = false;
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
#if 0 /*def DEBUG*/
#endif
/**
* Initializes the ring-0 dynamic mapping cache.
*
* @returns VBox status code.
*/
VMMR0DECL(int) PGMR0DynMapInit(void)
{
/*
* Create and initialize the cache instance.
*/
int rc = VINF_SUCCESS;
{
case SUPPAGINGMODE_32_BIT:
pThis->fLegacyMode = true;
break;
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_GLOBAL:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
pThis->fLegacyMode = false;
break;
default:
break;
}
if (RT_SUCCESS(rc))
{
if (RT_SUCCESS(rc))
{
if (RT_SUCCESS(rc))
{
return VINF_SUCCESS;
}
}
}
return rc;
}
/**
* Terminates the ring-0 dynamic mapping cache.
*/
VMMR0DECL(void) PGMR0DynMapTerm(void)
{
/*
* Destroy the cache.
*
 * There are not supposed to be any races here; the loader should
 * make sure of that. So, don't bother locking anything.
 *
 * The VM objects should all be destroyed by now, so there are no
 * dangling users or anything like that to clean up. This routine
* is just a mirror image of PGMR0DynMapInit.
*/
if (pThis)
{
/* This should *never* happen, but in case it does try not to leak memory. */
("cUsers=%d paPages=%p pvSavedPTEs=%p cPages=%#x\n",
/* Free the associated resources. */
}
}
/**
* Initializes the dynamic mapping cache for a new VM.
*
* @returns VBox status code.
* @param pVM Pointer to the shared VM structure.
*/
{
AssertMsgReturn(!pVM->pgm.s.pvR0DynMapUsed, ("%p (pThis=%p)\n", pVM->pgm.s.pvR0DynMapUsed, g_pPGMR0DynMap), VERR_WRONG_ORDER);
/*
* Initialize the auto sets.
*/
while (idCpu-- > 0)
{
while (j-- > 0)
{
}
}
/*
* Do we need the cache? Skip the last bit if we don't.
*/
if (!VMMIsHwVirtExtForced(pVM))
return VINF_SUCCESS;
/*
* Reference and if necessary setup or expand the cache.
*/
{
#if 0 /*def DEBUG*/
if (RT_SUCCESS(rc))
{
if (RT_FAILURE(rc))
}
#endif
}
if (RT_SUCCESS(rc))
else
return rc;
}
/**
* Terminates the dynamic mapping cache usage for a VM.
*
* @param pVM Pointer to the shared VM structure.
*/
{
/*
* Return immediately if we're not using the cache.
*/
return;
{
#ifdef VBOX_STRICT
#endif
/*
* Clean up and check the auto sets.
*/
while (idCpu-- > 0)
{
{
/*
* The set is open, close it.
*/
while (j-- > 0)
{
else
}
}
else
while (j-- > 0)
{
}
}
/*
* Release our reference to the mapping cache.
*/
}
else
}
/**
* Shoots down the TLBs for all the cache pages, pgmR0DynMapTearDown helper.
*
* @param idCpu The current CPU.
* @param pvUser1 The dynamic mapping cache instance.
* @param pvUser2 Unused, NULL.
*/
{
while (iPage-- > 0)
}
/**
* Shoot down the TLBs for every single cache entry on all CPUs.
*
* @returns IPRT status code (RTMpOnAll).
* @param pThis The dynamic mapping cache instance.
*/
{
if (RT_FAILURE(rc))
{
while (iPage-- > 0)
}
return rc;
}
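/* For illustration, this is roughly how such a shoot-down is driven with
 * RTMpOnAll: the worker runs on every online CPU and invalidates each cache
 * page locally. The paPages member and the exact invalidation call are
 * assumptions (the elided code may differ), hence the sketch is disabled. */
#if 0 /* illustrative sketch only */
static DECLCALLBACK(void) pgmR0DynMapShootDownTlbsSketchWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PPGMR0DYNMAP pThis = (PPGMR0DYNMAP)pvUser1;
    NOREF(idCpu); NOREF(pvUser2);

    uint32_t iPage = pThis->cPages;
    while (iPage-- > 0)
        ASMInvalidatePage(pThis->paPages[iPage].pvPage); /* exact signature varies between IPRT versions */
}

static int pgmR0DynMapTlbShootDownSketch(PPGMR0DYNMAP pThis)
{
    return RTMpOnAll(pgmR0DynMapShootDownTlbsSketchWorker, pThis, NULL);
}
#endif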
/**
* Calculate the new cache size based on cMaxLoad statistics.
*
* @returns Number of pages.
* @param pThis The dynamic mapping cache instance.
* @param pcMinPages The minimal size in pages.
*/
{
/* cCpus * PGMR0DYNMAP_PAGES_PER_CPU(_MIN). */
/* adjust against cMaxLoad. */
/* adjust against max and current size. */
if (cPages > PGMR0DYNMAP_MAX_PAGES)
if (cMinPages > PGMR0DYNMAP_MAX_PAGES)
*pcMinPages = cMinPages;
return cPages;
}
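/* An illustrative reading of the three steps commented above; the cMaxLoad
 * head-room factor (times two) is an assumption, not the elided code. */
#if 0 /* illustrative sketch only */
static uint32_t pgmR0DynMapCalcNewSizeSketch(PPGMR0DYNMAP pThis, uint32_t *pcMinPages)
{
    /* cCpus * PGMR0DYNMAP_PAGES_PER_CPU(_MIN). */
    uint32_t const cCpus     = RTMpGetCount();
    uint32_t       cPages    = cCpus * PGMR0DYNMAP_PAGES_PER_CPU;
    uint32_t       cMinPages = cCpus * PGMR0DYNMAP_PAGES_PER_CPU_MIN;

    /* adjust against cMaxLoad. */
    if (cPages < pThis->cMaxLoad * 2)
        cPages = pThis->cMaxLoad * 2;

    /* adjust against max and current size. */
    if (cPages > PGMR0DYNMAP_MAX_PAGES)
        cPages = PGMR0DYNMAP_MAX_PAGES;
    if (cMinPages > PGMR0DYNMAP_MAX_PAGES)
        cMinPages = PGMR0DYNMAP_MAX_PAGES;

    *pcMinPages = cMinPages;
    return cPages;
}
#endif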
/**
* Initializes the paging level data.
*
* @param pThis The dynamic mapping cache instance.
* @param pPgLvl The paging level data.
*/
{
{
case SUPPAGINGMODE_32_BIT:
break;
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_GLOBAL:
case SUPPAGINGMODE_PAE_NX:
break;
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
break;
default:
AssertFailed();
break;
}
{
}
}
/**
* Maps a PTE.
*
* This will update the segment structure when new PTs are mapped.
*
* It also assumes that we (for paranoid reasons) wish to establish a mapping
 * chain from CR3 down to the PT that corresponds entirely to the processor
 * we're currently running on, and goes about this by running with interrupts
 * disabled and restarting from CR3 after every change.
*
* @returns VBox status code, VINF_TRY_AGAIN if we changed any mappings and had
* to re-enable interrupts.
* @param pThis The dynamic mapping cache instance.
* @param pPgLvl The paging level structure.
* @param pvPage The page.
* @param pSeg The segment.
* @param cMaxPTs The max number of PTs expected in the segment.
* @param ppvPTE Where to store the PTE address.
*/
{
{
{
/*
* Need to remap this level.
* The final level, the PT, will not be freed since that is what it's all about.
*/
ASMIntEnable();
else
{
}
int rc = RTR0MemObjEnterPhys(&pPgLvl->a[i].hMemObj, HCPhys, PAGE_SIZE, RTMEM_CACHE_POLICY_DONT_CARE);
if (RT_SUCCESS(rc))
{
(void *)-1 /* pvFixed */, 0 /* cbAlignment */,
if (RT_SUCCESS(rc))
{
AssertMsg(((uintptr_t)pPgLvl->a[i].u.pv & ~(uintptr_t)PAGE_OFFSET_MASK), ("%p\n", pPgLvl->a[i].u.pv));
return VINF_TRY_AGAIN;
}
}
else
return rc;
}
/*
* The next level.
*/
if (pThis->fLegacyMode)
{
}
else
{
}
{
LogRel(("PGMR0DynMap: internal error - iPgLvl=%u cLevels=%u uEntry=%#llx fAnd=%#llx fRes=%#llx got=%#llx\n"
"PGMR0DynMap: pv=%p pvPage=%p iEntry=%#x fLegacyMode=%RTbool\n",
i, pPgLvl->cLevels, uEntry, pPgLvl->a[i].fAndMask, pPgLvl->a[i].fResMask, uEntry & pPgLvl->a[i].fAndMask,
return VERR_INTERNAL_ERROR;
}
/*Log(("#%d: iEntry=%4d uEntry=%#llx pvEntry=%p HCPhys=%RHp \n", i, iEntry, uEntry, pvEntry, pPgLvl->a[i].HCPhys));*/
}
/* made it thru without needing to remap anything. */
return VINF_SUCCESS;
}
/**
* Sets up a guard page.
*
* @param pThis The dynamic mapping cache instance.
* @param pPage The page.
*/
{
#ifdef PGMR0DYNMAP_GUARD_NP
#else
if (pThis->fLegacyMode)
else
#endif
pThis->cGuardPages++;
}
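/* The two strategies discussed at the PGMR0DYNMAP_GUARD_NP block above boil
 * down to either clearing the present bit of the existing PTE or overwriting
 * the whole PTE with a dummy value. A rough PAE-only sketch follows; the dummy
 * constant name is an assumption and the legacy (32-bit) case is omitted. */
#if 0 /* illustrative sketch only */
static void pgmR0DynMapSetupGuardPageSketch(PPGMR0DYNMAP pThis, PPGMR0DYNMAPENTRY pPage)
{
# ifdef PGMR0DYNMAP_GUARD_NP
    /* Just clear the present bit, leaving the rest of the PTE intact. */
    ASMAtomicBitClear(pPage->uPte.pv, X86_PTE_BIT_P);
# else
    /* Replace the whole PTE with a dummy, not-present value. */
    ASMAtomicWriteU64(&pPage->uPte.pPae->u, PGMR0DYNMAP_GUARD_PAGE_PAE_PTE /* assumed name */);
# endif
    pThis->cGuardPages++;
}
#endif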
/**
* Adds a new segment of the specified size.
*
* @returns VBox status code.
* @param pThis The dynamic mapping cache instance.
 * @param cPages The size of the new segment, given as a page count.
*/
{
int rc2;
/*
* Do the array reallocations first.
* (The pages array has to be replaced behind the spinlock of course.)
*/
void *pvSavedPTEs = RTMemRealloc(pThis->pvSavedPTEs, (pThis->fLegacyMode ? sizeof(X86PGUINT) : sizeof(X86PGPAEUINT)) * (pThis->cPages + cPages));
if (!pvSavedPTEs)
return VERR_NO_MEMORY;
if (!pvPages)
{
pvSavedPTEs = RTMemRealloc(pThis->pvSavedPTEs, (pThis->fLegacyMode ? sizeof(X86PGUINT) : sizeof(X86PGPAEUINT)) * pThis->cPages);
if (pvSavedPTEs)
    pThis->pvSavedPTEs = pvSavedPTEs;
return VERR_NO_MEMORY;
}
/*
* Allocate the segment structure and pages of memory, then touch all the pages (paranoia).
*/
PPGMR0DYNMAPSEG pSeg = (PPGMR0DYNMAPSEG)RTMemAllocZ(RT_UOFFSETOF(PGMR0DYNMAPSEG, ahMemObjPTs[cMaxPTs]));
if (!pSeg)
return VERR_NO_MEMORY;
if (RT_SUCCESS(rc))
{
/*
* Walk thru the pages and set them up with a mapping of their PTE and everything.
*/
{
/* Initialize the page data. */
/* Map its page table, retry until we've got a clean run (paranoia). */
do
while (rc == VINF_TRY_AGAIN);
if (RT_FAILURE(rc))
break;
/* Save the PTE. */
if (pThis->fLegacyMode)
else
#ifdef VBOX_STRICT
/* Check that we've got the right entry. */
if (HCPhysPage != HCPhysPte)
{
LogRel(("pgmR0DynMapAddSeg: internal error - page #%u HCPhysPage=%RHp HCPhysPte=%RHp pbPage=%p pvPte=%p\n",
break;
}
#endif
} /* for each page */
ASMIntEnable();
/* cleanup non-PT mappings */
if (RT_SUCCESS(rc))
{
#if PGMR0DYNMAP_GUARD_PAGES > 0
/*
* Setup guard pages.
* (Note: TLBs will be shot down later on.)
*/
{
iPage++; /* the guarded page */
}
/* Make sure the very last page is a guard page too. */
#endif /* PGMR0DYNMAP_GUARD_PAGES > 0 */
/*
* Commit it by adding the segment to the list and updating the page count.
*/
return VINF_SUCCESS;
}
/*
* Bail out.
*/
{
}
}
/* Don't bother resizing the arrays, but free them if we're the only user. */
{
}
return rc;
}
/**
* Called by PGMR0DynMapInitVM under the init lock.
*
* @returns VBox status code.
* @param pThis The dynamic mapping cache instance.
*/
{
/*
* Calc the size and add a segment of that size.
*/
if (rc == VERR_NO_MEMORY)
{
/*
* Try adding smaller segments.
*/
do
rc = VINF_SUCCESS;
if (rc == VERR_NO_MEMORY)
{
}
}
#if PGMR0DYNMAP_GUARD_PAGES > 0
/* paranoia */
if (RT_SUCCESS(rc))
#endif
return rc;
}
/**
* Called by PGMR0DynMapInitVM under the init lock.
*
* @returns VBox status code.
* @param pThis The dynamic mapping cache instance.
*/
{
/*
* Calc the new target size and add a segment of the appropriate size.
*/
return VINF_SUCCESS;
if (rc == VERR_NO_MEMORY)
{
/*
* Try adding smaller segments.
*/
do
rc = VINF_SUCCESS;
if (rc == VERR_NO_MEMORY)
}
#if PGMR0DYNMAP_GUARD_PAGES > 0
/* paranoia */
if (RT_SUCCESS(rc))
#endif
return rc;
}
/**
* Called by PGMR0DynMapTermVM under the init lock.
*
* @returns VBox status code.
* @param pThis The dynamic mapping cache instance.
*/
{
/*
* Restore the original page table entries
*/
if (pThis->fLegacyMode)
{
while (iPage-- > 0)
{
}
}
else
{
while (iPage-- > 0)
{
}
}
/*
* Shoot down the TLBs on all CPUs before freeing them.
*/
/*
* Free the segments.
*/
{
int rc;
while (iPT-- > 0)
{
}
}
/*
* Free the arrays and restore the initial state.
 * The cMaxLoad value is left behind for the next setup.
*/
pThis->cGuardPages = 0;
}
/**
* Release references to a page, caller owns the spin lock.
*
* @param pThis The dynamic mapping cache instance.
* @param iPage The page.
* @param cRefs The number of references to release.
*/
{
if (!cRefs)
}
/**
* Release references to a page, caller does not own the spin lock.
*
* @param pThis The dynamic mapping cache instance.
* @param iPage The page.
* @param cRefs The number of references to release.
*/
{
}
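/* For illustration only: the unlocked variant above would normally just take
 * the cache spinlock around the locked worker. The spinlock member name
 * (hSpinlock) and the locked worker name are assumptions. */
#if 0 /* illustrative sketch only */
static void pgmR0DynMapReleasePageSketch(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs); /* hypothetical locked worker */
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
}
#endif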
/**
* pgmR0DynMapPage worker that deals with the tedious bits.
*
* @returns The page index on success, UINT32_MAX on failure.
* @param pThis The dynamic mapping cache instance.
* @param HCPhys The address of the page to be mapped.
* @param iPage The page index pgmR0DynMapPage hashed HCPhys to.
* @param pVM The shared VM structure, for statistics only.
*/
{
#ifdef VBOX_WITH_STATISTICS
#endif
/*
* Check if any of the first 3 pages are unreferenced since the caller
* already has made sure they aren't matching.
*/
#ifdef VBOX_WITH_STATISTICS
bool fLooped = false;
#endif
else
{
/*
* Search for an unused or matching entry.
*/
for (;;)
{
{
return iFreePage;
}
break;
/* advance */
return UINT32_MAX;
}
#ifdef VBOX_WITH_STATISTICS
fLooped = true;
#endif
}
#if 0 //def VBOX_WITH_STATISTICS
/* Check for lost hits. */
if (!fLooped)
#endif
/*
* Setup the new entry.
*/
/*Log6(("pgmR0DynMapPageSlow: old - %RHp %#x %#llx\n", paPages[iFreePage].HCPhys, paPages[iFreePage].cRefs, paPages[iFreePage].uPte.pPae->u));*/
if (pThis->fLegacyMode)
{
| (HCPhys & X86_PTE_PG_MASK);
}
else
{
| (HCPhys & X86_PTE_PAE_PG_MASK);
/*Log6(("pgmR0DynMapPageSlow: #%x - %RHp %p %#llx\n", iFreePage, HCPhys, paPages[iFreePage].pvPage, uNew));*/
}
return iFreePage;
}
/**
* Maps a page into the pool.
*
* @returns Page index on success, UINT32_MAX on failure.
* @param pThis The dynamic mapping cache instance.
* @param HCPhys The address of the page to be mapped.
* @param iRealCpu The real cpu set index. (optimization)
* @param pVM The shared VM structure, for statistics only.
 * @param ppvPage Where to store the page address.
*/
DECLINLINE(uint32_t) pgmR0DynMapPage(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, int32_t iRealCpu, PVM pVM, void **ppvPage)
{
#ifdef VBOX_WITH_STATISTICS
#endif
/*
* Find an entry, if possible a matching one. The HCPhys address is hashed
* down to a page index, collisions are handled by linear searching.
* Optimized for a hit in the first 3 pages.
*
* Field easy hits here and defer the tedious searching and inserting
* to pgmR0DynMapPageSlow().
*/
else
{
{
}
else
{
{
}
else
{
{
return iPage;
}
}
}
}
/*
* Reference it, update statistics and get the return address.
*/
if (cRefs == 1)
{
AssertMsg(pThis->cLoad <= pThis->cPages - pThis->cGuardPages, ("%d/%d\n", pThis->cLoad, pThis->cPages - pThis->cGuardPages));
}
else if (RT_UNLIKELY(cRefs <= 0))
{
}
/*
* Invalidate the entry?
*/
if (RT_UNLIKELY(fInvalidateIt))
/*
* Do the actual invalidation outside the spinlock.
*/
if (RT_UNLIKELY(fInvalidateIt))
{
}
return iPage;
}
/**
 * Assert the integrity of the pool.
*
* @returns VBox status code.
*/
VMMR0DECL(int) PGMR0DynMapAssertIntegrity(void)
{
/*
* Basic pool stuff that doesn't require any lock, just assumes we're a user.
*/
if (!pThis)
return VINF_SUCCESS;
return VERR_INVALID_PARAMETER;
int rc = VINF_SUCCESS;
#define CHECK_RET(expr, a) \
    do { \
if (RT_UNLIKELY(!(expr))) \
{ \
RTAssertMsg2Weak a; \
return VERR_INTERNAL_ERROR; \
} \
} while (0)
/*
* Check that the PTEs are correct.
*/
if (pThis->fLegacyMode)
{
while (iPage-- > 0)
{
CHECK_RET(!((uintptr_t)paPages[iPage].pvPage & PAGE_OFFSET_MASK), ("#%u: %p\n", iPage, paPages[iPage].pvPage));
{
#ifdef PGMR0DYNMAP_GUARD_NP
#else
#endif
cGuard++;
}
{
CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys));
cLoad++;
}
else
}
}
else
{
while (iPage-- > 0)
{
CHECK_RET(!((uintptr_t)paPages[iPage].pvPage & PAGE_OFFSET_MASK), ("#%u: %p\n", iPage, paPages[iPage].pvPage));
{
#ifdef PGMR0DYNMAP_GUARD_NP
#else
#endif
cGuard++;
}
{
CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys));
cLoad++;
}
else
}
}
return VINF_SUCCESS;
}
/**
* Signals the start of a new set of mappings.
*
* Mostly for strictness. PGMDynMapHCPage won't work unless this
* API is called.
*
* @param pVCpu The shared data for the current virtual CPU.
*/
{
}
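/* For illustration, the typical call pattern around the autoset APIs looks
 * like the sketch below: open the set, map pages, then release them all at
 * once. The PGMDynMapHCPage signature is an assumption based on how it is
 * referred to in the comments of this file. */
#if 0 /* illustrative sketch only */
static int pgmR0DynMapAutoSetUsageSketch(PVM pVM, PVMCPU pVCpu, RTHCPHYS HCPhys)
{
    PGMDynMapStartAutoSet(pVCpu);                   /* open the set */

    void *pv = NULL;
    int rc = PGMDynMapHCPage(pVM, HCPhys, &pv);     /* mapping stays valid until the release below */
    if (RT_SUCCESS(rc))
    {
        /* ... access the page thru pv ... */
    }

    PGMDynMapReleaseAutoSet(pVCpu);                 /* flush every mapping made since the start call */
    return rc;
}
#endif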
/**
* Starts or migrates the autoset of a virtual CPU.
*
* This is used by HWACCMR0Enter. When we've longjumped out of the HWACCM
 * execution loop with the set open, we'll migrate it when re-entering; under
 * normal circumstances, we'll start it so VMXR0LoadGuestState can access
* guest memory.
*
* @returns @c true if started, @c false if migrated.
* @param pVCpu The shared data for the current virtual CPU.
* @thread EMT
*/
{
if (fStartIt)
else
return fStartIt;
}
/**
* Worker that performs the actual flushing of the set.
*
* @param pSet The set to flush.
* @param cEntries The number of entries.
*/
{
/*
* Release any pages it's referencing.
*/
if ( cEntries != 0
{
while (i-- > 0)
{
}
}
}
/**
 * Releases the dynamic memory mappings made by PGMDynMapHCPage and its associates
* since the PGMDynMapStartAutoSet call.
*
* @param pVCpu The shared data for the current virtual CPU.
*/
{
/*
* Close and flush the set.
*/
STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
}
/**
* Flushes the set if it's above a certain threshold.
*
* @param pVCpu The shared data for the current virtual CPU.
*/
{
AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags()));
/*
* Only flush it if it's 45% full.
*/
STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
{
AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags()));
}
}
/**
* Migrates the automatic mapping set of the current vCPU if it's active and
* necessary.
*
* This is called when re-entering the hardware assisted execution mode after a
 * nip down to ring-3. We run the risk that the CPU might have changed, and we
 * will therefore make sure all the cache entries currently in the auto set are
 * valid on the new CPU. If the CPU didn't change, nothing will happen as all
* the entries will have been flagged as invalidated.
*
* @param pVCpu The shared data for the current virtual CPU.
* @thread EMT
*/
{
{
if (i != PGMMAPSET_CLOSED)
{
{
while (i-- > 0)
{
{
}
}
}
}
}
}
/**
* Worker function that flushes the current subset.
*
* This is called when the set is popped or when the set
 * has too high a load. As also pointed out elsewhere, the
* whole subset thing is a hack for working around code that
* accesses too many pages. Like PGMPool.
*
* @param pSet The set which subset to flush.
*/
{
if ( i > iSubset
{
while (i-- > iSubset)
{
}
}
}
/**
* Creates a subset.
*
* A subset is a hack to avoid having to rewrite code that touches a lot of
* pages. It prevents the mapping set from being overflowed by automatically
* flushing previous mappings when a certain threshold is reached.
*
* Pages mapped after calling this function are only valid until the next page
* is mapped.
*
* @returns The index of the previous subset. Pass this to
 * PGMDynMapPopAutoSubset when popping it.
* @param pVCpu Pointer to the virtual cpu data.
*/
{
return iPrevSubset;
}
/**
* Pops a subset created by a previous call to PGMDynMapPushAutoSubset.
*
* @param pVCpu Pointer to the virtual cpu data.
* @param iPrevSubset What PGMDynMapPushAutoSubset returned.
*/
{
LogFlow(("PGMDynMapPopAutoSubset: pVCpu=%p iPrevSubset=%u iSubset=%u cEntries=%u\n", pVCpu, iPrevSubset, pSet->iSubset, cEntries));
STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
{
}
}
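/* For illustration, the push/pop pair described above brackets code that maps
 * a lot of pages; anything mapped inside the subset may be flushed early when
 * the threshold is hit, so only the most recent mapping should be relied on. */
#if 0 /* illustrative sketch only */
static void pgmR0DynMapSubsetUsageSketch(PVMCPU pVCpu)
{
    uint32_t const iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);

    /* ... code that maps many pages, e.g. the PGM pool ... */

    PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
}
#endif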
/**
 * As a final resort for a full auto set, try to merge duplicate entries.
*
* @param pSet The set.
*/
{
{
uint32_t j = i + 1;
{
j++;
{
/* merge j into i removing j. */
{
}
else
{
}
}
else
{
/* migrate the max number of refs from j into i and quit the inner loop. */
break;
}
}
}
}
/**
* Common worker code for PGMDynMapHCPhys, pgmR0DynMapHCPageInlined and
* pgmR0DynMapGCPageInlined.
*
* @returns VINF_SUCCESS, bails out to ring-3 on failure.
* @param pVM The shared VM structure (for statistics).
* @param pSet The set.
* @param HCPhys The physical address of the page.
* @param ppv Where to store the address of the mapping on success.
*
* @remarks This is a very hot path.
*/
{
LogFlow(("pgmR0DynMapHCPageCommon: pVM=%p pSet=%p HCPhys=%RHp ppv=%p\n",
#ifdef VBOX_WITH_STATISTICS
#endif
AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags()));
/*
* Map it.
*/
void *pvPage;
{
RTAssertMsg2Weak("PGMDynMapHCPage: cLoad=%u/%u cPages=%u cGuardPages=%u\n",
g_pPGMR0DynMap->cLoad, g_pPGMR0DynMap->cMaxLoad, g_pPGMR0DynMap->cPages, g_pPGMR0DynMap->cGuardPages);
return VERR_PGM_DYNMAP_FAILED;
}
/*
* Add the page to the auto reference set.
*
* The typical usage pattern means that the same pages will be mapped
* several times in the same set. We can catch most of these
* remappings by looking a few pages back into the set. (The searching
* and set optimizing path will hardly ever be used when doing this.)
*/
if (i-- < 5)
{
}
/* Any of the last 5 pages? */
/* Don't bother searching unless we're above a 60% load. */
{
}
else
{
/* Search the rest of the set. */
i -= 4;
while (i-- > 0)
{
break;
}
if (i < 0)
{
{
STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(pSet->cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
}
{
}
{
}
else
{
/* We're screwed. */
RTAssertMsg2Weak("PGMDynMapHCPage: set is full!\n");
return VERR_PGM_DYNMAP_FULL_SET;
}
}
}
return VINF_SUCCESS;
}
#if 0 /* Not used in R0, should internalize the other PGMDynMapHC/GCPage too. */
/* documented elsewhere - a bit of a mess. */
{
#ifdef VBOX_WITH_STATISTICS
#endif
/*
* Validate state.
*/
/*
* Call common code.
*/
return rc;
}
#endif
#if 0 /*def DEBUG*/
/** For pgmR0DynMapTest3PerCpu. */
typedef struct PGMR0DYNMAPTEST
{
typedef PGMR0DYNMAPTEST *PPGMR0DYNMAPTEST;
/**
* Checks that the content of the page is the same on all CPUs, i.e. that there
 * are no CPU-specific PTs or similar nasty stuff involved.
*
* @param idCpu The current CPU.
* @param pvUser1 Pointer a PGMR0DYNMAPTEST structure.
* @param pvUser2 Unused, ignored.
*/
{
}
/**
* Performs some basic tests in debug builds.
*/
{
LogRel(("pgmR0DynMapTest: ****** START ******\n"));
uint32_t i;
/*
* Assert internal integrity first.
*/
LogRel(("Test #0\n"));
int rc = PGMR0DynMapAssertIntegrity();
if (RT_FAILURE(rc))
return rc;
g_fPGMR0DynMapTestRunning = true;
/*
* Simple test, map CR3 twice and check that we're getting the
* same mapping address back.
*/
LogRel(("Test #1\n"));
ASMIntEnable();
if ( RT_SUCCESS(rc2)
&& RT_SUCCESS(rc)
{
LogRel(("Load=%u/%u/%u Set=%u/%u\n", pThis->cLoad, pThis->cMaxLoad, pThis->cPages - pThis->cPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
/*
* Check that the simple set overflow code works by filling it
* with more CR3 mappings.
*/
LogRel(("Test #2\n"));
{
}
ASMIntEnable();
{
}
{
LogRel(("failed(%d): cEntries=%d expected %d\n", __LINE__, pSet->cEntries, RT_ELEMENTS(pSet->aEntries) / 2));
}
{
LogRel(("\n"));
}
if (RT_SUCCESS(rc))
if (RT_SUCCESS(rc))
{
/*
 * Trigger a set optimization run (exactly).
*/
LogRel(("Test #3\n"));
{
}
ASMIntEnable();
{
}
{
LogRel(("failed(%d): cEntries=%d expected %d\n", __LINE__, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
}
LogRel(("Load=%u/%u/%u Set=%u/%u\n", pThis->cLoad, pThis->cMaxLoad, pThis->cPages - pThis->cPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
if (RT_SUCCESS(rc))
if (RT_SUCCESS(rc))
{
/*
* Trigger an overflow error.
*/
LogRel(("Test #4\n"));
{
if (RT_SUCCESS(rc))
if (RT_FAILURE(rc))
break;
}
ASMIntEnable();
if (rc == VERR_PGM_DYNMAP_FULL_SET)
{
/* flush the set. */
LogRel(("Test #5\n"));
ASMIntEnable();
}
else
{
}
}
}
}
else
{
if (RT_SUCCESS(rc))
}
/*
* Check that everyone sees the same stuff.
*/
if (RT_SUCCESS(rc))
{
LogRel(("Test #5\n"));
if (RT_SUCCESS(rc))
{
ASMIntEnable();
if (RT_FAILURE(rc))
{
}
else
LogRel(("pu32Real=%p pu32=%p u32Expect=%#x *pu32=%#x\n",
}
else
{
ASMIntEnable();
}
}
/*
* Clean up.
*/
LogRel(("Cleanup.\n"));
ASMIntEnable();
if (RT_SUCCESS(rc))
else
g_fPGMR0DynMapTestRunning = false;
pThis->cLoad, pThis->cMaxLoad, pThis->cPages - pThis->cGuardPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
LogRel(("pgmR0DynMapTest: ****** END ******\n"));
return rc;
}
#endif /* DEBUG */