/* GMMR0.cpp - revision 0ae6a9e68e1944203a4569e5bb2d9edcf536a290 */

/** @file
 * GMM - Global Memory Manager.
 */

/*
 * Copyright (C) 2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_gmm   GMM - The Global Memory Manager
 *
 * As the name indicates, this component is responsible for global memory
 * management. Currently only guest RAM is allocated from the GMM, but this
 * may change to include shadow page tables and other bits later.
 *
 * Guest RAM is managed as individual pages, but allocated from the host OS
 * in chunks for reasons of portability / efficiency. To minimize the memory
 * footprint all tracking structures must be as small as possible without
 * unnecessary performance penalties.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * The allocation chunks has fixed sized, the size defined at compile time
3aad980b92149dd95a1ab72ddb8d11d61a28ace6vboxsync * by the #GMM_CHUNK_SIZE \#define.
3aad980b92149dd95a1ab72ddb8d11d61a28ace6vboxsync * Each chunk is given an unquie ID. Each page also has a unique ID. The
0a7b20727716a00270f358a1c546473d8c36e8f3vboxsync * relation ship between the two IDs is:
65146b9eb3e96dbac286e55faa020bbfd74037d7vboxsync * GMM_CHUNK_SHIFT = log2(GMM_CHUNK_SIZE / PAGE_SIZE);
65146b9eb3e96dbac286e55faa020bbfd74037d7vboxsync * idPage = (idChunk << GMM_CHUNK_SHIFT) | iPage;
0a7b20727716a00270f358a1c546473d8c36e8f3vboxsync * Where iPage is the index of the page within the chunk. This ID scheme
0a7b20727716a00270f358a1c546473d8c36e8f3vboxsync * permits for efficient chunk and page lookup, but it relies on the chunk size
0a7b20727716a00270f358a1c546473d8c36e8f3vboxsync * to be set at compile time. The chunks are organized in an AVL tree with their
0a7b20727716a00270f358a1c546473d8c36e8f3vboxsync * IDs being the keys.
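 *
 * As a minimal illustration (not actual GMM code), composing and decomposing
 * such an ID boils down to a shift and a mask. Assuming, say, 2MB chunks and
 * 4KB pages, GMM_CHUNK_SHIFT would be 9:
 * @code
 *      // Compose a page ID from a chunk ID and the page index within the chunk.
 *      uint32_t idPage        = (idChunk << GMM_CHUNK_SHIFT) | iPage;
 *      // Decompose it again.
 *      uint32_t idChunkOfPage = idPage >> GMM_CHUNK_SHIFT;
 *      uint32_t iPageInChunk  = idPage & ((1U << GMM_CHUNK_SHIFT) - 1);
 * @endcode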
 *
 * The physical address of each page in an allocation chunk is maintained by
 * the #RTR0MEMOBJ and obtained using #RTR0MemObjGetPagePhysAddr. There is no
 * need to duplicate this information (it would cost 8 bytes per page if we did).
 *
 * So what do we need to track per page? Most importantly we need to know
 * which state the page is in:
 *
 *   - Private - Allocated for (eventually) backing one particular VM page.
 *   - Shared  - Readonly page that is used by one or more VMs and treated
 *               as COW by PGM.
 *   - Free    - Not used by anyone.
 *
 * For the page replacement operations (sharing, defragmenting and freeing)
 * to be somewhat efficient, private pages need to be associated with a
 * particular page in a particular VM.
 *
 * Tracking the usage of shared pages is impractical and expensive, so we'll
 * settle for a reference counting system instead.
 *
 * Free pages will be chained on LIFOs.
 *
 * On 64-bit systems we will use a 64-bit bitfield per page, while on 32-bit
 * systems a 32-bit bitfield will have to suffice because of address space
 * limitations. The #GMMPAGE structure shows the details.
 *
 *
 * @section sec_gmm_alloc_strat Page Allocation Strategy
 *
 * The strategy for allocating pages has to take fragmentation and shared
 * pages into account, or we may end up with 2000 chunks with only
 * a few pages in each. Shared pages cannot easily be reallocated because
 * of the inaccurate usage accounting (see above). Private pages can be
 * reallocated by a defragmentation thread in the same manner that sharing
 * is done.
 *
 * The first approach is to manage the free pages in two sets depending on
 * whether they are mainly for the allocation of shared or private pages.
 * In the initial implementation there will be almost no possibility for
 * mixing shared and private pages in the same chunk (only if we're really
 * stressed on memory), but when we implement forking of VMs and have to
 * deal with lots of COW pages it'll start getting kind of interesting.
 *
 * The sets are lists of chunks with approximately the same number of
 * free pages. Say the chunk size is 1MB, meaning 256 pages, and a set
 * consists of 16 lists. So, the first list will contain the chunks with
 * 1-7 free pages, the second covers 8-15, and so on. The chunks will be
 * moved between the lists as pages are freed up or allocated.
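 *
 * As an illustrative sketch only (hypothetical names, not the actual GMM
 * code), the list a chunk belongs to could be derived from its free page
 * count like this:
 * @code
 *      // With the example numbers above: cPagesPerChunk = 256, cLists = 16.
 *      // Chunks with no free pages are not kept in the set at all.
 *      unsigned cSpan = cPagesPerChunk / cLists;              // free-page counts per list
 *      unsigned iList = RT_MIN((cFree - 1) / cSpan, cLists - 1);
 *      // Link the chunk into pSet->apLists[iList] and re-link it whenever
 *      // cFree crosses a span boundary due to allocations or frees.
 * @endcode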
 *
 *
 * @section sec_gmm_costs      Costs
 *
 * The per page cost in kernel space is 32 bits plus whatever RTR0MEMOBJ
 * entails. In addition there is the chunk cost of approximately
 * (sizeof(RTR0MEMOBJ) + sizeof(CHUNK)) / 2^CHUNK_SHIFT bytes per page.
 *
 * On Windows the per page #RTR0MEMOBJ cost is 32 bits on 32-bit Windows
 * and 64 bits on 64-bit Windows (a PFN_NUMBER in the MDL). So, 64 bits per page.
 * The cost on Linux is identical, but here it's because of sizeof(struct page *).
 *
 *
 * @section sec_gmm_legacy     Legacy Mode for Non-Tier-1 Platforms
 *
 * In legacy mode the page source is locked user pages rather than
 * #RTR0MemObjAllocPhysNC, which means that a page can only be allocated
 * by the VM that locked it. We will make no attempt at implementing
 * page sharing on these systems, just do enough to make it all work.
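 *
 * A rough sketch of the difference (illustrative only, error handling and
 * details omitted; pvR3 stands for a chunk-sized ring-3 buffer supplied by
 * the VM process via the chunk seeding path, cf. VERR_GMM_SEED_ME):
 * @code
 *      RTR0MEMOBJ hMemObj;
 *      int rc;
 *      if (!pGMM->fLegacyAllocationMode)
 *          // Normal mode: ring-0 allocates the backing pages itself.
 *          rc = RTR0MemObjAllocPhysNC(&hMemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
 *      else
 *          // Legacy mode: lock down pages already owned by the VM process.
 *          rc = RTR0MemObjLockUser(&hMemObj, pvR3, GMM_CHUNK_SIZE,
 *                                  RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
 * @endcode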
 *
 *
 * @subsection sub_gmm_locking  Serializing
 *
 * One simple fast mutex will be employed in the initial implementation, not
 * two as mentioned in @ref subsec_pgmPhys_Serializing.
 *
 * @see @ref subsec_pgmPhys_Serializing
 *
 *
 * @section sec_gmm_overcommit  Memory Over-Commitment Management
 *
 * The GVM will have to do the system wide memory over-commitment
 * management. My current ideas are:
 *
 *      - Per VM oc policy that indicates how much to initially commit
 *        to it and what to do in an out-of-memory situation.
 *      - Prevent overtaxing the host.
 *
 * There are some challenges here, the main ones are configurability and
 * security. Should we for instance permit anyone to request 100% memory
 * commitment? Who should be allowed to do runtime adjustments of the
 * config? And how do we prevent these settings from being lost when the last
 * VM process exits? The solution is probably to have an optional root
 * daemon that will keep VMMR0.r0 in memory and enable the security measures.
 *
 *
 * @section sec_gmm_numa    NUMA
 *
 * NUMA considerations will be designed and implemented a bit later.
 *
 * The preliminary guess is that we will have to try to allocate memory as
 * close as possible to the CPUs the VM is executed on (EMT and additional CPU
 * threads). Which means it's mostly about allocation and sharing policies.
 * Both the scheduler and allocator interface will have to supply some NUMA info
 * and we'll need to have a way to calculate access costs.
 *
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/** Pointer to set of free chunks. */
/** Pointer to a GMM allocation chunk. */

/**
 * The per-page tracking structure employed by the GMM.
 *
 * On 32-bit hosts some trickery is necessary to compress all
 * the information into 32 bits. When the fSharedFree member is set,
 * the 30th bit decides whether it's a free page or not.
 *
 * Because of the different layout on 32-bit and 64-bit hosts, macros
 * are used to get and set some of the data.
 */
typedef union GMMPAGE
    /** Unsigned integer view. */
    /** The common view. */
    /** The page state. */
    /** The view of a private page. */
    /** The guest page frame number. (Max addressable: 2 ^ 44 - 16) */
    /** The GVM handle. (64K VMs) */
    /** Reserved. */
    /** The page state. */
    /** The view of a shared page. */
    /** The host page frame number. (Max addressable: 2 ^ 44 - 16) */
    /** The reference count (64K VMs). */
    /** Reserved. Checksum or something? Two hGVMs for forking? */
    /** The page state. */
    /** The view of a free page. */
    /** The index of the next page in the free list. UINT16_MAX is NIL. */
    /** Reserved. Checksum or something? */
    /** Reserved. Checksum or something? */
    /** The page state. */
#else /* 32-bit */
    /** Unsigned integer view. */
    /** The common view. */
    /** The page state. */
    /** The view of a private page. */
    /** The guest page frame number. (Max addressable: 2 ^ 36) */
    /** The GVM handle. (127 VMs) */
    /** The top page state bit, MBZ. */
    /** The view of a shared page. */
    /** The reference count. */
    /** The page state. */
    /** The view of a free page. */
    /** The index of the next page in the free list. UINT16_MAX is NIL. */
    /** Reserved. Checksum or something? */
    /** The page state. */
/** Pointer to a GMMPAGE. */
#define GMM_PAGE_STATE_PRIVATE 0
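
/*
 * Illustrative sketch only: a simplified take on the kind of packed per-page
 * union described for GMMPAGE above.  The EXAMPLEPAGE type, its field names
 * and bit widths are made up for illustration; only the idea (a raw integer
 * view plus per-state bitfield views sharing a 2-bit state member) mirrors
 * the real structure.
 */
#if 0 /* example, not built */
typedef union EXAMPLEPAGE
{
    /** Raw integer view, handy for zeroing and whole-word updates (pPage->u = 0). */
    uint64_t u;
    /** Common view: only the state bits are valid regardless of the state. */
    struct
    {
        uint64_t    uStateDependent : 62;
        uint64_t    u2State         : 2;
    } Common;
    /** Private page view: which guest page of which VM this page backs. */
    struct
    {
        uint64_t    pfn             : 36;   /* guest page frame number */
        uint64_t    hGVM            : 16;   /* owning VM handle */
        uint64_t    uReserved       : 10;
        uint64_t    u2State         : 2;    /* == GMM_PAGE_STATE_PRIVATE */
    } Private;
} EXAMPLEPAGE;

/** True if the page is in the private state (cf. GMM_PAGE_STATE_PRIVATE above). */
# define EXAMPLEPAGE_IS_PRIVATE(pPage)  ( (pPage)->Common.u2State == GMM_PAGE_STATE_PRIVATE )
#endif
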
typedef struct GMMCHUNKMAP
} GMMCHUNKMAP;
typedef enum GMMCHUNKTYPE
GMMCHUNKTYPE_INVALID = 0,
} GMMCHUNKTYPE;
typedef struct GMMCHUNK
} GMMCHUNK;
typedef struct GMMCHUNKTLBE
} GMMCHUNKTLBE;
typedef struct GMMCHUNKTLB
} GMMCHUNKTLB;
typedef struct GMMCHUNKFREESET
typedef struct GMM
bool fLegacyAllocationMode;
bool fBoundMemoryMode;
} GMM;
#if defined(VBOX_STRICT) && 0
# define GMM_CHECK_SANITY_UPON_ENTERING(pGMM) (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
#if defined(VBOX_STRICT) && 0
# define GMM_CHECK_SANITY_UPON_LEAVING(pGMM) (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
#if defined(VBOX_STRICT) && 0
# define GMM_CHECK_SANITY_IN_LOOPS(pGMM) (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
if (!pGMM)
return VERR_NO_MEMORY;
# if defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
return VINF_SUCCESS;
return rc;
#ifdef VBOX_WITH_PAGE_SHARING
SUPR0Printf("GMMR0CleanupVM: hGVM=%#x has %#x private pages that cannot be found!\n", pGVM->hSelf, pGVM->gmm.s.cPrivatePages);
if (cPrivatePages)
while (pCur)
SUPR0Printf("GMMR0CleanupVM: hGVM=%#x left %#x shared pages behind!\n", pGVM->hSelf, pGVM->gmm.s.cSharedPages);
case GMMOCPOLICY_NO_OC:
#ifndef VBOX_STRICT
unsigned cPrivate = 0;
unsigned cShared = 0;
unsigned cFree = 0;
while (iPage-- > 0)
cFree++;
cPrivate++;
cFree++;
cShared++;
SUPR0Printf("gmmR0CleanupVMScanChunk: Chunk %p/%#x has bogus stats - free=%d/%d private=%d/%d shared=%d/%d\n",
AssertMsgFailed(("%p/%#x: cFree=%#x - it should be 0 in bound mode!\n", pChunk, pChunk->Core.Key, pChunk->cFree));
SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: RTRMemObjFree(%p,false) -> %d \n", pChunk,
SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: RTRMemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk,
GMMR0DECL(int) GMMR0InitialReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
LogFlow(("GMMR0InitialReservation: pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x enmPolicy=%d enmPriority=%d\n",
return rc;
AssertReturn(enmPolicy > GMMOCPOLICY_INVALID && enmPolicy < GMMOCPOLICY_END, VERR_INVALID_PARAMETER);
AssertReturn(enmPriority > GMMPRIORITY_INVALID && enmPriority < GMMPRIORITY_END, VERR_INVALID_PARAMETER);
return rc;
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
return GMMR0InitialReservation(pVM, idCpu, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages, pReq->enmPolicy, pReq->enmPriority);
GMMR0DECL(int) GMMR0UpdateReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages)
return rc;
return rc;
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
cErrors++;
return cErrors;
return cErrors;
return pChunk;
return NULL;
if (pPrev)
if (pNext)
return idChunk;
AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk), NIL_GMM_CHUNKID);
AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk), NIL_GMM_CHUNKID);
static int gmmR0RegisterChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, RTR0MEMOBJ MemObj, uint16_t hGVM, GMMCHUNKTYPE enmChunkType, PGMMCHUNK *ppChunk = NULL)
int rc;
if (pChunk)
LogFlow(("gmmR0RegisterChunk: pChunk=%p id=%#x cChunks=%d\n", pChunk, pChunk->Core.Key, pGMM->cChunks));
if (ppChunk)
return VINF_SUCCESS;
return rc;
static int gmmR0AllocateOneChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, uint16_t hGVM, GMMCHUNKTYPE enmChunkType, PGMMCHUNK *ppChunk = NULL)
int rc;
AssertReturn(enmChunkType == GMMCHUNKTYPE_NON_CONTINUOUS || enmChunkType == GMMCHUNKTYPE_CONTINUOUS, VERR_INVALID_PARAMETER);
return rc;
return VERR_INTERNAL_ERROR_4;
if (!pChunk)
return rc;
return VERR_INTERNAL_ERROR_5;
return rc;
return VERR_INTERNAL_ERROR_5;
return VINF_SUCCESS;
pPage->u = 0;
static int gmmR0AllocatePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
return VERR_GMM_HIT_GLOBAL_LIMIT;
switch (enmAccount)
case GMMACCOUNT_BASE:
if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages > pGVM->gmm.s.Reserved.cBasePages))
pGVM->gmm.s.Reserved.cBasePages, pGVM->gmm.s.Allocated.cBasePages, pGVM->gmm.s.cBalloonedPages, cPages));
return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
case GMMACCOUNT_SHADOW:
return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
case GMMACCOUNT_FIXED:
return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
return VERR_GMM_SEED_ME;
return VERR_GMM_SEED_ME;
switch (enmAccount)
return VINF_SUCCESS;
GMMR0DECL(int) GMMR0AllocateHandyPages(PVM pVM, VMCPUID idCpu, uint32_t cPagesToUpdate, uint32_t cPagesToAlloc, PGMMPAGEDESC paPages)
return rc;
unsigned iPage = 0;
AssertMsgReturn(paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS, ("#%#x: %RHp\n", iPage, paPages[iPage].HCPhysGCPhys), VERR_INVALID_PARAMETER);
AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not private! %.*Rhxs (type %d)\n", iPage, paPages[iPage].idPage, sizeof(*pPage), pPage, pPage->Common.u2State));
Log(("GMMR0AllocateHandyPages: free shared page %x cRefs=%d\n", paPages[iPage].idSharedPage, pPage->Shared.cRefs));
Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not found! (shared)\n", iPage, paPages[iPage].idSharedPage));
return rc;
GMMR0DECL(int) GMMR0AllocatePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
LogFlow(("GMMR0AllocatePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));
return rc;
AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
return rc;
GMMR0DECL(int) GMMR0AllocateLargePage(PVM pVM, VMCPUID idCpu, uint32_t cbPage, uint32_t *pIdPage, RTHCPHYS *pHCPhys)
return rc;
return VERR_NOT_SUPPORTED;
if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages > pGVM->gmm.s.Reserved.cBasePages))
return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
return rc;
return rc;
return rc;
return VERR_NOT_SUPPORTED;
Log(("GMMR0FreeLargePage: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cBasePages, cPages));
return rc;
&& pGVM)
pPage, pPage - &pChunk->aPages[0], idPage, pPage->Common.u2State, pChunk->iFreeHead)); NOREF(idPage);
pPage->u = 0;
#ifdef VBOX_WITH_PAGE_SHARING
DECLINLINE(void) gmmR0ConvertToSharedPage(PGMM pGMM, PGVM pGVM, RTHCPHYS HCPhys, uint32_t idPage, PGMMPAGE pPage)
static int gmmR0FreePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
switch (enmAccount)
case GMMACCOUNT_BASE:
case GMMACCOUNT_SHADOW:
case GMMACCOUNT_FIXED:
switch (enmAccount)
return rc;
GMMR0DECL(int) GMMR0FreePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
LogFlow(("GMMR0FreePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));
return rc;
AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
return rc;
GMMR0DECL(int) GMMR0BalloonedPages(PVM pVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages)
AssertMsgReturn(cBalloonedPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cBalloonedPages), VERR_INVALID_PARAMETER);
return rc;
switch (enmAction)
case GMMBALLOONACTION_INFLATE:
/* Codepath never taken. Might be interesting in the future to request ballooned memory from guests in low memory conditions. */
AssertFailed();
Log(("GMMR0BalloonedPages: +%#x - Global=%#llx / VM: Total=%#llx Req=%#llx Actual=%#llx (pending)\n", cBalloonedPages,
pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqBalloonedPages, pGVM->gmm.s.cReqActuallyBalloonedPages));
case GMMBALLOONACTION_DEFLATE:
cBalloonedPages, pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqDeflatePages));
case GMMBALLOONACTION_RESET:
return rc;
return VINF_SUCCESS;
return rc;
return rc;
return rc;
return VINF_SUCCESS;
Log(("gmmR0MapChunk: Chunk %#x is not mapped into pGVM=%p/%#x\n", pChunk->Core.Key, pGVM, pGVM->hSelf));
return VERR_GMM_CHUNK_NOT_MAPPED;
return VERR_GMM_CHUNK_NOT_FOUND;
return VINF_SUCCESS;
return VERR_GMM_CHUNK_ALREADY_MAPPED;
int rc = RTR0MemObjMapUser(&MapObj, pChunk->MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
void *pvMappings = RTMemRealloc(pChunk->paMappings, (pChunk->cMappings + 2 /*8*/) * sizeof(pChunk->paMappings[0]));
return VERR_NO_MEMORY;
return rc;
GMMR0DECL(int) GMMR0MapUnmapChunk(PVM pVM, VMCPUID idCpu, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
return rc;
return VERR_INVALID_PARAMETER;
return rc;
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
return rc;
return VERR_NOT_SUPPORTED;
rc = RTR0MemObjLockUser(&MemObj, pvR3, GMM_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
return rc;
GMMR0DECL(int) GMMR0RegisterSharedModule(PVM pVM, VMCPUID idCpu, VBOXOSFAMILY enmGuestOS, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule,
#ifdef VBOX_WITH_PAGE_SHARING
return rc;
Log(("GMMR0RegisterSharedModule %s %s base %RGv size %x\n", pszModuleName, pszVersion, GCBaseAddr, cbModule));
bool fNewModule = false;
PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)RTAvlGCPtrGet(&pGVM->gmm.s.pSharedModuleTree, GCBaseAddr);
if (!pRecVM)
if (!pRecVM)
AssertFailed();
goto end;
fNewModule = true;
PGMMSHAREDMODULE pGlobalModule = (PGMMSHAREDMODULE)RTAvlGCPtrGet(&pGMM->pGlobalSharedModuleTree, GCBaseAddr);
if (!pGlobalModule)
if (!pGlobalModule)
AssertFailed();
goto end;
for (unsigned i = 0; i < cRegions; i++)
if ( fNewModule
|| pRecVM->fCollision == true) /* colliding module unregistered and new one registerd since the last check */
Log(("GMMR0RegisterSharedModule: using existing module %s cUser=%d!\n", pszModuleName, pGlobalModule->cUsers));
goto end;
end:
return rc;
return VERR_NOT_IMPLEMENTED;
GMMR0DECL(int) GMMR0RegisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMREGISTERSHAREDMODULEREQ pReq)
AssertMsgReturn(pReq->Hdr.cbReq >= sizeof(*pReq) && pReq->Hdr.cbReq == RT_UOFFSETOF(GMMREGISTERSHAREDMODULEREQ, aRegions[pReq->cRegions]), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
return GMMR0RegisterSharedModule(pVM, idCpu, pReq->enmGuestOS, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule, pReq->cRegions, pReq->aRegions);
GMMR0DECL(int) GMMR0UnregisterSharedModule(PVM pVM, VMCPUID idCpu, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule)
#ifdef VBOX_WITH_PAGE_SHARING
return rc;
Log(("GMMR0UnregisterSharedModule %s %s base=%RGv size %x\n", pszModuleName, pszVersion, GCBaseAddr, cbModule));
PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)RTAvlGCPtrGet(&pGVM->gmm.s.pSharedModuleTree, GCBaseAddr);
if (!pRecVM)
goto end;
/* Free the ranges, but leave the pages intact as there might still be references; they will be cleared by the COW mechanism. */
end:
return rc;
return VERR_NOT_IMPLEMENTED;
GMMR0DECL(int) GMMR0UnregisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMUNREGISTERSHAREDMODULEREQ pReq)
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
return GMMR0UnregisterSharedModule(pVM, idCpu, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule);
#ifdef VBOX_WITH_PAGE_SHARING
* - if a shared page is new, then it changes the GMM page type to shared and returns it in the paPageDesc array
* - if a shared page already exists, then it checks if the VM page is identical and if so frees the VM page and returns the shared page in the paPageDesc array
GMMR0DECL(int) GMMR0SharedModuleCheckRange(PGVM pGVM, PGMMSHAREDMODULE pModule, unsigned idxRegion, unsigned cPages, PGMMSHAREDPAGEDESC paPageDesc)
AssertReturn(cPages == (pModule->aRegions[idxRegion].cbRegion >> PAGE_SHIFT), VERR_INVALID_PARAMETER);
Log(("GMMR0SharedModuleCheckRange %s base %RGv region %d cPages %d\n", pModule->szName, pModule->Core.Key, idxRegion, cPages));
pGlobalRegion->paHCPhysPageID = (uint32_t *)RTMemAlloc(cPages * sizeof(*pGlobalRegion->paHCPhysPageID));
AssertFailed();
goto end;
for (unsigned i = 0; i < cPages; i++)
for (unsigned i = 0; i < cPages; i++)
if (!pPage)
AssertFailed();
goto end;
AssertMsg(paPageDesc[i].GCPhys == (pPage->Private.pfn << 12), ("desc %RGp gmm %RGp\n", paPageDesc[i].HCPhys, (pPage->Private.pfn << 12)));
if (!pPage)
AssertFailed();
goto end;
Log(("Replace existing page guest %RGp host %RHp -> %RHp\n", paPageDesc[i].GCPhys, paPageDesc[i].HCPhys, ((uint64_t)pPage->Shared.pfn) << PAGE_SHIFT));
if (pChunk)
AssertFailed();
goto end;
AssertFailed();
goto end;
/* Get the virtual address of the physical page; map the chunk into the VM process if not already done. */
goto end;
end:
return rc;
#ifdef VBOX_WITH_PAGE_SHARING
return rc;
return rc;
return VERR_NOT_IMPLEMENTED;
#ifdef VBOX_WITH_PAGE_SHARING
Log(("gmmR0CheckSharedModule: check %s %s base=%RGv size=%x collision=%d\n", pGlobalModule->szName, pGlobalModule->szVersion, pGlobalModule->Core.Key, pGlobalModule->cbModule, pLocalModule->fCollision));
#ifdef DEBUG_sandervl
return rc;
return VINF_SUCCESS;
#ifdef VBOX_WITH_PAGE_SHARING
return rc;
# ifndef DEBUG_sandervl
RTAvlGCPtrDoWithAll(&pGVM->gmm.s.pSharedModuleTree, true /* fFromLeft */, gmmR0CheckSharedModule, &Info);
# ifndef DEBUG_sandervl
return rc;
return VERR_NOT_IMPLEMENTED;