GMMR0.cpp revision dfc32e468396f700d7a7baa71e622dc36776e591
5b281ba489ca18f0380d7efc7a5108b606cce449vboxsync * GMM - Global Memory Manager.
1c94c0a63ba68be1a7b2c640e70d7a06464e4fcavboxsync * Copyright (C) 2007 InnoTek Systemberatung GmbH
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * This file is part of VirtualBox Open Source Edition (OSE), as
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * available from http://www.virtualbox.org. This file is free software;
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * you can redistribute it and/or modify it under the terms of the GNU
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * General Public License as published by the Free Software Foundation,
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * distribution. VirtualBox OSE is distributed in the hope that it will
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * be useful, but WITHOUT ANY WARRANTY of any kind.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync/** @page pg_gmm GMM - The Global Memory Manager
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * As the name indicates, this component is responsible for global memory
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * management. Currently only guest RAM is allocated from the GMM, but this
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * may change to include shadow page tables and other bits later.
1c94c0a63ba68be1a7b2c640e70d7a06464e4fcavboxsync * Guest RAM is managed as individual pages, but allocated from the host OS
1c94c0a63ba68be1a7b2c640e70d7a06464e4fcavboxsync * in chunks for reasons of portability / efficiency. To minimize the memory
1c94c0a63ba68be1a7b2c640e70d7a06464e4fcavboxsync * footprint all tracking structure must be as small as possible without
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * unnecessary performance penalties.
3aad980b92149dd95a1ab72ddb8d11d61a28ace6vboxsync * The allocation chunks have a fixed size, defined at compile time
3aad980b92149dd95a1ab72ddb8d11d61a28ace6vboxsync * by the GMM_CHUNK_SIZE \#define.
0a7b20727716a00270f358a1c546473d8c36e8f3vboxsync * Each chunk is given a unique ID. Each page also has a unique ID. The
0a7b20727716a00270f358a1c546473d8c36e8f3vboxsync * relationship between the two IDs is:
0a7b20727716a00270f358a1c546473d8c36e8f3vboxsync * @verbatim
0a7b20727716a00270f358a1c546473d8c36e8f3vboxsync (idChunk << GMM_CHUNK_SHIFT) | iPage
0a7b20727716a00270f358a1c546473d8c36e8f3vboxsync @endverbatim
0a7b20727716a00270f358a1c546473d8c36e8f3vboxsync * Where GMM_CHUNK_SHIFT is log2(GMM_CHUNK_SIZE / PAGE_SIZE) and iPage is
0a7b20727716a00270f358a1c546473d8c36e8f3vboxsync * the index of the page within the chunk. This ID scheme permits for efficient
0a7b20727716a00270f358a1c546473d8c36e8f3vboxsync * chunk and page lookup, but it relies on the chunk size to be set at compile
0a7b20727716a00270f358a1c546473d8c36e8f3vboxsync * time. The chunks are organized in an AVL tree with their IDs being the keys.
0a7b20727716a00270f358a1c546473d8c36e8f3vboxsync * The physical address of each page in an allocation chunk is maintained by
3aad980b92149dd95a1ab72ddb8d11d61a28ace6vboxsync * the RTR0MEMOBJ and obtained using RTR0MemObjGetPagePhysAddr. There is no
3aad980b92149dd95a1ab72ddb8d11d61a28ace6vboxsync * need to duplicate this information (it'll cost 8-bytes per page if we did).
3aad980b92149dd95a1ab72ddb8d11d61a28ace6vboxsync * So what do we need to track per page? Most importantly we need to know what
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * state the page is in:
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * - Private - Allocated for (eventually) backing one particular VM page.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * - Shared - Readonly page that is used by one or more VMs and treated
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * as COW by PGM.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * - Free - Not used by anyone.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * For the page replacement operations (sharing, defragmenting and freeing)
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * to be somewhat efficient, private pages needs to be associated with a
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * particular page in a particular VM.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * Tracking the usage of shared pages is impractical and expensive, so we'll
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * settle for a reference counting system instead.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * Free pages will be chained on LIFOs
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * On 64-bit systems we will use a 64-bit bitfield per page, while on 32-bit
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * systems a 32-bit bitfield will have to suffice because of address space
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * limitations. The GMMPAGE structure shows the details.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * @section sec_gmm_alloc_strat Page Allocation Strategy
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * The strategy for allocating pages has to take fragmentation and shared
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * pages into account, or we may end up with 2000 chunks with only
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * a few pages in each. The fragmentation wrt shared pages is that unlike
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * private pages they cannot easily be reallocated. Private pages can be
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * reallocated by a defragmentation thread in the same manner that sharing is done.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * The first approach is to manage the free pages in two sets depending on
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * whether they are mainly for the allocation of shared or private pages.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * In the initial implementation there will be almost no possibility for
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * mixing shared and private pages in the same chunk (only if we're really
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * stressed on memory), but when we implement forking of VMs and have to
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * deal with lots of COW pages it'll start getting kind of interesting.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * The sets are lists of chunks with approximately the same number of
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * free pages. Say the chunk size is 1MB, meaning 256 pages, and a set
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * consists of 16 lists. So, the first list will contain the chunks with
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * 1-7 free pages, the second covers 8-15, and so on. The chunks will be
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * moved between the lists as pages are freed up or allocated.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * @section sec_gmm_costs Costs
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * The per page cost in kernel space is 32-bit plus whatever RTR0MEMOBJ
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * entails. In addition there is the chunk cost of approximately
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * (sizeof(RT0MEMOBJ) + sizeof(CHUNK)) / 2^CHUNK_SHIFT bytes per page.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * On Windows the per page RTR0MEMOBJ cost is 32-bit on 32-bit windows
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * and 64-bit on 64-bit windows (a PFN_NUMBER in the MDL). So, 64-bit per page.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * The cost on Linux is identical, but here it's because of sizeof(struct page *).
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * @section sec_gmm_legacy Legacy Mode for Non-Tier-1 Platforms
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * In legacy mode the page source is locked user pages and not
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * RTR0MemObjAllocPhysNC, this means that a page can only be allocated
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * by the VM that locked it. We will make no attempt at implementing
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * page sharing on these systems, just do enough to make it all work.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * @subsection sub_gmm_locking Serializing
89dfdbb56cf9dddad3c7685b41bda1e4e4c1d6f9vboxsync * One simple fast mutex will be employed in the initial implementation, not
89dfdbb56cf9dddad3c7685b41bda1e4e4c1d6f9vboxsync * two as mentioned in @ref subsec_pgmPhys_Serializing.
89dfdbb56cf9dddad3c7685b41bda1e4e4c1d6f9vboxsync * @see subsec_pgmPhys_Serializing
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * @section sec_gmm_overcommit Memory Over-Commitment Management
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * The GVM will have to do the system wide memory over-commitment
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * management. My current ideas are:
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * - Per VM oc policy that indicates how much to initially commit
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * to it and what to do in an out-of-memory situation.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * - Prevent overtaxing the host.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * There are some challenges here, the main ones are configurability and
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * security. Should we for instance permit anyone to request 100% memory
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * commitment? Who should be allowed to do runtime adjustments of the
89dfdbb56cf9dddad3c7685b41bda1e4e4c1d6f9vboxsync * config. And how to prevent these settings from being lost when the last
89dfdbb56cf9dddad3c7685b41bda1e4e4c1d6f9vboxsync * VM process exits? The solution is probably to have an optional root
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * daemon that will keep VMMR0.r0 in memory and enable the security measures.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * This will not be implemented this week. :-)
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync/*******************************************************************************
89dfdbb56cf9dddad3c7685b41bda1e4e4c1d6f9vboxsync* Header Files *
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync*******************************************************************************/
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync/*******************************************************************************
89dfdbb56cf9dddad3c7685b41bda1e4e4c1d6f9vboxsync* Structures and Typedefs *
89dfdbb56cf9dddad3c7685b41bda1e4e4c1d6f9vboxsync*******************************************************************************/
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync/** Pointer to set of free chunks. */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync/** Pointer to a GMM allocation chunk. */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * The per-page tracking structure employed by the GMM.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * On 32-bit hosts some trickery is necessary to compress all
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * the information into 32-bits. When the fSharedFree member is set,
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * the 30th bit decides whether it's a free page or not.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * Because of the different layout on 32-bit and 64-bit hosts, macros
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * are used to get and set some of the data.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsynctypedef union GMMPAGE
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync /** Unsigned integer view. */
89dfdbb56cf9dddad3c7685b41bda1e4e4c1d6f9vboxsync /** The common view. */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync /** The page state. */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync /** The view of a private page. */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync /** The guest page frame number. (Max addressable: 2 ^ 44 - 16) */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync /** The GVM handle. (64K VMs) */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync /** Reserved. */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync /** The page state. */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync /** The view of a shared page. */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync /** The reference count. */
8fdb63a0d23d1618724f651b8c3d11be48b44d35vboxsync /** Reserved. Checksum or something? Two hGVMs for forking? */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync /** The page state. */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync /** The view of a free page. */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync /** The index of the next page in the free list. */
7e10aea6606a51d35041e5a85f9e4f1bd19c4062vboxsync /** Reserved. Checksum or something? */
7e10aea6606a51d35041e5a85f9e4f1bd19c4062vboxsync /** The page state. */
7e10aea6606a51d35041e5a85f9e4f1bd19c4062vboxsync#else /* 32-bit */
7e10aea6606a51d35041e5a85f9e4f1bd19c4062vboxsync /** Unsigned integer view. */
7e10aea6606a51d35041e5a85f9e4f1bd19c4062vboxsync /** The common view. */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync /** The page state. */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync /** The view of a private page. */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync /** The guest page frame number. (Max addressable: 2 ^ 36) */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync /** The GVM handle. (127 VMs) */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync /** The top page state bit, MBZ. */
89dfdbb56cf9dddad3c7685b41bda1e4e4c1d6f9vboxsync /** The view of a shared page. */
7e10aea6606a51d35041e5a85f9e4f1bd19c4062vboxsync /** The reference count. */
7e10aea6606a51d35041e5a85f9e4f1bd19c4062vboxsync /** The page state. */
7e10aea6606a51d35041e5a85f9e4f1bd19c4062vboxsync /** The view of a free page. */
7e10aea6606a51d35041e5a85f9e4f1bd19c4062vboxsync /** The index of the next page in the free list. */
7e10aea6606a51d35041e5a85f9e4f1bd19c4062vboxsync /** The page state. */
89dfdbb56cf9dddad3c7685b41bda1e4e4c1d6f9vboxsync/** Pointer to a GMMPAGE. */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync/** @name The Page States.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync/** A private page. */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync/** A private page - alternative value used on the 32-bit implementation.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * This will never be used on 64-bit hosts. */
0c0ad8688637e97e7a78047fcc046fa8b3655951vboxsync/** A shared page. */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync/** A free page. */
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync/** @def GMM_PAGE_IS_PRIVATE
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * @returns true if free, false if not.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * @param pPage The GMM page.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync# define GMM_PAGE_IS_PRIVATE(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_PRIVATE )
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync# define GMM_PAGE_IS_PRIVATE(pPage) ( (pPage)->Private.fZero == 0 )
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync/** @def GMM_PAGE_IS_FREE
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * @returns true if free, false if not.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * @param pPage The GMM page.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync#define GMM_PAGE_IS_SHARED(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_SHARED )
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync/** @def GMM_PAGE_IS_FREE
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * @returns true if free, false if not.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * @param pPage The GMM page.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync#define GMM_PAGE_IS_FREE(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_FREE )
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync/** @def GMM_PAGE_PFN_END
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * The end of the valid guest pfn range, {0..GMM_PAGE_PFN_END-1}.
9e5c26690d45216629b5f588aced8fcfb68c23b6vboxsync * @remark Some of the values outside the range has special meaning, see related \#defines.
typedef struct GMMCHUNKMAP
} GMMCHUNKMAP;
typedef struct GMMCHUNK
} GMMCHUNK;
typedef struct GMMCHUNKTLBE
} GMMCHUNKTLBE;
typedef struct GMMCHUNKTLB
} GMMCHUNKTLB;
typedef struct GMMCHUNKFREESET
typedef struct GMM
bool fLegacyMode;
} GMM;
if (!pGMM)
return VERR_NO_MEMORY;
return VINF_SUCCESS;
return rc;
else if (0)//pGVM->gmm.s.cPrivatePages)
RTAvlU32DoWithAll(&pGMM->pChunks, true /* fFromLeft */, gmmR0FreeVMPagesInChunk, (void *)pGVM->hSelf);
// pGVM->gmm.s.cBasePages = 0;
// pGVM->gmm.s.cPrivatePages = 0;
// pGVM->gmm.s.cSharedPages = 0;
#ifndef VBOx_STRICT
unsigned cPrivate = 0;
unsigned cShared = 0;
unsigned cFree = 0;
while (iPage-- > 0)
cFree++;
cPrivate++;
cFree++;
cShared++;
GMMR0DECL(int) GMMR0InitialReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
LogFlow(("GMMR0InitialReservation: pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x enmPolicy=%d enmPriority=%d\n",
if (!pGVM)
return VERR_INVALID_PARAMETER;
return VERR_NOT_OWNER;
AssertReturn(enmPolicy > GMMOCPOLICY_INVALID && enmPolicy < GMMOCPOLICY_END, VERR_INVALID_PARAMETER);
AssertReturn(enmPriority > GMMPRIORITY_INVALID && enmPriority < GMMPRIORITY_END, VERR_INVALID_PARAMETER);
return rc;
AssertMsgReturn(pReq->Hdr.cbReq != sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
return GMMR0InitialReservation(pVM, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages, pReq->enmPolicy, pReq->enmPriority);
GMMR0DECL(int) GMMR0UpdateReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages)
if (!pGVM)
return VERR_INVALID_PARAMETER;
return VERR_NOT_OWNER;
return rc;
AssertMsgReturn(pReq->Hdr.cbReq != sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
return NULL;
return NULL;
if (pPrev)
if (pNext)
return idChunk;
AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%d\n", idChunk), NIL_GVM_HANDLE);
int rc;
if (pChunk)
return VINF_SUCCESS;
return rc;
return rc;
if (!pChunk)
return rc;
return VINF_SUCCESS;
pPage->u = 0;
static int gmmR0AllocatePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
return VERR_GMM_HIT_GLOBAL_LIMIT;
switch (enmAccount)
case GMMACCOUNT_BASE:
return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
case GMMACCOUNT_SHADOW:
return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
case GMMACCOUNT_FIXED:
return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
return VERR_GMM_SEED_ME;
return rc;
return VERR_GMM_SEED_ME;
switch (enmAccount)
return VINF_SUCCESS;
GMMR0DECL(int) GMMR0AllocateHandyPages(PVM pVM, uint32_t cPagesToUpdate, uint32_t cPagesToAlloc, PGMMPAGEDESC paPages)
return VERR_INVALID_PARAMETER;
return VERR_NOT_OWNER;
unsigned iPage = 0;
AssertMsgReturn(paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS, ("#%#x: %RHp\n", iPage, paPages[iPage].HCPhysGCPhys), VERR_INVALID_PARAMETER);
AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not found! (shared)\n", iPage, paPages[iPage].idSharedPage));
return rc;
GMMR0DECL(int) GMMR0AllocatePages(PVM pVM, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
LogFlow(("GMMR0AllocatePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));
if (!pGVM)
return VERR_INVALID_PARAMETER;
return VERR_NOT_OWNER;
AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
return rc;
pPage->u = 0;
static int gmmR0FreePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
switch (enmAccount)
case GMMACCOUNT_BASE:
case GMMACCOUNT_SHADOW:
case GMMACCOUNT_FIXED:
switch (enmAccount)
return rc;
GMMR0DECL(int) GMMR0FreePages(PVM pVM, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
LogFlow(("GMMR0FreePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));
if (!pGVM)
return VERR_INVALID_PARAMETER;
return VERR_NOT_OWNER;
AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
return rc;
GMMR0DECL(int) GMMR0BalloonedPages(PVM pVM, uint32_t cBalloonedPages, uint32_t cPagesToFree, PGMMFREEPAGEDESC paPages, bool fCompleted)
LogFlow(("GMMR0BalloonedPages: pVM=%p cBalloonedPages=%#x cPagestoFree=%#x paPages=%p enmAccount=%d fCompleted=%RTbool\n",
if (!pGVM)
return VERR_INVALID_PARAMETER;
return VERR_NOT_OWNER;
AssertMsgReturn(cBalloonedPages >= 0 && cBalloonedPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cBalloonedPages), VERR_INVALID_PARAMETER);
AssertMsgReturn(cPagesToFree >= 0 && cPagesToFree <= cBalloonedPages, ("%#x\n", cPagesToFree), VERR_INVALID_PARAMETER);
if (fCompleted)
Log(("GMMR0BalloonedPages: +%#x - Global=%#llx; / VM: Total=%#llx Req=%#llx Actual=%#llx (completed)\n", cBalloonedPages,
pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqBalloonedPages, pGVM->gmm.s.cReqActuallyBalloonedPages));
Log(("GMMR0BalloonedPages: +%#x - Global=%#llx / VM: Total=%#llx Req=%#llx Actual=%#llx (pending)\n", cBalloonedPages,
pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqBalloonedPages, pGVM->gmm.s.cReqActuallyBalloonedPages));
if (cPagesToFree)
return rc;
return GMMR0BalloonedPages(pVM, pReq->cBalloonedPages, pReq->cPagesToFree, &pReq->aPages[0], pReq->fCompleted);
if (!pGVM)
return VERR_INVALID_PARAMETER;
return VERR_NOT_OWNER;
AssertMsgReturn(cPages >= 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
Log(("GMMR0DeflatedBalloon: cBalloonedPages=%#llx cPages=%#x\n", pGVM->gmm.s.cBalloonedPages, cPages));
return rc;
return rc;
Log(("gmmR0MapChunk: Chunk %#x is not mapped into pGVM=%p/%#x\n", pChunk->Core.Key, pGVM, pGVM->hSelf));
return VERR_GMM_CHUNK_NOT_MAPPED;
return VERR_GMM_CHUNK_ALREADY_MAPPED;
int rc = RTR0MemObjMapUser(&MapObj, pChunk->MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
void *pvMappings = RTMemRealloc(pChunk->paMappings, (pChunk->cMappings + 2 /*8*/) * sizeof(pChunk->paMappings[0]));
return VERR_NO_MEMORY;
return rc;
GMMR0DECL(int) GMMR0MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
if (!pGVM)
return VERR_INVALID_PARAMETER;
return VERR_NOT_OWNER;
return VERR_INVALID_PARAMETER;
return VERR_NOT_SUPPORTED;
return rc;
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
if (!pGVM)
return VERR_INVALID_PARAMETER;
return VERR_NOT_OWNER;
return VERR_NOT_SUPPORTED;
return rc;