/* GMMR0.cpp - revision dfd840ec1ab0ca676c1b7fcd97a73039971b2317 */
/*
 * GMM - Global Memory Manager.
 *
 * Copyright (C) 2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */
/** @page pg_gmm   GMM - The Global Memory Manager
 *
 * As the name indicates, this component is responsible for global memory
 * management. Currently only guest RAM is allocated from the GMM, but this
 * may change to include shadow page tables and other bits later.
 *
 * Guest RAM is managed as individual pages, but allocated from the host OS
 * in chunks for reasons of portability / efficiency. To minimize the memory
 * footprint all tracking structures must be as small as possible without
 * incurring unnecessary performance penalties.
 *
 * The allocation chunks have a fixed size, defined at compile time
 * by the #GMM_CHUNK_SIZE \#define.
 *
 * Each chunk is given a unique ID. Each page also has a unique ID. The
 * relationship between the two IDs is:
 * @code
 *      GMM_CHUNK_SHIFT = log2(GMM_CHUNK_SIZE / PAGE_SIZE);
 *      idPage = (idChunk << GMM_CHUNK_SHIFT) | iPage;
 * @endcode
 * Where iPage is the index of the page within the chunk. This ID scheme
 * permits efficient chunk and page lookup, but it relies on the chunk size
 * being set at compile time. The chunks are organized in an AVL tree with their
 * IDs being the keys.
 *
 * The physical address of each page in an allocation chunk is maintained by
 * the #RTR0MEMOBJ and obtained using #RTR0MemObjGetPagePhysAddr. There is no
 * need to duplicate this information (it would cost 8 bytes per page if we did).
 *
 * So what do we need to track per page? Most importantly we need to know
 * which state the page is in:
 *
 *   - Private - Allocated for (eventually) backing one particular VM page.
 *   - Shared  - Read-only page that is used by one or more VMs and treated
 *               as COW by PGM.
 *   - Free    - Not used by anyone.
 *
 * For the page replacement operations (sharing, defragmenting and freeing)
 * to be somewhat efficient, private pages need to be associated with a
 * particular page in a particular VM.
 *
 * Tracking the usage of shared pages is impractical and expensive, so we'll
 * settle for a reference counting system instead.
 *
 * Free pages will be chained on LIFOs.
 *
 * On 64-bit systems we will use a 64-bit bitfield per page, while on 32-bit
 * systems a 32-bit bitfield will have to suffice because of address space
 * limitations. The #GMMPAGE structure shows the details.
 *
 *
 * @section sec_gmm_alloc_strat   Page Allocation Strategy
 *
 * The strategy for allocating pages has to take fragmentation and shared
 * pages into account, or we may end up with 2000 chunks with only
 * a few pages in each. Shared pages cannot easily be reallocated because
 * of the inaccurate usage accounting (see above). Private pages can be
 * reallocated by a defragmentation thread in the same manner that sharing
 * is done.
 */
#include "GMMR0Internal.h"
/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The per-page tracking structure employed by the GMM.
 *
 * Note: the exact field widths below are a reconstruction; only the view and
 * member names survive in this excerpt. The page state lives in the top two
 * bits (u2State) in every view.
 */
typedef union GMMPAGE
{
#if HC_ARCH_BITS == 64
    /** Unsigned integer view. */
    uint64_t u;
    /** The common view, valid whatever the state. */
    struct GMMPAGECOMMON
    {
        uint32_t    uStuff1 : 32, uStuff2 : 30;
        uint32_t    u2State : 2;                /**< The page state. */
    } Common;
    /** The view of a private page. */
    struct GMMPAGEPRIVATE
    {
        uint32_t    pfn;                        /**< The guest page frame number. */
        uint32_t    hGVM : 16, u14Reserved : 14;
        uint32_t    u2State : 2;                /**< The page state. */
    } Private;
    /** The view of a shared page. */
    struct GMMPAGESHARED
    {
        uint32_t    cRefs;                      /**< The reference count. */
        uint32_t    u30Reserved : 30;
        uint32_t    u2State : 2;                /**< The page state. */
    } Shared;
    /** The view of a free page. */
    struct GMMPAGEFREE
    {
        uint32_t    idNext;                     /**< The next page in the free LIFO. */
        uint32_t    u30Reserved : 30;
        uint32_t    u2State : 2;                /**< The page state. */
    } Free;
#else /* 32-bit host: everything must squeeze into 32 bits. */
    /** Unsigned integer view. */
    uint32_t u;
    struct GMMPAGECOMMON  { uint32_t uStuff : 30, u2State : 2; } Common;
    struct GMMPAGEPRIVATE { uint32_t pfn : 24, hGVM : 7, fZero : 1; } Private;
    struct GMMPAGESHARED  { uint32_t cRefs : 30, u2State : 2; } Shared;
    struct GMMPAGEFREE    { uint32_t idNext : 30, u2State : 2; } Free;
#endif
} GMMPAGE;
/** Pointer to a GMMPAGE. */
typedef GMMPAGE *PGMMPAGE;
/** @name The Page States.
 * @{ */
/** A private page. */
#define GMM_PAGE_STATE_PRIVATE      0
/** A private page - alternative value used on the 32-bit implementation. */
#define GMM_PAGE_STATE_PRIVATE_32   1
/** A shared page. */
#define GMM_PAGE_STATE_SHARED       2
/** A free page. */
#define GMM_PAGE_STATE_FREE         3
/** @} */

/** @def GMM_PAGE_IS_PRIVATE
 * @returns true if the page is private, false if not. */
#if HC_ARCH_BITS == 64
# define GMM_PAGE_IS_PRIVATE(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_PRIVATE )
#else
# define GMM_PAGE_IS_PRIVATE(pPage) ( (pPage)->Private.fZero == 0 )
#endif
/** @def GMM_PAGE_IS_SHARED
 * @returns true if the page is shared, false if not. */
#define GMM_PAGE_IS_SHARED(pPage)   ( (pPage)->Common.u2State == GMM_PAGE_STATE_SHARED )
/** @def GMM_PAGE_IS_FREE
 * @returns true if the page is free, false if not. */
#define GMM_PAGE_IS_FREE(pPage)     ( (pPage)->Common.u2State == GMM_PAGE_STATE_FREE )
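

/*
 * Illustrative sketch (not from the original file): turning a free page into
 * a private page backing guest frame pfn for VM hGVM, using the union views
 * and state macros above. The helper name is hypothetical; it relies on
 * GMM_PAGE_STATE_PRIVATE being zero so the wipe already sets the state.
 */
#if 0 /* example only */
static void gmmSketchMakePrivate(PGMMPAGE pPage, uint32_t pfn, uint16_t hGVM)
{
    Assert(GMM_PAGE_IS_FREE(pPage));
    pPage->u = 0;                   /* wipe; the state is now GMM_PAGE_STATE_PRIVATE (0) */
    pPage->Private.pfn  = pfn;      /* the guest page this host page backs */
    pPage->Private.hGVM = hGVM;     /* the VM that owns it */
    Assert(GMM_PAGE_IS_PRIVATE(pPage));
}
#endif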
/** Pointer to an allocation chunk. */
typedef struct GMMCHUNK *PGMMCHUNK;

/**
 * A mapping of an allocation chunk into a VM process.
 * (Field set reconstructed from the uses visible in this excerpt.)
 */
typedef struct GMMCHUNKMAP
{
    RTR0MEMOBJ      MapObj;     /**< The mapping object. */
    PGVM            pGVM;       /**< The VM owning the mapping. */
} GMMCHUNKMAP;
/** Pointer to a chunk mapping. */
typedef GMMCHUNKMAP *PGMMCHUNKMAP;
/**
 * An allocation chunk.
 * (Field set reconstructed from the uses visible in this excerpt.)
 */
typedef struct GMMCHUNK
{
    AVLU32NODECORE  Core;       /**< The AVL node core; Core.Key is the chunk ID. */
    RTR0MEMOBJ      MemObj;     /**< The ring-0 memory object backing the chunk. */
    PGMMCHUNK       pFreeNext;  /**< The next chunk in the free list. */
    PGMMCHUNK       pFreePrev;  /**< The previous chunk in the free list. */
    PGMMCHUNKMAP    paMappings; /**< Array of per-VM mappings of the chunk. */
    uint16_t        cMappings;  /**< The number of mappings. */
    uint16_t        iFreeHead;  /**< Head of the free page LIFO (index into aPages). */
    uint16_t        cFree;      /**< The number of free pages. */
    uint16_t        hGVM;       /**< The hGVM of the VM the chunk was allocated for. */
    uint16_t        cPrivate;   /**< The number of private pages. */
    uint16_t        cShared;    /**< The number of shared pages. */
    GMMPAGE         aPages[GMM_CHUNK_SIZE >> PAGE_SHIFT];  /**< The page tracking array. */
} GMMCHUNK;
/**
 * A chunk TLB entry.
 */
typedef struct GMMCHUNKTLBE
{
    uint32_t        idChunk;    /**< The chunk ID. */
    PGMMCHUNK       pChunk;     /**< The cached chunk. */
} GMMCHUNKTLBE;
/** Pointer to a chunk TLB entry. */
typedef GMMCHUNKTLBE *PGMMCHUNKTLBE;

/** The number of entries in the chunk TLB. */
#define GMM_CHUNKTLB_ENTRIES        32
/** Calculates the TLB entry index for a given chunk ID. */
#define GMM_CHUNKTLB_IDX(idChunk)   ( (idChunk) & (GMM_CHUNKTLB_ENTRIES - 1) )

/**
 * The chunk TLB, speeding up the idChunk -> PGMMCHUNK lookup.
 */
typedef struct GMMCHUNKTLB
{
    GMMCHUNKTLBE    aEntries[GMM_CHUNKTLB_ENTRIES]; /**< The TLB entries. */
} GMMCHUNKTLB;
/** Pointer to the chunk TLB. */
typedef GMMCHUNKTLB *PGMMCHUNKTLB;
/**
 * A set of free chunks.
 * (Reconstructed: the bucketed list layout is an assumption based on the
 * allocation strategy notes at the top of the file.)
 */
typedef struct GMMCHUNKFREESET
{
    uint64_t        cPages;     /**< The total number of free pages in the set. */
    PGMMCHUNK       apLists[16];/**< Chunk lists bucketed by free page count. */
} GMMCHUNKFREESET;
/** Pointer to a free chunk set. */
typedef GMMCHUNKFREESET *PGMMCHUNKFREESET;

/**
 * The GMM instance data.
 * (Field set reconstructed from the uses visible in this excerpt.)
 */
typedef struct GMM
{
    RTSEMFASTMUTEX  Mtx;            /**< The fast mutex serializing GMM operations. */
    PAVLU32NODECORE pChunks;        /**< The chunk AVL tree, keyed by chunk ID. */
    GMMCHUNKTLB     ChunkTLB;       /**< The chunk TLB. */
    GMMCHUNKFREESET Private;        /**< The free set for private allocations. */
    GMMCHUNKFREESET Shared;         /**< The free set with shared pages. */
    uint64_t        cMaxPages;      /**< The maximum number of pages we may allocate. */
    uint64_t        cReservedPages; /**< The number of pages that have been reserved. */
    uint64_t        cAllocatedPages;/**< The number of actually allocated pages. */
    uint64_t        cBalloonedPages;/**< The number of currently ballooned pages. */
    /** Set if we're in legacy (locked-memory) mode, i.e. the host doesn't
     * support RTR0MemObjAllocPhysNC and ring-3 must seed us with chunks. */
    bool            fLegacyMode;
    /** Chunk ID allocation bitmap; set bits indicate allocated IDs. */
    uint32_t        bmChunkId[(GMM_CHUNKID_LAST + 1 + 31) / 32];
} GMM;
/** Pointer to the GMM instance data. */
typedef GMM *PGMM;


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Pointer to the GMM instance data. */
static PGMM g_pGMM = NULL;
/**
 * Initializes the GMM component.
 *
 * This is called when the VMMR0.r0 module is loaded.
 *
 * @returns VBox status code.
 */
GMMR0DECL(int) GMMR0Init(void)
{
    LogFlow(("GMMInit:\n"));

    /*
     * Allocate the instance data and the lock.
     * (Body reconstructed around the surviving statements; the elided parts
     * initialize the chunk TLB and probe for legacy mode.)
     */
    PGMM pGMM = (PGMM)RTMemAllocZ(sizeof(*pGMM));
    if (!pGMM)
        return VERR_NO_MEMORY;
    int rc = RTSemFastMutexCreate(&pGMM->Mtx);
    if (RT_SUCCESS(rc))
    {
        /* ... */
        g_pGMM = pGMM;
        LogFlow(("GMMInit: pGMM=%p\n", pGMM));
        return VINF_SUCCESS;
    }

    RTMemFree(pGMM);
    return rc;
}
SUPR0Printf("GMMR0CleanupVM: hGVM=%#x has %#x private pages that cannot be found!\n", pGVM->hSelf, pGVM->gmm.s.cPrivatePages);
if (cPrivatePages)
while (pCur)
SUPR0Printf("GMMR0CleanupVM: hGVM=%#x left %#x shared pages behind!\n", pGVM->hSelf, pGVM->gmm.s.cSharedPages);
case GMMOCPOLICY_NO_OC:
/**
 * Scans one chunk for pages belonging to the specified VM.
 *
 * @param   pGMM    The GMM instance. (Parameter list reconstructed.)
 * @param   pGVM    Pointer to the Global VM structure.
 * @param   pChunk  The chunk to scan.
 */
static void gmmR0CleanupVMScanChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk)
{
    /* ... free the VM's private pages and drop its mapping (elided) ... */

    /*
     * Perform some internal consistency checks. Strict builds always check;
     * other builds only check chunks that aren't completely free.
     */
#ifndef VBOX_STRICT
    if (pChunk->cFree != (GMM_CHUNK_SIZE >> PAGE_SHIFT))
#endif
    {
        unsigned cPrivate = 0;
        unsigned cShared = 0;
        unsigned cFree = 0;

        unsigned iPage = GMM_CHUNK_SIZE >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (GMM_PAGE_IS_PRIVATE(&pChunk->aPages[iPage]))
            {
                if (pChunk->aPages[iPage].Private.hGVM == pGVM->hSelf)
                {
                    /* ... free the page (elided) ... */
                    cFree++;
                }
                else
                    cPrivate++;
            }
            else if (GMM_PAGE_IS_FREE(&pChunk->aPages[iPage]))
                cFree++;
            else
                cShared++;

        if (   pChunk->cFree != cFree
            || pChunk->cPrivate != cPrivate
            || pChunk->cShared != cShared)
            SUPR0Printf("gmmR0CleanupVMScanChunk: Chunk %p/%#x has bogus stats - free=%d/%d private=%d/%d shared=%d/%d\n",
                        pChunk, pChunk->Core.Key, pChunk->cFree, cFree, pChunk->cPrivate, cPrivate, pChunk->cShared, cShared);
    }

    /* In legacy mode a chunk is owned wholesale by one VM, so no free pages
       should ever be left in it here. */
    if (pGMM->fLegacyMode && pChunk->cFree)
        AssertMsgFailed(("%p/%#x: cFree=%#x - it should be 0 in legacy mode!\n", pChunk, pChunk->Core.Key, pChunk->cFree));
}
SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: RTRMemObjFree(%p,false) -> %d \n", pChunk,
SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: RTRMemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk,
/**
 * Registers the initial resource reservations for a VM.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   cBasePages      Pages that may be allocated for base RAM and ROMs.
 * @param   cShadowPages    Pages that may be allocated for shadow paging structures.
 * @param   cFixedPages     Pages that may be allocated for fixed objects.
 * @param   enmPolicy       The over-commitment policy to use for this VM.
 * @param   enmPriority     The priority in an out-of-memory situation.
 */
GMMR0DECL(int) GMMR0InitialReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
                                       GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority)
{
    LogFlow(("GMMR0InitialReservation: pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x enmPolicy=%d enmPriority=%d\n",
             pVM, cBasePages, cShadowPages, cFixedPages, enmPolicy, enmPriority));

    /*
     * Validate the caller and the request.
     */
    PGVM pGVM = GVMMR0ByVM(pVM);
    if (!pGVM)
        return VERR_INVALID_PARAMETER;
    if (pGVM->hEMT != RTThreadNativeSelf())
        return VERR_NOT_OWNER;
    AssertReturn(enmPolicy > GMMOCPOLICY_INVALID && enmPolicy < GMMOCPOLICY_END, VERR_INVALID_PARAMETER);
    AssertReturn(enmPriority > GMMPRIORITY_INVALID && enmPriority < GMMPRIORITY_END, VERR_INVALID_PARAMETER);

    /*
     * Take the mutex and record the reservation.
     * (Accounting elided in this excerpt.)
     */
    PGMM pGMM = g_pGMM;
    int rc = RTSemFastMutexRequest(pGMM->Mtx);
    AssertRCReturn(rc, rc);
    /* ... */
    RTSemFastMutexRelease(pGMM->Mtx);
    LogFlow(("GMMR0InitialReservation: returns %Rrc\n", rc));
    return rc;
}
/**
 * VMMR0 request wrapper for GMMR0InitialReservation.
 *
 * @returns see GMMR0InitialReservation.
 * @param   pVM     Pointer to the shared VM structure.
 * @param   pReq    The request packet.
 */
GMMR0DECL(int) GMMR0InitialReservationReq(PVM pVM, PGMMINITIALRESERVATIONREQ pReq)
{
    /*
     * Validate input and pass it on. (The surviving assert compared with
     * '!=', which rejected every valid request; '==' is the intended check.)
     */
    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
    AssertPtrReturn(pReq, VERR_INVALID_POINTER);
    AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);

    return GMMR0InitialReservation(pVM, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages, pReq->enmPolicy, pReq->enmPriority);
}
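

/*
 * Illustrative sketch (not from the original file): how a caller might fill
 * in the request packet for the wrapper above. The helper name and the
 * concrete page counts are hypothetical; GMMPRIORITY_NORMAL and
 * SUPVMMR0REQHDR_MAGIC are assumed to come from the public headers.
 */
#if 0 /* example only */
static int sketchInitialReservation(PVM pVM)
{
    GMMINITIALRESERVATIONREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq    = sizeof(Req);
    Req.cBasePages   = 0x10000;             /* 256 MB of guest RAM (4K pages) */
    Req.cShadowPages = 0x200;
    Req.cFixedPages  = 0x100;
    Req.enmPolicy    = GMMOCPOLICY_NO_OC;   /* no over-commitment */
    Req.enmPriority  = GMMPRIORITY_NORMAL;
    return GMMR0InitialReservationReq(pVM, &Req);
}
#endif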
/**
 * Updates the previously registered resource reservations for a VM.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   cBasePages      Pages that may be allocated for base RAM and ROMs.
 * @param   cShadowPages    Pages that may be allocated for shadow paging structures.
 * @param   cFixedPages     Pages that may be allocated for fixed objects.
 */
GMMR0DECL(int) GMMR0UpdateReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages)
{
    /*
     * Validate the caller and the request.
     */
    PGVM pGVM = GVMMR0ByVM(pVM);
    if (!pGVM)
        return VERR_INVALID_PARAMETER;
    if (pGVM->hEMT != RTThreadNativeSelf())
        return VERR_NOT_OWNER;

    /*
     * Take the mutex and update the reservation.
     * (Accounting elided in this excerpt.)
     */
    PGMM pGMM = g_pGMM;
    int rc = RTSemFastMutexRequest(pGMM->Mtx);
    AssertRCReturn(rc, rc);
    /* ... */
    RTSemFastMutexRelease(pGMM->Mtx);
    return rc;
}
/**
 * VMMR0 request wrapper for GMMR0UpdateReservation.
 *
 * @returns see GMMR0UpdateReservation.
 * @param   pVM     Pointer to the shared VM structure.
 * @param   pReq    The request packet.
 */
GMMR0DECL(int) GMMR0UpdateReservationReq(PVM pVM, PGMMUPDATERESERVATIONREQ pReq)
{
    /*
     * Validate input and pass it on. (Same '!=' vs '==' fix as in the
     * initial reservation wrapper above.)
     */
    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
    AssertPtrReturn(pReq, VERR_INVALID_POINTER);
    AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);

    return GMMR0UpdateReservation(pVM, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages);
}
/**
 * Looks up a chunk by ID, consulting the TLB before the AVL tree.
 *
 * @returns Pointer to the chunk on success, NULL if not found.
 * @param   pGMM        The GMM instance.
 * @param   idChunk     The ID of the chunk to find.
 */
static PGMMCHUNK gmmR0GetChunk(PGMM pGMM, uint32_t idChunk)
{
    PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(idChunk)];
    if (pTlbe->idChunk == idChunk && pTlbe->pChunk)
        return pTlbe->pChunk;

    PGMMCHUNK pChunk = (PGMMCHUNK)RTAvlU32Get(&pGMM->pChunks, idChunk);
    if (pChunk)
    {
        pTlbe->idChunk = idChunk;   /* cache it for next time */
        pTlbe->pChunk  = pChunk;
        return pChunk;
    }
    return NULL;
}
/**
 * Unlinks the chunk from the free list it's currently on, if any.
 *
 * @param   pChunk      The allocation chunk.
 */
static void gmmR0UnlinkChunk(PGMMCHUNK pChunk)
{
    PGMMCHUNK pPrev = pChunk->pFreePrev;
    PGMMCHUNK pNext = pChunk->pFreeNext;
    if (pPrev)
        pPrev->pFreeNext = pNext;
    if (pNext)
        pNext->pFreePrev = pPrev;
    pChunk->pFreeNext = NULL;
    pChunk->pFreePrev = NULL;
    /* ... update the free set's list head and cPages (elided) ... */
}
/**
 * Allocates a new chunk ID from the ID bitmap.
 *
 * @returns The chunk ID on success, NIL_GMM_CHUNKID on failure.
 *          (The surviving assert returned NIL_GVM_HANDLE, which looks like a
 *          copy-paste slip for a function producing chunk IDs.)
 * @param   pGMM        The GMM instance.
 */
static uint32_t gmmR0AllocateChunkId(PGMM pGMM)
{
    /* ... scan bmChunkId for a clear bit (elided in this excerpt) ... */
    uint32_t idChunk = 0; /* result of the elided bitmap scan */

    /* Claim the ID, asserting that nobody raced us to it. */
    AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%d\n", idChunk), NIL_GMM_CHUNKID);
    return idChunk;
}
/**
 * Registers a new chunk of memory.
 *
 * @returns VBox status code.
 * @param   pGMM        The GMM instance.
 * @param   pSet        The free set to link the new chunk into.
 * @param   MemObj      The memory object backing the chunk.
 */
static int gmmR0RegisterChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, RTR0MEMOBJ MemObj)
{
    int rc;
    PGMMCHUNK pChunk = (PGMMCHUNK)RTMemAllocZ(sizeof(*pChunk));
    if (pChunk)
    {
        /*
         * Initialize it: chain all pages on the free LIFO, allocate a chunk
         * ID, insert the chunk into the AVL tree and link it into the free
         * set. (Details and failure unwinding elided in this excerpt.)
         */
        return VINF_SUCCESS;
    }
    else
        rc = VERR_NO_MEMORY;
    return rc;
}


/**
 * Allocates a new chunk from the host and registers it.
 *
 * @returns VBox status code.
 * @param   pGMM        The GMM instance.
 * @param   pSet        The free set to put the new chunk in.
 */
static int gmmR0AllocateOneChunk(PGMM pGMM, PGMMCHUNKFREESET pSet)
{
    RTR0MEMOBJ MemObj;
    int rc = RTR0MemObjAllocPhysNC(&MemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
    if (RT_SUCCESS(rc))
    {
        rc = gmmR0RegisterChunk(pGMM, pSet, MemObj);
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;
        RTR0MemObjFree(MemObj, false /* fFreeMappings */);
    }
    return rc;
}
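

/*
 * Illustrative sketch (not from the original file): bucketing a chunk into a
 * free set list by its number of free pages. The bucket count matches the
 * apLists[16] reconstruction above, but the exact mapping in the original is
 * unknown; dividing the page range evenly is an assumption.
 */
#if 0 /* example only */
DECLINLINE(unsigned) gmmSketchFreeSetListIndex(PGMMCHUNK pChunk)
{
    unsigned const cPagesPerChunk = GMM_CHUNK_SIZE >> PAGE_SHIFT;
    unsigned iList = pChunk->cFree / (cPagesPerChunk / 16);
    return RT_MIN(iList, 15);   /* a completely free chunk lands in the last bucket */
}
#endif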
/**
 * Picks a free page off the chunk's free LIFO and makes it private.
 * (Reconstructed around the surviving 'pPage->u = 0;' statement; the chunk
 * statistics updates are elided in this excerpt.)
 *
 * @param   pGMM        The GMM instance.
 * @param   hGVM        The GVM handle of the VM the page is allocated for.
 * @param   pChunk      The chunk to allocate from.
 * @param   pPageDesc   The page descriptor to update.
 */
static void gmmR0AllocatePage(PGMM pGMM, uint32_t hGVM, PGMMCHUNK pChunk, PGMMPAGEDESC pPageDesc)
{
    /* Unlink the first free page. */
    Assert(pChunk->cFree);
    pChunk->cFree--;
    const uint32_t iPage = pChunk->iFreeHead;
    PGMMPAGE pPage = &pChunk->aPages[iPage];
    Assert(GMM_PAGE_IS_FREE(pPage));
    pChunk->iFreeHead = pPage->Free.idNext;

    /* Make the page private. */
    pPage->u = 0;
    pPage->Private.hGVM = hGVM;
    pPage->Private.pfn  = (uint32_t)(pPageDesc->HCPhysGCPhys >> PAGE_SHIFT);

    /* Update the page descriptor. */
    pPageDesc->HCPhysGCPhys = RTR0MemObjGetPagePhysAddr(pChunk->MemObj, iPage);
    pPageDesc->idPage       = (pChunk->Core.Key << GMM_CHUNK_SHIFT) | iPage;
    pPageDesc->idSharedPage = NIL_GMM_PAGEID;
}
/**
 * Common worker for GMMR0AllocateHandyPages and GMMR0AllocatePages.
 *
 * @returns VBox status code.
 * @param   pGMM        The GMM instance data.
 * @param   pGVM        The VM the allocation is for.
 * @param   cPages      The number of pages to allocate.
 * @param   paPages     The page descriptors (in/out).
 * @param   enmAccount  The account to charge.
 */
static int gmmR0AllocatePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
{
    /*
     * Check allocation limits: the global one first, then the per-VM account.
     */
    if (RT_UNLIKELY(pGMM->cAllocatedPages + cPages > pGMM->cMaxPages))
        return VERR_GMM_HIT_GLOBAL_LIMIT;

    switch (enmAccount)
    {
        case GMMACCOUNT_BASE:
            if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages + cPages > pGVM->gmm.s.Reserved.cBasePages))
                return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
            break;
        case GMMACCOUNT_SHADOW:
            if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cShadowPages + cPages > pGVM->gmm.s.Reserved.cShadowPages))
                return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
            break;
        case GMMACCOUNT_FIXED:
            if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cFixedPages + cPages > pGVM->gmm.s.Reserved.cFixedPages))
                return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
            break;
        default:
            AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
    }

    /*
     * Make sure there is something to allocate from. In legacy mode we cannot
     * allocate host memory ourselves, so the caller must seed us via
     * GMMR0SeedChunk (see the sketch below).
     */
    if (pGMM->Private.cPages < cPages)
    {
        if (pGMM->fLegacyMode)
            return VERR_GMM_SEED_ME;

        int rc = gmmR0AllocateOneChunk(pGMM, &pGMM->Private);
        if (RT_FAILURE(rc))
            return rc;
        if (pGMM->Private.cPages < cPages)
            return VERR_GMM_SEED_ME;
    }

    /*
     * Pick the pages and charge the account.
     * (Page picking elided in this excerpt; see gmmR0AllocatePage above.)
     */
    switch (enmAccount)
    {
        case GMMACCOUNT_BASE:   pGVM->gmm.s.Allocated.cBasePages   += cPages; break;
        case GMMACCOUNT_SHADOW: pGVM->gmm.s.Allocated.cShadowPages += cPages; break;
        case GMMACCOUNT_FIXED:  pGVM->gmm.s.Allocated.cFixedPages  += cPages; break;
        default: break;
    }
    pGMM->cAllocatedPages += cPages;

    return VINF_SUCCESS;
}
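

/*
 * Illustrative sketch (not from the original file): how a ring-3 caller
 * might respond to VERR_GMM_SEED_ME in legacy mode - allocate a chunk-sized
 * block, hand it down with GMMR0SeedChunk, then retry. The VMMR0 call-gate
 * plumbing is elided, and the helper name and retry bound are arbitrary.
 */
#if 0 /* example only */
static int sketchAllocWithSeeding(PVM pVM, uint32_t cPages, PGMMPAGEDESC paPages)
{
    for (unsigned iTry = 0; iTry < 32; iTry++)
    {
        int rc = GMMR0AllocatePages(pVM, cPages, paPages, GMMACCOUNT_BASE);
        if (rc != VERR_GMM_SEED_ME)
            return rc;

        void *pvChunk = RTMemPageAlloc(GMM_CHUNK_SIZE);     /* ring-3 side */
        if (!pvChunk)
            return VERR_NO_MEMORY;
        rc = GMMR0SeedChunk(pVM, (RTR3PTR)pvChunk);
        if (RT_FAILURE(rc))
            return rc;
    }
    return VERR_GMM_SEED_ME;
}
#endif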
/**
 * Updates the previous allocations and allocates more handy pages.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   cPagesToUpdate  The number of descriptors at the head of paPages to update.
 * @param   cPagesToAlloc   The number of descriptors at the tail of paPages to allocate for.
 * @param   paPages         The descriptor array.
 */
GMMR0DECL(int) GMMR0AllocateHandyPages(PVM pVM, uint32_t cPagesToUpdate, uint32_t cPagesToAlloc, PGMMPAGEDESC paPages)
{
    /*
     * Validate the caller and the request.
     */
    PGVM pGVM = GVMMR0ByVM(pVM);
    if (!pGVM)
        return VERR_INVALID_PARAMETER;
    if (pGVM->hEMT != RTThreadNativeSelf())
        return VERR_NOT_OWNER;

    unsigned iPage = 0;
    /* ... validation of the cPagesToUpdate descriptors elided ... */

    /* The descriptors to allocate for must start out completely blank. */
    for (iPage = cPagesToUpdate; iPage < cPagesToUpdate + cPagesToAlloc; iPage++)
    {
        AssertMsgReturn(paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS, ("#%#x: %RHp\n", iPage, paPages[iPage].HCPhysGCPhys), VERR_INVALID_PARAMETER);
        AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
        AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
    }

    /*
     * Update the existing descriptors first; a shared page may have vanished
     * in the meantime, in which case the update loop logs:
     *     Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not found! (shared)\n", iPage, paPages[iPage].idSharedPage));
     * (Update loop elided in this excerpt.)
     *
     * Then allocate the new pages. (Locking simplified.)
     */
    PGMM pGMM = g_pGMM;
    int rc = RTSemFastMutexRequest(pGMM->Mtx);
    AssertRCReturn(rc, rc);
    rc = gmmR0AllocatePages(pGMM, pGVM, cPagesToAlloc, &paPages[cPagesToUpdate], GMMACCOUNT_BASE);
    RTSemFastMutexRelease(pGMM->Mtx);
    return rc;
}
/**
 * Allocates more pages for the caller.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   cPages      The number of pages to allocate.
 * @param   paPages     The page descriptors.
 * @param   enmAccount  The account to charge.
 */
GMMR0DECL(int) GMMR0AllocatePages(PVM pVM, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
{
    LogFlow(("GMMR0AllocatePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));

    /*
     * Validate the caller and the request.
     */
    PGVM pGVM = GVMMR0ByVM(pVM);
    if (!pGVM)
        return VERR_INVALID_PARAMETER;
    if (pGVM->hEMT != RTThreadNativeSelf())
        return VERR_NOT_OWNER;

    AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
    AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
    AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);

    for (unsigned iPage = 0; iPage < cPages; iPage++)
    {
        AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
        AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
    }

    /*
     * Take the mutex and do the work. (Locking simplified in this excerpt.)
     */
    PGMM pGMM = g_pGMM;
    int rc = RTSemFastMutexRequest(pGMM->Mtx);
    AssertRCReturn(rc, rc);
    rc = gmmR0AllocatePages(pGMM, pGVM, cPages, paPages, enmAccount);
    RTSemFastMutexRelease(pGMM->Mtx);
    LogFlow(("GMMR0AllocatePages: returns %Rrc\n", rc));
    return rc;
}
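

/*
 * Illustrative sketch (not from the original file): allocating a single base
 * RAM page. The descriptor starts out blank, as the validation above
 * demands; on success it carries the new page ID and the host physical
 * address. The helper name is hypothetical and GCPhys handling simplified.
 */
#if 0 /* example only */
static int sketchAllocateOnePage(PVM pVM)
{
    GMMPAGEDESC PageDesc;
    PageDesc.HCPhysGCPhys = NIL_RTHCPHYS;       /* no guest address assigned yet */
    PageDesc.idPage       = NIL_GMM_PAGEID;
    PageDesc.idSharedPage = NIL_GMM_PAGEID;

    int rc = GMMR0AllocatePages(pVM, 1, &PageDesc, GMMACCOUNT_BASE);
    if (RT_SUCCESS(rc))
        LogFlow(("got idPage=%#x at HCPhys=%RHp\n", PageDesc.idPage, PageDesc.HCPhysGCPhys));
    return rc;
}
#endif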
/**
 * Frees one page, putting it at the head of the chunk's free LIFO.
 * (Reconstructed around the surviving 'pPage->u = 0;' statement; the free
 * set rebalancing is elided in this excerpt.)
 *
 * @param   pGMM        The GMM instance.
 * @param   pChunk      The chunk the page belongs to.
 * @param   iPage       The index of the page within the chunk.
 */
static void gmmR0FreePageWorker(PGMM pGMM, PGMMCHUNK pChunk, uint32_t iPage)
{
    PGMMPAGE pPage = &pChunk->aPages[iPage];

    /* Chain it onto the free LIFO and flip the state to 'free'. */
    pPage->u = 0;
    pPage->Free.idNext  = pChunk->iFreeHead;
    pPage->Free.u2State = GMM_PAGE_STATE_FREE;
    pChunk->iFreeHead   = iPage;
    pChunk->cFree++;
    /* ... move the chunk between free set lists if needed (elided) ... */
}


/**
 * Common worker for GMMR0FreePages and GMMR0BalloonedPages.
 *
 * @returns VBox status code.
 * @param   pGMM        The GMM instance data.
 * @param   pGVM        The VM the pages belong to.
 * @param   cPages      The number of pages to free.
 * @param   paPages     The page descriptors.
 * @param   enmAccount  The account to credit.
 */
static int gmmR0FreePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
{
    /*
     * Check that the request doesn't exceed what was allocated on the account.
     */
    switch (enmAccount)
    {
        case GMMACCOUNT_BASE:
            if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages < cPages))
                return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
            break;
        case GMMACCOUNT_SHADOW:
            if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cShadowPages < cPages))
                return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
            break;
        case GMMACCOUNT_FIXED:
            if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cFixedPages < cPages))
                return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
            break;
        default:
            AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
    }

    /*
     * Free the pages and credit the account.
     * (Per-page lookup and freeing elided; see gmmR0FreePageWorker above.)
     */
    int rc = VINF_SUCCESS;
    switch (enmAccount)
    {
        case GMMACCOUNT_BASE:   pGVM->gmm.s.Allocated.cBasePages   -= cPages; break;
        case GMMACCOUNT_SHADOW: pGVM->gmm.s.Allocated.cShadowPages -= cPages; break;
        case GMMACCOUNT_FIXED:  pGVM->gmm.s.Allocated.cFixedPages  -= cPages; break;
        default: break;
    }
    pGMM->cAllocatedPages -= cPages;
    return rc;
}
/**
 * Frees pages previously allocated with GMMR0AllocatePages.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   cPages      The number of pages to free.
 * @param   paPages     The page descriptors.
 * @param   enmAccount  The account to credit.
 */
GMMR0DECL(int) GMMR0FreePages(PVM pVM, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
{
    LogFlow(("GMMR0FreePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));

    /*
     * Validate the caller and the request.
     */
    PGVM pGVM = GVMMR0ByVM(pVM);
    if (!pGVM)
        return VERR_INVALID_PARAMETER;
    if (pGVM->hEMT != RTThreadNativeSelf())
        return VERR_NOT_OWNER;

    AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
    AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
    AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);

    /*
     * Take the mutex and do the work. (Locking simplified in this excerpt.)
     */
    PGMM pGMM = g_pGMM;
    int rc = RTSemFastMutexRequest(pGMM->Mtx);
    AssertRCReturn(rc, rc);
    rc = gmmR0FreePages(pGMM, pGVM, cPages, paPages, enmAccount);
    RTSemFastMutexRelease(pGMM->Mtx);
    LogFlow(("GMMR0FreePages: returns %Rrc\n", rc));
    return rc;
}
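

/*
 * Illustrative sketch (not from the original file): freeing the page
 * allocated in the earlier sketch. GMMFREEPAGEDESC is assumed to carry just
 * the page ID; the helper name is hypothetical.
 */
#if 0 /* example only */
static int sketchFreeOnePage(PVM pVM, uint32_t idPage)
{
    GMMFREEPAGEDESC PageDesc;
    PageDesc.idPage = idPage;
    return GMMR0FreePages(pVM, 1, &PageDesc, GMMACCOUNT_BASE);
}
#endif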
/**
 * Report back on a memory ballooning request.
 *
 * The request may or may not have been initiated by the GMM. If initiated by
 * the GMM it is important that this function is called even if no pages were
 * ballooned.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   cBalloonedPages The number of pages that were ballooned.
 * @param   cPagesToFree    The number of pages to be freed.
 * @param   paPages         The descriptors of the pages to be freed.
 * @param   fCompleted      Whether the ballooning request was completed (true)
 *                          or there are more pages to come (false).
 */
GMMR0DECL(int) GMMR0BalloonedPages(PVM pVM, uint32_t cBalloonedPages, uint32_t cPagesToFree, PGMMFREEPAGEDESC paPages, bool fCompleted)
{
    /* (The surviving log format named an enmAccount parameter this function
       doesn't take; dropped.) */
    LogFlow(("GMMR0BalloonedPages: pVM=%p cBalloonedPages=%#x cPagesToFree=%#x paPages=%p fCompleted=%RTbool\n",
             pVM, cBalloonedPages, cPagesToFree, paPages, fCompleted));

    /*
     * Validate the caller and the request. (The '>= 0' halves of the
     * surviving asserts were tautological for unsigned values; dropped.)
     */
    PGVM pGVM = GVMMR0ByVM(pVM);
    if (!pGVM)
        return VERR_INVALID_PARAMETER;
    if (pGVM->hEMT != RTThreadNativeSelf())
        return VERR_NOT_OWNER;

    AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
    AssertMsgReturn(cBalloonedPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cBalloonedPages), VERR_INVALID_PARAMETER);
    AssertMsgReturn(cPagesToFree <= cBalloonedPages, ("%#x\n", cPagesToFree), VERR_INVALID_PARAMETER);

    /*
     * Update the balloon accounting. (Locking and the request bookkeeping
     * are elided in this excerpt.)
     */
    PGMM pGMM = g_pGMM;
    pGMM->cBalloonedPages       += cBalloonedPages;
    pGVM->gmm.s.cBalloonedPages += cBalloonedPages;
    if (fCompleted)
        Log(("GMMR0BalloonedPages: +%#x - Global=%#llx / VM: Total=%#llx Req=%#llx Actual=%#llx (completed)\n", cBalloonedPages,
             pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqBalloonedPages, pGVM->gmm.s.cReqActuallyBalloonedPages));
    else
        Log(("GMMR0BalloonedPages: +%#x - Global=%#llx / VM: Total=%#llx Req=%#llx Actual=%#llx (pending)\n", cBalloonedPages,
             pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqBalloonedPages, pGVM->gmm.s.cReqActuallyBalloonedPages));

    /*
     * Any pages to free?
     */
    int rc = VINF_SUCCESS;
    if (cPagesToFree)
        rc = gmmR0FreePages(pGMM, pGVM, cPagesToFree, paPages, GMMACCOUNT_BASE);

    LogFlow(("GMMR0BalloonedPages: returns %Rrc\n", rc));
    return rc;
}
/**
 * VMMR0 request wrapper for GMMR0BalloonedPages.
 *
 * @returns see GMMR0BalloonedPages.
 * @param   pVM     Pointer to the shared VM structure.
 * @param   pReq    The request packet.
 */
GMMR0DECL(int) GMMR0BalloonedPagesReq(PVM pVM, PGMMBALLOONEDPAGESREQ pReq)
{
    /*
     * Validate input and pass it on. (The request packet is variable sized;
     * its minimum-size check is elided in this excerpt.)
     */
    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
    AssertPtrReturn(pReq, VERR_INVALID_POINTER);

    return GMMR0BalloonedPages(pVM, pReq->cBalloonedPages, pReq->cPagesToFree, &pReq->aPages[0], pReq->fCompleted);
}
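

/*
 * Illustrative sketch (not from the original file): reporting 16 ballooned
 * pages of which one is handed back for freeing. The variable-size request
 * layout follows the aPages[] flexible tail used by the wrapper above; the
 * RT_OFFSETOF sizing and the helper name are assumptions.
 */
#if 0 /* example only */
static int sketchReportBalloon(PVM pVM, uint32_t idPageToFree)
{
    uint8_t abBuf[RT_OFFSETOF(GMMBALLOONEDPAGESREQ, aPages[1])];
    PGMMBALLOONEDPAGESREQ pReq = (PGMMBALLOONEDPAGESREQ)abBuf;

    pReq->Hdr.u32Magic     = SUPVMMR0REQHDR_MAGIC;
    pReq->Hdr.cbReq        = sizeof(abBuf);
    pReq->cBalloonedPages  = 16;
    pReq->cPagesToFree     = 1;
    pReq->fCompleted       = true;
    pReq->aPages[0].idPage = idPageToFree;

    return GMMR0BalloonedPagesReq(pVM, pReq);
}
#endif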
/**
 * Report balloon deflating.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   cPages      The number of pages that were let out of the balloon.
 */
GMMR0DECL(int) GMMR0DeflatedBalloon(PVM pVM, uint32_t cPages)
{
    /*
     * Validate the caller and the request.
     */
    PGVM pGVM = GVMMR0ByVM(pVM);
    if (!pGVM)
        return VERR_INVALID_PARAMETER;
    if (pGVM->hEMT != RTThreadNativeSelf())
        return VERR_NOT_OWNER;

    AssertMsgReturn(cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);

    /*
     * Update the balloon accounting. (Details elided in this excerpt.)
     */
    int rc = VINF_SUCCESS;
    Log(("GMMR0DeflatedBalloon: cBalloonedPages=%#llx cPages=%#x\n", pGVM->gmm.s.cBalloonedPages, cPages));
    return rc;
}
/**
 * Unmaps a chunk previously mapped into the address space of the current process.
 *
 * @returns VBox status code.
 * @param   pGMM        The GMM instance.
 * @param   pGVM        The VM owning the mapping.
 * @param   pChunk      The chunk to unmap.
 */
static int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk)
{
    /*
     * Find the mapping and try unmapping it.
     */
    for (uint32_t i = 0; i < pChunk->cMappings; i++)
        if (pChunk->paMappings[i].pGVM == pGVM)
        {
            int rc = RTR0MemObjFree(pChunk->paMappings[i].MapObj, false /* fFreeMappings */);
            if (RT_SUCCESS(rc))
            {
                /* ... shrink the mapping array (elided) ... */
            }
            return rc;
        }

    Log(("gmmR0UnmapChunk: Chunk %#x is not mapped into pGVM=%p/%#x\n", pChunk->Core.Key, pGVM, pGVM->hSelf));
    return VERR_GMM_CHUNK_NOT_MAPPED;
}
/**
 * Maps a chunk into the user address space of the current process.
 *
 * @returns VBox status code.
 * @param   pGMM        The GMM instance.
 * @param   pGVM        The VM the chunk is mapped for.
 * @param   pChunk      The chunk to map.
 * @param   ppvR3       Where to return the ring-3 address of the mapping.
 */
static int gmmR0MapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)
{
    /*
     * Refuse to map the chunk twice for the same VM.
     */
    for (uint32_t i = 0; i < pChunk->cMappings; i++)
        if (pChunk->paMappings[i].pGVM == pGVM)
            return VERR_GMM_CHUNK_ALREADY_MAPPED;

    /*
     * Do the mapping.
     */
    RTR0MEMOBJ MapObj;
    int rc = RTR0MemObjMapUser(&MapObj, pChunk->MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
    if (RT_SUCCESS(rc))
    {
        /* Grow the mapping array in pairs. */
        if (!(pChunk->cMappings & 1))
        {
            void *pvMappings = RTMemRealloc(pChunk->paMappings, (pChunk->cMappings + 2 /*8*/) * sizeof(pChunk->paMappings[0]));
            if (RT_UNLIKELY(!pvMappings))
            {
                RTR0MemObjFree(MapObj, false /* fFreeMappings */);
                return VERR_NO_MEMORY;
            }
            pChunk->paMappings = (PGMMCHUNKMAP)pvMappings;
        }

        /* Record the new mapping and hand back the ring-3 address. */
        pChunk->paMappings[pChunk->cMappings].MapObj = MapObj;
        pChunk->paMappings[pChunk->cMappings].pGVM   = pGVM;
        pChunk->cMappings++;
        *ppvR3 = RTR0MemObjAddressR3(MapObj);
    }
    return rc;
}
/**
 * Maps a chunk and/or unmaps another chunk.
 *
 * The mapping and unmapping apply to the current process.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   idChunkMap      The chunk to map, NIL_GMM_CHUNKID if none.
 * @param   idChunkUnmap    The chunk to unmap, NIL_GMM_CHUNKID if none.
 * @param   ppvR3           Where to store the ring-3 address of the mapped chunk.
 */
GMMR0DECL(int) GMMR0MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
{
    /*
     * Validate the caller and the request.
     */
    PGVM pGVM = GVMMR0ByVM(pVM);
    if (!pGVM)
        return VERR_INVALID_PARAMETER;
    if (pGVM->hEMT != RTThreadNativeSelf())
        return VERR_NOT_OWNER;

    if (    idChunkMap == NIL_GMM_CHUNKID
        &&  idChunkUnmap == NIL_GMM_CHUNKID)
        return VERR_INVALID_PARAMETER;

    if (idChunkMap != NIL_GMM_CHUNKID)
    {
        AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
        *ppvR3 = NIL_RTR3PTR;
    }

    /* Chunk mapping makes no sense in legacy mode, where ring-3 supplies the chunks. */
    PGMM pGMM = g_pGMM;
    if (pGMM->fLegacyMode)
        return VERR_NOT_SUPPORTED;

    /*
     * Take the mutex, look the chunks up and call the worker functions.
     * (Elided in this excerpt; see gmmR0MapChunk and gmmR0UnmapChunk above.)
     */
    int rc = VINF_SUCCESS;
    return rc;
}
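

/*
 * Illustrative sketch (not from the original file): mapping one chunk into
 * the calling process while unmapping another in the same call, the way a
 * ring-3 chunk cache would recycle address space. The helper name is
 * hypothetical.
 */
#if 0 /* example only */
static int sketchRemapChunk(PVM pVM, uint32_t idChunkNew, uint32_t idChunkOld, PRTR3PTR ppvR3)
{
    /* Map idChunkNew and unmap idChunkOld in one operation; pass
       NIL_GMM_CHUNKID for idChunkOld when there is nothing to evict. */
    return GMMR0MapUnmapChunk(pVM, idChunkNew, idChunkOld, ppvR3);
}
#endif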
/**
 * VMMR0 request wrapper for GMMR0MapUnmapChunk.
 *
 * @returns see GMMR0MapUnmapChunk.
 * @param   pVM     Pointer to the shared VM structure.
 * @param   pReq    The request packet.
 */
GMMR0DECL(int) GMMR0MapUnmapChunkReq(PVM pVM, PGMMMAPUNMAPCHUNKREQ pReq)
{
    /*
     * Validate input and pass it on.
     */
    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
    AssertPtrReturn(pReq, VERR_INVALID_POINTER);
    AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);

    return GMMR0MapUnmapChunk(pVM, pReq->idChunkMap, pReq->idChunkUnmap, &pReq->pvR3);
}
/**
 * Legacy mode API for supplying pages.
 *
 * The specified user address points to an allocation-chunk-sized block that
 * will be locked down and used by the GMM when the VM asks for pages.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   pvR3    Pointer to the chunk-sized memory block to lock down.
 */
GMMR0DECL(int) GMMR0SeedChunk(PVM pVM, RTR3PTR pvR3)
{
    /*
     * Validate the caller and the request.
     */
    PGVM pGVM = GVMMR0ByVM(pVM);
    if (!pGVM)
        return VERR_INVALID_PARAMETER;
    if (pGVM->hEMT != RTThreadNativeSelf())
        return VERR_NOT_OWNER;

    /* Seeding only makes sense in legacy mode. */
    PGMM pGMM = g_pGMM;
    if (!pGMM->fLegacyMode)
        return VERR_NOT_SUPPORTED;

    /*
     * Lock the memory and register it as a chunk.
     * (Elided in this excerpt; see gmmR0RegisterChunk above.)
     */
    int rc = VINF_SUCCESS;
    return rc;
}
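

/*
 * Illustrative sketch (not from the original file): the locking step a
 * seeding implementation would perform - pinning the ring-3 chunk in memory
 * so its pages can be handed out. RTR0MemObjLockUser is the IPRT API for
 * this; the helper name and the absence of unwinding are assumptions.
 */
#if 0 /* example only */
static int sketchLockSeedChunk(RTR3PTR pvR3, PRTR0MEMOBJ phMemObj)
{
    /* Lock GMM_CHUNK_SIZE bytes of the calling process's memory at pvR3. */
    return RTR0MemObjLockUser(phMemObj, pvR3, GMM_CHUNK_SIZE, NIL_RTR0PROCESS);
}
#endif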