GMMR0.cpp revision 475cb6dc1deb85a3e1996ba8671c05330095c645
 * GMM - Global Memory Manager.
 *
 * Copyright (C) 2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
/** @page pg_gmm   GMM - The Global Memory Manager
 *
 * As the name indicates, this component is responsible for global memory
 * management. Currently only guest RAM is allocated from the GMM, but this
 * may change to include shadow page tables and other bits later.
 *
 * Guest RAM is managed as individual pages, but allocated from the host OS
 * in chunks for reasons of portability / efficiency. To minimize the memory
 * footprint all tracking structures must be as small as possible without
 * incurring unnecessary performance penalties.
 *
 * The allocation chunks have a fixed size, defined at compile time
 * by the #GMM_CHUNK_SIZE \#define.
 *
 * Each chunk is given an unique ID. Each page also has a unique ID. The
 * relationship between the two IDs is:
 * @code
 *      GMM_CHUNK_SHIFT = log2(GMM_CHUNK_SIZE / PAGE_SIZE);
 *      idPage = (idChunk << GMM_CHUNK_SHIFT) | iPage;
 * @endcode
 * Where iPage is the index of the page within the chunk. This ID scheme
 * permits efficient chunk and page lookup, but it relies on the chunk size
 * being set at compile time. The chunks are organized in an AVL tree with their
 * IDs being the keys.
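 *
 * As an illustrative sketch (not in the original text), going from a page ID
 * back to its chunk and page index is just the reverse:
 * @code
 *      idChunk = idPage >> GMM_CHUNK_SHIFT;
 *      iPage   = idPage & ((1 << GMM_CHUNK_SHIFT) - 1);
 * @endcode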
 *
 * The physical address of each page in an allocation chunk is maintained by
 * the #RTR0MEMOBJ and obtained using #RTR0MemObjGetPagePhysAddr. There is no
 * need to duplicate this information (it would cost 8 bytes per page if we did).
 *
 * So what do we need to track per page? Most importantly we need to know
 * which state the page is in:
 *   - Private - Allocated for (eventually) backing one particular VM page.
 *   - Shared  - Readonly page that is used by one or more VMs and treated
 *               as COW by PGM.
 *   - Free    - Not used by anyone.
 *
 * For the page replacement operations (sharing, defragmenting and freeing)
 * to be somewhat efficient, private pages need to be associated with a
 * particular page in a particular VM.
 *
 * Tracking the usage of shared pages is impractical and expensive, so we'll
 * settle for a reference counting system instead.
 *
 * Free pages will be chained on LIFOs.
 *
 * On 64-bit systems we will use a 64-bit bitfield per page, while on 32-bit
 * systems a 32-bit bitfield will have to suffice because of address space
 * limitations. The #GMMPAGE structure shows the details.
 *
 * @section sec_gmm_alloc_strat Page Allocation Strategy
 *
 * The strategy for allocating pages has to take fragmentation and shared
 * pages into account, or we may end up with 2000 chunks with only
 * a few pages in each. Shared pages cannot easily be reallocated because
 * of the inaccurate usage accounting (see above). Private pages can be
 * reallocated by a defragmentation thread in the same manner that sharing
 * is done.
 *
 * The first approach is to manage the free pages in two sets depending on
 * whether they are mainly for the allocation of shared or private pages.
 * In the initial implementation there will be almost no possibility for
 * mixing shared and private pages in the same chunk (only if we're really
 * stressed on memory), but when we implement forking of VMs and have to
 * deal with lots of COW pages it'll start getting kind of interesting.
 *
 * The sets are lists of chunks with approximately the same number of
 * free pages. Say the chunk size is 1MB, meaning 256 pages, and a set
 * consists of 16 lists. So, the first list will contain the chunks with
 * 1-7 free pages, the second covers 8-15, and so on. The chunks will be
 * moved between the lists as pages are freed up or allocated.
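 *
 * As an illustrative sketch (assuming the bucketing above maps to the shift
 * constant GMM_CHUNK_FREE_SET_SHIFT used by the code further down), the list
 * a chunk belongs to follows directly from its free page count:
 * @code
 *      iList = (cFreePages - 1) >> GMM_CHUNK_FREE_SET_SHIFT;
 * @endcode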
 *
 * @section sec_gmm_costs       Costs
 *
 * The per page cost in kernel space is 32-bit plus whatever RTR0MEMOBJ
 * entails. In addition there is the chunk cost of approximately
 * (sizeof(RTR0MEMOBJ) + sizeof(CHUNK)) / 2^CHUNK_SHIFT bytes per page.
 *
 * On Windows the per page #RTR0MEMOBJ cost is 32-bit on 32-bit windows
 * and 64-bit on 64-bit windows (a PFN_NUMBER in the MDL). So, 64-bit per page.
 * The cost on Linux is identical, but here it's because of sizeof(struct page *).
 *
 * @section sec_gmm_legacy      Legacy Mode for Non-Tier-1 Platforms
 *
 * In legacy mode the page source is locked user pages and not
 * #RTR0MemObjAllocPhysNC, this means that a page can only be allocated
 * by the VM that locked it. We will make no attempt at implementing
 * page sharing on these systems, just do enough to make it all work.
 *
 * @subsection sub_gmm_locking  Serializing
 *
 * One simple fast mutex will be employed in the initial implementation, not
 * two as mentioned in @ref subsec_pgmPhys_Serializing.
 *
 * @see @ref subsec_pgmPhys_Serializing
 *
 *
 * @section sec_gmm_overcommit  Memory Over-Commitment Management
 *
 * The GVM will have to do the system wide memory over-commitment
 * management. My current ideas are:
 *   - Per VM oc policy that indicates how much to initially commit
 *     to it and what to do in an out-of-memory situation.
 *   - Prevent overtaxing the host.
 *
 * There are some challenges here, the main ones are configurability and
 * security. Should we for instance permit anyone to request 100% memory
 * commitment? Who should be allowed to do runtime adjustments of the
 * config. And how to prevent these settings from being lost when the last
 * VM process exits? The solution is probably to have an optional root
 * daemon that will keep VMMR0.r0 in memory and enable the security measures.
 *
 *
 * @section sec_gmm_numa        NUMA
 *
 * NUMA considerations will be designed and implemented a bit later.
 *
 * The preliminary guess is that we will have to try to allocate memory as
 * close as possible to the CPUs the VM is executed on (EMT and additional CPU
 * threads). Which means it's mostly about allocation and sharing policies.
 * Both the scheduler and allocator interface will have to supply some NUMA info
 * and we'll need to have a way to calc access costs.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
/* (Include list reconstructed from the identifiers used below; the exact set
   may differ between versions.) */
#define LOG_GROUP LOG_GROUP_GMM
#include <VBox/gmm.h>
#include "GMMR0Internal.h"
#include <VBox/gvm.h>
#include <VBox/log.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/asm.h>
#include <iprt/avl.h>
#include <iprt/mem.h>
#include <iprt/memobj.h>
#include <iprt/semaphore.h>
#include <iprt/string.h>

/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/** Pointer to set of free chunks. */
typedef struct GMMCHUNKFREESET *PGMMCHUNKFREESET;

/** Pointer to a GMM allocation chunk. */
typedef struct GMMCHUNK *PGMMCHUNK;

/**
 * The per-page tracking structure employed by the GMM.
 *
 * On 32-bit hosts some trickery is necessary to compress all
 * the information into 32 bits. When the fSharedFree member is set,
 * the 30th bit decides whether it's a free page or not.
 *
 * Because of the different layout on 32-bit and 64-bit hosts, macros
 * are used to get and set some of the data.
 */
typedef union GMMPAGE
{
#if HC_ARCH_BITS == 64
    uint64_t u;                         /**< Unsigned integer view. */
    /** The common view. */
    struct GMMPAGECOMMON
    {
        uint32_t    uStuff1 : 32;
        uint32_t    uStuff2 : 30;
        uint32_t    u2State : 2;        /**< The page state. */
    } Common;
    /** The view of a private page. */
    struct GMMPAGEPRIVATE
    {
        uint32_t    pfn;                /**< The guest page frame number. (Max addressable: 2 ^ 44 - 16) */
        uint32_t    hGVM : 16;          /**< The GVM handle. (64K VMs) */
        uint32_t    u16Reserved : 14;   /**< Reserved. */
        uint32_t    u2State : 2;        /**< The page state. */
    } Private;
    /** The view of a shared page. */
    struct GMMPAGESHARED
    {
        uint32_t    pfn;                /**< The host page frame number. (Max addressable: 2 ^ 44 - 16) */
        uint32_t    cRefs : 16;         /**< The reference count (64K VMs). */
        uint32_t    u14Reserved : 14;   /**< Reserved. Checksum or something? Two hGVMs for forking? */
        uint32_t    u2State : 2;        /**< The page state. */
    } Shared;
    /** The view of a free page. */
    struct GMMPAGEFREE
    {
        uint16_t    iNext;              /**< The index of the next page in the free list. UINT16_MAX is NIL. */
        uint16_t    u16Reserved0;       /**< Reserved. Checksum or something? */
        uint32_t    u30Reserved1 : 30;  /**< Reserved. Checksum or something? */
        uint32_t    u2State : 2;        /**< The page state. */
    } Free;
#else /* 32-bit */
    uint32_t u;                         /**< Unsigned integer view. */
    /** The common view. */
    struct GMMPAGECOMMON
    {
        uint32_t    uStuff  : 30;
        uint32_t    u2State : 2;        /**< The page state. */
    } Common;
    /** The view of a private page. */
    struct GMMPAGEPRIVATE
    {
        uint32_t    pfn : 24;           /**< The guest page frame number. (Max addressable: 2 ^ 36) */
        uint32_t    hGVM : 7;           /**< The GVM handle. (127 VMs) */
        uint32_t    fZero : 1;          /**< The top page state bit, MBZ. */
    } Private;
    /** The view of a shared page. */
    struct GMMPAGESHARED
    {
        uint32_t    cRefs : 30;         /**< The reference count. */
        uint32_t    u2State : 2;        /**< The page state. */
    } Shared;
    /** The view of a free page. */
    struct GMMPAGEFREE
    {
        uint32_t    iNext : 16;         /**< The index of the next page in the free list. UINT16_MAX is NIL. */
        uint32_t    u14Reserved : 14;   /**< Reserved. Checksum or something? */
        uint32_t    u2State : 2;        /**< The page state. */
    } Free;
#endif
} GMMPAGE;
AssertCompileSize(GMMPAGE, sizeof(RTHCUINTPTR));
/** Pointer to a GMMPAGE. */
typedef GMMPAGE *PGMMPAGE;


/** @name The Page States.
 * @{ */
/** A private page. */
#define GMM_PAGE_STATE_PRIVATE      0
/** A private page - alternative value used on the 32-bit implementation.
 * This will never be used on 64-bit hosts. */
#define GMM_PAGE_STATE_PRIVATE_32   1
/** A shared page. */
#define GMM_PAGE_STATE_SHARED       2
/** A free page. */
#define GMM_PAGE_STATE_FREE         3
/** @} */


/** @def GMM_PAGE_IS_PRIVATE
 * @returns true if private, false if not.
 * @param   pPage       The GMM page.
 */
#if HC_ARCH_BITS == 64
# define GMM_PAGE_IS_PRIVATE(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_PRIVATE )
#else
# define GMM_PAGE_IS_PRIVATE(pPage) ( (pPage)->Private.fZero == 0 )
#endif

/** @def GMM_PAGE_IS_SHARED
 * @returns true if shared, false if not.
 * @param   pPage       The GMM page.
 */
#define GMM_PAGE_IS_SHARED(pPage)   ( (pPage)->Common.u2State == GMM_PAGE_STATE_SHARED )

/** @def GMM_PAGE_IS_FREE
 * @returns true if free, false if not.
 * @param   pPage       The GMM page.
 */
#define GMM_PAGE_IS_FREE(pPage)     ( (pPage)->Common.u2State == GMM_PAGE_STATE_FREE )

/** @def GMM_PAGE_PFN_LAST
 * The last valid guest pfn range.
 * @remark Some of the values outside the range have special meanings,
 *         see GMM_PAGE_PFN_UNSHAREABLE.
 */
#if HC_ARCH_BITS == 64
# define GMM_PAGE_PFN_LAST          UINT32_C(0xfffffff0)
#else
# define GMM_PAGE_PFN_LAST          UINT32_C(0x00fffff0)
#endif
AssertCompile(GMM_PAGE_PFN_LAST == (GMM_GCPHYS_LAST >> PAGE_SHIFT));

/** @def GMM_PAGE_PFN_UNSHAREABLE
 * Indicates that this page isn't used for normal guest memory and thus isn't shareable.
 */
#if HC_ARCH_BITS == 64
# define GMM_PAGE_PFN_UNSHAREABLE   UINT32_C(0xfffffff1)
#else
# define GMM_PAGE_PFN_UNSHAREABLE   UINT32_C(0x00fffff1)
#endif
AssertCompile(GMM_PAGE_PFN_UNSHAREABLE == (GMM_GCPHYS_UNSHAREABLE >> PAGE_SHIFT));
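
/*
 * Illustrative sketch only (not part of the original source): how a private
 * page's pfn field relates to the limits above. Pages backing guest addresses
 * beyond GMM_GCPHYS_LAST get the GMM_PAGE_PFN_UNSHAREABLE sentinel instead of
 * a real frame number. The helper name is made up.
 */
DECLINLINE(void) gmmSketchSetPrivatePfn(PGMMPAGE pPage, RTGCPHYS GCPhys)
{
    if (GCPhys <= GMM_GCPHYS_LAST)
        pPage->Private.pfn = (uint32_t)(GCPhys >> PAGE_SHIFT);  /* normal guest memory */
    else
        pPage->Private.pfn = GMM_PAGE_PFN_UNSHAREABLE;          /* not shareable */
}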

/**
 * A GMM allocation chunk ring-3 mapping record.
 *
 * This should really be associated with a session and not a VM, but
 * it's simpler to associate it with a VM and clean up when the VM
 * object is destroyed.
 */
typedef struct GMMCHUNKMAP
{
    RTR0MEMOBJ  MapObj;                 /**< The mapping object. */
    PGVM        pGVM;                   /**< The VM owning the mapping. */
} GMMCHUNKMAP;
/** Pointer to a GMM allocation chunk mapping. */
typedef struct GMMCHUNKMAP *PGMMCHUNKMAP;

typedef enum GMMCHUNKTYPE
{
    GMMCHUNKTYPE_INVALID        = 0,
    GMMCHUNKTYPE_NON_CONTINUOUS = 1,    /* 4 kb pages */
    GMMCHUNKTYPE_CONTINUOUS     = 2,    /* one 2 MB continuous physical range. */
    GMMCHUNKTYPE_32BIT_HACK     = 0x7fffffff
} GMMCHUNKTYPE;

/**
 * A GMM allocation chunk.
 */
typedef struct GMMCHUNK
{
    /** The AVL node core.
     * The Key is the chunk ID. */
    AVLU32NODECORE      Core;
    /** The memory object.
     * Either from RTR0MemObjAllocPhysNC or RTR0MemObjLockUser depending on
     * what the host can dish up with. */
    RTR0MEMOBJ          MemObj;
    /** Pointer to the next chunk in the free list. */
    PGMMCHUNK           pFreeNext;
    /** Pointer to the previous chunk in the free list. */
    PGMMCHUNK           pFreePrev;
    /** Pointer to the free set this chunk belongs to. NULL for
     * chunks with no free pages. */
    PGMMCHUNKFREESET    pSet;
    /** Pointer to an array of mappings. */
    PGMMCHUNKMAP        paMappings;
    /** The number of mappings. */
    uint16_t            cMappings;
    /** The head of the list of free pages. UINT16_MAX is the NIL value. */
    uint16_t            iFreeHead;
    /** The number of free pages. */
    uint16_t            cFree;
    /** The GVM handle of the VM that first allocated pages from this chunk, this
     * is used as a preference when there are several chunks to choose from.
     * When in bound memory mode this isn't a preference any longer. */
    uint16_t            hGVM;
    /** The number of private pages. */
    uint16_t            cPrivate;
    /** The number of shared pages. */
    uint16_t            cShared;
    /** Chunk type. */
    GMMCHUNKTYPE        enmType;
    /** The pages. */
    GMMPAGE             aPages[GMM_CHUNK_SIZE >> PAGE_SHIFT];
} GMMCHUNK;

/**
 * An allocation chunk TLB entry.
 */
typedef struct GMMCHUNKTLBE
{
    uint32_t        idChunk;            /**< The chunk id. */
    PGMMCHUNK       pChunk;             /**< Pointer to the chunk. */
} GMMCHUNKTLBE;
/** Pointer to an allocation chunk TLB entry. */
typedef GMMCHUNKTLBE *PGMMCHUNKTLBE;

/** The number of entries in the allocation chunk TLB. */
#define GMM_CHUNKTLB_ENTRIES        32
/** Gets the TLB entry index for the given Chunk ID. */
#define GMM_CHUNKTLB_IDX(idChunk)   ( (idChunk) & (GMM_CHUNKTLB_ENTRIES - 1) )

/**
 * An allocation chunk TLB.
 */
typedef struct GMMCHUNKTLB
{
    GMMCHUNKTLBE    aEntries[GMM_CHUNKTLB_ENTRIES];     /**< The TLB entries. */
} GMMCHUNKTLB;
/** Pointer to an allocation chunk TLB. */
typedef GMMCHUNKTLB *PGMMCHUNKTLB;
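
/*
 * Illustrative sketch only (the real lookup lives further down in this file):
 * the intended TLB-then-tree chunk lookup. The helper name is made up.
 */
DECLINLINE(PGMMCHUNK) gmmSketchGetChunk(PGMM pGMM, uint32_t idChunk)
{
    /* Try the direct-mapped TLB slot first... */
    PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(idChunk)];
    if (pTlbe->idChunk == idChunk)
        return pTlbe->pChunk;

    /* ...and fall back to the AVL tree on a miss, caching the result. */
    PGMMCHUNK pChunk = (PGMMCHUNK)RTAvlU32Get(&pGMM->pChunks, idChunk);
    if (pChunk)
    {
        pTlbe->idChunk = idChunk;
        pTlbe->pChunk  = pChunk;
    }
    return pChunk;
}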

/** The GMMCHUNK::cFree shift count. */
#define GMM_CHUNK_FREE_SET_SHIFT    4
/** The GMMCHUNK::cFree mask for use when considering relinking a chunk. */
#define GMM_CHUNK_FREE_SET_MASK     15
/** The number of lists in set. */
#define GMM_CHUNK_FREE_SET_LISTS    (GMM_CHUNK_NUM_PAGES >> GMM_CHUNK_FREE_SET_SHIFT)

/**
 * A set of free chunks.
 */
typedef struct GMMCHUNKFREESET
{
    /** The number of free pages in the set. */
    uint64_t        cFreePages;
    /** Chunks ordered by increasing number of free pages. */
    PGMMCHUNK       apLists[GMM_CHUNK_FREE_SET_LISTS];
} GMMCHUNKFREESET;

/**
 * The GMM instance data.
 */
typedef struct GMM
{
    /** Magic / eye catcher. GMM_MAGIC */
    uint32_t            u32Magic;
    /** The fast mutex protecting the GMM.
     * More fine grained locking can be implemented later if necessary. */
    RTSEMFASTMUTEX      Mtx;
    /** The chunk tree. */
    PAVLU32NODECORE     pChunks;
    /** The chunk TLB. */
    GMMCHUNKTLB         ChunkTLB;
    /** The private free set. */
    GMMCHUNKFREESET     Private;
    /** The shared free set. */
    GMMCHUNKFREESET     Shared;

    /** Shared module tree (global).
     * @todo separate trees for distinctly different guest OSes. */
    PAVLGCPTRNODECORE   pGlobalSharedModuleTree;

    /** The maximum number of pages we're allowed to allocate.
     * @gcfgm   64-bit GMM/MaxPages Direct.
     * @gcfgm   32-bit GMM/PctPages Relative to the number of host pages. */
    uint64_t            cMaxPages;
    /** The number of pages that have been reserved.
     * The deal is that cReservedPages - cOverCommittedPages <= cMaxPages. */
    uint64_t            cReservedPages;
    /** The number of pages that we have over-committed in reservations. */
    uint64_t            cOverCommittedPages;
    /** The number of actually allocated (committed if you like) pages. */
    uint64_t            cAllocatedPages;
    /** The number of pages that are shared. A subset of cAllocatedPages. */
    uint64_t            cSharedPages;
    /** The number of shared pages that have been left behind by
     * VMs not doing proper cleanups. */
    uint64_t            cLeftBehindSharedPages;
    /** The number of allocation chunks.
     * (The number of pages we've allocated from the host can be derived from this.) */
    uint32_t            cChunks;
    /** The number of current ballooned pages. */
    uint64_t            cBalloonedPages;
    /** The legacy allocation mode indicator.
     * This is determined at initialization time. */
    bool                fLegacyAllocationMode;
    /** The bound memory mode indicator.
     * When set, the memory will be bound to a specific VM and never
     * shared. This is always set if fLegacyAllocationMode is set.
     * (Also determined at initialization time.) */
    bool                fBoundMemoryMode;
    /** The number of registered VMs. */
    uint16_t            cRegisteredVMs;
    /** The previous allocated Chunk ID.
     * Used as a hint to avoid scanning the whole bitmap. */
    uint32_t            idChunkPrev;
    /** Chunk ID allocation bitmap.
     * Bits of allocated IDs are set, free ones are clear.
     * The NIL id (0) is marked allocated. */
    uint32_t            bmChunkId[(GMM_CHUNKID_LAST + 1 + 31) / 32];
} GMM;
/** Pointer to the GMM instance. */
typedef GMM *PGMM;

/** The value of GMM::u32Magic (Katsuhiro Otomo). */
#define GMM_MAGIC   0x19540414

/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Pointer to the GMM instance data. */
static PGMM g_pGMM = NULL;

/** Macro for obtaining and validating the g_pGMM pointer.
 * On failure it will return from the invoking function with the specified return value.
 *
 * @param   pGMM    The name of the pGMM variable.
 * @param   rc      The return value on failure. Use VERR_INTERNAL_ERROR for
 *                  VBox status codes.
 */
#define GMM_GET_VALID_INSTANCE(pGMM, rc) \
    do { \
        (pGMM) = g_pGMM; \
        AssertPtrReturn((pGMM), (rc)); \
        AssertMsgReturn((pGMM)->u32Magic == GMM_MAGIC, ("%p - %#x\n", (pGMM), (pGMM)->u32Magic), (rc)); \
    } while (0)

/** Macro for obtaining and validating the g_pGMM pointer, void function variant.
 * On failure it will return from the invoking function.
 *
 * @param   pGMM    The name of the pGMM variable.
 */
#define GMM_GET_VALID_INSTANCE_VOID(pGMM) \
    do { \
        (pGMM) = g_pGMM; \
        AssertPtrReturnVoid((pGMM)); \
        AssertMsgReturnVoid((pGMM)->u32Magic == GMM_MAGIC, ("%p - %#x\n", (pGMM), (pGMM)->u32Magic)); \
    } while (0)

/** @def GMM_CHECK_SANITY_UPON_ENTERING
 * Checks the sanity of the GMM instance data before making changes.
 *
 * This macro is a stub by default and must be enabled manually in the code.
 *
 * @returns true if sane, false if not.
 * @param   pGMM    The name of the pGMM variable.
 */
#if defined(VBOX_STRICT) && 0
# define GMM_CHECK_SANITY_UPON_ENTERING(pGMM)   (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
#else
# define GMM_CHECK_SANITY_UPON_ENTERING(pGMM)   (true)
#endif

/** @def GMM_CHECK_SANITY_UPON_LEAVING
 * Checks the sanity of the GMM instance data after making changes.
 *
 * This macro is a stub by default and must be enabled manually in the code.
 *
 * @returns true if sane, false if not.
 * @param   pGMM    The name of the pGMM variable.
 */
#if defined(VBOX_STRICT) && 0
# define GMM_CHECK_SANITY_UPON_LEAVING(pGMM)    (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
#else
# define GMM_CHECK_SANITY_UPON_LEAVING(pGMM)    (true)
#endif

/** @def GMM_CHECK_SANITY_IN_LOOPS
 * Checks the sanity of the GMM instance in the allocation loops.
 *
 * This macro is a stub by default and must be enabled manually in the code.
 *
 * @returns true if sane, false if not.
 * @param   pGMM    The name of the pGMM variable.
 */
#if defined(VBOX_STRICT) && 0
# define GMM_CHECK_SANITY_IN_LOOPS(pGMM)        (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
#else
# define GMM_CHECK_SANITY_IN_LOOPS(pGMM)        (true)
#endif

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM);
static DECLCALLBACK(int) gmmR0CleanupVMScanChunk(PAVLU32NODECORE pNode, void *pvGMM);
static DECLCALLBACK(int) gmmR0CleanupSharedModule(PAVLGCPTRNODECORE pNode, void *pvGVM);
/*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM);
DECLINLINE(void) gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet);
DECLINLINE(void) gmmR0UnlinkChunk(PGMMCHUNK pChunk);
static uint32_t gmmR0SanityCheck(PGMM pGMM, const char *pszFunction, unsigned uLineNo);
static void gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
static void gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage);
static int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);

/**
 * Initializes the GMM component.
 *
 * This is called when the VMMR0.r0 module is loaded and protected by the
 * loader semaphore.
 *
 * @returns VBox status code.
 */
    /*
     * Allocate the instance data and the lock(s).
     */
    for (unsigned i = 0; i < RT_ELEMENTS(pGMM->ChunkTLB.aEntries); i++)
        pGMM->ChunkTLB.aEntries[i].idChunk = NIL_GMM_CHUNKID;

    /*
     * Check and see if RTR0MemObjAllocPhysNC works.
     */
#if 0 /* later, see #3170. */
    rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
    pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
    SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
#endif
# if defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
    /* Don't reuse possibly partial chunks because of the virtual address space limitation. */
# endif

    /*
     * Query system page count and guess a reasonable cMaxPages value.
     */
    pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for query ram size and such. */

    LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));

/**
 * Terminates the GMM component.
 */
    /*
     * Take care / be paranoid...
     */
    SUPR0Printf("GMMR0Term: u32Magic=%#x\n", pGMM->u32Magic);

    /*
     * Undo what init did and free all the resources we've acquired.
     */
    /* Destroy the fundamentals. */
    /* Free any chunks still hanging around. */
    RTAvlU32Destroy(&pGMM->pChunks, gmmR0TermDestroyChunk, pGMM);
    /* Finally the instance data itself. */

/**
 * RTAvlU32Destroy callback.
 *
 * @returns 0
 * @param   pNode   The node to destroy.
 * @param   pvGMM   The GMM handle.
 */
static DECLCALLBACK(int) gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM)
    if (pChunk->cFree != (GMM_CHUNK_SIZE >> PAGE_SHIFT))
        SUPR0Printf("GMMR0Term: %p/%#x: cFree=%d cPrivate=%d cShared=%d cMappings=%d\n", pChunk,
                    pChunk->Core.Key, pChunk->cFree, pChunk->cPrivate, pChunk->cShared, pChunk->cMappings);

    int rc = RTR0MemObjFree(pChunk->MemObj, true /* fFreeMappings */);
        SUPR0Printf("GMMR0Term: %p/%#x: RTR0MemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk,
                    pChunk->Core.Key, pChunk->MemObj, rc, pChunk->cMappings);

/**
 * Initializes the per-VM data for the GMM.
 *
 * This is called from within the GVMM lock (from GVMMR0CreateVM)
 * and should only initialize the data members so GMMR0CleanupVM
 * can deal with them. We reserve no memory or anything here,
 * that's done later in GMMR0InitVM.
 *
 * @param   pGVM    Pointer to the Global VM structure.
 */
    AssertCompile(RT_SIZEOFMEMB(GVM,gmm.s) <= RT_SIZEOFMEMB(GVM,gmm.padding));

/**
 * Cleans up when a VM is terminating.
 *
 * @param   pGVM    Pointer to the Global VM structure.
 */
    LogFlow(("GMMR0CleanupVM: pGVM=%p:{.pVM=%p, .hSelf=%#x}\n", pGVM, pGVM->pVM, pGVM->hSelf));

    /* Clean up all registered shared modules. */
    RTAvlGCPtrDestroy(&pGVM->gmm.s.pSharedModuleTree, gmmR0CleanupSharedModule, pGVM);

    /*
     * The policy is 'INVALID' until the initial reservation
     * request has been serviced.
     */
        /*
         * If it's the last VM around, we can skip walking all the chunks looking
         * for the pages owned by this VM and instead flush the whole shebang.
         *
         * This takes care of the eventuality that a VM has left shared page
         * references behind (shouldn't happen of course, but you never know).
         */
#if 0 /* disabled so it won't hide bugs. */
        RTAvlU32Destroy(&pGMM->pChunks, gmmR0CleanupVMDestroyChunk, pGMM);
        for (unsigned i = 0; i < RT_ELEMENTS(pGMM->ChunkTLB.aEntries); i++)
            pGMM->ChunkTLB.aEntries[i].idChunk = NIL_GMM_CHUNKID;
        memset(&pGMM->bmChunkId[0], 0, sizeof(pGMM->bmChunkId));
#endif

        /*
         * Walk the entire pool looking for pages that belong to this VM
         * and left over mappings. (This'll only catch private pages, shared
         * pages will be 'left behind'.)
         */
        uint64_t cPrivatePages = pGVM->gmm.s.cPrivatePages; /* save */
        RTAvlU32DoWithAll(&pGMM->pChunks, true /* fFromLeft */, gmmR0CleanupVMScanChunk, pGVM);
            SUPR0Printf("GMMR0CleanupVM: hGVM=%#x has %#x private pages that cannot be found!\n", pGVM->hSelf, pGVM->gmm.s.cPrivatePages);

        /* Free empty chunks. */
        PGMMCHUNK pCur = pGMM->Private.apLists[RT_ELEMENTS(pGMM->Private.apLists) - 1];

        /* Account for shared pages that weren't freed. */
        Assert(pGMM->cSharedPages >= pGVM->gmm.s.cSharedPages);
            SUPR0Printf("GMMR0CleanupVM: hGVM=%#x left %#x shared pages behind!\n", pGVM->hSelf, pGVM->gmm.s.cSharedPages);
            pGMM->cLeftBehindSharedPages += pGVM->gmm.s.cSharedPages;

        /*
         * Update the over-commitment management statistics.
         */
        pGMM->cReservedPages -= pGVM->gmm.s.Reserved.cBasePages
        /** @todo Update GMM->cOverCommittedPages */

    /* Zap the GVM data. */

/**
 * RTAvlU32DoWithAll callback.
 *
 * @returns 0
 * @param   pNode   The node to search.
 * @param   pvGVM   Pointer to the shared VM structure.
 */
static DECLCALLBACK(int) gmmR0CleanupVMScanChunk(PAVLU32NODECORE pNode, void *pvGVM)
    /*
     * Look for pages belonging to the VM.
     * (Perform some internal checks while we're scanning.)
     */
    if (pChunk->cFree != (GMM_CHUNK_SIZE >> PAGE_SHIFT))
        unsigned cPrivate = 0;
        unsigned cShared = 0;
        unsigned cFree = 0;

        gmmR0UnlinkChunk(pChunk); /* avoiding cFreePages updates. */

        while (iPage-- > 0)
                /*
                 * Free the page.
                 *
                 * The reason for not using gmmR0FreePrivatePage here is that we
                 * must *not* cause the chunk to be freed from under us - we're in
                 * an AVL tree walk here.
                 */
                pChunk->aPages[iPage].Free.iNext = pChunk->iFreeHead;
                pChunk->aPages[iPage].Free.u2State = GMM_PAGE_STATE_FREE;

        gmmR0LinkChunk(pChunk, pChunk->cShared ? &g_pGMM->Shared : &g_pGMM->Private);

        /*
         * Did it add up?
         */
            SUPR0Printf("gmmR0CleanupVMScanChunk: Chunk %p/%#x has bogus stats - free=%d/%d private=%d/%d shared=%d/%d\n",
                        pChunk->cFree, cFree, pChunk->cPrivate, cPrivate, pChunk->cShared, cShared);

    /*
     * Look for the mapping belonging to the terminating VM.
     */
            pChunk->paMappings[i] = pChunk->paMappings[pChunk->cMappings];
            pChunk->paMappings[pChunk->cMappings].MapObj = NIL_RTR0MEMOBJ;

            int rc = RTR0MemObjFree(MemObj, false /* fFreeMappings (NA) */);
                SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: mapping #%x: RTR0MemObjFree(%p,false) -> %d\n",

    /*
     * If not in bound memory mode, we should reset the hGVM field
     * if it has our handle in it.
     */
                SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: cFree=%#x - it should be 0 in bound mode!\n",
                AssertMsgFailed(("%p/%#x: cFree=%#x - it should be 0 in bound mode!\n", pChunk, pChunk->Core.Key, pChunk->cFree));

        gmmR0LinkChunk(pChunk, pChunk->cShared ? &g_pGMM->Shared : &g_pGMM->Private);

/**
 * RTAvlU32Destroy callback for GMMR0CleanupVM.
 *
 * @returns 0
 * @param   pNode   The node (allocation chunk) to destroy.
 * @param   pvGVM   Pointer to the shared VM structure.
 */
/*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM)
            SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: pGVM=%p expected %p\n", pChunk,
                        pChunk->Core.Key, i, pChunk->paMappings[i].pGVM, pGVM);
        int rc = RTR0MemObjFree(pChunk->paMappings[i].MapObj, false /* fFreeMappings (NA) */);
            SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: RTR0MemObjFree(%p,false) -> %d\n", pChunk,
                        pChunk->Core.Key, i, pChunk->paMappings[i].MapObj, rc);
    int rc = RTR0MemObjFree(pChunk->MemObj, true /* fFreeMappings */);
        SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: RTR0MemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk,
                    pChunk->Core.Key, pChunk->MemObj, rc, pChunk->cMappings);

/**
 * The initial resource reservations.
 *
 * This will make memory reservations according to policy and priority. If there aren't
 * sufficient resources available to sustain the VM this function will fail and all
 * future allocation requests will fail as well.
 *
 * These are just the initial reservations made very early during the VM creation
 * process and will be adjusted later in the GMMR0UpdateReservation call after the
 * ring-3 init has completed.
 *
 * @returns VBox status code.
 * @retval  VERR_GMM_MEMORY_RESERVATION_DECLINED
 * @retval  VERR_GMM_
 */
GMMR0DECL(int) GMMR0InitialReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
LogFlow(("GMMR0InitialReservation: pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x enmPolicy=%d enmPriority=%d\n",
return rc;
AssertReturn(enmPolicy > GMMOCPOLICY_INVALID && enmPolicy < GMMOCPOLICY_END, VERR_INVALID_PARAMETER);
AssertReturn(enmPriority > GMMPRIORITY_INVALID && enmPriority < GMMPRIORITY_END, VERR_INVALID_PARAMETER);
return rc;
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
return GMMR0InitialReservation(pVM, idCpu, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages, pReq->enmPolicy, pReq->enmPriority);
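/*
 * Illustrative ring-3 caller sketch (not part of this file; the real caller
 * is the ring-3 GMM wrapper): the reservation is packaged in a
 * GMMINITIALRESERVATIONREQ and dispatched through the VMMR0 request path.
 * The concrete field values are made-up examples.
 */
    GMMINITIALRESERVATIONREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq    = sizeof(Req);
    Req.cBasePages   = cBasePages;              /* guest RAM + ROM + MMIO2 pages */
    Req.cShadowPages = cShadowPages;            /* shadow paging pool estimate */
    Req.cFixedPages  = cFixedPages;             /* fixed allocations (stacks, etc.) */
    Req.enmPolicy    = GMMOCPOLICY_NO_OC;
    Req.enmPriority  = GMMPRIORITY_NORMAL;
    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_INITIAL_RESERVATION, 0, &Req.Hdr);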
GMMR0DECL(int) GMMR0UpdateReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages)
return rc;
return rc;
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
cErrors++;
return cErrors;
return cErrors;
return pChunk;
return NULL;
if (pPrev)
if (pNext)
return idChunk;
AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk), NIL_GMM_CHUNKID);
AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk), NIL_GMM_CHUNKID);
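/*
 * Illustrative recap (not the original code) of the chunk ID allocation
 * strategy seen in the fragments above: start from the idChunkPrev hint,
 * fall back to a scan from the start of the bitmap, and claim the chosen
 * bit atomically. The helper name is made up.
 */
static uint32_t gmmSketchAllocateChunkId(PGMM pGMM)
{
    /* Scan for a clear bit after the previously allocated ID... */
    int32_t idChunk = ASMBitNextClear(&pGMM->bmChunkId[0], GMM_CHUNKID_LAST + 1, pGMM->idChunkPrev);
    if (idChunk <= NIL_GMM_CHUNKID)
        /* ...and wrap around to a scan from the start of the bitmap. */
        idChunk = ASMBitFirstClear(&pGMM->bmChunkId[0], GMM_CHUNKID_LAST + 1);
    if (   idChunk <= NIL_GMM_CHUNKID
        || ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk))
        return NIL_GMM_CHUNKID;                 /* out of IDs or raced */
    return pGMM->idChunkPrev = (uint32_t)idChunk;
}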
static int gmmR0RegisterChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, RTR0MEMOBJ MemObj, uint16_t hGVM, GMMCHUNKTYPE enmChunkType, PGMMCHUNK *ppChunk = NULL)
int rc;
if (pChunk)
LogFlow(("gmmR0RegisterChunk: pChunk=%p id=%#x cChunks=%d\n", pChunk, pChunk->Core.Key, pGMM->cChunks));
if (ppChunk)
return VINF_SUCCESS;
return rc;
static int gmmR0AllocateOneChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, uint16_t hGVM, GMMCHUNKTYPE enmChunkType, PGMMCHUNK *ppChunk = NULL)
int rc;
AssertReturn(enmChunkType == GMMCHUNKTYPE_NON_CONTINUOUS || enmChunkType == GMMCHUNKTYPE_CONTINUOUS, VERR_INVALID_PARAMETER);
return rc;
return VERR_INTERNAL_ERROR_4;
if (!pChunk)
return rc;
return VERR_INTERNAL_ERROR_5;
return rc;
return VERR_INTERNAL_ERROR_5;
return VINF_SUCCESS;
pPage->u = 0;
static int gmmR0AllocatePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
return VERR_GMM_HIT_GLOBAL_LIMIT;
switch (enmAccount)
case GMMACCOUNT_BASE:
if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages > pGVM->gmm.s.Reserved.cBasePages))
pGVM->gmm.s.Reserved.cBasePages, pGVM->gmm.s.Allocated.cBasePages, pGVM->gmm.s.cBalloonedPages, cPages));
return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
case GMMACCOUNT_SHADOW:
return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
case GMMACCOUNT_FIXED:
return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
return VERR_GMM_SEED_ME;
return VERR_GMM_SEED_ME;
switch (enmAccount)
return VINF_SUCCESS;
GMMR0DECL(int) GMMR0AllocateHandyPages(PVM pVM, VMCPUID idCpu, uint32_t cPagesToUpdate, uint32_t cPagesToAlloc, PGMMPAGEDESC paPages)
return rc;
unsigned iPage = 0;
AssertMsgReturn(paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS, ("#%#x: %RHp\n", iPage, paPages[iPage].HCPhysGCPhys), VERR_INVALID_PARAMETER);
AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not private! %.*Rhxs (type %d)\n", iPage, paPages[iPage].idPage, sizeof(*pPage), pPage, pPage->Common.u2State));
Log(("GMMR0AllocateHandyPages: free shared page %x cRefs=%d\n", paPages[iPage].idSharedPage, pPage->Shared.cRefs));
Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not found! (shared)\n", iPage, paPages[iPage].idSharedPage));
return rc;
GMMR0DECL(int) GMMR0AllocatePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
LogFlow(("GMMR0AllocatePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));
return rc;
AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
return rc;
GMMR0DECL(int) GMMR0AllocateLargePage(PVM pVM, VMCPUID idCpu, uint32_t cbPage, uint32_t *pIdPage, RTHCPHYS *pHCPhys)
return rc;
return VERR_NOT_SUPPORTED;
if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages > pGVM->gmm.s.Reserved.cBasePages))
return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
return rc;
return rc;
return rc;
return VERR_NOT_SUPPORTED;
Log(("GMMR0FreeLargePage: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cBasePages, cPages));
return rc;
&& pGVM)
pPage, pPage - &pChunk->aPages[0], idPage, pPage->Common.u2State, pChunk->iFreeHead)); NOREF(idPage);
pPage->u = 0;
#ifdef VBOX_WITH_PAGE_SHARING
DECLINLINE(void) gmmR0ConvertToSharedPage(PGMM pGMM, PGVM pGVM, RTHCPHYS HCPhys, uint32_t idPage, PGMMPAGE pPage)
static int gmmR0FreePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
switch (enmAccount)
case GMMACCOUNT_BASE:
case GMMACCOUNT_SHADOW:
case GMMACCOUNT_FIXED:
switch (enmAccount)
return rc;
GMMR0DECL(int) GMMR0FreePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
LogFlow(("GMMR0FreePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));
return rc;
AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
return rc;
GMMR0DECL(int) GMMR0BalloonedPages(PVM pVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages)
AssertMsgReturn(cBalloonedPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cBalloonedPages), VERR_INVALID_PARAMETER);
return rc;
switch (enmAction)
case GMMBALLOONACTION_INFLATE:
/* Codepath never taken. Might be interesting in the future to request ballooned memory from guests in low memory conditions. */
AssertFailed();
Log(("GMMR0BalloonedPages: +%#x - Global=%#llx / VM: Total=%#llx Req=%#llx Actual=%#llx (pending)\n", cBalloonedPages,
pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqBalloonedPages, pGVM->gmm.s.cReqActuallyBalloonedPages));
case GMMBALLOONACTION_DEFLATE:
cBalloonedPages, pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqDeflatePages));
case GMMBALLOONACTION_RESET:
return rc;
return VINF_SUCCESS;
return rc;
return rc;
return rc;
return VINF_SUCCESS;
Log(("gmmR0MapChunk: Chunk %#x is not mapped into pGVM=%p/%#x\n", pChunk->Core.Key, pGVM, pGVM->hSelf));
return VERR_GMM_CHUNK_NOT_MAPPED;
return VERR_GMM_CHUNK_NOT_FOUND;
return VINF_SUCCESS;
#ifdef VBOX_WITH_PAGE_SHARING
return VINF_SUCCESS;
return VERR_GMM_CHUNK_ALREADY_MAPPED;
int rc = RTR0MemObjMapUser(&MapObj, pChunk->MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
void *pvMappings = RTMemRealloc(pChunk->paMappings, (pChunk->cMappings + 2 /*8*/) * sizeof(pChunk->paMappings[0]));
return VERR_NO_MEMORY;
return rc;
GMMR0DECL(int) GMMR0MapUnmapChunk(PVM pVM, VMCPUID idCpu, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
return rc;
return VERR_INVALID_PARAMETER;
return rc;
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
return rc;
return VERR_NOT_SUPPORTED;
rc = RTR0MemObjLockUser(&MemObj, pvR3, GMM_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
return rc;
GMMR0DECL(int) GMMR0RegisterSharedModule(PVM pVM, VMCPUID idCpu, VBOXOSFAMILY enmGuestOS, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule,
#ifdef VBOX_WITH_PAGE_SHARING
return rc;
Log(("GMMR0RegisterSharedModule %s %s base %RGv size %x\n", pszModuleName, pszVersion, GCBaseAddr, cbModule));
bool fNewModule = false;
PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)RTAvlGCPtrGet(&pGVM->gmm.s.pSharedModuleTree, GCBaseAddr);
if (!pRecVM)
if (!pRecVM)
AssertFailed();
goto end;
fNewModule = true;
PGMMSHAREDMODULE pGlobalModule = (PGMMSHAREDMODULE)RTAvlGCPtrGet(&pGMM->pGlobalSharedModuleTree, GCBaseAddr);
if (!pGlobalModule)
if (!pGlobalModule)
AssertFailed();
goto end;
for (unsigned i = 0; i < cRegions; i++)
if ( fNewModule
|| pRecVM->fCollision == true) /* colliding module unregistered and new one registered since the last check */
Log(("GMMR0RegisterSharedModule: using existing module %s cUser=%d!\n", pszModuleName, pGlobalModule->cUsers));
goto end;
end:
return rc;
return VERR_NOT_IMPLEMENTED;
GMMR0DECL(int) GMMR0RegisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMREGISTERSHAREDMODULEREQ pReq)
AssertMsgReturn(pReq->Hdr.cbReq >= sizeof(*pReq) && pReq->Hdr.cbReq == RT_UOFFSETOF(GMMREGISTERSHAREDMODULEREQ, aRegions[pReq->cRegions]), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
return GMMR0RegisterSharedModule(pVM, idCpu, pReq->enmGuestOS, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule, pReq->cRegions, pReq->aRegions);
GMMR0DECL(int) GMMR0UnregisterSharedModule(PVM pVM, VMCPUID idCpu, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule)
#ifdef VBOX_WITH_PAGE_SHARING
return rc;
Log(("GMMR0UnregisterSharedModule %s %s base=%RGv size %x\n", pszModuleName, pszVersion, GCBaseAddr, cbModule));
PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)RTAvlGCPtrGet(&pGVM->gmm.s.pSharedModuleTree, GCBaseAddr);
if (!pRecVM)
goto end;
/* Free the ranges, but leave the pages intact as there might still be references; they will be cleared by the COW mechanism. */
end:
return rc;
return VERR_NOT_IMPLEMENTED;
GMMR0DECL(int) GMMR0UnregisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMUNREGISTERSHAREDMODULEREQ pReq)
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
return GMMR0UnregisterSharedModule(pVM, idCpu, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule);
#ifdef VBOX_WITH_PAGE_SHARING
* - if a shared page is new, then it changes the GMM page type to shared and returns it in the paPageDesc array
* - if a shared page already exists, then it checks if the VM page is identical and if so frees the VM page and returns the shared page in the paPageDesc array
GMMR0DECL(int) GMMR0SharedModuleCheckRange(PGVM pGVM, PGMMSHAREDMODULE pModule, unsigned idxRegion, unsigned cPages, PGMMSHAREDPAGEDESC paPageDesc)
AssertReturn(cPages == (pModule->aRegions[idxRegion].cbRegion >> PAGE_SHIFT), VERR_INVALID_PARAMETER);
Log(("GMMR0SharedModuleCheckRange %s base %RGv region %d cPages %d\n", pModule->szName, pModule->Core.Key, idxRegion, cPages));
pGlobalRegion->paHCPhysPageID = (uint32_t *)RTMemAlloc(cPages * sizeof(*pGlobalRegion->paHCPhysPageID));
AssertFailed();
goto end;
for (unsigned i = 0; i < cPages; i++)
for (unsigned i = 0; i < cPages; i++)
if (!pPage)
AssertFailed();
goto end;
AssertMsg(paPageDesc[i].GCPhys == (pPage->Private.pfn << 12), ("desc %RGp gmm %RGp\n", paPageDesc[i].HCPhys, (pPage->Private.pfn << 12)));
Log(("Replace existing page guest %RGp host %RHp id %x -> id %x\n", paPageDesc[i].GCPhys, paPageDesc[i].HCPhys, paPageDesc[i].uHCPhysPageId, pGlobalRegion->paHCPhysPageID[i]));
if (!pPage)
AssertFailed();
goto end;
Log(("Replace existing page guest host %RHp -> %RHp\n", paPageDesc[i].HCPhys, ((uint64_t)pPage->Shared.pfn) << PAGE_SHIFT));
if (pChunk)
AssertFailed();
goto end;
AssertFailed();
goto end;
/* Get the virtual address of the physical page; map the chunk into the VM process if not already done. */
goto end;
end:
return rc;
#ifdef VBOX_WITH_PAGE_SHARING
return rc;
return rc;
return VERR_NOT_IMPLEMENTED;
#ifdef VBOX_WITH_PAGE_SHARING
Log(("gmmR0CheckSharedModule: check %s %s base=%RGv size=%x collision=%d\n", pGlobalModule->szName, pGlobalModule->szVersion, pGlobalModule->Core.Key, pGlobalModule->cbModule, pLocalModule->fCollision));
#ifdef DEBUG_sandervl
return rc;
return VINF_SUCCESS;
#ifdef VBOX_WITH_PAGE_SHARING
return rc;
# ifndef DEBUG_sandervl
RTAvlGCPtrDoWithAll(&pGVM->gmm.s.pSharedModuleTree, true /* fFromLeft */, gmmR0CheckSharedModule, &Info);
# ifndef DEBUG_sandervl
return rc;
return VERR_NOT_IMPLEMENTED;