GMMR0.cpp revision abbd656ce8faf6bef24026a95237c5c9841069c8
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * GMM - Global Memory Manager.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Copyright (C) 2007 Oracle Corporation
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * This file is part of VirtualBox Open Source Edition (OSE), as
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * available from http://www.virtualbox.org. This file is free software;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * you can redistribute it and/or modify it under the terms of the GNU
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * General Public License (GPL) as published by the Free Software
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Foundation, in version 2 as it comes in the "COPYING" file of the
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync/** @page pg_gmm GMM - The Global Memory Manager
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * As the name indicates, this component is responsible for global memory
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * management. Currently only guest RAM is allocated from the GMM, but this
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * may change to include shadow page tables and other bits later.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Guest RAM is managed as individual pages, but allocated from the host OS
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * in chunks for reasons of portability / efficiency. To minimize the memory
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * footprint all tracking structures must be as small as possible without
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * unnecessary performance penalties.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * The allocation chunks have a fixed size, defined at compile time
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * by the #GMM_CHUNK_SIZE \#define.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Each chunk is given a unique ID. Each page also has a unique ID. The
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * relationship between the two IDs is:
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * GMM_CHUNK_SHIFT = log2(GMM_CHUNK_SIZE / PAGE_SIZE);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * idPage = (idChunk << GMM_CHUNK_SHIFT) | iPage;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Where iPage is the index of the page within the chunk. This ID scheme
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * permits for efficient chunk and page lookup, but it relies on the chunk size
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * to be set at compile time. The chunks are organized in an AVL tree with their
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * IDs being the keys.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * The physical address of each page in an allocation chunk is maintained by
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * the #RTR0MEMOBJ and obtained using #RTR0MemObjGetPagePhysAddr. There is no
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * need to duplicate this information (it'll cost 8-bytes per page if we did).
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * So what do we need to track per page? Most importantly we need to know
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * which state the page is in:
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * - Private - Allocated for (eventually) backing one particular VM page.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * - Shared - Readonly page that is used by one or more VMs and treated
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * as COW by PGM.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * - Free - Not used by anyone.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * For the page replacement operations (sharing, defragmenting and freeing)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * to be somewhat efficient, private pages needs to be associated with a
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * particular page in a particular VM.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Tracking the usage of shared pages is impractical and expensive, so we'll
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * settle for a reference counting system instead.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Free pages will be chained on LIFOs
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * On 64-bit systems we will use a 64-bit bitfield per page, while on 32-bit
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * systems a 32-bit bitfield will have to suffice because of address space
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * limitations. The #GMMPAGE structure shows the details.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @section sec_gmm_alloc_strat Page Allocation Strategy
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * The strategy for allocating pages has to take fragmentation and shared
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * pages into account, or we may end up with 2000 chunks with only
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * a few pages in each. Shared pages cannot easily be reallocated because
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * of the inaccurate usage accounting (see above). Private pages can be
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * reallocated by a defragmentation thread in the same manner that sharing is done.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * The first approach is to manage the free pages in two sets depending on
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * whether they are mainly for the allocation of shared or private pages.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * In the initial implementation there will be almost no possibility for
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * mixing shared and private pages in the same chunk (only if we're really
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * stressed on memory), but when we implement forking of VMs and have to
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * deal with lots of COW pages it'll start getting kind of interesting.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * The sets are lists of chunks with approximately the same number of
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * free pages. Say the chunk size is 1MB, meaning 256 pages, and a set
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * consists of 16 lists. So, the first list will contain the chunks with
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * 1-7 free pages, the second covers 8-15, and so on. The chunks will be
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * moved between the lists as pages are freed up or allocated.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @section sec_gmm_costs Costs
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * The per page cost in kernel space is 32-bit plus whatever RTR0MEMOBJ
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * entails. In addition there is the chunk cost of approximately
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * (sizeof(RTR0MEMOBJ) + sizeof(CHUNK)) / 2^CHUNK_SHIFT bytes per page.
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * On Windows the per page #RTR0MEMOBJ cost is 32-bit on 32-bit windows
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * and 64-bit on 64-bit windows (a PFN_NUMBER in the MDL). So, 64-bit per page.
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * The cost on Linux is identical, but here it's because of sizeof(struct page *).
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * @section sec_gmm_legacy Legacy Mode for Non-Tier-1 Platforms
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * In legacy mode the page source is locked user pages and not
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * #RTR0MemObjAllocPhysNC, this means that a page can only be allocated
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * by the VM that locked it. We will make no attempt at implementing
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * page sharing on these systems, just do enough to make it all work.
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * @subsection sub_gmm_locking Serializing
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * One simple fast mutex will be employed in the initial implementation, not
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * two as mentioned in @ref subsec_pgmPhys_Serializing.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @see @ref subsec_pgmPhys_Serializing
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @section sec_gmm_overcommit Memory Over-Commitment Management
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * The GVM will have to do the system wide memory over-commitment
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * management. My current ideas are:
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * - Per VM oc policy that indicates how much to initially commit
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * to it and what to do in a out-of-memory situation.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * - Prevent overtaxing the host.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * There are some challenges here, the main ones are configurability and
e2ba4c7fd718dba1d5b73cd9e40486df3ce06e77vboxsync * security. Should we for instance permit anyone to request 100% memory
ac91d9d0626a52f79cf554c81b1fdf5f262092bbvboxsync * commitment? Who should be allowed to do runtime adjustments of the
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * config. And how to prevent these settings from being lost when the last
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * VM process exits? The solution is probably to have an optional root
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * daemon that will keep VMMR0.r0 in memory and enable the security measures.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @section sec_gmm_numa NUMA
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * NUMA considerations will be designed and implemented a bit later.
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * The preliminary guess is that we will have to try allocate memory as
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * close as possible to the CPUs the VM is executed on (EMT and additional CPU
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * threads). Which means it's mostly about allocation and sharing policies.
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * Both the scheduler and allocator interface will have to supply some NUMA info
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * and we'll need to have a way to calc access costs.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync/*******************************************************************************
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync* Header Files *
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync*******************************************************************************/
54ed927d658674ced4387afbd1877a27cb975a76vboxsync/*******************************************************************************
54ed927d658674ced4387afbd1877a27cb975a76vboxsync* Structures and Typedefs *
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync*******************************************************************************/
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync/** Pointer to set of free chunks. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync/** Pointer to a GMM allocation chunk. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * The per-page tracking structure employed by the GMM.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * On 32-bit hosts some trickery is necessary to compress all
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * the information into 32-bits. When the fSharedFree member is set,
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * the 30th bit decides whether it's a free page or not.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Because of the different layout on 32-bit and 64-bit hosts, macros
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * are used to get and set some of the data.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsynctypedef union GMMPAGE
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** Unsigned integer view. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** The common view. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** The page state. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** The view of a private page. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** The guest page frame number. (Max addressable: 2 ^ 44 - 16) */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** The GVM handle. (64K VMs) */
ac91d9d0626a52f79cf554c81b1fdf5f262092bbvboxsync /** Reserved. */
ac91d9d0626a52f79cf554c81b1fdf5f262092bbvboxsync /** The page state. */
ac91d9d0626a52f79cf554c81b1fdf5f262092bbvboxsync /** The view of a shared page. */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /** The host page frame number. (Max addressable: 2 ^ 44 - 16) */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /** The reference count (64K VMs). */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /** Reserved. Checksum or something? Two hGVMs for forking? */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /** The page state. */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /** The view of a free page. */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /** The index of the next page in the free list. UINT16_MAX is NIL. */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /** Reserved. Checksum or something? */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /** Reserved. Checksum or something? */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /** The page state. */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync#else /* 32-bit */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /** Unsigned integer view. */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /** The common view. */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /** The page state. */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /** The view of a private page. */
ac91d9d0626a52f79cf554c81b1fdf5f262092bbvboxsync /** The guest page frame number. (Max addressable: 2 ^ 36) */
ac91d9d0626a52f79cf554c81b1fdf5f262092bbvboxsync /** The GVM handle. (127 VMs) */
ac91d9d0626a52f79cf554c81b1fdf5f262092bbvboxsync /** The top page state bit, MBZ. */
ac91d9d0626a52f79cf554c81b1fdf5f262092bbvboxsync /** The view of a shared page. */
ac91d9d0626a52f79cf554c81b1fdf5f262092bbvboxsync /** The reference count. */
ac91d9d0626a52f79cf554c81b1fdf5f262092bbvboxsync /** The page state. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** The view of a free page. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** The index of the next page in the free list. UINT16_MAX is NIL. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** Reserved. Checksum or something? */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** The page state. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync/** Pointer to a GMMPAGE. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync/** @name The Page States.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync/** A private page. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync/** A private page - alternative value used on the 32-bit implementation.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * This will never be used on 64-bit hosts. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync/** A shared page. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync/** A free page. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync/** @def GMM_PAGE_IS_PRIVATE
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @returns true if private, false if not.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param pPage The GMM page.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync# define GMM_PAGE_IS_PRIVATE(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_PRIVATE )
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync# define GMM_PAGE_IS_PRIVATE(pPage) ( (pPage)->Private.fZero == 0 )
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync/** @def GMM_PAGE_IS_SHARED
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @returns true if shared, false if not.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param pPage The GMM page.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync#define GMM_PAGE_IS_SHARED(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_SHARED )
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync/** @def GMM_PAGE_IS_FREE
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @returns true if free, false if not.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param pPage The GMM page.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync#define GMM_PAGE_IS_FREE(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_FREE )
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync/** @def GMM_PAGE_PFN_LAST
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * The last valid guest pfn range.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @remark Some of the values outside the range has special meaning,
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * see GMM_PAGE_PFN_UNSHAREABLE.
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsyncAssertCompile(GMM_PAGE_PFN_LAST == (GMM_GCPHYS_LAST >> PAGE_SHIFT));
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync/** @def GMM_PAGE_PFN_UNSHAREABLE
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync * Indicates that this page isn't used for normal guest memory and thus isn't shareable.
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync# define GMM_PAGE_PFN_UNSHAREABLE UINT32_C(0xfffffff1)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync# define GMM_PAGE_PFN_UNSHAREABLE UINT32_C(0x00fffff1)
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsyncAssertCompile(GMM_PAGE_PFN_UNSHAREABLE == (GMM_GCPHYS_UNSHAREABLE >> PAGE_SHIFT));
72e9ef1022a910facbd4a232500026befd944d95vboxsync * A GMM allocation chunk ring-3 mapping record.
72e9ef1022a910facbd4a232500026befd944d95vboxsync * This should really be associated with a session and not a VM, but
72e9ef1022a910facbd4a232500026befd944d95vboxsync * it's simpler to associate it with a VM and clean up when the VM object
72e9ef1022a910facbd4a232500026befd944d95vboxsync * is destroyed.
72e9ef1022a910facbd4a232500026befd944d95vboxsynctypedef struct GMMCHUNKMAP
72e9ef1022a910facbd4a232500026befd944d95vboxsync /** The mapping object. */
54ed927d658674ced4387afbd1877a27cb975a76vboxsync /** The VM owning the mapping. */
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync/** Pointer to a GMM allocation chunk mapping. */
ac91d9d0626a52f79cf554c81b1fdf5f262092bbvboxsync GMMCHUNKTYPE_CONTINUOUS = 2, /* one 2 MB continuous physical range. */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * A GMM allocation chunk.
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsynctypedef struct GMMCHUNK
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /** The AVL node core.
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * The Key is the chunk ID. */
44f548822e604beb589f687f22c21b2b5f1ee7c6vboxsync /** The memory object.
44f548822e604beb589f687f22c21b2b5f1ee7c6vboxsync * Either from RTR0MemObjAllocPhysNC or RTR0MemObjLockUser depending on
44f548822e604beb589f687f22c21b2b5f1ee7c6vboxsync * what the host can dish up with. */
44f548822e604beb589f687f22c21b2b5f1ee7c6vboxsync /** Pointer to the next chunk in the free list. */
44f548822e604beb589f687f22c21b2b5f1ee7c6vboxsync /** Pointer to the previous chunk in the free list. */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /** Pointer to the free set this chunk belongs to. NULL for
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * chunks with no free pages. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** Pointer to an array of mappings. */
81a042b394e569219fd7bf5c66281e6afc2212a3vboxsync /** The number of mappings. */
81a042b394e569219fd7bf5c66281e6afc2212a3vboxsync /** The head of the list of free pages. UINT16_MAX is the NIL value. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** The number of free pages. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** The GVM handle of the VM that first allocated pages from this chunk, this
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync * is used as a preference when there are several chunks to choose from.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * When in bound memory mode this isn't a preference any longer. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** The number of private pages. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** The number of shared pages. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** Chunk type */
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsync /** The pages. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * An allocation chunk TLB entry.
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsynctypedef struct GMMCHUNKTLBE
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsync /** The chunk id. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** Pointer to the chunk. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync/** Pointer to an allocation chunk TLB entry. */
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync/** The number of entries in the allocation chunk TLB. */
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync/** Gets the TLB entry index for the given Chunk ID. */
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync#define GMM_CHUNKTLB_IDX(idChunk) ( (idChunk) & (GMM_CHUNKTLB_ENTRIES - 1) )
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync * An allocation chunk TLB.
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsynctypedef struct GMMCHUNKTLB
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync /** The TLB entries. */
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync/** Pointer to an allocation chunk TLB. */
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync/** The GMMCHUNK::cFree shift count. */
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync/** The GMMCHUNK::cFree mask for use when considering relinking a chunk. */
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync/** The number of lists in set. */
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync#define GMM_CHUNK_FREE_SET_LISTS (GMM_CHUNK_NUM_PAGES >> GMM_CHUNK_FREE_SET_SHIFT)
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync * A set of free chunks.
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync /** The number of free pages in the set. */
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync /** Chunks ordered by increasing number of free pages. */
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync * The GMM instance data.
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsynctypedef struct GMM
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync /** Magic / eye catcher. GMM_MAGIC */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** The fast mutex protecting the GMM.
270236340676d2385b27ea992e07fcb643bb78b6vboxsync * More fine grained locking can be implemented later if necessary. */
270236340676d2385b27ea992e07fcb643bb78b6vboxsync /** The chunk tree. */
270236340676d2385b27ea992e07fcb643bb78b6vboxsync /** The chunk TLB. */
270236340676d2385b27ea992e07fcb643bb78b6vboxsync /** The private free set. */
270236340676d2385b27ea992e07fcb643bb78b6vboxsync /** The shared free set. */
270236340676d2385b27ea992e07fcb643bb78b6vboxsync /** Shared module tree (global). */
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync /** todo separate trees for distinctly different guest OSes. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** The maximum number of pages we're allowed to allocate.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @gcfgm 64-bit GMM/MaxPages Direct.
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * @gcfgm 32-bit GMM/PctPages Relative to the number of host pages. */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /** The number of pages that has been reserved.
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync * The deal is that cReservedPages - cOverCommittedPages <= cMaxPages. */
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync /** The number of pages that we have over-committed in reservations. */
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync /** The number of actually allocated (committed if you like) pages. */
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync /** The number of pages that are shared. A subset of cAllocatedPages. */
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync /** The number of pages that are actually shared between VMs. */
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync /** The number of pages that are shared that has been left behind by
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * VMs not doing proper cleanups. */
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync /** The number of allocation chunks.
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync * (The number of pages we've allocated from the host can be derived from this.) */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** The number of current ballooned pages. */
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync /** The legacy allocation mode indicator.
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync * This is determined at initialization time. */
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync /** The bound memory mode indicator.
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync * When set, the memory will be bound to a specific VM and never
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync * shared. This is always set if fLegacyAllocationMode is set.
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync * (Also determined at initialization time.) */
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync /** The number of registered VMs. */
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync /** The previous allocated Chunk ID.
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync * Used as a hint to avoid scanning the whole bitmap. */
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync /** Chunk ID allocation bitmap.
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync * Bits of allocated IDs are set, free ones are clear.
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync * The NIL id (0) is marked allocated. */
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync uint32_t bmChunkId[(GMM_CHUNKID_LAST + 1 + 31) / 32];
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync/** Pointer to the GMM instance. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync/** The value of GMM::u32Magic (Katsuhiro Otomo). */
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync/*******************************************************************************
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync* Global Variables *
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync*******************************************************************************/
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync/** Pointer to the GMM instance data. */
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync/** Macro for obtaining and validating the g_pGMM pointer.
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync * On failure it will return from the invoking function with the specified return value.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * @param pGMM The name of the pGMM variable.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * @param rc The return value on failure. Use VERR_INTERNAL_ERROR for
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * VBox status codes.
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync AssertMsgReturn((pGMM)->u32Magic == GMM_MAGIC, ("%p - %#x\n", (pGMM), (pGMM)->u32Magic), (rc)); \
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync } while (0)
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync/** Macro for obtaining and validating the g_pGMM pointer, void function variant.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * On failure it will return from the invoking function.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * @param pGMM The name of the pGMM variable.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync AssertMsgReturnVoid((pGMM)->u32Magic == GMM_MAGIC, ("%p - %#x\n", (pGMM), (pGMM)->u32Magic)); \
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync } while (0)
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync/** @def GMM_CHECK_SANITY_UPON_ENTERING
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * Checks the sanity of the GMM instance data before making changes.
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync * This macro is a stub by default and must be enabled manually in the code.
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync * @returns true if sane, false if not.
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync * @param pGMM The name of the pGMM variable.
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync#if defined(VBOX_STRICT) && 0
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync# define GMM_CHECK_SANITY_UPON_ENTERING(pGMM) (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync# define GMM_CHECK_SANITY_UPON_ENTERING(pGMM) (true)
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync/** @def GMM_CHECK_SANITY_UPON_LEAVING
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * Checks the sanity of the GMM instance data after making changes.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * This macro is a stub by default and must be enabled manually in the code.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * @returns true if sane, false if not.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * @param pGMM The name of the pGMM variable.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync#if defined(VBOX_STRICT) && 0
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync# define GMM_CHECK_SANITY_UPON_LEAVING(pGMM) (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync/** @def GMM_CHECK_SANITY_IN_LOOPS
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * Checks the sanity of the GMM instance in the allocation loops.
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync * This macro is a stub by default and must be enabled manually in the code.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * @returns true if sane, false if not.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * @param pGMM The name of the pGMM variable.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync#if defined(VBOX_STRICT) && 0
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync# define GMM_CHECK_SANITY_IN_LOOPS(pGMM) (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync/*******************************************************************************
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync* Internal Functions *
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync*******************************************************************************/
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsyncstatic DECLCALLBACK(int) gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM);
d17bffd306255c509aa98e8118e9a5456ee2138evboxsyncstatic DECLCALLBACK(int) gmmR0CleanupVMScanChunk(PAVLU32NODECORE pNode, void *pvGMM);
d17bffd306255c509aa98e8118e9a5456ee2138evboxsyncstatic DECLCALLBACK(int) gmmR0CleanupSharedModule(PAVLGCPTRNODECORE pNode, void *pvGVM);
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync/*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM);
8541678784dbe432feebe6d9c1528525e1771397vboxsyncDECLINLINE(void) gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet);
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsyncDECLINLINE(void) gmmR0UnlinkChunk(PGMMCHUNK pChunk);
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsyncstatic uint32_t gmmR0SanityCheck(PGMM pGMM, const char *pszFunction, unsigned uLineNo);
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsyncstatic void gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsyncstatic void gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage);
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsyncstatic int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync * Initializes the GMM component.
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync * This is called when the VMMR0.r0 module is loaded and protected by the
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsync * loader semaphore.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * @returns VBox status code.
8541678784dbe432feebe6d9c1528525e1771397vboxsync * Allocate the instance data and the lock(s).
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pGMM->ChunkTLB.aEntries); i++)
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync pGMM->ChunkTLB.aEntries[i].idChunk = NIL_GMM_CHUNKID;
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * Check and see if RTR0MemObjAllocPhysNC works.
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync#if 0 /* later, see #3170. */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync# if defined(RT_OS_WINDOWS) || (defined(RT_OS_SOLARIS) && ARCH_BITS == 64) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /* Don't reuse possibly partial chunks because of the virtual address space limitation. */
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync * Query system page count and guess a reasonable cMaxPages value.
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for query ram size and such. */
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
bec4d1c0274e4712fe01426313aab120b5ad1c17vboxsync * Terminates the GMM component.
bec4d1c0274e4712fe01426313aab120b5ad1c17vboxsync * Take care / be paranoid...
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync SUPR0Printf("GMMR0Term: u32Magic=%#x\n", pGMM->u32Magic);
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * Undo what init did and free all the resources we've acquired.
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync /* Destroy the fundamentals. */
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync /* free any chunks still hanging around. */
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync RTAvlU32Destroy(&pGMM->pChunks, gmmR0TermDestroyChunk, pGMM);
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync /* finally the instance data itself. */
8541678784dbe432feebe6d9c1528525e1771397vboxsync * RTAvlU32Destroy callback.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * @returns 0
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * @param pNode The node to destroy.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * @param pvGMM The GMM handle.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsyncstatic DECLCALLBACK(int) gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM)
be177d2edf33024ee98bf5a84a32615473ac9568vboxsync if (pChunk->cFree != (GMM_CHUNK_SIZE >> PAGE_SHIFT))
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync SUPR0Printf("GMMR0Term: %p/%#x: cFree=%d cPrivate=%d cShared=%d cMappings=%d\n", pChunk,
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync pChunk->Core.Key, pChunk->cFree, pChunk->cPrivate, pChunk->cShared, pChunk->cMappings);
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync int rc = RTR0MemObjFree(pChunk->MemObj, true /* fFreeMappings */);
be177d2edf33024ee98bf5a84a32615473ac9568vboxsync SUPR0Printf("GMMR0Term: %p/%#x: RTRMemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk,
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync pChunk->Core.Key, pChunk->MemObj, rc, pChunk->cMappings);
be177d2edf33024ee98bf5a84a32615473ac9568vboxsync * Initializes the per-VM data for the GMM.
be177d2edf33024ee98bf5a84a32615473ac9568vboxsync * This is called from within the GVMM lock (from GVMMR0CreateVM)
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * and should only initialize the data members so GMMR0CleanupVM
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * can deal with them. We reserve no memory or anything here,
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * that's done later in GMMR0InitVM.
be177d2edf33024ee98bf5a84a32615473ac9568vboxsync * @param pGVM Pointer to the Global VM structure.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync AssertCompile(RT_SIZEOFMEMB(GVM,gmm.s) <= RT_SIZEOFMEMB(GVM,gmm.padding));
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync * Cleans up when a VM is terminating.
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync * @param pGVM Pointer to the Global VM structure.
270236340676d2385b27ea992e07fcb643bb78b6vboxsync LogFlow(("GMMR0CleanupVM: pGVM=%p:{.pVM=%p, .hSelf=%#x}\n", pGVM, pGVM->pVM, pGVM->hSelf));
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /* Clean up all registered shared modules. */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync RTAvlGCPtrDestroy(&pGVM->gmm.s.pSharedModuleTree, gmmR0CleanupSharedModule, pGVM);
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * The policy is 'INVALID' until the initial reservation
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * request has been serviced.
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync * If it's the last VM around, we can skip walking all the chunk looking
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * for the pages owned by this VM and instead flush the whole shebang.
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * This takes care of the eventuality that a VM has left shared page
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * references behind (shouldn't happen of course, but you never know).
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync#if 0 /* disabled so it won't hide bugs. */
72e9ef1022a910facbd4a232500026befd944d95vboxsync RTAvlU32Destroy(&pGMM->pChunks, gmmR0CleanupVMDestroyChunk, pGMM);
72e9ef1022a910facbd4a232500026befd944d95vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pGMM->ChunkTLB.aEntries); i++)
72e9ef1022a910facbd4a232500026befd944d95vboxsync pGMM->ChunkTLB.aEntries[i].idChunk = NIL_GMM_CHUNKID;
72e9ef1022a910facbd4a232500026befd944d95vboxsync memset(&pGMM->bmChunkId[0], 0, sizeof(pGMM->bmChunkId));
08fc4eb537ed24136b05660d0aa038b336516961vboxsync * Walk the entire pool looking for pages that belong to this VM
08fc4eb537ed24136b05660d0aa038b336516961vboxsync * and left over mappings. (This'll only catch private pages, shared
8541678784dbe432feebe6d9c1528525e1771397vboxsync * pages will be 'left behind'.)
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsync /* todo this might be kind of expensive with a lot of VMs and memory hanging around... */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync uint64_t cPrivatePages = pGVM->gmm.s.cPrivatePages; /* save */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync RTAvlU32DoWithAll(&pGMM->pChunks, true /* fFromLeft */, gmmR0CleanupVMScanChunk, pGVM);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync SUPR0Printf("GMMR0CleanupVM: hGVM=%#x has %#x private pages that cannot be found!\n", pGVM->hSelf, pGVM->gmm.s.cPrivatePages);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /* free empty chunks. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync PGMMCHUNK pCur = pGMM->Private.apLists[RT_ELEMENTS(pGMM->Private.apLists) - 1];
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync /* account for shared pages that weren't freed. */
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync Assert(pGMM->cSharedPages >= pGVM->gmm.s.cSharedPages);
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync SUPR0Printf("GMMR0CleanupVM: hGVM=%#x left %#x shared pages behind!\n", pGVM->hSelf, pGVM->gmm.s.cSharedPages);
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync pGMM->cLeftBehindSharedPages += pGVM->gmm.s.cSharedPages;
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync /* Clean up balloon statistics in case the VM process crashed. */
bec4d1c0274e4712fe01426313aab120b5ad1c17vboxsync Assert(pGMM->cBalloonedPages >= pGVM->gmm.s.cBalloonedPages);
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync pGMM->cBalloonedPages -= pGVM->gmm.s.cBalloonedPages;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Update the over-commitment management statistics.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync pGMM->cReservedPages -= pGVM->gmm.s.Reserved.cBasePages
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** @todo Update GMM->cOverCommittedPages */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /* zap the GVM data. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * RTAvlU32DoWithAll callback.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @returns 0
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param pNode The node to search.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param pvGVM Pointer to the shared VM structure.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsyncstatic DECLCALLBACK(int) gmmR0CleanupVMScanChunk(PAVLU32NODECORE pNode, void *pvGVM)
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsync * Look for pages belonging to the VM.
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsync * (Perform some internal checks while we're scanning.)
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsync if (pChunk->cFree != (GMM_CHUNK_SIZE >> PAGE_SHIFT))
54ed927d658674ced4387afbd1877a27cb975a76vboxsync unsigned cPrivate = 0;
54ed927d658674ced4387afbd1877a27cb975a76vboxsync unsigned cShared = 0;
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsync unsigned cFree = 0;
7528d4a15800321b4013826ce35ad184898dba21vboxsync gmmR0UnlinkChunk(pChunk); /* avoiding cFreePages updates. */
54ed927d658674ced4387afbd1877a27cb975a76vboxsync while (iPage-- > 0)
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * Free the page.
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * The reason for not using gmmR0FreePrivatePage here is that we
7528d4a15800321b4013826ce35ad184898dba21vboxsync * must *not* cause the chunk to be freed from under us - we're in
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * an AVL tree walk here.
7528d4a15800321b4013826ce35ad184898dba21vboxsync pChunk->aPages[iPage].Free.iNext = pChunk->iFreeHead;
54ed927d658674ced4387afbd1877a27cb975a76vboxsync pChunk->aPages[iPage].Free.u2State = GMM_PAGE_STATE_FREE;
54ed927d658674ced4387afbd1877a27cb975a76vboxsync gmmR0LinkChunk(pChunk, pChunk->cShared ? &g_pGMM->Shared : &g_pGMM->Private);
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * Did it add up?
54ed927d658674ced4387afbd1877a27cb975a76vboxsync SUPR0Printf("gmmR0CleanupVMScanChunk: Chunk %p/%#x has bogus stats - free=%d/%d private=%d/%d shared=%d/%d\n",
54ed927d658674ced4387afbd1877a27cb975a76vboxsync pChunk->cFree, cFree, pChunk->cPrivate, cPrivate, pChunk->cShared, cShared);
7528d4a15800321b4013826ce35ad184898dba21vboxsync * Look for the mapping belonging to the terminating VM.
7528d4a15800321b4013826ce35ad184898dba21vboxsync pChunk->paMappings[i] = pChunk->paMappings[pChunk->cMappings];
54ed927d658674ced4387afbd1877a27cb975a76vboxsync pChunk->paMappings[pChunk->cMappings].MapObj = NIL_RTR0MEMOBJ;
54ed927d658674ced4387afbd1877a27cb975a76vboxsync int rc = RTR0MemObjFree(MemObj, false /* fFreeMappings (NA) */);
54ed927d658674ced4387afbd1877a27cb975a76vboxsync SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: mapping #%x: RTRMemObjFree(%p,false) -> %d \n",
7528d4a15800321b4013826ce35ad184898dba21vboxsync * If not in bound memory mode, we should reset the hGVM field
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * if it has our handle in it.
54ed927d658674ced4387afbd1877a27cb975a76vboxsync SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: cFree=%#x - it should be 0 in bound mode!\n",
54ed927d658674ced4387afbd1877a27cb975a76vboxsync AssertMsgFailed(("%p/%#x: cFree=%#x - it should be 0 in bound mode!\n", pChunk, pChunk->Core.Key, pChunk->cFree));
54ed927d658674ced4387afbd1877a27cb975a76vboxsync gmmR0LinkChunk(pChunk, pChunk->cShared ? &g_pGMM->Shared : &g_pGMM->Private);
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * RTAvlU32Destroy callback for GMMR0CleanupVM.
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * @returns 0
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * @param pNode The node (allocation chunk) to destroy.
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * @param pvGVM Pointer to the shared VM structure.
54ed927d658674ced4387afbd1877a27cb975a76vboxsync/*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM)
54ed927d658674ced4387afbd1877a27cb975a76vboxsync SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: pGVM=%p exepcted %p\n", pChunk,
54ed927d658674ced4387afbd1877a27cb975a76vboxsync pChunk->Core.Key, i, pChunk->paMappings[i].pGVM, pGVM);
54ed927d658674ced4387afbd1877a27cb975a76vboxsync int rc = RTR0MemObjFree(pChunk->paMappings[i].MapObj, false /* fFreeMappings (NA) */);
54ed927d658674ced4387afbd1877a27cb975a76vboxsync SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: RTRMemObjFree(%p,false) -> %d \n", pChunk,
54ed927d658674ced4387afbd1877a27cb975a76vboxsync pChunk->Core.Key, i, pChunk->paMappings[i].MapObj, rc);
54ed927d658674ced4387afbd1877a27cb975a76vboxsync int rc = RTR0MemObjFree(pChunk->MemObj, true /* fFreeMappings */);
54ed927d658674ced4387afbd1877a27cb975a76vboxsync SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: RTRMemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk,
54ed927d658674ced4387afbd1877a27cb975a76vboxsync pChunk->Core.Key, pChunk->MemObj, rc, pChunk->cMappings);
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * The initial resource reservations.
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * This will make memory reservations according to policy and priority. If there aren't
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * sufficient resources available to sustain the VM this function will fail and all
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * future allocations requests will fail as well.
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * These are just the initial reservations made very very early during the VM creation
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * process and will be adjusted later in the GMMR0UpdateReservation call after the
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * ring-3 init has completed.
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * @returns VBox status code.
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * @retval VERR_GMM_MEMORY_RESERVATION_DECLINED
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * @retval VERR_GMM_
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * @param pVM Pointer to the shared VM structure.
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * @param idCpu VCPU id
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * @param cBasePages The number of pages that may be allocated for the base RAM and ROMs.
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * This does not include MMIO2 and similar.
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * @param cShadowPages The number of pages that may be allocated for shadow pageing structures.
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * @param cFixedPages The number of pages that may be allocated for fixed objects like the
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * hyper heap, MMIO2 and similar.
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * @param enmPolicy The OC policy to use on this VM.
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * @param enmPriority The priority in an out-of-memory situation.
54ed927d658674ced4387afbd1877a27cb975a76vboxsync * @thread The creator thread / EMT.
54ed927d658674ced4387afbd1877a27cb975a76vboxsyncGMMR0DECL(int) GMMR0InitialReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsync LogFlow(("GMMR0InitialReservation: pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x enmPolicy=%d enmPriority=%d\n",
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsync pVM, cBasePages, cShadowPages, cFixedPages, enmPolicy, enmPriority));
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsync * Validate, get basics and take the semaphore.
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsync AssertReturn(cShadowPages, VERR_INVALID_PARAMETER);
54ed927d658674ced4387afbd1877a27cb975a76vboxsync AssertReturn(enmPolicy > GMMOCPOLICY_INVALID && enmPolicy < GMMOCPOLICY_END, VERR_INVALID_PARAMETER);
54ed927d658674ced4387afbd1877a27cb975a76vboxsync AssertReturn(enmPriority > GMMPRIORITY_INVALID && enmPriority < GMMPRIORITY_END, VERR_INVALID_PARAMETER);
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsync * Check if we can accomodate this.
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsync /* ... later ... */
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsync * Update the records.
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsync pGMM->cReservedPages += cBasePages + cFixedPages + cShadowPages;
7528d4a15800321b4013826ce35ad184898dba21vboxsync LogFlow(("GMMR0InitialReservation: returns %Rrc\n", rc));
7528d4a15800321b4013826ce35ad184898dba21vboxsync * VMMR0 request wrapper for GMMR0InitialReservation.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @returns see GMMR0InitialReservation.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @param pVM Pointer to the shared VM structure.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @param idCpu VCPU id
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @param pReq The request packet.
7528d4a15800321b4013826ce35ad184898dba21vboxsyncGMMR0DECL(int) GMMR0InitialReservationReq(PVM pVM, VMCPUID idCpu, PGMMINITIALRESERVATIONREQ pReq)
7528d4a15800321b4013826ce35ad184898dba21vboxsync * Validate input and pass it on.
7528d4a15800321b4013826ce35ad184898dba21vboxsync AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
7528d4a15800321b4013826ce35ad184898dba21vboxsync return GMMR0InitialReservation(pVM, idCpu, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages, pReq->enmPolicy, pReq->enmPriority);
7528d4a15800321b4013826ce35ad184898dba21vboxsync * This updates the memory reservation with the additional MMIO2 and ROM pages.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @returns VBox status code.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @retval VERR_GMM_MEMORY_RESERVATION_DECLINED
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @param pVM Pointer to the shared VM structure.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @param idCpu VCPU id
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @param cBasePages The number of pages that may be allocated for the base RAM and ROMs.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * This does not include MMIO2 and similar.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @param cShadowPages The number of pages that may be allocated for shadow pageing structures.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @param cFixedPages The number of pages that may be allocated for fixed objects like the
7528d4a15800321b4013826ce35ad184898dba21vboxsync * hyper heap, MMIO2 and similar.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @thread EMT.
7528d4a15800321b4013826ce35ad184898dba21vboxsyncGMMR0DECL(int) GMMR0UpdateReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages)
7528d4a15800321b4013826ce35ad184898dba21vboxsync LogFlow(("GMMR0UpdateReservation: pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x\n",
7528d4a15800321b4013826ce35ad184898dba21vboxsync * Validate, get basics and take the semaphore.
7528d4a15800321b4013826ce35ad184898dba21vboxsync AssertReturn(cShadowPages, VERR_INVALID_PARAMETER);
7528d4a15800321b4013826ce35ad184898dba21vboxsync * Check if we can accomodate this.
7528d4a15800321b4013826ce35ad184898dba21vboxsync /* ... later ... */
44f548822e604beb589f687f22c21b2b5f1ee7c6vboxsync * Update the records.
7528d4a15800321b4013826ce35ad184898dba21vboxsync pGMM->cReservedPages -= pGVM->gmm.s.Reserved.cBasePages
7528d4a15800321b4013826ce35ad184898dba21vboxsync pGMM->cReservedPages += cBasePages + cFixedPages + cShadowPages;
7528d4a15800321b4013826ce35ad184898dba21vboxsync LogFlow(("GMMR0UpdateReservation: returns %Rrc\n", rc));
7528d4a15800321b4013826ce35ad184898dba21vboxsync * VMMR0 request wrapper for GMMR0UpdateReservation.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @returns see GMMR0UpdateReservation.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @param pVM Pointer to the shared VM structure.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @param idCpu VCPU id
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @param pReq The request packet.
7528d4a15800321b4013826ce35ad184898dba21vboxsyncGMMR0DECL(int) GMMR0UpdateReservationReq(PVM pVM, VMCPUID idCpu, PGMMUPDATERESERVATIONREQ pReq)
7528d4a15800321b4013826ce35ad184898dba21vboxsync * Validate input and pass it on.
7528d4a15800321b4013826ce35ad184898dba21vboxsync AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
7528d4a15800321b4013826ce35ad184898dba21vboxsync return GMMR0UpdateReservation(pVM, idCpu, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages);
7528d4a15800321b4013826ce35ad184898dba21vboxsync * Performs sanity checks on a free set.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @returns Error count.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @param pGMM Pointer to the GMM instance.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @param pSet Pointer to the set.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @param pszSetName The set name.
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsync * @param pszFunction The function from which it was called.
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsync * @param uLine The line number.
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsyncstatic uint32_t gmmR0SanityCheckSet(PGMM pGMM, PGMMCHUNKFREESET pSet, const char *pszSetName,
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsync * Count the free pages in all the chunks and match it against pSet->cFreePages.
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists); i++)
0cd5b157d7c4708b8ad79e3acd474ac781dfbcf1vboxsync for (PGMMCHUNK pCur = pSet->apLists[i]; pCur; pCur = pCur->pFreeNext)
54ed927d658674ced4387afbd1877a27cb975a76vboxsync /** @todo check that the chunk is hash into the right set. */
44f548822e604beb589f687f22c21b2b5f1ee7c6vboxsync SUPR0Printf("GMM insanity: found %#x pages in the %s set, expected %#x. (%s, line %u)\n",
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync cPages, pszSetName, pSet->cFreePages, pszFunction, uLineNo);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Performs some sanity checks on the GMM while owning lock.
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync * @returns Error count.
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync * @param pGMM Pointer to the GMM instance.
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync * @param pszFunction The function from which it is called.
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync * @param uLineNo The line number.
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsyncstatic uint32_t gmmR0SanityCheck(PGMM pGMM, const char *pszFunction, unsigned uLineNo)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync cErrors += gmmR0SanityCheckSet(pGMM, &pGMM->Private, "private", pszFunction, uLineNo);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync cErrors += gmmR0SanityCheckSet(pGMM, &pGMM->Shared, "shared", pszFunction, uLineNo);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** @todo add more sanity checks. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Looks up a chunk in the tree and fill in the TLB entry for it.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * This is not expected to fail and will bitch if it does.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @returns Pointer to the allocation chunk, NULL if not found.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param pGMM Pointer to the GMM instance.
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsync * @param idChunk The ID of the chunk to find.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param pTlbe Pointer to the TLB entry.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsyncstatic PGMMCHUNK gmmR0GetChunkSlow(PGMM pGMM, uint32_t idChunk, PGMMCHUNKTLBE pTlbe)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync PGMMCHUNK pChunk = (PGMMCHUNK)RTAvlU32Get(&pGMM->pChunks, idChunk);
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync AssertMsgReturn(pChunk, ("Chunk %#x not found!\n", idChunk), NULL);
be177d2edf33024ee98bf5a84a32615473ac9568vboxsync * Finds a allocation chunk.
be177d2edf33024ee98bf5a84a32615473ac9568vboxsync * This is not expected to fail and will bitch if it does.
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * @returns Pointer to the allocation chunk, NULL if not found.
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * @param pGMM Pointer to the GMM instance.
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * @param idChunk The ID of the chunk to find.
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsyncDECLINLINE(PGMMCHUNK) gmmR0GetChunk(PGMM pGMM, uint32_t idChunk)
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * Do a TLB lookup, branch if not in the TLB.
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(idChunk)];
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * Finds a page.
be177d2edf33024ee98bf5a84a32615473ac9568vboxsync * This is not expected to fail and will bitch if it does.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @returns Pointer to the page, NULL if not found.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param pGMM Pointer to the GMM instance.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param idPage The ID of the page to find.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsyncDECLINLINE(PGMMPAGE) gmmR0GetPage(PGMM pGMM, uint32_t idPage)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync return &pChunk->aPages[idPage & GMM_PAGEID_IDX_MASK];
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Unlinks the chunk from the free list it's currently on (if any).
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param pChunk The allocation chunk.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync pSet->apLists[(pChunk->cFree - 1) >> GMM_CHUNK_FREE_SET_SHIFT] = pNext;
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync * Links the chunk onto the appropriate free list in the specified free set.
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync * If no free entries, it's not linked into any list.
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync * @param pChunk The allocation chunk.
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * @param pSet The free set.
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsyncDECLINLINE(void) gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet)
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync unsigned iList = (pChunk->cFree - 1) >> GMM_CHUNK_FREE_SET_SHIFT;
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * Frees a Chunk ID.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param pGMM Pointer to the GMM instance.
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * @param idChunk The Chunk ID to free.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsyncstatic void gmmR0FreeChunkId(PGMM pGMM, uint32_t idChunk)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync AssertMsg(ASMBitTest(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk));
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Allocates a new Chunk ID.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * @returns The Chunk ID.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * @param pGMM Pointer to the GMM instance.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync AssertCompile(!((GMM_CHUNKID_LAST + 1) & 31)); /* must be a multiple of 32 */
8541678784dbe432feebe6d9c1528525e1771397vboxsync * Try the next sequential one.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync#if 0 /* test the fallback first */
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync && !ASMAtomicBitTestAndSet(&pVMM->bmChunkId[0], idChunk))
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * Scan sequentially from the last one.
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync idChunk = ASMBitNextClear(&pGMM->bmChunkId[0], GMM_CHUNKID_LAST + 1, idChunk);
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk), NIL_GMM_CHUNKID);
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * Ok, scan from the start.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * We're not racing anyone, so there is no need to expect failures or have restart loops.
7528d4a15800321b4013826ce35ad184898dba21vboxsync idChunk = ASMBitFirstClear(&pGMM->bmChunkId[0], GMM_CHUNKID_LAST + 1);
7528d4a15800321b4013826ce35ad184898dba21vboxsync AssertMsgReturn(idChunk > NIL_GMM_CHUNKID, ("%#x\n", idChunk), NIL_GVM_HANDLE);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk), NIL_GMM_CHUNKID);
44f548822e604beb589f687f22c21b2b5f1ee7c6vboxsync * Registers a new chunk of memory.
44f548822e604beb589f687f22c21b2b5f1ee7c6vboxsync * This is called by both gmmR0AllocateOneChunk and GMMR0SeedChunk. The caller
44f548822e604beb589f687f22c21b2b5f1ee7c6vboxsync * must own the global lock.
44f548822e604beb589f687f22c21b2b5f1ee7c6vboxsync * @returns VBox status code.
44f548822e604beb589f687f22c21b2b5f1ee7c6vboxsync * @param pGMM Pointer to the GMM instance.
44f548822e604beb589f687f22c21b2b5f1ee7c6vboxsync * @param pSet Pointer to the set.
44f548822e604beb589f687f22c21b2b5f1ee7c6vboxsync * @param MemObj The memory object for the chunk.
44f548822e604beb589f687f22c21b2b5f1ee7c6vboxsync * @param hGVM The affinity of the chunk. NIL_GVM_HANDLE for no
44f548822e604beb589f687f22c21b2b5f1ee7c6vboxsync * affinity.
44f548822e604beb589f687f22c21b2b5f1ee7c6vboxsync * @param enmChunkType Chunk type (continuous or non-continuous)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param ppChunk Chunk address (out)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsyncstatic int gmmR0RegisterChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, RTR0MEMOBJ MemObj, uint16_t hGVM, GMMCHUNKTYPE enmChunkType, PGMMCHUNK *ppChunk = NULL)
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync Assert(hGVM != NIL_GVM_HANDLE || pGMM->fBoundMemoryMode);
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync PGMMCHUNK pChunk = (PGMMCHUNK)RTMemAllocZ(sizeof(*pChunk));
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * Initialize it.
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync for (unsigned iPage = 0; iPage < RT_ELEMENTS(pChunk->aPages) - 1; iPage++)
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync pChunk->aPages[iPage].Free.u2State = GMM_PAGE_STATE_FREE;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.u2State = GMM_PAGE_STATE_FREE;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.iNext = UINT16_MAX;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Allocate a Chunk ID and insert it into the tree.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * This has to be done behind the mutex of course.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync LogFlow(("gmmR0RegisterChunk: pChunk=%p id=%#x cChunks=%d\n", pChunk, pChunk->Core.Key, pGMM->cChunks));
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /* bail out */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Allocate one new chunk and add it to the specified free set.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @returns VBox status code.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @param pGMM Pointer to the GMM instance.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param pSet Pointer to the set.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param hGVM The affinity of the new chunk.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param enmChunkType Chunk type (continuous or non-continuous)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param ppChunk Chunk address (out)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @remarks Called without owning the mutex.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsyncstatic int gmmR0AllocateOneChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, uint16_t hGVM, GMMCHUNKTYPE enmChunkType, PGMMCHUNK *ppChunk = NULL)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Allocate the memory.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync AssertReturn(enmChunkType == GMMCHUNKTYPE_NON_CONTINUOUS || enmChunkType == GMMCHUNKTYPE_CONTINUOUS, VERR_INVALID_PARAMETER);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /* Leave the lock temporarily as the allocation might take long. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync rc = RTR0MemObjAllocPhysNC(&MemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync rc = RTR0MemObjAllocPhysEx(&MemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS, GMM_CHUNK_SIZE);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /* Grab the lock again. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync rc = gmmR0RegisterChunk(pGMM, pSet, MemObj, hGVM, enmChunkType, ppChunk);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** @todo Check that RTR0MemObjAllocPhysNC always returns VERR_NO_MEMORY on
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * allocation failure. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Attempts to allocate more pages until the requested amount is met.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @returns VBox status code.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param pGMM Pointer to the GMM instance data.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param pGVM The calling VM.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param pSet Pointer to the free set to grow.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param cPages The number of pages needed.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @remarks Called owning the mutex, but will leave it temporarily while
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * allocating the memory!
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsyncstatic int gmmR0AllocateMoreChunks(PGMM pGMM, PGVM pGVM, PGMMCHUNKFREESET pSet, uint32_t cPages)
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync * Try steal free chunks from the other set first. (Only take 100% free chunks.)
8541678784dbe432feebe6d9c1528525e1771397vboxsync PGMMCHUNKFREESET pOtherSet = pSet == &pGMM->Private ? &pGMM->Shared : &pGMM->Private;
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync PGMMCHUNK pChunk = pOtherSet->apLists[GMM_CHUNK_FREE_SET_LISTS - 1];
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync while (pChunk && pChunk->cFree != GMM_CHUNK_NUM_PAGES)
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * If we need still more pages, allocate new chunks.
8541678784dbe432feebe6d9c1528525e1771397vboxsync * Note! We will leave the mutex while doing the allocation,
8541678784dbe432feebe6d9c1528525e1771397vboxsync int rc = gmmR0AllocateOneChunk(pGMM, pSet, pGVM->hSelf, GMMCHUNKTYPE_NON_CONTINUOUS);
bec4d1c0274e4712fe01426313aab120b5ad1c17vboxsync * The memory is bound to the VM allocating it, so we have to count
8541678784dbe432feebe6d9c1528525e1771397vboxsync * the free pages carefully as well as making sure we brand them with
8541678784dbe432feebe6d9c1528525e1771397vboxsync * our VM handle.
8541678784dbe432feebe6d9c1528525e1771397vboxsync * Note! We will leave the mutex while doing the allocation,
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync /* Count and see if we've reached the goal. */
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists); i++)
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync for (PGMMCHUNK pCur = pSet->apLists[i]; pCur; pCur = pCur->pFreeNext)
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync /* Allocate more. */
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync int rc = gmmR0AllocateOneChunk(pGMM, pSet, hGVM, GMMCHUNKTYPE_NON_CONTINUOUS);
8541678784dbe432feebe6d9c1528525e1771397vboxsync * Allocates one private page.
8541678784dbe432feebe6d9c1528525e1771397vboxsync * Worker for gmmR0AllocatePages.
8541678784dbe432feebe6d9c1528525e1771397vboxsync * @param pGMM Pointer to the GMM instance data.
8541678784dbe432feebe6d9c1528525e1771397vboxsync * @param hGVM The GVM handle of the VM requesting memory.
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync * @param pChunk The chunk to allocate it from.
8541678784dbe432feebe6d9c1528525e1771397vboxsync * @param pPageDesc The page descriptor.
8541678784dbe432feebe6d9c1528525e1771397vboxsyncstatic void gmmR0AllocatePage(PGMM pGMM, uint32_t hGVM, PGMMCHUNK pChunk, PGMMPAGEDESC pPageDesc)
8541678784dbe432feebe6d9c1528525e1771397vboxsync /* update the chunk stats. */
8541678784dbe432feebe6d9c1528525e1771397vboxsync /* unlink the first free page. */
8541678784dbe432feebe6d9c1528525e1771397vboxsync AssertReleaseMsg(iPage < RT_ELEMENTS(pChunk->aPages), ("%d\n", iPage));
8541678784dbe432feebe6d9c1528525e1771397vboxsync Log3(("A pPage=%p iPage=%#x/%#x u2State=%d iFreeHead=%#x iNext=%#x\n",
8541678784dbe432feebe6d9c1528525e1771397vboxsync pPage, iPage, (pChunk->Core.Key << GMM_CHUNKID_SHIFT) | iPage,
8541678784dbe432feebe6d9c1528525e1771397vboxsync pPage->Common.u2State, pChunk->iFreeHead, pPage->Free.iNext));
8541678784dbe432feebe6d9c1528525e1771397vboxsync /* make the page private. */
8541678784dbe432feebe6d9c1528525e1771397vboxsync AssertCompile(GMM_GCPHYS_UNSHAREABLE >= GMM_GCPHYS_LAST);
8541678784dbe432feebe6d9c1528525e1771397vboxsync pPage->Private.pfn = pPageDesc->HCPhysGCPhys >> PAGE_SHIFT;
8541678784dbe432feebe6d9c1528525e1771397vboxsync pPage->Private.pfn = GMM_PAGE_PFN_UNSHAREABLE; /* unshareable / unassigned - same thing. */
8541678784dbe432feebe6d9c1528525e1771397vboxsync /* update the page descriptor. */
8541678784dbe432feebe6d9c1528525e1771397vboxsync pPageDesc->HCPhysGCPhys = RTR0MemObjGetPagePhysAddr(pChunk->MemObj, iPage);
8541678784dbe432feebe6d9c1528525e1771397vboxsync pPageDesc->idPage = (pChunk->Core.Key << GMM_CHUNKID_SHIFT) | iPage;
8541678784dbe432feebe6d9c1528525e1771397vboxsync * Common worker for GMMR0AllocateHandyPages and GMMR0AllocatePages.
8541678784dbe432feebe6d9c1528525e1771397vboxsync * @returns VBox status code:
8541678784dbe432feebe6d9c1528525e1771397vboxsync * @retval VINF_SUCCESS on success.
8541678784dbe432feebe6d9c1528525e1771397vboxsync * @retval VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk or
8541678784dbe432feebe6d9c1528525e1771397vboxsync * gmmR0AllocateMoreChunks is necessary.
8541678784dbe432feebe6d9c1528525e1771397vboxsync * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
8541678784dbe432feebe6d9c1528525e1771397vboxsync * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
8541678784dbe432feebe6d9c1528525e1771397vboxsync * that is we're trying to allocate more than we've reserved.
8541678784dbe432feebe6d9c1528525e1771397vboxsync * @param pGMM Pointer to the GMM instance data.
8541678784dbe432feebe6d9c1528525e1771397vboxsync * @param pGVM Pointer to the shared VM structure.
8541678784dbe432feebe6d9c1528525e1771397vboxsync * @param cPages The number of pages to allocate.
8541678784dbe432feebe6d9c1528525e1771397vboxsync * @param paPages Pointer to the page descriptors.
8541678784dbe432feebe6d9c1528525e1771397vboxsync * See GMMPAGEDESC for details on what is expected on input.
8541678784dbe432feebe6d9c1528525e1771397vboxsync * @param enmAccount The account to charge.
8541678784dbe432feebe6d9c1528525e1771397vboxsyncstatic int gmmR0AllocatePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
8541678784dbe432feebe6d9c1528525e1771397vboxsync * Check allocation limits.
8541678784dbe432feebe6d9c1528525e1771397vboxsync if (RT_UNLIKELY(pGMM->cAllocatedPages + cPages > pGMM->cMaxPages))
8541678784dbe432feebe6d9c1528525e1771397vboxsync if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages > pGVM->gmm.s.Reserved.cBasePages))
8541678784dbe432feebe6d9c1528525e1771397vboxsync Log(("gmmR0AllocatePages:Base: Reserved=%#llx Allocated+Ballooned+Requested=%#llx+%#llx+%#x!\n",
8541678784dbe432feebe6d9c1528525e1771397vboxsync pGVM->gmm.s.Reserved.cBasePages, pGVM->gmm.s.Allocated.cBasePages, pGVM->gmm.s.cBalloonedPages, cPages));
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cShadowPages + cPages > pGVM->gmm.s.Reserved.cShadowPages))
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync Log(("gmmR0AllocatePages:Shadow: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
8541678784dbe432feebe6d9c1528525e1771397vboxsync pGVM->gmm.s.Reserved.cShadowPages, pGVM->gmm.s.Allocated.cShadowPages, cPages));
8541678784dbe432feebe6d9c1528525e1771397vboxsync if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cFixedPages + cPages > pGVM->gmm.s.Reserved.cFixedPages))
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync Log(("gmmR0AllocatePages:Fixed: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync pGVM->gmm.s.Reserved.cFixedPages, pGVM->gmm.s.Allocated.cFixedPages, cPages));
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync * Check if we need to allocate more memory or not. In bound memory mode this
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync * is a bit extra work but it's easier to do it upfront than bailing out later.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists); i++)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync for (PGMMCHUNK pCur = pSet->apLists[i]; pCur; pCur = pCur->pFreeNext)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Pick the pages.
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * Try make some effort keeping VMs sharing private chunks.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /* first round, pick from chunks with an affinity to the VM. */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists) && iPage < cPages; i++)
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /* second round, pick pages from the 100% empty chunks we just skipped above. */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync PGMMCHUNK pCur = pSet->apLists[RT_ELEMENTS(pSet->apLists) - 1];
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /* third round, disregard affinity. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Update the account.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync case GMMACCOUNT_BASE: pGVM->gmm.s.Allocated.cBasePages += iPage; break;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync case GMMACCOUNT_SHADOW: pGVM->gmm.s.Allocated.cShadowPages += iPage; break;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync case GMMACCOUNT_FIXED: pGVM->gmm.s.Allocated.cFixedPages += iPage; break;
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync AssertMsgReturn(iPage == cPages, ("%u != %u\n", iPage, cPages), VERR_INTERNAL_ERROR);
7528d4a15800321b4013826ce35ad184898dba21vboxsync * Check if we've reached some threshold and should kick one or two VMs and tell
7528d4a15800321b4013826ce35ad184898dba21vboxsync * them to inflate their balloons a bit more... later.
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsync * Updates the previous allocations and allocates more pages.
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsync * The handy pages are always taken from the 'base' memory account.
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsync * The allocated pages are not cleared and will contains random garbage.
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsync * @returns VBox status code:
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsync * @retval VINF_SUCCESS on success.
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsync * @retval VERR_NOT_OWNER if the caller is not an EMT.
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsync * @retval VERR_GMM_PAGE_NOT_FOUND if one of the pages to update wasn't found.
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsync * @retval VERR_GMM_PAGE_NOT_PRIVATE if one of the pages to update wasn't a
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * private page.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @retval VERR_GMM_PAGE_NOT_SHARED if one of the pages to update wasn't a
7528d4a15800321b4013826ce35ad184898dba21vboxsync * shared page.
7528d4a15800321b4013826ce35ad184898dba21vboxsync * @retval VERR_GMM_NOT_PAGE_OWNER if one of the pages to be updated wasn't
7528d4a15800321b4013826ce35ad184898dba21vboxsync * owned by the VM.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @retval VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * that is we're trying to allocate more than we've reserved.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param pVM Pointer to the shared VM structure.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param idCpu VCPU id
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param cPagesToUpdate The number of pages to update (starting from the head).
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param cPagesToAlloc The number of pages to allocate (starting from the head).
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param paPages The array of page descriptors.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * See GMMPAGEDESC for details on what is expected on input.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @thread EMT.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsyncGMMR0DECL(int) GMMR0AllocateHandyPages(PVM pVM, VMCPUID idCpu, uint32_t cPagesToUpdate, uint32_t cPagesToAlloc, PGMMPAGEDESC paPages)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync LogFlow(("GMMR0AllocateHandyPages: pVM=%p cPagesToUpdate=%#x cPagesToAlloc=%#x paPages=%p\n",
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Validate, get basics and take the semaphore.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * (This is a relatively busy path, so make predictions where possible.)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync AssertMsgReturn( (cPagesToUpdate && cPagesToUpdate < 1024)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync ("cPagesToUpdate=%#x cPagesToAlloc=%#x\n", cPagesToUpdate, cPagesToAlloc),
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync unsigned iPage = 0;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync AssertMsgReturn( ( paPages[iPage].HCPhysGCPhys <= GMM_GCPHYS_LAST
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync && !(paPages[iPage].HCPhysGCPhys & PAGE_OFFSET_MASK))
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync || paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHAREABLE,
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync ("#%#x: %RHp\n", iPage, paPages[iPage].HCPhysGCPhys),
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync AssertMsgReturn( paPages[iPage].idPage <= GMM_PAGEID_LAST
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /*|| paPages[iPage].idPage == NIL_GMM_PAGEID*/,
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync AssertMsgReturn( paPages[iPage].idPage <= GMM_PAGEID_LAST
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /*|| paPages[iPage].idSharedPage == NIL_GMM_PAGEID*/,
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync AssertMsgReturn(paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS, ("#%#x: %RHp\n", iPage, paPages[iPage].HCPhysGCPhys), VERR_INVALID_PARAMETER);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /* No allocations before the initial reservation has been made! */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Perform the updates.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Stop on the first error.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync PGMMPAGE pPage = gmmR0GetPage(pGMM, paPages[iPage].idPage);
Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not private! %.*Rhxs (type %d)\n", iPage, paPages[iPage].idPage, sizeof(*pPage), pPage, pPage->Common.u2State));
Log(("GMMR0AllocateHandyPages: free shared page %x cRefs=%d\n", paPages[iPage].idSharedPage, pPage->Shared.cRefs));
Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not found! (shared)\n", iPage, paPages[iPage].idSharedPage));
return rc;
GMMR0DECL(int) GMMR0AllocatePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
LogFlow(("GMMR0AllocatePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));
return rc;
AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
return rc;
GMMR0DECL(int) GMMR0AllocateLargePage(PVM pVM, VMCPUID idCpu, uint32_t cbPage, uint32_t *pIdPage, RTHCPHYS *pHCPhys)
return rc;
return VERR_NOT_SUPPORTED;
if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages > pGVM->gmm.s.Reserved.cBasePages))
return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
return rc;
return rc;
return rc;
return VERR_NOT_SUPPORTED;
Log(("GMMR0FreeLargePage: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cBasePages, cPages));
return rc;
&& pGVM)
pPage, pPage - &pChunk->aPages[0], idPage, pPage->Common.u2State, pChunk->iFreeHead)); NOREF(idPage);
pPage->u = 0;
#ifdef VBOX_WITH_PAGE_SHARING
DECLINLINE(void) gmmR0ConvertToSharedPage(PGMM pGMM, PGVM pGVM, RTHCPHYS HCPhys, uint32_t idPage, PGMMPAGE pPage)
static int gmmR0FreePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
switch (enmAccount)
case GMMACCOUNT_BASE:
case GMMACCOUNT_SHADOW:
case GMMACCOUNT_FIXED:
switch (enmAccount)
return rc;
GMMR0DECL(int) GMMR0FreePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
LogFlow(("GMMR0FreePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));
return rc;
AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
return rc;
GMMR0DECL(int) GMMR0BalloonedPages(PVM pVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages)
AssertMsgReturn(cBalloonedPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cBalloonedPages), VERR_INVALID_PARAMETER);
return rc;
switch (enmAction)
case GMMBALLOONACTION_INFLATE:
if (RT_LIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cBalloonedPages <= pGVM->gmm.s.Reserved.cBasePages))
/* Codepath never taken. Might be interesting in the future to request ballooned memory from guests in low memory conditions.. */
AssertFailed();
Log(("GMMR0BalloonedPages: +%#x - Global=%#llx / VM: Total=%#llx Req=%#llx Actual=%#llx (pending)\n", cBalloonedPages,
pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqBalloonedPages, pGVM->gmm.s.cReqActuallyBalloonedPages));
case GMMBALLOONACTION_DEFLATE:
cBalloonedPages, pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqDeflatePages));
case GMMBALLOONACTION_RESET:
return rc;
return VINF_SUCCESS;
return rc;
return rc;
return rc;
return VINF_SUCCESS;
Log(("gmmR0UnmapChunk: Chunk %#x is not mapped into pGVM=%p/%#x\n", pChunk->Core.Key, pGVM, pGVM->hSelf));
return VERR_GMM_CHUNK_NOT_MAPPED;
return VERR_GMM_CHUNK_NOT_FOUND;
return VINF_SUCCESS;
#ifdef VBOX_WITH_PAGE_SHARING
return VINF_SUCCESS;
return VERR_GMM_CHUNK_ALREADY_MAPPED;
int rc = RTR0MemObjMapUser(&MapObj, pChunk->MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
void *pvMappings = RTMemRealloc(pChunk->paMappings, (pChunk->cMappings + 2 /*8*/) * sizeof(pChunk->paMappings[0]));
return VERR_NO_MEMORY;
return rc;
GMMR0DECL(int) GMMR0MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
return rc;
return VERR_INVALID_PARAMETER;
return rc;
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
return rc;
return VERR_NOT_SUPPORTED;
rc = RTR0MemObjLockUser(&MemObj, pvR3, GMM_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
return rc;
char *pszModuleName;
char *pszVersion;
if ( pInfo
GMMR0DECL(int) GMMR0RegisterSharedModule(PVM pVM, VMCPUID idCpu, VBOXOSFAMILY enmGuestOS, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule,
#ifdef VBOX_WITH_PAGE_SHARING
return rc;
Log(("GMMR0RegisterSharedModule %s %s base %RGv size %x\n", pszModuleName, pszVersion, GCBaseAddr, cbModule));
bool fNewModule = false;
PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)RTAvlGCPtrGet(&pGVM->gmm.s.pSharedModuleTree, GCBaseAddr);
if (!pRecVM)
if (!pRecVM)
AssertFailed();
goto end;
/* Save the region data as they can differ between VMs (address space scrambling or simply different loading order) */
for (unsigned i = 0; i < cRegions; i++)
fNewModule = true;
PGMMSHAREDMODULE pGlobalModule = (PGMMSHAREDMODULE)RTAvlGCPtrGet(&pGMM->pGlobalSharedModuleTree, GCBaseAddr);
if ( !pGlobalModule
/* Two identical copies of e.g. Win7 x64 will typically not have a similar virtual address space layout for dlls or kernel modules.
int ret = RTAvlGCPtrDoWithAll(&pGMM->pGlobalSharedModuleTree, true /* fFromLeft */, gmmR0CheckForIdenticalModule, &Info);
if (!pGlobalModule)
if (!pGlobalModule)
AssertFailed();
goto end;
for (unsigned i = 0; i < cRegions; i++)
if ( fNewModule
|| pRecVM->fCollision == true) /* colliding module unregistered and new one registerd since the last check */
Log(("GMMR0RegisterSharedModule: using existing module %s cUser=%d!\n", pszModuleName, pGlobalModule->cUsers));
goto end;
end:
return rc;
return VERR_NOT_IMPLEMENTED;
GMMR0DECL(int) GMMR0RegisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMREGISTERSHAREDMODULEREQ pReq)
AssertMsgReturn(pReq->Hdr.cbReq >= sizeof(*pReq) && pReq->Hdr.cbReq == RT_UOFFSETOF(GMMREGISTERSHAREDMODULEREQ, aRegions[pReq->cRegions]), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
/* Pass back return code in the request packet to preserve informational codes. (VMMR3CallR0 chokes on them) */
pReq->rc = GMMR0RegisterSharedModule(pVM, idCpu, pReq->enmGuestOS, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule, pReq->cRegions, pReq->aRegions);
return VINF_SUCCESS;
GMMR0DECL(int) GMMR0UnregisterSharedModule(PVM pVM, VMCPUID idCpu, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule)
#ifdef VBOX_WITH_PAGE_SHARING
return rc;
Log(("GMMR0UnregisterSharedModule %s %s base=%RGv size %x\n", pszModuleName, pszVersion, GCBaseAddr, cbModule));
PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)RTAvlGCPtrGet(&pGVM->gmm.s.pSharedModuleTree, GCBaseAddr);
if (!pRecVM)
goto end;
/* Free the ranges, but leave the pages intact as there might still be references; they will be cleared by the COW mechanism. */
end:
return rc;
return VERR_NOT_IMPLEMENTED;
GMMR0DECL(int) GMMR0UnregisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMUNREGISTERSHAREDMODULEREQ pReq)
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
return GMMR0UnregisterSharedModule(pVM, idCpu, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule);
#ifdef VBOX_WITH_PAGE_SHARING
* - if a shared page is new, then it changes the GMM page type to shared and returns it in the paPageDesc array
* - if a shared page already exists, then it checks if the VM page is identical and if so frees the VM page and returns the shared page in the paPageDesc array
GMMR0DECL(int) GMMR0SharedModuleCheckRange(PGVM pGVM, PGMMSHAREDMODULE pModule, unsigned idxRegion, unsigned cPages, PGMMSHAREDPAGEDESC paPageDesc)
AssertReturn(cPages == (pModule->aRegions[idxRegion].cbRegion >> PAGE_SHIFT), VERR_INVALID_PARAMETER);
Log(("GMMR0SharedModuleCheckRange %s base %RGv region %d cPages %d\n", pModule->szName, pModule->Core.Key, idxRegion, cPages));
pGlobalRegion->paHCPhysPageID = (uint32_t *)RTMemAlloc(cPages * sizeof(*pGlobalRegion->paHCPhysPageID));
AssertFailed();
goto end;
for (unsigned i = 0; i < cPages; i++)
for (unsigned i = 0; i < cPages; i++)
if (!pPage)
AssertFailed();
goto end;
AssertMsg(paPageDesc[i].GCPhys == (pPage->Private.pfn << 12), ("desc %RGp gmm %RGp\n", paPageDesc[i].HCPhys, (pPage->Private.pfn << 12)));
Log(("Replace existing page guest %RGp host %RHp id %x -> id %x\n", paPageDesc[i].GCPhys, paPageDesc[i].HCPhys, paPageDesc[i].uHCPhysPageId, pGlobalRegion->paHCPhysPageID[i]));
if (!pPage)
AssertFailed();
goto end;
Log(("Replace existing page guest host %RHp -> %RHp\n", paPageDesc[i].HCPhys, ((uint64_t)pPage->Shared.pfn) << PAGE_SHIFT));
if (pChunk)
AssertFailed();
goto end;
AssertFailed();
goto end;
/* Get the virtual address of the physical page; map the chunk into the VM process if not already done. */
goto end;
end:
return rc;
#ifdef VBOX_WITH_PAGE_SHARING
return rc;
return rc;
return VERR_NOT_IMPLEMENTED;
#ifdef VBOX_WITH_PAGE_SHARING
&& pGlobalModule)
Log(("gmmR0CheckSharedModule: check %s %s base=%RGv size=%x collision=%d\n", pGlobalModule->szName, pGlobalModule->szVersion, pGlobalModule->Core.Key, pGlobalModule->cbModule, pLocalModule->fCollision));
PGMR0SharedModuleCheck(pInfo->pGVM->pVM, pInfo->pGVM, pInfo->idCpu, pGlobalModule, pLocalModule->cRegions, pLocalModule->aRegions);
#ifdef DEBUG_sandervl
return rc;
return VINF_SUCCESS;
#ifdef VBOX_WITH_PAGE_SHARING
return rc;
# ifndef DEBUG_sandervl
RTAvlGCPtrDoWithAll(&pGVM->gmm.s.pSharedModuleTree, true /* fFromLeft */, gmmR0CheckSharedModule, &Info);
# ifndef DEBUG_sandervl
return rc;
return VERR_NOT_IMPLEMENTED;
bool fFoundDuplicate;
goto end;
while (iPage-- > 0)
end:
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
goto end;
if (!pChunk)
AssertFailed();
goto end;
AssertFailed();
goto end;
if (!pPage)
AssertFailed();
goto end;
end:
return rc;