GMMR0.cpp revision 4cab7fd7194d21a77b4953bc22c50f24187e05e7
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * GMM - Global Memory Manager.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Copyright (C) 2007 Oracle Corporation
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This file is part of VirtualBox Open Source Edition (OSE), as
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * available from http://www.virtualbox.org. This file is free software;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * you can redistribute it and/or modify it under the terms of the GNU
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * General Public License (GPL) as published by the Free Software
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Foundation, in version 2 as it comes in the "COPYING" file of the
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** @page pg_gmm GMM - The Global Memory Manager
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * As the name indicates, this component is responsible for global memory
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * management. Currently only guest RAM is allocated from the GMM, but this
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * may change to include shadow page tables and other bits later.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Guest RAM is managed as individual pages, but allocated from the host OS
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * in chunks for reasons of portability / efficiency. To minimize the memory
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * footprint all tracking structure must be as small as possible without
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * unnecessary performance penalties.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The allocation chunks have a fixed size, the size defined at compile time
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * by the #GMM_CHUNK_SIZE \#define.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Each chunk is given an unique ID. Each page also has a unique ID. The
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * relationship between the two IDs is:
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * GMM_CHUNK_SHIFT = log2(GMM_CHUNK_SIZE / PAGE_SIZE);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * idPage = (idChunk << GMM_CHUNK_SHIFT) | iPage;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Where iPage is the index of the page within the chunk. This ID scheme
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * permits for efficient chunk and page lookup, but it relies on the chunk size
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * to be set at compile time. The chunks are organized in an AVL tree with their
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * IDs being the keys.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The physical address of each page in an allocation chunk is maintained by
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * the #RTR0MEMOBJ and obtained using #RTR0MemObjGetPagePhysAddr. There is no
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * need to duplicate this information (it'll cost 8-bytes per page if we did).
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * So what do we need to track per page? Most importantly we need to know
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * which state the page is in:
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * - Private - Allocated for (eventually) backing one particular VM page.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * - Shared - Readonly page that is used by one or more VMs and treated
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * as COW by PGM.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * - Free - Not used by anyone.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * For the page replacement operations (sharing, defragmenting and freeing)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * to be somewhat efficient, private pages needs to be associated with a
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * particular page in a particular VM.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Tracking the usage of shared pages is impractical and expensive, so we'll
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * settle for a reference counting system instead.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Free pages will be chained on LIFOs
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * On 64-bit systems we will use a 64-bit bitfield per page, while on 32-bit
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * systems a 32-bit bitfield will have to suffice because of address space
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * limitations. The #GMMPAGE structure shows the details.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @section sec_gmm_alloc_strat Page Allocation Strategy
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The strategy for allocating pages has to take fragmentation and shared
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * pages into account, or we may end up with 2000 chunks with only
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * a few pages in each. Shared pages cannot easily be reallocated because
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * of the inaccurate usage accounting (see above). Private pages can be
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * reallocated by a defragmentation thread in the same manner that sharing
968c867cc19737e4e1fd97c396fcf75a3d52dd27vboxsync * The first approach is to manage the free pages in two sets depending on
968c867cc19737e4e1fd97c396fcf75a3d52dd27vboxsync * whether they are mainly for the allocation of shared or private pages.
968c867cc19737e4e1fd97c396fcf75a3d52dd27vboxsync * In the initial implementation there will be almost no possibility for
b84a3f2aac9529d5c5840512b12d81bc62d0e665vboxsync * mixing shared and private pages in the same chunk (only if we're really
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * stressed on memory), but when we implement forking of VMs and have to
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * deal with lots of COW pages it'll start getting kind of interesting.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The sets are lists of chunks with approximately the same number of
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * free pages. Say the chunk size is 1MB, meaning 256 pages, and a set
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * consists of 16 lists. So, the first list will contain the chunks with
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * 1-7 free pages, the second covers 8-15, and so on. The chunks will be
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * moved between the lists as pages are freed up or allocated.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @section sec_gmm_costs Costs
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The per page cost in kernel space is 32-bit plus whatever RTR0MEMOBJ
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * entails. In addition there is the chunk cost of approximately
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * (sizeof(RTR0MEMOBJ) + sizeof(CHUNK)) / 2^CHUNK_SHIFT bytes per page.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * On Windows the per page #RTR0MEMOBJ cost is 32-bit on 32-bit windows
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * and 64-bit on 64-bit windows (a PFN_NUMBER in the MDL). So, 64-bit per page.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The cost on Linux is identical, but here it's because of sizeof(struct page *).
968c867cc19737e4e1fd97c396fcf75a3d52dd27vboxsync * @section sec_gmm_legacy Legacy Mode for Non-Tier-1 Platforms
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * In legacy mode the page source is locked user pages and not
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * #RTR0MemObjAllocPhysNC, this means that a page can only be allocated
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * by the VM that locked it. We will make no attempt at implementing
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * page sharing on these systems, just do enough to make it all work.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @subsection sub_gmm_locking Serializing
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * One simple fast mutex will be employed in the initial implementation, not
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * two as mentioned in @ref subsec_pgmPhys_Serializing.
b84a3f2aac9529d5c5840512b12d81bc62d0e665vboxsync * @see @ref subsec_pgmPhys_Serializing
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @section sec_gmm_overcommit Memory Over-Commitment Management
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The GVM will have to do the system wide memory over-commitment
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * management. My current ideas are:
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * - Per VM oc policy that indicates how much to initially commit
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * to it and what to do in a out-of-memory situation.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * - Prevent overtaxing the host.
968c867cc19737e4e1fd97c396fcf75a3d52dd27vboxsync * There are some challenges here, the main ones are configurability and
b84a3f2aac9529d5c5840512b12d81bc62d0e665vboxsync * security. Should we for instance permit anyone to request 100% memory
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * commitment? Who should be allowed to do runtime adjustments of the
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * config. And how to prevent these settings from being lost when the last
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * VM process exits? The solution is probably to have an optional root
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * daemon that will keep VMMR0.r0 in memory and enable the security measures.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @section sec_gmm_numa NUMA
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * NUMA considerations will be designed and implemented a bit later.
968c867cc19737e4e1fd97c396fcf75a3d52dd27vboxsync * The preliminary guess is that we will have to try allocate memory as
b84a3f2aac9529d5c5840512b12d81bc62d0e665vboxsync * close as possible to the CPUs the VM is executed on (EMT and additional CPU
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * threads). Which means it's mostly about allocation and sharing policies.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Both the scheduler and allocator interface will have to supply some NUMA info
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * and we'll need to have a way to calc access costs.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/*******************************************************************************
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync* Header Files *
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync*******************************************************************************/
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/*******************************************************************************
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync* Structures and Typedefs *
968c867cc19737e4e1fd97c396fcf75a3d52dd27vboxsync*******************************************************************************/
b84a3f2aac9529d5c5840512b12d81bc62d0e665vboxsync/** Pointer to set of free chunks. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** Pointer to a GMM allocation chunk. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The per-page tracking structure employed by the GMM.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * On 32-bit hosts some trickery is necessary to compress all
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * the information into 32-bits. When the fSharedFree member is set,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * the 30th bit decides whether it's a free page or not.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Because of the different layout on 32-bit and 64-bit hosts, macros
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * are used to get and set some of the data.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsynctypedef union GMMPAGE
968c867cc19737e4e1fd97c396fcf75a3d52dd27vboxsync /** Unsigned integer view. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The common view. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The page state. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The view of a private page. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The guest page frame number. (Max addressable: 2 ^ 44 - 16) */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The GVM handle. (64K VMs) */
968c867cc19737e4e1fd97c396fcf75a3d52dd27vboxsync /** Reserved. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The page state. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The view of a shared page. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The host page frame number. (Max addressable: 2 ^ 44 - 16) */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The reference count (64K VMs). */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** Reserved. Checksum or something? Two hGVMs for forking? */
b84a3f2aac9529d5c5840512b12d81bc62d0e665vboxsync /** The page state. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The view of a free page. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The index of the next page in the free list. UINT16_MAX is NIL. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** Reserved. Checksum or something? */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** Reserved. Checksum or something? */
968c867cc19737e4e1fd97c396fcf75a3d52dd27vboxsync /** The page state. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync#else /* 32-bit */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** Unsigned integer view. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The common view. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The page state. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The view of a private page. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The guest page frame number. (Max addressable: 2 ^ 36) */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The GVM handle. (127 VMs) */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The top page state bit, MBZ. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The view of a shared page. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The reference count. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The page state. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The view of a free page. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The index of the next page in the free list. UINT16_MAX is NIL. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** Reserved. Checksum or something? */
b84a3f2aac9529d5c5840512b12d81bc62d0e665vboxsync /** The page state. */
00550544656e1a1537bad42c4f4bacef814637cavboxsync/** Pointer to a GMMPAGE. */
968c867cc19737e4e1fd97c396fcf75a3d52dd27vboxsync/** @name The Page States.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** A private page. */
968c867cc19737e4e1fd97c396fcf75a3d52dd27vboxsync/** A private page - alternative value used on the 32-bit implementation.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This will never be used on 64-bit hosts. */
968c867cc19737e4e1fd97c396fcf75a3d52dd27vboxsync/** A shared page. */
968c867cc19737e4e1fd97c396fcf75a3d52dd27vboxsync/** A free page. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** @def GMM_PAGE_IS_PRIVATE
968c867cc19737e4e1fd97c396fcf75a3d52dd27vboxsync * @returns true if private, false if not.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pPage The GMM page.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync# define GMM_PAGE_IS_PRIVATE(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_PRIVATE )
968c867cc19737e4e1fd97c396fcf75a3d52dd27vboxsync# define GMM_PAGE_IS_PRIVATE(pPage) ( (pPage)->Private.fZero == 0 )
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** @def GMM_PAGE_IS_SHARED
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns true if shared, false if not.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pPage The GMM page.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync#define GMM_PAGE_IS_SHARED(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_SHARED )
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** @def GMM_PAGE_IS_FREE
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns true if free, false if not.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pPage The GMM page.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync#define GMM_PAGE_IS_FREE(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_FREE )
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** @def GMM_PAGE_PFN_LAST
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The last valid guest pfn range.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @remark Some of the values outside the range have special meaning,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * see GMM_PAGE_PFN_UNSHAREABLE.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncAssertCompile(GMM_PAGE_PFN_LAST == (GMM_GCPHYS_LAST >> PAGE_SHIFT));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** @def GMM_PAGE_PFN_UNSHAREABLE
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Indicates that this page isn't used for normal guest memory and thus isn't shareable.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync# define GMM_PAGE_PFN_UNSHAREABLE UINT32_C(0xfffffff1)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync# define GMM_PAGE_PFN_UNSHAREABLE UINT32_C(0x00fffff1)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncAssertCompile(GMM_PAGE_PFN_UNSHAREABLE == (GMM_GCPHYS_UNSHAREABLE >> PAGE_SHIFT));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * A GMM allocation chunk ring-3 mapping record.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This should really be associated with a session and not a VM, but
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * it's simpler to associate it with a VM and clean up when the VM object
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * is destroyed.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsynctypedef struct GMMCHUNKMAP
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The mapping object. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The VM owning the mapping. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** Pointer to a GMM allocation chunk mapping. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync GMMCHUNKTYPE_CONTINUOUS = 2, /* one 2 MB continuous physical range. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * A GMM allocation chunk.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsynctypedef struct GMMCHUNK
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The AVL node core.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The Key is the chunk ID. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The memory object.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Either from RTR0MemObjAllocPhysNC or RTR0MemObjLockUser depending on
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * what the host can dish up with. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** Pointer to the next chunk in the free list. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** Pointer to the previous chunk in the free list. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** Pointer to the free set this chunk belongs to. NULL for
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * chunks with no free pages. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** Pointer to an array of mappings. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The number of mappings. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The head of the list of free pages. UINT16_MAX is the NIL value. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The number of free pages. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The GVM handle of the VM that first allocated pages from this chunk, this
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * is used as a preference when there are several chunks to choose from.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * When in bound memory mode this isn't a preference any longer. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The number of private pages. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The number of shared pages. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** Chunk type */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The pages. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * An allocation chunk TLB entry.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsynctypedef struct GMMCHUNKTLBE
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The chunk id. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** Pointer to the chunk. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** Pointer to an allocation chunk TLB entry. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** The number of entries in the allocation chunk TLB. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** Gets the TLB entry index for the given Chunk ID. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync#define GMM_CHUNKTLB_IDX(idChunk) ( (idChunk) & (GMM_CHUNKTLB_ENTRIES - 1) )
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * An allocation chunk TLB.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsynctypedef struct GMMCHUNKTLB
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The TLB entries. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** Pointer to an allocation chunk TLB. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** The GMMCHUNK::cFree shift count. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** The GMMCHUNK::cFree mask for use when considering relinking a chunk. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** The number of lists in set. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync#define GMM_CHUNK_FREE_SET_LISTS (GMM_CHUNK_NUM_PAGES >> GMM_CHUNK_FREE_SET_SHIFT)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * A set of free chunks.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The number of free pages in the set. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** Chunks ordered by increasing number of free pages. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The GMM instance data.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsynctypedef struct GMM
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** Magic / eye catcher. GMM_MAGIC */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The fast mutex protecting the GMM.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * More fine grained locking can be implemented later if necessary. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The chunk tree. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The chunk TLB. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The private free set. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The shared free set. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** Shared module tree (global). */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** @todo separate trees for distinctly different guest OSes. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The maximum number of pages we're allowed to allocate.
00550544656e1a1537bad42c4f4bacef814637cavboxsync * @gcfgm 64-bit GMM/MaxPages Direct.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @gcfgm 32-bit GMM/PctPages Relative to the number of host pages. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The number of pages that has been reserved.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The deal is that cReservedPages - cOverCommittedPages <= cMaxPages. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The number of pages that we have over-committed in reservations. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The number of actually allocated (committed if you like) pages. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The number of pages that are shared. A subset of cAllocatedPages. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The number of pages that are shared that has been left behind by
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * VMs not doing proper cleanups. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The number of allocation chunks.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * (The number of pages we've allocated from the host can be derived from this.) */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The number of current ballooned pages. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The legacy allocation mode indicator.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This is determined at initialization time. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The bound memory mode indicator.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * When set, the memory will be bound to a specific VM and never
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * shared. This is always set if fLegacyAllocationMode is set.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * (Also determined at initialization time.) */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The number of registered VMs. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** The previous allocated Chunk ID.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Used as a hint to avoid scanning the whole bitmap. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** Chunk ID allocation bitmap.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Bits of allocated IDs are set, free ones are clear.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The NIL id (0) is marked allocated. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync uint32_t bmChunkId[(GMM_CHUNKID_LAST + 1 + 31) / 32];
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** Pointer to the GMM instance. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** The value of GMM::u32Magic (Katsuhiro Otomo). */
377f1df8d6ec248927bcdf0efabf87ab55c4a615vboxsync/*******************************************************************************
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync* Global Variables *
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync*******************************************************************************/
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** Pointer to the GMM instance data. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** Macro for obtaining and validating the g_pGMM pointer.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * On failure it will return from the invoking function with the specified return value.
00550544656e1a1537bad42c4f4bacef814637cavboxsync * @param pGMM The name of the pGMM variable.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param rc The return value on failure. Use VERR_INTERNAL_ERROR for
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * VBox status codes.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn((pGMM)->u32Magic == GMM_MAGIC, ("%p - %#x\n", (pGMM), (pGMM)->u32Magic), (rc)); \
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync } while (0)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** Macro for obtaining and validating the g_pGMM pointer, void function variant.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * On failure it will return from the invoking function.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM The name of the pGMM variable.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturnVoid((pGMM)->u32Magic == GMM_MAGIC, ("%p - %#x\n", (pGMM), (pGMM)->u32Magic)); \
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync } while (0)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** @def GMM_CHECK_SANITY_UPON_ENTERING
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Checks the sanity of the GMM instance data before making changes.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This macro is a stub by default and must be enabled manually in the code.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns true if sane, false if not.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM The name of the pGMM variable.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync#if defined(VBOX_STRICT) && 0
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync# define GMM_CHECK_SANITY_UPON_ENTERING(pGMM) (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync# define GMM_CHECK_SANITY_UPON_ENTERING(pGMM) (true)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** @def GMM_CHECK_SANITY_UPON_LEAVING
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Checks the sanity of the GMM instance data after making changes.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This macro is a stub by default and must be enabled manually in the code.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns true if sane, false if not.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM The name of the pGMM variable.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync#if defined(VBOX_STRICT) && 0
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync# define GMM_CHECK_SANITY_UPON_LEAVING(pGMM) (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/** @def GMM_CHECK_SANITY_IN_LOOPS
377f1df8d6ec248927bcdf0efabf87ab55c4a615vboxsync * Checks the sanity of the GMM instance in the allocation loops.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This macro is a stub by default and must be enabled manually in the code.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns true if sane, false if not.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM The name of the pGMM variable.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync#if defined(VBOX_STRICT) && 0
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync# define GMM_CHECK_SANITY_IN_LOOPS(pGMM) (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/*******************************************************************************
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync* Internal Functions *
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync*******************************************************************************/
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic DECLCALLBACK(int) gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic DECLCALLBACK(int) gmmR0CleanupVMScanChunk(PAVLU32NODECORE pNode, void *pvGMM);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic DECLCALLBACK(int) gmmR0CleanupSharedModule(PAVLGCPTRNODECORE pNode, void *pvGVM);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncDECLINLINE(void) gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncDECLINLINE(void) gmmR0UnlinkChunk(PGMMCHUNK pChunk);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic uint32_t gmmR0SanityCheck(PGMM pGMM, const char *pszFunction, unsigned uLineNo);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic void gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic void gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Initializes the GMM component.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This is called when the VMMR0.r0 module is loaded and protected by the
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * loader semaphore.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Allocate the instance data and the lock(s).
377f1df8d6ec248927bcdf0efabf87ab55c4a615vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pGMM->ChunkTLB.aEntries); i++)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGMM->ChunkTLB.aEntries[i].idChunk = NIL_GMM_CHUNKID;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Check and see if RTR0MemObjAllocPhysNC works.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync#if 0 /* later, see #3170. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync# if defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Don't reuse possibly partial chunks because of the virtual address space limitation. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Query system page count and guess a reasonable cMaxPages value.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for query ram size and such. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
377f1df8d6ec248927bcdf0efabf87ab55c4a615vboxsync * Terminates the GMM component.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Take care / be paranoid...
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync SUPR0Printf("GMMR0Term: u32Magic=%#x\n", pGMM->u32Magic);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Undo what init did and free all the resources we've acquired.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Destroy the fundamentals. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* free any chunks still hanging around. */
377f1df8d6ec248927bcdf0efabf87ab55c4a615vboxsync RTAvlU32Destroy(&pGMM->pChunks, gmmR0TermDestroyChunk, pGMM);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* finally the instance data itself. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * RTAvlU32Destroy callback.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns 0
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pNode The node to destroy.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pvGMM The GMM handle.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic DECLCALLBACK(int) gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync if (pChunk->cFree != (GMM_CHUNK_SIZE >> PAGE_SHIFT))
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync SUPR0Printf("GMMR0Term: %p/%#x: cFree=%d cPrivate=%d cShared=%d cMappings=%d\n", pChunk,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pChunk->Core.Key, pChunk->cFree, pChunk->cPrivate, pChunk->cShared, pChunk->cMappings);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync int rc = RTR0MemObjFree(pChunk->MemObj, true /* fFreeMappings */);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync SUPR0Printf("GMMR0Term: %p/%#x: RTRMemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pChunk->Core.Key, pChunk->MemObj, rc, pChunk->cMappings);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Initializes the per-VM data for the GMM.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This is called from within the GVMM lock (from GVMMR0CreateVM)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * and should only initialize the data members so GMMR0CleanupVM
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * can deal with them. We reserve no memory or anything here,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * that's done later in GMMR0InitVM.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGVM Pointer to the Global VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertCompile(RT_SIZEOFMEMB(GVM,gmm.s) <= RT_SIZEOFMEMB(GVM,gmm.padding));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Cleans up when a VM is terminating.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGVM Pointer to the Global VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0CleanupVM: pGVM=%p:{.pVM=%p, .hSelf=%#x}\n", pGVM, pGVM->pVM, pGVM->hSelf));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Clean up all registered shared modules. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync RTAvlGCPtrDestroy(&pGVM->gmm.s.pSharedModuleTree, gmmR0CleanupSharedModule, pGVM);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The policy is 'INVALID' until the initial reservation
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * request has been serviced.
1cde4dd19cba0507a9cdab737272d88feba05d41vboxsync * If it's the last VM around, we can skip walking all the chunks looking
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * for the pages owned by this VM and instead flush the whole shebang.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This takes care of the eventuality that a VM has left shared page
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * references behind (shouldn't happen of course, but you never know).
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync#if 0 /* disabled so it won't hide bugs. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync RTAvlU32Destroy(&pGMM->pChunks, gmmR0CleanupVMDestroyChunk, pGMM);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pGMM->ChunkTLB.aEntries); i++)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGMM->ChunkTLB.aEntries[i].idChunk = NIL_GMM_CHUNKID;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync memset(&pGMM->bmChunkId[0], 0, sizeof(pGMM->bmChunkId));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Walk the entire pool looking for pages that belong to this VM
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * and left over mappings. (This'll only catch private pages, shared
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * pages will be 'left behind'.)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync uint64_t cPrivatePages = pGVM->gmm.s.cPrivatePages; /* save */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync RTAvlU32DoWithAll(&pGMM->pChunks, true /* fFromLeft */, gmmR0CleanupVMScanChunk, pGVM);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync SUPR0Printf("GMMR0CleanupVM: hGVM=%#x has %#x private pages that cannot be found!\n", pGVM->hSelf, pGVM->gmm.s.cPrivatePages);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* free empty chunks. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMCHUNK pCur = pGMM->Private.apLists[RT_ELEMENTS(pGMM->Private.apLists) - 1];
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* account for shared pages that weren't freed. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Assert(pGMM->cSharedPages >= pGVM->gmm.s.cSharedPages);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync SUPR0Printf("GMMR0CleanupVM: hGVM=%#x left %#x shared pages behind!\n", pGVM->hSelf, pGVM->gmm.s.cSharedPages);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGMM->cLeftBehindSharedPages += pGVM->gmm.s.cSharedPages;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Update the over-commitment management statistics.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGMM->cReservedPages -= pGVM->gmm.s.Reserved.cBasePages
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** @todo Update GMM->cOverCommittedPages */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* zap the GVM data. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * RTAvlU32DoWithAll callback.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns 0
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pNode The node to search.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pvGVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic DECLCALLBACK(int) gmmR0CleanupVMScanChunk(PAVLU32NODECORE pNode, void *pvGVM)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Look for pages belonging to the VM.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * (Perform some internal checks while we're scanning.)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync if (pChunk->cFree != (GMM_CHUNK_SIZE >> PAGE_SHIFT))
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync unsigned cPrivate = 0;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync unsigned cShared = 0;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync unsigned cFree = 0;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync gmmR0UnlinkChunk(pChunk); /* avoiding cFreePages updates. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync while (iPage-- > 0)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Free the page.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The reason for not using gmmR0FreePrivatePage here is that we
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * must *not* cause the chunk to be freed from under us - we're in
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * an AVL tree walk here.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pChunk->aPages[iPage].Free.iNext = pChunk->iFreeHead;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pChunk->aPages[iPage].Free.u2State = GMM_PAGE_STATE_FREE;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync gmmR0LinkChunk(pChunk, pChunk->cShared ? &g_pGMM->Shared : &g_pGMM->Private);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Did it add up?
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync SUPR0Printf("gmmR0CleanupVMScanChunk: Chunk %p/%#x has bogus stats - free=%d/%d private=%d/%d shared=%d/%d\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pChunk->cFree, cFree, pChunk->cPrivate, cPrivate, pChunk->cShared, cShared);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Look for the mapping belonging to the terminating VM.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pChunk->paMappings[i] = pChunk->paMappings[pChunk->cMappings];
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pChunk->paMappings[pChunk->cMappings].MapObj = NIL_RTR0MEMOBJ;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync int rc = RTR0MemObjFree(MemObj, false /* fFreeMappings (NA) */);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: mapping #%x: RTRMemObjFree(%p,false) -> %d \n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * If not in bound memory mode, we should reset the hGVM field
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * if it has our handle in it.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: cFree=%#x - it should be 0 in bound mode!\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgFailed(("%p/%#x: cFree=%#x - it should be 0 in bound mode!\n", pChunk, pChunk->Core.Key, pChunk->cFree));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync gmmR0LinkChunk(pChunk, pChunk->cShared ? &g_pGMM->Shared : &g_pGMM->Private);
377f1df8d6ec248927bcdf0efabf87ab55c4a615vboxsync * RTAvlU32Destroy callback for GMMR0CleanupVM.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns 0
e7d8a97e3787122b211311253a20c1600b441f8avboxsync * @param pNode The node (allocation chunk) to destroy.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pvGVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync/*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM)
e7d8a97e3787122b211311253a20c1600b441f8avboxsync SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: pGVM=%p exepcted %p\n", pChunk,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pChunk->Core.Key, i, pChunk->paMappings[i].pGVM, pGVM);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync int rc = RTR0MemObjFree(pChunk->paMappings[i].MapObj, false /* fFreeMappings (NA) */);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: RTRMemObjFree(%p,false) -> %d \n", pChunk,
e7d8a97e3787122b211311253a20c1600b441f8avboxsync pChunk->Core.Key, i, pChunk->paMappings[i].MapObj, rc);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync int rc = RTR0MemObjFree(pChunk->MemObj, true /* fFreeMappings */);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: RTRMemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pChunk->Core.Key, pChunk->MemObj, rc, pChunk->cMappings);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The initial resource reservations.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This will make memory reservations according to policy and priority. If there aren't
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * sufficient resources available to sustain the VM this function will fail and all
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * future allocations requests will fail as well.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * These are just the initial reservations made very very early during the VM creation
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * process and will be adjusted later in the GMMR0UpdateReservation call after the
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * ring-3 init has completed.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_MEMORY_RESERVATION_DECLINED
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_
1167f682bad8a5c086022e181da3bb4028a20ff8vboxsync * @param pVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param cBasePages The number of pages that may be allocated for the base RAM and ROMs.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This does not include MMIO2 and similar.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param cShadowPages The number of pages that may be allocated for shadow paging structures.
1167f682bad8a5c086022e181da3bb4028a20ff8vboxsync * @param cFixedPages The number of pages that may be allocated for fixed objects like the
1167f682bad8a5c086022e181da3bb4028a20ff8vboxsync * hyper heap, MMIO2 and similar.
1167f682bad8a5c086022e181da3bb4028a20ff8vboxsync * @param enmPolicy The OC policy to use on this VM.
1167f682bad8a5c086022e181da3bb4028a20ff8vboxsync * @param enmPriority The priority in an out-of-memory situation.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @thread The creator thread / EMT.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0InitialReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0InitialReservation: pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x enmPolicy=%d enmPriority=%d\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pVM, cBasePages, cShadowPages, cFixedPages, enmPolicy, enmPriority));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate, get basics and take the semaphore.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertReturn(cShadowPages, VERR_INVALID_PARAMETER);
1167f682bad8a5c086022e181da3bb4028a20ff8vboxsync AssertReturn(enmPolicy > GMMOCPOLICY_INVALID && enmPolicy < GMMOCPOLICY_END, VERR_INVALID_PARAMETER);
1167f682bad8a5c086022e181da3bb4028a20ff8vboxsync AssertReturn(enmPriority > GMMPRIORITY_INVALID && enmPriority < GMMPRIORITY_END, VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Check if we can accommodate this.
1cde4dd19cba0507a9cdab737272d88feba05d41vboxsync /* ... later ... */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Update the records.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGMM->cReservedPages += cBasePages + cFixedPages + cShadowPages;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0InitialReservation: returns %Rrc\n", rc));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * VMMR0 request wrapper for GMMR0InitialReservation.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns see GMMR0InitialReservation.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pReq The request packet.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0InitialReservationReq(PVM pVM, VMCPUID idCpu, PGMMINITIALRESERVATIONREQ pReq)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate input and pass it on.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync return GMMR0InitialReservation(pVM, idCpu, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages, pReq->enmPolicy, pReq->enmPriority);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This updates the memory reservation with the additional MMIO2 and ROM pages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_MEMORY_RESERVATION_DECLINED
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param cBasePages The number of pages that may be allocated for the base RAM and ROMs.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This does not include MMIO2 and similar.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param cShadowPages The number of pages that may be allocated for shadow paging structures.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param cFixedPages The number of pages that may be allocated for fixed objects like the
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * hyper heap, MMIO2 and similar.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @thread EMT.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0UpdateReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0UpdateReservation: pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate, get basics and take the semaphore.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertReturn(cShadowPages, VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Check if we can accommodate this.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* ... later ... */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Update the records.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGMM->cReservedPages -= pGVM->gmm.s.Reserved.cBasePages
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGMM->cReservedPages += cBasePages + cFixedPages + cShadowPages;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0UpdateReservation: returns %Rrc\n", rc));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * VMMR0 request wrapper for GMMR0UpdateReservation.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns see GMMR0UpdateReservation.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pReq The request packet.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0UpdateReservationReq(PVM pVM, VMCPUID idCpu, PGMMUPDATERESERVATIONREQ pReq)
c79685b34475bd901e9bf729f068e621b0ddbbacvboxsync * Validate input and pass it on.
0c5d6027e582baa247e987e184f93d44623e7443vboxsync AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
0c5d6027e582baa247e987e184f93d44623e7443vboxsync return GMMR0UpdateReservation(pVM, idCpu, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Performs sanity checks on a free set.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns Error count.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pSet Pointer to the set.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pszSetName The set name.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pszFunction The function from which it was called.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param uLine The line number.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic uint32_t gmmR0SanityCheckSet(PGMM pGMM, PGMMCHUNKFREESET pSet, const char *pszSetName,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Count the free pages in all the chunks and match it against pSet->cFreePages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists); i++)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync for (PGMMCHUNK pCur = pSet->apLists[i]; pCur; pCur = pCur->pFreeNext)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** @todo check that the chunk is hashed into the right set. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync SUPR0Printf("GMM insanity: found %#x pages in the %s set, expected %#x. (%s, line %u)\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync cPages, pszSetName, pSet->cFreePages, pszFunction, uLineNo);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Performs some sanity checks on the GMM while owning lock.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns Error count.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pszFunction The function from which it is called.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param uLineNo The line number.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic uint32_t gmmR0SanityCheck(PGMM pGMM, const char *pszFunction, unsigned uLineNo)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync cErrors += gmmR0SanityCheckSet(pGMM, &pGMM->Private, "private", pszFunction, uLineNo);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync cErrors += gmmR0SanityCheckSet(pGMM, &pGMM->Shared, "shared", pszFunction, uLineNo);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** @todo add more sanity checks. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Looks up a chunk in the tree and fill in the TLB entry for it.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This is not expected to fail and will bitch if it does.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns Pointer to the allocation chunk, NULL if not found.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance.
0c5d6027e582baa247e987e184f93d44623e7443vboxsync * @param idChunk The ID of the chunk to find.
0c5d6027e582baa247e987e184f93d44623e7443vboxsync * @param pTlbe Pointer to the TLB entry.
c79685b34475bd901e9bf729f068e621b0ddbbacvboxsyncstatic PGMMCHUNK gmmR0GetChunkSlow(PGMM pGMM, uint32_t idChunk, PGMMCHUNKTLBE pTlbe)
c79685b34475bd901e9bf729f068e621b0ddbbacvboxsync PGMMCHUNK pChunk = (PGMMCHUNK)RTAvlU32Get(&pGMM->pChunks, idChunk);
0c5d6027e582baa247e987e184f93d44623e7443vboxsync AssertMsgReturn(pChunk, ("Chunk %#x not found!\n", idChunk), NULL);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Finds an allocation chunk.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This is not expected to fail and will bitch if it does.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns Pointer to the allocation chunk, NULL if not found.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idChunk The ID of the chunk to find.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncDECLINLINE(PGMMCHUNK) gmmR0GetChunk(PGMM pGMM, uint32_t idChunk)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Do a TLB lookup, branch if not in the TLB.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(idChunk)];
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Finds a page.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This is not expected to fail and will bitch if it does.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns Pointer to the page, NULL if not found.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idPage The ID of the page to find.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncDECLINLINE(PGMMPAGE) gmmR0GetPage(PGMM pGMM, uint32_t idPage)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync return &pChunk->aPages[idPage & GMM_PAGEID_IDX_MASK];
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Unlinks the chunk from the free list it's currently on (if any).
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pChunk The allocation chunk.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pSet->apLists[(pChunk->cFree - 1) >> GMM_CHUNK_FREE_SET_SHIFT] = pNext;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Links the chunk onto the appropriate free list in the specified free set.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * If no free entries, it's not linked into any list.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pChunk The allocation chunk.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pSet The free set.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncDECLINLINE(void) gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync unsigned iList = (pChunk->cFree - 1) >> GMM_CHUNK_FREE_SET_SHIFT;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Frees a Chunk ID.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idChunk The Chunk ID to free.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic void gmmR0FreeChunkId(PGMM pGMM, uint32_t idChunk)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsg(ASMBitTest(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Allocates a new Chunk ID.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns The Chunk ID.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertCompile(!((GMM_CHUNKID_LAST + 1) & 31)); /* must be a multiple of 32 */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Try the next sequential one.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync#if 0 /* test the fallback first */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync && !ASMAtomicBitTestAndSet(&pVMM->bmChunkId[0], idChunk))
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Scan sequentially from the last one.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync idChunk = ASMBitNextClear(&pGMM->bmChunkId[0], GMM_CHUNKID_LAST + 1, idChunk);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk), NIL_GMM_CHUNKID);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Ok, scan from the start.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * We're not racing anyone, so there is no need to expect failures or have restart loops.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync idChunk = ASMBitFirstClear(&pGMM->bmChunkId[0], GMM_CHUNKID_LAST + 1);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(idChunk > NIL_GMM_CHUNKID, ("%#x\n", idChunk), NIL_GVM_HANDLE);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk), NIL_GMM_CHUNKID);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Registers a new chunk of memory.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This is called by both gmmR0AllocateOneChunk and GMMR0SeedChunk. The caller
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * must own the global lock.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pSet Pointer to the set.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param MemObj The memory object for the chunk.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param hGVM The affinity of the chunk. NIL_GVM_HANDLE for no
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * affinity.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param enmChunkType Chunk type (continuous or non-continuous)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param ppChunk Chunk address (out)
30dab499e07d428e2234ed5a6136cb53360cc93fvboxsyncstatic int gmmR0RegisterChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, RTR0MEMOBJ MemObj, uint16_t hGVM, GMMCHUNKTYPE enmChunkType, PGMMCHUNK *ppChunk = NULL)
377f1df8d6ec248927bcdf0efabf87ab55c4a615vboxsync Assert(hGVM != NIL_GVM_HANDLE || pGMM->fBoundMemoryMode);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMCHUNK pChunk = (PGMMCHUNK)RTMemAllocZ(sizeof(*pChunk));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Initialize it.
377f1df8d6ec248927bcdf0efabf87ab55c4a615vboxsync for (unsigned iPage = 0; iPage < RT_ELEMENTS(pChunk->aPages) - 1; iPage++)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pChunk->aPages[iPage].Free.u2State = GMM_PAGE_STATE_FREE;
377f1df8d6ec248927bcdf0efabf87ab55c4a615vboxsync pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.u2State = GMM_PAGE_STATE_FREE;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.iNext = UINT16_MAX;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Allocate a Chunk ID and insert it into the tree.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This has to be done behind the mutex of course.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("gmmR0RegisterChunk: pChunk=%p id=%#x cChunks=%d\n", pChunk, pChunk->Core.Key, pGMM->cChunks));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* bail out */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Allocate one new chunk and add it to the specified free set.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pSet Pointer to the set.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param hGVM The affinity of the new chunk.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param enmChunkType Chunk type (continuous or non-continuous)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param ppChunk Chunk address (out)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @remarks Called without owning the mutex.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic int gmmR0AllocateOneChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, uint16_t hGVM, GMMCHUNKTYPE enmChunkType, PGMMCHUNK *ppChunk = NULL)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Allocate the memory.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertReturn(enmChunkType == GMMCHUNKTYPE_NON_CONTINUOUS || enmChunkType == GMMCHUNKTYPE_CONTINUOUS, VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Leave the lock temporarily as the allocation might take long. */
377f1df8d6ec248927bcdf0efabf87ab55c4a615vboxsync rc = RTR0MemObjAllocPhysNC(&MemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync rc = RTR0MemObjAllocPhysEx(&MemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS, GMM_CHUNK_SIZE);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Grab the lock again. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync rc = gmmR0RegisterChunk(pGMM, pSet, MemObj, hGVM, enmChunkType, ppChunk);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** @todo Check that RTR0MemObjAllocPhysNC always returns VERR_NO_MEMORY on
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * allocation failure. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Attempts to allocate more pages until the requested amount is met.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code.
377f1df8d6ec248927bcdf0efabf87ab55c4a615vboxsync * @param pGMM Pointer to the GMM instance data.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGVM The calling VM.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pSet Pointer to the free set to grow.
377f1df8d6ec248927bcdf0efabf87ab55c4a615vboxsync * @param cPages The number of pages needed.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @remarks Called owning the mutex, but will leave it temporarily while
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * allocating the memory!
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic int gmmR0AllocateMoreChunks(PGMM pGMM, PGVM pGVM, PGMMCHUNKFREESET pSet, uint32_t cPages)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Try steal free chunks from the other set first. (Only take 100% free chunks.)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMCHUNKFREESET pOtherSet = pSet == &pGMM->Private ? &pGMM->Shared : &pGMM->Private;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMCHUNK pChunk = pOtherSet->apLists[GMM_CHUNK_FREE_SET_LISTS - 1];
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync while (pChunk && pChunk->cFree != GMM_CHUNK_NUM_PAGES)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * If we need still more pages, allocate new chunks.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Note! We will leave the mutex while doing the allocation,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync int rc = gmmR0AllocateOneChunk(pGMM, pSet, pGVM->hSelf, GMMCHUNKTYPE_NON_CONTINUOUS);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The memory is bound to the VM allocating it, so we have to count
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * the free pages carefully as well as making sure we brand them with
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * our VM handle.
377f1df8d6ec248927bcdf0efabf87ab55c4a615vboxsync * Note! We will leave the mutex while doing the allocation,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Count and see if we've reached the goal. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists); i++)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync for (PGMMCHUNK pCur = pSet->apLists[i]; pCur; pCur = pCur->pFreeNext)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Allocate more. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync int rc = gmmR0AllocateOneChunk(pGMM, pSet, hGVM, GMMCHUNKTYPE_NON_CONTINUOUS);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Allocates one private page.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Worker for gmmR0AllocatePages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance data.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param hGVM The GVM handle of the VM requesting memory.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pChunk The chunk to allocate it from.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pPageDesc The page descriptor.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic void gmmR0AllocatePage(PGMM pGMM, uint32_t hGVM, PGMMCHUNK pChunk, PGMMPAGEDESC pPageDesc)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* update the chunk stats. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* unlink the first free page. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertReleaseMsg(iPage < RT_ELEMENTS(pChunk->aPages), ("%d\n", iPage));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log3(("A pPage=%p iPage=%#x/%#x u2State=%d iFreeHead=%#x iNext=%#x\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pPage, iPage, (pChunk->Core.Key << GMM_CHUNKID_SHIFT) | iPage,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pPage->Common.u2State, pChunk->iFreeHead, pPage->Free.iNext));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* make the page private. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertCompile(GMM_GCPHYS_UNSHAREABLE >= GMM_GCPHYS_LAST);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pPage->Private.pfn = pPageDesc->HCPhysGCPhys >> PAGE_SHIFT;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pPage->Private.pfn = GMM_PAGE_PFN_UNSHAREABLE; /* unshareable / unassigned - same thing. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* update the page descriptor. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pPageDesc->HCPhysGCPhys = RTR0MemObjGetPagePhysAddr(pChunk->MemObj, iPage);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pPageDesc->idPage = (pChunk->Core.Key << GMM_CHUNKID_SHIFT) | iPage;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Common worker for GMMR0AllocateHandyPages and GMMR0AllocatePages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code:
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VINF_SUCCESS on success.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk or
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * gmmR0AllocateMoreChunks is necessary.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * that is we're trying to allocate more than we've reserved.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance data.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param cPages The number of pages to allocate.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param paPages Pointer to the page descriptors.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * See GMMPAGEDESC for details on what is expected on input.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param enmAccount The account to charge.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic int gmmR0AllocatePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Check allocation limits.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync if (RT_UNLIKELY(pGMM->cAllocatedPages + cPages > pGMM->cMaxPages))
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages > pGVM->gmm.s.Reserved.cBasePages))
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("gmmR0AllocatePages:Base: Reserved=%#llx Allocated+Ballooned+Requested=%#llx+%#llx+%#x!\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGVM->gmm.s.Reserved.cBasePages, pGVM->gmm.s.Allocated.cBasePages, pGVM->gmm.s.cBalloonedPages, cPages));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cShadowPages + cPages > pGVM->gmm.s.Reserved.cShadowPages))
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("gmmR0AllocatePages:Shadow: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGVM->gmm.s.Reserved.cShadowPages, pGVM->gmm.s.Allocated.cShadowPages, cPages));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cFixedPages + cPages > pGVM->gmm.s.Reserved.cFixedPages))
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("gmmR0AllocatePages:Fixed: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGVM->gmm.s.Reserved.cFixedPages, pGVM->gmm.s.Allocated.cFixedPages, cPages));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Check if we need to allocate more memory or not. In bound memory mode this
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * is a bit extra work but it's easier to do it upfront than bailing out later.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists); i++)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync for (PGMMCHUNK pCur = pSet->apLists[i]; pCur; pCur = pCur->pFreeNext)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Pick the pages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Try make some effort keeping VMs sharing private chunks.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* first round, pick from chunks with an affinity to the VM. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists) && iPage < cPages; i++)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync /* second round, pick pages from the 100% empty chunks we just skipped above. */
11923fc977be1686f5428c3e790c04d0701a074cvboxsync PGMMCHUNK pCur = pSet->apLists[RT_ELEMENTS(pSet->apLists) - 1];
11923fc977be1686f5428c3e790c04d0701a074cvboxsync gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync /* third round, disregard affinity. */
11923fc977be1686f5428c3e790c04d0701a074cvboxsync gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync * Update the account.
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync case GMMACCOUNT_BASE: pGVM->gmm.s.Allocated.cBasePages += iPage; break;
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync case GMMACCOUNT_SHADOW: pGVM->gmm.s.Allocated.cShadowPages += iPage; break;
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync case GMMACCOUNT_FIXED: pGVM->gmm.s.Allocated.cFixedPages += iPage; break;
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync AssertMsgReturn(iPage == cPages, ("%u != %u\n", iPage, cPages), VERR_INTERNAL_ERROR);
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync * Check if we've reached some threshold and should kick one or two VMs and tell
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync * them to inflate their balloons a bit more... later.
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync * Updates the previous allocations and allocates more pages.
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync * The handy pages are always taken from the 'base' memory account.
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync * The allocated pages are not cleared and will contains random garbage.
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync * @returns VBox status code:
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync * @retval VINF_SUCCESS on success.
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync * @retval VERR_NOT_OWNER if the caller is not an EMT.
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync * @retval VERR_GMM_PAGE_NOT_FOUND if one of the pages to update wasn't found.
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync * @retval VERR_GMM_PAGE_NOT_PRIVATE if one of the pages to update wasn't a
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync * private page.
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync * @retval VERR_GMM_PAGE_NOT_SHARED if one of the pages to update wasn't a
11923fc977be1686f5428c3e790c04d0701a074cvboxsync * shared page.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_NOT_PAGE_OWNER if one of the pages to be updated wasn't
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * owned by the VM.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * that is we're trying to allocate more than we've reserved.
21e3c13c566c3687cba3ac7ccf8c9c85e8a08708vboxsync * @param pVM Pointer to the shared VM structure.
21e3c13c566c3687cba3ac7ccf8c9c85e8a08708vboxsync * @param idCpu VCPU id
21e3c13c566c3687cba3ac7ccf8c9c85e8a08708vboxsync * @param cPagesToUpdate The number of pages to update (starting from the head).
21e3c13c566c3687cba3ac7ccf8c9c85e8a08708vboxsync * @param cPagesToAlloc The number of pages to allocate (starting from the head).
21e3c13c566c3687cba3ac7ccf8c9c85e8a08708vboxsync * @param paPages The array of page descriptors.
21e3c13c566c3687cba3ac7ccf8c9c85e8a08708vboxsync * See GMMPAGEDESC for details on what is expected on input.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @thread EMT.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0AllocateHandyPages(PVM pVM, VMCPUID idCpu, uint32_t cPagesToUpdate, uint32_t cPagesToAlloc, PGMMPAGEDESC paPages)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0AllocateHandyPages: pVM=%p cPagesToUpdate=%#x cPagesToAlloc=%#x paPages=%p\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate, get basics and take the semaphore.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * (This is a relatively busy path, so make predictions where possible.)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn( (cPagesToUpdate && cPagesToUpdate < 1024)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync ("cPagesToUpdate=%#x cPagesToAlloc=%#x\n", cPagesToUpdate, cPagesToAlloc),
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync unsigned iPage = 0;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn( ( paPages[iPage].HCPhysGCPhys <= GMM_GCPHYS_LAST
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync && !(paPages[iPage].HCPhysGCPhys & PAGE_OFFSET_MASK))
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync || paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHAREABLE,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync ("#%#x: %RHp\n", iPage, paPages[iPage].HCPhysGCPhys),
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn( paPages[iPage].idPage <= GMM_PAGEID_LAST
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /*|| paPages[iPage].idPage == NIL_GMM_PAGEID*/,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn( paPages[iPage].idPage <= GMM_PAGEID_LAST
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /*|| paPages[iPage].idSharedPage == NIL_GMM_PAGEID*/,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS, ("#%#x: %RHp\n", iPage, paPages[iPage].HCPhysGCPhys), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
30dab499e07d428e2234ed5a6136cb53360cc93fvboxsync /* No allocations before the initial reservation has been made! */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Perform the updates.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Stop on the first error.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMPAGE pPage = gmmR0GetPage(pGMM, paPages[iPage].idPage);
30dab499e07d428e2234ed5a6136cb53360cc93fvboxsync AssertCompile(NIL_RTHCPHYS > GMM_GCPHYS_LAST && GMM_GCPHYS_UNSHAREABLE > GMM_GCPHYS_LAST);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync if (RT_LIKELY(paPages[iPage].HCPhysGCPhys <= GMM_GCPHYS_LAST))
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pPage->Private.pfn = paPages[iPage].HCPhysGCPhys >> PAGE_SHIFT;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync else if (paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHAREABLE)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* else: NIL_RTHCPHYS nothing */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not owner! hGVM=%#x hSelf=%#x\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync iPage, paPages[iPage].idPage, pPage->Private.hGVM, pGVM->hSelf));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not private! %.*Rhxs\n", iPage, paPages[iPage].idPage, sizeof(*pPage), pPage));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not found! (private)\n", iPage, paPages[iPage].idPage));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMPAGE pPage = gmmR0GetPage(pGMM, paPages[iPage].idSharedPage);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertCompile(NIL_RTHCPHYS > GMM_GCPHYS_LAST && GMM_GCPHYS_UNSHAREABLE > GMM_GCPHYS_LAST);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync gmmR0FreeSharedPage(pGMM, paPages[iPage].idSharedPage, pPage);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not shared!\n", iPage, paPages[iPage].idSharedPage));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not found! (shared)\n", iPage, paPages[iPage].idSharedPage));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Join paths with GMMR0AllocatePages for the allocation.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Note! gmmR0AllocateMoreChunks may leave the protection of the mutex!
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync rc = gmmR0AllocatePages(pGMM, pGVM, cPagesToAlloc, paPages, GMMACCOUNT_BASE);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync rc = gmmR0AllocateMoreChunks(pGMM, pGVM, &pGMM->Private, cPagesToAlloc);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0AllocateHandyPages: returns %Rrc\n", rc));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Allocate one or more pages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This is typically used for ROMs and MMIO2 (VRAM) during VM creation.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The allocated pages are not cleared and will contains random garbage.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code:
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VINF_SUCCESS on success.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_NOT_OWNER if the caller is not an EMT.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
953292a637f7ecfbb7340914e718919c84464f54vboxsync * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * that is we're trying to allocate more than we've reserved.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param cPages The number of pages to allocate.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param paPages Pointer to the page descriptors.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * See GMMPAGEDESC for details on what is expected on input.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param enmAccount The account to charge.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @thread EMT.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0AllocatePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0AllocatePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate, get basics and take the semaphore.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn( paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync || paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHAREABLE
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync && !(paPages[iPage].HCPhysGCPhys & PAGE_OFFSET_MASK)),
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync ("#%#x: %RHp enmAccount=%d\n", iPage, paPages[iPage].HCPhysGCPhys, enmAccount),
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* No allocations before the initial reservation has been made! */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * gmmR0AllocatePages seed loop.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Note! gmmR0AllocateMoreChunks may leave the protection of the mutex!
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync rc = gmmR0AllocatePages(pGMM, pGVM, cPages, paPages, enmAccount);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync rc = gmmR0AllocateMoreChunks(pGMM, pGVM, &pGMM->Private, cPages);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0AllocatePages: returns %Rrc\n", rc));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * VMMR0 request wrapper for GMMR0AllocatePages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns see GMMR0AllocatePages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pReq The request packet.
fb9af443dbf06990f4956d683286ddce29c4dca6vboxsyncGMMR0DECL(int) GMMR0AllocatePagesReq(PVM pVM, VMCPUID idCpu, PGMMALLOCATEPAGESREQ pReq)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate input and pass it on.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[0]),
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync ("%#x < %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[0])),
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(pReq->Hdr.cbReq == RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[pReq->cPages]),
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync ("%#x != %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[pReq->cPages])),
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync return GMMR0AllocatePages(pVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Allocate a large page to represent guest RAM
62984798ef379bf4be6c0ecbb3e61d3d8e30dcecvboxsync * The allocated pages are not cleared and will contains random garbage.
fb9af443dbf06990f4956d683286ddce29c4dca6vboxsync * @returns VBox status code:
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VINF_SUCCESS on success.
cea26cf0a0d390c2cca75cb19cb0e86c580e9d77vboxsync * @retval VERR_NOT_OWNER if the caller is not an EMT.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * that is we're trying to allocate more than we've reserved.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns see GMMR0AllocatePages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param cbPage Large page size
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0AllocateLargePage(PVM pVM, VMCPUID idCpu, uint32_t cbPage, uint32_t *pIdPage, RTHCPHYS *pHCPhys)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0AllocateLargePage: pVM=%p cbPage=%x\n", pVM, cbPage));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertReturn(cbPage == GMM_CHUNK_SIZE, VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate, get basics and take the semaphore.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Not supported in legacy mode where we allocate the memory in ring 3 and lock it in ring 0. */
62984798ef379bf4be6c0ecbb3e61d3d8e30dcecvboxsync const unsigned cPages = (GMM_CHUNK_SIZE >> PAGE_SHIFT);
2afbe132eb7931e0125141eabe3a48e08f1ffab5vboxsync if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages > pGVM->gmm.s.Reserved.cBasePages))
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("GMMR0AllocateLargePage: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGVM->gmm.s.Reserved.cBasePages, pGVM->gmm.s.Allocated.cBasePages, cPages));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Allocate a new continous chunk. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync rc = gmmR0AllocateOneChunk(pGMM, &pGMM->Private, pGVM->hSelf, GMMCHUNKTYPE_CONTINUOUS, &pChunk);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Unlink the new chunk from the free list. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Allocate all pages. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync gmmR0AllocatePage(pGMM, pGVM->hSelf, pChunk, &PageDesc);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Return the first page as we'll use the whole chunk as one big page. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync gmmR0AllocatePage(pGMM, pGVM->hSelf, pChunk, &PageDesc);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Update accounting. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0AllocatePages: returns %Rrc\n", rc));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Free a large page
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code:
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idPage Large page id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0FreeLargePage(PVM pVM, VMCPUID idCpu, uint32_t idPage)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0FreeLargePage: pVM=%p idPage=%x\n", pVM, idPage));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate, get basics and take the semaphore.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Not supported in legacy mode where we allocate the memory in ring 3 and lock it in ring 0. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync const unsigned cPages = (GMM_CHUNK_SIZE >> PAGE_SHIFT);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages < cPages))
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("GMMR0FreeLargePage: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cBasePages, cPages));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Release the memory immediately. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Update accounting. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0FreeLargePage: returns %Rrc\n", rc));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * VMMR0 request wrapper for GMMR0FreeLargePage.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns see GMMR0FreeLargePage.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pReq The request packet.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0FreeLargePageReq(PVM pVM, VMCPUID idCpu, PGMMFREELARGEPAGEREQ pReq)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate input and pass it on.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(pReq->Hdr.cbReq == sizeof(GMMFREEPAGESREQ),
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(GMMFREEPAGESREQ)),
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync return GMMR0FreeLargePage(pVM, idCpu, pReq->idPage);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Frees a chunk, giving it back to the host OS.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGVM This is set when called from GMMR0CleanupVM so we can
fb9af443dbf06990f4956d683286ddce29c4dca6vboxsync * unmap and free the chunk in one go.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pChunk The chunk to free.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic void gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Cleanup hack! Unmap the chunk from the callers address space.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * If there are current mappings of the chunk, then request the
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * VMs to unmap them. Reposition the chunk in the free list so
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * it won't be a likely candidate for allocations.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** @todo R0 -> VM request */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* The chunk can be owned by more than one VM if fBoundMemoryMode is false! */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Try free the memory object.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync int rc = RTR0MemObjFree(pChunk->MemObj, false /* fFreeMappings */);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Unlink it from everywhere.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PAVLU32NODECORE pCore = RTAvlU32Remove(&pGMM->pChunks, pChunk->Core.Key);
62984798ef379bf4be6c0ecbb3e61d3d8e30dcecvboxsync PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(pChunk->Core.Key)];
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Free the Chunk ID and struct.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Free page worker.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The caller does all the statistic decrementing, we do all the incrementing.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance data.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pChunk Pointer to the chunk this page belongs to.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idPage The Page ID.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pPage Pointer to the page.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic void gmmR0FreePageWorker(PGMM pGMM, PGMMCHUNK pChunk, uint32_t idPage, PGMMPAGE pPage)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log3(("F pPage=%p iPage=%#x/%#x u2State=%d iFreeHead=%#x\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pPage, pPage - &pChunk->aPages[0], idPage, pPage->Common.u2State, pChunk->iFreeHead)); NOREF(idPage);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Put the page on the free list.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Assert(pChunk->iFreeHead < RT_ELEMENTS(pChunk->aPages) || pChunk->iFreeHead == UINT16_MAX);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Update statistics (the cShared/cPrivate stats are up to date already),
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * and relink the chunk if necessary.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync if ((pChunk->cFree & GMM_CHUNK_FREE_SET_MASK) == 0)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync gmmR0LinkChunk(pChunk, pChunk->cShared ? &pGMM->Shared : &pGMM->Private);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * If the chunk becomes empty, consider giving memory back to the host OS.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The current strategy is to try give it back if there are other chunks
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * in this free list, meaning if there are at least 240 free pages in this
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * category. Note that since there are probably mappings of the chunk,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * it won't be freed up instantly, which probably screws up this logic
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync if (RT_UNLIKELY( pChunk->cFree == GMM_CHUNK_NUM_PAGES
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Frees a shared page, the page is known to exist and be valid and such.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idPage The Page ID
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pPage The page structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncDECLINLINE(void) gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Converts a private page to a shared page, the page is known to exist and be valid and such.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGVM Pointer to the GVM instance.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param HCPhys Host physical address
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idPage The Page ID
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pPage The page structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncDECLINLINE(void) gmmR0ConvertToSharedPage(PGMM pGMM, PGVM pGVM, RTHCPHYS HCPhys, uint32_t idPage, PGMMPAGE pPage)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Modify the page structure. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pPage->Shared.pfn = (uint32_t)(HCPhys >> PAGE_SHIFT);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Increase the use count of a shared page, the page is known to exist and be valid and such.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGVM Pointer to the GVM instance.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pPage The page structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncDECLINLINE(void) gmmR0UseSharedPage(PGMM pGMM, PGVM pGVM, PGMMPAGE pPage)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Frees a private page, the page is known to exist and be valid and such.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idPage The Page ID
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pPage The page structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncDECLINLINE(void) gmmR0FreePrivatePage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Common worker for GMMR0FreePages and GMMR0BalloonedPages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code:
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval xxx
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance data.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param cPages The number of pages to free.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param paPages Pointer to the page descriptors.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param enmAccount The account this relates to.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic int gmmR0FreePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Check that the request isn't impossible wrt to the account status.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages < cPages))
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("gmmR0FreePages: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cBasePages, cPages));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cShadowPages < cPages))
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("gmmR0FreePages: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cShadowPages, cPages));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cFixedPages < cPages))
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("gmmR0FreePages: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cFixedPages, cPages));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Walk the descriptors and free the pages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Statistics (except the account) are being updated as we go along,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * unlike the alloc code. Also, stop on the first error.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("gmmR0AllocatePages: #%#x/%#x: not owner! hGVM=%#x hSelf=%#x\n", iPage, idPage,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("gmmR0AllocatePages: #%#x/%#x: already free!\n", iPage, idPage));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("gmmR0AllocatePages: #%#x/%#x: not found!\n", iPage, idPage));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Update the account.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync case GMMACCOUNT_BASE: pGVM->gmm.s.Allocated.cBasePages -= iPage; break;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync case GMMACCOUNT_SHADOW: pGVM->gmm.s.Allocated.cShadowPages -= iPage; break;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync case GMMACCOUNT_FIXED: pGVM->gmm.s.Allocated.cFixedPages -= iPage; break;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Any threshold stuff to be done here?
761a066e976face73a49d0edb1daeedd16b4d70fvboxsync * Free one or more pages.
761a066e976face73a49d0edb1daeedd16b4d70fvboxsync * This is typically used at reset time or power off.
690be237b3d7b59a8a25ecf07d9a94aa432fd385vboxsync * @returns VBox status code:
690be237b3d7b59a8a25ecf07d9a94aa432fd385vboxsync * @retval xxx
761a066e976face73a49d0edb1daeedd16b4d70fvboxsync * @param pVM Pointer to the shared VM structure.
761a066e976face73a49d0edb1daeedd16b4d70fvboxsync * @param idCpu VCPU id
a1d83f29ade4c8f9fe95fc75d3fb2642f36081c1vboxsync * @param cPages The number of pages to allocate.
a1d83f29ade4c8f9fe95fc75d3fb2642f36081c1vboxsync * @param paPages Pointer to the page descriptors containing the Page IDs for each page.
a1d83f29ade4c8f9fe95fc75d3fb2642f36081c1vboxsync * @param enmAccount The account this relates to.
a1d83f29ade4c8f9fe95fc75d3fb2642f36081c1vboxsync * @thread EMT.
a1d83f29ade4c8f9fe95fc75d3fb2642f36081c1vboxsyncGMMR0DECL(int) GMMR0FreePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
a1d83f29ade4c8f9fe95fc75d3fb2642f36081c1vboxsync LogFlow(("GMMR0FreePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));
a1d83f29ade4c8f9fe95fc75d3fb2642f36081c1vboxsync * Validate input and get the basics.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn( paPages[iPage].idPage <= GMM_PAGEID_LAST
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /*|| paPages[iPage].idPage == NIL_GMM_PAGEID*/,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Take the semaphore and call the worker function.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync rc = gmmR0FreePages(pGMM, pGVM, cPages, paPages, enmAccount);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * VMMR0 request wrapper for GMMR0FreePages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns see GMMR0FreePages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pReq The request packet.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0FreePagesReq(PVM pVM, VMCPUID idCpu, PGMMFREEPAGESREQ pReq)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate input and pass it on.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[0]),
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync ("%#x < %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[0])),
953292a637f7ecfbb7340914e718919c84464f54vboxsync AssertMsgReturn(pReq->Hdr.cbReq == RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[pReq->cPages]),
953292a637f7ecfbb7340914e718919c84464f54vboxsync ("%#x != %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[pReq->cPages])),
953292a637f7ecfbb7340914e718919c84464f54vboxsync return GMMR0FreePages(pVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
953292a637f7ecfbb7340914e718919c84464f54vboxsync * Report back on a memory ballooning request.
953292a637f7ecfbb7340914e718919c84464f54vboxsync * The request may or may not have been initiated by the GMM. If it was initiated
953292a637f7ecfbb7340914e718919c84464f54vboxsync * by the GMM it is important that this function is called even if no pages were
953292a637f7ecfbb7340914e718919c84464f54vboxsync * ballooned.
953292a637f7ecfbb7340914e718919c84464f54vboxsync * @returns VBox status code:
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_ATTEMPT_TO_DEFLATE_TOO_MUCH
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @retval VERR_GMM_OVERCOMMITED_TRY_AGAIN_IN_A_BIT - reset condition
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * indicating that we won't necessarily have sufficient RAM to boot
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * the VM again and that it should pause until this changes (we'll try
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * balloon some other VM). (For standard deflate we have little choice
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * but to hope the VM won't use the memory that was returned to it.)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param enmAction Inflate/deflate/reset
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param cBalloonedPages The number of pages that was ballooned.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @thread EMT.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0BalloonedPages(PVM pVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0BalloonedPages: pVM=%p enmAction=%d cBalloonedPages=%#x\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(cBalloonedPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cBalloonedPages), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate input and get the basics.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Take the sempahore and do some more validations.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync if (pGVM->gmm.s.Allocated.cBasePages >= cBalloonedPages)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Record the ballooned memory.
62984798ef379bf4be6c0ecbb3e61d3d8e30dcecvboxsync /* Codepath never taken. Might be interesting in the future to request ballooned memory from guests in low memory conditions.. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGVM->gmm.s.cReqActuallyBalloonedPages += cBalloonedPages;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("GMMR0BalloonedPages: +%#x - Global=%#llx / VM: Total=%#llx Req=%#llx Actual=%#llx (pending)\n", cBalloonedPages,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqBalloonedPages, pGVM->gmm.s.cReqActuallyBalloonedPages));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("GMMR0BalloonedPages: +%#x - Global=%#llx / VM: Total=%#llx (user)\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync cBalloonedPages, pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Deflate. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync if (pGVM->gmm.s.cBalloonedPages >= cBalloonedPages)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Record the ballooned memory.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("GMMR0BalloonedPages: -%#x - Global=%#llx / VM: Total=%#llx Req=%#llx\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync cBalloonedPages, pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqDeflatePages));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Anything we need to do here now when the request has been completed?
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("GMMR0BalloonedPages: -%#x - Global=%#llx / VM: Total=%#llx (user)\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync cBalloonedPages, pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Reset to an empty balloon. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Assert(pGMM->cBalloonedPages >= pGVM->gmm.s.cBalloonedPages);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGMM->cBalloonedPages -= pGVM->gmm.s.cBalloonedPages;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0BalloonedPages: returns %Rrc\n", rc));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * VMMR0 request wrapper for GMMR0BalloonedPages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns see GMMR0BalloonedPages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM Pointer to the shared VM structure.
62984798ef379bf4be6c0ecbb3e61d3d8e30dcecvboxsync * @param idCpu VCPU id
62984798ef379bf4be6c0ecbb3e61d3d8e30dcecvboxsync * @param pReq The request packet.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0BalloonedPagesReq(PVM pVM, VMCPUID idCpu, PGMMBALLOONEDPAGESREQ pReq)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate input and pass it on.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(pReq->Hdr.cbReq == sizeof(GMMBALLOONEDPAGESREQ),
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync ("%#x < %#x\n", pReq->Hdr.cbReq, sizeof(GMMBALLOONEDPAGESREQ)),
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync return GMMR0BalloonedPages(pVM, idCpu, pReq->enmAction, pReq->cBalloonedPages);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Return memory statistics for the hypervisor
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code:
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pReq The request packet.
a95fedb133944ec689b02e94077b0387bda0262bvboxsyncGMMR0DECL(int) GMMR0QueryHypervisorMemoryStatsReq(PVM pVM, PGMMMEMSTATSREQ pReq)
a95fedb133944ec689b02e94077b0387bda0262bvboxsync * Validate input and pass it on.
a95fedb133944ec689b02e94077b0387bda0262bvboxsync AssertMsgReturn(pReq->Hdr.cbReq == sizeof(GMMMEMSTATSREQ),
a95fedb133944ec689b02e94077b0387bda0262bvboxsync ("%#x < %#x\n", pReq->Hdr.cbReq, sizeof(GMMMEMSTATSREQ)),
a95fedb133944ec689b02e94077b0387bda0262bvboxsync * Validate input and get the basics.
a95fedb133944ec689b02e94077b0387bda0262bvboxsync pReq->cFreePages = (pGMM->cChunks << (GMM_CHUNK_SHIFT- PAGE_SHIFT)) - pGMM->cAllocatedPages;
021a33be84282e41b811563b5f60f3ada196af3evboxsync * Return memory statistics for the VM
021a33be84282e41b811563b5f60f3ada196af3evboxsync * @returns VBox status code:
021a33be84282e41b811563b5f60f3ada196af3evboxsync * @param pVM Pointer to the shared VM structure.
021a33be84282e41b811563b5f60f3ada196af3evboxsync * @parma idCpu Cpu id.
021a33be84282e41b811563b5f60f3ada196af3evboxsync * @param pReq The request packet.
021a33be84282e41b811563b5f60f3ada196af3evboxsyncGMMR0DECL(int) GMMR0QueryMemoryStatsReq(PVM pVM, VMCPUID idCpu, PGMMMEMSTATSREQ pReq)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate input and pass it on.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(pReq->Hdr.cbReq == sizeof(GMMMEMSTATSREQ),
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync ("%#x < %#x\n", pReq->Hdr.cbReq, sizeof(GMMMEMSTATSREQ)),
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate input and get the basics.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Take the sempahore and do some more validations.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pReq->cAllocPages = pGVM->gmm.s.Allocated.cBasePages;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pReq->cBalloonedPages = pGVM->gmm.s.cBalloonedPages;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pReq->cFreePages = pReq->cMaxPages - pReq->cAllocPages;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR3QueryVMMemoryStats: returns %Rrc\n", rc));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Unmaps a chunk previously mapped into the address space of the current process.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance data.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGVM Pointer to the Global VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pChunk Pointer to the chunk to be unmapped.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Find the mapping and try unmapping it.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* unmap */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync int rc = RTR0MemObjFree(pChunk->paMappings[i].MapObj, false /* fFreeMappings (NA) */);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* update the record. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pChunk->paMappings[i] = pChunk->paMappings[pChunk->cMappings];
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pChunk->paMappings[pChunk->cMappings].MapObj = NIL_RTR0MEMOBJ;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("gmmR0MapChunk: Chunk %#x is not mapped into pGVM=%p/%#x\n", pChunk->Core.Key, pGVM, pGVM->hSelf));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Maps a chunk into the user address space of the current process.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGMM Pointer to the GMM instance data.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGVM Pointer to the Global VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pChunk Pointer to the chunk to be mapped.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param ppvR3 Where to store the ring-3 address of the mapping.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * In the VERR_GMM_CHUNK_ALREADY_MAPPED case, this will be
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * contain the address of the existing mapping.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic int gmmR0MapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * If we're in legacy mode this is simple.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Check to see if the chunk is already mapped.
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync *ppvR3 = RTR0MemObjAddressR3(pChunk->paMappings[i].MapObj);
cce0c6096dee0c5353bb74431dc47b05f87a1c6dvboxsync Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Do the mapping.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync int rc = RTR0MemObjMapUser(&MapObj, pChunk->MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* reallocate the array? */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync void *pvMappings = RTMemRealloc(pChunk->paMappings, (pChunk->cMappings + 2 /*8*/) * sizeof(pChunk->paMappings[0]));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync rc = RTR0MemObjFree(MapObj, false /* fFreeMappings (NA) */);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* insert new entry */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pChunk->paMappings[pChunk->cMappings].MapObj = MapObj;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Check if a chunk is mapped into the specified VM
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns mapped yes/no
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pGVM Pointer to the Global VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pChunk Pointer to the chunk to be mapped.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param ppvR3 Where to store the ring-3 address of the mapping.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic int gmmR0IsChunkMapped(PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Check to see if the chunk is already mapped.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync *ppvR3 = RTR0MemObjAddressR3(pChunk->paMappings[i].MapObj);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync return true;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync return false;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Map a chunk and/or unmap another chunk.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The mapping and unmapping applies to the current process.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * This API does two things because it saves a kernel call per mapping when
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * when the ring-3 mapping cache is full.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM The VM.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idChunkMap The chunk to map. NIL_GMM_CHUNKID if nothing to map.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idChunkUnmap The chunk to unmap. NIL_GMM_CHUNKID if nothing to unmap.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param ppvR3 Where to store the address of the mapped chunk. NULL is ok if nothing to map.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @thread EMT
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0MapUnmapChunk(PVM pVM, VMCPUID idCpu, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0MapUnmapChunk: pVM=%p idChunkMap=%#x idChunkUnmap=%#x ppvR3=%p\n",
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate input and get the basics.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(idChunkMap <= GMM_CHUNKID_LAST, ("%#x\n", idChunkMap), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(idChunkUnmap <= GMM_CHUNKID_LAST, ("%#x\n", idChunkUnmap), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Take the semaphore and do the work.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The unmapping is done last since it's easier to undo a mapping than
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * undoing an unmapping. The ring-3 mapping cache cannot not be so big
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * that it pushes the user virtual address space to within a chunk of
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * it it's limits, so, no problem here.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("GMMR0MapUnmapChunk: idChunkMap=%#x\n", idChunkMap));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMCHUNK pUnmap = gmmR0GetChunk(pGMM, idChunkUnmap);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("GMMR0MapUnmapChunk: idChunkUnmap=%#x\n", idChunkUnmap));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0MapUnmapChunk: returns %Rrc\n", rc));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * VMMR0 request wrapper for GMMR0MapUnmapChunk.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns see GMMR0MapUnmapChunk.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pReq The request packet.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0MapUnmapChunkReq(PVM pVM, VMCPUID idCpu, PGMMMAPUNMAPCHUNKREQ pReq)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate input and pass it on.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync return GMMR0MapUnmapChunk(pVM, idCpu, pReq->idChunkMap, pReq->idChunkUnmap, &pReq->pvR3);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Legacy mode API for supplying pages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * The specified user address points to a allocation chunk sized block that
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * will be locked down and used by the GMM when the GM asks for pages.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM The VM.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pvR3 Pointer to the chunk size memory block to lock down.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0SeedChunk(PVM pVM, VMCPUID idCpu, RTR3PTR pvR3)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate input and get the basics.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertReturn(!(PAGE_OFFSET_MASK & pvR3), VERR_INVALID_POINTER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("GMMR0SeedChunk: not in legacy allocation mode!\n"));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Lock the memory before taking the semaphore.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync rc = RTR0MemObjLockUser(&MemObj, pvR3, GMM_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Grab the lock. */
3df4d0fc61060dd1eb530bf39f2184c39eec7dfbvboxsync * Add a new chunk with our hGVM.
3df4d0fc61060dd1eb530bf39f2184c39eec7dfbvboxsync rc = gmmR0RegisterChunk(pGMM, &pGMM->Private, MemObj, pGVM->hSelf, GMMCHUNKTYPE_NON_CONTINUOUS);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync LogFlow(("GMMR0SeedChunk: rc=%d (pvR3=%p)\n", rc, pvR3));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Registers a new shared module for the VM
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM VM handle
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pszModuleName Module name
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pszVersion Module version
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param GCBaseAddr Module base address
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param cbModule Module size
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param cRegions Number of shared region descriptors
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pRegions Shared region(s)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0RegisterSharedModule(PVM pVM, VMCPUID idCpu, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule,
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync unsigned cRegions, VMMDEVSHAREDREGIONDESC *pRegions)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate input and get the basics.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Take the semaphore and do some more validations.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync bool fNewModule = false;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Check if this module is already locally registered. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)RTAvlGCPtrGet(&pGVM->gmm.s.pSharedModuleTree, GCBaseAddr);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pRecVM = (PGMMSHAREDMODULEPERVM)RTMemAllocZ(sizeof(*pRecVM));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync bool ret = RTAvlGCPtrInsert(&pGVM->gmm.s.pSharedModuleTree, &pRecVM->Core);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Check if this module is already globally registered. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMSHAREDMODULE pGlobalModule = (PGMMSHAREDMODULE)RTAvlGCPtrGet(&pGMM->pGlobalSharedModuleTree, GCBaseAddr);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGlobalModule = (PGMMSHAREDMODULE)RTMemAllocZ(RT_OFFSETOF(GMMSHAREDMODULE, aRegions[cRegions]));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Input limit already safe; no need to check again. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** @todo replace with RTStrCopy */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync for (unsigned i = 0; i < cRegions; i++)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGlobalModule->aRegions[i].GCRegionAddr = pRegions[i].GCRegionAddr;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGlobalModule->aRegions[i].cbRegion = pRegions[i].cbRegion;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGlobalModule->aRegions[i].paHCPhysPageID = NULL; /* uninitialized. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Save reference. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Make sure the name and version are identical. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** @todo replace with RTStrNCmp */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Save reference. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync || pRecVM->fCollision == true) /* colliding module unregistered and new one registered since the last check */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * VMMR0 request wrapper for GMMR0RegisterSharedModule.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns see GMMR0RegisterSharedModule.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pReq The request packet.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0RegisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMREGISTERSHAREDMODULEREQ pReq)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate input and pass it on.
953292a637f7ecfbb7340914e718919c84464f54vboxsync AssertMsgReturn(pReq->Hdr.cbReq >= sizeof(*pReq) && pReq->Hdr.cbReq == RT_UOFFSETOF(GMMREGISTERSHAREDMODULEREQ, aRegions[pReq->cRegions]), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
953292a637f7ecfbb7340914e718919c84464f54vboxsync return GMMR0RegisterSharedModule(pVM, idCpu, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule, pReq->cRegions, pReq->aRegions);
953292a637f7ecfbb7340914e718919c84464f54vboxsync * Unregisters a shared module for the VM
953292a637f7ecfbb7340914e718919c84464f54vboxsync * @returns VBox status code.
953292a637f7ecfbb7340914e718919c84464f54vboxsync * @param pVM VM handle
953292a637f7ecfbb7340914e718919c84464f54vboxsync * @param idCpu VCPU id
953292a637f7ecfbb7340914e718919c84464f54vboxsync * @param pszModuleName Module name
953292a637f7ecfbb7340914e718919c84464f54vboxsync * @param pszVersion Module version
953292a637f7ecfbb7340914e718919c84464f54vboxsync * @param GCBaseAddr Module base address
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param cbModule Module size
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0UnregisterSharedModule(PVM pVM, VMCPUID idCpu, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate input and get the basics.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Take the semaphore and do some more validations.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)RTAvlGCPtrGet(&pGVM->gmm.s.pSharedModuleTree, GCBaseAddr);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Remove reference to global shared module. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Free the ranges, but leave the pages intact as there might still be references; they will be cleared by the COW mechanism. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync rc = VERR_PGM_SHARED_MODULE_REGISTRATION_INCONSISTENCY;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * VMMR0 request wrapper for GMMR0UnregisterSharedModule.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns see GMMR0UnregisterSharedModule.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM Pointer to the shared VM structure.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pReq The request packet.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0UnregisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMUNREGISTERSHAREDMODULEREQ pReq)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate input and pass it on.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync return GMMR0UnregisterSharedModule(pVM, idCpu, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Checks specified shared module range for changes
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Performs the following tasks:
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * - if a shared page is new, then it changes the GMM page type to shared and returns it in the paPageDesc array
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * - if a shared page already exists, then it checks if the VM page is identical and if so frees the VM page and returns the shared page in the paPageDesc array
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM VM handle
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pReq Module description
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idxRegion Region index
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param cPages Number of entries in the paPageDesc array
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param paPageDesc Page descriptor array (in/out)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0SharedModuleCheckRange(PVM pVM, VMCPUID idCpu, PGMMREGISTERSHAREDMODULEREQ pReq, unsigned idxRegion, unsigned cPages, PGMMSHAREDPAGEDESC paPageDesc)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertReturn(idxRegion < pReq->cRegions, VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync AssertReturn(cPages == (pReq->aRegions[idxRegion].cbRegion >> PAGE_SHIFT), VERR_INVALID_PARAMETER);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("GMMR0SharedModuleCheckRange %s base %RGv region %d cPages %d\n", pReq->szName, pReq->GCBaseAddr, idxRegion, cPages));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate input and get the basics.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Take the semaphore and do some more validations.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMSHAREDMODULEPERVM pLocalModule = (PGMMSHAREDMODULEPERVM)RTAvlGCPtrGet(&pGVM->gmm.s.pSharedModuleTree, pReq->GCBaseAddr);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMSHAREDMODULE pGlobalModule = pLocalModule->pGlobalModule;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMSHAREDREGIONDESC pGlobalRegion = &pGlobalModule->aRegions[idxRegion];
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* First time; create a page descriptor array. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGlobalRegion->paHCPhysPageID = (uint32_t *)RTMemAlloc(cPages * sizeof(*pGlobalRegion->paHCPhysPageID));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Invalidate all descriptors. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync for (unsigned i = 0; i < cPages; i++)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Check all pages in the region. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync for (unsigned i = 0; i < cPages; i++)
a1d83f29ade4c8f9fe95fc75d3fb2642f36081c1vboxsync /* Valid page present? */
a1d83f29ade4c8f9fe95fc75d3fb2642f36081c1vboxsync /* We've seen this shared page for the first time? */
a1d83f29ade4c8f9fe95fc75d3fb2642f36081c1vboxsync if (pGlobalRegion->paHCPhysPageID == NIL_GMM_PAGEID)
a1d83f29ade4c8f9fe95fc75d3fb2642f36081c1vboxsync /* Easy case: just change the internal page type. */
a1d83f29ade4c8f9fe95fc75d3fb2642f36081c1vboxsync PGMMPAGE pPage = gmmR0GetPage(pGMM, paPageDesc[i].uHCPhysPageId);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("New shared page guest %RGp host %RHp\n", paPageDesc[i].GCPhys, paPageDesc[i].HCPhys));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Assert(paPageDesc[i].HCPhys == (pPage->Private.pfn << 12));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync gmmR0ConvertToSharedPage(pGMM, pGVM, paPageDesc[i].HCPhys, paPageDesc[i].uHCPhysPageId, pPage);
953292a637f7ecfbb7340914e718919c84464f54vboxsync /* Keep track of these references. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pGlobalRegion->paHCPhysPageID[i] = paPageDesc[i].uHCPhysPageId;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Assert(paPageDesc[i].uHCPhysPageId != pGlobalRegion->paHCPhysPageID[i]);
953292a637f7ecfbb7340914e718919c84464f54vboxsync /* Get the shared page source. */
953292a637f7ecfbb7340914e718919c84464f54vboxsync PGMMPAGE pPage = gmmR0GetPage(pGMM, pGlobalRegion->paHCPhysPageID[i]);
953292a637f7ecfbb7340914e718919c84464f54vboxsync Assert(pPage->Common.u2State == GMM_PAGE_STATE_SHARED);
953292a637f7ecfbb7340914e718919c84464f54vboxsync Log(("Replace existing page guest %RGp host %RHp -> %RHp\n", paPageDesc[i].GCPhys, paPageDesc[i].HCPhys, pPage->Shared.pfn << PAGE_SHIFT));
953292a637f7ecfbb7340914e718919c84464f54vboxsync /* Calculate the virtual address of the local page. */
953292a637f7ecfbb7340914e718919c84464f54vboxsync pChunk = gmmR0GetChunk(pGMM, paPageDesc[i].uHCPhysPageId >> GMM_CHUNKID_SHIFT);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync if (!gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pbLocalPage = pbChunk + ((paPageDesc[i].uHCPhysPageId & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Calculate the virtual address of the shared page. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pChunk = gmmR0GetChunk(pGMM, pGlobalRegion->paHCPhysPageID[i] >> GMM_CHUNKID_SHIFT);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Assert(pChunk); /* can't fail as gmmR0GetPage succeeded. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Get the virtual address of the physical page; map the chunk into the VM process if not already done. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync if (!gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync rc = gmmR0MapChunk(pGMM, pGVM, pChunk, (PRTR3PTR)&pbChunk);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync pbSharedPage = pbChunk + ((pGlobalRegion->paHCPhysPageID[i] & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /** @todo write ASMMemComparePage. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync Log(("Unexpected differences found between local and shared page; skip\n"));
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Free the old local page. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync rc = gmmR0FreePages(pGMM, pGVM, 1, &PageDesc, GMMACCOUNT_BASE);
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync /* Pass along the new physical address & page id. */
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync paPageDesc[i].HCPhys = pPage->Shared.pfn << PAGE_SHIFT;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync paPageDesc[i].uHCPhysPageId = pGlobalRegion->paHCPhysPageID[i];
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * RTAvlU32Destroy callback.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns 0
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pNode The node to destroy.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pvGVM The GVM handle.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncstatic DECLCALLBACK(int) gmmR0CleanupSharedModule(PAVLGCPTRNODECORE pNode, void *pvGVM)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)pNode;
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Removes all shared modules for the specified VM
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @returns VBox status code.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param pVM VM handle
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * @param idCpu VCPU id
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsyncGMMR0DECL(int) GMMR0ResetSharedModules(PVM pVM, VMCPUID idCpu)
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Validate input and get the basics.
0b74a2f80aba476dc8be8bc1c63891fc53945986vboxsync * Take the semaphore and do some more validations.