/* $Id$ */
/** @file
 * GMM - Global Memory Manager.
 */

/*
 * Copyright (C) 2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/** @page pg_gmm GMM - The Global Memory Manager
 *
 * As the name indicates, this component is responsible for global memory
 * management. Currently only guest RAM is allocated from the GMM, but this
 * may change to include shadow page tables and other bits later.
 *
 * Guest RAM is managed as individual pages, but allocated from the host OS
 * in chunks for reasons of portability / efficiency. To minimize the memory
 * footprint, all tracking structures must be as small as possible without
 * incurring unnecessary performance penalties.
 *
 * The allocation chunks have a fixed size, defined at compile time by the
 * #GMM_CHUNK_SIZE \#define.
 *
 * Each chunk is given a unique ID. Each page also has a unique ID. The
 * relationship between the two IDs is:
 * @code
 *  GMM_CHUNK_SHIFT = log2(GMM_CHUNK_SIZE / PAGE_SIZE);
 *  idPage = (idChunk << GMM_CHUNK_SHIFT) | iPage;
 * @endcode
 * Where iPage is the index of the page within the chunk. This ID scheme
 * permits efficient chunk and page lookup, but it relies on the chunk size
 * being set at compile time. The chunks are organized in an AVL tree with
 * their IDs being the keys.
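 *
 * Translating a page ID back to its tracking data is thus a two-step
 * operation: shift out the chunk ID, look it up in the AVL tree, and index
 * into the chunk's page array. A minimal sketch, using the tree and array
 * member names defined later in this file:
 * @code
 *  idChunk = idPage >> GMM_CHUNK_SHIFT;
 *  pChunk  = (PGMMCHUNK)RTAvlU32Get(&pGMM->pChunks, idChunk);
 *  pPage   = &pChunk->aPages[idPage & (GMM_CHUNK_NUM_PAGES - 1)];
 * @endcode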
 *
 * The physical address of each page in an allocation chunk is maintained by
 * the #RTR0MEMOBJ and obtained using #RTR0MemObjGetPagePhysAddr. There is no
 * need to duplicate this information (it'd cost 8 bytes per page if we did).
 *
 * So what do we need to track per page? Most importantly we need to know
 * which state the page is in:
 *   - Private - Allocated for (eventually) backing one particular VM page.
 *   - Shared  - Read-only page that is used by one or more VMs and treated
 *               as COW by PGM.
 *   - Free    - Not used by anyone.
 *
 * For the page replacement operations (sharing, defragmenting and freeing)
 * to be somewhat efficient, private pages need to be associated with a
 * particular page in a particular VM.
 *
 * Tracking the usage of shared pages is impractical and expensive, so we'll
 * settle for a reference counting system instead.
 *
 * Free pages will be chained on LIFOs.
 *
 * On 64-bit systems we will use a 64-bit bitfield per page, while on 32-bit
 * systems a 32-bit bitfield will have to suffice because of address space
 * limitations. The #GMMPAGE structure shows the details.
 *
 *
 * @section sec_gmm_alloc_strat Page Allocation Strategy
 *
 * The strategy for allocating pages has to take fragmentation and shared
 * pages into account, or we may end up with 2000 chunks with only
 * a few pages in each. Shared pages cannot easily be reallocated because
 * of the inaccurate usage accounting (see above). Private pages can be
 * reallocated by a defragmentation thread in the same manner that sharing
 * is done.
 *
 * The first approach is to manage the free pages in two sets depending on
 * whether they are mainly for the allocation of shared or private pages.
 * In the initial implementation there will be almost no possibility for
 * mixing shared and private pages in the same chunk (only if we're really
 * stressed on memory), but when we implement forking of VMs and have to
 * deal with lots of COW pages it'll start getting kind of interesting.
 *
 * The sets are lists of chunks with approximately the same number of
 * free pages. Say the chunk size is 1MB, meaning 256 pages, and a set
 * consists of 16 lists. So, the first list will contain the chunks with
 * 1-16 free pages, the second covers 17-32, and so on. The chunks will be
 * moved between the lists as pages are freed up or allocated.
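 *
 * A chunk's list within a set follows directly from its free page count.
 * A sketch of the relinking computation, assuming the
 * #GMM_CHUNK_FREE_SET_SHIFT constant and the list head array defined below:
 * @code
 *  iList = (pChunk->cFree - 1) >> GMM_CHUNK_FREE_SET_SHIFT;
 *  pChunk->pFreeNext = pSet->apLists[iList];
 *  pSet->apLists[iList] = pChunk;
 * @endcode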
 *
 *
 * @section sec_gmm_costs Costs
 *
 * The per page cost in kernel space is 32-bit plus whatever RTR0MEMOBJ
 * entails. In addition there is the chunk cost of approximately
 * (sizeof(RTR0MEMOBJ) + sizeof(CHUNK)) / 2^CHUNK_SHIFT bytes per page.
 *
 * On Windows the per page #RTR0MEMOBJ cost is 32-bit on 32-bit Windows
 * and 64-bit on 64-bit Windows (a PFN_NUMBER in the MDL). So, 64 bits per
 * page. The cost on Linux is identical, but here it's because of
 * sizeof(struct page *).
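 *
 * As a rough worked example (illustrative figures only): with 2 MB chunks,
 * i.e. 512 pages per chunk, the aPages array alone is 512 * 8 = 4096 bytes
 * on a 64-bit host, so the chunk-level overhead comes to roughly
 * (4096 + sizeof(RTR0MEMOBJ)) / 512, i.e. on the order of ten bytes per
 * page on top of the per page cost above.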
 *
 *
 * @section sec_gmm_legacy Legacy Mode for Non-Tier-1 Platforms
 *
 * In legacy mode the page source is locked user pages and not
 * #RTR0MemObjAllocPhysNC; this means that a page can only be allocated
 * by the VM that locked it. We will make no attempt at implementing
 * page sharing on these systems, just do enough to make it all work.
 *
 *
 * @subsection sub_gmm_locking Serializing
 *
 * One simple fast mutex will be employed in the initial implementation, not
 * two as mentioned in @ref subsec_pgmPhys_Serializing.
 *
 * @see @ref subsec_pgmPhys_Serializing
 *
 *
 * @section sec_gmm_overcommit Memory Over-Commitment Management
 *
 * The GVM will have to do the system wide memory over-commitment
 * management. My current ideas are:
 *   - Per VM over-commitment policy that indicates how much to initially
 *     commit to it and what to do in an out-of-memory situation.
 *   - Prevent overtaxing the host.
 *
 * There are some challenges here, the main ones are configurability and
 * security. Should we for instance permit anyone to request 100% memory
 * commitment? Who should be allowed to do runtime adjustments of the
 * config? And how do we prevent these settings from being lost when the
 * last VM process exits? The solution is probably to have an optional root
 * daemon that will keep VMMR0.r0 in memory and enable the security
 * measures.
 *
 *
 *
 * @section sec_gmm_numa NUMA
 *
 * NUMA considerations will be designed and implemented a bit later.
 *
 * The preliminary guess is that we will have to try to allocate memory as
 * close as possible to the CPUs the VM is executed on (EMT and additional
 * CPU threads). Which means it's mostly about allocation and sharing
 * policies. Both the scheduler and allocator interface will have to supply
 * some NUMA info and we'll need to have a way to calculate access costs.
 *
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_GMM
#include <VBox/vm.h>
#include <VBox/gmm.h>
#include "GMMR0Internal.h"
#include <VBox/gvm.h>
#include <VBox/pgm.h>
#include <VBox/log.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/asm.h>
#include <iprt/avl.h>
#include <iprt/mem.h>
#include <iprt/memobj.h>
#include <iprt/semaphore.h>
#include <iprt/string.h>


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/** Pointer to a set of free chunks. */
typedef struct GMMCHUNKFREESET *PGMMCHUNKFREESET;

/** Pointer to a GMM allocation chunk. */
typedef struct GMMCHUNK *PGMMCHUNK;

/**
 * The per-page tracking structure employed by the GMM.
 *
 * On 32-bit hosts some trickery is necessary to compress all
 * the information into 32 bits. When the fSharedFree member is set,
 * the 30th bit decides whether it's a free page or not.
 *
 * Because of the different layout on 32-bit and 64-bit hosts, macros
 * are used to get and set some of the data.
 */
typedef union GMMPAGE
{
#if HC_ARCH_BITS == 64
    /** Unsigned integer view. */
    uint64_t u;

    /** The common view. */
    struct GMMPAGECOMMON
    {
        uint32_t    uStuff1 : 32;
        uint32_t    uStuff2 : 30;
        /** The page state. */
        uint32_t    u2State : 2;
    } Common;

    /** The view of a private page. */
    struct GMMPAGEPRIVATE
    {
        /** The guest page frame number. (Max addressable: 2 ^ 44 - 16) */
        uint32_t    pfn;
        /** The GVM handle. (64K VMs) */
        uint32_t    hGVM : 16;
        /** Reserved. */
        uint32_t    u16Reserved : 14;
        /** The page state. */
        uint32_t    u2State : 2;
    } Private;

    /** The view of a shared page. */
    struct GMMPAGESHARED
    {
        /** The host page frame number. (Max addressable: 2 ^ 44 - 16) */
        uint32_t    pfn;
        /** The reference count (64K VMs). */
        uint32_t    cRefs : 16;
        /** Reserved. Checksum or something? Two hGVMs for forking? */
        uint32_t    u14Reserved : 14;
        /** The page state. */
        uint32_t    u2State : 2;
    } Shared;

    /** The view of a free page. */
    struct GMMPAGEFREE
    {
        /** The index of the next page in the free list. UINT16_MAX is NIL. */
        uint16_t    iNext;
        /** Reserved. Checksum or something? */
        uint16_t    u16Reserved0;
        /** Reserved. Checksum or something? */
        uint32_t    u30Reserved1 : 30;
        /** The page state. */
        uint32_t    u2State : 2;
    } Free;

#else /* 32-bit */
    /** Unsigned integer view. */
    uint32_t u;

    /** The common view. */
    struct GMMPAGECOMMON
    {
        uint32_t    uStuff  : 30;
        /** The page state. */
        uint32_t    u2State : 2;
    } Common;

    /** The view of a private page. */
    struct GMMPAGEPRIVATE
    {
        /** The guest page frame number. (Max addressable: 2 ^ 36) */
        uint32_t    pfn : 24;
        /** The GVM handle. (127 VMs) */
        uint32_t    hGVM : 7;
        /** The top page state bit, MBZ. */
        uint32_t    fZero : 1;
    } Private;

    /** The view of a shared page. */
    struct GMMPAGESHARED
    {
        /** The reference count. */
        uint32_t    cRefs : 30;
        /** The page state. */
        uint32_t    u2State : 2;
    } Shared;

    /** The view of a free page. */
    struct GMMPAGEFREE
    {
        /** The index of the next page in the free list. UINT16_MAX is NIL. */
        uint32_t    iNext : 16;
        /** Reserved. Checksum or something? */
        uint32_t    u14Reserved : 14;
        /** The page state. */
        uint32_t    u2State : 2;
    } Free;
#endif
} GMMPAGE;
AssertCompileSize(GMMPAGE, sizeof(RTHCUINTPTR));
/** Pointer to a GMMPAGE. */
typedef GMMPAGE *PGMMPAGE;


/** @name The Page States.
 * @{ */
/** A private page. */
#define GMM_PAGE_STATE_PRIVATE      0
/** A private page - alternative value used on the 32-bit implementation.
 * This will never be used on 64-bit hosts. */
#define GMM_PAGE_STATE_PRIVATE_32   1
/** A shared page. */
#define GMM_PAGE_STATE_SHARED       2
/** A free page. */
#define GMM_PAGE_STATE_FREE         3
/** @} */
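
/*
 * Example of how the GMMPAGE views and states compose: chaining all pages
 * of a freshly initialized chunk onto its free list would look roughly like
 * the sketch below (illustrative only; pChunk and its aPages array are
 * defined further down, and UINT16_MAX is the NIL index).
 *
 * @code
 *  for (unsigned iPage = 0; iPage < RT_ELEMENTS(pChunk->aPages) - 1; iPage++)
 *  {
 *      pChunk->aPages[iPage].Free.u2State = GMM_PAGE_STATE_FREE;
 *      pChunk->aPages[iPage].Free.iNext   = iPage + 1;
 *  }
 *  pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.u2State = GMM_PAGE_STATE_FREE;
 *  pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.iNext   = UINT16_MAX;
 * @endcode
 */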


/** @def GMM_PAGE_IS_PRIVATE
 *
 * @returns true if private, false if not.
 * @param   pPage       The GMM page.
 */
#if HC_ARCH_BITS == 64
# define GMM_PAGE_IS_PRIVATE(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_PRIVATE )
#else
# define GMM_PAGE_IS_PRIVATE(pPage) ( (pPage)->Private.fZero == 0 )
#endif

/** @def GMM_PAGE_IS_SHARED
 *
 * @returns true if shared, false if not.
 * @param   pPage       The GMM page.
 */
#define GMM_PAGE_IS_SHARED(pPage)   ( (pPage)->Common.u2State == GMM_PAGE_STATE_SHARED )

/** @def GMM_PAGE_IS_FREE
 *
 * @returns true if free, false if not.
 * @param   pPage       The GMM page.
 */
#define GMM_PAGE_IS_FREE(pPage)     ( (pPage)->Common.u2State == GMM_PAGE_STATE_FREE )

/** @def GMM_PAGE_PFN_LAST
 * The last valid guest pfn range.
 * @remark Some of the values outside the range have special meaning,
 *         see GMM_PAGE_PFN_UNSHAREABLE.
 */
#if HC_ARCH_BITS == 64
# define GMM_PAGE_PFN_LAST          UINT32_C(0xfffffff0)
#else
# define GMM_PAGE_PFN_LAST          UINT32_C(0x00fffff0)
#endif
AssertCompile(GMM_PAGE_PFN_LAST == (GMM_GCPHYS_LAST >> PAGE_SHIFT));

/** @def GMM_PAGE_PFN_UNSHAREABLE
 * Indicates that this page isn't used for normal guest memory and thus isn't shareable.
 */
#if HC_ARCH_BITS == 64
# define GMM_PAGE_PFN_UNSHAREABLE   UINT32_C(0xfffffff1)
#else
# define GMM_PAGE_PFN_UNSHAREABLE   UINT32_C(0x00fffff1)
#endif
AssertCompile(GMM_PAGE_PFN_UNSHAREABLE == (GMM_GCPHYS_UNSHAREABLE >> PAGE_SHIFT));


/**
 * A GMM allocation chunk ring-3 mapping record.
 *
 * This should really be associated with a session and not a VM, but
 * it's simpler to associate it with a VM and clean up when the VM
 * object is destroyed.
 */
typedef struct GMMCHUNKMAP
{
    /** The mapping object. */
    RTR0MEMOBJ  MapObj;
    /** The VM owning the mapping. */
    PGVM        pGVM;
} GMMCHUNKMAP;
/** Pointer to a GMM allocation chunk mapping. */
typedef struct GMMCHUNKMAP *PGMMCHUNKMAP;

typedef enum GMMCHUNKTYPE
{
    GMMCHUNKTYPE_INVALID        = 0,
    GMMCHUNKTYPE_NON_CONTINUOUS = 1,        /* 4 KB pages */
    GMMCHUNKTYPE_CONTINUOUS     = 2,        /* one 2 MB contiguous physical range. */
    GMMCHUNKTYPE_32BIT_HACK     = 0x7fffffff
} GMMCHUNKTYPE;


/**
 * A GMM allocation chunk.
 */
typedef struct GMMCHUNK
{
    /** The AVL node core.
     * The Key is the chunk ID. */
    AVLU32NODECORE      Core;
    /** The memory object.
     * Either from RTR0MemObjAllocPhysNC or RTR0MemObjLockUser depending on
     * what the host can dish up with. */
    RTR0MEMOBJ          MemObj;
    /** Pointer to the next chunk in the free list. */
    PGMMCHUNK           pFreeNext;
    /** Pointer to the previous chunk in the free list. */
    PGMMCHUNK           pFreePrev;
    /** Pointer to the free set this chunk belongs to. NULL for
     * chunks with no free pages. */
    PGMMCHUNKFREESET    pSet;
    /** Pointer to an array of mappings. */
    PGMMCHUNKMAP        paMappings;
    /** The number of mappings. */
    uint16_t            cMappings;
    /** The head of the list of free pages. UINT16_MAX is the NIL value. */
    uint16_t            iFreeHead;
    /** The number of free pages. */
    uint16_t            cFree;
    /** The GVM handle of the VM that first allocated pages from this chunk, this
     * is used as a preference when there are several chunks to choose from.
     * When in bound memory mode this isn't a preference any longer. */
    uint16_t            hGVM;
    /** The number of private pages. */
    uint16_t            cPrivate;
    /** The number of shared pages. */
    uint16_t            cShared;
    /** Chunk type. */
    GMMCHUNKTYPE        enmType;
    /** The pages. */
    GMMPAGE             aPages[GMM_CHUNK_SIZE >> PAGE_SHIFT];
} GMMCHUNK;


/**
 * An allocation chunk TLB entry.
 */
typedef struct GMMCHUNKTLBE
{
    /** The chunk id. */
    uint32_t        idChunk;
    /** Pointer to the chunk. */
    PGMMCHUNK       pChunk;
} GMMCHUNKTLBE;
/** Pointer to an allocation chunk TLB entry. */
typedef GMMCHUNKTLBE *PGMMCHUNKTLBE;


/** The number of entries in the allocation chunk TLB. */
#define GMM_CHUNKTLB_ENTRIES        32
/** Gets the TLB entry index for the given Chunk ID. */
#define GMM_CHUNKTLB_IDX(idChunk)   ( (idChunk) & (GMM_CHUNKTLB_ENTRIES - 1) )

/**
 * An allocation chunk TLB.
 */
typedef struct GMMCHUNKTLB
{
    /** The TLB entries. */
    GMMCHUNKTLBE    aEntries[GMM_CHUNKTLB_ENTRIES];
} GMMCHUNKTLB;
/** Pointer to an allocation chunk TLB. */
typedef GMMCHUNKTLB *PGMMCHUNKTLB;
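
/*
 * The TLB provides a fast path in front of the AVL tree. A sketch of the
 * intended lookup (illustrative only; names match the structures above):
 *
 * @code
 *  PGMMCHUNKTLBE pTlbe  = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(idChunk)];
 *  PGMMCHUNK     pChunk = pTlbe->pChunk;
 *  if (RT_UNLIKELY(pTlbe->idChunk != idChunk))
 *  {
 *      pChunk = (PGMMCHUNK)RTAvlU32Get(&pGMM->pChunks, idChunk);
 *      pTlbe->idChunk = idChunk;
 *      pTlbe->pChunk  = pChunk;
 *  }
 * @endcode
 */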


/** The GMMCHUNK::cFree shift count. */
#define GMM_CHUNK_FREE_SET_SHIFT    4
/** The GMMCHUNK::cFree mask for use when considering relinking a chunk. */
#define GMM_CHUNK_FREE_SET_MASK     15
/** The number of lists in a set. */
#define GMM_CHUNK_FREE_SET_LISTS    (GMM_CHUNK_NUM_PAGES >> GMM_CHUNK_FREE_SET_SHIFT)

/**
 * A set of free chunks.
 */
typedef struct GMMCHUNKFREESET
{
    /** The number of free pages in the set. */
    uint64_t        cFreePages;
    /** Chunks ordered by increasing number of free pages. */
    PGMMCHUNK       apLists[GMM_CHUNK_FREE_SET_LISTS];
} GMMCHUNKFREESET;


/**
 * The GMM instance data.
 */
typedef struct GMM
{
    /** Magic / eye catcher. GMM_MAGIC */
    uint32_t            u32Magic;
    /** The fast mutex protecting the GMM.
     * More fine grained locking can be implemented later if necessary. */
    RTSEMFASTMUTEX      Mtx;
    /** The chunk tree. */
    PAVLU32NODECORE     pChunks;
    /** The chunk TLB. */
    GMMCHUNKTLB         ChunkTLB;
    /** The private free set. */
    GMMCHUNKFREESET     Private;
    /** The shared free set. */
    GMMCHUNKFREESET     Shared;

    /** Shared module tree (global).
     * @todo separate trees for distinctly different guest OSes. */
    PAVLGCPTRNODECORE   pGlobalSharedModuleTree;

    /** The maximum number of pages we're allowed to allocate.
     * @gcfgm   64-bit GMM/MaxPages Direct.
     * @gcfgm   32-bit GMM/PctPages Relative to the number of host pages. */
    uint64_t            cMaxPages;
    /** The number of pages that have been reserved.
     * The deal is that cReservedPages - cOverCommittedPages <= cMaxPages. */
    uint64_t            cReservedPages;
    /** The number of pages that we have over-committed in reservations. */
    uint64_t            cOverCommittedPages;
    /** The number of actually allocated (committed if you like) pages. */
    uint64_t            cAllocatedPages;
    /** The number of pages that are shared. A subset of cAllocatedPages. */
    uint64_t            cSharedPages;
    /** The number of pages that are actually shared between VMs. */
    uint64_t            cDuplicatePages;
    /** The number of pages that are shared but have been left behind by
     * VMs not doing proper cleanups. */
    uint64_t            cLeftBehindSharedPages;
    /** The number of allocation chunks.
     * (The number of pages we've allocated from the host can be derived from this.) */
    uint32_t            cChunks;
    /** The number of current ballooned pages. */
    uint64_t            cBalloonedPages;

    /** The legacy allocation mode indicator.
     * This is determined at initialization time. */
    bool                fLegacyAllocationMode;
    /** The bound memory mode indicator.
     * When set, the memory will be bound to a specific VM and never
     * shared. This is always set if fLegacyAllocationMode is set.
     * (Also determined at initialization time.) */
    bool                fBoundMemoryMode;
    /** The number of registered VMs. */
    uint16_t            cRegisteredVMs;

    /** The previously allocated Chunk ID.
     * Used as a hint to avoid scanning the whole bitmap. */
    uint32_t            idChunkPrev;
    /** Chunk ID allocation bitmap.
     * Bits of allocated IDs are set, free ones are clear.
     * The NIL id (0) is marked allocated. */
    uint32_t            bmChunkId[(GMM_CHUNKID_LAST + 1 + 31) / 32];
} GMM;
/** Pointer to the GMM instance. */
typedef GMM *PGMM;

/** The value of GMM::u32Magic (Katsuhiro Otomo). */
#define GMM_MAGIC   0x19540414


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Pointer to the GMM instance data. */
static PGMM g_pGMM = NULL;

/** Macro for obtaining and validating the g_pGMM pointer.
 * On failure it will return from the invoking function with the specified return value.
 *
 * @param   pGMM    The name of the pGMM variable.
 * @param   rc      The return value on failure. Use VERR_INTERNAL_ERROR for
 *                  VBox status codes.
 */
#define GMM_GET_VALID_INSTANCE(pGMM, rc) \
    do { \
        (pGMM) = g_pGMM; \
        AssertPtrReturn((pGMM), (rc)); \
        AssertMsgReturn((pGMM)->u32Magic == GMM_MAGIC, ("%p - %#x\n", (pGMM), (pGMM)->u32Magic), (rc)); \
    } while (0)

/** Macro for obtaining and validating the g_pGMM pointer, void function variant.
 * On failure it will return from the invoking function.
 *
 * @param   pGMM    The name of the pGMM variable.
 */
#define GMM_GET_VALID_INSTANCE_VOID(pGMM) \
    do { \
        (pGMM) = g_pGMM; \
        AssertPtrReturnVoid((pGMM)); \
        AssertMsgReturnVoid((pGMM)->u32Magic == GMM_MAGIC, ("%p - %#x\n", (pGMM), (pGMM)->u32Magic)); \
    } while (0)


/** @def GMM_CHECK_SANITY_UPON_ENTERING
 * Checks the sanity of the GMM instance data before making changes.
 *
 * This macro is a stub by default and must be enabled manually in the code.
 *
 * @returns true if sane, false if not.
 * @param   pGMM    The name of the pGMM variable.
 */
#if defined(VBOX_STRICT) && 0
# define GMM_CHECK_SANITY_UPON_ENTERING(pGMM)   (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
#else
# define GMM_CHECK_SANITY_UPON_ENTERING(pGMM)   (true)
#endif

/** @def GMM_CHECK_SANITY_UPON_LEAVING
 * Checks the sanity of the GMM instance data after making changes.
 *
 * This macro is a stub by default and must be enabled manually in the code.
 *
 * @returns true if sane, false if not.
 * @param   pGMM    The name of the pGMM variable.
 */
#if defined(VBOX_STRICT) && 0
# define GMM_CHECK_SANITY_UPON_LEAVING(pGMM)    (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
#else
# define GMM_CHECK_SANITY_UPON_LEAVING(pGMM)    (true)
#endif

/** @def GMM_CHECK_SANITY_IN_LOOPS
 * Checks the sanity of the GMM instance in the allocation loops.
 *
 * This macro is a stub by default and must be enabled manually in the code.
 *
 * @returns true if sane, false if not.
 * @param   pGMM    The name of the pGMM variable.
 */
#if defined(VBOX_STRICT) && 0
# define GMM_CHECK_SANITY_IN_LOOPS(pGMM)        (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
#else
# define GMM_CHECK_SANITY_IN_LOOPS(pGMM)        (true)
#endif
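
/*
 * The sanity checks are meant to bracket the mutex-protected sections,
 * roughly along these lines (an illustrative sketch, assuming the checks
 * have been enabled):
 *
 * @code
 *  rc = RTSemFastMutexRequest(pGMM->Mtx);
 *  AssertRC(rc);
 *  if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
 *  {
 *      ... do the work ...
 *      GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
 *  }
 *  RTSemFastMutexRelease(pGMM->Mtx);
 * @endcode
 */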


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM);
static DECLCALLBACK(int) gmmR0CleanupVMScanChunk(PAVLU32NODECORE pNode, void *pvGMM);
static DECLCALLBACK(int) gmmR0CleanupSharedModule(PAVLGCPTRNODECORE pNode, void *pvGVM);
/*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM);
DECLINLINE(void) gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet);
DECLINLINE(void) gmmR0UnlinkChunk(PGMMCHUNK pChunk);
static uint32_t gmmR0SanityCheck(PGMM pGMM, const char *pszFunction, unsigned uLineNo);
static void gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
static void gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage);
static int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);



/**
 * Initializes the GMM component.
 *
 * This is called when the VMMR0.r0 module is loaded and protected by the
 * loader semaphore.
 *
 * @returns VBox status code.
 */
GMMR0DECL(int) GMMR0Init(void)
{
    LogFlow(("GMMInit:\n"));

    /*
     * Allocate the instance data and the lock(s).
     */
    PGMM pGMM = (PGMM)RTMemAllocZ(sizeof(*pGMM));
    if (!pGMM)
        return VERR_NO_MEMORY;
    pGMM->u32Magic = GMM_MAGIC;
    for (unsigned i = 0; i < RT_ELEMENTS(pGMM->ChunkTLB.aEntries); i++)
        pGMM->ChunkTLB.aEntries[i].idChunk = NIL_GMM_CHUNKID;
    ASMBitSet(&pGMM->bmChunkId[0], NIL_GMM_CHUNKID);

    int rc = RTSemFastMutexCreate(&pGMM->Mtx);
    if (RT_SUCCESS(rc))
    {
        /*
         * Check and see if RTR0MemObjAllocPhysNC works.
         */
#if 0 /* later, see #3170. */
        RTR0MEMOBJ MemObj;
        rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
        if (RT_SUCCESS(rc))
        {
            rc = RTR0MemObjFree(MemObj, true);
            AssertRC(rc);
        }
        else if (rc == VERR_NOT_SUPPORTED)
            pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
        else
            SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
#else
# if defined(RT_OS_WINDOWS) || (defined(RT_OS_SOLARIS) && ARCH_BITS == 64) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
        pGMM->fLegacyAllocationMode = false;
#  if ARCH_BITS == 32
        /* Don't reuse possibly partial chunks because of the virtual address space limitation. */
        pGMM->fBoundMemoryMode = true;
#  else
        pGMM->fBoundMemoryMode = false;
#  endif
# else
        pGMM->fLegacyAllocationMode = true;
        pGMM->fBoundMemoryMode = true;
# endif
#endif

        /*
         * Query the system page count and guess a reasonable cMaxPages value.
         */
        pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for querying the RAM size and such. */

        g_pGMM = pGMM;
        LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
        return VINF_SUCCESS;
    }

    RTMemFree(pGMM);
    SUPR0Printf("GMMR0Init: failed! rc=%d\n", rc);
    return rc;
}


/**
 * Terminates the GMM component.
 */
GMMR0DECL(void) GMMR0Term(void)
{
    LogFlow(("GMMTerm:\n"));

    /*
     * Take care / be paranoid...
     */
    PGMM pGMM = g_pGMM;
    if (!VALID_PTR(pGMM))
        return;
    if (pGMM->u32Magic != GMM_MAGIC)
    {
        SUPR0Printf("GMMR0Term: u32Magic=%#x\n", pGMM->u32Magic);
        return;
    }

    /*
     * Undo what init did and free all the resources we've acquired.
     */
    /* Destroy the fundamentals. */
    g_pGMM = NULL;
    pGMM->u32Magic++;
    RTSemFastMutexDestroy(pGMM->Mtx);
    pGMM->Mtx = NIL_RTSEMFASTMUTEX;

    /* free any chunks still hanging around. */
    RTAvlU32Destroy(&pGMM->pChunks, gmmR0TermDestroyChunk, pGMM);

    /* finally the instance data itself. */
    RTMemFree(pGMM);
    LogFlow(("GMMTerm: done\n"));
}


/**
 * RTAvlU32Destroy callback.
 *
 * @returns 0
 * @param   pNode   The node to destroy.
 * @param   pvGMM   The GMM handle.
 */
static DECLCALLBACK(int) gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM)
{
    PGMMCHUNK pChunk = (PGMMCHUNK)pNode;

    if (pChunk->cFree != (GMM_CHUNK_SIZE >> PAGE_SHIFT))
        SUPR0Printf("GMMR0Term: %p/%#x: cFree=%d cPrivate=%d cShared=%d cMappings=%d\n", pChunk,
                    pChunk->Core.Key, pChunk->cFree, pChunk->cPrivate, pChunk->cShared, pChunk->cMappings);

    int rc = RTR0MemObjFree(pChunk->MemObj, true /* fFreeMappings */);
    if (RT_FAILURE(rc))
    {
        SUPR0Printf("GMMR0Term: %p/%#x: RTR0MemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk,
                    pChunk->Core.Key, pChunk->MemObj, rc, pChunk->cMappings);
        AssertRC(rc);
    }
    pChunk->MemObj = NIL_RTR0MEMOBJ;

    RTMemFree(pChunk->paMappings);
    pChunk->paMappings = NULL;

    RTMemFree(pChunk);
    NOREF(pvGMM);
    return 0;
}
72e9ef1022a910facbd4a232500026befd944d95vboxsync
be177d2edf33024ee98bf5a84a32615473ac9568vboxsync
be177d2edf33024ee98bf5a84a32615473ac9568vboxsync/**
be177d2edf33024ee98bf5a84a32615473ac9568vboxsync * Initializes the per-VM data for the GMM.
be177d2edf33024ee98bf5a84a32615473ac9568vboxsync *
be177d2edf33024ee98bf5a84a32615473ac9568vboxsync * This is called from within the GVMM lock (from GVMMR0CreateVM)
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * and should only initialize the data members so GMMR0CleanupVM
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * can deal with them. We reserve no memory or anything here,
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync * that's done later in GMMR0InitVM.
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync *
be177d2edf33024ee98bf5a84a32615473ac9568vboxsync * @param pGVM Pointer to the Global VM structure.
be177d2edf33024ee98bf5a84a32615473ac9568vboxsync */
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsyncGMMR0DECL(void) GMMR0InitPerVMData(PGVM pGVM)
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync{
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync AssertCompile(RT_SIZEOFMEMB(GVM,gmm.s) <= RT_SIZEOFMEMB(GVM,gmm.padding));
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync pGVM->gmm.s.enmPolicy = GMMOCPOLICY_INVALID;
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync pGVM->gmm.s.enmPriority = GMMPRIORITY_INVALID;
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync pGVM->gmm.s.fMayAllocate = false;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync}
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync/**
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync * Cleans up when a VM is terminating.
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync *
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync * @param pGVM Pointer to the Global VM structure.
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync */
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsyncGMMR0DECL(void) GMMR0CleanupVM(PGVM pGVM)
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync{
270236340676d2385b27ea992e07fcb643bb78b6vboxsync LogFlow(("GMMR0CleanupVM: pGVM=%p:{.pVM=%p, .hSelf=%#x}\n", pGVM, pGVM->pVM, pGVM->hSelf));
be177d2edf33024ee98bf5a84a32615473ac9568vboxsync
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync PGMM pGMM;
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync GMM_GET_VALID_INSTANCE_VOID(pGMM);
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync int rc = RTSemFastMutexRequest(pGMM->Mtx);
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync AssertRC(rc);
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync GMM_CHECK_SANITY_UPON_ENTERING(pGMM);
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync#ifdef VBOX_WITH_PAGE_SHARING
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /* Clean up all registered shared modules. */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync RTAvlGCPtrDestroy(&pGVM->gmm.s.pSharedModuleTree, gmmR0CleanupSharedModule, pGVM);
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync#endif
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync /*
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * The policy is 'INVALID' until the initial reservation
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * request has been serviced.
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync if ( pGVM->gmm.s.enmPolicy > GMMOCPOLICY_INVALID
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync && pGVM->gmm.s.enmPolicy < GMMOCPOLICY_END)
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync {
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync /*
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync * If it's the last VM around, we can skip walking all the chunk looking
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * for the pages owned by this VM and instead flush the whole shebang.
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync *
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * This takes care of the eventuality that a VM has left shared page
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync * references behind (shouldn't happen of course, but you never know).
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync */
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync Assert(pGMM->cRegisteredVMs);
c4b8dd273d2becff4145cf4c634b566e2dd633e6vboxsync pGMM->cRegisteredVMs--;
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync#if 0 /* disabled so it won't hide bugs. */
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync if (!pGMM->cRegisteredVMs)
8541678784dbe432feebe6d9c1528525e1771397vboxsync {
72e9ef1022a910facbd4a232500026befd944d95vboxsync RTAvlU32Destroy(&pGMM->pChunks, gmmR0CleanupVMDestroyChunk, pGMM);
72e9ef1022a910facbd4a232500026befd944d95vboxsync
72e9ef1022a910facbd4a232500026befd944d95vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pGMM->ChunkTLB.aEntries); i++)
72e9ef1022a910facbd4a232500026befd944d95vboxsync {
72e9ef1022a910facbd4a232500026befd944d95vboxsync pGMM->ChunkTLB.aEntries[i].idChunk = NIL_GMM_CHUNKID;
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync pGMM->ChunkTLB.aEntries[i].pChunk = NULL;
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync }
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync
72e9ef1022a910facbd4a232500026befd944d95vboxsync memset(&pGMM->Private, 0, sizeof(pGMM->Private));
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync memset(&pGMM->Shared, 0, sizeof(pGMM->Shared));
72e9ef1022a910facbd4a232500026befd944d95vboxsync
72e9ef1022a910facbd4a232500026befd944d95vboxsync memset(&pGMM->bmChunkId[0], 0, sizeof(pGMM->bmChunkId));
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync ASMBitSet(&pGMM->bmChunkId[0], NIL_GMM_CHUNKID);
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync pGMM->cReservedPages = 0;
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync pGMM->cOverCommittedPages = 0;
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync pGMM->cAllocatedPages = 0;
e2ba4c7fd718dba1d5b73cd9e40486df3ce06e77vboxsync pGMM->cSharedPages = 0;
54ed927d658674ced4387afbd1877a27cb975a76vboxsync pGMM->cDuplicatePages = 0;
54ed927d658674ced4387afbd1877a27cb975a76vboxsync pGMM->cLeftBehindSharedPages = 0;
6cd3c708987d8c77397659c5a80ef0eea7ef1fd1vboxsync pGMM->cChunks = 0;
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync pGMM->cBalloonedPages = 0;
e2ba4c7fd718dba1d5b73cd9e40486df3ce06e77vboxsync }
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync else
e2ba4c7fd718dba1d5b73cd9e40486df3ce06e77vboxsync#endif
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync {
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync /*
08fc4eb537ed24136b05660d0aa038b336516961vboxsync * Walk the entire pool looking for pages that belong to this VM
08fc4eb537ed24136b05660d0aa038b336516961vboxsync * and left over mappings. (This'll only catch private pages, shared
8541678784dbe432feebe6d9c1528525e1771397vboxsync * pages will be 'left behind'.)
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync */
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsync /* todo this might be kind of expensive with a lot of VMs and memory hanging around... */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync uint64_t cPrivatePages = pGVM->gmm.s.cPrivatePages; /* save */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync RTAvlU32DoWithAll(&pGMM->pChunks, true /* fFromLeft */, gmmR0CleanupVMScanChunk, pGVM);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync if (pGVM->gmm.s.cPrivatePages)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync SUPR0Printf("GMMR0CleanupVM: hGVM=%#x has %#x private pages that cannot be found!\n", pGVM->hSelf, pGVM->gmm.s.cPrivatePages);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync pGMM->cAllocatedPages -= cPrivatePages;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /* free empty chunks. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync if (cPrivatePages)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync {
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync PGMMCHUNK pCur = pGMM->Private.apLists[RT_ELEMENTS(pGMM->Private.apLists) - 1];
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync while (pCur)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync {
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync PGMMCHUNK pNext = pCur->pFreeNext;
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync if ( pCur->cFree == GMM_CHUNK_NUM_PAGES
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync && ( !pGMM->fBoundMemoryMode
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync || pCur->hGVM == pGVM->hSelf))
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync gmmR0FreeChunk(pGMM, pGVM, pCur);
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync pCur = pNext;
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync }
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync }
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync /* account for shared pages that weren't freed. */
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync if (pGVM->gmm.s.cSharedPages)
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync {
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync Assert(pGMM->cSharedPages >= pGVM->gmm.s.cSharedPages);
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync SUPR0Printf("GMMR0CleanupVM: hGVM=%#x left %#x shared pages behind!\n", pGVM->hSelf, pGVM->gmm.s.cSharedPages);
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync pGMM->cLeftBehindSharedPages += pGVM->gmm.s.cSharedPages;
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync }
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync /* Clean up balloon statistics in case the VM process crashed. */
bec4d1c0274e4712fe01426313aab120b5ad1c17vboxsync Assert(pGMM->cBalloonedPages >= pGVM->gmm.s.cBalloonedPages);
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync pGMM->cBalloonedPages -= pGVM->gmm.s.cBalloonedPages;
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync
c6383709c15c809f8cfb09b5cfe670760f06e2b9vboxsync /*
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Update the over-commitment management statistics.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync pGMM->cReservedPages -= pGVM->gmm.s.Reserved.cBasePages
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync + pGVM->gmm.s.Reserved.cFixedPages
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync + pGVM->gmm.s.Reserved.cShadowPages;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync switch (pGVM->gmm.s.enmPolicy)
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync {
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync case GMMOCPOLICY_NO_OC:
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync break;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync default:
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** @todo Update GMM->cOverCommittedPages */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync break;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync }
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync }
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync }
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /* zap the GVM data. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync pGVM->gmm.s.enmPolicy = GMMOCPOLICY_INVALID;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync pGVM->gmm.s.enmPriority = GMMPRIORITY_INVALID;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync pGVM->gmm.s.fMayAllocate = false;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync RTSemFastMutexRelease(pGMM->Mtx);
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsync
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsync LogFlow(("GMMR0CleanupVM: returns\n"));
51ef69064b4ea4d571ed129ab883b0c08967c901vboxsync}


/**
 * RTAvlU32DoWithAll callback.
 *
 * @returns 0
 * @param   pNode   The node to search.
 * @param   pvGVM   Pointer to the Global VM structure.
 */
static DECLCALLBACK(int) gmmR0CleanupVMScanChunk(PAVLU32NODECORE pNode, void *pvGVM)
{
    PGMMCHUNK pChunk = (PGMMCHUNK)pNode;
    PGVM pGVM = (PGVM)pvGVM;

    /*
     * Look for pages belonging to the VM.
     * (Perform some internal checks while we're scanning.)
     */
#ifndef VBOX_STRICT
    if (pChunk->cFree != (GMM_CHUNK_SIZE >> PAGE_SHIFT))
#endif
    {
        unsigned cPrivate = 0;
        unsigned cShared = 0;
        unsigned cFree = 0;

        gmmR0UnlinkChunk(pChunk); /* avoiding cFreePages updates. */

        uint16_t hGVM = pGVM->hSelf;
        unsigned iPage = (GMM_CHUNK_SIZE >> PAGE_SHIFT);
        while (iPage-- > 0)
            if (GMM_PAGE_IS_PRIVATE(&pChunk->aPages[iPage]))
            {
                if (pChunk->aPages[iPage].Private.hGVM == hGVM)
                {
                    /*
                     * Free the page.
                     *
                     * The reason for not using gmmR0FreePrivatePage here is that we
                     * must *not* cause the chunk to be freed from under us - we're in
                     * an AVL tree walk here.
                     */
                    pChunk->aPages[iPage].u = 0;
                    pChunk->aPages[iPage].Free.iNext = pChunk->iFreeHead;
                    pChunk->aPages[iPage].Free.u2State = GMM_PAGE_STATE_FREE;
                    pChunk->iFreeHead = iPage;
                    pChunk->cPrivate--;
                    pChunk->cFree++;
                    pGVM->gmm.s.cPrivatePages--;
                    cFree++;
                }
                else
                    cPrivate++;
            }
            else if (GMM_PAGE_IS_FREE(&pChunk->aPages[iPage]))
                cFree++;
            else
                cShared++;

        gmmR0LinkChunk(pChunk, pChunk->cShared ? &g_pGMM->Shared : &g_pGMM->Private);

        /*
         * Did it add up?
         */
        if (RT_UNLIKELY(    pChunk->cFree != cFree
                        ||  pChunk->cPrivate != cPrivate
                        ||  pChunk->cShared != cShared))
        {
            SUPR0Printf("gmmR0CleanupVMScanChunk: Chunk %p/%#x has bogus stats - free=%d/%d private=%d/%d shared=%d/%d\n",
                        pChunk, pChunk->Core.Key, pChunk->cFree, cFree, pChunk->cPrivate, cPrivate, pChunk->cShared, cShared);
            pChunk->cFree = cFree;
            pChunk->cPrivate = cPrivate;
            pChunk->cShared = cShared;
        }
    }

    /*
     * Look for the mapping belonging to the terminating VM.
     */
    for (unsigned i = 0; i < pChunk->cMappings; i++)
        if (pChunk->paMappings[i].pGVM == pGVM)
        {
            RTR0MEMOBJ MemObj = pChunk->paMappings[i].MapObj;

            pChunk->cMappings--;
            if (i < pChunk->cMappings)
                pChunk->paMappings[i] = pChunk->paMappings[pChunk->cMappings];
            pChunk->paMappings[pChunk->cMappings].pGVM = NULL;
            pChunk->paMappings[pChunk->cMappings].MapObj = NIL_RTR0MEMOBJ;

            int rc = RTR0MemObjFree(MemObj, false /* fFreeMappings (NA) */);
            if (RT_FAILURE(rc))
            {
                SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: mapping #%x: RTR0MemObjFree(%p,false) -> %d\n",
                            pChunk, pChunk->Core.Key, i, MemObj, rc);
                AssertRC(rc);
            }
            break;
        }

    /*
     * Reset the hGVM field if it holds our handle. Outside bound memory mode
     * the chunk simply becomes unowned; in bound mode the chunk must be
     * entirely free at this point, so enforce that if it isn't.
     */
    if (pChunk->hGVM == pGVM->hSelf)
    {
        if (!g_pGMM->fBoundMemoryMode)
            pChunk->hGVM = NIL_GVM_HANDLE;
        else if (pChunk->cFree != GMM_CHUNK_NUM_PAGES)
        {
            SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: cFree=%#x - it should be %#x (all free) in bound mode!\n",
                        pChunk, pChunk->Core.Key, pChunk->cFree, GMM_CHUNK_NUM_PAGES);
            AssertMsgFailed(("%p/%#x: cFree=%#x - it should be %#x (all free) in bound mode!\n", pChunk, pChunk->Core.Key, pChunk->cFree, GMM_CHUNK_NUM_PAGES));

            gmmR0UnlinkChunk(pChunk);
            pChunk->cFree = GMM_CHUNK_NUM_PAGES;
            gmmR0LinkChunk(pChunk, pChunk->cShared ? &g_pGMM->Shared : &g_pGMM->Private);
        }
    }

    return 0;
}


/**
 * RTAvlU32Destroy callback for GMMR0CleanupVM.
 *
 * @returns 0
 * @param   pNode   The node (allocation chunk) to destroy.
 * @param   pvGVM   Pointer to the Global VM structure.
 */
/*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM)
{
    PGMMCHUNK pChunk = (PGMMCHUNK)pNode;
    PGVM pGVM = (PGVM)pvGVM;

    for (unsigned i = 0; i < pChunk->cMappings; i++)
    {
        if (pChunk->paMappings[i].pGVM != pGVM)
            SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: pGVM=%p expected %p\n", pChunk,
                        pChunk->Core.Key, i, pChunk->paMappings[i].pGVM, pGVM);
        int rc = RTR0MemObjFree(pChunk->paMappings[i].MapObj, false /* fFreeMappings (NA) */);
        if (RT_FAILURE(rc))
        {
            SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: RTR0MemObjFree(%p,false) -> %d\n", pChunk,
                        pChunk->Core.Key, i, pChunk->paMappings[i].MapObj, rc);
            AssertRC(rc);
        }
    }

    int rc = RTR0MemObjFree(pChunk->MemObj, true /* fFreeMappings */);
    if (RT_FAILURE(rc))
    {
        SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: RTR0MemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk,
                    pChunk->Core.Key, pChunk->MemObj, rc, pChunk->cMappings);
        AssertRC(rc);
    }
    pChunk->MemObj = NIL_RTR0MEMOBJ;

    RTMemFree(pChunk->paMappings);
    pChunk->paMappings = NULL;

    RTMemFree(pChunk);
    return 0;
}


/**
 * The initial resource reservations.
 *
 * This will make memory reservations according to policy and priority. If there aren't
 * sufficient resources available to sustain the VM this function will fail and all
 * future allocation requests will fail as well.
 *
 * These are just the initial reservations made very early during the VM creation
 * process and will be adjusted later in the GMMR0UpdateReservation call after the
 * ring-3 init has completed.
 *
 * @returns VBox status code.
 * @retval  VERR_GMM_MEMORY_RESERVATION_DECLINED
 * @retval  VERR_GMM_
 *
 * @param   pVM             Pointer to the shared VM structure.
 * @param   idCpu           VCPU id
 * @param   cBasePages      The number of pages that may be allocated for the base RAM and ROMs.
 *                          This does not include MMIO2 and similar.
 * @param   cShadowPages    The number of pages that may be allocated for shadow paging structures.
 * @param   cFixedPages     The number of pages that may be allocated for fixed objects like the
 *                          hyper heap, MMIO2 and similar.
 * @param   enmPolicy       The OC policy to use on this VM.
 * @param   enmPriority     The priority in an out-of-memory situation.
 *
 * @thread  The creator thread / EMT.
 */
GMMR0DECL(int) GMMR0InitialReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
                                       GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority)
{
    LogFlow(("GMMR0InitialReservation: pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x enmPolicy=%d enmPriority=%d\n",
             pVM, cBasePages, cShadowPages, cFixedPages, enmPolicy, enmPriority));

    /*
     * Validate, get basics and take the semaphore.
     */
    PGMM pGMM;
    GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
    PGVM pGVM;
    int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
    if (RT_FAILURE(rc))
        return rc;

    AssertReturn(cBasePages, VERR_INVALID_PARAMETER);
    AssertReturn(cShadowPages, VERR_INVALID_PARAMETER);
    AssertReturn(cFixedPages, VERR_INVALID_PARAMETER);
    AssertReturn(enmPolicy > GMMOCPOLICY_INVALID && enmPolicy < GMMOCPOLICY_END, VERR_INVALID_PARAMETER);
    AssertReturn(enmPriority > GMMPRIORITY_INVALID && enmPriority < GMMPRIORITY_END, VERR_INVALID_PARAMETER);

    rc = RTSemFastMutexRequest(pGMM->Mtx);
    AssertRC(rc);
    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    {
        if (    !pGVM->gmm.s.Reserved.cBasePages
            &&  !pGVM->gmm.s.Reserved.cFixedPages
            &&  !pGVM->gmm.s.Reserved.cShadowPages)
        {
            /*
             * Check if we can accommodate this.
             */
            /* ... later ... */
            if (RT_SUCCESS(rc))
            {
                /*
                 * Update the records.
                 */
                pGVM->gmm.s.Reserved.cBasePages = cBasePages;
                pGVM->gmm.s.Reserved.cFixedPages = cFixedPages;
                pGVM->gmm.s.Reserved.cShadowPages = cShadowPages;
                pGVM->gmm.s.enmPolicy = enmPolicy;
                pGVM->gmm.s.enmPriority = enmPriority;
                pGVM->gmm.s.fMayAllocate = true;

                pGMM->cReservedPages += cBasePages + cFixedPages + cShadowPages;
                pGMM->cRegisteredVMs++;
            }
        }
        else
            rc = VERR_WRONG_ORDER;
        GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
    }
    else
        rc = VERR_INTERNAL_ERROR_5;
    RTSemFastMutexRelease(pGMM->Mtx);
    LogFlow(("GMMR0InitialReservation: returns %Rrc\n", rc));
    return rc;
}


/**
 * VMMR0 request wrapper for GMMR0InitialReservation.
 *
 * @returns see GMMR0InitialReservation.
 * @param   pVM     Pointer to the shared VM structure.
 * @param   idCpu   VCPU id
 * @param   pReq    The request packet.
 */
GMMR0DECL(int) GMMR0InitialReservationReq(PVM pVM, VMCPUID idCpu, PGMMINITIALRESERVATIONREQ pReq)
{
    /*
     * Validate input and pass it on.
     */
    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
    AssertPtrReturn(pReq, VERR_INVALID_POINTER);
    AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);

    return GMMR0InitialReservation(pVM, idCpu, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages, pReq->enmPolicy, pReq->enmPriority);
}
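

/*
 * Illustrative only: how ring-3 might drive the request wrapper above. The
 * field names come straight from the checks in GMMR0InitialReservationReq,
 * but the header magic, SUPR3CallVMMR0Ex and the
 * VMMR0_DO_GMM_INITIAL_RESERVATION operation are assumptions here, not
 * defined in this file:
 * @code
 *  GMMINITIALRESERVATIONREQ Req;
 *  Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;    // assumed header magic
 *  Req.Hdr.cbReq    = sizeof(Req);             // checked by the wrapper
 *  Req.cBasePages   = cGuestRamPages;          // base RAM + ROMs
 *  Req.cShadowPages = cShadowPages;            // shadow paging structures
 *  Req.cFixedPages  = cFixedPages;             // hyper heap, MMIO2, etc.
 *  Req.enmPolicy    = GMMOCPOLICY_NO_OC;
 *  Req.enmPriority  = GMMPRIORITY_NORMAL;      // assumed priority constant
 *  int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, idCpu, VMMR0_DO_GMM_INITIAL_RESERVATION, 0, &Req.Hdr);
 * @endcode
 */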


/**
 * This updates the memory reservation with the additional MMIO2 and ROM pages.
 *
 * @returns VBox status code.
 * @retval  VERR_GMM_MEMORY_RESERVATION_DECLINED
 *
 * @param   pVM             Pointer to the shared VM structure.
 * @param   idCpu           VCPU id
 * @param   cBasePages      The number of pages that may be allocated for the base RAM and ROMs.
 *                          This does not include MMIO2 and similar.
 * @param   cShadowPages    The number of pages that may be allocated for shadow paging structures.
 * @param   cFixedPages     The number of pages that may be allocated for fixed objects like the
 *                          hyper heap, MMIO2 and similar.
 *
 * @thread  EMT.
 */
GMMR0DECL(int) GMMR0UpdateReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages)
{
    LogFlow(("GMMR0UpdateReservation: pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x\n",
             pVM, cBasePages, cShadowPages, cFixedPages));

    /*
     * Validate, get basics and take the semaphore.
     */
    PGMM pGMM;
    GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
    PGVM pGVM;
    int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
    if (RT_FAILURE(rc))
        return rc;

    AssertReturn(cBasePages, VERR_INVALID_PARAMETER);
    AssertReturn(cShadowPages, VERR_INVALID_PARAMETER);
    AssertReturn(cFixedPages, VERR_INVALID_PARAMETER);

    rc = RTSemFastMutexRequest(pGMM->Mtx);
    AssertRC(rc);
    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    {
        if (    pGVM->gmm.s.Reserved.cBasePages
            &&  pGVM->gmm.s.Reserved.cFixedPages
            &&  pGVM->gmm.s.Reserved.cShadowPages)
        {
            /*
             * Check if we can accommodate this.
             */
            /* ... later ... */
            if (RT_SUCCESS(rc))
            {
                /*
                 * Update the records.
                 */
                pGMM->cReservedPages -= pGVM->gmm.s.Reserved.cBasePages
                                      + pGVM->gmm.s.Reserved.cFixedPages
                                      + pGVM->gmm.s.Reserved.cShadowPages;
                pGMM->cReservedPages += cBasePages + cFixedPages + cShadowPages;

                pGVM->gmm.s.Reserved.cBasePages = cBasePages;
                pGVM->gmm.s.Reserved.cFixedPages = cFixedPages;
                pGVM->gmm.s.Reserved.cShadowPages = cShadowPages;
            }
        }
        else
            rc = VERR_WRONG_ORDER;
        GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
    }
    else
        rc = VERR_INTERNAL_ERROR_5;
    RTSemFastMutexRelease(pGMM->Mtx);
    LogFlow(("GMMR0UpdateReservation: returns %Rrc\n", rc));
    return rc;
}


/**
 * VMMR0 request wrapper for GMMR0UpdateReservation.
 *
 * @returns see GMMR0UpdateReservation.
 * @param   pVM     Pointer to the shared VM structure.
 * @param   idCpu   VCPU id
 * @param   pReq    The request packet.
 */
GMMR0DECL(int) GMMR0UpdateReservationReq(PVM pVM, VMCPUID idCpu, PGMMUPDATERESERVATIONREQ pReq)
{
    /*
     * Validate input and pass it on.
     */
    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
    AssertPtrReturn(pReq, VERR_INVALID_POINTER);
    AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);

    return GMMR0UpdateReservation(pVM, idCpu, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages);
}


/**
 * Performs sanity checks on a free set.
 *
 * @returns Error count.
 *
 * @param   pGMM        Pointer to the GMM instance.
 * @param   pSet        Pointer to the set.
 * @param   pszSetName  The set name.
 * @param   pszFunction The function from which it was called.
 * @param   uLineNo     The line number.
 */
static uint32_t gmmR0SanityCheckSet(PGMM pGMM, PGMMCHUNKFREESET pSet, const char *pszSetName,
                                    const char *pszFunction, unsigned uLineNo)
{
    uint32_t cErrors = 0;

    /*
     * Count the free pages in all the chunks and match it against pSet->cFreePages.
     */
    uint32_t cPages = 0;
    for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists); i++)
    {
        for (PGMMCHUNK pCur = pSet->apLists[i]; pCur; pCur = pCur->pFreeNext)
        {
            /** @todo check that the chunk is hashed into the right set. */
            cPages += pCur->cFree;
        }
    }
    if (RT_UNLIKELY(cPages != pSet->cFreePages))
    {
        SUPR0Printf("GMM insanity: found %#x pages in the %s set, expected %#x. (%s, line %u)\n",
                    cPages, pszSetName, pSet->cFreePages, pszFunction, uLineNo);
        cErrors++;
    }

    return cErrors;
}
54ed927d658674ced4387afbd1877a27cb975a76vboxsync
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync/**
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * Performs some sanity checks on the GMM while owning lock.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync *
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync * @returns Error count.
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync *
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync * @param pGMM Pointer to the GMM instance.
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync * @param pszFunction The function from which it is called.
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync * @param uLineNo The line number.
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync */
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsyncstatic uint32_t gmmR0SanityCheck(PGMM pGMM, const char *pszFunction, unsigned uLineNo)
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync{
0ab80a5f847b8c908ee2d9db5cc37da8e8dd5697vboxsync uint32_t cErrors = 0;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync cErrors += gmmR0SanityCheckSet(pGMM, &pGMM->Private, "private", pszFunction, uLineNo);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync cErrors += gmmR0SanityCheckSet(pGMM, &pGMM->Shared, "shared", pszFunction, uLineNo);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync /** @todo add more sanity checks. */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync return cErrors;
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync}
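

/*
 * A minimal sketch (an assumption, not this file's actual definition) of how
 * the GMM_CHECK_SANITY_UPON_ENTERING/LEAVING macros used throughout could map
 * onto gmmR0SanityCheck while compiling away in non-strict builds:
 * @code
 *  #ifdef VBOX_STRICT
 *  # define GMM_CHECK_SANITY_UPON_ENTERING(pGMM) (gmmR0SanityCheck((pGMM), __FUNCTION__, __LINE__) == 0)
 *  #else
 *  # define GMM_CHECK_SANITY_UPON_ENTERING(pGMM) (true)
 *  #endif
 * @endcode
 */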


/**
 * Looks up a chunk in the tree and fills in the TLB entry for it.
 *
 * This is not expected to fail and will bitch if it does.
 *
 * @returns Pointer to the allocation chunk, NULL if not found.
 * @param   pGMM    Pointer to the GMM instance.
 * @param   idChunk The ID of the chunk to find.
 * @param   pTlbe   Pointer to the TLB entry.
 */
static PGMMCHUNK gmmR0GetChunkSlow(PGMM pGMM, uint32_t idChunk, PGMMCHUNKTLBE pTlbe)
{
    PGMMCHUNK pChunk = (PGMMCHUNK)RTAvlU32Get(&pGMM->pChunks, idChunk);
    AssertMsgReturn(pChunk, ("Chunk %#x not found!\n", idChunk), NULL);
    pTlbe->idChunk = idChunk;
    pTlbe->pChunk = pChunk;
    return pChunk;
}


/**
 * Finds an allocation chunk.
 *
 * This is not expected to fail and will bitch if it does.
 *
 * @returns Pointer to the allocation chunk, NULL if not found.
 * @param   pGMM    Pointer to the GMM instance.
 * @param   idChunk The ID of the chunk to find.
 */
DECLINLINE(PGMMCHUNK) gmmR0GetChunk(PGMM pGMM, uint32_t idChunk)
{
    /*
     * Do a TLB lookup, branch if not in the TLB.
     */
    PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(idChunk)];
    if (    pTlbe->idChunk != idChunk
        ||  !pTlbe->pChunk)
        return gmmR0GetChunkSlow(pGMM, idChunk, pTlbe);
    return pTlbe->pChunk;
}
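

/*
 * Note: the ChunkTLB consulted above is a small direct-mapped cache in front
 * of the AVL tree, so a hit costs a compare and two loads while a miss falls
 * back to gmmR0GetChunkSlow. GMM_CHUNKTLB_IDX presumably reduces the chunk ID
 * to a slot index along these lines (an assumption, not this file's actual
 * definition):
 * @code
 *  #define GMM_CHUNKTLB_IDX(idChunk)   ( (idChunk) & (GMM_CHUNKTLB_ENTRIES - 1) )
 * @endcode
 */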


/**
 * Finds a page.
 *
 * This is not expected to fail and will bitch if it does.
 *
 * @returns Pointer to the page, NULL if not found.
 * @param   pGMM    Pointer to the GMM instance.
 * @param   idPage  The ID of the page to find.
 */
DECLINLINE(PGMMPAGE) gmmR0GetPage(PGMM pGMM, uint32_t idPage)
{
    PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
    if (RT_LIKELY(pChunk))
        return &pChunk->aPages[idPage & GMM_PAGEID_IDX_MASK];
    return NULL;
}
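

/*
 * Worked example of the ID decomposition gmmR0GetPage performs. With an
 * assumed 4MB chunk of 4KB pages, GMM_CHUNKID_SHIFT would be 10 and
 * GMM_PAGEID_IDX_MASK 0x3ff (the concrete values are illustration only):
 * @code
 *  uint32_t idPage  = 0x00004c21;
 *  uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;    // 0x13  - the owning chunk
 *  uint32_t iPage   = idPage &  GMM_PAGEID_IDX_MASK;  // 0x021 - index into aPages
 * @endcode
 */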


/**
 * Unlinks the chunk from the free list it's currently on (if any).
 *
 * @param   pChunk  The allocation chunk.
 */
DECLINLINE(void) gmmR0UnlinkChunk(PGMMCHUNK pChunk)
{
    PGMMCHUNKFREESET pSet = pChunk->pSet;
    if (RT_LIKELY(pSet))
    {
        pSet->cFreePages -= pChunk->cFree;

        PGMMCHUNK pPrev = pChunk->pFreePrev;
        PGMMCHUNK pNext = pChunk->pFreeNext;
        if (pPrev)
            pPrev->pFreeNext = pNext;
        else
            pSet->apLists[(pChunk->cFree - 1) >> GMM_CHUNK_FREE_SET_SHIFT] = pNext;
        if (pNext)
            pNext->pFreePrev = pPrev;

        pChunk->pSet = NULL;
        pChunk->pFreeNext = NULL;
        pChunk->pFreePrev = NULL;
    }
    else
    {
        Assert(!pChunk->pFreeNext);
        Assert(!pChunk->pFreePrev);
        Assert(!pChunk->cFree);
    }
}
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync
d17bffd306255c509aa98e8118e9a5456ee2138evboxsync
/**
 * Links the chunk onto the appropriate free list in the specified free set.
 *
 * If the chunk has no free entries, it is not linked onto any list.
 *
 * @param   pChunk      The allocation chunk.
 * @param   pSet        The free set.
 */
DECLINLINE(void) gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet)
{
    Assert(!pChunk->pSet);
    Assert(!pChunk->pFreeNext);
    Assert(!pChunk->pFreePrev);

    if (pChunk->cFree > 0)
    {
        pChunk->pSet = pSet;
        pChunk->pFreePrev = NULL;
        unsigned iList = (pChunk->cFree - 1) >> GMM_CHUNK_FREE_SET_SHIFT;
        pChunk->pFreeNext = pSet->apLists[iList];
        if (pChunk->pFreeNext)
            pChunk->pFreeNext->pFreePrev = pChunk;
        pSet->apLists[iList] = pChunk;

        pSet->cFreePages += pChunk->cFree;
    }
}


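/*
 * For illustration: which free list a chunk lands on. With GMM_CHUNK_SIZE of
 * 2MB and 4KB host pages a chunk holds 512 pages; assuming, for the sake of
 * the example only, that GMM_CHUNK_FREE_SET_SHIFT is 4, the mapping
 * iList = (cFree - 1) >> GMM_CHUNK_FREE_SET_SHIFT works out as:
 * @code
 *    //  cFree =   1  ->  iList = (  1 - 1) >> 4 =  0   (nearly full chunk)
 *    //  cFree =  16  ->  iList = ( 16 - 1) >> 4 =  0
 *    //  cFree =  17  ->  iList = ( 17 - 1) >> 4 =  1
 *    //  cFree = 512  ->  iList = (512 - 1) >> 4 = 31   (completely free chunk)
 * @endcode
 * The highest-indexed list thus collects the emptiest chunks, which is what
 * gmmR0AllocateMoreChunks below relies on when stealing 100% free chunks.
 */

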
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync/**
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * Frees a Chunk ID.
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync *
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync * @param pGMM Pointer to the GMM instance.
ca935c7716bd7361d80c27c870d108a8fec80b79vboxsync * @param idChunk The Chunk ID to free.
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync */
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsyncstatic void gmmR0FreeChunkId(PGMM pGMM, uint32_t idChunk)
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync{
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync AssertReturnVoid(idChunk != NIL_GMM_CHUNKID);
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync AssertMsg(ASMBitTest(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk));
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync ASMAtomicBitClear(&pGMM->bmChunkId[0], idChunk);
f372af8e6ee2a011213b11cc69f4a29530ff7ce5vboxsync}
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync
917f4ee9f101c9786cf09ea0fe7923a7f6dfe40cvboxsync
/**
 * Allocates a new Chunk ID.
 *
 * @returns The Chunk ID.
 * @param   pGMM        Pointer to the GMM instance.
 */
static uint32_t gmmR0AllocateChunkId(PGMM pGMM)
{
    AssertCompile(!((GMM_CHUNKID_LAST + 1) & 31)); /* must be a multiple of 32 */
    AssertCompile(NIL_GMM_CHUNKID == 0);

    /*
     * Try the next sequential one.
     */
    int32_t idChunk = ++pGMM->idChunkPrev;
#if 0 /* test the fallback first */
    if (    idChunk <= GMM_CHUNKID_LAST
        &&  idChunk > NIL_GMM_CHUNKID
        &&  !ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk))
        return idChunk;
#endif

    /*
     * Scan sequentially from the last one.
     */
    if (    (uint32_t)idChunk < GMM_CHUNKID_LAST
        &&  idChunk > NIL_GMM_CHUNKID)
    {
        idChunk = ASMBitNextClear(&pGMM->bmChunkId[0], GMM_CHUNKID_LAST + 1, idChunk);
        if (idChunk > NIL_GMM_CHUNKID)
        {
            AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk), NIL_GMM_CHUNKID);
            return pGMM->idChunkPrev = idChunk;
        }
    }

    /*
     * Ok, scan from the start.
     * We're not racing anyone, so there is no need to expect failures or have restart loops.
     */
    idChunk = ASMBitFirstClear(&pGMM->bmChunkId[0], GMM_CHUNKID_LAST + 1);
    AssertMsgReturn(idChunk > NIL_GMM_CHUNKID, ("%#x\n", idChunk), NIL_GMM_CHUNKID);
    AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk), NIL_GMM_CHUNKID);

    return pGMM->idChunkPrev = idChunk;
}


/**
 * Registers a new chunk of memory.
 *
 * This is called by both gmmR0AllocateOneChunk and GMMR0SeedChunk. The caller
 * must own the global lock.
 *
 * @returns VBox status code.
 * @param   pGMM            Pointer to the GMM instance.
 * @param   pSet            Pointer to the set.
 * @param   MemObj          The memory object for the chunk.
 * @param   hGVM            The affinity of the chunk. NIL_GVM_HANDLE for no
 *                          affinity.
 * @param   enmChunkType    Chunk type (continuous or non-continuous)
 * @param   ppChunk         Chunk address (out)
 */
static int gmmR0RegisterChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, RTR0MEMOBJ MemObj, uint16_t hGVM, GMMCHUNKTYPE enmChunkType, PGMMCHUNK *ppChunk = NULL)
{
    Assert(hGVM != NIL_GVM_HANDLE || pGMM->fBoundMemoryMode);

    int rc;
    PGMMCHUNK pChunk = (PGMMCHUNK)RTMemAllocZ(sizeof(*pChunk));
    if (pChunk)
    {
        /*
         * Initialize it.
         */
        pChunk->MemObj = MemObj;
        pChunk->cFree = GMM_CHUNK_NUM_PAGES;
        pChunk->hGVM = hGVM;
        pChunk->iFreeHead = 0;
        pChunk->enmType = enmChunkType;
        for (unsigned iPage = 0; iPage < RT_ELEMENTS(pChunk->aPages) - 1; iPage++)
        {
            pChunk->aPages[iPage].Free.u2State = GMM_PAGE_STATE_FREE;
            pChunk->aPages[iPage].Free.iNext = iPage + 1;
        }
        pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.u2State = GMM_PAGE_STATE_FREE;
        pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.iNext = UINT16_MAX;

        /*
         * Allocate a Chunk ID and insert it into the tree.
         * This has to be done behind the mutex of course.
         */
        if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
        {
            pChunk->Core.Key = gmmR0AllocateChunkId(pGMM);
            if (    pChunk->Core.Key != NIL_GMM_CHUNKID
                &&  pChunk->Core.Key <= GMM_CHUNKID_LAST
                &&  RTAvlU32Insert(&pGMM->pChunks, &pChunk->Core))
            {
                pGMM->cChunks++;
                gmmR0LinkChunk(pChunk, pSet);
                LogFlow(("gmmR0RegisterChunk: pChunk=%p id=%#x cChunks=%d\n", pChunk, pChunk->Core.Key, pGMM->cChunks));

                if (ppChunk)
                    *ppChunk = pChunk;

                GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
                return VINF_SUCCESS;
            }

            /* bail out */
            rc = VERR_INTERNAL_ERROR;
        }
        else
            rc = VERR_INTERNAL_ERROR_5;

        RTMemFree(pChunk);
    }
    else
        rc = VERR_NO_MEMORY;
    return rc;
}


/**
 * Allocates one new chunk and adds it to the specified free set.
 *
 * @returns VBox status code.
 * @param   pGMM            Pointer to the GMM instance.
 * @param   pSet            Pointer to the set.
 * @param   hGVM            The affinity of the new chunk.
 * @param   enmChunkType    Chunk type (continuous or non-continuous)
 * @param   ppChunk         Chunk address (out)
 *
 * @remarks Called owning the mutex; it is left temporarily while allocating
 *          the memory.
 */
static int gmmR0AllocateOneChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, uint16_t hGVM, GMMCHUNKTYPE enmChunkType, PGMMCHUNK *ppChunk = NULL)
{
    /*
     * Allocate the memory.
     */
    RTR0MEMOBJ MemObj;
    int rc;

    AssertCompile(GMM_CHUNK_SIZE == _2M);
    AssertReturn(enmChunkType == GMMCHUNKTYPE_NON_CONTINUOUS || enmChunkType == GMMCHUNKTYPE_CONTINUOUS, VERR_INVALID_PARAMETER);

    /* Leave the lock temporarily as the allocation might take a while. */
    RTSemFastMutexRelease(pGMM->Mtx);
    if (enmChunkType == GMMCHUNKTYPE_NON_CONTINUOUS)
        rc = RTR0MemObjAllocPhysNC(&MemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
    else
        rc = RTR0MemObjAllocPhysEx(&MemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS, GMM_CHUNK_SIZE);

    /* Grab the lock again. */
    int rc2 = RTSemFastMutexRequest(pGMM->Mtx);
    AssertRCReturn(rc2, rc2);

    if (RT_SUCCESS(rc))
    {
        rc = gmmR0RegisterChunk(pGMM, pSet, MemObj, hGVM, enmChunkType, ppChunk);
        if (RT_FAILURE(rc))
            RTR0MemObjFree(MemObj, false /* fFreeMappings */);
    }
    /** @todo Check that RTR0MemObjAllocPhysNC always returns VERR_NO_MEMORY on
     *        allocation failure. */
    return rc;
}


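/*
 * For illustration: the unlock/relock pattern used above, in isolation. The
 * slowOperation() call is hypothetical; the two points are that a fast mutex
 * must not be held across a potentially slow operation, and that a failure to
 * reacquire the mutex must take precedence over the operation's own status.
 * @code
 *    RTSemFastMutexRelease(hMtx);             // leave the lock before the slow bit
 *    int rc  = slowOperation();               // hypothetical long-running call
 *    int rc2 = RTSemFastMutexRequest(hMtx);   // grab the lock again
 *    AssertRCReturn(rc2, rc2);                // without the lock we cannot continue
 *    // only now is it safe to inspect rc and touch the shared state again
 * @endcode
 */

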
/**
 * Attempts to allocate more pages until the requested amount is met.
 *
 * @returns VBox status code.
 * @param   pGMM        Pointer to the GMM instance data.
 * @param   pGVM        The calling VM.
 * @param   pSet        Pointer to the free set to grow.
 * @param   cPages      The number of pages needed.
 *
 * @remarks Called owning the mutex, but will leave it temporarily while
 *          allocating the memory!
 */
static int gmmR0AllocateMoreChunks(PGMM pGMM, PGVM pGVM, PGMMCHUNKFREESET pSet, uint32_t cPages)
{
    Assert(!pGMM->fLegacyAllocationMode);

    if (!GMM_CHECK_SANITY_IN_LOOPS(pGMM))
        return VERR_INTERNAL_ERROR_4;

    if (!pGMM->fBoundMemoryMode)
    {
        /*
         * Try to steal free chunks from the other set first. (Only take 100% free chunks.)
         */
        PGMMCHUNKFREESET pOtherSet = pSet == &pGMM->Private ? &pGMM->Shared : &pGMM->Private;
        while (    pSet->cFreePages < cPages
               &&  pOtherSet->cFreePages >= GMM_CHUNK_NUM_PAGES)
        {
            PGMMCHUNK pChunk = pOtherSet->apLists[GMM_CHUNK_FREE_SET_LISTS - 1];
            while (pChunk && pChunk->cFree != GMM_CHUNK_NUM_PAGES)
                pChunk = pChunk->pFreeNext;
            if (!pChunk)
                break;

            gmmR0UnlinkChunk(pChunk);
            gmmR0LinkChunk(pChunk, pSet);
        }

        /*
         * If we still need more pages, allocate new chunks.
         * Note! We will leave the mutex while doing the allocation.
         */
        while (pSet->cFreePages < cPages)
        {
            int rc = gmmR0AllocateOneChunk(pGMM, pSet, pGVM->hSelf, GMMCHUNKTYPE_NON_CONTINUOUS);
            if (RT_FAILURE(rc))
                return rc;
            if (!GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
                return VERR_INTERNAL_ERROR_5;
        }
    }
    else
    {
        /*
         * The memory is bound to the VM allocating it, so we have to count
         * the free pages carefully as well as make sure we brand them with
         * our VM handle.
         *
         * Note! We will leave the mutex while doing the allocation.
         */
        uint16_t const hGVM = pGVM->hSelf;
        for (;;)
        {
            /* Count and see if we've reached the goal. */
            uint32_t cPagesFound = 0;
            for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists); i++)
                for (PGMMCHUNK pCur = pSet->apLists[i]; pCur; pCur = pCur->pFreeNext)
                    if (pCur->hGVM == hGVM)
                    {
                        cPagesFound += pCur->cFree;
                        if (cPagesFound >= cPages)
                            break;
                    }
            if (cPagesFound >= cPages)
                break;

            /* Allocate more. */
            int rc = gmmR0AllocateOneChunk(pGMM, pSet, hGVM, GMMCHUNKTYPE_NON_CONTINUOUS);
            if (RT_FAILURE(rc))
                return rc;
            if (!GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
                return VERR_INTERNAL_ERROR_5;
        }
    }

    return VINF_SUCCESS;
}


/**
 * Allocates one private page.
 *
 * Worker for gmmR0AllocatePages.
 *
 * @param   pGMM        Pointer to the GMM instance data.
 * @param   hGVM        The GVM handle of the VM requesting memory.
 * @param   pChunk      The chunk to allocate it from.
 * @param   pPageDesc   The page descriptor.
 */
static void gmmR0AllocatePage(PGMM pGMM, uint32_t hGVM, PGMMCHUNK pChunk, PGMMPAGEDESC pPageDesc)
{
    /* update the chunk stats. */
    if (pChunk->hGVM == NIL_GVM_HANDLE)
        pChunk->hGVM = hGVM;
    Assert(pChunk->cFree);
    pChunk->cFree--;
    pChunk->cPrivate++;

    /* unlink the first free page. */
    const uint32_t iPage = pChunk->iFreeHead;
    AssertReleaseMsg(iPage < RT_ELEMENTS(pChunk->aPages), ("%d\n", iPage));
    PGMMPAGE pPage = &pChunk->aPages[iPage];
    Assert(GMM_PAGE_IS_FREE(pPage));
    pChunk->iFreeHead = pPage->Free.iNext;
    Log3(("A pPage=%p iPage=%#x/%#x u2State=%d iFreeHead=%#x iNext=%#x\n",
          pPage, iPage, (pChunk->Core.Key << GMM_CHUNKID_SHIFT) | iPage,
          pPage->Common.u2State, pChunk->iFreeHead, pPage->Free.iNext));

    /* make the page private. */
    pPage->u = 0;
    AssertCompile(GMM_PAGE_STATE_PRIVATE == 0);
    pPage->Private.hGVM = hGVM;
    AssertCompile(NIL_RTHCPHYS >= GMM_GCPHYS_LAST);
    AssertCompile(GMM_GCPHYS_UNSHAREABLE >= GMM_GCPHYS_LAST);
    if (pPageDesc->HCPhysGCPhys <= GMM_GCPHYS_LAST)
        pPage->Private.pfn = pPageDesc->HCPhysGCPhys >> PAGE_SHIFT;
    else
        pPage->Private.pfn = GMM_PAGE_PFN_UNSHAREABLE; /* unshareable / unassigned - same thing. */

    /* update the page descriptor. */
    pPageDesc->HCPhysGCPhys = RTR0MemObjGetPagePhysAddr(pChunk->MemObj, iPage);
    Assert(pPageDesc->HCPhysGCPhys != NIL_RTHCPHYS);
    pPageDesc->idPage = (pChunk->Core.Key << GMM_CHUNKID_SHIFT) | iPage;
    pPageDesc->idSharedPage = NIL_GMM_PAGEID;
}


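/*
 * For illustration: the in/out contract of the page descriptor as consumed by
 * gmmR0AllocatePage above. Field semantics follow the code; the concrete guest
 * address is made up and pGMM/hGVM/pChunk stand for valid state.
 * @code
 *    GMMPAGEDESC Desc;
 *    Desc.HCPhysGCPhys = UINT64_C(0x00feed0000);   // in:  guest physical address (page aligned)
 *    Desc.idPage       = NIL_GMM_PAGEID;           // in:  no page assigned yet
 *    Desc.idSharedPage = NIL_GMM_PAGEID;
 *    gmmR0AllocatePage(pGMM, hGVM, pChunk, &Desc);
 *    // out: Desc.HCPhysGCPhys is the host physical address of the new page,
 *    //      Desc.idPage is (idChunk << GMM_CHUNKID_SHIFT) | iPage, and
 *    //      Desc.idSharedPage is NIL_GMM_PAGEID.
 * @endcode
 */

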
/**
 * Common worker for GMMR0AllocateHandyPages and GMMR0AllocatePages.
 *
 * @returns VBox status code:
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk or
 *          gmmR0AllocateMoreChunks is necessary.
 * @retval  VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
 * @retval  VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
 *          that is we're trying to allocate more than we've reserved.
 *
 * @param   pGMM        Pointer to the GMM instance data.
 * @param   pGVM        Pointer to the shared VM structure.
 * @param   cPages      The number of pages to allocate.
 * @param   paPages     Pointer to the page descriptors.
 *                      See GMMPAGEDESC for details on what is expected on input.
 * @param   enmAccount  The account to charge.
 */
static int gmmR0AllocatePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
{
    /*
     * Check allocation limits.
     */
    if (RT_UNLIKELY(pGMM->cAllocatedPages + cPages > pGMM->cMaxPages))
        return VERR_GMM_HIT_GLOBAL_LIMIT;

    switch (enmAccount)
    {
        case GMMACCOUNT_BASE:
            if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages > pGVM->gmm.s.Reserved.cBasePages))
            {
                Log(("gmmR0AllocatePages:Base: Reserved=%#llx Allocated+Ballooned+Requested=%#llx+%#llx+%#x!\n",
                     pGVM->gmm.s.Reserved.cBasePages, pGVM->gmm.s.Allocated.cBasePages, pGVM->gmm.s.cBalloonedPages, cPages));
                return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
            }
            break;
        case GMMACCOUNT_SHADOW:
            if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cShadowPages + cPages > pGVM->gmm.s.Reserved.cShadowPages))
            {
                Log(("gmmR0AllocatePages:Shadow: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
                     pGVM->gmm.s.Reserved.cShadowPages, pGVM->gmm.s.Allocated.cShadowPages, cPages));
                return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
            }
            break;
        case GMMACCOUNT_FIXED:
            if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cFixedPages + cPages > pGVM->gmm.s.Reserved.cFixedPages))
            {
                Log(("gmmR0AllocatePages:Fixed: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
                     pGVM->gmm.s.Reserved.cFixedPages, pGVM->gmm.s.Allocated.cFixedPages, cPages));
                return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
            }
            break;
        default:
            AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
    }

    /*
     * Check if we need to allocate more memory or not. In bound memory mode this
     * is a bit of extra work but it's easier to do it upfront than bailing out later.
     */
    PGMMCHUNKFREESET pSet = &pGMM->Private;
    if (pSet->cFreePages < cPages)
        return VERR_GMM_SEED_ME;
    if (pGMM->fBoundMemoryMode)
    {
        uint16_t hGVM = pGVM->hSelf;
        uint32_t cPagesFound = 0;
        for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists); i++)
            for (PGMMCHUNK pCur = pSet->apLists[i]; pCur; pCur = pCur->pFreeNext)
                if (pCur->hGVM == hGVM)
                {
                    cPagesFound += pCur->cFree;
                    if (cPagesFound >= cPages)
                        break;
                }
        if (cPagesFound < cPages)
            return VERR_GMM_SEED_ME;
    }

    /*
     * Pick the pages.
     * Make some effort to keep VMs sharing private chunks.
     */
    uint16_t hGVM = pGVM->hSelf;
    uint32_t iPage = 0;

    /* first round, pick from chunks with an affinity to the VM. */
    for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists) && iPage < cPages; i++)
    {
        PGMMCHUNK pCur = pSet->apLists[i];
        while (pCur && iPage < cPages)
        {
            PGMMCHUNK pNext = pCur->pFreeNext;

            if (    pCur->hGVM == hGVM
                &&  pCur->cFree < GMM_CHUNK_NUM_PAGES)
            {
                gmmR0UnlinkChunk(pCur);
                for (; pCur->cFree && iPage < cPages; iPage++)
                    gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
                gmmR0LinkChunk(pCur, pSet);
            }

            pCur = pNext;
        }
    }

    if (iPage < cPages)
    {
        /* second round, pick pages from the 100% empty chunks we just skipped above. */
        PGMMCHUNK pCur = pSet->apLists[RT_ELEMENTS(pSet->apLists) - 1];
        while (pCur && iPage < cPages)
        {
            PGMMCHUNK pNext = pCur->pFreeNext;

            if (    pCur->cFree == GMM_CHUNK_NUM_PAGES
                &&  (   pCur->hGVM == hGVM
                     || !pGMM->fBoundMemoryMode))
            {
                gmmR0UnlinkChunk(pCur);
                for (; pCur->cFree && iPage < cPages; iPage++)
                    gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
                gmmR0LinkChunk(pCur, pSet);
            }

            pCur = pNext;
        }
    }

    if (    iPage < cPages
        &&  !pGMM->fBoundMemoryMode)
    {
        /* third round, disregard affinity. */
        unsigned i = RT_ELEMENTS(pSet->apLists);
        while (i-- > 0 && iPage < cPages)
        {
            PGMMCHUNK pCur = pSet->apLists[i];
            while (pCur && iPage < cPages)
            {
                PGMMCHUNK pNext = pCur->pFreeNext;

                if (    pCur->cFree > GMM_CHUNK_NUM_PAGES / 2
                    &&  cPages >= GMM_CHUNK_NUM_PAGES / 2)
                    pCur->hGVM = hGVM; /* change chunk affinity */

                gmmR0UnlinkChunk(pCur);
                for (; pCur->cFree && iPage < cPages; iPage++)
                    gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
                gmmR0LinkChunk(pCur, pSet);

                pCur = pNext;
            }
        }
    }

    /*
     * Update the account.
     */
    switch (enmAccount)
    {
        case GMMACCOUNT_BASE:   pGVM->gmm.s.Allocated.cBasePages   += iPage; break;
        case GMMACCOUNT_SHADOW: pGVM->gmm.s.Allocated.cShadowPages += iPage; break;
        case GMMACCOUNT_FIXED:  pGVM->gmm.s.Allocated.cFixedPages  += iPage; break;
        default:
            AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
    }
    pGVM->gmm.s.cPrivatePages += iPage;
    pGMM->cAllocatedPages     += iPage;

    AssertMsgReturn(iPage == cPages, ("%u != %u\n", iPage, cPages), VERR_INTERNAL_ERROR);

    /*
     * Check if we've reached some threshold and should kick one or two VMs and tell
     * them to inflate their balloons a bit more... later.
     */

    return VINF_SUCCESS;
}


/**
 * Updates the previous allocations and allocates more pages.
 *
 * The handy pages are always taken from the 'base' memory account.
 * The allocated pages are not cleared and will contain random garbage.
 *
 * @returns VBox status code:
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NOT_OWNER if the caller is not an EMT.
 * @retval  VERR_GMM_PAGE_NOT_FOUND if one of the pages to update wasn't found.
 * @retval  VERR_GMM_PAGE_NOT_PRIVATE if one of the pages to update wasn't a
 *          private page.
 * @retval  VERR_GMM_PAGE_NOT_SHARED if one of the pages to update wasn't a
 *          shared page.
 * @retval  VERR_GMM_NOT_PAGE_OWNER if one of the pages to be updated wasn't
 *          owned by the VM.
 * @retval  VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
 * @retval  VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
 * @retval  VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
 *          that is we're trying to allocate more than we've reserved.
 *
 * @param   pVM             Pointer to the shared VM structure.
 * @param   idCpu           VCPU id
 * @param   cPagesToUpdate  The number of pages to update (starting from the head).
 * @param   cPagesToAlloc   The number of pages to allocate (starting from the head).
 * @param   paPages         The array of page descriptors.
 *                          See GMMPAGEDESC for details on what is expected on input.
 * @thread  EMT.
 */
GMMR0DECL(int) GMMR0AllocateHandyPages(PVM pVM, VMCPUID idCpu, uint32_t cPagesToUpdate, uint32_t cPagesToAlloc, PGMMPAGEDESC paPages)
{
    LogFlow(("GMMR0AllocateHandyPages: pVM=%p cPagesToUpdate=%#x cPagesToAlloc=%#x paPages=%p\n",
             pVM, cPagesToUpdate, cPagesToAlloc, paPages));

    /*
     * Validate, get basics and take the semaphore.
     * (This is a relatively busy path, so make predictions where possible.)
     */
    PGMM pGMM;
    GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
    PGVM pGVM;
    int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
    if (RT_FAILURE(rc))
        return rc;

    AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
    AssertMsgReturn(    (cPagesToUpdate && cPagesToUpdate < 1024)
                    ||  (cPagesToAlloc  && cPagesToAlloc  < 1024),
                    ("cPagesToUpdate=%#x cPagesToAlloc=%#x\n", cPagesToUpdate, cPagesToAlloc),
                    VERR_INVALID_PARAMETER);

    unsigned iPage = 0;
    for (; iPage < cPagesToUpdate; iPage++)
    {
        AssertMsgReturn(    (    paPages[iPage].HCPhysGCPhys <= GMM_GCPHYS_LAST
                             &&  !(paPages[iPage].HCPhysGCPhys & PAGE_OFFSET_MASK))
                        ||  paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS
                        ||  paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHAREABLE,
                        ("#%#x: %RHp\n", iPage, paPages[iPage].HCPhysGCPhys),
                        VERR_INVALID_PARAMETER);
        AssertMsgReturn(    paPages[iPage].idPage <= GMM_PAGEID_LAST
                        /*|| paPages[iPage].idPage == NIL_GMM_PAGEID*/,
                        ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
        AssertMsgReturn(    paPages[iPage].idSharedPage <= GMM_PAGEID_LAST
                        /*|| paPages[iPage].idSharedPage == NIL_GMM_PAGEID*/,
                        ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
    }

    for (; iPage < cPagesToAlloc; iPage++)
    {
        AssertMsgReturn(paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS,   ("#%#x: %RHp\n", iPage, paPages[iPage].HCPhysGCPhys), VERR_INVALID_PARAMETER);
        AssertMsgReturn(paPages[iPage].idPage       == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage),        VERR_INVALID_PARAMETER);
        AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage),  VERR_INVALID_PARAMETER);
    }

    rc = RTSemFastMutexRequest(pGMM->Mtx);
    AssertRC(rc);
    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    {
        /* No allocations before the initial reservation has been made! */
        if (RT_LIKELY(    pGVM->gmm.s.Reserved.cBasePages
                      &&  pGVM->gmm.s.Reserved.cFixedPages
                      &&  pGVM->gmm.s.Reserved.cShadowPages))
        {
            /*
             * Perform the updates.
             * Stop on the first error.
             */
            for (iPage = 0; iPage < cPagesToUpdate; iPage++)
            {
                if (paPages[iPage].idPage != NIL_GMM_PAGEID)
                {
                    PGMMPAGE pPage = gmmR0GetPage(pGMM, paPages[iPage].idPage);
                    if (RT_LIKELY(pPage))
                    {
                        if (RT_LIKELY(GMM_PAGE_IS_PRIVATE(pPage)))
                        {
                            if (RT_LIKELY(pPage->Private.hGVM == pGVM->hSelf))
                            {
                                AssertCompile(NIL_RTHCPHYS > GMM_GCPHYS_LAST && GMM_GCPHYS_UNSHAREABLE > GMM_GCPHYS_LAST);
                                if (RT_LIKELY(paPages[iPage].HCPhysGCPhys <= GMM_GCPHYS_LAST))
                                    pPage->Private.pfn = paPages[iPage].HCPhysGCPhys >> PAGE_SHIFT;
                                else if (paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHAREABLE)
                                    pPage->Private.pfn = GMM_PAGE_PFN_UNSHAREABLE;
                                /* else: NIL_RTHCPHYS nothing */

                                paPages[iPage].idPage = NIL_GMM_PAGEID;
                                paPages[iPage].HCPhysGCPhys = NIL_RTHCPHYS;
                            }
                            else
                            {
                                Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not owner! hGVM=%#x hSelf=%#x\n",
                                     iPage, paPages[iPage].idPage, pPage->Private.hGVM, pGVM->hSelf));
                                rc = VERR_GMM_NOT_PAGE_OWNER;
                                break;
                            }
                        }
                        else
                        {
                            Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not private! %.*Rhxs (type %d)\n", iPage, paPages[iPage].idPage, sizeof(*pPage), pPage, pPage->Common.u2State));
                            rc = VERR_GMM_PAGE_NOT_PRIVATE;
                            break;
                        }
                    }
                    else
                    {
                        Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not found! (private)\n", iPage, paPages[iPage].idPage));
                        rc = VERR_GMM_PAGE_NOT_FOUND;
                        break;
                    }
                }

                if (paPages[iPage].idSharedPage != NIL_GMM_PAGEID)
                {
                    PGMMPAGE pPage = gmmR0GetPage(pGMM, paPages[iPage].idSharedPage);
                    if (RT_LIKELY(pPage))
                    {
                        if (RT_LIKELY(GMM_PAGE_IS_SHARED(pPage)))
                        {
                            AssertCompile(NIL_RTHCPHYS > GMM_GCPHYS_LAST && GMM_GCPHYS_UNSHAREABLE > GMM_GCPHYS_LAST);
                            Assert(pPage->Shared.cRefs);
                            Assert(pGVM->gmm.s.cSharedPages);
                            Assert(pGVM->gmm.s.Allocated.cBasePages);

                            Log(("GMMR0AllocateHandyPages: free shared page %x cRefs=%d\n", paPages[iPage].idSharedPage, pPage->Shared.cRefs));
                            pGVM->gmm.s.cSharedPages--;
                            pGVM->gmm.s.Allocated.cBasePages--;
                            if (!--pPage->Shared.cRefs)
                            {
                                gmmR0FreeSharedPage(pGMM, paPages[iPage].idSharedPage, pPage);
                            }
                            else
                            {
                                Assert(pGMM->cDuplicatePages);
                                pGMM->cDuplicatePages--;
                            }

                            paPages[iPage].idSharedPage = NIL_GMM_PAGEID;
                        }
                        else
                        {
                            Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not shared!\n", iPage, paPages[iPage].idSharedPage));
                            rc = VERR_GMM_PAGE_NOT_SHARED;
                            break;
                        }
                    }
                    else
                    {
                        Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not found! (shared)\n", iPage, paPages[iPage].idSharedPage));
                        rc = VERR_GMM_PAGE_NOT_FOUND;
                        break;
                    }
                }
            }

            /*
             * Join paths with GMMR0AllocatePages for the allocation.
             * Note! gmmR0AllocateMoreChunks may leave the protection of the mutex!
             */
            while (RT_SUCCESS(rc))
            {
                rc = gmmR0AllocatePages(pGMM, pGVM, cPagesToAlloc, paPages, GMMACCOUNT_BASE);
                if (    rc != VERR_GMM_SEED_ME
                    ||  pGMM->fLegacyAllocationMode)
                    break;
                rc = gmmR0AllocateMoreChunks(pGMM, pGVM, &pGMM->Private, cPagesToAlloc);
            }
        }
        else
            rc = VERR_WRONG_ORDER;

        GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
    }
    else
        rc = VERR_INTERNAL_ERROR_5;

    RTSemFastMutexRelease(pGMM->Mtx);
    LogFlow(("GMMR0AllocateHandyPages: returns %Rrc\n", rc));
    return rc;
}


/**
 * Allocates one or more pages.
 *
 * This is typically used for ROMs and MMIO2 (VRAM) during VM creation.
 * The allocated pages are not cleared and will contain random garbage.
 *
 * @returns VBox status code:
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NOT_OWNER if the caller is not an EMT.
 * @retval  VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
 * @retval  VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
 * @retval  VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
 *          that is we're trying to allocate more than we've reserved.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   idCpu       VCPU id
 * @param   cPages      The number of pages to allocate.
 * @param   paPages     Pointer to the page descriptors.
 *                      See GMMPAGEDESC for details on what is expected on input.
 * @param   enmAccount  The account to charge.
 *
 * @thread  EMT.
 */
GMMR0DECL(int) GMMR0AllocatePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
{
    LogFlow(("GMMR0AllocatePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));

    /*
     * Validate, get basics and take the semaphore.
     */
    PGMM pGMM;
    GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
    PGVM pGVM;
    int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
    if (RT_FAILURE(rc))
        return rc;

    AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
    AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
    AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);

    for (unsigned iPage = 0; iPage < cPages; iPage++)
    {
        AssertMsgReturn(    paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS
                        ||  paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHAREABLE
                        ||  (    enmAccount == GMMACCOUNT_BASE
                             &&  paPages[iPage].HCPhysGCPhys <= GMM_GCPHYS_LAST
                             &&  !(paPages[iPage].HCPhysGCPhys & PAGE_OFFSET_MASK)),
                        ("#%#x: %RHp enmAccount=%d\n", iPage, paPages[iPage].HCPhysGCPhys, enmAccount),
                        VERR_INVALID_PARAMETER);
        AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
        AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
    }

    rc = RTSemFastMutexRequest(pGMM->Mtx);
    AssertRC(rc);
    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    {
        /* No allocations before the initial reservation has been made! */
        if (RT_LIKELY(    pGVM->gmm.s.Reserved.cBasePages
                      &&  pGVM->gmm.s.Reserved.cFixedPages
                      &&  pGVM->gmm.s.Reserved.cShadowPages))
        {
            /*
             * gmmR0AllocatePages seed loop.
             * Note! gmmR0AllocateMoreChunks may leave the protection of the mutex!
             */
            while (RT_SUCCESS(rc))
            {
                rc = gmmR0AllocatePages(pGMM, pGVM, cPages, paPages, enmAccount);
                if (    rc != VERR_GMM_SEED_ME
                    ||  pGMM->fLegacyAllocationMode)
                    break;
                rc = gmmR0AllocateMoreChunks(pGMM, pGVM, &pGMM->Private, cPages);
            }
        }
        else
            rc = VERR_WRONG_ORDER;

        GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
    }
    else
        rc = VERR_INTERNAL_ERROR_5;

    RTSemFastMutexRelease(pGMM->Mtx);
    LogFlow(("GMMR0AllocatePages: returns %Rrc\n", rc));
    return rc;
}


/**
 * VMMR0 request wrapper for GMMR0AllocatePages.
 *
 * @returns see GMMR0AllocatePages.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   idCpu       VCPU id
 * @param   pReq        The request packet.
 */
GMMR0DECL(int) GMMR0AllocatePagesReq(PVM pVM, VMCPUID idCpu, PGMMALLOCATEPAGESREQ pReq)
{
    /*
     * Validate input and pass it on.
     */
    AssertPtrReturn(pVM, VERR_INVALID_POINTER);
    AssertPtrReturn(pReq, VERR_INVALID_POINTER);
    AssertMsgReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[0]),
                    ("%#x < %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[0])),
                    VERR_INVALID_PARAMETER);
    AssertMsgReturn(pReq->Hdr.cbReq == RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[pReq->cPages]),
                    ("%#x != %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[pReq->cPages])),
                    VERR_INVALID_PARAMETER);

    return GMMR0AllocatePages(pVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
}
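

/*
 * For illustration: the exact size relation the wrapper above enforces on the
 * variable sized request packet. For a hypothetical request carrying two page
 * descriptors:
 * @code
 *    // pReq allocated elsewhere, large enough for two aPages[] entries:
 *    pReq->cPages    = 2;
 *    pReq->Hdr.cbReq = RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[2]);
 *    // i.e. cbReq must cover the header and exactly cPages aPages entries,
 *    // no more and no less, or the request is rejected.
 * @endcode
 */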
/**
 * Allocates a large page to represent guest RAM.
 *
 * The allocated pages are not cleared and will contain random garbage.
 *
 * @returns VBox status code:
 * @retval VINF_SUCCESS on success.
 * @retval VERR_NOT_OWNER if the caller is not an EMT.
 * @retval VERR_NOT_SUPPORTED in legacy allocation mode.
 * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
 * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
 *         that is we're trying to allocate more than we've reserved.
 * @param pVM Pointer to the shared VM structure.
 * @param idCpu VCPU id
 * @param cbPage Large page size; must equal GMM_CHUNK_SIZE.
 * @param pIdPage Where to return the ID of the first page in the chunk.
 * @param pHCPhys Where to return the host physical address of the chunk.
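 *
 * Call sketch; cbPage must equal GMM_CHUNK_SIZE as asserted below:
 * @code
 * uint32_t idPage;
 * RTHCPHYS HCPhys;
 * int rc = GMMR0AllocateLargePage(pVM, idCpu, GMM_CHUNK_SIZE, &idPage, &HCPhys);
 * @endcode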
*/
GMMR0DECL(int) GMMR0AllocateLargePage(PVM pVM, VMCPUID idCpu, uint32_t cbPage, uint32_t *pIdPage, RTHCPHYS *pHCPhys)
{
LogFlow(("GMMR0AllocateLargePage: pVM=%p cbPage=%x\n", pVM, cbPage));
AssertReturn(cbPage == GMM_CHUNK_SIZE, VERR_INVALID_PARAMETER);
AssertPtrReturn(pIdPage, VERR_INVALID_PARAMETER);
AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
/*
* Validate, get basics and take the semaphore.
*/
PGMM pGMM;
GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
PGVM pGVM;
int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
if (RT_FAILURE(rc))
return rc;
/* Not supported in legacy mode where we allocate the memory in ring 3 and lock it in ring 0. */
if (pGMM->fLegacyAllocationMode)
return VERR_NOT_SUPPORTED;
*pHCPhys = NIL_RTHCPHYS;
*pIdPage = NIL_GMM_PAGEID;
rc = RTSemFastMutexRequest(pGMM->Mtx);
AssertRCReturn(rc, rc);
if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
{
const unsigned cPages = (GMM_CHUNK_SIZE >> PAGE_SHIFT);
PGMMCHUNK pChunk;
GMMPAGEDESC PageDesc;
if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages > pGVM->gmm.s.Reserved.cBasePages))
{
Log(("GMMR0AllocateLargePage: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
pGVM->gmm.s.Reserved.cBasePages, pGVM->gmm.s.Allocated.cBasePages, cPages));
RTSemFastMutexRelease(pGMM->Mtx);
return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
}
    /* Allocate a new contiguous chunk. */
rc = gmmR0AllocateOneChunk(pGMM, &pGMM->Private, pGVM->hSelf, GMMCHUNKTYPE_CONTINUOUS, &pChunk);
if (RT_FAILURE(rc))
{
RTSemFastMutexRelease(pGMM->Mtx);
return rc;
}
/* Unlink the new chunk from the free list. */
gmmR0UnlinkChunk(pChunk);
/* Allocate all pages. */
gmmR0AllocatePage(pGMM, pGVM->hSelf, pChunk, &PageDesc);
/* Return the first page as we'll use the whole chunk as one big page. */
*pIdPage = PageDesc.idPage;
*pHCPhys = PageDesc.HCPhysGCPhys;
for (unsigned i = 1; i < cPages; i++)
gmmR0AllocatePage(pGMM, pGVM->hSelf, pChunk, &PageDesc);
/* Update accounting. */
pGVM->gmm.s.Allocated.cBasePages += cPages;
pGVM->gmm.s.cPrivatePages += cPages;
pGMM->cAllocatedPages += cPages;
        gmmR0LinkChunk(pChunk, &pGMM->Private);
        GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
}
else
rc = VERR_INTERNAL_ERROR_5;
RTSemFastMutexRelease(pGMM->Mtx);
LogFlow(("GMMR0AllocatePages: returns %Rrc\n", rc));
return rc;
}
/**
* Free a large page
*
* @returns VBox status code:
* @param pVM Pointer to the shared VM structure.
* @param idCpu VCPU id
* @param idPage Large page id
*/
GMMR0DECL(int) GMMR0FreeLargePage(PVM pVM, VMCPUID idCpu, uint32_t idPage)
{
LogFlow(("GMMR0FreeLargePage: pVM=%p idPage=%x\n", pVM, idPage));
/*
* Validate, get basics and take the semaphore.
*/
PGMM pGMM;
GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
PGVM pGVM;
int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
if (RT_FAILURE(rc))
return rc;
/* Not supported in legacy mode where we allocate the memory in ring 3 and lock it in ring 0. */
if (pGMM->fLegacyAllocationMode)
return VERR_NOT_SUPPORTED;
rc = RTSemFastMutexRequest(pGMM->Mtx);
AssertRC(rc);
if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
{
const unsigned cPages = (GMM_CHUNK_SIZE >> PAGE_SHIFT);
if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages < cPages))
{
Log(("GMMR0FreeLargePage: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cBasePages, cPages));
RTSemFastMutexRelease(pGMM->Mtx);
return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
}
PGMMPAGE pPage = gmmR0GetPage(pGMM, idPage);
if ( RT_LIKELY(pPage)
&& RT_LIKELY(GMM_PAGE_IS_PRIVATE(pPage)))
{
PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
Assert(pChunk);
Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
Assert(pChunk->cPrivate > 0);
/* Release the memory immediately. */
gmmR0FreeChunk(pGMM, NULL, pChunk);
/* Update accounting. */
pGVM->gmm.s.Allocated.cBasePages -= cPages;
pGVM->gmm.s.cPrivatePages -= cPages;
pGMM->cAllocatedPages -= cPages;
}
else
            rc = VERR_GMM_PAGE_NOT_FOUND;
        GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
    }
else
rc = VERR_INTERNAL_ERROR_5;
RTSemFastMutexRelease(pGMM->Mtx);
LogFlow(("GMMR0FreeLargePage: returns %Rrc\n", rc));
return rc;
}
/**
* VMMR0 request wrapper for GMMR0FreeLargePage.
*
* @returns see GMMR0FreeLargePage.
* @param pVM Pointer to the shared VM structure.
* @param idCpu VCPU id
* @param pReq The request packet.
*/
GMMR0DECL(int) GMMR0FreeLargePageReq(PVM pVM, VMCPUID idCpu, PGMMFREELARGEPAGEREQ pReq)
{
/*
* Validate input and pass it on.
*/
AssertPtrReturn(pVM, VERR_INVALID_POINTER);
AssertPtrReturn(pReq, VERR_INVALID_POINTER);
    AssertMsgReturn(pReq->Hdr.cbReq == sizeof(GMMFREELARGEPAGEREQ),
                    ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(GMMFREELARGEPAGEREQ)),
VERR_INVALID_PARAMETER);
return GMMR0FreeLargePage(pVM, idCpu, pReq->idPage);
}
/**
* Frees a chunk, giving it back to the host OS.
*
* @param pGMM Pointer to the GMM instance.
* @param pGVM This is set when called from GMMR0CleanupVM so we can
* unmap and free the chunk in one go.
* @param pChunk The chunk to free.
*/
static void gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk)
{
Assert(pChunk->Core.Key != NIL_GMM_CHUNKID);
/*
     * Cleanup hack! Unmap the chunk from the caller's address space.
*/
if ( pChunk->cMappings
&& pGVM)
gmmR0UnmapChunk(pGMM, pGVM, pChunk);
/*
* If there are current mappings of the chunk, then request the
* VMs to unmap them. Reposition the chunk in the free list so
* it won't be a likely candidate for allocations.
*/
if (pChunk->cMappings)
{
/** @todo R0 -> VM request */
/* The chunk can be owned by more than one VM if fBoundMemoryMode is false! */
Log(("gmmR0FreeChunk: chunk still has %d mappings; don't free!\n", pChunk->cMappings));
}
else
{
/*
         * Try to free the memory object.
*/
int rc = RTR0MemObjFree(pChunk->MemObj, false /* fFreeMappings */);
if (RT_SUCCESS(rc))
{
pChunk->MemObj = NIL_RTR0MEMOBJ;
/*
* Unlink it from everywhere.
*/
gmmR0UnlinkChunk(pChunk);
PAVLU32NODECORE pCore = RTAvlU32Remove(&pGMM->pChunks, pChunk->Core.Key);
Assert(pCore == &pChunk->Core); NOREF(pCore);
PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(pChunk->Core.Key)];
if (pTlbe->pChunk == pChunk)
{
pTlbe->idChunk = NIL_GMM_CHUNKID;
pTlbe->pChunk = NULL;
}
Assert(pGMM->cChunks > 0);
pGMM->cChunks--;
/*
* Free the Chunk ID and struct.
*/
gmmR0FreeChunkId(pGMM, pChunk->Core.Key);
pChunk->Core.Key = NIL_GMM_CHUNKID;
RTMemFree(pChunk->paMappings);
pChunk->paMappings = NULL;
RTMemFree(pChunk);
}
else
AssertRC(rc);
}
}
/**
* Free page worker.
*
* The caller does all the statistic decrementing, we do all the incrementing.
*
* @param pGMM Pointer to the GMM instance data.
* @param pChunk Pointer to the chunk this page belongs to.
* @param idPage The Page ID.
* @param pPage Pointer to the page.
*/
static void gmmR0FreePageWorker(PGMM pGMM, PGMMCHUNK pChunk, uint32_t idPage, PGMMPAGE pPage)
{
Log3(("F pPage=%p iPage=%#x/%#x u2State=%d iFreeHead=%#x\n",
pPage, pPage - &pChunk->aPages[0], idPage, pPage->Common.u2State, pChunk->iFreeHead)); NOREF(idPage);
/*
* Put the page on the free list.
*/
pPage->u = 0;
pPage->Free.u2State = GMM_PAGE_STATE_FREE;
Assert(pChunk->iFreeHead < RT_ELEMENTS(pChunk->aPages) || pChunk->iFreeHead == UINT16_MAX);
pPage->Free.iNext = pChunk->iFreeHead;
pChunk->iFreeHead = pPage - &pChunk->aPages[0];
/*
* Update statistics (the cShared/cPrivate stats are up to date already),
* and relink the chunk if necessary.
*/
if ((pChunk->cFree & GMM_CHUNK_FREE_SET_MASK) == 0)
{
gmmR0UnlinkChunk(pChunk);
pChunk->cFree++;
gmmR0LinkChunk(pChunk, pChunk->cShared ? &pGMM->Shared : &pGMM->Private);
}
else
{
pChunk->cFree++;
pChunk->pSet->cFreePages++;
/*
* If the chunk becomes empty, consider giving memory back to the host OS.
*
         * The current strategy is to try to give it back if there are other chunks
         * in this free list, meaning there are at least 240 free pages in this
* category. Note that since there are probably mappings of the chunk,
* it won't be freed up instantly, which probably screws up this logic
* a bit...
*/
if (RT_UNLIKELY( pChunk->cFree == GMM_CHUNK_NUM_PAGES
&& pChunk->pFreeNext
&& pChunk->pFreePrev
&& !pGMM->fLegacyAllocationMode))
gmmR0FreeChunk(pGMM, NULL, pChunk);
}
}
/**
 * Frees a shared page; the page is known to exist and be valid.
*
* @param pGMM Pointer to the GMM instance.
* @param idPage The Page ID
* @param pPage The page structure.
*/
DECLINLINE(void) gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage)
{
PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
Assert(pChunk);
Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
Assert(pChunk->cShared > 0);
Assert(pGMM->cSharedPages > 0);
Assert(pGMM->cAllocatedPages > 0);
Assert(!pPage->Shared.cRefs);
pChunk->cShared--;
pGMM->cAllocatedPages--;
pGMM->cSharedPages--;
gmmR0FreePageWorker(pGMM, pChunk, idPage, pPage);
}
#ifdef VBOX_WITH_PAGE_SHARING
/**
 * Converts a private page to a shared page; the page is known to exist and be valid.
*
* @param pGMM Pointer to the GMM instance.
* @param pGVM Pointer to the GVM instance.
* @param HCPhys Host physical address
* @param idPage The Page ID
* @param pPage The page structure.
*/
DECLINLINE(void) gmmR0ConvertToSharedPage(PGMM pGMM, PGVM pGVM, RTHCPHYS HCPhys, uint32_t idPage, PGMMPAGE pPage)
{
PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
Assert(pChunk);
Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
Assert(GMM_PAGE_IS_PRIVATE(pPage));
pChunk->cPrivate--;
pChunk->cShared++;
pGMM->cSharedPages++;
pGVM->gmm.s.cSharedPages++;
pGVM->gmm.s.cPrivatePages--;
/* Modify the page structure. */
pPage->Shared.pfn = (uint32_t)(uint64_t)(HCPhys >> PAGE_SHIFT);
pPage->Shared.cRefs = 1;
pPage->Common.u2State = GMM_PAGE_STATE_SHARED;
}
/**
 * Increases the use count of a shared page; the page is known to exist and be valid.
*
* @param pGMM Pointer to the GMM instance.
* @param pGVM Pointer to the GVM instance.
* @param pPage The page structure.
*/
DECLINLINE(void) gmmR0UseSharedPage(PGMM pGMM, PGVM pGVM, PGMMPAGE pPage)
{
Assert(pGMM->cSharedPages > 0);
Assert(pGMM->cAllocatedPages > 0);
pGMM->cDuplicatePages++;
pPage->Shared.cRefs++;
pGVM->gmm.s.cSharedPages++;
pGVM->gmm.s.Allocated.cBasePages++;
}
#endif
/**
 * Frees a private page; the page is known to exist and be valid.
*
* @param pGMM Pointer to the GMM instance.
* @param idPage The Page ID
* @param pPage The page structure.
*/
DECLINLINE(void) gmmR0FreePrivatePage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage)
{
PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
Assert(pChunk);
Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
Assert(pChunk->cPrivate > 0);
Assert(pGMM->cAllocatedPages > 0);
pChunk->cPrivate--;
pGMM->cAllocatedPages--;
gmmR0FreePageWorker(pGMM, pChunk, idPage, pPage);
}
/**
* Common worker for GMMR0FreePages and GMMR0BalloonedPages.
*
* @returns VBox status code:
* @retval xxx
*
* @param pGMM Pointer to the GMM instance data.
* @param pGVM Pointer to the shared VM structure.
* @param cPages The number of pages to free.
* @param paPages Pointer to the page descriptors.
* @param enmAccount The account this relates to.
*/
static int gmmR0FreePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
{
/*
* Check that the request isn't impossible wrt to the account status.
*/
switch (enmAccount)
{
case GMMACCOUNT_BASE:
if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages < cPages))
{
Log(("gmmR0FreePages: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cBasePages, cPages));
return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
}
break;
case GMMACCOUNT_SHADOW:
if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cShadowPages < cPages))
{
Log(("gmmR0FreePages: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cShadowPages, cPages));
return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
}
break;
case GMMACCOUNT_FIXED:
if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cFixedPages < cPages))
{
Log(("gmmR0FreePages: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cFixedPages, cPages));
return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
}
break;
default:
AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
}
/*
* Walk the descriptors and free the pages.
*
* Statistics (except the account) are being updated as we go along,
* unlike the alloc code. Also, stop on the first error.
*/
int rc = VINF_SUCCESS;
uint32_t iPage;
for (iPage = 0; iPage < cPages; iPage++)
{
uint32_t idPage = paPages[iPage].idPage;
PGMMPAGE pPage = gmmR0GetPage(pGMM, idPage);
if (RT_LIKELY(pPage))
{
if (RT_LIKELY(GMM_PAGE_IS_PRIVATE(pPage)))
{
if (RT_LIKELY(pPage->Private.hGVM == pGVM->hSelf))
{
Assert(pGVM->gmm.s.cPrivatePages);
pGVM->gmm.s.cPrivatePages--;
gmmR0FreePrivatePage(pGMM, idPage, pPage);
}
else
{
Log(("gmmR0AllocatePages: #%#x/%#x: not owner! hGVM=%#x hSelf=%#x\n", iPage, idPage,
pPage->Private.hGVM, pGVM->hSelf));
rc = VERR_GMM_NOT_PAGE_OWNER;
break;
}
}
else if (RT_LIKELY(GMM_PAGE_IS_SHARED(pPage)))
{
Assert(pGVM->gmm.s.cSharedPages);
pGVM->gmm.s.cSharedPages--;
Assert(pPage->Shared.cRefs);
if (!--pPage->Shared.cRefs)
{
gmmR0FreeSharedPage(pGMM, idPage, pPage);
}
else
{
Assert(pGMM->cDuplicatePages);
pGMM->cDuplicatePages--;
}
}
else
{
Log(("gmmR0AllocatePages: #%#x/%#x: already free!\n", iPage, idPage));
rc = VERR_GMM_PAGE_ALREADY_FREE;
break;
}
}
else
{
Log(("gmmR0AllocatePages: #%#x/%#x: not found!\n", iPage, idPage));
rc = VERR_GMM_PAGE_NOT_FOUND;
break;
}
paPages[iPage].idPage = NIL_GMM_PAGEID;
}
/*
* Update the account.
*/
switch (enmAccount)
{
case GMMACCOUNT_BASE: pGVM->gmm.s.Allocated.cBasePages -= iPage; break;
case GMMACCOUNT_SHADOW: pGVM->gmm.s.Allocated.cShadowPages -= iPage; break;
case GMMACCOUNT_FIXED: pGVM->gmm.s.Allocated.cFixedPages -= iPage; break;
default:
AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
}
/*
* Any threshold stuff to be done here?
*/
return rc;
}
/**
* Free one or more pages.
*
* This is typically used at reset time or power off.
*
* @returns VBox status code:
* @retval xxx
*
* @param pVM Pointer to the shared VM structure.
* @param idCpu VCPU id
 * @param cPages The number of pages to free.
* @param paPages Pointer to the page descriptors containing the Page IDs for each page.
* @param enmAccount The account this relates to.
* @thread EMT.
*/
GMMR0DECL(int) GMMR0FreePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
{
LogFlow(("GMMR0FreePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));
/*
* Validate input and get the basics.
*/
PGMM pGMM;
GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
PGVM pGVM;
int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
if (RT_FAILURE(rc))
return rc;
AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
for (unsigned iPage = 0; iPage < cPages; iPage++)
AssertMsgReturn( paPages[iPage].idPage <= GMM_PAGEID_LAST
/*|| paPages[iPage].idPage == NIL_GMM_PAGEID*/,
("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
/*
* Take the semaphore and call the worker function.
*/
rc = RTSemFastMutexRequest(pGMM->Mtx);
AssertRC(rc);
if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
{
rc = gmmR0FreePages(pGMM, pGVM, cPages, paPages, enmAccount);
GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
}
else
rc = VERR_INTERNAL_ERROR_5;
RTSemFastMutexRelease(pGMM->Mtx);
LogFlow(("GMMR0FreePages: returns %Rrc\n", rc));
return rc;
}
/**
* VMMR0 request wrapper for GMMR0FreePages.
*
* @returns see GMMR0FreePages.
* @param pVM Pointer to the shared VM structure.
* @param idCpu VCPU id
* @param pReq The request packet.
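 *
 * As for allocation, the header size must cover the variable sized page
 * array; a sketch of what the assertions below enforce:
 * @code
 * pReq->Hdr.cbReq = RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[pReq->cPages]);
 * @endcode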
*/
GMMR0DECL(int) GMMR0FreePagesReq(PVM pVM, VMCPUID idCpu, PGMMFREEPAGESREQ pReq)
{
/*
* Validate input and pass it on.
*/
AssertPtrReturn(pVM, VERR_INVALID_POINTER);
AssertPtrReturn(pReq, VERR_INVALID_POINTER);
AssertMsgReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[0]),
("%#x < %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[0])),
VERR_INVALID_PARAMETER);
AssertMsgReturn(pReq->Hdr.cbReq == RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[pReq->cPages]),
("%#x != %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[pReq->cPages])),
VERR_INVALID_PARAMETER);
return GMMR0FreePages(pVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
}
/**
* Report back on a memory ballooning request.
*
* The request may or may not have been initiated by the GMM. If it was initiated
* by the GMM it is important that this function is called even if no pages were
* ballooned.
*
* @returns VBox status code:
* @retval VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH
* @retval VERR_GMM_ATTEMPT_TO_DEFLATE_TOO_MUCH
* @retval VERR_GMM_OVERCOMMITED_TRY_AGAIN_IN_A_BIT - reset condition
* indicating that we won't necessarily have sufficient RAM to boot
* the VM again and that it should pause until this changes (we'll try
* balloon some other VM). (For standard deflate we have little choice
* but to hope the VM won't use the memory that was returned to it.)
*
* @param pVM Pointer to the shared VM structure.
* @param idCpu VCPU id
* @param enmAction Inflate/deflate/reset
* @param cBalloonedPages The number of pages that was ballooned.
*
* @thread EMT.
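 *
 * Inflate sketch, reporting 256 newly ballooned pages (the count is purely
 * illustrative):
 * @code
 * int rc = GMMR0BalloonedPages(pVM, idCpu, GMMBALLOONACTION_INFLATE, 256);
 * @endcode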
*/
GMMR0DECL(int) GMMR0BalloonedPages(PVM pVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages)
{
LogFlow(("GMMR0BalloonedPages: pVM=%p enmAction=%d cBalloonedPages=%#x\n",
pVM, enmAction, cBalloonedPages));
AssertMsgReturn(cBalloonedPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cBalloonedPages), VERR_INVALID_PARAMETER);
/*
* Validate input and get the basics.
*/
PGMM pGMM;
GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
PGVM pGVM;
int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
if (RT_FAILURE(rc))
return rc;
/*
     * Take the semaphore and do some more validations.
*/
rc = RTSemFastMutexRequest(pGMM->Mtx);
AssertRC(rc);
if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
{
switch (enmAction)
{
case GMMBALLOONACTION_INFLATE:
{
if (RT_LIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cBalloonedPages <= pGVM->gmm.s.Reserved.cBasePages))
{
/*
* Record the ballooned memory.
*/
pGMM->cBalloonedPages += cBalloonedPages;
if (pGVM->gmm.s.cReqBalloonedPages)
{
                    /* Codepath never taken. Might be interesting in the future to request ballooned memory from guests in low memory conditions. */
AssertFailed();
pGVM->gmm.s.cBalloonedPages += cBalloonedPages;
pGVM->gmm.s.cReqActuallyBalloonedPages += cBalloonedPages;
Log(("GMMR0BalloonedPages: +%#x - Global=%#llx / VM: Total=%#llx Req=%#llx Actual=%#llx (pending)\n", cBalloonedPages,
pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqBalloonedPages, pGVM->gmm.s.cReqActuallyBalloonedPages));
}
else
{
pGVM->gmm.s.cBalloonedPages += cBalloonedPages;
Log(("GMMR0BalloonedPages: +%#x - Global=%#llx / VM: Total=%#llx (user)\n",
cBalloonedPages, pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages));
}
}
else
rc = VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
break;
}
case GMMBALLOONACTION_DEFLATE:
{
/* Deflate. */
if (pGVM->gmm.s.cBalloonedPages >= cBalloonedPages)
{
/*
* Record the ballooned memory.
*/
Assert(pGMM->cBalloonedPages >= cBalloonedPages);
pGMM->cBalloonedPages -= cBalloonedPages;
pGVM->gmm.s.cBalloonedPages -= cBalloonedPages;
if (pGVM->gmm.s.cReqDeflatePages)
{
                    AssertFailed(); /* This path is for later. */
Log(("GMMR0BalloonedPages: -%#x - Global=%#llx / VM: Total=%#llx Req=%#llx\n",
cBalloonedPages, pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqDeflatePages));
/*
* Anything we need to do here now when the request has been completed?
*/
pGVM->gmm.s.cReqDeflatePages = 0;
}
else
Log(("GMMR0BalloonedPages: -%#x - Global=%#llx / VM: Total=%#llx (user)\n",
cBalloonedPages, pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages));
}
else
rc = VERR_GMM_ATTEMPT_TO_DEFLATE_TOO_MUCH;
break;
}
case GMMBALLOONACTION_RESET:
{
/* Reset to an empty balloon. */
Assert(pGMM->cBalloonedPages >= pGVM->gmm.s.cBalloonedPages);
pGMM->cBalloonedPages -= pGVM->gmm.s.cBalloonedPages;
pGVM->gmm.s.cBalloonedPages = 0;
break;
}
default:
rc = VERR_INVALID_PARAMETER;
break;
}
GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
}
else
rc = VERR_INTERNAL_ERROR_5;
RTSemFastMutexRelease(pGMM->Mtx);
LogFlow(("GMMR0BalloonedPages: returns %Rrc\n", rc));
return rc;
}
/**
* VMMR0 request wrapper for GMMR0BalloonedPages.
*
* @returns see GMMR0BalloonedPages.
* @param pVM Pointer to the shared VM structure.
* @param idCpu VCPU id
* @param pReq The request packet.
*/
GMMR0DECL(int) GMMR0BalloonedPagesReq(PVM pVM, VMCPUID idCpu, PGMMBALLOONEDPAGESREQ pReq)
{
/*
* Validate input and pass it on.
*/
AssertPtrReturn(pVM, VERR_INVALID_POINTER);
AssertPtrReturn(pReq, VERR_INVALID_POINTER);
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(GMMBALLOONEDPAGESREQ),
("%#x < %#x\n", pReq->Hdr.cbReq, sizeof(GMMBALLOONEDPAGESREQ)),
VERR_INVALID_PARAMETER);
return GMMR0BalloonedPages(pVM, idCpu, pReq->enmAction, pReq->cBalloonedPages);
}
/**
* Return memory statistics for the hypervisor
*
* @returns VBox status code:
* @param pVM Pointer to the shared VM structure.
* @param pReq The request packet.
*/
GMMR0DECL(int) GMMR0QueryHypervisorMemoryStatsReq(PVM pVM, PGMMMEMSTATSREQ pReq)
{
/*
* Validate input and pass it on.
*/
AssertPtrReturn(pVM, VERR_INVALID_POINTER);
AssertPtrReturn(pReq, VERR_INVALID_POINTER);
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(GMMMEMSTATSREQ),
("%#x < %#x\n", pReq->Hdr.cbReq, sizeof(GMMMEMSTATSREQ)),
VERR_INVALID_PARAMETER);
/*
* Validate input and get the basics.
*/
PGMM pGMM;
GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
pReq->cAllocPages = pGMM->cAllocatedPages;
    pReq->cFreePages = (pGMM->cChunks << (GMM_CHUNK_SHIFT - PAGE_SHIFT)) - pGMM->cAllocatedPages;
pReq->cBalloonedPages = pGMM->cBalloonedPages;
pReq->cMaxPages = pGMM->cMaxPages;
pReq->cSharedPages = pGMM->cDuplicatePages;
GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
return VINF_SUCCESS;
}
/**
* Return memory statistics for the VM
*
* @returns VBox status code:
* @param pVM Pointer to the shared VM structure.
 * @param idCpu VCPU id
* @param pReq The request packet.
*/
GMMR0DECL(int) GMMR0QueryMemoryStatsReq(PVM pVM, VMCPUID idCpu, PGMMMEMSTATSREQ pReq)
{
/*
* Validate input and pass it on.
*/
AssertPtrReturn(pVM, VERR_INVALID_POINTER);
AssertPtrReturn(pReq, VERR_INVALID_POINTER);
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(GMMMEMSTATSREQ),
("%#x < %#x\n", pReq->Hdr.cbReq, sizeof(GMMMEMSTATSREQ)),
VERR_INVALID_PARAMETER);
/*
* Validate input and get the basics.
*/
PGMM pGMM;
GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
PGVM pGVM;
int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
if (RT_FAILURE(rc))
return rc;
/*
     * Take the semaphore and do some more validations.
*/
rc = RTSemFastMutexRequest(pGMM->Mtx);
AssertRC(rc);
if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
{
pReq->cAllocPages = pGVM->gmm.s.Allocated.cBasePages;
pReq->cBalloonedPages = pGVM->gmm.s.cBalloonedPages;
pReq->cMaxPages = pGVM->gmm.s.Reserved.cBasePages;
pReq->cFreePages = pReq->cMaxPages - pReq->cAllocPages;
}
else
rc = VERR_INTERNAL_ERROR_5;
RTSemFastMutexRelease(pGMM->Mtx);
LogFlow(("GMMR3QueryVMMemoryStats: returns %Rrc\n", rc));
return rc;
}
/**
* Unmaps a chunk previously mapped into the address space of the current process.
*
* @returns VBox status code.
* @param pGMM Pointer to the GMM instance data.
* @param pGVM Pointer to the Global VM structure.
* @param pChunk Pointer to the chunk to be unmapped.
*/
static int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk)
{
if (!pGMM->fLegacyAllocationMode)
{
/*
* Find the mapping and try unmapping it.
*/
for (uint32_t i = 0; i < pChunk->cMappings; i++)
{
Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);
if (pChunk->paMappings[i].pGVM == pGVM)
{
/* unmap */
int rc = RTR0MemObjFree(pChunk->paMappings[i].MapObj, false /* fFreeMappings (NA) */);
if (RT_SUCCESS(rc))
{
/* update the record. */
pChunk->cMappings--;
if (i < pChunk->cMappings)
pChunk->paMappings[i] = pChunk->paMappings[pChunk->cMappings];
pChunk->paMappings[pChunk->cMappings].MapObj = NIL_RTR0MEMOBJ;
pChunk->paMappings[pChunk->cMappings].pGVM = NULL;
}
return rc;
}
}
}
else if (pChunk->hGVM == pGVM->hSelf)
return VINF_SUCCESS;
Log(("gmmR0UnmapChunk: Chunk %#x is not mapped into pGVM=%p/%#x\n", pChunk->Core.Key, pGVM, pGVM->hSelf));
return VERR_GMM_CHUNK_NOT_MAPPED;
}
/**
* Maps a chunk into the user address space of the current process.
*
* @returns VBox status code.
* @param pGMM Pointer to the GMM instance data.
* @param pGVM Pointer to the Global VM structure.
* @param pChunk Pointer to the chunk to be mapped.
* @param ppvR3 Where to store the ring-3 address of the mapping.
 *                  In the VERR_GMM_CHUNK_ALREADY_MAPPED case, this will
 *                  contain the address of the existing mapping.
*/
static int gmmR0MapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)
{
/*
* If we're in legacy mode this is simple.
*/
if (pGMM->fLegacyAllocationMode)
{
if (pChunk->hGVM != pGVM->hSelf)
{
Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3));
return VERR_GMM_CHUNK_NOT_FOUND;
}
*ppvR3 = RTR0MemObjAddressR3(pChunk->MemObj);
return VINF_SUCCESS;
}
/*
* Check to see if the chunk is already mapped.
*/
for (uint32_t i = 0; i < pChunk->cMappings; i++)
{
Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);
if (pChunk->paMappings[i].pGVM == pGVM)
{
*ppvR3 = RTR0MemObjAddressR3(pChunk->paMappings[i].MapObj);
Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3));
#ifdef VBOX_WITH_PAGE_SHARING
/* The ring-3 chunk cache can be out of sync; don't fail. */
return VINF_SUCCESS;
#else
return VERR_GMM_CHUNK_ALREADY_MAPPED;
#endif
}
}
/*
* Do the mapping.
*/
RTR0MEMOBJ MapObj;
int rc = RTR0MemObjMapUser(&MapObj, pChunk->MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
if (RT_SUCCESS(rc))
{
/* reallocate the array? */
if ((pChunk->cMappings & 1 /*7*/) == 0)
{
void *pvMappings = RTMemRealloc(pChunk->paMappings, (pChunk->cMappings + 2 /*8*/) * sizeof(pChunk->paMappings[0]));
if (RT_UNLIKELY(!pvMappings))
{
rc = RTR0MemObjFree(MapObj, false /* fFreeMappings (NA) */);
AssertRC(rc);
return VERR_NO_MEMORY;
}
pChunk->paMappings = (PGMMCHUNKMAP)pvMappings;
}
/* insert new entry */
pChunk->paMappings[pChunk->cMappings].MapObj = MapObj;
pChunk->paMappings[pChunk->cMappings].pGVM = pGVM;
pChunk->cMappings++;
*ppvR3 = RTR0MemObjAddressR3(MapObj);
}
return rc;
}
/**
 * Checks whether a chunk is mapped into the specified VM.
 *
 * @returns true if mapped, false otherwise (returned as int).
 * @param pGVM Pointer to the Global VM structure.
 * @param pChunk Pointer to the chunk to check.
 * @param ppvR3 Where to store the ring-3 address of the mapping (NULL if not mapped).
*/
static int gmmR0IsChunkMapped(PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)
{
/*
* Check to see if the chunk is already mapped.
*/
for (uint32_t i = 0; i < pChunk->cMappings; i++)
{
Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);
if (pChunk->paMappings[i].pGVM == pGVM)
{
*ppvR3 = RTR0MemObjAddressR3(pChunk->paMappings[i].MapObj);
return true;
}
}
*ppvR3 = NULL;
return false;
}
/**
* Map a chunk and/or unmap another chunk.
*
* The mapping and unmapping applies to the current process.
*
 * This API does two things because it saves a kernel call per mapping
 * when the ring-3 mapping cache is full.
*
* @returns VBox status code.
* @param pVM The VM.
* @param idChunkMap The chunk to map. NIL_GMM_CHUNKID if nothing to map.
* @param idChunkUnmap The chunk to unmap. NIL_GMM_CHUNKID if nothing to unmap.
* @param ppvR3 Where to store the address of the mapped chunk. NULL is ok if nothing to map.
* @thread EMT
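 *
 * Sketch mapping one chunk while evicting another from the ring-3 cache in
 * the same call; idChunkNew and idChunkOld are hypothetical chunk IDs
 * supplied by the caller:
 * @code
 * RTR3PTR pvR3;
 * int rc = GMMR0MapUnmapChunk(pVM, idChunkNew, idChunkOld, &pvR3);
 * @endcode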
*/
GMMR0DECL(int) GMMR0MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
{
LogFlow(("GMMR0MapUnmapChunk: pVM=%p idChunkMap=%#x idChunkUnmap=%#x ppvR3=%p\n",
pVM, idChunkMap, idChunkUnmap, ppvR3));
/*
* Validate input and get the basics.
*/
PGMM pGMM;
GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
PGVM pGVM;
int rc = GVMMR0ByVM(pVM, &pGVM);
if (RT_FAILURE(rc))
return rc;
AssertCompile(NIL_GMM_CHUNKID == 0);
AssertMsgReturn(idChunkMap <= GMM_CHUNKID_LAST, ("%#x\n", idChunkMap), VERR_INVALID_PARAMETER);
AssertMsgReturn(idChunkUnmap <= GMM_CHUNKID_LAST, ("%#x\n", idChunkUnmap), VERR_INVALID_PARAMETER);
if ( idChunkMap == NIL_GMM_CHUNKID
&& idChunkUnmap == NIL_GMM_CHUNKID)
return VERR_INVALID_PARAMETER;
if (idChunkMap != NIL_GMM_CHUNKID)
{
AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
*ppvR3 = NIL_RTR3PTR;
}
/*
* Take the semaphore and do the work.
*
* The unmapping is done last since it's easier to undo a mapping than
     * undoing an unmapping. The ring-3 mapping cache cannot be so big
     * that it pushes the user virtual address space to within a chunk of
     * its limits, so no problem here.
*/
rc = RTSemFastMutexRequest(pGMM->Mtx);
AssertRC(rc);
if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
{
PGMMCHUNK pMap = NULL;
        if (idChunkMap != NIL_GMM_CHUNKID)
{
pMap = gmmR0GetChunk(pGMM, idChunkMap);
if (RT_LIKELY(pMap))
rc = gmmR0MapChunk(pGMM, pGVM, pMap, ppvR3);
else
{
Log(("GMMR0MapUnmapChunk: idChunkMap=%#x\n", idChunkMap));
rc = VERR_GMM_CHUNK_NOT_FOUND;
}
}
if ( idChunkUnmap != NIL_GMM_CHUNKID
&& RT_SUCCESS(rc))
{
PGMMCHUNK pUnmap = gmmR0GetChunk(pGMM, idChunkUnmap);
if (RT_LIKELY(pUnmap))
rc = gmmR0UnmapChunk(pGMM, pGVM, pUnmap);
else
{
Log(("GMMR0MapUnmapChunk: idChunkUnmap=%#x\n", idChunkUnmap));
rc = VERR_GMM_CHUNK_NOT_FOUND;
}
if (RT_FAILURE(rc) && pMap)
gmmR0UnmapChunk(pGMM, pGVM, pMap);
}
GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
}
else
rc = VERR_INTERNAL_ERROR_5;
RTSemFastMutexRelease(pGMM->Mtx);
LogFlow(("GMMR0MapUnmapChunk: returns %Rrc\n", rc));
return rc;
}
/**
* VMMR0 request wrapper for GMMR0MapUnmapChunk.
*
* @returns see GMMR0MapUnmapChunk.
* @param pVM Pointer to the shared VM structure.
* @param pReq The request packet.
*/
GMMR0DECL(int) GMMR0MapUnmapChunkReq(PVM pVM, PGMMMAPUNMAPCHUNKREQ pReq)
{
/*
* Validate input and pass it on.
*/
AssertPtrReturn(pVM, VERR_INVALID_POINTER);
AssertPtrReturn(pReq, VERR_INVALID_POINTER);
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
return GMMR0MapUnmapChunk(pVM, pReq->idChunkMap, pReq->idChunkUnmap, &pReq->pvR3);
}
/**
* Legacy mode API for supplying pages.
*
 * The specified user address points to an allocation-chunk-sized block that
 * will be locked down and used by the GMM when asked for pages.
*
* @returns VBox status code.
* @param pVM The VM.
* @param idCpu VCPU id
 * @param pvR3 Pointer to the chunk-sized memory block to lock down.
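 *
 * Illustrative ring-3 preparation of a seed block; the real caller would go
 * through the VMM request interface rather than call this ring-0 API
 * directly, so this only shows the parameter contract:
 * @code
 * void *pvSeed = RTMemPageAlloc(GMM_CHUNK_SIZE); // page aligned per contract
 * int rc = pvSeed
 *        ? GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)(uintptr_t)pvSeed)
 *        : VERR_NO_MEMORY;
 * @endcode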
*/
GMMR0DECL(int) GMMR0SeedChunk(PVM pVM, VMCPUID idCpu, RTR3PTR pvR3)
{
/*
* Validate input and get the basics.
*/
PGMM pGMM;
GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
PGVM pGVM;
int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
if (RT_FAILURE(rc))
return rc;
AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
AssertReturn(!(PAGE_OFFSET_MASK & pvR3), VERR_INVALID_POINTER);
if (!pGMM->fLegacyAllocationMode)
{
Log(("GMMR0SeedChunk: not in legacy allocation mode!\n"));
return VERR_NOT_SUPPORTED;
}
/*
* Lock the memory before taking the semaphore.
*/
RTR0MEMOBJ MemObj;
rc = RTR0MemObjLockUser(&MemObj, pvR3, GMM_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
if (RT_SUCCESS(rc))
{
/* Grab the lock. */
rc = RTSemFastMutexRequest(pGMM->Mtx);
AssertRCReturn(rc, rc);
/*
* Add a new chunk with our hGVM.
*/
rc = gmmR0RegisterChunk(pGMM, &pGMM->Private, MemObj, pGVM->hSelf, GMMCHUNKTYPE_NON_CONTINUOUS);
RTSemFastMutexRelease(pGMM->Mtx);
if (RT_FAILURE(rc))
RTR0MemObjFree(MemObj, false /* fFreeMappings */);
}
LogFlow(("GMMR0SeedChunk: rc=%d (pvR3=%p)\n", rc, pvR3));
return rc;
}
typedef struct
{
PAVLGCPTRNODECORE pNode;
char *pszModuleName;
char *pszVersion;
VBOXOSFAMILY enmGuestOS;
} GMMFINDMODULEBYNAME, *PGMMFINDMODULEBYNAME;
/**
* Tree enumeration callback for finding identical modules by name and version
*/
DECLCALLBACK(int) gmmR0CheckForIdenticalModule(PAVLGCPTRNODECORE pNode, void *pvUser)
{
PGMMFINDMODULEBYNAME pInfo = (PGMMFINDMODULEBYNAME)pvUser;
PGMMSHAREDMODULE pModule = (PGMMSHAREDMODULE)pNode;
if ( pInfo
&& pInfo->enmGuestOS == pModule->enmGuestOS
        /** @todo replace with RTStrNCmp */
&& !strcmp(pModule->szName, pInfo->pszModuleName)
&& !strcmp(pModule->szVersion, pInfo->pszVersion))
{
pInfo->pNode = pNode;
return 1; /* stop search */
}
return 0;
}
/**
* Registers a new shared module for the VM
*
* @returns VBox status code.
* @param pVM VM handle
* @param idCpu VCPU id
* @param enmGuestOS Guest OS type
* @param pszModuleName Module name
* @param pszVersion Module version
* @param GCBaseAddr Module base address
* @param cbModule Module size
* @param cRegions Number of shared region descriptors
* @param pRegions Shared region(s)
*/
GMMR0DECL(int) GMMR0RegisterSharedModule(PVM pVM, VMCPUID idCpu, VBOXOSFAMILY enmGuestOS, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule,
unsigned cRegions, VMMDEVSHAREDREGIONDESC *pRegions)
{
#ifdef VBOX_WITH_PAGE_SHARING
/*
* Validate input and get the basics.
*/
PGMM pGMM;
GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
PGVM pGVM;
int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
if (RT_FAILURE(rc))
return rc;
Log(("GMMR0RegisterSharedModule %s %s base %RGv size %x\n", pszModuleName, pszVersion, GCBaseAddr, cbModule));
/*
     * Take the semaphore and do some more validations.
*/
rc = RTSemFastMutexRequest(pGMM->Mtx);
AssertRC(rc);
if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
{
bool fNewModule = false;
/* Check if this module is already locally registered. */
PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)RTAvlGCPtrGet(&pGVM->gmm.s.pSharedModuleTree, GCBaseAddr);
if (!pRecVM)
{
pRecVM = (PGMMSHAREDMODULEPERVM)RTMemAllocZ(RT_OFFSETOF(GMMSHAREDMODULEPERVM, aRegions[cRegions]));
if (!pRecVM)
{
AssertFailed();
rc = VERR_NO_MEMORY;
goto end;
}
pRecVM->Core.Key = GCBaseAddr;
pRecVM->cRegions = cRegions;
/* Save the region data as they can differ between VMs (address space scrambling or simply different loading order) */
for (unsigned i = 0; i < cRegions; i++)
{
pRecVM->aRegions[i].GCRegionAddr = pRegions[i].GCRegionAddr;
pRecVM->aRegions[i].cbRegion = RT_ALIGN_T(pRegions[i].cbRegion, PAGE_SIZE, uint32_t);
pRecVM->aRegions[i].u32Alignment = 0;
pRecVM->aRegions[i].paHCPhysPageID = NULL; /* unused */
}
bool ret = RTAvlGCPtrInsert(&pGVM->gmm.s.pSharedModuleTree, &pRecVM->Core);
Assert(ret);
Log(("GMMR0RegisterSharedModule: new local module %s\n", pszModuleName));
fNewModule = true;
}
else
rc = VINF_PGM_SHARED_MODULE_ALREADY_REGISTERED;
/* Check if this module is already globally registered. */
PGMMSHAREDMODULE pGlobalModule = (PGMMSHAREDMODULE)RTAvlGCPtrGet(&pGMM->pGlobalSharedModuleTree, GCBaseAddr);
if ( !pGlobalModule
&& enmGuestOS == VBOXOSFAMILY_Windows64)
{
/* Two identical copies of e.g. Win7 x64 will typically not have a similar virtual address space layout for dlls or kernel modules.
* Try to find identical binaries based on name and version.
*/
GMMFINDMODULEBYNAME Info;
Info.pNode = NULL;
Info.pszVersion = pszVersion;
Info.pszModuleName = pszModuleName;
Info.enmGuestOS = enmGuestOS;
Log(("Try to find identical module %s\n", pszModuleName));
int ret = RTAvlGCPtrDoWithAll(&pGMM->pGlobalSharedModuleTree, true /* fFromLeft */, gmmR0CheckForIdenticalModule, &Info);
if (ret == 1)
{
Assert(Info.pNode);
pGlobalModule = (PGMMSHAREDMODULE)Info.pNode;
Log(("Found identical module at %RGv\n", pGlobalModule->Core.Key));
}
}
if (!pGlobalModule)
{
Assert(fNewModule);
Assert(!pRecVM->fCollision);
pGlobalModule = (PGMMSHAREDMODULE)RTMemAllocZ(RT_OFFSETOF(GMMSHAREDMODULE, aRegions[cRegions]));
if (!pGlobalModule)
{
AssertFailed();
rc = VERR_NO_MEMORY;
goto end;
}
pGlobalModule->Core.Key = GCBaseAddr;
pGlobalModule->cbModule = cbModule;
/* Input limit already safe; no need to check again. */
            /** @todo replace with RTStrCopy */
strcpy(pGlobalModule->szName, pszModuleName);
strcpy(pGlobalModule->szVersion, pszVersion);
pGlobalModule->enmGuestOS = enmGuestOS;
pGlobalModule->cRegions = cRegions;
for (unsigned i = 0; i < cRegions; i++)
{
Log(("New region %d base=%RGv size %x\n", i, pRegions[i].GCRegionAddr, pRegions[i].cbRegion));
pGlobalModule->aRegions[i].GCRegionAddr = pRegions[i].GCRegionAddr;
pGlobalModule->aRegions[i].cbRegion = RT_ALIGN_T(pRegions[i].cbRegion, PAGE_SIZE, uint32_t);
pGlobalModule->aRegions[i].u32Alignment = 0;
pGlobalModule->aRegions[i].paHCPhysPageID = NULL; /* uninitialized. */
}
/* Save reference. */
pRecVM->pGlobalModule = pGlobalModule;
pRecVM->fCollision = false;
pGlobalModule->cUsers++;
rc = VINF_SUCCESS;
bool ret = RTAvlGCPtrInsert(&pGMM->pGlobalSharedModuleTree, &pGlobalModule->Core);
Assert(ret);
Log(("GMMR0RegisterSharedModule: new global module %s\n", pszModuleName));
}
else
{
Assert(pGlobalModule->cUsers > 0);
/* Make sure the name and version are identical. */
            /** @todo replace with RTStrNCmp */
if ( !strcmp(pGlobalModule->szName, pszModuleName)
&& !strcmp(pGlobalModule->szVersion, pszVersion))
{
/* Save reference. */
pRecVM->pGlobalModule = pGlobalModule;
if ( fNewModule
                || pRecVM->fCollision == true) /* colliding module unregistered and new one registered since the last check */
{
pGlobalModule->cUsers++;
Log(("GMMR0RegisterSharedModule: using existing module %s cUser=%d!\n", pszModuleName, pGlobalModule->cUsers));
}
pRecVM->fCollision = false;
rc = VINF_SUCCESS;
}
else
{
Log(("GMMR0RegisterSharedModule: module %s collision!\n", pszModuleName));
pRecVM->fCollision = true;
rc = VINF_PGM_SHARED_MODULE_COLLISION;
goto end;
}
}
GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
}
else
rc = VERR_INTERNAL_ERROR_5;
end:
RTSemFastMutexRelease(pGMM->Mtx);
return rc;
#else
return VERR_NOT_IMPLEMENTED;
#endif
}
/**
* VMMR0 request wrapper for GMMR0RegisterSharedModule.
*
* @returns see GMMR0RegisterSharedModule.
* @param pVM Pointer to the shared VM structure.
* @param idCpu VCPU id
* @param pReq The request packet.
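 *
 * Header size sketch for a request carrying cRegions descriptors; this
 * matches the assertion below:
 * @code
 * pReq->Hdr.cbReq = RT_UOFFSETOF(GMMREGISTERSHAREDMODULEREQ, aRegions[pReq->cRegions]);
 * @endcode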
*/
GMMR0DECL(int) GMMR0RegisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMREGISTERSHAREDMODULEREQ pReq)
{
/*
* Validate input and pass it on.
*/
AssertPtrReturn(pVM, VERR_INVALID_POINTER);
AssertPtrReturn(pReq, VERR_INVALID_POINTER);
    AssertMsgReturn(pReq->Hdr.cbReq >= sizeof(*pReq) && pReq->Hdr.cbReq == RT_UOFFSETOF(GMMREGISTERSHAREDMODULEREQ, aRegions[pReq->cRegions]), ("%#x != %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMREGISTERSHAREDMODULEREQ, aRegions[pReq->cRegions])), VERR_INVALID_PARAMETER);
/* Pass back return code in the request packet to preserve informational codes. (VMMR3CallR0 chokes on them) */
pReq->rc = GMMR0RegisterSharedModule(pVM, idCpu, pReq->enmGuestOS, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule, pReq->cRegions, pReq->aRegions);
return VINF_SUCCESS;
}
/**
* Unregisters a shared module for the VM
*
* @returns VBox status code.
* @param pVM VM handle
* @param idCpu VCPU id
* @param pszModuleName Module name
* @param pszVersion Module version
* @param GCBaseAddr Module base address
* @param cbModule Module size
*/
GMMR0DECL(int) GMMR0UnregisterSharedModule(PVM pVM, VMCPUID idCpu, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule)
{
#ifdef VBOX_WITH_PAGE_SHARING
/*
* Validate input and get the basics.
*/
PGMM pGMM;
GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
PGVM pGVM;
int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
if (RT_FAILURE(rc))
return rc;
Log(("GMMR0UnregisterSharedModule %s %s base=%RGv size %x\n", pszModuleName, pszVersion, GCBaseAddr, cbModule));
/*
     * Take the semaphore and do some more validations.
*/
rc = RTSemFastMutexRequest(pGMM->Mtx);
AssertRC(rc);
if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
{
PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)RTAvlGCPtrGet(&pGVM->gmm.s.pSharedModuleTree, GCBaseAddr);
if (!pRecVM)
{
rc = VERR_PGM_SHARED_MODULE_NOT_FOUND;
goto end;
}
/* Remove reference to global shared module. */
if (!pRecVM->fCollision)
{
PGMMSHAREDMODULE pRec = pRecVM->pGlobalModule;
Assert(pRec);
if (pRec) /* paranoia */
{
Assert(pRec->cUsers);
pRec->cUsers--;
if (pRec->cUsers == 0)
{
/* Free the ranges, but leave the pages intact as there might still be references; they will be cleared by the COW mechanism. */
for (unsigned i = 0; i < pRec->cRegions; i++)
if (pRec->aRegions[i].paHCPhysPageID)
RTMemFree(pRec->aRegions[i].paHCPhysPageID);
/* Remove from the tree and free memory. */
RTAvlGCPtrRemove(&pGMM->pGlobalSharedModuleTree, GCBaseAddr);
RTMemFree(pRec);
}
}
else
rc = VERR_PGM_SHARED_MODULE_REGISTRATION_INCONSISTENCY;
}
else
Assert(!pRecVM->pGlobalModule);
/* Remove from the tree and free memory. */
RTAvlGCPtrRemove(&pGVM->gmm.s.pSharedModuleTree, GCBaseAddr);
RTMemFree(pRecVM);
GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
}
else
rc = VERR_INTERNAL_ERROR_5;
end:
RTSemFastMutexRelease(pGMM->Mtx);
return rc;
#else
return VERR_NOT_IMPLEMENTED;
#endif
}
/**
* VMMR0 request wrapper for GMMR0UnregisterSharedModule.
*
* @returns see GMMR0UnregisterSharedModule.
* @param pVM Pointer to the shared VM structure.
* @param idCpu VCPU id
* @param pReq The request packet.
*/
GMMR0DECL(int) GMMR0UnregisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMUNREGISTERSHAREDMODULEREQ pReq)
{
/*
* Validate input and pass it on.
*/
AssertPtrReturn(pVM, VERR_INVALID_POINTER);
AssertPtrReturn(pReq, VERR_INVALID_POINTER);
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
return GMMR0UnregisterSharedModule(pVM, idCpu, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule);
}
#ifdef VBOX_WITH_PAGE_SHARING
/**
* Checks specified shared module range for changes
*
* Performs the following tasks:
* - if a shared page is new, then it changes the GMM page type to shared and returns it in the paPageDesc array
* - if a shared page already exists, then it checks if the VM page is identical and if so frees the VM page and returns the shared page in the paPageDesc array
*
* Note: assumes the caller has acquired the GMM semaphore!!
*
* @returns VBox status code.
* @param pGVM Pointer to the GVM instance data.
* @param pModule Module description
* @param idxRegion Region index
* @param cPages Number of entries in the paPageDesc array
* @param paPageDesc Page descriptor array (in/out)
*/
GMMR0DECL(int) GMMR0SharedModuleCheckRange(PGVM pGVM, PGMMSHAREDMODULE pModule, unsigned idxRegion, unsigned cPages, PGMMSHAREDPAGEDESC paPageDesc)
{
int rc = VINF_SUCCESS;
PGMM pGMM;
GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
AssertReturn(idxRegion < pModule->cRegions, VERR_INVALID_PARAMETER);
AssertReturn(cPages == (pModule->aRegions[idxRegion].cbRegion >> PAGE_SHIFT), VERR_INVALID_PARAMETER);
Log(("GMMR0SharedModuleCheckRange %s base %RGv region %d cPages %d\n", pModule->szName, pModule->Core.Key, idxRegion, cPages));
PGMMSHAREDREGIONDESC pGlobalRegion = &pModule->aRegions[idxRegion];
if (!pGlobalRegion->paHCPhysPageID)
{
/* First time; create a page descriptor array. */
Log(("Allocate page descriptor array for %d pages\n", cPages));
pGlobalRegion->paHCPhysPageID = (uint32_t *)RTMemAlloc(cPages * sizeof(*pGlobalRegion->paHCPhysPageID));
if (!pGlobalRegion->paHCPhysPageID)
{
AssertFailed();
rc = VERR_NO_MEMORY;
goto end;
}
/* Invalidate all descriptors. */
for (unsigned i = 0; i < cPages; i++)
pGlobalRegion->paHCPhysPageID[i] = NIL_GMM_PAGEID;
}
/* Check all pages in the region. */
for (unsigned i = 0; i < cPages; i++)
{
/* Valid page present? */
if (paPageDesc[i].uHCPhysPageId != NIL_GMM_PAGEID)
{
/* We've seen this shared page for the first time? */
if (pGlobalRegion->paHCPhysPageID[i] == NIL_GMM_PAGEID)
{
new_shared_page:
Log(("New shared page guest %RGp host %RHp\n", paPageDesc[i].GCPhys, paPageDesc[i].HCPhys));
/* Easy case: just change the internal page type. */
PGMMPAGE pPage = gmmR0GetPage(pGMM, paPageDesc[i].uHCPhysPageId);
if (!pPage)
{
AssertFailed();
rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
goto end;
}
            AssertMsg(paPageDesc[i].GCPhys == (pPage->Private.pfn << 12), ("desc %RGp gmm %RGp\n", paPageDesc[i].GCPhys, (pPage->Private.pfn << 12)));
gmmR0ConvertToSharedPage(pGMM, pGVM, paPageDesc[i].HCPhys, paPageDesc[i].uHCPhysPageId, pPage);
/* Keep track of these references. */
pGlobalRegion->paHCPhysPageID[i] = paPageDesc[i].uHCPhysPageId;
}
else
{
uint8_t *pbLocalPage, *pbSharedPage;
uint8_t *pbChunk;
PGMMCHUNK pChunk;
Assert(paPageDesc[i].uHCPhysPageId != pGlobalRegion->paHCPhysPageID[i]);
Log(("Replace existing page guest %RGp host %RHp id %x -> id %x\n", paPageDesc[i].GCPhys, paPageDesc[i].HCPhys, paPageDesc[i].uHCPhysPageId, pGlobalRegion->paHCPhysPageID[i]));
/* Get the shared page source. */
PGMMPAGE pPage = gmmR0GetPage(pGMM, pGlobalRegion->paHCPhysPageID[i]);
if (!pPage)
{
AssertFailed();
rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
goto end;
}
if (pPage->Common.u2State != GMM_PAGE_STATE_SHARED)
{
/* Page was freed at some point; invalidate this entry. */
                /** @todo this isn't really bullet proof. */
Log(("Old shared page was freed -> create a new one\n"));
pGlobalRegion->paHCPhysPageID[i] = NIL_GMM_PAGEID;
goto new_shared_page; /* ugly goto */
}
Log(("Replace existing page guest host %RHp -> %RHp\n", paPageDesc[i].HCPhys, ((uint64_t)pPage->Shared.pfn) << PAGE_SHIFT));
/* Calculate the virtual address of the local page. */
pChunk = gmmR0GetChunk(pGMM, paPageDesc[i].uHCPhysPageId >> GMM_CHUNKID_SHIFT);
if (pChunk)
{
if (!gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
{
AssertFailed();
rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
goto end;
}
pbLocalPage = pbChunk + ((paPageDesc[i].uHCPhysPageId & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
}
else
{
AssertFailed();
rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
goto end;
}
/* Calculate the virtual address of the shared page. */
pChunk = gmmR0GetChunk(pGMM, pGlobalRegion->paHCPhysPageID[i] >> GMM_CHUNKID_SHIFT);
Assert(pChunk); /* can't fail as gmmR0GetPage succeeded. */
/* Get the virtual address of the physical page; map the chunk into the VM process if not already done. */
if (!gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
{
Log(("Map chunk into process!\n"));
rc = gmmR0MapChunk(pGMM, pGVM, pChunk, (PRTR3PTR)&pbChunk);
if (rc != VINF_SUCCESS)
{
AssertRC(rc);
goto end;
}
}
pbSharedPage = pbChunk + ((pGlobalRegion->paHCPhysPageID[i] & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
            /** @todo write ASMMemComparePage. */
if (memcmp(pbSharedPage, pbLocalPage, PAGE_SIZE))
{
Log(("Unexpected differences found between local and shared page; skip\n"));
/* Signal to the caller that this one hasn't changed. */
paPageDesc[i].uHCPhysPageId = NIL_GMM_PAGEID;
continue;
}
/* Free the old local page. */
GMMFREEPAGEDESC PageDesc;
PageDesc.idPage = paPageDesc[i].uHCPhysPageId;
rc = gmmR0FreePages(pGMM, pGVM, 1, &PageDesc, GMMACCOUNT_BASE);
AssertRC(rc);
gmmR0UseSharedPage(pGMM, pGVM, pPage);
/* Pass along the new physical address & page id. */
paPageDesc[i].HCPhys = ((uint64_t)pPage->Shared.pfn) << PAGE_SHIFT;
paPageDesc[i].uHCPhysPageId = pGlobalRegion->paHCPhysPageID[i];
}
}
}
end:
return rc;
}
/**
 * RTAvlGCPtrDestroy callback.
*
* @returns 0
* @param pNode The node to destroy.
* @param pvGVM The GVM handle.
*/
static DECLCALLBACK(int) gmmR0CleanupSharedModule(PAVLGCPTRNODECORE pNode, void *pvGVM)
{
PGVM pGVM = (PGVM)pvGVM;
PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)pNode;
PGMM pGMM;
GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
Assert(pRecVM->pGlobalModule || pRecVM->fCollision);
if (pRecVM->pGlobalModule)
{
PGMMSHAREDMODULE pRec = pRecVM->pGlobalModule;
Assert(pRec);
Assert(pRec->cUsers);
Log(("gmmR0CleanupSharedModule: %s %s cUsers=%d\n", pRec->szName, pRec->szVersion, pRec->cUsers));
pRec->cUsers--;
if (pRec->cUsers == 0)
{
for (unsigned i = 0; i < pRec->cRegions; i++)
if (pRec->aRegions[i].paHCPhysPageID)
RTMemFree(pRec->aRegions[i].paHCPhysPageID);
/* Remove from the tree and free memory. */
RTAvlGCPtrRemove(&pGMM->pGlobalSharedModuleTree, pRec->Core.Key);
RTMemFree(pRec);
}
}
RTMemFree(pRecVM);
return 0;
}
#endif
/**
* Removes all shared modules for the specified VM
*
* @returns VBox status code.
* @param pVM VM handle
* @param idCpu VCPU id
*/
GMMR0DECL(int) GMMR0ResetSharedModules(PVM pVM, VMCPUID idCpu)
{
#ifdef VBOX_WITH_PAGE_SHARING
/*
* Validate input and get the basics.
*/
PGMM pGMM;
GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
PGVM pGVM;
int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
if (RT_FAILURE(rc))
return rc;
/*
     * Take the semaphore and do some more validations.
*/
rc = RTSemFastMutexRequest(pGMM->Mtx);
AssertRC(rc);
if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
{
Log(("GMMR0ResetSharedModules\n"));
RTAvlGCPtrDestroy(&pGVM->gmm.s.pSharedModuleTree, gmmR0CleanupSharedModule, pGVM);
rc = VINF_SUCCESS;
GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
}
else
rc = VERR_INTERNAL_ERROR_5;
RTSemFastMutexRelease(pGMM->Mtx);
return rc;
#else
return VERR_NOT_IMPLEMENTED;
#endif
}
#ifdef VBOX_WITH_PAGE_SHARING
typedef struct
{
PGVM pGVM;
VMCPUID idCpu;
} GMMCHECKSHAREDMODULEINFO, *PGMMCHECKSHAREDMODULEINFO;
/**
* Tree enumeration callback for checking a shared module.
*/
DECLCALLBACK(int) gmmR0CheckSharedModule(PAVLGCPTRNODECORE pNode, void *pvUser)
{
PGMMCHECKSHAREDMODULEINFO pInfo = (PGMMCHECKSHAREDMODULEINFO)pvUser;
PGMMSHAREDMODULEPERVM pLocalModule = (PGMMSHAREDMODULEPERVM)pNode;
PGMMSHAREDMODULE pGlobalModule = pLocalModule->pGlobalModule;
if ( !pLocalModule->fCollision
&& pGlobalModule)
{
Log(("gmmR0CheckSharedModule: check %s %s base=%RGv size=%x collision=%d\n", pGlobalModule->szName, pGlobalModule->szVersion, pGlobalModule->Core.Key, pGlobalModule->cbModule, pLocalModule->fCollision));
PGMR0SharedModuleCheck(pInfo->pGVM->pVM, pInfo->pGVM, pInfo->idCpu, pGlobalModule, pLocalModule->cRegions, pLocalModule->aRegions);
}
return 0;
}
#endif
#ifdef DEBUG_sandervl
/**
* Setup for a GMMR0CheckSharedModules call (to allow log flush jumps back to ring 3)
*
* @returns VBox status code.
* @param pVM VM handle
*/
GMMR0DECL(int) GMMR0CheckSharedModulesStart(PVM pVM)
{
/*
* Validate input and get the basics.
*/
PGMM pGMM;
GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
/*
     * Take the semaphore and do some more validations.
*/
int rc = RTSemFastMutexRequest(pGMM->Mtx);
AssertRC(rc);
if (!GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
rc = VERR_INTERNAL_ERROR_5;
else
rc = VINF_SUCCESS;
return rc;
}
/**
* Clean up after a GMMR0CheckSharedModules call (to allow log flush jumps back to ring 3)
*
* @returns VBox status code.
* @param pVM VM handle
*/
GMMR0DECL(int) GMMR0CheckSharedModulesEnd(PVM pVM)
{
/*
* Validate input and get the basics.
*/
PGMM pGMM;
GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
RTSemFastMutexRelease(pGMM->Mtx);
return VINF_SUCCESS;
}
#endif
/**
* Check all shared modules for the specified VM
*
* @returns VBox status code.
* @param pVM VM handle
* @param pVCpu VMCPU handle
*/
GMMR0DECL(int) GMMR0CheckSharedModules(PVM pVM, PVMCPU pVCpu)
{
#ifdef VBOX_WITH_PAGE_SHARING
/*
* Validate input and get the basics.
*/
PGMM pGMM;
GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
PGVM pGVM;
int rc = GVMMR0ByVMAndEMT(pVM, pVCpu->idCpu, &pGVM);
if (RT_FAILURE(rc))
return rc;
# ifndef DEBUG_sandervl
/*
     * Take the semaphore and do some more validations.
*/
rc = RTSemFastMutexRequest(pGMM->Mtx);
AssertRC(rc);
# endif
if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
{
GMMCHECKSHAREDMODULEINFO Info;
Log(("GMMR0CheckSharedModules\n"));
Info.pGVM = pGVM;
Info.idCpu = pVCpu->idCpu;
RTAvlGCPtrDoWithAll(&pGVM->gmm.s.pSharedModuleTree, true /* fFromLeft */, gmmR0CheckSharedModule, &Info);
rc = VINF_SUCCESS;
Log(("GMMR0CheckSharedModules done!\n"));
GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
}
else
rc = VERR_INTERNAL_ERROR_5;
# ifndef DEBUG_sandervl
RTSemFastMutexRelease(pGMM->Mtx);
# endif
return rc;
#else
return VERR_NOT_IMPLEMENTED;
#endif
}
#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
typedef struct
{
PGVM pGVM;
PGMM pGMM;
uint8_t *pSourcePage;
bool fFoundDuplicate;
} GMMFINDDUPPAGEINFO, *PGMMFINDDUPPAGEINFO;
/**
* RTAvlU32DoWithAll callback.
*
* @returns 0
* @param pNode The node to search.
* @param pvInfo Pointer to the input parameters
*/
static DECLCALLBACK(int) gmmR0FindDupPageInChunk(PAVLU32NODECORE pNode, void *pvInfo)
{
PGMMCHUNK pChunk = (PGMMCHUNK)pNode;
PGMMFINDDUPPAGEINFO pInfo = (PGMMFINDDUPPAGEINFO)pvInfo;
PGVM pGVM = pInfo->pGVM;
PGMM pGMM = pInfo->pGMM;
uint8_t *pbChunk;
/* Only take chunks not mapped into this VM process; not entirely correct. */
if (!gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
{
int rc = gmmR0MapChunk(pGMM, pGVM, pChunk, (PRTR3PTR)&pbChunk);
if (rc != VINF_SUCCESS)
goto end;
/*
* Look for duplicate pages
*/
unsigned iPage = (GMM_CHUNK_SIZE >> PAGE_SHIFT);
while (iPage-- > 0)
{
if (GMM_PAGE_IS_PRIVATE(&pChunk->aPages[iPage]))
{
uint8_t *pbDestPage = pbChunk + (iPage << PAGE_SHIFT);
if (!memcmp(pInfo->pSourcePage, pbDestPage, PAGE_SIZE))
{
pInfo->fFoundDuplicate = true;
break;
}
}
}
gmmR0UnmapChunk(pGMM, pGVM, pChunk);
}
end:
if (pInfo->fFoundDuplicate)
return 1; /* stop search */
else
return 0;
}
/**
* Find a duplicate of the specified page in other active VMs
*
* @returns VBox status code.
* @param pVM VM handle
* @param pReq Request packet
*/
GMMR0DECL(int) GMMR0FindDuplicatePageReq(PVM pVM, PGMMFINDDUPLICATEPAGEREQ pReq)
{
/*
* Validate input and pass it on.
*/
AssertPtrReturn(pVM, VERR_INVALID_POINTER);
AssertPtrReturn(pReq, VERR_INVALID_POINTER);
AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
PGMM pGMM;
GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
/*
     * Take the semaphore and do some more validations.
*/
int rc = RTSemFastMutexRequest(pGMM->Mtx);
AssertRC(rc);
if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
{
PGVM pGVM;
rc = GVMMR0ByVM(pVM, &pGVM);
if (RT_FAILURE(rc))
goto end;
uint8_t *pbChunk;
PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, pReq->idPage >> GMM_CHUNKID_SHIFT);
if (!pChunk)
{
            AssertFailed();
            rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
            goto end;
}
if (!gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
{
            AssertFailed();
            rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
            goto end;
}
uint8_t *pbSourcePage = pbChunk + ((pReq->idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
PGMMPAGE pPage = gmmR0GetPage(pGMM, pReq->idPage);
if (!pPage)
{
AssertFailed();
rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
goto end;
}
GMMFINDDUPPAGEINFO Info;
Info.pGVM = pGVM;
Info.pGMM = pGMM;
Info.pSourcePage = pbSourcePage;
Info.fFoundDuplicate = false;
RTAvlU32DoWithAll(&pGMM->pChunks, true /* fFromLeft */, gmmR0FindDupPageInChunk, &Info);
pReq->fDuplicate = Info.fFoundDuplicate;
}
else
rc = VERR_INTERNAL_ERROR_5;
end:
RTSemFastMutexRelease(pGMM->Mtx);
return rc;
}
#endif /* VBOX_STRICT && HC_ARCH_BITS == 64 */