PGM.cpp revision 3241f1be564f7351b07ce8a807673fa77a7847bc
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * PGM - Page Manager and Monitor. (Mixing stuff here, not good?)
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * Copyright (C) 2006-2007 Sun Microsystems, Inc.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * This file is part of VirtualBox Open Source Edition (OSE), as
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * available from http://www.virtualbox.org. This file is free software;
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * you can redistribute it and/or modify it under the terms of the GNU
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * General Public License (GPL) as published by the Free Software
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * Foundation, in version 2 as it comes in the "COPYING" file of the
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * Clara, CA 95054 USA or visit http://www.sun.com if you need
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * additional information or have any questions.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync/** @page pg_pgm PGM - The Page Manager and Monitor
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @see grp_pgm,
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @ref pg_pgm_pool,
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @ref pg_pgm_phys.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @section sec_pgm_modes Paging Modes
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * There are three memory contexts: Host Context (HC), Guest Context (GC)
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * and intermediate context. When talking about paging HC can also be referred to
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * as "host paging", and GC referred to as "shadow paging".
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * We define three basic paging modes: 32-bit, PAE and AMD64. The host paging mode
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * is defined by the host operating system. The mode used in the shadow paging mode
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * depends on the host paging mode and what the mode the guest is currently in. The
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * following relation between the two is defined:
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @verbatim
cc66247640b520463f925a5533fc9e5de06aa982vboxsync Host > 32-bit | PAE | AMD64 |
cc66247640b520463f925a5533fc9e5de06aa982vboxsync Guest | | | |
cc66247640b520463f925a5533fc9e5de06aa982vboxsync ==v================================
cc66247640b520463f925a5533fc9e5de06aa982vboxsync 32-bit 32-bit PAE PAE
cc66247640b520463f925a5533fc9e5de06aa982vboxsync -------|--------|--------|--------|
cc66247640b520463f925a5533fc9e5de06aa982vboxsync PAE PAE PAE PAE
cc66247640b520463f925a5533fc9e5de06aa982vboxsync -------|--------|--------|--------|
cc66247640b520463f925a5533fc9e5de06aa982vboxsync AMD64 AMD64 AMD64 AMD64
cc66247640b520463f925a5533fc9e5de06aa982vboxsync -------|--------|--------|--------| @endverbatim
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * All configuration except those in the diagonal (upper left) are expected to
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * require special effort from the switcher (i.e. a bit slower).
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @section sec_pgm_shw The Shadow Memory Context
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * Because of guest context mappings requires PDPT and PML4 entries to allow
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * writing on AMD64, the two upper levels will have fixed flags whatever the
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * guest is thinking of using there. So, when shadowing the PD level we will
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * calculate the effective flags of PD and all the higher levels. In legacy
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * PAE mode this only applies to the PWT and PCD bits (the rest are
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * ignored/reserved/MBZ). We will ignore those bits for the present.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @section sec_pgm_int The Intermediate Memory Context
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * The world switch goes thru an intermediate memory context which purpose it is
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * to provide different mappings of the switcher code. All guest mappings are also
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * present in this context.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * The switcher code is mapped at the same location as on the host, at an
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * identity mapped location (physical equals virtual address), and at the
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * hypervisor location. The identity mapped location is for when the world
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * switches that involves disabling paging.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * PGM maintain page tables for 32-bit, PAE and AMD64 paging modes. This
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * simplifies switching guest CPU mode and consistency at the cost of more
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * code to do the work. All memory use for those page tables is located below
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * 4GB (this includes page tables for guest context mappings).
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @subsection subsec_pgm_int_gc Guest Context Mappings
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * During assignment and relocation of a guest context mapping the intermediate
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * memory context is used to verify the new location.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * Guest context mappings are currently restricted to below 4GB, for reasons
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * of simplicity. This may change when we implement AMD64 support.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @section sec_pgm_misc Misc
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @subsection subsec_pgm_misc_diff Differences Between Legacy PAE and Long Mode PAE
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * The differences between legacy PAE and long mode PAE are:
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * -# PDPE bits 1, 2, 5 and 6 are defined differently. In legacy mode they are
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * all marked down as must-be-zero, while in long mode 1, 2 and 5 have the
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * usual meanings while 6 is ignored (AMD). This means that upon switching to
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * legacy PAE mode we'll have to clear these bits and when going to long mode
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * they must be set. This applies to both intermediate and shadow contexts,
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * however we don't need to do it for the intermediate one since we're
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * executing with CR0.WP at that time.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * -# CR3 allows a 32-byte aligned address in legacy mode, while in long mode
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * a page aligned one is required.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @section sec_pgm_handlers Access Handlers
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * Placeholder.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @subsection sec_pgm_handlers_virt Virtual Access Handlers
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * Placeholder.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @subsection sec_pgm_handlers_virt Virtual Access Handlers
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * We currently implement three types of virtual access handlers: ALL, WRITE
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * and HYPERVISOR (WRITE). See PGMVIRTHANDLERTYPE for some more details.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * The HYPERVISOR access handlers is kept in a separate tree since it doesn't apply
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * to physical pages (PGMTREES::HyperVirtHandlers) and only needs to be consulted in
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * a special \#PF case. The ALL and WRITE are in the PGMTREES::VirtHandlers tree, the
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * rest of this section is going to be about these handlers.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * We'll go thru the life cycle of a handler and try make sense of it all, don't know
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * how successful this is gonna be...
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * 1. A handler is registered thru the PGMR3HandlerVirtualRegister and
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * PGMHandlerVirtualRegisterEx APIs. We check for conflicting virtual handlers
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * and create a new node that is inserted into the AVL tree (range key). Then
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * a full PGM resync is flagged (clear pool, sync cr3, update virtual bit of PGMPAGE).
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * 2. The following PGMSyncCR3/SyncCR3 operation will first invoke HandlerVirtualUpdate.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * 2a. HandlerVirtualUpdate will look up all the pages covered by virtual handlers
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * via the current guest CR3 and update the physical page -> virtual handler
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * translation. Needless to say, this doesn't exactly scale very well. If any changes
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * are detected, it will flag a virtual bit update just like we did on registration.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * PGMPHYS pages with changes will have their virtual handler state reset to NONE.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * 2b. The virtual bit update process will iterate all the pages covered by all the
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * virtual handlers and update the PGMPAGE virtual handler state to the max of all
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * virtual handlers on that page.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * 2c. Back in SyncCR3 we will now flush the entire shadow page cache to make sure
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * we don't miss any alias mappings of the monitored pages.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * 2d. SyncCR3 will then proceed with syncing the CR3 table.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * 3. \#PF(np,read) on a page in the range. This will cause it to be synced
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * read-only and resumed if it's a WRITE handler. If it's an ALL handler we
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * will call the handlers like in the next step. If the physical mapping has
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * changed we will - some time in the future - perform a handler callback
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * (optional) and update the physical -> virtual handler cache.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * 4. \#PF(,write) on a page in the range. This will cause the handler to
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * be invoked.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * 5. The guest invalidates the page and changes the physical backing or
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * unmaps it. This should cause the invalidation callback to be invoked
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * (it might not yet be 100% perfect). Exactly what happens next... is
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * this where we mess up and end up out of sync for a while?
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * 6. The handler is deregistered by the client via PGMHandlerVirtualDeregister.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * We will then set all PGMPAGEs in the physical -> virtual handler cache for
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * this handler to NONE and trigger a full PGM resync (basically the same
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * as int step 1). Which means 2 is executed again.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @subsubsection sub_sec_pgm_handler_virt_todo TODOs
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * There is a bunch of things that needs to be done to make the virtual handlers
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * work 100% correctly and work more efficiently.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * The first bit hasn't been implemented yet because it's going to slow the
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * whole mess down even more, and besides it seems to be working reliably for
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * our current uses. OTOH, some of the optimizations might end up more or less
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * implementing the missing bits, so we'll see.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * On the optimization side, the first thing to do is to try avoid unnecessary
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * cache flushing. Then try team up with the shadowing code to track changes
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * in mappings by means of access to them (shadow in), updates to shadows pages,
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * invlpg, and shadow PT discarding (perhaps).
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * Some idea that have popped up for optimization for current and new features:
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * - bitmap indicating where there are virtual handlers installed.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * (4KB => 2**20 pages, page 2**12 => covers 32-bit address space 1:1!)
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * - Further optimize this by min/max (needs min/max avl getters).
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * - Shadow page table entry bit (if any left)?
cc66247640b520463f925a5533fc9e5de06aa982vboxsync/** @page pg_pgm_phys PGM Physical Guest Memory Management
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * Objectives:
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * - Guest RAM over-commitment using memory ballooning,
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * zero pages and general page sharing.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * - Moving or mirroring a VM onto a different physical machine.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @subsection subsec_pgmPhys_Definitions Definitions
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * Allocation chunk - A RTR0MemObjAllocPhysNC object and the tracking
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * machinery associated with it.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @subsection subsec_pgmPhys_AllocPage Allocating a page.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * Initially we map *all* guest memory to the (per VM) zero page, which
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * means that none of the read functions will cause pages to be allocated.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * Exception, access bit in page tables that have been shared. This must
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * be handled, but we must also make sure PGMGst*Modify doesn't make
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * unnecessary modifications.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * Allocation points:
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * - PGMPhysSimpleWriteGCPhys and PGMPhysWrite.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * - Replacing a zero page mapping at \#PF.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * - Replacing a shared page mapping at \#PF.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * - ROM registration (currently MMR3RomRegister).
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * - VM restore (pgmR3Load).
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * For the first three it would make sense to keep a few pages handy
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * until we've reached the max memory commitment for the VM.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * For the ROM registration, we know exactly how many pages we need
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * and will request these from ring-0. For restore, we will save
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * the number of non-zero pages in the saved state and allocate
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * them up front. This would allow the ring-0 component to refuse
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * the request if there isn't sufficient memory available for VM use.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * Btw. for both ROM and restore allocations we won't be requiring
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * zeroed pages as they are going to be filled instantly.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @subsection subsec_pgmPhys_FreePage Freeing a page
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * There are a few points where a page can be freed:
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * - After being replaced by the zero page.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * - After being replaced by a shared page.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * - After being ballooned by the guest additions.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * - At reset.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * - At restore.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * When freeing one or more pages they will be returned to the ring-0
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * component and replaced by the zero page.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * The reasoning for clearing out all the pages on reset is that it will
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * return us to the exact same state as on power on, and may thereby help
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * us reduce the memory load on the system. Further it might have a
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * (temporary) positive influence on memory fragmentation (@see subsec_pgmPhys_Fragmentation).
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * On restore, as mentioned under the allocation topic, pages should be
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * freed / allocated depending on how many is actually required by the
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * new VM state. The simplest approach is to do like on reset, and free
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * all non-ROM pages and then allocate what we need.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * A measure to prevent some fragmentation, would be to let each allocation
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * chunk have some affinity towards the VM having allocated the most pages
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * from it. Also, try make sure to allocate from allocation chunks that
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * are almost full. Admittedly, both these measures might work counter to
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * our intentions and it's probably not worth putting a lot of effort,
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * cpu time or memory into this.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @subsection subsec_pgmPhys_SharePage Sharing a page
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * The basic idea is that there will be an idle priority kernel
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * thread walking the non-shared VM pages hashing them and looking for
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * pages with the same checksum. If such pages are found, it will compare
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * them byte-by-byte to see if they actually are identical. If found to be
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * identical it will allocate a shared page, copy the content, check that
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * the page didn't change while doing this, and finally request both the
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * VMs to use the shared page instead. If the page is all zeros (special
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * checksum and byte-by-byte check) it will request the VM that owns it
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * to replace it with the zero page.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * To make this efficient, we will have to make sure not to try share a page
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * that will change its contents soon. This part requires the most work.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * A simple idea would be to request the VM to write monitor the page for
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * a while to make sure it isn't modified any time soon. Also, it may
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * make sense to skip pages that are being write monitored since this
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * information is readily available to the thread if it works on the
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * per-VM guest memory structures (presently called PGMRAMRANGE).
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * @subsection subsec_pgmPhys_Fragmentation Fragmentation Concerns and Counter Measures
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * The pages are organized in allocation chunks in ring-0, this is a necessity
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * if we wish to have an OS agnostic approach to this whole thing. (On Linux we
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * could easily work on a page-by-page basis if we liked. Whether this is possible
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * or efficient on NT I don't quite know.) Fragmentation within these chunks may
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * become a problem as part of the idea here is that we wish to return memory to
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * the host system.
cc66247640b520463f925a5533fc9e5de06aa982vboxsync * For instance, starting two VMs at the same time, they will both allocate the
* - The over-commitment management, including the allocating/freeing
* The simplified flow of a PGMPhysRead/Write function:
#include "PGMInternal.h"
#ifdef DEBUG_bird
#ifdef VBOX_STRICT
static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser);
static PGMMODE pgmR3CalcShadowMode(PVM pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode, VMMSWITCHER *penmSwitcher);
#ifdef VBOX_WITH_DEBUGGER
static DECLCALLBACK(int) pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
static DECLCALLBACK(int) pgmR3CmdMap(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
static DECLCALLBACK(int) pgmR3CmdError(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
# ifdef VBOX_STRICT
static DECLCALLBACK(int) pgmR3CmdAssertCR3(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
#ifdef VBOX_WITH_DEBUGGER
/* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, pResultDesc, fFlags, pfnHandler pszSyntax, ....pszDescription */
{ "pgmerror", 0, 1, &g_aPgmErrorArgs[0],1, NULL, 0, pgmR3CmdError, "", "Enables inject runtime of errors into parts of PGM." },
{ "pgmerroroff", 0, 1, &g_aPgmErrorArgs[0],1, NULL, 0, pgmR3CmdError, "", "Disables inject runtime errors into parts of PGM." },
#ifdef VBOX_STRICT
{ "pgmsyncalways", 0, 0, NULL, 0, NULL, 0, pgmR3CmdSyncAlways, "", "Toggle permanent CR3 syncing." },
#include "PGMShw.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMGst.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMGst.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMGst.h"
#include "PGMShw.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMGst.h"
#include "PGMShw.h"
#ifdef VBOX_WITH_64_BITS_GUESTS
# include "PGMBth.h"
# include "PGMGstDefs.h"
# include "PGMGst.h"
#include "PGMShw.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#ifdef VBOX_WITH_64_BITS_GUESTS
# include "PGMGstDefs.h"
# include "PGMBth.h"
#include "PGMShw.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#ifdef VBOX_WITH_64_BITS_GUESTS
# include "PGMGstDefs.h"
# include "PGMBth.h"
int rc;
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, _1G / GMM_CHUNK_SIZE);
cbRam = 0;
cbRam = 0;
return rc;
#ifdef VBOX_STRICT
return rc;
rc = MMR3ReserveHandyPages(pVM, RT_ELEMENTS(pVM->pgm.s.aHandyPages)); /** @todo this should be changed to PGM_HANDY_PAGES_MIN but this needs proper testing... */
"Recognizes 'all', 'guest', 'shadow' and 'host' as arguments, defaulting to 'all' if nothing's given.",
#ifdef VBOX_WITH_DEBUGGER
static bool s_fRegisteredCmds = false;
if (!s_fRegisteredCmds)
s_fRegisteredCmds = true;
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
return rc;
return VERR_NO_PAGE_MEMORY;
AssertRelease(pVM->pgm.s.HCPhysInterPD != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPD & PAGE_OFFSET_MASK));
AssertRelease(pVM->pgm.s.HCPhysInterPaePDPT != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePDPT & PAGE_OFFSET_MASK));
AssertRelease(pVM->pgm.s.HCPhysInterPaePML4 != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePML4 & PAGE_OFFSET_MASK) && pVM->pgm.s.HCPhysInterPaePML4 < 0xffffffff);
pVM->pgm.s.pInterPaePDPT64->a[i].u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A | PGM_PLXFLAGS_PERMANENT
pVM->pgm.s.pInterPaePML4->a[i].u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A | PGM_PLXFLAGS_PERMANENT
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_GLOBAL:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
#ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
AssertMsgFailed(("Host mode %d (64-bit) is not supported by non-64bit builds\n", pVM->pgm.s.enmHostMode));
LogRel(("Debug: apInterPTs={%RHp,%RHp} apInterPaePTs={%RHp,%RHp} apInterPaePDs={%RHp,%RHp,%RHp,%RHp} pInterPaePDPT64=%RHp\n",
MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[0]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[1]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[2]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[3]),
return VINF_SUCCESS;
return rc;
int rc;
STAM_REL_REG(pVM, &pPGM->cAllPages, STAMTYPE_U32, "/PGM/Page/cAllPages", STAMUNIT_OCCURENCES, "The total number of pages.");
STAM_REL_REG(pVM, &pPGM->cPrivatePages, STAMTYPE_U32, "/PGM/Page/cPrivatePages", STAMUNIT_OCCURENCES, "The number of private pages.");
STAM_REL_REG(pVM, &pPGM->cSharedPages, STAMTYPE_U32, "/PGM/Page/cSharedPages", STAMUNIT_OCCURENCES, "The number of shared pages.");
STAM_REL_REG(pVM, &pPGM->cZeroPages, STAMTYPE_U32, "/PGM/Page/cZeroPages", STAMUNIT_OCCURENCES, "The number of zero backed pages.");
STAM_REL_REG(pVM, &pPGM->cHandyPages, STAMTYPE_U32, "/PGM/Page/cHandyPages", STAMUNIT_OCCURENCES, "The number of handy pages (not included in cAllPages).");
STAM_REL_REG(pVM, &pPGM->cRelocations, STAMTYPE_COUNTER, "/PGM/cRelocations", STAMUNIT_OCCURENCES, "Number of hypervisor relocations.");
STAM_REL_REG(pVM, &pPGM->ChunkR3Map.c, STAMTYPE_U32, "/PGM/ChunkR3Map/c", STAMUNIT_OCCURENCES, "Number of mapped chunks.");
STAM_REL_REG(pVM, &pPGM->ChunkR3Map.cMax, STAMTYPE_U32, "/PGM/ChunkR3Map/cMax", STAMUNIT_OCCURENCES, "Maximum number of mapped chunks.");
#ifdef VBOX_WITH_STATISTICS
# define PGM_REG_COUNTER(a, b, c) \
# define PGM_REG_PROFILE(a, b, c) \
rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b); \
PGM_REG_COUNTER(&pPGM->StatR3DetectedConflicts, "/PGM/R3/DetectedConflicts", "The number of times PGMR3CheckMappingConflicts() detected a conflict.");
PGM_REG_PROFILE(&pPGM->StatR3ResolveConflict, "/PGM/R3/ResolveConflict", "pgmR3SyncPTResolveConflict() profiling (includes the entire relocation).");
PGM_REG_PROFILE(&pPGM->StatRZSyncCR3HandlerVirtualUpdate, "/PGM/RZ/SyncCR3/Handlers/VirtualUpdate", "Profiling of the virtual handler updates.");
PGM_REG_PROFILE(&pPGM->StatRZSyncCR3HandlerVirtualReset, "/PGM/RZ/SyncCR3/Handlers/VirtualReset", "Profiling of the virtual handler resets.");
PGM_REG_PROFILE(&pPGM->StatR3SyncCR3HandlerVirtualUpdate, "/PGM/R3/SyncCR3/Handlers/VirtualUpdate", "Profiling of the virtual handler updates.");
PGM_REG_PROFILE(&pPGM->StatR3SyncCR3HandlerVirtualReset, "/PGM/R3/SyncCR3/Handlers/VirtualReset", "Profiling of the virtual handler resets.");
PGM_REG_COUNTER(&pPGM->StatRZPhysHandlerReset, "/PGM/RZ/PhysHandlerReset", "The number of times PGMHandlerPhysicalReset is called.");
PGM_REG_COUNTER(&pPGM->StatR3PhysHandlerReset, "/PGM/R3/PhysHandlerReset", "The number of times PGMHandlerPhysicalReset is called.");
PGM_REG_PROFILE(&pPGM->StatRZVirtHandlerSearchByPhys, "/PGM/RZ/VirtHandlerSearchByPhys", "Profiling of pgmHandlerVirtualFindByPhysAddr.");
PGM_REG_PROFILE(&pPGM->StatR3VirtHandlerSearchByPhys, "/PGM/R3/VirtHandlerSearchByPhys", "Profiling of pgmHandlerVirtualFindByPhysAddr.");
PGM_REG_COUNTER(&pPGM->StatRZPageReplaceShared, "/PGM/RZ/Page/ReplacedShared", "Times a shared page was replaced.");
PGM_REG_COUNTER(&pPGM->StatRZPageReplaceZero, "/PGM/RZ/Page/ReplacedZero", "Times the zero page was replaced.");
/// @todo PGM_REG_COUNTER(&pPGM->StatRZPageHandyAllocs, "/PGM/RZ/Page/HandyAllocs", "Number of times we've allocated more handy pages.");
PGM_REG_COUNTER(&pPGM->StatR3PageReplaceShared, "/PGM/R3/Page/ReplacedShared", "Times a shared page was replaced.");
PGM_REG_COUNTER(&pPGM->StatR3PageReplaceZero, "/PGM/R3/Page/ReplacedZero", "Times the zero page was replaced.");
/// @todo PGM_REG_COUNTER(&pPGM->StatR3PageHandyAllocs, "/PGM/R3/Page/HandyAllocs", "Number of times we've allocated more handy pages.");
PGM_REG_COUNTER(&pPGM->StatRCDynMapCacheHits, "/PGM/RC/DynMapCache/Hits" , "Number of dynamic page mapping cache hits.");
PGM_REG_COUNTER(&pPGM->StatRCDynMapCacheMisses, "/PGM/RC/DynMapCache/Misses" , "Number of dynamic page mapping cache misses.");
PGM_REG_COUNTER(&pPGM->StatRCInvlPgConflict, "/PGM/RC/InvlPgConflict", "Number of times PGMInvalidatePage() detected a mapping conflict.");
PGM_REG_COUNTER(&pPGM->StatRCInvlPgSyncMonCR3, "/PGM/RC/InvlPgSyncMonitorCR3", "Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3.");
# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
PGM_REG_COUNTER(&pPGM->StatTrackVirgin, "/PGM/Track/Virgin", "The number of first time shadowings");
PGM_REG_COUNTER(&pPGM->StatTrackAliased, "/PGM/Track/Aliased", "The number of times switching to cRef2, i.e. the page is being shadowed by two PTs.");
PGM_REG_COUNTER(&pPGM->StatTrackAliasedMany, "/PGM/Track/AliasedMany", "The number of times we're tracking using cRef2.");
PGM_REG_COUNTER(&pPGM->StatTrackAliasedLots, "/PGM/Track/AliasedLots", "The number of times we're hitting pages which has overflowed cRef2");
PGM_REG_COUNTER(&pPGM->StatTrackOverflows, "/PGM/Track/Overflows", "The number of times the extent list grows too long.");
PGM_REG_PROFILE(&pPGM->StatTrackDeref, "/PGM/Track/Deref", "Profiling of SyncPageWorkerTrackDeref (expensive).");
#define PGM_REG_COUNTER(a, b, c) \
rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
#define PGM_REG_PROFILE(a, b, c) \
rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
PGM_REG_COUNTER(&pPGM->cGuestModeChanges, "/PGM/CPU%d/cGuestModeChanges", "Number of guest mode changes.");
#ifdef VBOX_WITH_STATISTICS
STAMR3RegisterF(pVM, &pPGM->StatSyncPtPD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
STAMR3RegisterF(pVM, &pPGM->StatSyncPagePD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
PGM_REG_COUNTER(&pPGM->StatR0DynMapMigrateInvlPg, "/PGM/CPU%d/R0/DynMapMigrateInvlPg", "invlpg count in PGMDynMapMigrateAutoSet.");
PGM_REG_PROFILE(&pPGM->StatR0DynMapGCPageInl, "/PGM/CPU%d/R0/DynMapPageGCPageInl", "Calls to pgmR0DynMapGCPageInlined.");
PGM_REG_COUNTER(&pPGM->StatR0DynMapGCPageInlHits, "/PGM/CPU%d/R0/DynMapPageGCPageInl/Hits", "Hash table lookup hits.");
PGM_REG_COUNTER(&pPGM->StatR0DynMapGCPageInlMisses, "/PGM/CPU%d/R0/DynMapPageGCPageInl/Misses", "Misses that falls back to code common with PGMDynMapHCPage.");
PGM_REG_COUNTER(&pPGM->StatR0DynMapGCPageInlRamHits, "/PGM/CPU%d/R0/DynMapPageGCPageInl/RamHits", "1st ram range hits.");
PGM_REG_COUNTER(&pPGM->StatR0DynMapGCPageInlRamMisses, "/PGM/CPU%d/R0/DynMapPageGCPageInl/RamMisses", "1st ram range misses, takes slow path.");
PGM_REG_PROFILE(&pPGM->StatR0DynMapHCPageInl, "/PGM/CPU%d/R0/DynMapPageHCPageInl", "Calls to pgmR0DynMapHCPageInlined.");
PGM_REG_COUNTER(&pPGM->StatR0DynMapHCPageInlHits, "/PGM/CPU%d/R0/DynMapPageHCPageInl/Hits", "Hash table lookup hits.");
PGM_REG_COUNTER(&pPGM->StatR0DynMapHCPageInlMisses, "/PGM/CPU%d/R0/DynMapPageHCPageInl/Misses", "Misses that falls back to code common with PGMDynMapHCPage.");
PGM_REG_COUNTER(&pPGM->StatR0DynMapSetOptimize, "/PGM/CPU%d/R0/DynMapPage/SetOptimize", "Calls to pgmDynMapOptimizeAutoSet.");
PGM_REG_COUNTER(&pPGM->StatR0DynMapSetSearchFlushes, "/PGM/CPU%d/R0/DynMapPage/SetSearchFlushes","Set search restorting to subset flushes.");
PGM_REG_COUNTER(&pPGM->StatR0DynMapSetSearchHits, "/PGM/CPU%d/R0/DynMapPage/SetSearchHits", "Set search hits.");
PGM_REG_COUNTER(&pPGM->StatR0DynMapSetSearchMisses, "/PGM/CPU%d/R0/DynMapPage/SetSearchMisses", "Set search misses.");
PGM_REG_PROFILE(&pPGM->StatR0DynMapHCPage, "/PGM/CPU%d/R0/DynMapPage/HCPage", "Calls to PGMDynMapHCPage (ring-0).");
PGM_REG_COUNTER(&pPGM->StatR0DynMapPageInvlPg, "/PGM/CPU%d/R0/DynMapPage/InvlPg", "invlpg count in pgmR0DynMapPageSlow.");
PGM_REG_COUNTER(&pPGM->StatR0DynMapPageSlow, "/PGM/CPU%d/R0/DynMapPage/Slow", "Calls to pgmR0DynMapPageSlow - subtract this from pgmR0DynMapPage to get 1st level hits.");
PGM_REG_COUNTER(&pPGM->StatR0DynMapPageSlowLoopHits, "/PGM/CPU%d/R0/DynMapPage/SlowLoopHits" , "Hits in the loop path.");
PGM_REG_COUNTER(&pPGM->StatR0DynMapPageSlowLoopMisses, "/PGM/CPU%d/R0/DynMapPage/SlowLoopMisses", "Misses in the loop path. NonLoopMisses = Slow - SlowLoopHit - SlowLoopMisses");
//PGM_REG_COUNTER(&pPGM->StatR0DynMapPageSlowLostHits, "/PGM/CPU%d/R0/DynMapPage/SlowLostHits", "Lost hits.");
PGM_REG_COUNTER(&pPGM->StatR0DynMapSubsets, "/PGM/CPU%d/R0/Subsets", "Times PGMDynMapPushAutoSubset was called.");
PGM_REG_COUNTER(&pPGM->StatR0DynMapPopFlushes, "/PGM/CPU%d/R0/SubsetPopFlushes", "Times PGMDynMapPopAutoSubset flushes the subset.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0e, "/PGM/CPU%d/RZ/Trap0e", "Profiling of the PGMTrap0eHandler() body.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0eTimeCheckPageFault, "/PGM/CPU%d/RZ/Trap0e/Time/CheckPageFault", "Profiling of checking for dirty/access emulation faults.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0eTimeSyncPT, "/PGM/CPU%d/RZ/Trap0e/Time/SyncPT", "Profiling of lazy page table syncing.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0eTimeMapping, "/PGM/CPU%d/RZ/Trap0e/Time/Mapping", "Profiling of checking virtual mappings.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0eTimeOutOfSync, "/PGM/CPU%d/RZ/Trap0e/Time/OutOfSync", "Profiling of out of sync page handling.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0eTimeHandlers, "/PGM/CPU%d/RZ/Trap0e/Time/Handlers", "Profiling of checking handlers.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2CSAM, "/PGM/CPU%d/RZ/Trap0e/Time2/CSAM", "Profiling of the Trap0eHandler body when the cause is CSAM.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2DirtyAndAccessed, "/PGM/CPU%d/RZ/Trap0e/Time2/DirtyAndAccessedBits", "Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2GuestTrap, "/PGM/CPU%d/RZ/Trap0e/Time2/GuestTrap", "Profiling of the Trap0eHandler body when the cause is a guest trap.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2HndPhys, "/PGM/CPU%d/RZ/Trap0e/Time2/HandlerPhysical", "Profiling of the Trap0eHandler body when the cause is a physical handler.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2HndVirt, "/PGM/CPU%d/RZ/Trap0e/Time2/HandlerVirtual", "Profiling of the Trap0eHandler body when the cause is a virtual handler.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2HndUnhandled, "/PGM/CPU%d/RZ/Trap0e/Time2/HandlerUnhandled", "Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2Misc, "/PGM/CPU%d/RZ/Trap0e/Time2/Misc", "Profiling of the Trap0eHandler body when the cause is not known.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2OutOfSync, "/PGM/CPU%d/RZ/Trap0e/Time2/OutOfSync", "Profiling of the Trap0eHandler body when the cause is an out-of-sync page.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2OutOfSyncHndPhys, "/PGM/CPU%d/RZ/Trap0e/Time2/OutOfSyncHndPhys", "Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2OutOfSyncHndVirt, "/PGM/CPU%d/RZ/Trap0e/Time2/OutOfSyncHndVirt", "Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2OutOfSyncHndObs, "/PGM/CPU%d/RZ/Trap0e/Time2/OutOfSyncObsHnd", "Profiling of the Trap0eHandler body when the cause is an obsolete handler page.");
PGM_REG_PROFILE(&pPGM->StatRZTrap0eTime2SyncPT, "/PGM/CPU%d/RZ/Trap0e/Time2/SyncPT", "Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eConflicts, "/PGM/CPU%d/RZ/Trap0e/Conflicts", "The number of times #PF was caused by an undetected conflict.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eHandlersMapping, "/PGM/CPU%d/RZ/Trap0e/Handlers/Mapping", "Number of traps due to access handlers in mappings.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eHandlersOutOfSync, "/PGM/CPU%d/RZ/Trap0e/Handlers/OutOfSync", "Number of traps due to out-of-sync handled pages.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eHandlersPhysical, "/PGM/CPU%d/RZ/Trap0e/Handlers/Physical", "Number of traps due to physical access handlers.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eHandlersVirtual, "/PGM/CPU%d/RZ/Trap0e/Handlers/Virtual", "Number of traps due to virtual access handlers.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eHandlersVirtualByPhys, "/PGM/CPU%d/RZ/Trap0e/Handlers/VirtualByPhys", "Number of traps due to virtual access handlers by physical address.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eHandlersVirtualUnmarked,"/PGM/CPU%d/RZ/Trap0e/Handlers/VirtualUnmarked","Number of traps due to virtual access handlers by virtual address (without proper physical flags).");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eHandlersUnhandled, "/PGM/CPU%d/RZ/Trap0e/Handlers/Unhandled", "Number of traps due to access outside range of monitored page(s).");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eHandlersInvalid, "/PGM/CPU%d/RZ/Trap0e/Handlers/Invalid", "Number of traps due to access to invalid physical memory.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eUSNotPresentRead, "/PGM/CPU%d/RZ/Trap0e/Err/User/NPRead", "Number of user mode not present read page faults.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eUSNotPresentWrite, "/PGM/CPU%d/RZ/Trap0e/Err/User/NPWrite", "Number of user mode not present write page faults.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eUSWrite, "/PGM/CPU%d/RZ/Trap0e/Err/User/Write", "Number of user mode write page faults.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eUSReserved, "/PGM/CPU%d/RZ/Trap0e/Err/User/Reserved", "Number of user mode reserved bit page faults.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eUSNXE, "/PGM/CPU%d/RZ/Trap0e/Err/User/NXE", "Number of user mode NXE page faults.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eUSRead, "/PGM/CPU%d/RZ/Trap0e/Err/User/Read", "Number of user mode read page faults.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eSVNotPresentRead, "/PGM/CPU%d/RZ/Trap0e/Err/Supervisor/NPRead", "Number of supervisor mode not present read page faults.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eSVNotPresentWrite, "/PGM/CPU%d/RZ/Trap0e/Err/Supervisor/NPWrite", "Number of supervisor mode not present write page faults.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eSVWrite, "/PGM/CPU%d/RZ/Trap0e/Err/Supervisor/Write", "Number of supervisor mode write page faults.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eSVReserved, "/PGM/CPU%d/RZ/Trap0e/Err/Supervisor/Reserved", "Number of supervisor mode reserved bit page faults.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eSNXE, "/PGM/CPU%d/RZ/Trap0e/Err/Supervisor/NXE", "Number of supervisor mode NXE page faults.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eGuestPF, "/PGM/CPU%d/RZ/Trap0e/GuestPF", "Number of real guest page faults.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eGuestPFUnh, "/PGM/CPU%d/RZ/Trap0e/GuestPF/Unhandled", "Number of real guest page faults from the 'unhandled' case.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eGuestPFMapping, "/PGM/CPU%d/RZ/Trap0e/GuestPF/InMapping", "Number of real guest page faults in a mapping.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eWPEmulInRZ, "/PGM/CPU%d/RZ/Trap0e/WP/InRZ", "Number of guest page faults due to X86_CR0_WP emulation.");
PGM_REG_COUNTER(&pPGM->StatRZTrap0eWPEmulToR3, "/PGM/CPU%d/RZ/Trap0e/WP/ToR3", "Number of guest page faults due to X86_CR0_WP emulation (forward to R3 for emulation).");
STAMR3RegisterF(pVM, &pPGM->StatRZTrap0ePD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
PGM_REG_COUNTER(&pPGM->StatRZGuestCR3WriteHandled, "/PGM/CPU%d/RZ/CR3WriteHandled", "The number of times the Guest CR3 change was successfully handled.");
PGM_REG_COUNTER(&pPGM->StatRZGuestCR3WriteUnhandled, "/PGM/CPU%d/RZ/CR3WriteUnhandled", "The number of times the Guest CR3 change was passed back to the recompiler.");
PGM_REG_COUNTER(&pPGM->StatRZGuestCR3WriteConflict, "/PGM/CPU%d/RZ/CR3WriteConflict", "The number of times the Guest CR3 monitoring detected a conflict.");
PGM_REG_COUNTER(&pPGM->StatRZGuestROMWriteHandled, "/PGM/CPU%d/RZ/ROMWriteHandled", "The number of times the Guest ROM change was successfully handled.");
PGM_REG_COUNTER(&pPGM->StatRZGuestROMWriteUnhandled, "/PGM/CPU%d/RZ/ROMWriteUnhandled", "The number of times the Guest ROM change was passed back to the recompiler.");
PGM_REG_PROFILE(&pPGM->StatRZSyncCR3, "/PGM/CPU%d/RZ/SyncCR3", "Profiling of the PGMSyncCR3() body.");
PGM_REG_PROFILE(&pPGM->StatRZSyncCR3Handlers, "/PGM/CPU%d/RZ/SyncCR3/Handlers", "Profiling of the PGMSyncCR3() update handler section.");
PGM_REG_COUNTER(&pPGM->StatRZSyncCR3Global, "/PGM/CPU%d/RZ/SyncCR3/Global", "The number of global CR3 syncs.");
PGM_REG_COUNTER(&pPGM->StatRZSyncCR3NotGlobal, "/PGM/CPU%d/RZ/SyncCR3/NotGlobal", "The number of non-global CR3 syncs.");
PGM_REG_COUNTER(&pPGM->StatRZSyncCR3DstCacheHit, "/PGM/CPU%d/RZ/SyncCR3/DstChacheHit", "The number of times we got some kind of a cache hit.");
PGM_REG_COUNTER(&pPGM->StatRZSyncCR3DstFreed, "/PGM/CPU%d/RZ/SyncCR3/DstFreed", "The number of times we've had to free a shadow entry.");
PGM_REG_COUNTER(&pPGM->StatRZSyncCR3DstFreedSrcNP, "/PGM/CPU%d/RZ/SyncCR3/DstFreedSrcNP", "The number of times we've had to free a shadow entry for which the source entry was not present.");
PGM_REG_COUNTER(&pPGM->StatRZSyncCR3DstNotPresent, "/PGM/CPU%d/RZ/SyncCR3/DstNotPresent", "The number of times we've encountered a not present shadow entry for a present guest entry.");
PGM_REG_COUNTER(&pPGM->StatRZSyncCR3DstSkippedGlobalPD, "/PGM/CPU%d/RZ/SyncCR3/DstSkippedGlobalPD", "The number of times a global page directory wasn't flushed.");
PGM_REG_COUNTER(&pPGM->StatRZSyncCR3DstSkippedGlobalPT, "/PGM/CPU%d/RZ/SyncCR3/DstSkippedGlobalPT", "The number of times a page table with only global entries wasn't flushed.");
PGM_REG_COUNTER(&pPGM->StatRZSyncPTFailed, "/PGM/CPU%d/RZ/SyncPT/Failed", "The number of times pfnSyncPT() failed.");
PGM_REG_COUNTER(&pPGM->StatRZSyncPagePDNAs, "/PGM/CPU%d/RZ/SyncPagePDNAs", "The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit.");
PGM_REG_COUNTER(&pPGM->StatRZSyncPagePDOutOfSync, "/PGM/CPU%d/RZ/SyncPagePDOutOfSync", "The number of time we've encountered an out-of-sync PD in SyncPage.");
PGM_REG_COUNTER(&pPGM->StatRZAccessedPage, "/PGM/CPU%d/RZ/AccessedPage", "The number of pages marked not present for accessed bit emulation.");
PGM_REG_PROFILE(&pPGM->StatRZDirtyBitTracking, "/PGM/CPU%d/RZ/DirtyPage", "Profiling the dirty bit tracking in CheckPageFault().");
PGM_REG_COUNTER(&pPGM->StatRZDirtyPage, "/PGM/CPU%d/RZ/DirtyPage/Mark", "The number of pages marked read-only for dirty bit tracking.");
PGM_REG_COUNTER(&pPGM->StatRZDirtyPageBig, "/PGM/CPU%d/RZ/DirtyPage/MarkBig", "The number of 4MB pages marked read-only for dirty bit tracking.");
PGM_REG_COUNTER(&pPGM->StatRZDirtyPageSkipped, "/PGM/CPU%d/RZ/DirtyPage/Skipped", "The number of pages already dirty or readonly.");
PGM_REG_COUNTER(&pPGM->StatRZDirtyPageTrap, "/PGM/CPU%d/RZ/DirtyPage/Trap", "The number of traps generated for dirty bit tracking.");
PGM_REG_COUNTER(&pPGM->StatRZDirtiedPage, "/PGM/CPU%d/RZ/DirtyPage/SetDirty", "The number of pages marked dirty because of write accesses.");
PGM_REG_COUNTER(&pPGM->StatRZDirtyTrackRealPF, "/PGM/CPU%d/RZ/DirtyPage/RealPF", "The number of real pages faults during dirty bit tracking.");
PGM_REG_COUNTER(&pPGM->StatRZPageAlreadyDirty, "/PGM/CPU%d/RZ/DirtyPage/AlreadySet", "The number of pages already marked dirty because of write accesses.");
PGM_REG_PROFILE(&pPGM->StatRZInvalidatePage, "/PGM/CPU%d/RZ/InvalidatePage", "PGMInvalidatePage() profiling.");
PGM_REG_COUNTER(&pPGM->StatRZInvalidatePage4KBPages, "/PGM/CPU%d/RZ/InvalidatePage/4KBPages", "The number of times PGMInvalidatePage() was called for a 4KB page.");
PGM_REG_COUNTER(&pPGM->StatRZInvalidatePage4MBPages, "/PGM/CPU%d/RZ/InvalidatePage/4MBPages", "The number of times PGMInvalidatePage() was called for a 4MB page.");
PGM_REG_COUNTER(&pPGM->StatRZInvalidatePage4MBPagesSkip, "/PGM/CPU%d/RZ/InvalidatePage/4MBPagesSkip","The number of times PGMInvalidatePage() skipped a 4MB page.");
PGM_REG_COUNTER(&pPGM->StatRZInvalidatePagePDMappings, "/PGM/CPU%d/RZ/InvalidatePage/PDMappings", "The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict).");
PGM_REG_COUNTER(&pPGM->StatRZInvalidatePagePDNAs, "/PGM/CPU%d/RZ/InvalidatePage/PDNAs", "The number of times PGMInvalidatePage() was called for a not accessed page directory.");
PGM_REG_COUNTER(&pPGM->StatRZInvalidatePagePDNPs, "/PGM/CPU%d/RZ/InvalidatePage/PDNPs", "The number of times PGMInvalidatePage() was called for a not present page directory.");
PGM_REG_COUNTER(&pPGM->StatRZInvalidatePagePDOutOfSync, "/PGM/CPU%d/RZ/InvalidatePage/PDOutOfSync", "The number of times PGMInvalidatePage() was called for an out of sync page directory.");
PGM_REG_COUNTER(&pPGM->StatRZInvalidatePageSkipped, "/PGM/CPU%d/RZ/InvalidatePage/Skipped", "The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3.");
PGM_REG_COUNTER(&pPGM->StatRZPageOutOfSyncSupervisor, "/PGM/CPU%d/RZ/OutOfSync/SuperVisor", "Number of traps due to pages out of sync and times VerifyAccessSyncPage calls SyncPage.");
PGM_REG_COUNTER(&pPGM->StatRZPageOutOfSyncUser, "/PGM/CPU%d/RZ/OutOfSync/User", "Number of traps due to pages out of sync and times VerifyAccessSyncPage calls SyncPage.");
PGM_REG_PROFILE(&pPGM->StatRZFlushTLB, "/PGM/CPU%d/RZ/FlushTLB", "Profiling of the PGMFlushTLB() body.");
PGM_REG_COUNTER(&pPGM->StatRZFlushTLBNewCR3, "/PGM/CPU%d/RZ/FlushTLB/NewCR3", "The number of times PGMFlushTLB was called with a new CR3, non-global. (switch)");
PGM_REG_COUNTER(&pPGM->StatRZFlushTLBNewCR3Global, "/PGM/CPU%d/RZ/FlushTLB/NewCR3Global", "The number of times PGMFlushTLB was called with a new CR3, global. (switch)");
PGM_REG_COUNTER(&pPGM->StatRZFlushTLBSameCR3, "/PGM/CPU%d/RZ/FlushTLB/SameCR3", "The number of times PGMFlushTLB was called with the same CR3, non-global. (flush)");
PGM_REG_COUNTER(&pPGM->StatRZFlushTLBSameCR3Global, "/PGM/CPU%d/RZ/FlushTLB/SameCR3Global", "The number of times PGMFlushTLB was called with the same CR3, global. (flush)");
PGM_REG_PROFILE(&pPGM->StatRZGstModifyPage, "/PGM/CPU%d/RZ/GstModifyPage", "Profiling of the PGMGstModifyPage() body.");
PGM_REG_PROFILE(&pPGM->StatR3SyncCR3, "/PGM/CPU%d/R3/SyncCR3", "Profiling of the PGMSyncCR3() body.");
PGM_REG_PROFILE(&pPGM->StatR3SyncCR3Handlers, "/PGM/CPU%d/R3/SyncCR3/Handlers", "Profiling of the PGMSyncCR3() update handler section.");
PGM_REG_COUNTER(&pPGM->StatR3SyncCR3Global, "/PGM/CPU%d/R3/SyncCR3/Global", "The number of global CR3 syncs.");
PGM_REG_COUNTER(&pPGM->StatR3SyncCR3NotGlobal, "/PGM/CPU%d/R3/SyncCR3/NotGlobal", "The number of non-global CR3 syncs.");
PGM_REG_COUNTER(&pPGM->StatR3SyncCR3DstCacheHit, "/PGM/CPU%d/R3/SyncCR3/DstChacheHit", "The number of times we got some kind of a cache hit.");
PGM_REG_COUNTER(&pPGM->StatR3SyncCR3DstFreed, "/PGM/CPU%d/R3/SyncCR3/DstFreed", "The number of times we've had to free a shadow entry.");
PGM_REG_COUNTER(&pPGM->StatR3SyncCR3DstFreedSrcNP, "/PGM/CPU%d/R3/SyncCR3/DstFreedSrcNP", "The number of times we've had to free a shadow entry for which the source entry was not present.");
PGM_REG_COUNTER(&pPGM->StatR3SyncCR3DstNotPresent, "/PGM/CPU%d/R3/SyncCR3/DstNotPresent", "The number of times we've encountered a not present shadow entry for a present guest entry.");
PGM_REG_COUNTER(&pPGM->StatR3SyncCR3DstSkippedGlobalPD, "/PGM/CPU%d/R3/SyncCR3/DstSkippedGlobalPD", "The number of times a global page directory wasn't flushed.");
PGM_REG_COUNTER(&pPGM->StatR3SyncCR3DstSkippedGlobalPT, "/PGM/CPU%d/R3/SyncCR3/DstSkippedGlobalPT", "The number of times a page table with only global entries wasn't flushed.");
PGM_REG_COUNTER(&pPGM->StatR3SyncPTFailed, "/PGM/CPU%d/R3/SyncPT/Failed", "The number of times pfnSyncPT() failed.");
PGM_REG_COUNTER(&pPGM->StatR3SyncPagePDNAs, "/PGM/CPU%d/R3/SyncPagePDNAs", "The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit.");
PGM_REG_COUNTER(&pPGM->StatR3SyncPagePDOutOfSync, "/PGM/CPU%d/R3/SyncPagePDOutOfSync", "The number of time we've encountered an out-of-sync PD in SyncPage.");
PGM_REG_COUNTER(&pPGM->StatR3AccessedPage, "/PGM/CPU%d/R3/AccessedPage", "The number of pages marked not present for accessed bit emulation.");
PGM_REG_PROFILE(&pPGM->StatR3DirtyBitTracking, "/PGM/CPU%d/R3/DirtyPage", "Profiling the dirty bit tracking in CheckPageFault().");
PGM_REG_COUNTER(&pPGM->StatR3DirtyPage, "/PGM/CPU%d/R3/DirtyPage/Mark", "The number of pages marked read-only for dirty bit tracking.");
PGM_REG_COUNTER(&pPGM->StatR3DirtyPageBig, "/PGM/CPU%d/R3/DirtyPage/MarkBig", "The number of 4MB pages marked read-only for dirty bit tracking.");
PGM_REG_COUNTER(&pPGM->StatR3DirtyPageSkipped, "/PGM/CPU%d/R3/DirtyPage/Skipped", "The number of pages already dirty or readonly.");
PGM_REG_COUNTER(&pPGM->StatR3DirtyPageTrap, "/PGM/CPU%d/R3/DirtyPage/Trap", "The number of traps generated for dirty bit tracking.");
PGM_REG_COUNTER(&pPGM->StatR3DirtiedPage, "/PGM/CPU%d/R3/DirtyPage/SetDirty", "The number of pages marked dirty because of write accesses.");
PGM_REG_COUNTER(&pPGM->StatR3DirtyTrackRealPF, "/PGM/CPU%d/R3/DirtyPage/RealPF", "The number of real pages faults during dirty bit tracking.");
PGM_REG_COUNTER(&pPGM->StatR3PageAlreadyDirty, "/PGM/CPU%d/R3/DirtyPage/AlreadySet", "The number of pages already marked dirty because of write accesses.");
PGM_REG_PROFILE(&pPGM->StatR3InvalidatePage, "/PGM/CPU%d/R3/InvalidatePage", "PGMInvalidatePage() profiling.");
PGM_REG_COUNTER(&pPGM->StatR3InvalidatePage4KBPages, "/PGM/CPU%d/R3/InvalidatePage/4KBPages", "The number of times PGMInvalidatePage() was called for a 4KB page.");
PGM_REG_COUNTER(&pPGM->StatR3InvalidatePage4MBPages, "/PGM/CPU%d/R3/InvalidatePage/4MBPages", "The number of times PGMInvalidatePage() was called for a 4MB page.");
PGM_REG_COUNTER(&pPGM->StatR3InvalidatePage4MBPagesSkip, "/PGM/CPU%d/R3/InvalidatePage/4MBPagesSkip","The number of times PGMInvalidatePage() skipped a 4MB page.");
PGM_REG_COUNTER(&pPGM->StatR3InvalidatePagePDMappings, "/PGM/CPU%d/R3/InvalidatePage/PDMappings", "The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict).");
PGM_REG_COUNTER(&pPGM->StatR3InvalidatePagePDNAs, "/PGM/CPU%d/R3/InvalidatePage/PDNAs", "The number of times PGMInvalidatePage() was called for a not accessed page directory.");
PGM_REG_COUNTER(&pPGM->StatR3InvalidatePagePDNPs, "/PGM/CPU%d/R3/InvalidatePage/PDNPs", "The number of times PGMInvalidatePage() was called for a not present page directory.");
PGM_REG_COUNTER(&pPGM->StatR3InvalidatePagePDOutOfSync, "/PGM/CPU%d/R3/InvalidatePage/PDOutOfSync", "The number of times PGMInvalidatePage() was called for an out of sync page directory.");
PGM_REG_COUNTER(&pPGM->StatR3InvalidatePageSkipped, "/PGM/CPU%d/R3/InvalidatePage/Skipped", "The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3.");
PGM_REG_COUNTER(&pPGM->StatR3PageOutOfSyncSupervisor, "/PGM/CPU%d/R3/OutOfSync/SuperVisor", "Number of traps due to pages out of sync and times VerifyAccessSyncPage calls SyncPage.");
PGM_REG_COUNTER(&pPGM->StatR3PageOutOfSyncUser, "/PGM/CPU%d/R3/OutOfSync/User", "Number of traps due to pages out of sync and times VerifyAccessSyncPage calls SyncPage.");
PGM_REG_PROFILE(&pPGM->StatR3FlushTLB, "/PGM/CPU%d/R3/FlushTLB", "Profiling of the PGMFlushTLB() body.");
PGM_REG_COUNTER(&pPGM->StatR3FlushTLBNewCR3, "/PGM/CPU%d/R3/FlushTLB/NewCR3", "The number of times PGMFlushTLB was called with a new CR3, non-global. (switch)");
PGM_REG_COUNTER(&pPGM->StatR3FlushTLBNewCR3Global, "/PGM/CPU%d/R3/FlushTLB/NewCR3Global", "The number of times PGMFlushTLB was called with a new CR3, global. (switch)");
PGM_REG_COUNTER(&pPGM->StatR3FlushTLBSameCR3, "/PGM/CPU%d/R3/FlushTLB/SameCR3", "The number of times PGMFlushTLB was called with the same CR3, non-global. (flush)");
PGM_REG_COUNTER(&pPGM->StatR3FlushTLBSameCR3Global, "/PGM/CPU%d/R3/FlushTLB/SameCR3Global", "The number of times PGMFlushTLB was called with the same CR3, global. (flush)");
PGM_REG_PROFILE(&pPGM->StatR3GstModifyPage, "/PGM/CPU%d/R3/GstModifyPage", "Profiling of the PGMGstModifyPage() body.");
int rc;
&& (pVM->pgm.s.pbDynPageMapBaseGC >> X86_PD_PAE_SHIFT) != ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> X86_PD_PAE_SHIFT))
AssertRelease((pVM->pgm.s.pbDynPageMapBaseGC >> X86_PD_PAE_SHIFT) == ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> X86_PD_PAE_SHIFT));
return rc;
int rc;
pVM->pgm.s.paDynPageMap32BitPTEsGC = pMapping->aPTs[iPT].pPTRC + iPG * sizeof(pMapping->aPTs[0].pPTR3->a[0]);
pVM->pgm.s.paDynPageMapPaePTEsGC = pMapping->aPTs[iPT].paPaePTsRC + iPG * sizeof(pMapping->aPTs[0].paPaePTsR3->a[0]);
return rc;
LogFlow(("PGMR3Relocate %RGv to %RGv\n", pVM->pgm.s.GCPtrCR3Mapping, pVM->pgm.s.GCPtrCR3Mapping + offDelta));
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysHandlers, true, pgmR3RelocatePhysHandler, &offDelta);
RTAvlroGCPtrDoWithAll(&pVM->pgm.s.pTreesR3->VirtHandlers, true, pgmR3RelocateVirtHandler, &offDelta);
RTAvlroGCPtrDoWithAll(&pVM->pgm.s.pTreesR3->HyperVirtHandlers, true, pgmR3RelocateHyperVirtHandler, &offDelta);
int rc;
#ifdef DEBUG
#ifdef VBOX_STRICT
static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser)
return NULL;
static int pgmR3SavePage(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
int rc;
void const *pvPage;
AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
return rc;
static int pgmR3SaveShadowedRomPage(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
return rc;
bool fMappingsFixed;
bool fA20Enabled;
} PGMOLD;
int rc;
static int pgmR3LoadPageZero(PVM pVM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
return VERR_SSM_UNEXPECTED_DATA;
return VERR_SSM_UNEXPECTED_DATA;
return VINF_SUCCESS;
static int pgmR3LoadPageBits(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
int rc;
void *pvPage;
return rc;
static int pgmR3LoadPage(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
rc);
return VINF_SUCCESS;
static int pgmR3LoadShadowedRomPage(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
return rc;
int rc;
return rc;
return rc;
uint32_t i = 0;
return rc;
if (u32Sep == ~0U)
if (u32Sep != i)
return rc;
return rc;
return rc;
if (u32Sep == ~0U)
if (u32Sep != i)
return rc;
return rc;
|| ( cchDesc
|| !fHaveBits)
AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
if ( !fHaveBits
AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
if (fPresent)
AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
return rc;
int rc;
AssertMsgFailed(("pgmR3Load: Invalid version u32Version=%d (current %d)!\n", u32Version, PGM_SAVED_STATE_VERSION));
/* Restore pVM->pgm.s.GCPhysCR3. */
return rc;
if (pszArgs)
fGuest = true;
fShadow = true;
fHost = true;
if (fGuest)
if (fShadow)
pHlp->pfnPrintf(pHlp, "Shadow paging mode: %s\n", PGMGetModeName(pVM->aCpus[0].pgm.s.enmShadowMode));
if (fHost)
const char *psz;
pVM,
Assert(PGMPhysGCPhys2R3PtrAssert(pVM, (RTGCPHYS)(CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK), sizeof(*pPDSrc)) == pPDSrc);
iPD,
iPD,
return rc;
switch (pgmMode)
case PGMMODE_PAE:
case PGMMODE_AMD64:
int rc;
pVM->pgm.s.paModeData = (PPGMMODEDATA)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMMODEDATA) * pgmModeDataMaxIndex());
#ifdef VBOX_WITH_64_BITS_GUESTS
#ifdef VBOX_WITH_64_BITS_GUESTS
case SUPPAGINGMODE_32_BIT:
# ifdef VBOX_WITH_64_BITS_GUESTS
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
# ifdef VBOX_WITH_64_BITS_GUESTS
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
# ifdef VBOX_WITH_64_BITS_GUESTS
AssertFailed();
#ifdef VBOX_WITH_64_BITS_GUESTS
return VINF_SUCCESS;
#ifdef VBOX_STRICT
#ifdef VBOX_STRICT
#ifdef VBOX_STRICT
static PGMMODE pgmR3CalcShadowMode(PVM pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode, VMMSWITCHER *penmSwitcher)
switch (enmGuestMode)
case PGMMODE_REAL:
case PGMMODE_PROTECTED:
switch (enmHostMode)
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
#ifdef DEBUG_bird
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
#ifdef DEBUG_bird
case PGMMODE_32_BIT:
switch (enmHostMode)
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
#ifdef DEBUG_bird
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
#ifdef DEBUG_bird
case PGMMODE_PAE:
switch (enmHostMode)
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
switch (enmHostMode)
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
return PGMMODE_INVALID;
return enmShadowMode;
/* NOTE(review): fragmentary excerpt of PGMR3ChangeMode -- the function's
   signature and most statements are missing from this revision of the file.
   Comments below annotate only what is visible; verify against upstream. */
Log(("PGMR3ChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
/* Work out the shadow paging mode matching the requested guest mode. */
PGMMODE enmShadowMode = pgmR3CalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode, &enmSwitcher);
return rc;
LogFlow(("PGMR3ChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
return rc;
LogFlow(("PGMR3ChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
return rc;
int rc;
/* Shadow-mode specific handling (bodies of the cases are not visible here). */
switch (enmShadowMode)
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_REAL:
case PGMMODE_PROTECTED:
return VERR_INTERNAL_ERROR;
return rc;
/* Guest-mode specific handling.  The repeated nested case labels below
   appear to belong to inner switches on the shadow mode -- the enclosing
   braces/statements were lost in this excerpt; do not restructure blindly. */
switch (enmGuestMode)
case PGMMODE_REAL:
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
default: AssertFailed(); break;
case PGMMODE_PROTECTED:
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
default: AssertFailed(); break;
case PGMMODE_32_BIT:
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
default: AssertFailed(); break;
case PGMMODE_PAE_NX:
case PGMMODE_PAE:
/* PAE is disabled by default; switching to it fails with the
   configuration error message below unless enabled in the VM settings. */
N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (General/Advanced)"));
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_32_BIT:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
default: AssertFailed(); break;
/* 64-bit guest modes are only handled when built with 64-bit guest support. */
#ifdef VBOX_WITH_64_BITS_GUESTS
case PGMMODE_AMD64_NX:
case PGMMODE_AMD64:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
default: AssertFailed(); break;
return rc;
/**
 * Dumps a PAE/long-mode page table -- the leaf level of the shadow paging
 * dump hierarchy.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pPT         The page table to dump.
 * @param   u64Address  The virtual address the table maps.
 * @param   fLongMode   Set for AMD64 long mode, clear for PAE.
 * @param   cMaxDepth   Maximum dump depth.
 * @param   pHlp        Output helpers.
 *
 * NOTE(review): only the signature and final return are visible in this
 * excerpt; the entry-formatting body is missing -- verify against upstream.
 */
static int pgmR3DumpHierarchyHCPaePT(PVM pVM, PX86PTPAE pPT, uint64_t u64Address, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
return VINF_SUCCESS;
/**
 * Dumps a PAE/long-mode shadow page directory, descending into page tables.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   HCPhys      Host-context physical address of the page directory.
 * @param   u64Address  The virtual address the directory starts mapping.
 * @param   cr4         The CR4 value (paging feature flags).
 * @param   fLongMode   Set for AMD64 long mode, clear for PAE.
 * @param   cMaxDepth   Maximum recursion depth.
 * @param   pHlp        Output helpers.
 *
 * NOTE(review): most of the body (loops, pool lookups) is missing from this
 * excerpt; only the error paths are visible.
 */
static int pgmR3DumpHierarchyHCPaePD(PVM pVM, RTHCPHYS HCPhys, uint64_t u64Address, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
/* The PD must be resolvable through the page pool; bail out otherwise. */
if (!pPD)
pHlp->pfnPrintf(pHlp, "%0*llx error! Page directory at HCPhys=%RHp was not found in the page pool!\n",
return VERR_INVALID_PARAMETER;
/* Report a PT whose pool mapping disagrees with the address in the PDE. */
pHlp->pfnPrintf(pHlp, "%0*llx error! Mapping error! PT %d has HCPhysPT=%RHp not %RHp is in the PD.\n",
if (pPT)
return rc;
/**
 * Dumps a PAE/long-mode page directory pointer table, recursing into each
 * referenced page directory via pgmR3DumpHierarchyHCPaePD.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   HCPhys      Host-context physical address of the PDPT.
 * @param   u64Address  The virtual address the PDPT starts mapping.
 * @param   cr4         The CR4 value (paging feature flags).
 * @param   fLongMode   Set for AMD64 long mode, clear for PAE.
 * @param   cMaxDepth   Maximum recursion depth.
 * @param   pHlp        Output helpers.
 */
static int pgmR3DumpHierarchyHCPaePDPT(PVM pVM, RTHCPHYS HCPhys, uint64_t u64Address, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
/* The PDPT must be resolvable through the page pool; bail out otherwise. */
if (!pPDPT)
pHlp->pfnPrintf(pHlp, "%0*llx error! Page directory pointer table at HCPhys=%RHp was not found in the page pool!\n",
return VERR_INVALID_PARAMETER;
/* Long mode formats entries differently (body truncated in this excerpt). */
if (fLongMode)
i << X86_PDPT_SHIFT,
/* Descend into the page directory referenced by this PDPTE. */
int rc2 = pgmR3DumpHierarchyHCPaePD(pVM, Pdpe.u & X86_PDPE_PG_MASK, u64Address + ((uint64_t)i << X86_PDPT_SHIFT),
return rc;
/**
 * Dumps a long-mode PML4 table, recursing into each referenced PDPT.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   HCPhys      Host-context physical address of the PML4.
 * @param   cr4         The CR4 value (paging feature flags).
 * @param   cMaxDepth   Maximum recursion depth.
 * @param   pHlp        Output helpers.
 */
static int pgmR3DumpHierarchyHcPaePML4(PVM pVM, RTHCPHYS HCPhys, uint32_t cr4, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
if (!pPML4)
return VERR_INVALID_PARAMETER;
/* Build the 48-bit virtual address for PML4 slot i, sign-extending the
   top half (the 0xffff000000000000 factor) to form a canonical address. */
uint64_t u64Address = ((uint64_t)i << X86_PML4_SHIFT) | (((uint64_t)i >> (X86_PML4_SHIFT - X86_PDPT_SHIFT - 1)) * 0xffff000000000000ULL);
/* Descend into the PDPT referenced by this PML4E (fLongMode = true). */
int rc2 = pgmR3DumpHierarchyHCPaePDPT(pVM, Pml4e.u & X86_PML4E_PG_MASK, u64Address, cr4, true, cMaxDepth - 1, pHlp);
return rc;
return VINF_SUCCESS;
/**
 * Dumps a legacy 32-bit shadow page directory and its page tables.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   cr3         The CR3 value (PD physical address plus flags).
 * @param   cr4         The CR4 value (paging feature flags).
 * @param   cMaxDepth   Maximum dump depth.
 * @param   pHlp        Output helpers.
 */
int pgmR3DumpHierarchyHC32BitPD(PVM pVM, uint32_t cr3, uint32_t cr4, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
/* The PD must be resolvable through the page pool; bail out otherwise. */
if (!pPD)
pHlp->pfnPrintf(pHlp, "Page directory at %#x was not found in the page pool!\n", cr3 & X86_CR3_PAGE_MASK);
return VERR_INVALID_PARAMETER;
pHlp->pfnPrintf(pHlp, "%08x error! Mapping error! PT %d has HCPhysPT=%RHp not %RHp is in the PD.\n",
if (pPT)
pHlp->pfnPrintf(pHlp, "%08x error! Page table at %#x was not found in the page pool!\n", u32Address, HCPhys);
return rc;
/* NOTE(review): the two lines below reference PhysSearch, which is not a
   parameter of this function -- they look like residue of a separate
   physical-address search routine interleaved by the truncation.  Verify
   against upstream before restructuring. */
PGMShwGetPage(pVM, &pVM->aCpus[0], (RTGCPTR)(u32Address + (i << X86_PT_SHIFT)), &fPageShw, &pPhysHC);
Log(("Found %RGp at %RGv -> flags=%llx\n", PhysSearch, (RTGCPTR)(u32Address + (i << X86_PT_SHIFT)), fPageShw));
return VINF_SUCCESS;
bool fLongMode = false;
return VERR_INVALID_PARAMETER;
if (pPT)
return rc;
/**
 * Dumps the host-context (shadow) paging hierarchy, dispatching on the
 * paging mode.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   cr3         The CR3 value to start from.
 * @param   cr4         The CR4 value (paging feature flags).
 * @param   fLongMode   Set for AMD64 long mode.
 * @param   cMaxDepth   Maximum dump depth; 0 means nothing to dump.
 * @param   pHlp        Output helpers (a default is chosen when NULL --
 *                      the fallback itself is truncated from this excerpt).
 */
VMMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint64_t cr3, uint64_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
if (!pHlp)
/* Depth 0: nothing to walk. */
if (!cMaxDepth)
return VINF_SUCCESS;
/* NOTE(review): the statements between the long-mode check and the PAE
   PDPT call (false = not long mode) were dropped by the truncation. */
if (fLongMode)
return pgmR3DumpHierarchyHCPaePDPT(pVM, cr3 & X86_CR3_PAE_PAGE_MASK, 0, cr4, false, cMaxDepth, pHlp);
#ifdef VBOX_WITH_DEBUGGER
/**
 * Debugger console (DBGC) command callback -- RAM range listing.
 *
 * @returns VBox status code.
 * @param   pCmd        The command descriptor.
 * @param   pCmdHlp     Command helper callbacks.
 * @param   pVM         The VM handle; must be non-NULL (checked below).
 * @param   paArgs      Command arguments.
 * @param   cArgs       Number of arguments.
 * @param   pResult     Where to store the result (unused here as far as
 *                      visible).
 */
static DECLCALLBACK(int) pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
/* Requires an attached VM. */
if (!pVM)
return rc;
return VINF_SUCCESS;
/**
 * Debugger console (DBGC) command callback -- prints the guest mappings,
 * starting with whether they are FIXED or FLOATING.
 *
 * @returns VBox status code.
 * @param   pCmd        The command descriptor.
 * @param   pCmdHlp     Command helper callbacks.
 * @param   pVM         The VM handle; must be non-NULL (checked below).
 * @param   paArgs      Command arguments.
 * @param   cArgs       Number of arguments.
 * @param   pResult     Where to store the result.
 */
static DECLCALLBACK(int) pgmR3CmdMap(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
/* Requires an attached VM. */
if (!pVM)
/* Report the fixed/floating state of the hypervisor mappings. */
int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, pVM->pgm.s.fMappingsFixed ? "The mappings are FIXED.\n" : "The mappings are FLOATING.\n");
return rc;
return rc;
return VINF_SUCCESS;
/**
 * Debugger console (DBGC) command callback -- error injection/listing.
 *
 * @returns VBox status code.
 * @param   pCmd        The command descriptor.
 * @param   pCmdHlp     Command helper callbacks.
 * @param   pVM         The VM handle; must be non-NULL (checked below).
 * @param   paArgs      Command arguments.
 * @param   cArgs       Number of arguments; the no-argument path is visible.
 * @param   pResult     Where to store the result.
 */
static DECLCALLBACK(int) pgmR3CmdError(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
/* Requires an attached VM. */
if (!pVM)
/* No arguments: the handling between here and the return was truncated. */
if (!cArgs)
return VINF_SUCCESS;
/**
 * Debugger console (DBGC) command callback -- forces a CR3/paging sync
 * (exact action truncated from this excerpt; verify against upstream).
 *
 * @returns VBox status code.
 * @param   pCmd        The command descriptor.
 * @param   pCmdHlp     Command helper callbacks.
 * @param   pVM         The VM handle; must be non-NULL (checked below).
 * @param   paArgs      Command arguments.
 * @param   cArgs       Number of arguments.
 * @param   pResult     Where to store the result.
 */
static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
/* Requires an attached VM. */
if (!pVM)
return rc;
return VINF_SUCCESS;
#ifdef VBOX_STRICT
/**
 * Debugger console (DBGC) command callback -- asserts CR3 consistency.
 * Only compiled in strict builds (see the VBOX_WITH_STRICT-style guard
 * directly above this function).
 *
 * @returns VBox status code.
 * @param   pCmd        The command descriptor.
 * @param   pCmdHlp     Command helper callbacks.
 * @param   pVM         The VM handle; must be non-NULL (checked below).
 * @param   paArgs      Command arguments.
 * @param   cArgs       Number of arguments.
 * @param   pResult     Where to store the result.
 */
static DECLCALLBACK(int) pgmR3CmdAssertCR3(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
/* Requires an attached VM. */
if (!pVM)
return rc;
return VINF_SUCCESS;
/**
 * Debugger console (DBGC) command callback -- toggles an always-sync mode
 * (name-based inference; the body is missing from this excerpt -- verify
 * against upstream).
 *
 * @returns VBox status code.
 * @param   pCmd        The command descriptor.
 * @param   pCmdHlp     Command helper callbacks.
 * @param   pVM         The VM handle; must be non-NULL (checked below).
 * @param   paArgs      Command arguments.
 * @param   cArgs       Number of arguments.
 * @param   pResult     Where to store the result.
 */
static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
/* Requires an attached VM. */
if (!pVM)
typedef struct PGMCHECKINTARGS
/**
 * AVL tree enumeration callback validating one physical access handler node.
 *
 * @returns 0 (continue enumeration -- inferred from the AVL DoWithAll
 *          contract; the return statement itself is truncated here).
 * @param   pNode   The tree node (a physical handler).
 * @param   pvUser  Pointer to the PGMCHECKINTARGS state (previous node,
 *                  enumeration direction).
 */
static DECLCALLBACK(int) pgmR3CheckIntegrityPhysHandlerNode(PAVLROGCPHYSNODECORE pNode, void *pvUser)
/* The node's range must be well-formed: Key (start) <= KeyLast (end). */
AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %RGp-%RGp %s\n", pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
/* The node must be strictly ordered relative to the previously visited
   node; the comparison direction depends on the enumeration direction. */
|| (pArgs->fLeftToRight ? pArgs->pPrevPhys->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys->Core.KeyLast > pCur->Core.Key),
pArgs->pPrevPhys, pArgs->pPrevPhys->Core.Key, pArgs->pPrevPhys->Core.KeyLast, pArgs->pPrevPhys->pszDesc,
/**
 * AVL tree enumeration callback validating one virtual access handler node.
 *
 * @returns 0 (continue enumeration -- inferred from the AVL DoWithAll
 *          contract; the return statement itself is truncated here).
 * @param   pNode   The tree node (a virtual handler).
 * @param   pvUser  Pointer to the PGMCHECKINTARGS state (previous node,
 *                  enumeration direction).
 */
static DECLCALLBACK(int) pgmR3CheckIntegrityVirtHandlerNode(PAVLROGCPTRNODECORE pNode, void *pvUser)
/* The node's range must be well-formed: Key (start) <= KeyLast (end). */
AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %RGv-%RGv %s\n", pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
/* Strict ordering relative to the previously visited node, direction-aware. */
|| (pArgs->fLeftToRight ? pArgs->pPrevVirt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevVirt->Core.KeyLast > pCur->Core.Key),
pArgs->pPrevVirt, pArgs->pPrevVirt->Core.Key, pArgs->pPrevVirt->Core.KeyLast, pArgs->pPrevVirt->pszDesc,
/* Each phys-to-virt sub-entry must point back at its owning handler:
   offVirtHandler is the negative offset of aPhysToVirt[iPage] within
   PGMVIRTHANDLER. */
AssertReleaseMsg(pCur->aPhysToVirt[iPage].offVirtHandler == -RT_OFFSETOF(PGMVIRTHANDLER, aPhysToVirt[iPage]),
/**
 * AVL tree enumeration callback validating one phys-to-virt handler node,
 * including its alias chain.
 *
 * @returns 0 (continue enumeration -- inferred from the AVL DoWithAll
 *          contract; the return statement itself is truncated here).
 * @param   pNode   The tree node (a phys-to-virt mapping head).
 * @param   pvUser  Pointer to the PGMCHECKINTARGS state (previous node,
 *                  enumeration direction).
 */
static DECLCALLBACK(int) pgmR3CheckIntegrityPhysToVirtHandlerNode(PAVLROGCPHYSNODECORE pNode, void *pvUser)
/* The node's range must be well-formed: Key (start) <= KeyLast (end). */
AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %RGp-%RGp\n", pCur, pCur->Core.Key, pCur->Core.KeyLast));
/* Strict ordering relative to the previously visited node, direction-aware
   (the assertion appears twice in this excerpt; likely a truncation
   artifact -- verify against upstream). */
|| (pArgs->fLeftToRight ? pArgs->pPrevPhys2Virt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys2Virt->Core.KeyLast > pCur->Core.Key),
|| (pArgs->fLeftToRight ? pArgs->pPrevPhys2Virt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys2Virt->Core.KeyLast > pCur->Core.Key),
/* A node stored in the tree must be flagged as both IN_TREE and IS_HEAD. */
AssertReleaseMsg((pCur->offNextAlias & (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD)) == (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD),
/* Walk the alias chain: offNextAlias encodes a byte offset to the next
   alias; chained (non-head) entries must be IN_TREE but not IS_HEAD. */
pCur2 = (PPGMPHYS2VIRTHANDLER)((intptr_t)pCur + (pCur->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
AssertReleaseMsg((pCur2->offNextAlias & (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD)) == PGMPHYS2VIRTHANDLER_IN_TREE,
int cErrors = 0;
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysHandlers, true, pgmR3CheckIntegrityPhysHandlerNode, &Args);
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysHandlers, false, pgmR3CheckIntegrityPhysHandlerNode, &Args);
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->VirtHandlers, true, pgmR3CheckIntegrityVirtHandlerNode, &Args);
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->VirtHandlers, false, pgmR3CheckIntegrityVirtHandlerNode, &Args);
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->HyperVirtHandlers, true, pgmR3CheckIntegrityVirtHandlerNode, &Args);
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->HyperVirtHandlers, false, pgmR3CheckIntegrityVirtHandlerNode, &Args);
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysToVirtHandlers, true, pgmR3CheckIntegrityPhysToVirtHandlerNode, &Args);
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysToVirtHandlers, false, pgmR3CheckIntegrityPhysToVirtHandlerNode, &Args);