/* PGM.cpp revision 74b308a9368bd8065d316be3ee45ab23ff79cca5 */
/** @file
 * PGM - Page Manager and Monitor. (Mixing stuff here, not good?)
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_pgm PGM - The Page Manager and Monitor
 *
 * @see grp_pgm,
 *      @ref pg_pgm_pool,
 *      @ref pg_pgm_phys.
 *
 *
 * @section sec_pgm_modes Paging Modes
 *
 * There are three memory contexts: Host Context (HC), Guest Context (GC)
 * and intermediate context. When talking about paging, HC can also be referred
 * to as "host paging" and GC as "shadow paging".
 *
 * We define three basic paging modes: 32-bit, PAE and AMD64. The host paging mode
 * is defined by the host operating system. The mode used for shadow paging
 * depends on the host paging mode and on the mode the guest is currently in. The
 * following relation between the two is defined:
 *
 * @verbatim
    Host ->   |  32-bit  |   PAE    |  AMD64   |
    Guest     |          |          |          |
    ==v========================================
    32-bit       32-bit      PAE        PAE
    ----------|----------|----------|----------|
    PAE          PAE         PAE        PAE
    ----------|----------|----------|----------|
    AMD64        AMD64       AMD64      AMD64
    ----------|----------|----------|----------| @endverbatim
 *
 * All configurations except those on the diagonal (upper left) are expected to
 * require special effort from the switcher (i.e. a bit slower).
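 *
 * Expressed as code, the table above boils down to a small selection rule.
 * This is only an illustrative sketch: the enum and function below are made up
 * for the example; the real logic lives in pgmR3CalcShadowMode() and works on
 * PGMMODE / SUPPAGINGMODE values.
 * @code
 *  enum DemoMode { DEMO_32BIT, DEMO_PAE, DEMO_AMD64 };
 *
 *  static enum DemoMode demoCalcShadowMode(enum DemoMode enmGuest, enum DemoMode enmHost)
 *  {
 *      if (enmGuest == DEMO_AMD64)
 *          return DEMO_AMD64;  // AMD64 guests always get an AMD64 shadow.
 *      if (enmGuest == DEMO_PAE || enmHost != DEMO_32BIT)
 *          return DEMO_PAE;    // PAE guests, and any guest on a PAE/AMD64 host, get PAE shadows.
 *      return DEMO_32BIT;      // only a 32-bit guest on a 32-bit host stays 32-bit.
 *  }
 * @endcode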
 *
 *
 * @section sec_pgm_shw The Shadow Memory Context
 *
 * Because guest context mappings require PDPT and PML4 entries to allow
 * writing on AMD64, the two upper levels will have fixed flags whatever the
 * guest is thinking of using there. So, when shadowing the PD level we will
 * calculate the effective flags of the PD and all the higher levels. In legacy
 * PAE mode this only applies to the PWT and PCD bits (the rest are
 * ignored/reserved/MBZ). We will ignore those bits for the present.
 *
 *
 * @section sec_pgm_int The Intermediate Memory Context
 *
 * The world switch goes through an intermediate memory context whose purpose
 * is to provide different mappings of the switcher code. All guest mappings
 * are also present in this context.
 *
 * The switcher code is mapped at the same location as on the host, at an
 * identity mapped location (physical equals virtual address), and at the
 * hypervisor location. The identity mapped location is used for world
 * switches that involve disabling paging.
 *
 * PGM maintains page tables for 32-bit, PAE and AMD64 paging modes. This
 * simplifies switching guest CPU mode and consistency at the cost of more
 * code to do the work. All memory used for those page tables is located below
 * 4GB (this includes page tables for guest context mappings).
 *
 * @subsection subsec_pgm_int_gc Guest Context Mappings
 *
 * During assignment and relocation of a guest context mapping the intermediate
 * memory context is used to verify the new location.
 *
 * Guest context mappings are currently restricted to below 4GB, for reasons
 * of simplicity. This may change when we implement AMD64 support.
 *
 *
 * @section sec_pgm_misc Misc
 *
 * @subsection subsec_pgm_misc_diff Differences Between Legacy PAE and Long Mode PAE
 *
 * The differences between legacy PAE and long mode PAE are:
 *      -# PDPE bits 1, 2, 5 and 6 are defined differently. In legacy mode they
 *         are all marked down as must-be-zero, while in long mode 1, 2 and 5
 *         have the usual meanings and 6 is ignored (AMD). This means that upon
 *         switching to legacy PAE mode we'll have to clear these bits and when
 *         going to long mode they must be set. This applies to both the
 *         intermediate and shadow contexts; however, we don't need to do it
 *         for the intermediate one since we're executing with CR0.WP at that
 *         time.
 *      -# CR3 allows a 32-byte aligned address in legacy mode, while in long
 *         mode a page aligned one is required.
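 *
 * As an illustration of the first point, clearing the legacy-PAE must-be-zero
 * PDPE bits could look roughly like the sketch below. The helper and mask are
 * hypothetical and spelled with plain bit numbers rather than the X86_PDPE_*
 * defines PGM actually uses:
 * @code
 *  // Bits 1 (R/W), 2 (U/S), 5 (A) and 6 are must-be-zero in legacy PAE PDPTEs.
 *  #define DEMO_PDPE_LEGACY_MBZ_MASK  (RT_BIT_64(1) | RT_BIT_64(2) | RT_BIT_64(5) | RT_BIT_64(6))
 *
 *  static void demoMakePdpteLegacyPae(uint64_t *pu64Pdpe)
 *  {
 *      *pu64Pdpe &= ~DEMO_PDPE_LEGACY_MBZ_MASK;   // clear them when entering legacy PAE mode
 *  }
 * @endcode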
 *
 *
 * @section sec_pgm_handlers Access Handlers
 *
 * Placeholder.
 *
 *
 * @subsection sec_pgm_handlers_phys Physical Access Handlers
 *
 * Placeholder.
 *
 *
 * @subsection sec_pgm_handlers_virt Virtual Access Handlers
 *
 * We currently implement three types of virtual access handlers: ALL, WRITE
 * and HYPERVISOR (WRITE). See PGMVIRTHANDLERTYPE for some more details.
 *
 * The HYPERVISOR access handlers are kept in a separate tree since they don't
 * apply to physical pages (PGMTREES::HyperVirtHandlers) and only need to be
 * consulted in a special \#PF case. The ALL and WRITE handlers are in the
 * PGMTREES::VirtHandlers tree; the rest of this section is about these handlers.
 *
 * We'll go through the life cycle of a handler and try to make sense of it all;
 * don't know how successful this is going to be...
 *
 *      1. A handler is registered through the PGMR3HandlerVirtualRegister and
 *         PGMHandlerVirtualRegisterEx APIs. We check for conflicting virtual
 *         handlers and create a new node that is inserted into the AVL tree
 *         (range key). Then a full PGM resync is flagged (clear pool, sync cr3,
 *         update virtual bit of PGMPAGE).
 *
 *      2. The following PGMSyncCR3/SyncCR3 operation will first invoke
 *         HandlerVirtualUpdate.
 *
 *      2a. HandlerVirtualUpdate will look up all the pages covered by virtual
 *          handlers via the current guest CR3 and update the physical page ->
 *          virtual handler translation. Needless to say, this doesn't exactly
 *          scale very well. If any changes are detected, it will flag a virtual
 *          bit update just like we did on registration. PGMPHYS pages with
 *          changes will have their virtual handler state reset to NONE.
 *
 *      2b. The virtual bit update process will iterate all the pages covered by
 *          all the virtual handlers and update the PGMPAGE virtual handler
 *          state to the max of all virtual handlers on that page.
 *
 *      2c. Back in SyncCR3 we will now flush the entire shadow page cache to
 *          make sure we don't miss any alias mappings of the monitored pages.
 *
 *      2d. SyncCR3 will then proceed with syncing the CR3 table.
 *
 *      3. \#PF(np,read) on a page in the range. This will cause it to be synced
 *         read-only and resumed if it's a WRITE handler. If it's an ALL handler
 *         we will call the handlers like in the next step. If the physical
 *         mapping has changed we will - some time in the future - perform a
 *         handler callback (optional) and update the physical -> virtual
 *         handler cache.
 *
 *      4. \#PF(,write) on a page in the range. This will cause the handler to
 *         be invoked.
 *
 *      5. The guest invalidates the page and changes the physical backing or
 *         unmaps it. This should cause the invalidation callback to be invoked
 *         (it might not yet be 100% perfect). Exactly what happens next... is
 *         this where we mess up and end up out of sync for a while?
 *
 *      6. The handler is deregistered by the client via
 *         PGMHandlerVirtualDeregister. We will then set all PGMPAGEs in the
 *         physical -> virtual handler cache for this handler to NONE and
 *         trigger a full PGM resync (basically the same as in step 1), which
 *         means step 2 is executed again.
 *
 *
 * @subsubsection sub_sec_pgm_handler_virt_todo TODOs
 *
 * There are a bunch of things that need to be done to make the virtual handlers
 * work 100% correctly and more efficiently.
 *
 * The first bit hasn't been implemented yet because it's going to slow the
 * whole mess down even more, and besides it seems to be working reliably for
 * our current uses. OTOH, some of the optimizations might end up more or less
 * implementing the missing bits, so we'll see.
 *
 * On the optimization side, the first thing to do is to try to avoid unnecessary
 * cache flushing. Then try to team up with the shadowing code to track changes
 * in mappings by means of access to them (shadow in), updates to shadow pages,
 * invlpg, and shadow PT discarding (perhaps).
 *
 * Some ideas that have popped up for optimization for current and new features:
 *    - bitmap indicating where there are virtual handlers installed.
 *      (4KB => 2**20 pages, page 2**12 => covers 32-bit address space 1:1!)
 *      A minimal sketch of such a bitmap follows this list.
 *    - Further optimize this by min/max (needs min/max avl getters).
 *    - Shadow page table entry bit (if any left)?
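 *
 * The bitmap idea above could, in its simplest form, look like the sketch
 * below (illustrative only; the names are made up for the example). One bit
 * per 4KB page, so 2^20 bits (128KB) cover a 32-bit guest physical address
 * space:
 * @code
 *  #define DEMO_BITMAP_PAGES   (1U << 20)                  // 4GB / 4KB
 *  static uint32_t g_abmDemoVirtHandlers[DEMO_BITMAP_PAGES / 32];
 *
 *  static void demoBitmapSet(RTGCPHYS GCPhys)
 *  {
 *      uint32_t iPage = (uint32_t)(GCPhys >> 12);
 *      g_abmDemoVirtHandlers[iPage / 32] |= RT_BIT_32(iPage % 32);
 *  }
 *
 *  static bool demoBitmapIsSet(RTGCPHYS GCPhys)
 *  {
 *      uint32_t iPage = (uint32_t)(GCPhys >> 12);
 *      return RT_BOOL(g_abmDemoVirtHandlers[iPage / 32] & RT_BIT_32(iPage % 32));
 *  }
 * @endcode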
 */


/** @page pg_pgm_phys PGM Physical Guest Memory Management
 *
 * Objectives:
 *      - Guest RAM over-commitment using memory ballooning,
 *        zero pages and general page sharing.
 *      - Moving or mirroring a VM onto a different physical machine.
 *
 *
 * @subsection subsec_pgmPhys_Definitions Definitions
 *
 * Allocation chunk - A RTR0MemObjAllocPhysNC object and the tracking
 *                    machinery associated with it.
 *
 *
 * @subsection subsec_pgmPhys_AllocPage Allocating a page.
 *
 * Initially we map *all* guest memory to the (per VM) zero page, which
 * means that none of the read functions will cause pages to be allocated.
 *
 * Exception: the accessed bit in page tables that have been shared. This must
 * be handled, but we must also make sure PGMGst*Modify doesn't make
 * unnecessary modifications.
 *
 * Allocation points:
 *      - PGMPhysSimpleWriteGCPhys and PGMPhysWrite.
 *      - Replacing a zero page mapping at \#PF.
 *      - Replacing a shared page mapping at \#PF.
 *      - ROM registration (currently MMR3RomRegister).
 *      - VM restore (pgmR3Load).
 *
 * For the first three it would make sense to keep a few pages handy
 * until we've reached the max memory commitment for the VM (a minimal sketch
 * of such a pool follows at the end of this subsection).
 *
 * For the ROM registration, we know exactly how many pages we need
 * and will request these from ring-0. For restore, we will save
 * the number of non-zero pages in the saved state and allocate
 * them up front. This would allow the ring-0 component to refuse
 * the request if there isn't sufficient memory available for VM use.
 *
 * Btw. for both ROM and restore allocations we won't be requiring
 * zeroed pages as they are going to be filled instantly.
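 *
 * Such a handy-page pool could, in its simplest form, look like the sketch
 * below. The names are hypothetical; the real code keeps its pool in
 * pVM->pgm.s.aHandyPages and refills it from ring-0:
 * @code
 *  typedef struct DEMOHANDYPOOL
 *  {
 *      RTHCPHYS    aHCPhysPages[32];   // pre-allocated host-physical pages
 *      uint32_t    cPages;             // how many of them are still unused
 *  } DEMOHANDYPOOL;
 *
 *  static int demoHandyPoolTake(DEMOHANDYPOOL *pPool, RTHCPHYS *pHCPhys)
 *  {
 *      if (!pPool->cPages)
 *          return VERR_NO_MEMORY;      // caller must refill the pool first
 *      *pHCPhys = pPool->aHCPhysPages[--pPool->cPages];
 *      return VINF_SUCCESS;
 *  }
 * @endcode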
 *
 *
 * @subsection subsec_pgmPhys_FreePage Freeing a page
 *
 * There are a few points where a page can be freed:
 *      - After being replaced by the zero page.
 *      - After being replaced by a shared page.
 *      - After being ballooned by the guest additions.
 *      - At reset.
 *      - At restore.
 *
 * When freeing one or more pages they will be returned to the ring-0
 * component and replaced by the zero page.
 *
 * The reasoning for clearing out all the pages on reset is that it will
 * return us to the exact same state as on power on, and may thereby help
 * us reduce the memory load on the system. Further it might have a
 * (temporary) positive influence on memory fragmentation (@see subsec_pgmPhys_Fragmentation).
 *
 * On restore, as mentioned under the allocation topic, pages should be
 * freed / allocated depending on how many are actually required by the
 * new VM state. The simplest approach is to do as on reset: free
 * all non-ROM pages and then allocate what we need.
 *
 * A measure to prevent some fragmentation would be to let each allocation
 * chunk have some affinity towards the VM having allocated the most pages
 * from it. Also, try to allocate from allocation chunks that
 * are almost full. Admittedly, both these measures might work counter to
 * our intentions and it's probably not worth putting a lot of effort,
 * CPU time or memory into this.
 *
 *
 * @subsection subsec_pgmPhys_SharePage Sharing a page
 *
 * The basic idea is that there will be an idle-priority kernel
 * thread walking the non-shared VM pages, hashing them and looking for
 * pages with the same checksum. If such pages are found, it will compare
 * them byte-by-byte to see if they actually are identical. If found to be
 * identical it will allocate a shared page, copy the content, check that
 * the page didn't change while doing this, and finally request both the
 * VMs to use the shared page instead. If the page is all zeros (special
 * checksum and byte-by-byte check) it will request the VM that owns it
 * to replace it with the zero page.
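 *
 * The checksum-then-compare step could look roughly like this (illustrative
 * sketch only; the helper name is made up, and the real implementation would
 * go through the GMM page sharing machinery rather than raw pointers):
 * @code
 *  static bool demoPagesIdentical(const uint8_t *pbPage1, const uint8_t *pbPage2)
 *  {
 *      // Cheap first-level filter: compare checksums of the two 4KB pages.
 *      if (RTCrc32(pbPage1, PAGE_SIZE) != RTCrc32(pbPage2, PAGE_SIZE))
 *          return false;
 *      // Checksums can collide, so confirm byte-by-byte.
 *      return memcmp(pbPage1, pbPage2, PAGE_SIZE) == 0;
 *  }
 * @endcode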
 * To make this efficient, we will have to make sure not to try share a page
 *      - The over-commitment management, including the allocating/freeing
 *
 * The simplified flow of a PGMPhysRead/Write function:
 */
#include "PGMInternal.h"
#include "PGMInline.h"
#ifdef VBOX_STRICT
static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser);
static PGMMODE pgmR3CalcShadowMode(PVM pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode, VMMSWITCHER *penmSwitcher);
#ifdef VBOX_WITH_DEBUGGER
static DECLCALLBACK(int) pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
static DECLCALLBACK(int) pgmR3CmdError(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
# ifdef VBOX_STRICT
static DECLCALLBACK(int) pgmR3CmdAssertCR3(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
static DECLCALLBACK(int) pgmR3CmdCheckDuplicatePages(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
static DECLCALLBACK(int) pgmR3CmdPhysToFile(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
#ifdef VBOX_WITH_DEBUGGER
/* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, pResultDesc, fFlags, pfnHandler, pszSyntax, ....pszDescription */
{ "pgmerror", 0, 1, &g_aPgmErrorArgs[0], 1, NULL, 0, pgmR3CmdError, "", "Enables injection of runtime errors into parts of PGM." },
{ "pgmerroroff", 0, 1, &g_aPgmErrorArgs[0], 1, NULL, 0, pgmR3CmdError, "", "Disables injection of runtime errors into parts of PGM." },
#ifdef VBOX_STRICT
{ "pgmcheckduppages", 0, 0, NULL, 0, NULL, 0, pgmR3CmdCheckDuplicatePages, "", "Check for duplicate pages in all running VMs." },
{ "pgmsyncalways", 0, 0, NULL, 0, NULL, 0, pgmR3CmdSyncAlways, "", "Toggle permanent CR3 syncing." },
{ "pgmphystofile", 1, 2, &g_aPgmPhysToFileArgs[0], 2, NULL, 0, pgmR3CmdPhysToFile, "", "Save the physical memory to file." },
#include "PGMShw.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMGst.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMGst.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMGst.h"
#include "PGMShw.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMGst.h"
#include "PGMShw.h"
#ifdef VBOX_WITH_64_BITS_GUESTS
# include "PGMBth.h"
# include "PGMGstDefs.h"
# include "PGMGst.h"
#include "PGMShw.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#ifdef VBOX_WITH_64_BITS_GUESTS
# include "PGMGstDefs.h"
# include "PGMBth.h"
#include "PGMShw.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#include "PGMGstDefs.h"
#include "PGMBth.h"
#ifdef VBOX_WITH_64_BITS_GUESTS
# include "PGMGstDefs.h"
# include "PGMBth.h"
int rc;
#ifdef PGM_WITHOUT_MAPPINGS
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, _1G / GMM_CHUNK_SIZE);
cbRam = 0;
cbRam = 0;
return rc;
#ifdef VBOX_STRICT
return rc;
rc = MMR3ReserveHandyPages(pVM, RT_ELEMENTS(pVM->pgm.s.aHandyPages)); /** @todo this should be changed to PGM_HANDY_PAGES_MIN but this needs proper testing... */
"Recognizes 'all', 'guest', 'shadow' and 'host' as arguments, defaulting to 'all' if nothing's given.",
#ifdef VBOX_WITH_DEBUGGER
static bool s_fRegisteredCmds = false;
if (!s_fRegisteredCmds)
s_fRegisteredCmds = true;
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
return rc;
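/*
 * Allocate the paging structures for the intermediate context (see "The
 * Intermediate Memory Context" above). Structures that must be physically
 * addressable with 32 bits (the 32-bit PD/PTs, the PAE PDPT and the 64-bit
 * PDPT/PML4) are taken from low memory via MMR3PageAllocLow().
 */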
pVM->pgm.s.pInterPD = (PX86PD)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.pInterPD, VERR_NO_PAGE_MEMORY);
pVM->pgm.s.apInterPTs[0] = (PX86PT)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.apInterPTs[0], VERR_NO_PAGE_MEMORY);
pVM->pgm.s.apInterPTs[1] = (PX86PT)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.apInterPTs[1], VERR_NO_PAGE_MEMORY);
pVM->pgm.s.apInterPaePTs[0] = (PX86PTPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePTs[0], VERR_NO_PAGE_MEMORY);
pVM->pgm.s.apInterPaePTs[1] = (PX86PTPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePTs[1], VERR_NO_PAGE_MEMORY);
pVM->pgm.s.apInterPaePDs[0] = (PX86PDPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePDs[0], VERR_NO_PAGE_MEMORY);
pVM->pgm.s.apInterPaePDs[1] = (PX86PDPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePDs[1], VERR_NO_PAGE_MEMORY);
pVM->pgm.s.apInterPaePDs[2] = (PX86PDPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePDs[2], VERR_NO_PAGE_MEMORY);
pVM->pgm.s.apInterPaePDs[3] = (PX86PDPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePDs[3], VERR_NO_PAGE_MEMORY);
pVM->pgm.s.pInterPaePDPT = (PX86PDPT)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.pInterPaePDPT, VERR_NO_PAGE_MEMORY);
pVM->pgm.s.pInterPaePDPT64 = (PX86PDPT)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.pInterPaePDPT64, VERR_NO_PAGE_MEMORY);
pVM->pgm.s.pInterPaePML4 = (PX86PML4)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.pInterPaePML4, VERR_NO_PAGE_MEMORY);
AssertRelease(pVM->pgm.s.HCPhysInterPD != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPD & PAGE_OFFSET_MASK));
AssertRelease(pVM->pgm.s.HCPhysInterPaePDPT != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePDPT & PAGE_OFFSET_MASK));
AssertRelease(pVM->pgm.s.HCPhysInterPaePML4 != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePML4 & PAGE_OFFSET_MASK) && pVM->pgm.s.HCPhysInterPaePML4 < 0xffffffff);
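/*
 * Initialize the upper levels of the 64-bit intermediate context: each PDPT
 * and PML4 entry gets fixed P/RW/US/A flags, and PGM_PLXFLAGS_PERMANENT marks
 * the entry as belonging to a permanent PGM mapping.
 */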
pVM->pgm.s.pInterPaePDPT64->a[i].u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A | PGM_PLXFLAGS_PERMANENT
pVM->pgm.s.pInterPaePML4->a[i].u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A | PGM_PLXFLAGS_PERMANENT
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_GLOBAL:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
#ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
AssertMsgFailed(("Host mode %d (64-bit) is not supported by non-64bit builds\n", pVM->pgm.s.enmHostMode));
LogRel(("Debug: apInterPTs={%RHp,%RHp} apInterPaePTs={%RHp,%RHp} apInterPaePDs={%RHp,%RHp,%RHp,%RHp} pInterPaePDPT64=%RHp\n",
MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[0]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[1]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[2]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[3]),
return VINF_SUCCESS;
return rc;
int rc;
STAM_REL_REG(pVM, &pPGM->cAllPages, STAMTYPE_U32, "/PGM/Page/cAllPages", STAMUNIT_COUNT, "The total number of pages.");
STAM_REL_REG(pVM, &pPGM->cPrivatePages, STAMTYPE_U32, "/PGM/Page/cPrivatePages", STAMUNIT_COUNT, "The number of private pages.");
STAM_REL_REG(pVM, &pPGM->cSharedPages, STAMTYPE_U32, "/PGM/Page/cSharedPages", STAMUNIT_COUNT, "The number of shared pages.");
STAM_REL_REG(pVM, &pPGM->cReusedSharedPages, STAMTYPE_U32, "/PGM/Page/cReusedSharedPages", STAMUNIT_COUNT, "The number of reused shared pages.");
STAM_REL_REG(pVM, &pPGM->cZeroPages, STAMTYPE_U32, "/PGM/Page/cZeroPages", STAMUNIT_COUNT, "The number of zero backed pages.");
STAM_REL_REG(pVM, &pPGM->cPureMmioPages, STAMTYPE_U32, "/PGM/Page/cPureMmioPages", STAMUNIT_COUNT, "The number of pure MMIO pages.");
STAM_REL_REG(pVM, &pPGM->cMonitoredPages, STAMTYPE_U32, "/PGM/Page/cMonitoredPages", STAMUNIT_COUNT, "The number of write monitored pages.");
STAM_REL_REG(pVM, &pPGM->cWrittenToPages, STAMTYPE_U32, "/PGM/Page/cWrittenToPages", STAMUNIT_COUNT, "The number of previously write monitored pages that have been written to.");
STAM_REL_REG(pVM, &pPGM->cWriteLockedPages, STAMTYPE_U32, "/PGM/Page/cWriteLockedPages", STAMUNIT_COUNT, "The number of write(/read) locked pages.");
STAM_REL_REG(pVM, &pPGM->cReadLockedPages, STAMTYPE_U32, "/PGM/Page/cReadLockedPages", STAMUNIT_COUNT, "The number of read (only) locked pages.");
STAM_REL_REG(pVM, &pPGM->cBalloonedPages, STAMTYPE_U32, "/PGM/Page/cBalloonedPages", STAMUNIT_COUNT, "The number of ballooned pages.");
STAM_REL_REG(pVM, &pPGM->cHandyPages, STAMTYPE_U32, "/PGM/Page/cHandyPages", STAMUNIT_COUNT, "The number of handy pages (not included in cAllPages).");
STAM_REL_REG(pVM, &pPGM->cRelocations, STAMTYPE_COUNTER, "/PGM/cRelocations", STAMUNIT_OCCURENCES,"Number of hypervisor relocations.");
STAM_REL_REG(pVM, &pPGM->ChunkR3Map.c, STAMTYPE_U32, "/PGM/ChunkR3Map/c", STAMUNIT_COUNT, "Number of mapped chunks.");
STAM_REL_REG(pVM, &pPGM->ChunkR3Map.cMax, STAMTYPE_U32, "/PGM/ChunkR3Map/cMax", STAMUNIT_COUNT, "Maximum number of mapped chunks.");
STAM_REL_REG(pVM, &pPGM->StatLargePageAlloc, STAMTYPE_COUNTER, "/PGM/LargePage/Alloc", STAMUNIT_OCCURENCES, "The number of large pages we've used.");
STAM_REL_REG(pVM, &pPGM->StatLargePageReused, STAMTYPE_COUNTER, "/PGM/LargePage/Reused", STAMUNIT_OCCURENCES, "The number of times we've reused a large page.");
STAM_REL_REG(pVM, &pPGM->StatLargePageRefused, STAMTYPE_COUNTER, "/PGM/LargePage/Refused", STAMUNIT_OCCURENCES, "The number of times we couldn't use a large page.");
STAM_REL_REG(pVM, &pPGM->StatLargePageRecheck, STAMTYPE_COUNTER, "/PGM/LargePage/Recheck", STAMUNIT_OCCURENCES, "The number of times we've rechecked a disabled large page.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.fActive, STAMTYPE_U8, "/PGM/LiveSave/fActive", STAMUNIT_COUNT, "Active or not.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.cIgnoredPages, STAMTYPE_U32, "/PGM/LiveSave/cIgnoredPages", STAMUNIT_COUNT, "The number of ignored pages in the RAM ranges (i.e. MMIO, MMIO2 and ROM).");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.cDirtyPagesLong, STAMTYPE_U32, "/PGM/LiveSave/cDirtyPagesLong", STAMUNIT_COUNT, "Longer term dirty page average.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.cDirtyPagesShort, STAMTYPE_U32, "/PGM/LiveSave/cDirtyPagesShort", STAMUNIT_COUNT, "Short term dirty page average.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.cPagesPerSecond, STAMTYPE_U32, "/PGM/LiveSave/cPagesPerSecond", STAMUNIT_COUNT, "Pages per second.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.cSavedPages, STAMTYPE_U64, "/PGM/LiveSave/cSavedPages", STAMUNIT_COUNT, "The total number of saved pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Ram.cReadyPages, STAMTYPE_U32, "/PGM/LiveSave/Ram/cReadPages", STAMUNIT_COUNT, "RAM: Ready pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Ram.cDirtyPages, STAMTYPE_U32, "/PGM/LiveSave/Ram/cDirtyPages", STAMUNIT_COUNT, "RAM: Dirty pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Ram.cZeroPages, STAMTYPE_U32, "/PGM/LiveSave/Ram/cZeroPages", STAMUNIT_COUNT, "RAM: Ready zero pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Ram.cMonitoredPages, STAMTYPE_U32, "/PGM/LiveSave/Ram/cMonitoredPages", STAMUNIT_COUNT, "RAM: Write monitored pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Rom.cReadyPages, STAMTYPE_U32, "/PGM/LiveSave/Rom/cReadPages", STAMUNIT_COUNT, "ROM: Ready pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Rom.cDirtyPages, STAMTYPE_U32, "/PGM/LiveSave/Rom/cDirtyPages", STAMUNIT_COUNT, "ROM: Dirty pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Rom.cZeroPages, STAMTYPE_U32, "/PGM/LiveSave/Rom/cZeroPages", STAMUNIT_COUNT, "ROM: Ready zero pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Rom.cMonitoredPages, STAMTYPE_U32, "/PGM/LiveSave/Rom/cMonitoredPages", STAMUNIT_COUNT, "ROM: Write monitored pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Mmio2.cReadyPages, STAMTYPE_U32, "/PGM/LiveSave/Mmio2/cReadPages", STAMUNIT_COUNT, "MMIO2: Ready pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Mmio2.cDirtyPages, STAMTYPE_U32, "/PGM/LiveSave/Mmio2/cDirtyPages", STAMUNIT_COUNT, "MMIO2: Dirty pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Mmio2.cZeroPages, STAMTYPE_U32, "/PGM/LiveSave/Mmio2/cZeroPages", STAMUNIT_COUNT, "MMIO2: Ready zero pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Mmio2.cMonitoredPages,STAMTYPE_U32, "/PGM/LiveSave/Mmio2/cMonitoredPages",STAMUNIT_COUNT, "MMIO2: Write monitored pages.");
#ifdef VBOX_WITH_STATISTICS
# define PGM_REG_COUNTER(a, b, c) \
    rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b); \
    AssertRC(rc);
# define PGM_REG_COUNTER_BYTES(a, b, c) \
    rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, c, b); \
    AssertRC(rc);
# define PGM_REG_PROFILE(a, b, c) \
    rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b); \
    AssertRC(rc);
PGM_REG_PROFILE(&pPGM->StatAllocLargePage, "/PGM/LargePage/Prof/Alloc", "Time spent by the host OS for large page allocation.");
PGM_REG_PROFILE(&pPGM->StatClearLargePage, "/PGM/LargePage/Prof/Clear", "Time spent clearing the newly allocated large pages.");
PGM_REG_PROFILE(&pPGM->StatR3IsValidLargePage, "/PGM/LargePage/Prof/R3/IsValid", "pgmPhysIsValidLargePage profiling - R3.");
PGM_REG_PROFILE(&pPGM->StatRZIsValidLargePage, "/PGM/LargePage/Prof/RZ/IsValid", "pgmPhysIsValidLargePage profiling - RZ.");
PGM_REG_COUNTER(&pPGM->StatR3DetectedConflicts, "/PGM/R3/DetectedConflicts", "The number of times PGMR3CheckMappingConflicts() detected a conflict.");
PGM_REG_PROFILE(&pPGM->StatR3ResolveConflict, "/PGM/R3/ResolveConflict", "pgmR3SyncPTResolveConflict() profiling (includes the entire relocation).");
PGM_REG_COUNTER(&pPGM->StatR3PhysRead, "/PGM/R3/Phys/Read", "The number of times PGMPhysRead was called.");
PGM_REG_COUNTER_BYTES(&pPGM->StatR3PhysReadBytes, "/PGM/R3/Phys/Read/Bytes", "The number of bytes read by PGMPhysRead.");
PGM_REG_COUNTER(&pPGM->StatR3PhysWrite, "/PGM/R3/Phys/Write", "The number of times PGMPhysWrite was called.");
PGM_REG_COUNTER_BYTES(&pPGM->StatR3PhysWriteBytes, "/PGM/R3/Phys/Write/Bytes", "The number of bytes written by PGMPhysWrite.");
PGM_REG_COUNTER(&pPGM->StatR3PhysSimpleRead, "/PGM/R3/Phys/Simple/Read", "The number of times PGMPhysSimpleReadGCPtr was called.");
PGM_REG_COUNTER_BYTES(&pPGM->StatR3PhysSimpleReadBytes, "/PGM/R3/Phys/Simple/Read/Bytes", "The number of bytes read by PGMPhysSimpleReadGCPtr.");
PGM_REG_COUNTER(&pPGM->StatR3PhysSimpleWrite, "/PGM/R3/Phys/Simple/Write", "The number of times PGMPhysSimpleWriteGCPtr was called.");
PGM_REG_COUNTER_BYTES(&pPGM->StatR3PhysSimpleWriteBytes, "/PGM/R3/Phys/Simple/Write/Bytes", "The number of bytes written by PGMPhysSimpleWriteGCPtr.");
PGM_REG_COUNTER(&pPGM->StatPageMapTlbFlushes, "/PGM/R3/Page/MapTlbFlushes", "TLB flushes (all contexts).");
PGM_REG_COUNTER(&pPGM->StatPageMapTlbFlushEntry, "/PGM/R3/Page/MapTlbFlushEntry", "TLB entry flushes (all contexts).");
PGM_REG_PROFILE(&pPGM->StatRZSyncCR3HandlerVirtualUpdate, "/PGM/RZ/SyncCR3/Handlers/VirtualUpdate", "Profiling of the virtual handler updates.");
PGM_REG_PROFILE(&pPGM->StatRZSyncCR3HandlerVirtualReset, "/PGM/RZ/SyncCR3/Handlers/VirtualReset", "Profiling of the virtual handler resets.");
PGM_REG_PROFILE(&pPGM->StatR3SyncCR3HandlerVirtualUpdate, "/PGM/R3/SyncCR3/Handlers/VirtualUpdate", "Profiling of the virtual handler updates.");
PGM_REG_PROFILE(&pPGM->StatR3SyncCR3HandlerVirtualReset, "/PGM/R3/SyncCR3/Handlers/VirtualReset", "Profiling of the virtual handler resets.");
PGM_REG_COUNTER(&pPGM->StatRZPhysHandlerReset, "/PGM/RZ/PhysHandlerReset", "The number of times PGMHandlerPhysicalReset is called.");
PGM_REG_COUNTER(&pPGM->StatR3PhysHandlerReset, "/PGM/R3/PhysHandlerReset", "The number of times PGMHandlerPhysicalReset is called.");
PGM_REG_PROFILE(&pPGM->StatRZVirtHandlerSearchByPhys, "/PGM/RZ/VirtHandlerSearchByPhys", "Profiling of pgmHandlerVirtualFindByPhysAddr.");
PGM_REG_PROFILE(&pPGM->StatR3VirtHandlerSearchByPhys, "/PGM/R3/VirtHandlerSearchByPhys", "Profiling of pgmHandlerVirtualFindByPhysAddr.");
PGM_REG_COUNTER(&pPGM->StatRZPageReplaceShared, "/PGM/RZ/Page/ReplacedShared", "Times a shared page was replaced.");
PGM_REG_COUNTER(&pPGM->StatRZPageReplaceZero, "/PGM/RZ/Page/ReplacedZero", "Times the zero page was replaced.");
/// @todo PGM_REG_COUNTER(&pPGM->StatRZPageHandyAllocs, "/PGM/RZ/Page/HandyAllocs", "Number of times we've allocated more handy pages.");
PGM_REG_COUNTER(&pPGM->StatR3PageReplaceShared, "/PGM/R3/Page/ReplacedShared", "Times a shared page was replaced.");
PGM_REG_COUNTER(&pPGM->StatR3PageReplaceZero, "/PGM/R3/Page/ReplacedZero", "Times the zero page was replaced.");
/// @todo PGM_REG_COUNTER(&pPGM->StatR3PageHandyAllocs, "/PGM/R3/Page/HandyAllocs", "Number of times we've allocated more handy pages.");
PGM_REG_COUNTER(&pPGM->StatRZPhysRead, "/PGM/RZ/Phys/Read", "The number of times PGMPhysRead was called.");
PGM_REG_COUNTER_BYTES(&pPGM->StatRZPhysReadBytes, "/PGM/RZ/Phys/Read/Bytes", "The number of bytes read by PGMPhysRead.");
PGM_REG_COUNTER(&pPGM->StatRZPhysWrite, "/PGM/RZ/Phys/Write", "The number of times PGMPhysWrite was called.");
PGM_REG_COUNTER_BYTES(&pPGM->StatRZPhysWriteBytes, "/PGM/RZ/Phys/Write/Bytes", "The number of bytes written by PGMPhysWrite.");
PGM_REG_COUNTER(&pPGM->StatRZPhysSimpleRead, "/PGM/RZ/Phys/Simple/Read", "The number of times PGMPhysSimpleReadGCPtr was called.");
PGM_REG_COUNTER_BYTES(&pPGM->StatRZPhysSimpleReadBytes, "/PGM/RZ/Phys/Simple/Read/Bytes", "The number of bytes read by PGMPhysSimpleReadGCPtr.");
PGM_REG_COUNTER(&pPGM->StatRZPhysSimpleWrite, "/PGM/RZ/Phys/Simple/Write", "The number of times PGMPhysSimpleWriteGCPtr was called.");
PGM_REG_COUNTER_BYTES(&pPGM->StatRZPhysSimpleWriteBytes, "/PGM/RZ/Phys/Simple/Write/Bytes", "The number of bytes written by PGMPhysSimpleWriteGCPtr.");
PGM_REG_COUNTER(&pPGM->StatRCDynMapCacheHits, "/PGM/RC/DynMapCache/Hits" , "Number of dynamic page mapping cache hits.");
PGM_REG_COUNTER(&pPGM->StatRCDynMapCacheMisses, "/PGM/RC/DynMapCache/Misses" , "Number of dynamic page mapping cache misses.");
PGM_REG_COUNTER(&pPGM->StatRCInvlPgConflict, "/PGM/RC/InvlPgConflict", "Number of times PGMInvalidatePage() detected a mapping conflict.");
PGM_REG_COUNTER(&pPGM->StatRCInvlPgSyncMonCR3, "/PGM/RC/InvlPgSyncMonitorCR3", "Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3.");
PGM_REG_COUNTER(&pPGM->StatRCPhysRead, "/PGM/RC/Phys/Read", "The number of times PGMPhysRead was called.");
PGM_REG_COUNTER_BYTES(&pPGM->StatRCPhysReadBytes, "/PGM/RC/Phys/Read/Bytes", "The number of bytes read by PGMPhysRead.");
PGM_REG_COUNTER(&pPGM->StatRCPhysWrite, "/PGM/RC/Phys/Write", "The number of times PGMPhysWrite was called.");
PGM_REG_COUNTER_BYTES(&pPGM->StatRCPhysWriteBytes, "/PGM/RC/Phys/Write/Bytes", "The number of bytes written by PGMPhysWrite.");
PGM_REG_COUNTER(&pPGM->StatRCPhysSimpleRead, "/PGM/RC/Phys/Simple/Read", "The number of times PGMPhysSimpleReadGCPtr was called.");
PGM_REG_COUNTER_BYTES(&pPGM->StatRCPhysSimpleReadBytes, "/PGM/RC/Phys/Simple/Read/Bytes", "The number of bytes read by PGMPhysSimpleReadGCPtr.");
PGM_REG_COUNTER(&pPGM->StatRCPhysSimpleWrite, "/PGM/RC/Phys/Simple/Write", "The number of times PGMPhysSimpleWriteGCPtr was called.");
PGM_REG_COUNTER_BYTES(&pPGM->StatRCPhysSimpleWriteBytes, "/PGM/RC/Phys/Simple/Write/Bytes", "The number of bytes written by PGMPhysSimpleWriteGCPtr.");
PGM_REG_COUNTER(&pPGM->StatTrackVirgin, "/PGM/Track/Virgin", "The number of first time shadowings");
PGM_REG_COUNTER(&pPGM->StatTrackAliased, "/PGM/Track/Aliased", "The number of times switching to cRef2, i.e. the page is being shadowed by two PTs.");
PGM_REG_COUNTER(&pPGM->StatTrackAliasedMany, "/PGM/Track/AliasedMany", "The number of times we're tracking using cRef2.");
PGM_REG_COUNTER(&pPGM->StatTrackAliasedLots, "/PGM/Track/AliasedLots", "The number of times we're hitting pages which have overflowed cRef2");
PGM_REG_COUNTER(&pPGM->StatTrackOverflows, "/PGM/Track/Overflows", "The number of times the extent list grows too long.");
PGM_REG_COUNTER(&pPGM->StatTrackNoExtentsLeft, "/PGM/Track/NoExtentLeft", "The number of times the extent list was exhausted.");
PGM_REG_PROFILE(&pPGM->StatTrackDeref, "/PGM/Track/Deref", "Profiling of SyncPageWorkerTrackDeref (expensive).");
#define PGM_REG_COUNTER(a, b, c) \
rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
#define PGM_REG_PROFILE(a, b, c) \
rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
PGM_REG_COUNTER(&pPgmCpu->cGuestModeChanges, "/PGM/CPU%u/cGuestModeChanges", "Number of guest mode changes.");
#ifdef VBOX_WITH_STATISTICS
STAMR3RegisterF(pVM, &pPgmCpu->StatSyncPtPD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
STAMR3RegisterF(pVM, &pPgmCpu->StatSyncPagePD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapMigrateInvlPg, "/PGM/CPU%u/R0/DynMapMigrateInvlPg", "invlpg count in PGMDynMapMigrateAutoSet.");
PGM_REG_PROFILE(&pPgmCpu->StatR0DynMapGCPageInl, "/PGM/CPU%u/R0/DynMapPageGCPageInl", "Calls to pgmR0DynMapGCPageInlined.");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapGCPageInlHits, "/PGM/CPU%u/R0/DynMapPageGCPageInl/Hits", "Hash table lookup hits.");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapGCPageInlMisses, "/PGM/CPU%u/R0/DynMapPageGCPageInl/Misses", "Misses that fall back to code common with PGMDynMapHCPage.");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapGCPageInlRamHits, "/PGM/CPU%u/R0/DynMapPageGCPageInl/RamHits", "1st ram range hits.");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapGCPageInlRamMisses, "/PGM/CPU%u/R0/DynMapPageGCPageInl/RamMisses", "1st ram range misses, takes slow path.");
PGM_REG_PROFILE(&pPgmCpu->StatR0DynMapHCPageInl, "/PGM/CPU%u/R0/DynMapPageHCPageInl", "Calls to pgmR0DynMapHCPageInlined.");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapHCPageInlHits, "/PGM/CPU%u/R0/DynMapPageHCPageInl/Hits", "Hash table lookup hits.");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapHCPageInlMisses, "/PGM/CPU%u/R0/DynMapPageHCPageInl/Misses", "Misses that fall back to code common with PGMDynMapHCPage.");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapPage, "/PGM/CPU%u/R0/DynMapPage", "Calls to pgmR0DynMapPage");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapSetOptimize, "/PGM/CPU%u/R0/DynMapPage/SetOptimize", "Calls to pgmDynMapOptimizeAutoSet.");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapSetSearchFlushes, "/PGM/CPU%u/R0/DynMapPage/SetSearchFlushes","Set search resorting to subset flushes.");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapSetSearchHits, "/PGM/CPU%u/R0/DynMapPage/SetSearchHits", "Set search hits.");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapSetSearchMisses, "/PGM/CPU%u/R0/DynMapPage/SetSearchMisses", "Set search misses.");
PGM_REG_PROFILE(&pPgmCpu->StatR0DynMapHCPage, "/PGM/CPU%u/R0/DynMapPage/HCPage", "Calls to PGMDynMapHCPage (ring-0).");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapPageHits0, "/PGM/CPU%u/R0/DynMapPage/Hits0", "Hits at iPage+0");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapPageHits1, "/PGM/CPU%u/R0/DynMapPage/Hits1", "Hits at iPage+1");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapPageHits2, "/PGM/CPU%u/R0/DynMapPage/Hits2", "Hits at iPage+2");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapPageInvlPg, "/PGM/CPU%u/R0/DynMapPage/InvlPg", "invlpg count in pgmR0DynMapPageSlow.");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapPageSlow, "/PGM/CPU%u/R0/DynMapPage/Slow", "Calls to pgmR0DynMapPageSlow - subtract this from pgmR0DynMapPage to get 1st level hits.");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapPageSlowLoopHits, "/PGM/CPU%u/R0/DynMapPage/SlowLoopHits" , "Hits in the loop path.");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapPageSlowLoopMisses, "/PGM/CPU%u/R0/DynMapPage/SlowLoopMisses", "Misses in the loop path. NonLoopMisses = Slow - SlowLoopHit - SlowLoopMisses");
//PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapPageSlowLostHits, "/PGM/CPU%u/R0/DynMapPage/SlowLostHits", "Lost hits.");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapSubsets, "/PGM/CPU%u/R0/Subsets", "Times PGMDynMapPushAutoSubset was called.");
PGM_REG_COUNTER(&pPgmCpu->StatR0DynMapPopFlushes, "/PGM/CPU%u/R0/SubsetPopFlushes", "Times PGMDynMapPopAutoSubset flushes the subset.");
PGM_REG_COUNTER(&pPgmCpu->aStatR0DynMapSetSize[0], "/PGM/CPU%u/R0/SetSize000..09", "00-09% filled");
PGM_REG_COUNTER(&pPgmCpu->aStatR0DynMapSetSize[1], "/PGM/CPU%u/R0/SetSize010..19", "10-19% filled");
PGM_REG_COUNTER(&pPgmCpu->aStatR0DynMapSetSize[2], "/PGM/CPU%u/R0/SetSize020..29", "20-29% filled");
PGM_REG_COUNTER(&pPgmCpu->aStatR0DynMapSetSize[3], "/PGM/CPU%u/R0/SetSize030..39", "30-39% filled");
PGM_REG_COUNTER(&pPgmCpu->aStatR0DynMapSetSize[4], "/PGM/CPU%u/R0/SetSize040..49", "40-49% filled");
PGM_REG_COUNTER(&pPgmCpu->aStatR0DynMapSetSize[5], "/PGM/CPU%u/R0/SetSize050..59", "50-59% filled");
PGM_REG_COUNTER(&pPgmCpu->aStatR0DynMapSetSize[6], "/PGM/CPU%u/R0/SetSize060..69", "60-69% filled");
PGM_REG_COUNTER(&pPgmCpu->aStatR0DynMapSetSize[7], "/PGM/CPU%u/R0/SetSize070..79", "70-79% filled");
PGM_REG_COUNTER(&pPgmCpu->aStatR0DynMapSetSize[8], "/PGM/CPU%u/R0/SetSize080..89", "80-89% filled");
PGM_REG_COUNTER(&pPgmCpu->aStatR0DynMapSetSize[9], "/PGM/CPU%u/R0/SetSize090..99", "90-99% filled");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0e, "/PGM/CPU%u/RZ/Trap0e", "Profiling of the PGMTrap0eHandler() body.");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0eTimeCheckPageFault, "/PGM/CPU%u/RZ/Trap0e/Time/CheckPageFault", "Profiling of checking for dirty/access emulation faults.");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0eTimeSyncPT, "/PGM/CPU%u/RZ/Trap0e/Time/SyncPT", "Profiling of lazy page table syncing.");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0eTimeMapping, "/PGM/CPU%u/RZ/Trap0e/Time/Mapping", "Profiling of checking virtual mappings.");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0eTimeOutOfSync, "/PGM/CPU%u/RZ/Trap0e/Time/OutOfSync", "Profiling of out of sync page handling.");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0eTimeHandlers, "/PGM/CPU%u/RZ/Trap0e/Time/Handlers", "Profiling of checking handlers.");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0eTime2CSAM, "/PGM/CPU%u/RZ/Trap0e/Time2/CSAM", "Profiling of the Trap0eHandler body when the cause is CSAM.");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0eTime2DirtyAndAccessed, "/PGM/CPU%u/RZ/Trap0e/Time2/DirtyAndAccessedBits", "Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation.");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0eTime2GuestTrap, "/PGM/CPU%u/RZ/Trap0e/Time2/GuestTrap", "Profiling of the Trap0eHandler body when the cause is a guest trap.");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0eTime2HndPhys, "/PGM/CPU%u/RZ/Trap0e/Time2/HandlerPhysical", "Profiling of the Trap0eHandler body when the cause is a physical handler.");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0eTime2HndVirt, "/PGM/CPU%u/RZ/Trap0e/Time2/HandlerVirtual", "Profiling of the Trap0eHandler body when the cause is a virtual handler.");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0eTime2HndUnhandled, "/PGM/CPU%u/RZ/Trap0e/Time2/HandlerUnhandled", "Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page.");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0eTime2Misc, "/PGM/CPU%u/RZ/Trap0e/Time2/Misc", "Profiling of the Trap0eHandler body when the cause is not known.");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0eTime2OutOfSync, "/PGM/CPU%u/RZ/Trap0e/Time2/OutOfSync", "Profiling of the Trap0eHandler body when the cause is an out-of-sync page.");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0eTime2OutOfSyncHndPhys, "/PGM/CPU%u/RZ/Trap0e/Time2/OutOfSyncHndPhys", "Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page.");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0eTime2OutOfSyncHndVirt, "/PGM/CPU%u/RZ/Trap0e/Time2/OutOfSyncHndVirt", "Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page.");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0eTime2OutOfSyncHndObs, "/PGM/CPU%u/RZ/Trap0e/Time2/OutOfSyncObsHnd", "Profiling of the Trap0eHandler body when the cause is an obsolete handler page.");
PGM_REG_PROFILE(&pPgmCpu->StatRZTrap0eTime2SyncPT, "/PGM/CPU%u/RZ/Trap0e/Time2/SyncPT", "Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eConflicts, "/PGM/CPU%u/RZ/Trap0e/Conflicts", "The number of times #PF was caused by an undetected conflict.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eHandlersMapping, "/PGM/CPU%u/RZ/Trap0e/Handlers/Mapping", "Number of traps due to access handlers in mappings.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eHandlersOutOfSync, "/PGM/CPU%u/RZ/Trap0e/Handlers/OutOfSync", "Number of traps due to out-of-sync handled pages.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eHandlersPhysical, "/PGM/CPU%u/RZ/Trap0e/Handlers/Physical", "Number of traps due to physical access handlers.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eHandlersVirtual, "/PGM/CPU%u/RZ/Trap0e/Handlers/Virtual", "Number of traps due to virtual access handlers.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eHandlersVirtualByPhys, "/PGM/CPU%u/RZ/Trap0e/Handlers/VirtualByPhys", "Number of traps due to virtual access handlers by physical address.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eHandlersVirtualUnmarked,"/PGM/CPU%u/RZ/Trap0e/Handlers/VirtualUnmarked","Number of traps due to virtual access handlers by virtual address (without proper physical flags).");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eHandlersUnhandled, "/PGM/CPU%u/RZ/Trap0e/Handlers/Unhandled", "Number of traps due to access outside range of monitored page(s).");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eHandlersInvalid, "/PGM/CPU%u/RZ/Trap0e/Handlers/Invalid", "Number of traps due to access to invalid physical memory.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eUSNotPresentRead, "/PGM/CPU%u/RZ/Trap0e/Err/User/NPRead", "Number of user mode not present read page faults.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eUSNotPresentWrite, "/PGM/CPU%u/RZ/Trap0e/Err/User/NPWrite", "Number of user mode not present write page faults.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eUSWrite, "/PGM/CPU%u/RZ/Trap0e/Err/User/Write", "Number of user mode write page faults.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eUSReserved, "/PGM/CPU%u/RZ/Trap0e/Err/User/Reserved", "Number of user mode reserved bit page faults.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eUSNXE, "/PGM/CPU%u/RZ/Trap0e/Err/User/NXE", "Number of user mode NXE page faults.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eUSRead, "/PGM/CPU%u/RZ/Trap0e/Err/User/Read", "Number of user mode read page faults.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eSVNotPresentRead, "/PGM/CPU%u/RZ/Trap0e/Err/Supervisor/NPRead", "Number of supervisor mode not present read page faults.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eSVNotPresentWrite, "/PGM/CPU%u/RZ/Trap0e/Err/Supervisor/NPWrite", "Number of supervisor mode not present write page faults.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eSVWrite, "/PGM/CPU%u/RZ/Trap0e/Err/Supervisor/Write", "Number of supervisor mode write page faults.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eSVReserved, "/PGM/CPU%u/RZ/Trap0e/Err/Supervisor/Reserved", "Number of supervisor mode reserved bit page faults.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eSNXE, "/PGM/CPU%u/RZ/Trap0e/Err/Supervisor/NXE", "Number of supervisor mode NXE page faults.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eGuestPF, "/PGM/CPU%u/RZ/Trap0e/GuestPF", "Number of real guest page faults.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eGuestPFUnh, "/PGM/CPU%u/RZ/Trap0e/GuestPF/Unhandled", "Number of real guest page faults from the 'unhandled' case.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eGuestPFMapping, "/PGM/CPU%u/RZ/Trap0e/GuestPF/InMapping", "Number of real guest page faults in a mapping.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eWPEmulInRZ, "/PGM/CPU%u/RZ/Trap0e/WP/InRZ", "Number of guest page faults due to X86_CR0_WP emulation.");
PGM_REG_COUNTER(&pPgmCpu->StatRZTrap0eWPEmulToR3, "/PGM/CPU%u/RZ/Trap0e/WP/ToR3", "Number of guest page faults due to X86_CR0_WP emulation (forward to R3 for emulation).");
STAMR3RegisterF(pVM, &pPgmCpu->StatRZTrap0ePD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
PGM_REG_COUNTER(&pPgmCpu->StatRZGuestCR3WriteHandled, "/PGM/CPU%u/RZ/CR3WriteHandled", "The number of times the Guest CR3 change was successfully handled.");
PGM_REG_COUNTER(&pPgmCpu->StatRZGuestCR3WriteUnhandled, "/PGM/CPU%u/RZ/CR3WriteUnhandled", "The number of times the Guest CR3 change was passed back to the recompiler.");
PGM_REG_COUNTER(&pPgmCpu->StatRZGuestCR3WriteConflict, "/PGM/CPU%u/RZ/CR3WriteConflict", "The number of times the Guest CR3 monitoring detected a conflict.");
PGM_REG_COUNTER(&pPgmCpu->StatRZGuestROMWriteHandled, "/PGM/CPU%u/RZ/ROMWriteHandled", "The number of times the Guest ROM change was successfully handled.");
PGM_REG_COUNTER(&pPgmCpu->StatRZGuestROMWriteUnhandled, "/PGM/CPU%u/RZ/ROMWriteUnhandled", "The number of times the Guest ROM change was passed back to the recompiler.");
PGM_REG_PROFILE(&pPgmCpu->StatRZSyncCR3, "/PGM/CPU%u/RZ/SyncCR3", "Profiling of the PGMSyncCR3() body.");
PGM_REG_PROFILE(&pPgmCpu->StatRZSyncCR3Handlers, "/PGM/CPU%u/RZ/SyncCR3/Handlers", "Profiling of the PGMSyncCR3() update handler section.");
PGM_REG_COUNTER(&pPgmCpu->StatRZSyncCR3Global, "/PGM/CPU%u/RZ/SyncCR3/Global", "The number of global CR3 syncs.");
PGM_REG_COUNTER(&pPgmCpu->StatRZSyncCR3NotGlobal, "/PGM/CPU%u/RZ/SyncCR3/NotGlobal", "The number of non-global CR3 syncs.");
PGM_REG_COUNTER(&pPgmCpu->StatRZSyncCR3DstCacheHit, "/PGM/CPU%u/RZ/SyncCR3/DstChacheHit", "The number of times we got some kind of a cache hit.");
PGM_REG_COUNTER(&pPgmCpu->StatRZSyncCR3DstFreed, "/PGM/CPU%u/RZ/SyncCR3/DstFreed", "The number of times we've had to free a shadow entry.");
PGM_REG_COUNTER(&pPgmCpu->StatRZSyncCR3DstFreedSrcNP, "/PGM/CPU%u/RZ/SyncCR3/DstFreedSrcNP", "The number of times we've had to free a shadow entry for which the source entry was not present.");
PGM_REG_COUNTER(&pPgmCpu->StatRZSyncCR3DstNotPresent, "/PGM/CPU%u/RZ/SyncCR3/DstNotPresent", "The number of times we've encountered a not present shadow entry for a present guest entry.");
PGM_REG_COUNTER(&pPgmCpu->StatRZSyncCR3DstSkippedGlobalPD, "/PGM/CPU%u/RZ/SyncCR3/DstSkippedGlobalPD", "The number of times a global page directory wasn't flushed.");
PGM_REG_COUNTER(&pPgmCpu->StatRZSyncCR3DstSkippedGlobalPT, "/PGM/CPU%u/RZ/SyncCR3/DstSkippedGlobalPT", "The number of times a page table with only global entries wasn't flushed.");
PGM_REG_PROFILE(&pPgmCpu->StatRZSyncPT, "/PGM/CPU%u/RZ/SyncPT", "Profiling of the pfnSyncPT() body.");
PGM_REG_COUNTER(&pPgmCpu->StatRZSyncPTFailed, "/PGM/CPU%u/RZ/SyncPT/Failed", "The number of times pfnSyncPT() failed.");
PGM_REG_COUNTER(&pPgmCpu->StatRZSyncPagePDNAs, "/PGM/CPU%u/RZ/SyncPagePDNAs", "The number of times we've marked a PD not present from SyncPage to virtualize the accessed bit.");
PGM_REG_COUNTER(&pPgmCpu->StatRZSyncPagePDOutOfSync, "/PGM/CPU%u/RZ/SyncPagePDOutOfSync", "The number of times we've encountered an out-of-sync PD in SyncPage.");
PGM_REG_COUNTER(&pPgmCpu->StatRZAccessedPage, "/PGM/CPU%u/RZ/AccessedPage", "The number of pages marked not present for accessed bit emulation.");
PGM_REG_PROFILE(&pPgmCpu->StatRZDirtyBitTracking, "/PGM/CPU%u/RZ/DirtyPage", "Profiling the dirty bit tracking in CheckPageFault().");
PGM_REG_COUNTER(&pPgmCpu->StatRZDirtyPage, "/PGM/CPU%u/RZ/DirtyPage/Mark", "The number of pages marked read-only for dirty bit tracking.");
PGM_REG_COUNTER(&pPgmCpu->StatRZDirtyPageBig, "/PGM/CPU%u/RZ/DirtyPage/MarkBig", "The number of 4MB pages marked read-only for dirty bit tracking.");
PGM_REG_COUNTER(&pPgmCpu->StatRZDirtyPageSkipped, "/PGM/CPU%u/RZ/DirtyPage/Skipped", "The number of pages already dirty or readonly.");
PGM_REG_COUNTER(&pPgmCpu->StatRZDirtyPageTrap, "/PGM/CPU%u/RZ/DirtyPage/Trap", "The number of traps generated for dirty bit tracking.");
PGM_REG_COUNTER(&pPgmCpu->StatRZDirtyPageStale, "/PGM/CPU%u/RZ/DirtyPage/Stale", "The number of traps generated for dirty bit tracking (stale tlb entries).");
PGM_REG_COUNTER(&pPgmCpu->StatRZDirtiedPage, "/PGM/CPU%u/RZ/DirtyPage/SetDirty", "The number of pages marked dirty because of write accesses.");
PGM_REG_COUNTER(&pPgmCpu->StatRZDirtyTrackRealPF,           "/PGM/CPU%u/RZ/DirtyPage/RealPF",           "The number of real page faults during dirty bit tracking.");
PGM_REG_COUNTER(&pPgmCpu->StatRZPageAlreadyDirty, "/PGM/CPU%u/RZ/DirtyPage/AlreadySet", "The number of pages already marked dirty because of write accesses.");
PGM_REG_PROFILE(&pPgmCpu->StatRZInvalidatePage, "/PGM/CPU%u/RZ/InvalidatePage", "PGMInvalidatePage() profiling.");
PGM_REG_COUNTER(&pPgmCpu->StatRZInvalidatePage4KBPages, "/PGM/CPU%u/RZ/InvalidatePage/4KBPages", "The number of times PGMInvalidatePage() was called for a 4KB page.");
PGM_REG_COUNTER(&pPgmCpu->StatRZInvalidatePage4MBPages, "/PGM/CPU%u/RZ/InvalidatePage/4MBPages", "The number of times PGMInvalidatePage() was called for a 4MB page.");
PGM_REG_COUNTER(&pPgmCpu->StatRZInvalidatePage4MBPagesSkip, "/PGM/CPU%u/RZ/InvalidatePage/4MBPagesSkip","The number of times PGMInvalidatePage() skipped a 4MB page.");
PGM_REG_COUNTER(&pPgmCpu->StatRZInvalidatePagePDMappings, "/PGM/CPU%u/RZ/InvalidatePage/PDMappings", "The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict).");
PGM_REG_COUNTER(&pPgmCpu->StatRZInvalidatePagePDNAs, "/PGM/CPU%u/RZ/InvalidatePage/PDNAs", "The number of times PGMInvalidatePage() was called for a not accessed page directory.");
PGM_REG_COUNTER(&pPgmCpu->StatRZInvalidatePagePDNPs, "/PGM/CPU%u/RZ/InvalidatePage/PDNPs", "The number of times PGMInvalidatePage() was called for a not present page directory.");
PGM_REG_COUNTER(&pPgmCpu->StatRZInvalidatePagePDOutOfSync, "/PGM/CPU%u/RZ/InvalidatePage/PDOutOfSync", "The number of times PGMInvalidatePage() was called for an out of sync page directory.");
PGM_REG_COUNTER(&pPgmCpu->StatRZInvalidatePageSkipped,      "/PGM/CPU%u/RZ/InvalidatePage/Skipped",     "The number of times PGMInvalidatePage() was skipped due to a not-present shadow page or a pending SyncCR3.");
PGM_REG_COUNTER(&pPgmCpu->StatRZPageOutOfSyncSupervisor, "/PGM/CPU%u/RZ/OutOfSync/SuperVisor", "Number of traps due to pages out of sync (P) and times VerifyAccessSyncPage calls SyncPage.");
PGM_REG_COUNTER(&pPgmCpu->StatRZPageOutOfSyncUser, "/PGM/CPU%u/RZ/OutOfSync/User", "Number of traps due to pages out of sync (P) and times VerifyAccessSyncPage calls SyncPage.");
PGM_REG_COUNTER(&pPgmCpu->StatRZPageOutOfSyncSupervisorWrite,"/PGM/CPU%u/RZ/OutOfSync/SuperVisorWrite", "Number of traps due to pages out of sync (RW) and times VerifyAccessSyncPage calls SyncPage.");
PGM_REG_COUNTER(&pPgmCpu->StatRZPageOutOfSyncUserWrite, "/PGM/CPU%u/RZ/OutOfSync/UserWrite", "Number of traps due to pages out of sync (RW) and times VerifyAccessSyncPage calls SyncPage.");
PGM_REG_COUNTER(&pPgmCpu->StatRZPageOutOfSyncBallloon, "/PGM/CPU%u/RZ/OutOfSync/Balloon", "The number of times a ballooned page was accessed (read).");
PGM_REG_PROFILE(&pPgmCpu->StatRZFlushTLB, "/PGM/CPU%u/RZ/FlushTLB", "Profiling of the PGMFlushTLB() body.");
PGM_REG_COUNTER(&pPgmCpu->StatRZFlushTLBNewCR3, "/PGM/CPU%u/RZ/FlushTLB/NewCR3", "The number of times PGMFlushTLB was called with a new CR3, non-global. (switch)");
PGM_REG_COUNTER(&pPgmCpu->StatRZFlushTLBNewCR3Global, "/PGM/CPU%u/RZ/FlushTLB/NewCR3Global", "The number of times PGMFlushTLB was called with a new CR3, global. (switch)");
PGM_REG_COUNTER(&pPgmCpu->StatRZFlushTLBSameCR3, "/PGM/CPU%u/RZ/FlushTLB/SameCR3", "The number of times PGMFlushTLB was called with the same CR3, non-global. (flush)");
PGM_REG_COUNTER(&pPgmCpu->StatRZFlushTLBSameCR3Global, "/PGM/CPU%u/RZ/FlushTLB/SameCR3Global", "The number of times PGMFlushTLB was called with the same CR3, global. (flush)");
PGM_REG_PROFILE(&pPgmCpu->StatRZGstModifyPage, "/PGM/CPU%u/RZ/GstModifyPage", "Profiling of the PGMGstModifyPage() body.");
PGM_REG_PROFILE(&pPgmCpu->StatR3SyncCR3, "/PGM/CPU%u/R3/SyncCR3", "Profiling of the PGMSyncCR3() body.");
PGM_REG_PROFILE(&pPgmCpu->StatR3SyncCR3Handlers, "/PGM/CPU%u/R3/SyncCR3/Handlers", "Profiling of the PGMSyncCR3() update handler section.");
PGM_REG_COUNTER(&pPgmCpu->StatR3SyncCR3Global, "/PGM/CPU%u/R3/SyncCR3/Global", "The number of global CR3 syncs.");
PGM_REG_COUNTER(&pPgmCpu->StatR3SyncCR3NotGlobal, "/PGM/CPU%u/R3/SyncCR3/NotGlobal", "The number of non-global CR3 syncs.");
PGM_REG_COUNTER(&pPgmCpu->StatR3SyncCR3DstCacheHit,         "/PGM/CPU%u/R3/SyncCR3/DstCacheHit",        "The number of times we got some kind of a cache hit.");
PGM_REG_COUNTER(&pPgmCpu->StatR3SyncCR3DstFreed, "/PGM/CPU%u/R3/SyncCR3/DstFreed", "The number of times we've had to free a shadow entry.");
PGM_REG_COUNTER(&pPgmCpu->StatR3SyncCR3DstFreedSrcNP, "/PGM/CPU%u/R3/SyncCR3/DstFreedSrcNP", "The number of times we've had to free a shadow entry for which the source entry was not present.");
PGM_REG_COUNTER(&pPgmCpu->StatR3SyncCR3DstNotPresent, "/PGM/CPU%u/R3/SyncCR3/DstNotPresent", "The number of times we've encountered a not present shadow entry for a present guest entry.");
PGM_REG_COUNTER(&pPgmCpu->StatR3SyncCR3DstSkippedGlobalPD, "/PGM/CPU%u/R3/SyncCR3/DstSkippedGlobalPD", "The number of times a global page directory wasn't flushed.");
PGM_REG_COUNTER(&pPgmCpu->StatR3SyncCR3DstSkippedGlobalPT, "/PGM/CPU%u/R3/SyncCR3/DstSkippedGlobalPT", "The number of times a page table with only global entries wasn't flushed.");
PGM_REG_PROFILE(&pPgmCpu->StatR3SyncPT, "/PGM/CPU%u/R3/SyncPT", "Profiling of the pfnSyncPT() body.");
PGM_REG_COUNTER(&pPgmCpu->StatR3SyncPTFailed, "/PGM/CPU%u/R3/SyncPT/Failed", "The number of times pfnSyncPT() failed.");
PGM_REG_COUNTER(&pPgmCpu->StatR3SyncPagePDNAs,              "/PGM/CPU%u/R3/SyncPagePDNAs",              "The number of times we've marked a PD not present from SyncPage to virtualize the accessed bit.");
PGM_REG_COUNTER(&pPgmCpu->StatR3SyncPagePDOutOfSync,        "/PGM/CPU%u/R3/SyncPagePDOutOfSync",        "The number of times we've encountered an out-of-sync PD in SyncPage.");
PGM_REG_COUNTER(&pPgmCpu->StatR3AccessedPage, "/PGM/CPU%u/R3/AccessedPage", "The number of pages marked not present for accessed bit emulation.");
PGM_REG_PROFILE(&pPgmCpu->StatR3DirtyBitTracking, "/PGM/CPU%u/R3/DirtyPage", "Profiling the dirty bit tracking in CheckPageFault().");
PGM_REG_COUNTER(&pPgmCpu->StatR3DirtyPage, "/PGM/CPU%u/R3/DirtyPage/Mark", "The number of pages marked read-only for dirty bit tracking.");
PGM_REG_COUNTER(&pPgmCpu->StatR3DirtyPageBig, "/PGM/CPU%u/R3/DirtyPage/MarkBig", "The number of 4MB pages marked read-only for dirty bit tracking.");
PGM_REG_COUNTER(&pPgmCpu->StatR3DirtyPageSkipped,           "/PGM/CPU%u/R3/DirtyPage/Skipped",          "The number of pages already dirty or read-only.");
PGM_REG_COUNTER(&pPgmCpu->StatR3DirtyPageTrap, "/PGM/CPU%u/R3/DirtyPage/Trap", "The number of traps generated for dirty bit tracking.");
PGM_REG_COUNTER(&pPgmCpu->StatR3DirtiedPage, "/PGM/CPU%u/R3/DirtyPage/SetDirty", "The number of pages marked dirty because of write accesses.");
PGM_REG_COUNTER(&pPgmCpu->StatR3DirtyTrackRealPF,           "/PGM/CPU%u/R3/DirtyPage/RealPF",           "The number of real page faults during dirty bit tracking.");
PGM_REG_COUNTER(&pPgmCpu->StatR3PageAlreadyDirty, "/PGM/CPU%u/R3/DirtyPage/AlreadySet", "The number of pages already marked dirty because of write accesses.");
PGM_REG_PROFILE(&pPgmCpu->StatR3InvalidatePage, "/PGM/CPU%u/R3/InvalidatePage", "PGMInvalidatePage() profiling.");
PGM_REG_COUNTER(&pPgmCpu->StatR3InvalidatePage4KBPages, "/PGM/CPU%u/R3/InvalidatePage/4KBPages", "The number of times PGMInvalidatePage() was called for a 4KB page.");
PGM_REG_COUNTER(&pPgmCpu->StatR3InvalidatePage4MBPages, "/PGM/CPU%u/R3/InvalidatePage/4MBPages", "The number of times PGMInvalidatePage() was called for a 4MB page.");
PGM_REG_COUNTER(&pPgmCpu->StatR3InvalidatePage4MBPagesSkip, "/PGM/CPU%u/R3/InvalidatePage/4MBPagesSkip","The number of times PGMInvalidatePage() skipped a 4MB page.");
PGM_REG_COUNTER(&pPgmCpu->StatR3InvalidatePagePDMappings, "/PGM/CPU%u/R3/InvalidatePage/PDMappings", "The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict).");
PGM_REG_COUNTER(&pPgmCpu->StatR3InvalidatePagePDNAs, "/PGM/CPU%u/R3/InvalidatePage/PDNAs", "The number of times PGMInvalidatePage() was called for a not accessed page directory.");
PGM_REG_COUNTER(&pPgmCpu->StatR3InvalidatePagePDNPs, "/PGM/CPU%u/R3/InvalidatePage/PDNPs", "The number of times PGMInvalidatePage() was called for a not present page directory.");
PGM_REG_COUNTER(&pPgmCpu->StatR3InvalidatePagePDOutOfSync, "/PGM/CPU%u/R3/InvalidatePage/PDOutOfSync", "The number of times PGMInvalidatePage() was called for an out of sync page directory.");
PGM_REG_COUNTER(&pPgmCpu->StatR3InvalidatePageSkipped,      "/PGM/CPU%u/R3/InvalidatePage/Skipped",     "The number of times PGMInvalidatePage() was skipped due to a not-present shadow page or a pending SyncCR3.");
PGM_REG_COUNTER(&pPgmCpu->StatR3PageOutOfSyncSupervisor, "/PGM/CPU%u/R3/OutOfSync/SuperVisor", "Number of traps due to pages out of sync and times VerifyAccessSyncPage calls SyncPage.");
PGM_REG_COUNTER(&pPgmCpu->StatR3PageOutOfSyncUser, "/PGM/CPU%u/R3/OutOfSync/User", "Number of traps due to pages out of sync and times VerifyAccessSyncPage calls SyncPage.");
PGM_REG_COUNTER(&pPgmCpu->StatR3PageOutOfSyncBallloon, "/PGM/CPU%u/R3/OutOfSync/Balloon", "The number of times a ballooned page was accessed (read).");
PGM_REG_PROFILE(&pPgmCpu->StatR3FlushTLB, "/PGM/CPU%u/R3/FlushTLB", "Profiling of the PGMFlushTLB() body.");
PGM_REG_COUNTER(&pPgmCpu->StatR3FlushTLBNewCR3, "/PGM/CPU%u/R3/FlushTLB/NewCR3", "The number of times PGMFlushTLB was called with a new CR3, non-global. (switch)");
PGM_REG_COUNTER(&pPgmCpu->StatR3FlushTLBNewCR3Global, "/PGM/CPU%u/R3/FlushTLB/NewCR3Global", "The number of times PGMFlushTLB was called with a new CR3, global. (switch)");
PGM_REG_COUNTER(&pPgmCpu->StatR3FlushTLBSameCR3, "/PGM/CPU%u/R3/FlushTLB/SameCR3", "The number of times PGMFlushTLB was called with the same CR3, non-global. (flush)");
PGM_REG_COUNTER(&pPgmCpu->StatR3FlushTLBSameCR3Global, "/PGM/CPU%u/R3/FlushTLB/SameCR3Global", "The number of times PGMFlushTLB was called with the same CR3, global. (flush)");
PGM_REG_PROFILE(&pPgmCpu->StatR3GstModifyPage, "/PGM/CPU%u/R3/GstModifyPage", "Profiling of the PGMGstModifyPage() body.");
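/*
 * Note: PGM_REG_COUNTER and PGM_REG_PROFILE are helper macros defined earlier in
 * this file (not visible here).  A plausible expansion (sketch only; the exact
 * visibility flags and units are assumptions) wraps STAMR3RegisterF() so that the
 * "%u" in each sample name above is filled in with the CPU id:
 *
 *   #define PGM_REG_COUNTER(a, b, c) \
 *       do { rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, \
 *                                 STAMUNIT_OCCURENCES, c, b, idCpu); AssertRC(rc); } while (0)
 *   #define PGM_REG_PROFILE(a, b, c) \
 *       do { rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, \
 *                                 STAMUNIT_TICKS_PER_CALL, c, b, idCpu); AssertRC(rc); } while (0)
 */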
int rc;
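/* Note (descriptive): the guest-context dynamic page mapping area is expected to be
   covered by a single PAE page table, i.e. its base and its last byte must share the
   same 2 MB PAE page-directory slot; the checks below verify exactly that. */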
&& (pVM->pgm.s.pbDynPageMapBaseGC >> X86_PD_PAE_SHIFT) != ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> X86_PD_PAE_SHIFT))
AssertRelease((pVM->pgm.s.pbDynPageMapBaseGC >> X86_PD_PAE_SHIFT) == ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> X86_PD_PAE_SHIFT));
return rc;
int rc;
pVM->pgm.s.paDynPageMap32BitPTEsGC = pMapping->aPTs[iPT].pPTRC + iPG * sizeof(pMapping->aPTs[0].pPTR3->a[0]);
pVM->pgm.s.paDynPageMapPaePTEsGC = pMapping->aPTs[iPT].paPaePTsRC + iPG * sizeof(pMapping->aPTs[0].paPaePTsR3->a[0]);
return rc;
LogFlow(("PGMR3Relocate %RGv to %RGv\n", pVM->pgm.s.GCPtrCR3Mapping, pVM->pgm.s.GCPtrCR3Mapping + offDelta));
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysHandlers, true, pgmR3RelocatePhysHandler, &offDelta);
RTAvlroGCPtrDoWithAll(&pVM->pgm.s.pTreesR3->VirtHandlers, true, pgmR3RelocateVirtHandler, &offDelta);
RTAvlroGCPtrDoWithAll(&pVM->pgm.s.pTreesR3->HyperVirtHandlers, true, pgmR3RelocateHyperVirtHandler, &offDelta);
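/*
 * The relocation callbacks passed to the RTAvlro*DoWithAll() calls above receive each
 * tree node together with &offDelta.  A minimal sketch of such a callback is shown
 * below; the handler type and member names are hypothetical and only illustrate the
 * idea of shifting raw-mode context (RC) addresses by the relocation delta:
 *
 *   static DECLCALLBACK(int) pgmR3RelocateExampleHandler(PAVLROGCPHYSNODECORE pNode, void *pvUser)
 *   {
 *       RTGCINTPTR offDelta = *(RTGCINTPTR *)pvUser;
 *       // ((PEXAMPLEHANDLER)pNode)->pfnHandlerRC += offDelta;   (hypothetical field)
 *       NOREF(pNode); NOREF(offDelta);
 *       return 0;   // 0 = continue the enumeration
 *   }
 */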
int rc;
#ifdef DEBUG
#ifdef VBOX_STRICT
static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser)
if (pszArgs)
fGuest = true;
fShadow = true;
fHost = true;
if (fGuest)
if (fShadow)
pHlp->pfnPrintf(pHlp, "Shadow paging mode: %s\n", PGMGetModeName(pVM->aCpus[0].pgm.s.enmShadowMode));
if (fHost)
const char *psz;
pVM,
Assert(PGMPhysGCPhys2R3PtrAssert(pVM, (RTGCPHYS)(CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK), sizeof(*pPDSrc)) == pPDSrc);
iPD,
iPD,
return rc;
switch (pgmMode)
case PGMMODE_PAE:
case PGMMODE_AMD64:
int rc;
pVM->pgm.s.paModeData = (PPGMMODEDATA)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMMODEDATA) * pgmModeDataMaxIndex());
#ifdef VBOX_WITH_64_BITS_GUESTS
#ifdef VBOX_WITH_64_BITS_GUESTS
case SUPPAGINGMODE_32_BIT:
# ifdef VBOX_WITH_64_BITS_GUESTS
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
# ifdef VBOX_WITH_64_BITS_GUESTS
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
# ifdef VBOX_WITH_64_BITS_GUESTS
AssertFailed();
#ifdef VBOX_WITH_64_BITS_GUESTS
return VINF_SUCCESS;
#ifdef VBOX_STRICT
#ifdef VBOX_STRICT
#ifdef VBOX_STRICT
static PGMMODE pgmR3CalcShadowMode(PVM pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode, VMMSWITCHER *penmSwitcher)
switch (enmGuestMode)
case PGMMODE_REAL:
case PGMMODE_PROTECTED:
switch (enmHostMode)
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
#ifdef DEBUG_bird
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
#ifdef DEBUG_bird
case PGMMODE_32_BIT:
switch (enmHostMode)
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
#ifdef DEBUG_bird
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
#ifdef DEBUG_bird
case PGMMODE_PAE:
switch (enmHostMode)
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
switch (enmHostMode)
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
return PGMMODE_INVALID;
return enmShadowMode;
Log(("PGMR3ChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
PGMMODE enmShadowMode = pgmR3CalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode, &enmSwitcher);
#ifdef VBOX_WITH_RAW_MODE
return rc;
/* The nested shadow paging mode for AMD-V does change when running 64-bit guests on 32-bit hosts; typically PAE <-> AMD64. */
const bool fForceShwEnterExit = false;
LogFlow(("PGMR3ChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
return rc;
LogFlow(("PGMR3ChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
return rc;
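/*
 * Callers (typically the CR0/CR4/EFER change paths) are expected to switch the guest
 * paging mode roughly as sketched below; the exact call-site convention is an
 * assumption and PGMMODE_PAE is just an illustrative value:
 *
 *   int rc = PGMR3ChangeMode(pVM, pVCpu, PGMMODE_PAE);
 *   AssertRCReturn(rc, rc);
 */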
int rc;
switch (enmShadowMode)
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_REAL:
case PGMMODE_PROTECTED:
return VERR_INTERNAL_ERROR;
return rc;
switch (enmGuestMode)
case PGMMODE_REAL:
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
default: AssertFailed(); break;
case PGMMODE_PROTECTED:
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
default: AssertFailed(); break;
case PGMMODE_32_BIT:
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
default: AssertFailed(); break;
case PGMMODE_PAE_NX:
case PGMMODE_PAE:
N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (General/Advanced)"));
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_32_BIT:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
default: AssertFailed(); break;
#ifdef VBOX_WITH_64_BITS_GUESTS
case PGMMODE_AMD64_NX:
case PGMMODE_AMD64:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
default: AssertFailed(); break;
return rc;
/* Exit the current shadow paging mode as well; nested paging and EPT use a root CR3 which will get flushed here. */
return rc;
("%RHp != %RHp %s\n", (RTHCPHYS)CPUMGetHyperCR3(pVCpu), PGMGetHyperCR3(pVCpu), PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
return rc;
static int pgmR3DumpHierarchyHCPaePT(PVM pVM, PX86PTPAE pPT, uint64_t u64Address, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
return VINF_SUCCESS;
static int pgmR3DumpHierarchyHCPaePD(PVM pVM, RTHCPHYS HCPhys, uint64_t u64Address, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
if (!pPD)
pHlp->pfnPrintf(pHlp, "%0*llx error! Page directory at HCPhys=%RHp was not found in the page pool!\n",
return VERR_INVALID_PARAMETER;
pHlp->pfnPrintf(pHlp, "%0*llx error! Mapping error! PT %d has HCPhysPT=%RHp not %RHp is in the PD.\n",
if (pPT)
return rc;
static int pgmR3DumpHierarchyHCPaePDPT(PVM pVM, RTHCPHYS HCPhys, uint64_t u64Address, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
if (!pPDPT)
pHlp->pfnPrintf(pHlp, "%0*llx error! Page directory pointer table at HCPhys=%RHp was not found in the page pool!\n",
return VERR_INVALID_PARAMETER;
if (fLongMode)
i << X86_PDPT_SHIFT,
int rc2 = pgmR3DumpHierarchyHCPaePD(pVM, Pdpe.u & X86_PDPE_PG_MASK, u64Address + ((uint64_t)i << X86_PDPT_SHIFT),
return rc;
static int pgmR3DumpHierarchyHcPaePML4(PVM pVM, RTHCPHYS HCPhys, uint32_t cr4, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
if (!pPML4)
return VERR_INVALID_PARAMETER;
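/* PML4 entries 256..511 map the upper (negative) half of the canonical address space,
   so bit 47 must be sign-extended into bits 63:48; the multiplication by
   0xffff000000000000 below ORs in those upper bits whenever i >= 256. */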
uint64_t u64Address = ((uint64_t)i << X86_PML4_SHIFT) | (((uint64_t)i >> (X86_PML4_SHIFT - X86_PDPT_SHIFT - 1)) * 0xffff000000000000ULL);
int rc2 = pgmR3DumpHierarchyHCPaePDPT(pVM, Pml4e.u & X86_PML4E_PG_MASK, u64Address, cr4, true, cMaxDepth - 1, pHlp);
return rc;
return VINF_SUCCESS;
int pgmR3DumpHierarchyHC32BitPD(PVM pVM, uint32_t cr3, uint32_t cr4, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
if (!pPD)
pHlp->pfnPrintf(pHlp, "Page directory at %#x was not found in the page pool!\n", cr3 & X86_CR3_PAGE_MASK);
return VERR_INVALID_PARAMETER;
pHlp->pfnPrintf(pHlp, "%08x error! Mapping error! PT %d has HCPhysPT=%RHp not %RHp is in the PD.\n",
if (pPT)
pHlp->pfnPrintf(pHlp, "%08x error! Page table at %#x was not found in the page pool!\n", u32Address, HCPhys);
return rc;
Log(("Found %RGp at %RGv -> flags=%llx\n", PhysSearch, (RTGCPTR)(u32Address + (i << X86_PT_SHIFT)), fPageShw));
return VINF_SUCCESS;
bool fLongMode = false;
return VERR_INVALID_PARAMETER;
if (pPT)
return rc;
VMMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint64_t cr3, uint64_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
if (!pHlp)
if (!cMaxDepth)
return VINF_SUCCESS;
if (fLongMode)
return pgmR3DumpHierarchyHCPaePDPT(pVM, cr3 & X86_CR3_PAE_PAGE_MASK, 0, cr4, false, cMaxDepth, pHlp);
#ifdef VBOX_WITH_DEBUGGER
static DECLCALLBACK(int) pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
if (!pVM)
return rc;
return VINF_SUCCESS;
static DECLCALLBACK(int) pgmR3CmdError(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
if (!pVM)
if (!cArgs)
return VINF_SUCCESS;
static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
if (!pVM)
return rc;
return VINF_SUCCESS;
#ifdef VBOX_STRICT
static DECLCALLBACK(int) pgmR3CmdAssertCR3(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
if (!pVM)
return rc;
return VINF_SUCCESS;
static DECLCALLBACK(int) pgmR3CmdCheckDuplicatePages(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
unsigned cBallooned = 0;
unsigned cShared = 0;
unsigned cZero = 0;
unsigned cUnique = 0;
unsigned cDuplicate = 0;
while (cLeft-- > 0)
case PGM_PAGE_STATE_ZERO:
cZero++;
case PGM_PAGE_STATE_BALLOONED:
cBallooned++;
case PGM_PAGE_STATE_SHARED:
cShared++;
case PGM_PAGE_STATE_ALLOCATED:
cDuplicate++;
cUnique++;
AssertFailed();
pPage++;
return VINF_SUCCESS;
static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
if (!pVM)
static DECLCALLBACK(int) pgmR3CmdPhysToFile(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
if (!pVM)
return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: Invalid 2nd argument '%s', must be 'nozero'.\n", paArgs[1].u.pszString);
int rc = RTFileOpen(&hFile, paArgs[0].u.pszString, RTFILE_O_WRITE | RTFILE_O_CREATE_REPLACE | RTFILE_O_DENY_WRITE);
return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: RTFileOpen(,'%s',) -> %Rrc.\n", paArgs[0].u.pszString, rc);
if (fIncZeroPgs)
case PGMPAGETYPE_RAM:
case PGMPAGETYPE_ROM:
case PGMPAGETYPE_MMIO2:
void const *pvPage;
pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: PGMPhysGCPhys2CCPtrReadOnly -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
AssertFailed();
case PGMPAGETYPE_MMIO:
if (fIncZeroPgs)
pPage++;
return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Successfully saved physical memory to '%s'.\n", paArgs[0].u.pszString);
return VINF_SUCCESS;
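/*
 * The per-page write path of pgmR3CmdPhysToFile() is not shown in full above.  A
 * minimal sketch of dumping one RAM page (assumptions: lock handling simplified,
 * zero-page optimisation and error paths elided) could look like this:
 *
 *   void const     *pvPage;
 *   PGMPAGEMAPLOCK  Lock;
 *   rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvPage, &Lock);
 *   if (RT_SUCCESS(rc))
 *   {
 *       rc = RTFileWrite(hFile, pvPage, PAGE_SIZE, NULL);
 *       PGMPhysReleasePageMappingLock(pVM, &Lock);
 *   }
 */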
typedef struct PGMCHECKINTARGS
static DECLCALLBACK(int) pgmR3CheckIntegrityPhysHandlerNode(PAVLROGCPHYSNODECORE pNode, void *pvUser)
AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %RGp-%RGp %s\n", pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
|| (pArgs->fLeftToRight ? pArgs->pPrevPhys->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys->Core.KeyLast > pCur->Core.Key),
pArgs->pPrevPhys, pArgs->pPrevPhys->Core.Key, pArgs->pPrevPhys->Core.KeyLast, pArgs->pPrevPhys->pszDesc,
static DECLCALLBACK(int) pgmR3CheckIntegrityVirtHandlerNode(PAVLROGCPTRNODECORE pNode, void *pvUser)
AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %RGv-%RGv %s\n", pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
|| (pArgs->fLeftToRight ? pArgs->pPrevVirt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevVirt->Core.KeyLast > pCur->Core.Key),
pArgs->pPrevVirt, pArgs->pPrevVirt->Core.Key, pArgs->pPrevVirt->Core.KeyLast, pArgs->pPrevVirt->pszDesc,
AssertReleaseMsg(pCur->aPhysToVirt[iPage].offVirtHandler == -RT_OFFSETOF(PGMVIRTHANDLER, aPhysToVirt[iPage]),
static DECLCALLBACK(int) pgmR3CheckIntegrityPhysToVirtHandlerNode(PAVLROGCPHYSNODECORE pNode, void *pvUser)
AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %RGp-%RGp\n", pCur, pCur->Core.Key, pCur->Core.KeyLast));
|| (pArgs->fLeftToRight ? pArgs->pPrevPhys2Virt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys2Virt->Core.KeyLast > pCur->Core.Key),
|| (pArgs->fLeftToRight ? pArgs->pPrevPhys2Virt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys2Virt->Core.KeyLast > pCur->Core.Key),
AssertReleaseMsg((pCur->offNextAlias & (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD)) == (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD),
pCur2 = (PPGMPHYS2VIRTHANDLER)((intptr_t)pCur + (pCur->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
AssertReleaseMsg((pCur2->offNextAlias & (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD)) == PGMPHYS2VIRTHANDLER_IN_TREE,
int cErrors = 0;
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysHandlers, true, pgmR3CheckIntegrityPhysHandlerNode, &Args);
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysHandlers, false, pgmR3CheckIntegrityPhysHandlerNode, &Args);
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->VirtHandlers, true, pgmR3CheckIntegrityVirtHandlerNode, &Args);
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->VirtHandlers, false, pgmR3CheckIntegrityVirtHandlerNode, &Args);
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->HyperVirtHandlers, true, pgmR3CheckIntegrityVirtHandlerNode, &Args);
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->HyperVirtHandlers, false, pgmR3CheckIntegrityVirtHandlerNode, &Args);
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysToVirtHandlers, true, pgmR3CheckIntegrityPhysToVirtHandlerNode, &Args);
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysToVirtHandlers, false, pgmR3CheckIntegrityPhysToVirtHandlerNode, &Args);