PGM.cpp revision 3f1e0eea71cabeb90529e546f16eb7aee513fde9
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * PGM - Page Manager and Monitor. (Mixing stuff here, not good?)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Copyright (C) 2006-2010 Oracle Corporation
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * This file is part of VirtualBox Open Source Edition (OSE), as
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * available from http://www.virtualbox.org. This file is free software;
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * you can redistribute it and/or modify it under the terms of the GNU
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * General Public License (GPL) as published by the Free Software
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Foundation, in version 2 as it comes in the "COPYING" file of the
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/** @page pg_pgm PGM - The Page Manager and Monitor
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * @see grp_pgm,
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * @ref pg_pgm_pool,
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * @ref pg_pgm_phys.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * @section sec_pgm_modes Paging Modes
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * There are three memory contexts: Host Context (HC), Guest Context (GC)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * and intermediate context. When talking about paging HC can also be referred to
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * as "host paging", and GC referred to as "shadow paging".
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * We define three basic paging modes: 32-bit, PAE and AMD64. The host paging mode
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * is defined by the host operating system. The mode used in the shadow paging mode
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * depends on the host paging mode and what mode the guest is currently in. The
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * following relation between the two is defined:
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * @verbatim
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync Host > 32-bit | PAE | AMD64 |
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync Guest | | | |
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync ==v================================
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync 32-bit 32-bit PAE PAE
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync -------|--------|--------|--------|
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync PAE PAE PAE PAE
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync -------|--------|--------|--------|
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync AMD64 AMD64 AMD64 AMD64
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync -------|--------|--------|--------| @endverbatim
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * All configuration except those in the diagonal (upper left) are expected to
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * require special effort from the switcher (i.e. a bit slower).
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * @section sec_pgm_shw The Shadow Memory Context
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Because guest context mappings require PDPT and PML4 entries to allow
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * writing on AMD64, the two upper levels will have fixed flags whatever the
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * guest is thinking of using there. So, when shadowing the PD level we will
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * calculate the effective flags of PD and all the higher levels. In legacy
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * PAE mode this only applies to the PWT and PCD bits (the rest are
956230513f1e35e9d694c219c7009034b71bb836vboxsync * ignored/reserved/MBZ). We will ignore those bits for the present.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * @section sec_pgm_int The Intermediate Memory Context
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * The world switch goes thru an intermediate memory context whose purpose is
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * to provide different mappings of the switcher code. All guest mappings are also
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * present in this context.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * The switcher code is mapped at the same location as on the host, at an
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * identity mapped location (physical equals virtual address), and at the
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * hypervisor location. The identity mapped location is for when the world
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * switches that involves disabling paging.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * PGM maintain page tables for 32-bit, PAE and AMD64 paging modes. This
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * simplifies switching guest CPU mode and consistency at the cost of more
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * code to do the work. All memory use for those page tables is located below
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * 4GB (this includes page tables for guest context mappings).
956230513f1e35e9d694c219c7009034b71bb836vboxsync * @subsection subsec_pgm_int_gc Guest Context Mappings
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * During assignment and relocation of a guest context mapping the intermediate
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * memory context is used to verify the new location.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Guest context mappings are currently restricted to below 4GB, for reasons
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * of simplicity. This may change when we implement AMD64 support.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * @section sec_pgm_misc Misc
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * @subsection subsec_pgm_misc_diff Differences Between Legacy PAE and Long Mode PAE
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * The differences between legacy PAE and long mode PAE are:
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * -# PDPE bits 1, 2, 5 and 6 are defined differently. In legacy mode they are
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * all marked down as must-be-zero, while in long mode 1, 2 and 5 have the
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * usual meanings while 6 is ignored (AMD). This means that upon switching to
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * legacy PAE mode we'll have to clear these bits and when going to long mode
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * they must be set. This applies to both intermediate and shadow contexts,
956230513f1e35e9d694c219c7009034b71bb836vboxsync * however we don't need to do it for the intermediate one since we're
956230513f1e35e9d694c219c7009034b71bb836vboxsync * executing with CR0.WP at that time.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * -# CR3 allows a 32-byte aligned address in legacy mode, while in long mode
956230513f1e35e9d694c219c7009034b71bb836vboxsync * a page aligned one is required.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * @section sec_pgm_handlers Access Handlers
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Placeholder.
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * @subsection sec_pgm_handlers_phys Physical Access Handlers
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * Placeholder.
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * @subsection sec_pgm_handlers_virt Virtual Access Handlers
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * We currently implement three types of virtual access handlers: ALL, WRITE
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * and HYPERVISOR (WRITE). See PGMVIRTHANDLERTYPE for some more details.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * The HYPERVISOR access handlers is kept in a separate tree since it doesn't apply
956230513f1e35e9d694c219c7009034b71bb836vboxsync * to physical pages (PGMTREES::HyperVirtHandlers) and only needs to be consulted in
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync * a special \#PF case. The ALL and WRITE are in the PGMTREES::VirtHandlers tree, the
956230513f1e35e9d694c219c7009034b71bb836vboxsync * rest of this section is going to be about these handlers.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * We'll go thru the life cycle of a handler and try make sense of it all, don't know
956230513f1e35e9d694c219c7009034b71bb836vboxsync * how successful this is gonna be...
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 1. A handler is registered thru the PGMR3HandlerVirtualRegister and
956230513f1e35e9d694c219c7009034b71bb836vboxsync * PGMHandlerVirtualRegisterEx APIs. We check for conflicting virtual handlers
956230513f1e35e9d694c219c7009034b71bb836vboxsync * and create a new node that is inserted into the AVL tree (range key). Then
956230513f1e35e9d694c219c7009034b71bb836vboxsync * a full PGM resync is flagged (clear pool, sync cr3, update virtual bit of PGMPAGE).
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 2. The following PGMSyncCR3/SyncCR3 operation will first invoke HandlerVirtualUpdate.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 2a. HandlerVirtualUpdate will look up all the pages covered by virtual handlers
956230513f1e35e9d694c219c7009034b71bb836vboxsync * via the current guest CR3 and update the physical page -> virtual handler
956230513f1e35e9d694c219c7009034b71bb836vboxsync * translation. Needless to say, this doesn't exactly scale very well. If any changes
956230513f1e35e9d694c219c7009034b71bb836vboxsync * are detected, it will flag a virtual bit update just like we did on registration.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * PGMPHYS pages with changes will have their virtual handler state reset to NONE.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 2b. The virtual bit update process will iterate all the pages covered by all the
956230513f1e35e9d694c219c7009034b71bb836vboxsync * virtual handlers and update the PGMPAGE virtual handler state to the max of all
956230513f1e35e9d694c219c7009034b71bb836vboxsync * virtual handlers on that page.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 2c. Back in SyncCR3 we will now flush the entire shadow page cache to make sure
956230513f1e35e9d694c219c7009034b71bb836vboxsync * we don't miss any alias mappings of the monitored pages.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 2d. SyncCR3 will then proceed with syncing the CR3 table.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 3. \#PF(np,read) on a page in the range. This will cause it to be synced
956230513f1e35e9d694c219c7009034b71bb836vboxsync * read-only and resumed if it's a WRITE handler. If it's an ALL handler we
956230513f1e35e9d694c219c7009034b71bb836vboxsync * will call the handlers like in the next step. If the physical mapping has
956230513f1e35e9d694c219c7009034b71bb836vboxsync * changed we will - some time in the future - perform a handler callback
956230513f1e35e9d694c219c7009034b71bb836vboxsync * (optional) and update the physical -> virtual handler cache.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 4. \#PF(,write) on a page in the range. This will cause the handler to
956230513f1e35e9d694c219c7009034b71bb836vboxsync * be invoked.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 5. The guest invalidates the page and changes the physical backing or
956230513f1e35e9d694c219c7009034b71bb836vboxsync * unmaps it. This should cause the invalidation callback to be invoked
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync * (it might not yet be 100% perfect). Exactly what happens next... is
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync * this where we mess up and end up out of sync for a while?
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync * 6. The handler is deregistered by the client via PGMHandlerVirtualDeregister.
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync * We will then set all PGMPAGEs in the physical -> virtual handler cache for
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync * this handler to NONE and trigger a full PGM resync (basically the same
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync * as in step 1). Which means 2 is executed again.
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync * @subsubsection sub_sec_pgm_handler_virt_todo TODOs
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync * There is a bunch of things that needs to be done to make the virtual handlers
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync * work 100% correctly and work more efficiently.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * The first bit hasn't been implemented yet because it's going to slow the
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * whole mess down even more, and besides it seems to be working reliably for
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * our current uses. OTOH, some of the optimizations might end up more or less
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * implementing the missing bits, so we'll see.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * On the optimization side, the first thing to do is to try avoid unnecessary
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * cache flushing. Then try team up with the shadowing code to track changes
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * in mappings by means of access to them (shadow in), updates to shadow pages,
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * invlpg, and shadow PT discarding (perhaps).
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Some idea that have popped up for optimization for current and new features:
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * - bitmap indicating where there are virtual handlers installed.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * (4KB => 2**20 pages, page 2**12 => covers 32-bit address space 1:1!)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * - Further optimize this by min/max (needs min/max avl getters).
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * - Shadow page table entry bit (if any left)?
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/** @page pg_pgm_phys PGM Physical Guest Memory Management
956230513f1e35e9d694c219c7009034b71bb836vboxsync * Objectives:
7cd65b78ef52a960d2237ca56abb05385a12217cvboxsync * - Guest RAM over-commitment using memory ballooning,
7cd65b78ef52a960d2237ca56abb05385a12217cvboxsync * zero pages and general page sharing.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * - Moving or mirroring a VM onto a different physical machine.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * @subsection subsec_pgmPhys_Definitions Definitions
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Allocation chunk - A RTR0MemObjAllocPhysNC object and the tracking
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * machinery associated with it.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * @subsection subsec_pgmPhys_AllocPage Allocating a page.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Initially we map *all* guest memory to the (per VM) zero page, which
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * means that none of the read functions will cause pages to be allocated.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Exception, access bit in page tables that have been shared. This must
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * be handled, but we must also make sure PGMGst*Modify doesn't make
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * unnecessary modifications.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Allocation points:
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * - PGMPhysSimpleWriteGCPhys and PGMPhysWrite.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * - Replacing a zero page mapping at \#PF.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * - Replacing a shared page mapping at \#PF.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * - ROM registration (currently MMR3RomRegister).
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * - VM restore (pgmR3Load).
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * For the first three it would make sense to keep a few pages handy
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * until we've reached the max memory commitment for the VM.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * For the ROM registration, we know exactly how many pages we need
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * and will request these from ring-0. For restore, we will save
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * the number of non-zero pages in the saved state and allocate
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * them up front. This would allow the ring-0 component to refuse
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * the request if there isn't sufficient memory available for VM use.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Btw. for both ROM and restore allocations we won't be requiring
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * zeroed pages as they are going to be filled instantly.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * @subsection subsec_pgmPhys_FreePage Freeing a page
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * There are a few points where a page can be freed:
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * - After being replaced by the zero page.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * - After being replaced by a shared page.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * - After being ballooned by the guest additions.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * - At reset.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * - At restore.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * When freeing one or more pages they will be returned to the ring-0
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * component and replaced by the zero page.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * The reasoning for clearing out all the pages on reset is that it will
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * return us to the exact same state as on power on, and may thereby help
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * us reduce the memory load on the system. Further it might have a
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * (temporary) positive influence on memory fragmentation (@see subsec_pgmPhys_Fragmentation).
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * On restore, as mentioned under the allocation topic, pages should be
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * freed / allocated depending on how many is actually required by the
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * new VM state. The simplest approach is to do like on reset, and free
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * all non-ROM pages and then allocate what we need.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * A measure to prevent some fragmentation, would be to let each allocation
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * chunk have some affinity towards the VM having allocated the most pages
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * from it. Also, try make sure to allocate from allocation chunks that
666edc71e2906904545617ad6fae769f7d0bbf08vboxsync * are almost full. Admittedly, both these measures might work counter to
666edc71e2906904545617ad6fae769f7d0bbf08vboxsync * our intentions and it's probably not worth putting a lot of effort,
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * cpu time or memory into this.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * @subsection subsec_pgmPhys_SharePage Sharing a page
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * The basic idea is that there will be an idle priority kernel
956230513f1e35e9d694c219c7009034b71bb836vboxsync * thread walking the non-shared VM pages hashing them and looking for
956230513f1e35e9d694c219c7009034b71bb836vboxsync * pages with the same checksum. If such pages are found, it will compare
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * them byte-by-byte to see if they actually are identical. If found to be
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * identical it will allocate a shared page, copy the content, check that
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * the page didn't change while doing this, and finally request both the
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * VMs to use the shared page instead. If the page is all zeros (special
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * checksum and byte-by-byte check) it will request the VM that owns it
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * to replace it with the zero page.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * To make this efficient, we will have to make sure not to try share a page
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * that will change its contents soon. This part requires the most work.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * A simple idea would be to request the VM to write monitor the page for
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * a while to make sure it isn't modified any time soon. Also, it may
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * make sense to skip pages that are being write monitored since this
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * information is readily available to the thread if it works on the
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * per-VM guest memory structures (presently called PGMRAMRANGE).
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * @subsection subsec_pgmPhys_Fragmentation Fragmentation Concerns and Counter Measures
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * The pages are organized in allocation chunks in ring-0, this is a necessity
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * if we wish to have an OS agnostic approach to this whole thing. (On Linux we
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * could easily work on a page-by-page basis if we liked. Whether this is possible
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * or efficient on NT I don't quite know.) Fragmentation within these chunks may
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * become a problem as part of the idea here is that we wish to return memory to
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * the host system.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * For instance, starting two VMs at the same time, they will both allocate the
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * guest memory on-demand and if permitted their page allocations will be
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * intermixed. Shut down one of the two VMs and it will be difficult to return
956230513f1e35e9d694c219c7009034b71bb836vboxsync * any memory to the host system because the page allocation for the two VMs are
956230513f1e35e9d694c219c7009034b71bb836vboxsync * mixed up in the same allocation chunks.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * To further complicate matters, when pages are freed because they have been
956230513f1e35e9d694c219c7009034b71bb836vboxsync * ballooned or become shared/zero the whole idea is that the page is supposed
956230513f1e35e9d694c219c7009034b71bb836vboxsync * to be reused by another VM or returned to the host system. This will cause
956230513f1e35e9d694c219c7009034b71bb836vboxsync * allocation chunks to contain pages belonging to different VMs and prevent
bd7c18002f48884a132bb0967408b6111dec326evboxsync * returning memory to the host when one of those VMs shuts down.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * The only way to really deal with this problem is to move pages. This can
956230513f1e35e9d694c219c7009034b71bb836vboxsync * either be done at VM shutdown and or by the idle priority worker thread
956230513f1e35e9d694c219c7009034b71bb836vboxsync * that will be responsible for finding sharable/zero pages. The mechanisms
956230513f1e35e9d694c219c7009034b71bb836vboxsync * involved for coercing a VM to move a page (or to do it for it) will be
956230513f1e35e9d694c219c7009034b71bb836vboxsync * the same as when telling it to share/zero a page.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * @subsection subsec_pgmPhys_Tracking Tracking Structures And Their Cost
956230513f1e35e9d694c219c7009034b71bb836vboxsync * There's a difficult balance between keeping the per-page tracking structures
956230513f1e35e9d694c219c7009034b71bb836vboxsync * (global and guest page) easy to use and keeping them from eating too much
bd7c18002f48884a132bb0967408b6111dec326evboxsync * memory. We have limited virtual memory resources available when operating in
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 32-bit kernel space (on 64-bit it's quite a different story). The
956230513f1e35e9d694c219c7009034b71bb836vboxsync * tracking structures will be designed such that we can deal with up
956230513f1e35e9d694c219c7009034b71bb836vboxsync * to 32GB of memory on a 32-bit system and essentially unlimited on 64-bit ones.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * @subsubsection subsubsec_pgmPhys_Tracking_Kernel Kernel Space
956230513f1e35e9d694c219c7009034b71bb836vboxsync * @see pg_GMM
956230513f1e35e9d694c219c7009034b71bb836vboxsync * @subsubsection subsubsec_pgmPhys_Tracking_PerVM Per-VM
956230513f1e35e9d694c219c7009034b71bb836vboxsync * Fixed info is the physical address of the page (HCPhys) and the page id
956230513f1e35e9d694c219c7009034b71bb836vboxsync * (described above). Theoretically we'll need 48(-12) bits for the HCPhys part.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * Today we're restricting ourselves to 40(-12) bits because this is the current
956230513f1e35e9d694c219c7009034b71bb836vboxsync * restriction of all AMD64 implementations (I think Barcelona will up this
956230513f1e35e9d694c219c7009034b71bb836vboxsync * to 48(-12) bits, not that it really matters) and I needed the bits for
956230513f1e35e9d694c219c7009034b71bb836vboxsync * tracking mappings of a page. 48-12 = 36. That leaves 28 bits, which means a
956230513f1e35e9d694c219c7009034b71bb836vboxsync * decent range for the page id: 2^(28+12) = 1024TB.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * In addition to these, we'll have to keep maintaining the page flags as we
956230513f1e35e9d694c219c7009034b71bb836vboxsync * currently do. Although it wouldn't harm to optimize these quite a bit, like
956230513f1e35e9d694c219c7009034b71bb836vboxsync * for instance the ROM shouldn't depend on having a write handler installed
956230513f1e35e9d694c219c7009034b71bb836vboxsync * in order for it to become read-only. A RO/RW bit should be considered so
956230513f1e35e9d694c219c7009034b71bb836vboxsync * that the page syncing code doesn't have to mess about checking multiple
956230513f1e35e9d694c219c7009034b71bb836vboxsync * flag combinations (ROM || RW handler || write monitored) in order to
956230513f1e35e9d694c219c7009034b71bb836vboxsync * figure out how to setup a shadow PTE. But this of course, is second
956230513f1e35e9d694c219c7009034b71bb836vboxsync * priority at present. Current this requires 12 bits, but could probably
956230513f1e35e9d694c219c7009034b71bb836vboxsync * be optimized to ~8.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * Then there's the 24 bits used to track which shadow page tables are
956230513f1e35e9d694c219c7009034b71bb836vboxsync * currently mapping a page for the purpose of speeding up physical
956230513f1e35e9d694c219c7009034b71bb836vboxsync * access handlers, and thereby the page pool cache. More bits for this
956230513f1e35e9d694c219c7009034b71bb836vboxsync * purpose wouldn't hurt IIRC.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * Then there is a new bit in which we need to record what kind of page
956230513f1e35e9d694c219c7009034b71bb836vboxsync * this is, shared, zero, normal or write-monitored-normal. This'll
956230513f1e35e9d694c219c7009034b71bb836vboxsync * require 2 bits. One bit might be needed for indicating whether a
956230513f1e35e9d694c219c7009034b71bb836vboxsync * write monitored page has been written to. And yet another one or
956230513f1e35e9d694c219c7009034b71bb836vboxsync * two for tracking migration status. 3-4 bits total then.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * Whatever is left can be used to record the sharability of a
956230513f1e35e9d694c219c7009034b71bb836vboxsync * page. The page checksum will not be stored in the per-VM table as
956230513f1e35e9d694c219c7009034b71bb836vboxsync * the idle thread will not be permitted to do modifications to it.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * It will instead have to keep its own working set of potentially
956230513f1e35e9d694c219c7009034b71bb836vboxsync * shareable pages and their check sums and stuff.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * For the present we'll keep the current packing of the
956230513f1e35e9d694c219c7009034b71bb836vboxsync * PGMRAMRANGE::aHCPhys to keep the changes simple, only of course,
956230513f1e35e9d694c219c7009034b71bb836vboxsync * we'll have to change it to a struct with a total of 128-bits at
956230513f1e35e9d694c219c7009034b71bb836vboxsync * our disposal.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * The initial layout will be like this:
956230513f1e35e9d694c219c7009034b71bb836vboxsync * @verbatim
956230513f1e35e9d694c219c7009034b71bb836vboxsync RTHCPHYS HCPhys; The current stuff.
956230513f1e35e9d694c219c7009034b71bb836vboxsync 63:40 Current shadow PT tracking stuff.
956230513f1e35e9d694c219c7009034b71bb836vboxsync 39:12 The physical page frame number.
956230513f1e35e9d694c219c7009034b71bb836vboxsync 11:0 The current flags.
956230513f1e35e9d694c219c7009034b71bb836vboxsync uint32_t u28PageId : 28; The page id.
956230513f1e35e9d694c219c7009034b71bb836vboxsync uint32_t u2State : 2; The page state { zero, shared, normal, write monitored }.
956230513f1e35e9d694c219c7009034b71bb836vboxsync uint32_t fWrittenTo : 1; Whether a write monitored page was written to.
956230513f1e35e9d694c219c7009034b71bb836vboxsync uint32_t u1Reserved : 1; Reserved for later.
956230513f1e35e9d694c219c7009034b71bb836vboxsync uint32_t u32Reserved; Reserved for later, mostly sharing stats.
956230513f1e35e9d694c219c7009034b71bb836vboxsync @endverbatim
956230513f1e35e9d694c219c7009034b71bb836vboxsync * The final layout will be something like this:
956230513f1e35e9d694c219c7009034b71bb836vboxsync * @verbatim
956230513f1e35e9d694c219c7009034b71bb836vboxsync RTHCPHYS HCPhys; The current stuff.
956230513f1e35e9d694c219c7009034b71bb836vboxsync 63:48 High page id (12+).
956230513f1e35e9d694c219c7009034b71bb836vboxsync 47:12 The physical page frame number.
956230513f1e35e9d694c219c7009034b71bb836vboxsync 11:0 Low page id.
956230513f1e35e9d694c219c7009034b71bb836vboxsync uint32_t fReadOnly : 1; Whether it's readonly page (rom or monitored in some way).
956230513f1e35e9d694c219c7009034b71bb836vboxsync uint32_t u3Type : 3; The page type {RESERVED, MMIO, MMIO2, ROM, shadowed ROM, RAM}.
956230513f1e35e9d694c219c7009034b71bb836vboxsync uint32_t u2PhysMon : 2; Physical access handler type {none, read, write, all}.
956230513f1e35e9d694c219c7009034b71bb836vboxsync uint32_t u2VirtMon : 2; Virtual access handler type {none, read, write, all}..
956230513f1e35e9d694c219c7009034b71bb836vboxsync uint32_t u2State : 2; The page state { zero, shared, normal, write monitored }.
956230513f1e35e9d694c219c7009034b71bb836vboxsync uint32_t fWrittenTo : 1; Whether a write monitored page was written to.
956230513f1e35e9d694c219c7009034b71bb836vboxsync uint32_t u20Reserved : 20; Reserved for later, mostly sharing stats.
956230513f1e35e9d694c219c7009034b71bb836vboxsync uint32_t u32Tracking; The shadow PT tracking stuff, roughly.
956230513f1e35e9d694c219c7009034b71bb836vboxsync @endverbatim
956230513f1e35e9d694c219c7009034b71bb836vboxsync * Cost wise, this means we'll double the cost for guest memory. There isn't anyway
956230513f1e35e9d694c219c7009034b71bb836vboxsync * around that I'm afraid. It means that the cost of dealing out 32GB of memory
956230513f1e35e9d694c219c7009034b71bb836vboxsync * to one or more VMs is: (32GB >> PAGE_SHIFT) * 16 bytes, or 128MBs. Or another
956230513f1e35e9d694c219c7009034b71bb836vboxsync * example, the VM heap cost when assigning 1GB to a VM will be: 4MB.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * A couple of cost examples for the total cost per-VM + kernel.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 32-bit Windows and 32-bit linux:
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 1GB guest ram, 256K pages: 4MB + 2MB(+) = 6MB
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 4GB guest ram, 1M pages: 16MB + 8MB(+) = 24MB
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 32GB guest ram, 8M pages: 128MB + 64MB(+) = 192MB
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 64-bit Windows and 64-bit linux:
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 1GB guest ram, 256K pages: 4MB + 3MB(+) = 7MB
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 4GB guest ram, 1M pages: 16MB + 12MB(+) = 28MB
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 32GB guest ram, 8M pages: 128MB + 96MB(+) = 224MB
956230513f1e35e9d694c219c7009034b71bb836vboxsync * UPDATE - 2007-09-27:
956230513f1e35e9d694c219c7009034b71bb836vboxsync * Will need a ballooned flag/state too because we cannot
956230513f1e35e9d694c219c7009034b71bb836vboxsync * trust the guest 100% and reporting the same page as ballooned more
956230513f1e35e9d694c219c7009034b71bb836vboxsync * than once will put the GMM off balance.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * @subsection subsec_pgmPhys_Serializing Serializing Access
956230513f1e35e9d694c219c7009034b71bb836vboxsync * Initially, we'll try a simple scheme:
956230513f1e35e9d694c219c7009034b71bb836vboxsync * - The per-VM RAM tracking structures (PGMRAMRANGE) is only modified
956230513f1e35e9d694c219c7009034b71bb836vboxsync * by the EMT thread of that VM while in the pgm critsect.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * - Other threads in the VM process that needs to make reliable use of
956230513f1e35e9d694c219c7009034b71bb836vboxsync * the per-VM RAM tracking structures will enter the critsect.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * - No process external thread or kernel thread will ever try enter
956230513f1e35e9d694c219c7009034b71bb836vboxsync * the pgm critical section, as that just won't work.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * - The idle thread (and similar threads) doesn't not need 100% reliable
956230513f1e35e9d694c219c7009034b71bb836vboxsync * data when performing it tasks as the EMT thread will be the one to
956230513f1e35e9d694c219c7009034b71bb836vboxsync * do the actual changes later anyway. So, as long as it only accesses
956230513f1e35e9d694c219c7009034b71bb836vboxsync * the main ram range, it can do so by somehow preventing the VM from
956230513f1e35e9d694c219c7009034b71bb836vboxsync * being destroyed while it works on it...
956230513f1e35e9d694c219c7009034b71bb836vboxsync * - The over-commitment management, including the allocating/freeing
956230513f1e35e9d694c219c7009034b71bb836vboxsync * chunks, is serialized by a ring-0 mutex lock (a fast one since the
956230513f1e35e9d694c219c7009034b71bb836vboxsync * more mundane mutex implementation is broken on Linux).
956230513f1e35e9d694c219c7009034b71bb836vboxsync * - A separeate mutex is protecting the set of allocation chunks so
956230513f1e35e9d694c219c7009034b71bb836vboxsync * that pages can be shared or/and freed up while some other VM is
956230513f1e35e9d694c219c7009034b71bb836vboxsync * allocating more chunks. This mutex can be take from under the other
956230513f1e35e9d694c219c7009034b71bb836vboxsync * one, but not the otherway around.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * @subsection subsec_pgmPhys_Request VM Request interface
956230513f1e35e9d694c219c7009034b71bb836vboxsync * When in ring-0 it will become necessary to send requests to a VM so it can
956230513f1e35e9d694c219c7009034b71bb836vboxsync * for instance move a page while defragmenting during VM destroy. The idle
956230513f1e35e9d694c219c7009034b71bb836vboxsync * thread will make use of this interface to request VMs to setup shared
956230513f1e35e9d694c219c7009034b71bb836vboxsync * pages and to perform write monitoring of pages.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * I would propose an interface similar to the current VMReq interface, similar
956230513f1e35e9d694c219c7009034b71bb836vboxsync * in that it doesn't require locking and that the one sending the request may
956230513f1e35e9d694c219c7009034b71bb836vboxsync * wait for completion if it wishes to. This shouldn't be very difficult to
956230513f1e35e9d694c219c7009034b71bb836vboxsync * The requests themselves are also pretty simple. They are basically:
956230513f1e35e9d694c219c7009034b71bb836vboxsync * -# Check that some precondition is still true.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * -# Do the update.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * -# Update all shadow page tables involved with the page.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * The 3rd step is identical to what we're already doing when updating a
956230513f1e35e9d694c219c7009034b71bb836vboxsync * physical handler, see pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * @section sec_pgmPhys_MappingCaches Mapping Caches
956230513f1e35e9d694c219c7009034b71bb836vboxsync * In order to be able to map in and out memory and to be able to support
956230513f1e35e9d694c219c7009034b71bb836vboxsync * guest with more RAM than we've got virtual address space, we'll employing
956230513f1e35e9d694c219c7009034b71bb836vboxsync * a mapping cache. Normally ring-0 and ring-3 can share the same cache,
956230513f1e35e9d694c219c7009034b71bb836vboxsync * however on 32-bit darwin the ring-0 code is running in a different memory
956230513f1e35e9d694c219c7009034b71bb836vboxsync * context and therefore needs a separate cache. In raw-mode context we also
956230513f1e35e9d694c219c7009034b71bb836vboxsync * need a separate cache. The 32-bit darwin mapping cache and the one for
956230513f1e35e9d694c219c7009034b71bb836vboxsync * raw-mode context share a lot of code, see PGMRZDYNMAP.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * @subsection subsec_pgmPhys_MappingCaches_R3 Ring-3
956230513f1e35e9d694c219c7009034b71bb836vboxsync * We've considered implementing the ring-3 mapping cache page based but found
956230513f1e35e9d694c219c7009034b71bb836vboxsync * that this was bother some when one had to take into account TLBs+SMP and
956230513f1e35e9d694c219c7009034b71bb836vboxsync * portability (missing the necessary APIs on several platforms). There were
956230513f1e35e9d694c219c7009034b71bb836vboxsync * also some performance concerns with this approach which hadn't quite been
956230513f1e35e9d694c219c7009034b71bb836vboxsync * worked out.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * Instead, we'll be mapping allocation chunks into the VM process. This simplifies
956230513f1e35e9d694c219c7009034b71bb836vboxsync * matters greatly quite a bit since we don't need to invent any new ring-0 stuff,
956230513f1e35e9d694c219c7009034b71bb836vboxsync * only some minor RTR0MEMOBJ mapping stuff. The main concern here is that mapping
956230513f1e35e9d694c219c7009034b71bb836vboxsync * compared to the previous idea is that mapping or unmapping a 1MB chunk is more
956230513f1e35e9d694c219c7009034b71bb836vboxsync * costly than a single page, although how much more costly is uncertain. We'll
956230513f1e35e9d694c219c7009034b71bb836vboxsync * try address this by using a very big cache, preferably bigger than the actual
956230513f1e35e9d694c219c7009034b71bb836vboxsync * VM RAM size if possible. The current VM RAM sizes should give some idea for
956230513f1e35e9d694c219c7009034b71bb836vboxsync * 32-bit boxes, while on 64-bit we can probably get away with employing an
956230513f1e35e9d694c219c7009034b71bb836vboxsync * unlimited cache.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * The cache have to parts, as already indicated, the ring-3 side and the
956230513f1e35e9d694c219c7009034b71bb836vboxsync * ring-0 side.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * The ring-0 will be tied to the page allocator since it will operate on the
956230513f1e35e9d694c219c7009034b71bb836vboxsync * memory objects it contains. It will therefore require the first ring-0 mutex
956230513f1e35e9d694c219c7009034b71bb836vboxsync * discussed in @ref subsec_pgmPhys_Serializing. We
956230513f1e35e9d694c219c7009034b71bb836vboxsync * some double house keeping wrt to who has mapped what I think, since both
956230513f1e35e9d694c219c7009034b71bb836vboxsync * VMMR0.r0 and RTR0MemObj will keep track of mapping relataions
956230513f1e35e9d694c219c7009034b71bb836vboxsync * The ring-3 part will be protected by the pgm critsect. For simplicity, we'll
956230513f1e35e9d694c219c7009034b71bb836vboxsync * require anyone that desires to do changes to the mapping cache to do that
956230513f1e35e9d694c219c7009034b71bb836vboxsync * from within this critsect. Alternatively, we could employ a separate critsect
956230513f1e35e9d694c219c7009034b71bb836vboxsync * for serializing changes to the mapping cache as this would reduce potential
956230513f1e35e9d694c219c7009034b71bb836vboxsync * contention with other threads accessing mappings unrelated to the changes
956230513f1e35e9d694c219c7009034b71bb836vboxsync * that are in process. We can see about this later, contention will show
956230513f1e35e9d694c219c7009034b71bb836vboxsync * up in the statistics anyway, so it'll be simple to tell.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * The organization of the ring-3 part will be very much like how the allocation
956230513f1e35e9d694c219c7009034b71bb836vboxsync * chunks are organized in ring-0, that is in an AVL tree by chunk id. To avoid
956230513f1e35e9d694c219c7009034b71bb836vboxsync * having to walk the tree all the time, we'll have a couple of lookaside entries
956230513f1e35e9d694c219c7009034b71bb836vboxsync * like in we do for I/O ports and MMIO in IOM.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * The simplified flow of a PGMPhysRead/Write function:
956230513f1e35e9d694c219c7009034b71bb836vboxsync * -# Enter the PGM critsect.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * -# Lookup GCPhys in the ram ranges and get the Page ID.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * -# Calc the Allocation Chunk ID from the Page ID.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * -# Check the lookaside entries and then the AVL tree for the Chunk ID.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * If not found in cache:
956230513f1e35e9d694c219c7009034b71bb836vboxsync * -# Call ring-0 and request it to be mapped and supply
956230513f1e35e9d694c219c7009034b71bb836vboxsync * a chunk to be unmapped if the cache is maxed out already.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * -# Insert the new mapping into the AVL tree (id + R3 address).
956230513f1e35e9d694c219c7009034b71bb836vboxsync * -# Update the relevant lookaside entry and return the mapping address.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * -# Do the read/write according to monitoring flags and everything.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * -# Leave the critsect.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * @section sec_pgmPhys_Fallback Fallback
956230513f1e35e9d694c219c7009034b71bb836vboxsync * Current all the "second tier" hosts will not support the RTR0MemObjAllocPhysNC
956230513f1e35e9d694c219c7009034b71bb836vboxsync * API and thus require a fallback.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * So, when RTR0MemObjAllocPhysNC returns VERR_NOT_SUPPORTED the page allocator
956230513f1e35e9d694c219c7009034b71bb836vboxsync * will return to the ring-3 caller (and later ring-0) and asking it to seed
956230513f1e35e9d694c219c7009034b71bb836vboxsync * the page allocator with some fresh pages (VERR_GMM_SEED_ME). Ring-3 will
956230513f1e35e9d694c219c7009034b71bb836vboxsync * then perform an SUPR3PageAlloc(cbChunk >> PAGE_SHIFT) call and make a
956230513f1e35e9d694c219c7009034b71bb836vboxsync * "SeededAllocPages" call to ring-0.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * The first time ring-0 sees the VERR_NOT_SUPPORTED failure it will disable
956230513f1e35e9d694c219c7009034b71bb836vboxsync * all page sharing (zero page detection will continue). It will also force
956230513f1e35e9d694c219c7009034b71bb836vboxsync * all allocations to come from the VM which seeded the page. Both these
956230513f1e35e9d694c219c7009034b71bb836vboxsync * measures are taken to make sure that there will never be any need for
956230513f1e35e9d694c219c7009034b71bb836vboxsync * mapping anything into ring-3 - everything will be mapped already.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * Whether we'll continue to use the current MM locked memory management
956230513f1e35e9d694c219c7009034b71bb836vboxsync * for this I don't quite know (I'd prefer not to and just ditch that all
956230513f1e35e9d694c219c7009034b71bb836vboxsync * togther), we'll see what's simplest to do.
956230513f1e35e9d694c219c7009034b71bb836vboxsync * @section sec_pgmPhys_Changes Changes
956230513f1e35e9d694c219c7009034b71bb836vboxsync * Breakdown of the changes involved?
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/*******************************************************************************
956230513f1e35e9d694c219c7009034b71bb836vboxsync* Header Files *
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync*******************************************************************************/
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/*******************************************************************************
956230513f1e35e9d694c219c7009034b71bb836vboxsync* Internal Functions *
956230513f1e35e9d694c219c7009034b71bb836vboxsync*******************************************************************************/
956230513f1e35e9d694c219c7009034b71bb836vboxsyncstatic DECLCALLBACK(void) pgmR3PhysInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
956230513f1e35e9d694c219c7009034b71bb836vboxsyncstatic DECLCALLBACK(void) pgmR3InfoMode(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
956230513f1e35e9d694c219c7009034b71bb836vboxsyncstatic DECLCALLBACK(void) pgmR3InfoCr3(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
956230513f1e35e9d694c219c7009034b71bb836vboxsyncstatic DECLCALLBACK(int) pgmR3RelocatePhysHandler(PAVLROGCPHYSNODECORE pNode, void *pvUser);
956230513f1e35e9d694c219c7009034b71bb836vboxsyncstatic DECLCALLBACK(int) pgmR3RelocateVirtHandler(PAVLROGCPTRNODECORE pNode, void *pvUser);
956230513f1e35e9d694c219c7009034b71bb836vboxsyncstatic DECLCALLBACK(int) pgmR3RelocateHyperVirtHandler(PAVLROGCPTRNODECORE pNode, void *pvUser);
956230513f1e35e9d694c219c7009034b71bb836vboxsyncstatic DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser);
956230513f1e35e9d694c219c7009034b71bb836vboxsyncstatic int pgmR3ModeDataInit(PVM pVM, bool fResolveGCAndR0);
956230513f1e35e9d694c219c7009034b71bb836vboxsyncstatic void pgmR3ModeDataSwitch(PVM pVM, PVMCPU pVCpu, PGMMODE enmShw, PGMMODE enmGst);
956230513f1e35e9d694c219c7009034b71bb836vboxsyncstatic PGMMODE pgmR3CalcShadowMode(PVM pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode, VMMSWITCHER *penmSwitcher);
956230513f1e35e9d694c219c7009034b71bb836vboxsync/** @todo Convert the first two commands to 'info' items. */
956230513f1e35e9d694c219c7009034b71bb836vboxsyncstatic DECLCALLBACK(int) pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
956230513f1e35e9d694c219c7009034b71bb836vboxsyncstatic DECLCALLBACK(int) pgmR3CmdError(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
956230513f1e35e9d694c219c7009034b71bb836vboxsyncstatic DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
956230513f1e35e9d694c219c7009034b71bb836vboxsyncstatic DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
956230513f1e35e9d694c219c7009034b71bb836vboxsyncstatic DECLCALLBACK(int) pgmR3CmdAssertCR3(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
956230513f1e35e9d694c219c7009034b71bb836vboxsyncstatic DECLCALLBACK(int) pgmR3CmdPhysToFile(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
956230513f1e35e9d694c219c7009034b71bb836vboxsync/*******************************************************************************
956230513f1e35e9d694c219c7009034b71bb836vboxsync* Global Variables *
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync*******************************************************************************/
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/** Argument descriptors for '.pgmerror' and '.pgmerroroff'. */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync { 0, 1, DBGCVAR_CAT_STRING, 0, "where", "Error injection location." },
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync { 1, 1, DBGCVAR_CAT_STRING, 0, "file", "The file name." },
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync { 0, 1, DBGCVAR_CAT_STRING, 0, "nozero", "If present, zero pages are skipped." },
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/** Command descriptors. */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, pResultDesc, fFlags, pfnHandler pszSyntax, ....pszDescription */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync { "pgmram", 0, 0, NULL, 0, NULL, 0, pgmR3CmdRam, "", "Display the ram ranges." },
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync { "pgmsync", 0, 0, NULL, 0, NULL, 0, pgmR3CmdSync, "", "Sync the CR3 page." },
956230513f1e35e9d694c219c7009034b71bb836vboxsync { "pgmerror", 0, 1, &g_aPgmErrorArgs[0], 1, NULL, 0, pgmR3CmdError, "", "Enables inject runtime of errors into parts of PGM." },
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync { "pgmerroroff", 0, 1, &g_aPgmErrorArgs[0], 1, NULL, 0, pgmR3CmdError, "", "Disables inject runtime errors into parts of PGM." },
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync { "pgmassertcr3", 0, 0, NULL, 0, NULL, 0, pgmR3CmdAssertCR3, "", "Check the shadow CR3 mapping." },
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync { "pgmcheckduppages", 0, 0, NULL, 0, NULL, 0, pgmR3CmdCheckDuplicatePages, "", "Check for duplicate pages in all running VMs." },
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync { "pgmsharedmodules", 0, 0, NULL, 0, NULL, 0, pgmR3CmdShowSharedModules, "", "Print shared modules info." },
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync { "pgmsyncalways", 0, 0, NULL, 0, NULL, 0, pgmR3CmdSyncAlways, "", "Toggle permanent CR3 syncing." },
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync { "pgmphystofile", 1, 2, &g_aPgmPhysToFileArgs[0], 2, NULL, 0, pgmR3CmdPhysToFile, "", "Save the physical memory to file." },
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Shadow - 32-bit mode
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_SHW_NAME_RC_STR(name) PGM_SHW_NAME_RC_32BIT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_SHW_NAME_R0_STR(name) PGM_SHW_NAME_R0_32BIT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/* Guest - real mode */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_REAL_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_REAL_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
956230513f1e35e9d694c219c7009034b71bb836vboxsync#define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_32BIT_REAL_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_32BIT_REAL_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/* Guest - protected mode */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_PROT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_PROT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_32BIT_PROT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_32BIT_PROT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/* Guest - 32-bit mode */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_32BIT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_32BIT_STR(name)
956230513f1e35e9d694c219c7009034b71bb836vboxsync#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_32BIT_32BIT_STR(name)
956230513f1e35e9d694c219c7009034b71bb836vboxsync#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_32BIT_32BIT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
956230513f1e35e9d694c219c7009034b71bb836vboxsync * Shadow - PAE mode
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_SHW_NAME_RC_STR(name) PGM_SHW_NAME_RC_PAE_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_SHW_NAME_R0_STR(name) PGM_SHW_NAME_R0_PAE_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync/* Guest - real mode */
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_REAL_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_REAL_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_PAE_REAL_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_PAE_REAL_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync/* Guest - protected mode */
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_PROT_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_PROT_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_PAE_PROT_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_PAE_PROT_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync/* Guest - 32-bit mode */
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_32BIT_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_32BIT_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_PAE_32BIT_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_PAE_32BIT_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync/* Guest - PAE mode */
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_PAE_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_PAE_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_PAE_PAE_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_PAE_PAE_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Shadow - AMD64 mode
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_SHW_NAME_RC_STR(name) PGM_SHW_NAME_RC_AMD64_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_SHW_NAME_R0_STR(name) PGM_SHW_NAME_R0_AMD64_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/* Guest - AMD64 mode */
956230513f1e35e9d694c219c7009034b71bb836vboxsync# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_AMD64_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_AMD64_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_AMD64_AMD64_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_AMD64_AMD64_STR(name)
956230513f1e35e9d694c219c7009034b71bb836vboxsync# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#endif /* VBOX_WITH_64_BITS_GUESTS */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Shadow - Nested paging mode
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_SHW_NAME_RC_STR(name) PGM_SHW_NAME_RC_NESTED_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_SHW_NAME_R0_STR(name) PGM_SHW_NAME_R0_NESTED_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/* Guest - real mode */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_REAL_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_REAL_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_NESTED_REAL_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_NESTED_REAL_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/* Guest - protected mode */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_PROT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_PROT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_NESTED_PROT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_NESTED_PROT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/* Guest - 32-bit mode */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_32BIT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_32BIT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_NESTED_32BIT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_NESTED_32BIT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/* Guest - PAE mode */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_PAE_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_PAE_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_NESTED_PAE_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_NESTED_PAE_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/* Guest - AMD64 mode */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_AMD64_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_AMD64_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
956230513f1e35e9d694c219c7009034b71bb836vboxsync# define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_NESTED_AMD64_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_NESTED_AMD64_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#endif /* VBOX_WITH_64_BITS_GUESTS */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Shadow - EPT
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_SHW_NAME_RC_STR(name) PGM_SHW_NAME_RC_EPT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_SHW_NAME_R0_STR(name) PGM_SHW_NAME_R0_EPT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/* Guest - real mode */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_REAL_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_REAL_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
956230513f1e35e9d694c219c7009034b71bb836vboxsync#define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_EPT_REAL_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_EPT_REAL_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync/* Guest - protected mode */
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_PROT_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_PROT_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_EPT_PROT_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_EPT_PROT_STR(name)
49bcf64aaacb2de9703cdd3633eec0a2610c3cf3vboxsync#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/* Guest - 32-bit mode */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_32BIT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_32BIT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_EPT_32BIT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_EPT_32BIT_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/* Guest - PAE mode */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_PAE_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_PAE_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_EPT_PAE_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_EPT_PAE_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync/* Guest - AMD64 mode */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define PGM_GST_NAME_RC_STR(name) PGM_GST_NAME_RC_AMD64_STR(name)
956230513f1e35e9d694c219c7009034b71bb836vboxsync# define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_AMD64_STR(name)
956230513f1e35e9d694c219c7009034b71bb836vboxsync# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define PGM_BTH_NAME_RC_STR(name) PGM_BTH_NAME_RC_EPT_AMD64_STR(name)
956230513f1e35e9d694c219c7009034b71bb836vboxsync# define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_EPT_AMD64_STR(name)
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync#endif /* VBOX_WITH_64_BITS_GUESTS */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Initiates the paging of VM.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * @returns VBox status code.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * @param pVM Pointer to VM structure.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync PCFGMNODE pCfgPGM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/PGM");
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Assert alignment and sizes.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync AssertCompile(sizeof(pVM->pgm.s) <= sizeof(pVM->pgm.padding));
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync AssertCompile(sizeof(pVM->aCpus[0].pgm.s) <= sizeof(pVM->aCpus[0].pgm.padding));
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync AssertCompileMemberAlignment(PGM, CritSect, sizeof(uintptr_t));
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Init the structure.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync /* Init the per-CPU part. */
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
35d0bfb0c568c691325cade7618555e3d3df918avboxsync pPGM->offVM = (uintptr_t)&pVCpu->pgm.s - (uintptr_t)pVM;
35d0bfb0c568c691325cade7618555e3d3df918avboxsync pPGM->offPGM = (uintptr_t)&pVCpu->pgm.s - (uintptr_t)&pVM->pgm.s;
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.apGstPaePDsR3); i++)
d012a89a724ba60c4fb5e74ce51f8b404fda4a8cvboxsync pVM->pgm.s.GCPhys4MBPSEMask = RT_BIT_64(32) - 1; /* default; checked later */
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync pVM->pgm.s.GCPtrPrevRamRangeMapping = MM_HYPER_AREA_ADDRESS;
d012a89a724ba60c4fb5e74ce51f8b404fda4a8cvboxsync rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "RamPreAlloc", &pVM->pgm.s.fRamPreAlloc,
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, _1G / GMM_CHUNK_SIZE);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync rc = CFGMR3QueryU32Def(pCfgPGM, "MaxRing3Chunks", &pVM->pgm.s.ChunkR3Map.cMax, UINT32_MAX);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync * Get the configured RAM size - to estimate saved state size.
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync AssertMsgFailed(("Configuration error: Failed to query integer \"RamSize\", rc=%Rrc.\n", rc));
d012a89a724ba60c4fb5e74ce51f8b404fda4a8cvboxsync * Allocate memory for the statistics before someone tries to use them.
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync size_t cbTotalStats = RT_ALIGN_Z(sizeof(PGMSTATS), 64) + RT_ALIGN_Z(sizeof(PGMCPUSTATS), 64) * pVM->cCpus;
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync rc = MMHyperAlloc(pVM, RT_ALIGN_Z(cbTotalStats, PAGE_SIZE), PAGE_SIZE, MM_TAG_PGM, &pv);
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync pv = (uint8_t *)pv + RT_ALIGN_Z(sizeof(PGMSTATS), 64);
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync pVM->aCpus[iCpu].pgm.s.pStatsR3 = (PGMCPUSTATS *)pv;
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync pVM->aCpus[iCpu].pgm.s.pStatsR0 = MMHyperCCToR0(pVM, pv);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync pVM->aCpus[iCpu].pgm.s.pStatsRC = MMHyperCCToRC(pVM, pv);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync pv = (uint8_t *)pv + RT_ALIGN_Z(sizeof(PGMCPUSTATS), 64);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync#endif /* VBOX_WITH_STATISTICS */
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync * Register callbacks, string formatters and the saved state data unit.
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync VMR3AtStateRegister(pVM, pgmR3ResetNoMorePhysWritesFlag, NULL);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync * Initialize the PGM critical section and flush the phys TLBs
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync rc = PDMR3CritSectInit(pVM, &pVM->pgm.s.CritSect, RT_SRC_POS, "PGM");
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync * For the time being we sport a full set of handy pages in addition to the base
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync * memory to simplify things.
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync rc = MMR3ReserveHandyPages(pVM, RT_ELEMENTS(pVM->pgm.s.aHandyPages)); /** @todo this should be changed to PGM_HANDY_PAGES_MIN but this needs proper testing... */
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync rc = MMHyperAlloc(pVM, sizeof(PGMTREES), 0, MM_TAG_PGM, (void **)&pVM->pgm.s.pTreesR3);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync pVM->pgm.s.pTreesR0 = MMHyperR3ToR0(pVM, pVM->pgm.s.pTreesR3);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync pVM->pgm.s.pTreesRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pTreesR3);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync * Allocate the zero page.
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync rc = MMHyperAlloc(pVM, PAGE_SIZE, PAGE_SIZE, MM_TAG_PGM, &pVM->pgm.s.pvZeroPgR3);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync pVM->pgm.s.pvZeroPgRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pvZeroPgR3);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync pVM->pgm.s.pvZeroPgR0 = MMHyperR3ToR0(pVM, pVM->pgm.s.pvZeroPgR3);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync pVM->pgm.s.HCPhysZeroPg = MMR3HyperHCVirt2HCPhys(pVM, pVM->pgm.s.pvZeroPgR3);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync AssertRelease(pVM->pgm.s.HCPhysZeroPg != NIL_RTHCPHYS);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync * Allocate the invalid MMIO page.
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync * (The invalid bits in HCPhysInvMmioPg are set later on init complete.)
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync rc = MMHyperAlloc(pVM, PAGE_SIZE, PAGE_SIZE, MM_TAG_PGM, &pVM->pgm.s.pvMmioPgR3);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync ASMMemFill32(pVM->pgm.s.pvMmioPgR3, PAGE_SIZE, 0xfeedface);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync pVM->pgm.s.HCPhysMmioPg = MMR3HyperHCVirt2HCPhys(pVM, pVM->pgm.s.pvMmioPgR3);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync AssertRelease(pVM->pgm.s.HCPhysMmioPg != NIL_RTHCPHYS);
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync pVM->pgm.s.HCPhysInvMmioPg = pVM->pgm.s.HCPhysMmioPg;
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync * Init the paging.
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync * Init the page pool.
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync * Info & statistics
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync "Shows the current paging mode. "
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync "Recognizes 'all', 'guest', 'shadow' and 'host' as arguments, defaulting to 'all' if nothing's given.",
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync "Dumps all the entries in the top level paging table. No arguments.",
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync "Dumps all the physical address ranges. No arguments.",
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync "Dumps physical, virtual and hyper virtual handlers. "
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync "Pass 'phys', 'virt', 'hyper' as argument if only one kind is wanted."
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync "Add 'nost' if the statistics are unwanted, use together with 'all' or explicit selection.",
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync "Dumps guest mappings.",
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync * Debugger commands.
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync static bool s_fRegisteredCmds = false;
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync int rc2 = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync /* Almost no cleanup necessary, MM frees all memory. */
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync * Initializes the per-VCPU PGM.
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync * @returns VBox status code.
a917e98de4393d1090e536fdbeb04c285f98c92evboxsync * @param pVM The VM to operate on.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Init paging.
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync * Since we need to check what mode the host is operating in before we can choose
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * the right paging functions for the host we have to delay this until R0 has
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * been initialized.
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * @returns VBox status code.
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * @param pVM VM handle.
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * Force a recalculation of modes and switcher so everyone gets notified.
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * Allocate static mapping space for whatever the cr3 register
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * points to and in the case of PAE mode to the 4 PDs.
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync int rc = MMR3HyperReserve(pVM, PAGE_SIZE * 5, "CR3 mapping", &pVM->pgm.s.GCPtrCR3Mapping);
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync AssertMsgFailed(("Failed to reserve two pages for cr mapping in HMA, rc=%Rrc\n", rc));
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * Allocate pages for the three possible intermediate contexts
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * (AMD64, PAE and plain 32-Bit). We maintain all three contexts
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * for the sake of simplicity. The AMD64 uses the PAE for the
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * lower levels, making the total number of pages 11 (3 + 7 + 1).
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * We assume that two page tables will be enought for the core code
110a95f133a07d2c98a580d226abf8850bc2bd47vboxsync * mappings (HC virtual and identity).
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync pVM->pgm.s.pInterPD = (PX86PD)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.pInterPD, VERR_NO_PAGE_MEMORY);
f1f5335f9ec8e56fe0e3e27f253e24b10ff20f2evboxsync pVM->pgm.s.apInterPTs[0] = (PX86PT)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.apInterPTs[0], VERR_NO_PAGE_MEMORY);
pVM->pgm.s.apInterPTs[1] = (PX86PT)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.apInterPTs[1], VERR_NO_PAGE_MEMORY);
pVM->pgm.s.apInterPaePTs[0] = (PX86PTPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePTs[0], VERR_NO_PAGE_MEMORY);
pVM->pgm.s.apInterPaePTs[1] = (PX86PTPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePTs[1], VERR_NO_PAGE_MEMORY);
pVM->pgm.s.apInterPaePDs[0] = (PX86PDPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePDs[0], VERR_NO_PAGE_MEMORY);
pVM->pgm.s.apInterPaePDs[1] = (PX86PDPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePDs[1], VERR_NO_PAGE_MEMORY);
pVM->pgm.s.apInterPaePDs[2] = (PX86PDPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePDs[2], VERR_NO_PAGE_MEMORY);
pVM->pgm.s.apInterPaePDs[3] = (PX86PDPAE)MMR3PageAlloc(pVM); AssertReturn(pVM->pgm.s.apInterPaePDs[3], VERR_NO_PAGE_MEMORY);
pVM->pgm.s.pInterPaePDPT = (PX86PDPT)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.pInterPaePDPT, VERR_NO_PAGE_MEMORY);
pVM->pgm.s.pInterPaePDPT64 = (PX86PDPT)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.pInterPaePDPT64, VERR_NO_PAGE_MEMORY);
pVM->pgm.s.pInterPaePML4 = (PX86PML4)MMR3PageAllocLow(pVM); AssertReturn(pVM->pgm.s.pInterPaePML4, VERR_NO_PAGE_MEMORY);
AssertRelease(pVM->pgm.s.HCPhysInterPD != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPD & PAGE_OFFSET_MASK));
AssertRelease(pVM->pgm.s.HCPhysInterPaePDPT != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePDPT & PAGE_OFFSET_MASK));
AssertRelease(pVM->pgm.s.HCPhysInterPaePML4 != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePML4 & PAGE_OFFSET_MASK) && pVM->pgm.s.HCPhysInterPaePML4 < 0xffffffff);
pVM->pgm.s.pInterPaePDPT64->a[i].u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A | PGM_PLXFLAGS_PERMANENT
pVM->pgm.s.pInterPaePML4->a[i].u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A | PGM_PLXFLAGS_PERMANENT
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_GLOBAL:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
#ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
AssertMsgFailed(("Host mode %d (64-bit) is not supported by non-64bit builds\n", pVM->pgm.s.enmHostMode));
LogRel(("Debug: apInterPTs={%RHp,%RHp} apInterPaePTs={%RHp,%RHp} apInterPaePDs={%RHp,%RHp,%RHp,%RHp} pInterPaePDPT64=%RHp\n",
MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[0]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[1]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[2]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[3]),
return VINF_SUCCESS;
return rc;
int rc;
STAM_REL_REG(pVM, &pPGM->cAllPages, STAMTYPE_U32, "/PGM/Page/cAllPages", STAMUNIT_COUNT, "The total number of pages.");
STAM_REL_REG(pVM, &pPGM->cPrivatePages, STAMTYPE_U32, "/PGM/Page/cPrivatePages", STAMUNIT_COUNT, "The number of private pages.");
STAM_REL_REG(pVM, &pPGM->cSharedPages, STAMTYPE_U32, "/PGM/Page/cSharedPages", STAMUNIT_COUNT, "The number of shared pages.");
STAM_REL_REG(pVM, &pPGM->cReusedSharedPages, STAMTYPE_U32, "/PGM/Page/cReusedSharedPages", STAMUNIT_COUNT, "The number of reused shared pages.");
STAM_REL_REG(pVM, &pPGM->cZeroPages, STAMTYPE_U32, "/PGM/Page/cZeroPages", STAMUNIT_COUNT, "The number of zero backed pages.");
STAM_REL_REG(pVM, &pPGM->cPureMmioPages, STAMTYPE_U32, "/PGM/Page/cPureMmioPages", STAMUNIT_COUNT, "The number of pure MMIO pages.");
STAM_REL_REG(pVM, &pPGM->cMonitoredPages, STAMTYPE_U32, "/PGM/Page/cMonitoredPages", STAMUNIT_COUNT, "The number of write monitored pages.");
STAM_REL_REG(pVM, &pPGM->cWrittenToPages, STAMTYPE_U32, "/PGM/Page/cWrittenToPages", STAMUNIT_COUNT, "The number of previously write monitored pages that have been written to.");
STAM_REL_REG(pVM, &pPGM->cWriteLockedPages, STAMTYPE_U32, "/PGM/Page/cWriteLockedPages", STAMUNIT_COUNT, "The number of write(/read) locked pages.");
STAM_REL_REG(pVM, &pPGM->cReadLockedPages, STAMTYPE_U32, "/PGM/Page/cReadLockedPages", STAMUNIT_COUNT, "The number of read (only) locked pages.");
STAM_REL_REG(pVM, &pPGM->cBalloonedPages, STAMTYPE_U32, "/PGM/Page/cBalloonedPages", STAMUNIT_COUNT, "The number of ballooned pages.");
STAM_REL_REG(pVM, &pPGM->cHandyPages, STAMTYPE_U32, "/PGM/Page/cHandyPages", STAMUNIT_COUNT, "The number of handy pages (not included in cAllPages).");
STAM_REL_REG(pVM, &pPGM->cRelocations, STAMTYPE_COUNTER, "/PGM/cRelocations", STAMUNIT_OCCURENCES,"Number of hypervisor relocations.");
STAM_REL_REG(pVM, &pPGM->ChunkR3Map.c, STAMTYPE_U32, "/PGM/ChunkR3Map/c", STAMUNIT_COUNT, "Number of mapped chunks.");
STAM_REL_REG(pVM, &pPGM->ChunkR3Map.cMax, STAMTYPE_U32, "/PGM/ChunkR3Map/cMax", STAMUNIT_COUNT, "Maximum number of mapped chunks.");
STAM_REL_REG(pVM, &pPGM->cMappedChunks, STAMTYPE_U32, "/PGM/ChunkR3Map/Mapped", STAMUNIT_COUNT, "Number of times we mapped a chunk.");
STAM_REL_REG(pVM, &pPGM->cUnmappedChunks, STAMTYPE_U32, "/PGM/ChunkR3Map/Unmapped", STAMUNIT_COUNT, "Number of times we unmapped a chunk.");
STAM_REL_REG(pVM, &pPGM->StatLargePageAlloc, STAMTYPE_COUNTER, "/PGM/LargePage/Alloc", STAMUNIT_OCCURENCES, "The number of large pages we've used.");
STAM_REL_REG(pVM, &pPGM->StatLargePageReused, STAMTYPE_COUNTER, "/PGM/LargePage/Reused", STAMUNIT_OCCURENCES, "The number of times we've reused a large page.");
STAM_REL_REG(pVM, &pPGM->StatLargePageRefused, STAMTYPE_COUNTER, "/PGM/LargePage/Refused", STAMUNIT_OCCURENCES, "The number of times we couldn't use a large page.");
STAM_REL_REG(pVM, &pPGM->StatLargePageRecheck, STAMTYPE_COUNTER, "/PGM/LargePage/Recheck", STAMUNIT_OCCURENCES, "The number of times we've rechecked a disabled large page.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.fActive, STAMTYPE_U8, "/PGM/LiveSave/fActive", STAMUNIT_COUNT, "Active or not.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.cIgnoredPages, STAMTYPE_U32, "/PGM/LiveSave/cIgnoredPages", STAMUNIT_COUNT, "The number of ignored pages in the RAM ranges (i.e. MMIO, MMIO2 and ROM).");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.cDirtyPagesLong, STAMTYPE_U32, "/PGM/LiveSave/cDirtyPagesLong", STAMUNIT_COUNT, "Longer term dirty page average.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.cDirtyPagesShort, STAMTYPE_U32, "/PGM/LiveSave/cDirtyPagesShort", STAMUNIT_COUNT, "Short term dirty page average.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.cPagesPerSecond, STAMTYPE_U32, "/PGM/LiveSave/cPagesPerSecond", STAMUNIT_COUNT, "Pages per second.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.cSavedPages, STAMTYPE_U64, "/PGM/LiveSave/cSavedPages", STAMUNIT_COUNT, "The total number of saved pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Ram.cReadyPages, STAMTYPE_U32, "/PGM/LiveSave/Ram/cReadPages", STAMUNIT_COUNT, "RAM: Ready pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Ram.cDirtyPages, STAMTYPE_U32, "/PGM/LiveSave/Ram/cDirtyPages", STAMUNIT_COUNT, "RAM: Dirty pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Ram.cZeroPages, STAMTYPE_U32, "/PGM/LiveSave/Ram/cZeroPages", STAMUNIT_COUNT, "RAM: Ready zero pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Ram.cMonitoredPages, STAMTYPE_U32, "/PGM/LiveSave/Ram/cMonitoredPages", STAMUNIT_COUNT, "RAM: Write monitored pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Rom.cReadyPages, STAMTYPE_U32, "/PGM/LiveSave/Rom/cReadPages", STAMUNIT_COUNT, "ROM: Ready pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Rom.cDirtyPages, STAMTYPE_U32, "/PGM/LiveSave/Rom/cDirtyPages", STAMUNIT_COUNT, "ROM: Dirty pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Rom.cZeroPages, STAMTYPE_U32, "/PGM/LiveSave/Rom/cZeroPages", STAMUNIT_COUNT, "ROM: Ready zero pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Rom.cMonitoredPages, STAMTYPE_U32, "/PGM/LiveSave/Rom/cMonitoredPages", STAMUNIT_COUNT, "ROM: Write monitored pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Mmio2.cReadyPages, STAMTYPE_U32, "/PGM/LiveSave/Mmio2/cReadPages", STAMUNIT_COUNT, "MMIO2: Ready pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Mmio2.cDirtyPages, STAMTYPE_U32, "/PGM/LiveSave/Mmio2/cDirtyPages", STAMUNIT_COUNT, "MMIO2: Dirty pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Mmio2.cZeroPages, STAMTYPE_U32, "/PGM/LiveSave/Mmio2/cZeroPages", STAMUNIT_COUNT, "MMIO2: Ready zero pages.");
STAM_REL_REG_USED(pVM, &pPGM->LiveSave.Mmio2.cMonitoredPages,STAMTYPE_U32, "/PGM/LiveSave/Mmio2/cMonitoredPages",STAMUNIT_COUNT, "MMIO2: Write monitored pages.");
#ifdef VBOX_WITH_STATISTICS
# define PGM_REG_COUNTER(a, b, c) \
# define PGM_REG_COUNTER_BYTES(a, b, c) \
# define PGM_REG_PROFILE(a, b, c) \
rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b); \
PGM_REG_PROFILE(&pStats->StatAllocLargePage, "/PGM/LargePage/Prof/Alloc", "Time spent by the host OS for large page allocation.");
PGM_REG_PROFILE(&pStats->StatClearLargePage, "/PGM/LargePage/Prof/Clear", "Time spent clearing the newly allocated large pages.");
PGM_REG_PROFILE(&pStats->StatR3IsValidLargePage, "/PGM/LargePage/Prof/R3/IsValid", "pgmPhysIsValidLargePage profiling - R3.");
PGM_REG_PROFILE(&pStats->StatRZIsValidLargePage, "/PGM/LargePage/Prof/RZ/IsValid", "pgmPhysIsValidLargePage profiling - RZ.");
PGM_REG_COUNTER(&pStats->StatR3DetectedConflicts, "/PGM/R3/DetectedConflicts", "The number of times PGMR3CheckMappingConflicts() detected a conflict.");
PGM_REG_PROFILE(&pStats->StatR3ResolveConflict, "/PGM/R3/ResolveConflict", "pgmR3SyncPTResolveConflict() profiling (includes the entire relocation).");
PGM_REG_COUNTER(&pStats->StatR3PhysRead, "/PGM/R3/Phys/Read", "The number of times PGMPhysRead was called.");
PGM_REG_COUNTER_BYTES(&pStats->StatR3PhysReadBytes, "/PGM/R3/Phys/Read/Bytes", "The number of bytes read by PGMPhysRead.");
PGM_REG_COUNTER(&pStats->StatR3PhysWrite, "/PGM/R3/Phys/Write", "The number of times PGMPhysWrite was called.");
PGM_REG_COUNTER_BYTES(&pStats->StatR3PhysWriteBytes, "/PGM/R3/Phys/Write/Bytes", "The number of bytes written by PGMPhysWrite.");
PGM_REG_COUNTER(&pStats->StatR3PhysSimpleRead, "/PGM/R3/Phys/Simple/Read", "The number of times PGMPhysSimpleReadGCPtr was called.");
PGM_REG_COUNTER_BYTES(&pStats->StatR3PhysSimpleReadBytes, "/PGM/R3/Phys/Simple/Read/Bytes", "The number of bytes read by PGMPhysSimpleReadGCPtr.");
PGM_REG_COUNTER(&pStats->StatR3PhysSimpleWrite, "/PGM/R3/Phys/Simple/Write", "The number of times PGMPhysSimpleWriteGCPtr was called.");
PGM_REG_COUNTER_BYTES(&pStats->StatR3PhysSimpleWriteBytes, "/PGM/R3/Phys/Simple/Write/Bytes", "The number of bytes written by PGMPhysSimpleWriteGCPtr.");
PGM_REG_PROFILE(&pStats->StatChunkFindCandidate, "/PGM/ChunkR3Map/Map/Find", "Chunk unmap find profiling.");
PGM_REG_PROFILE(&pStats->StatChunkUnmap, "/PGM/ChunkR3Map/Map/Unmap", "Chunk unmap of address space profiling.");
PGM_REG_PROFILE(&pStats->StatChunkMap, "/PGM/ChunkR3Map/Map/Map", "Chunk map of address space profiling.");
PGM_REG_COUNTER(&pStats->StatPageMapTlbFlushes, "/PGM/R3/Page/MapTlbFlushes", "TLB flushes (all contexts).");
PGM_REG_COUNTER(&pStats->StatPageMapTlbFlushEntry, "/PGM/R3/Page/MapTlbFlushEntry", "TLB entry flushes (all contexts).");
PGM_REG_PROFILE(&pStats->StatRZSyncCR3HandlerVirtualUpdate, "/PGM/RZ/SyncCR3/Handlers/VirtualUpdate", "Profiling of the virtual handler updates.");
PGM_REG_PROFILE(&pStats->StatRZSyncCR3HandlerVirtualReset, "/PGM/RZ/SyncCR3/Handlers/VirtualReset", "Profiling of the virtual handler resets.");
PGM_REG_PROFILE(&pStats->StatR3SyncCR3HandlerVirtualUpdate, "/PGM/R3/SyncCR3/Handlers/VirtualUpdate", "Profiling of the virtual handler updates.");
PGM_REG_PROFILE(&pStats->StatR3SyncCR3HandlerVirtualReset, "/PGM/R3/SyncCR3/Handlers/VirtualReset", "Profiling of the virtual handler resets.");
PGM_REG_COUNTER(&pStats->StatRZPhysHandlerReset, "/PGM/RZ/PhysHandlerReset", "The number of times PGMHandlerPhysicalReset is called.");
PGM_REG_COUNTER(&pStats->StatR3PhysHandlerReset, "/PGM/R3/PhysHandlerReset", "The number of times PGMHandlerPhysicalReset is called.");
PGM_REG_COUNTER(&pStats->StatRZPhysHandlerLookupHits, "/PGM/RZ/PhysHandlerLookupHits", "The number of cache hits when looking up physical handlers.");
PGM_REG_COUNTER(&pStats->StatR3PhysHandlerLookupHits, "/PGM/R3/PhysHandlerLookupHits", "The number of cache hits when looking up physical handlers.");
PGM_REG_COUNTER(&pStats->StatRZPhysHandlerLookupMisses, "/PGM/RZ/PhysHandlerLookupMisses", "The number of cache misses when looking up physical handlers.");
PGM_REG_COUNTER(&pStats->StatR3PhysHandlerLookupMisses, "/PGM/R3/PhysHandlerLookupMisses", "The number of cache misses when looking up physical handlers.");
PGM_REG_PROFILE(&pStats->StatRZVirtHandlerSearchByPhys, "/PGM/RZ/VirtHandlerSearchByPhys", "Profiling of pgmHandlerVirtualFindByPhysAddr.");
PGM_REG_PROFILE(&pStats->StatR3VirtHandlerSearchByPhys, "/PGM/R3/VirtHandlerSearchByPhys", "Profiling of pgmHandlerVirtualFindByPhysAddr.");
PGM_REG_COUNTER(&pStats->StatRZPageReplaceShared, "/PGM/RZ/Page/ReplacedShared", "Times a shared page was replaced.");
PGM_REG_COUNTER(&pStats->StatRZPageReplaceZero, "/PGM/RZ/Page/ReplacedZero", "Times the zero page was replaced.");
/// @todo PGM_REG_COUNTER(&pStats->StatRZPageHandyAllocs, "/PGM/RZ/Page/HandyAllocs", "Number of times we've allocated more handy pages.");
PGM_REG_COUNTER(&pStats->StatR3PageReplaceShared, "/PGM/R3/Page/ReplacedShared", "Times a shared page was replaced.");
PGM_REG_COUNTER(&pStats->StatR3PageReplaceZero, "/PGM/R3/Page/ReplacedZero", "Times the zero page was replaced.");
/// @todo PGM_REG_COUNTER(&pStats->StatR3PageHandyAllocs, "/PGM/R3/Page/HandyAllocs", "Number of times we've allocated more handy pages.");
PGM_REG_COUNTER(&pStats->StatRZPhysRead, "/PGM/RZ/Phys/Read", "The number of times PGMPhysRead was called.");
PGM_REG_COUNTER_BYTES(&pStats->StatRZPhysReadBytes, "/PGM/RZ/Phys/Read/Bytes", "The number of bytes read by PGMPhysRead.");
PGM_REG_COUNTER(&pStats->StatRZPhysWrite, "/PGM/RZ/Phys/Write", "The number of times PGMPhysWrite was called.");
PGM_REG_COUNTER_BYTES(&pStats->StatRZPhysWriteBytes, "/PGM/RZ/Phys/Write/Bytes", "The number of bytes written by PGMPhysWrite.");
PGM_REG_COUNTER(&pStats->StatRZPhysSimpleRead, "/PGM/RZ/Phys/Simple/Read", "The number of times PGMPhysSimpleReadGCPtr was called.");
PGM_REG_COUNTER_BYTES(&pStats->StatRZPhysSimpleReadBytes, "/PGM/RZ/Phys/Simple/Read/Bytes", "The number of bytes read by PGMPhysSimpleReadGCPtr.");
PGM_REG_COUNTER(&pStats->StatRZPhysSimpleWrite, "/PGM/RZ/Phys/Simple/Write", "The number of times PGMPhysSimpleWriteGCPtr was called.");
PGM_REG_COUNTER_BYTES(&pStats->StatRZPhysSimpleWriteBytes, "/PGM/RZ/Phys/Simple/Write/Bytes", "The number of bytes written by PGMPhysSimpleWriteGCPtr.");
PGM_REG_COUNTER(&pStats->StatRCInvlPgConflict, "/PGM/RC/InvlPgConflict", "Number of times PGMInvalidatePage() detected a mapping conflict.");
PGM_REG_COUNTER(&pStats->StatRCInvlPgSyncMonCR3, "/PGM/RC/InvlPgSyncMonitorCR3", "Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3.");
PGM_REG_COUNTER(&pStats->StatRCPhysRead, "/PGM/RC/Phys/Read", "The number of times PGMPhysRead was called.");
PGM_REG_COUNTER_BYTES(&pStats->StatRCPhysReadBytes, "/PGM/RC/Phys/Read/Bytes", "The number of bytes read by PGMPhysRead.");
PGM_REG_COUNTER(&pStats->StatRCPhysWrite, "/PGM/RC/Phys/Write", "The number of times PGMPhysWrite was called.");
PGM_REG_COUNTER_BYTES(&pStats->StatRCPhysWriteBytes, "/PGM/RC/Phys/Write/Bytes", "The number of bytes written by PGMPhysWrite.");
PGM_REG_COUNTER(&pStats->StatRCPhysSimpleRead, "/PGM/RC/Phys/Simple/Read", "The number of times PGMPhysSimpleReadGCPtr was called.");
PGM_REG_COUNTER_BYTES(&pStats->StatRCPhysSimpleReadBytes, "/PGM/RC/Phys/Simple/Read/Bytes", "The number of bytes read by PGMPhysSimpleReadGCPtr.");
PGM_REG_COUNTER(&pStats->StatRCPhysSimpleWrite, "/PGM/RC/Phys/Simple/Write", "The number of times PGMPhysSimpleWriteGCPtr was called.");
PGM_REG_COUNTER_BYTES(&pStats->StatRCPhysSimpleWriteBytes, "/PGM/RC/Phys/Simple/Write/Bytes", "The number of bytes written by PGMPhysSimpleWriteGCPtr.");
PGM_REG_COUNTER(&pStats->StatTrackVirgin, "/PGM/Track/Virgin", "The number of first time shadowings");
PGM_REG_COUNTER(&pStats->StatTrackAliased, "/PGM/Track/Aliased", "The number of times switching to cRef2, i.e. the page is being shadowed by two PTs.");
PGM_REG_COUNTER(&pStats->StatTrackAliasedMany, "/PGM/Track/AliasedMany", "The number of times we're tracking using cRef2.");
PGM_REG_COUNTER(&pStats->StatTrackAliasedLots, "/PGM/Track/AliasedLots", "The number of times we're hitting pages which has overflowed cRef2");
PGM_REG_COUNTER(&pStats->StatTrackOverflows, "/PGM/Track/Overflows", "The number of times the extent list grows too long.");
PGM_REG_COUNTER(&pStats->StatTrackNoExtentsLeft, "/PGM/Track/NoExtentLeft", "The number of times the extent list was exhausted.");
PGM_REG_PROFILE(&pStats->StatTrackDeref, "/PGM/Track/Deref", "Profiling of SyncPageWorkerTrackDeref (expensive).");
#define PGM_REG_COUNTER(a, b, c) \
rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
#define PGM_REG_PROFILE(a, b, c) \
rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
/* Per-VCPU statistics.  The "%u" in each sample path is substituted with the
   CPU id by STAMR3RegisterF via the idCpu argument bound into the
   PGM_REG_COUNTER/PGM_REG_PROFILE macros redefined just above. */
PGM_REG_COUNTER(&pPgmCpu->cGuestModeChanges, "/PGM/CPU%u/cGuestModeChanges", "Number of guest mode changes.");
#ifdef VBOX_WITH_STATISTICS
/* NOTE(review): the two STAMR3RegisterF calls below are truncated in this
   chunk -- the remainder of their argument lists (description, path with the
   loop index 'i') is not visible here. */
STAMR3RegisterF(pVM, &pCpuStats->StatSyncPtPD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
STAMR3RegisterF(pVM, &pCpuStats->StatSyncPagePD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
/* Ring-0 nested-paging misconfiguration handler statistics. */
PGM_REG_PROFILE(&pCpuStats->StatR0NpMiscfg, "/PGM/CPU%u/R0/NpMiscfg", "PGMR0Trap0eHandlerNPMisconfig() profiling.");
PGM_REG_COUNTER(&pCpuStats->StatR0NpMiscfgSyncPage, "/PGM/CPU%u/R0/NpMiscfgSyncPage", "SyncPage calls from PGMR0Trap0eHandlerNPMisconfig().");
/* RZ (ring-0 + raw-mode) #PF handler profiling, broken down by cause. */
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0e, "/PGM/CPU%u/RZ/Trap0e", "Profiling of the PGMTrap0eHandler() body.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2Ballooned, "/PGM/CPU%u/RZ/Trap0e/Time2/Ballooned", "Profiling of the Trap0eHandler body when the cause is read access to a ballooned page.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2CSAM, "/PGM/CPU%u/RZ/Trap0e/Time2/CSAM", "Profiling of the Trap0eHandler body when the cause is CSAM.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2DirtyAndAccessed, "/PGM/CPU%u/RZ/Trap0e/Time2/DirtyAndAccessedBits", "Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2GuestTrap, "/PGM/CPU%u/RZ/Trap0e/Time2/GuestTrap", "Profiling of the Trap0eHandler body when the cause is a guest trap.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2HndPhys, "/PGM/CPU%u/RZ/Trap0e/Time2/HandlerPhysical", "Profiling of the Trap0eHandler body when the cause is a physical handler.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2HndVirt, "/PGM/CPU%u/RZ/Trap0e/Time2/HandlerVirtual", "Profiling of the Trap0eHandler body when the cause is a virtual handler.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2HndUnhandled, "/PGM/CPU%u/RZ/Trap0e/Time2/HandlerUnhandled", "Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2InvalidPhys, "/PGM/CPU%u/RZ/Trap0e/Time2/InvalidPhys", "Profiling of the Trap0eHandler body when the cause is access to an invalid physical guest address.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2MakeWritable, "/PGM/CPU%u/RZ/Trap0e/Time2/MakeWritable", "Profiling of the Trap0eHandler body when the cause is that a page needed to be made writeable.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2Mapping, "/PGM/CPU%u/RZ/Trap0e/Time2/Mapping", "Profiling of the Trap0eHandler body when the cause is releated to the guest mappings.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2Misc, "/PGM/CPU%u/RZ/Trap0e/Time2/Misc", "Profiling of the Trap0eHandler body when the cause is not known.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2OutOfSync, "/PGM/CPU%u/RZ/Trap0e/Time2/OutOfSync", "Profiling of the Trap0eHandler body when the cause is an out-of-sync page.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2OutOfSyncHndPhys, "/PGM/CPU%u/RZ/Trap0e/Time2/OutOfSyncHndPhys", "Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2OutOfSyncHndVirt, "/PGM/CPU%u/RZ/Trap0e/Time2/OutOfSyncHndVirt", "Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2OutOfSyncHndObs, "/PGM/CPU%u/RZ/Trap0e/Time2/OutOfSyncObsHnd", "Profiling of the Trap0eHandler body when the cause is an obsolete handler page.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2SyncPT, "/PGM/CPU%u/RZ/Trap0e/Time2/SyncPT", "Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT.");
PGM_REG_PROFILE(&pCpuStats->StatRZTrap0eTime2WPEmulation, "/PGM/CPU%u/RZ/Trap0e/Time2/WPEmulation", "Profiling of the Trap0eHandler body when the cause is CR0.WP emulation.");
/* #PF cause counters. */
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eConflicts, "/PGM/CPU%u/RZ/Trap0e/Conflicts", "The number of times #PF was caused by an undetected conflict.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersMapping, "/PGM/CPU%u/RZ/Trap0e/Handlers/Mapping", "Number of traps due to access handlers in mappings.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersOutOfSync, "/PGM/CPU%u/RZ/Trap0e/Handlers/OutOfSync", "Number of traps due to out-of-sync handled pages.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersPhysical, "/PGM/CPU%u/RZ/Trap0e/Handlers/Physical", "Number of traps due to physical access handlers.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersPhysicalOpt, "/PGM/CPU%u/RZ/Trap0e/Handlers/PhysicalOpt", "Number of the physical access handler traps using the optimization.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersVirtual, "/PGM/CPU%u/RZ/Trap0e/Handlers/Virtual", "Number of traps due to virtual access handlers.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersVirtualByPhys, "/PGM/CPU%u/RZ/Trap0e/Handlers/VirtualByPhys", "Number of traps due to virtual access handlers by physical address.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersVirtualUnmarked,"/PGM/CPU%u/RZ/Trap0e/Handlers/VirtualUnmarked","Number of traps due to virtual access handlers by virtual address (without proper physical flags).");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersUnhandled, "/PGM/CPU%u/RZ/Trap0e/Handlers/Unhandled", "Number of traps due to access outside range of monitored page(s).");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eHandlersInvalid, "/PGM/CPU%u/RZ/Trap0e/Handlers/Invalid", "Number of traps due to access to invalid physical memory.");
/* #PF error-code classification: user vs supervisor, NP/write/reserved/NXE. */
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eUSNotPresentRead, "/PGM/CPU%u/RZ/Trap0e/Err/User/NPRead", "Number of user mode not present read page faults.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eUSNotPresentWrite, "/PGM/CPU%u/RZ/Trap0e/Err/User/NPWrite", "Number of user mode not present write page faults.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eUSWrite, "/PGM/CPU%u/RZ/Trap0e/Err/User/Write", "Number of user mode write page faults.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eUSReserved, "/PGM/CPU%u/RZ/Trap0e/Err/User/Reserved", "Number of user mode reserved bit page faults.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eUSNXE, "/PGM/CPU%u/RZ/Trap0e/Err/User/NXE", "Number of user mode NXE page faults.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eUSRead, "/PGM/CPU%u/RZ/Trap0e/Err/User/Read", "Number of user mode read page faults.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eSVNotPresentRead, "/PGM/CPU%u/RZ/Trap0e/Err/Supervisor/NPRead", "Number of supervisor mode not present read page faults.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eSVNotPresentWrite, "/PGM/CPU%u/RZ/Trap0e/Err/Supervisor/NPWrite", "Number of supervisor mode not present write page faults.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eSVWrite, "/PGM/CPU%u/RZ/Trap0e/Err/Supervisor/Write", "Number of supervisor mode write page faults.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eSVReserved, "/PGM/CPU%u/RZ/Trap0e/Err/Supervisor/Reserved", "Number of supervisor mode reserved bit page faults.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eSNXE, "/PGM/CPU%u/RZ/Trap0e/Err/Supervisor/NXE", "Number of supervisor mode NXE page faults.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eGuestPF, "/PGM/CPU%u/RZ/Trap0e/GuestPF", "Number of real guest page faults.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eGuestPFMapping, "/PGM/CPU%u/RZ/Trap0e/GuestPF/InMapping", "Number of real guest page faults in a mapping.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eWPEmulInRZ, "/PGM/CPU%u/RZ/Trap0e/WP/InRZ", "Number of guest page faults due to X86_CR0_WP emulation.");
PGM_REG_COUNTER(&pCpuStats->StatRZTrap0eWPEmulToR3, "/PGM/CPU%u/RZ/Trap0e/WP/ToR3", "Number of guest page faults due to X86_CR0_WP emulation (forward to R3 for emulation).");
/* NOTE(review): truncated call -- rest of the argument list not visible. */
STAMR3RegisterF(pVM, &pCpuStats->StatRZTrap0ePD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
/* Guest CR3/ROM write-monitoring outcomes. */
PGM_REG_COUNTER(&pCpuStats->StatRZGuestCR3WriteHandled, "/PGM/CPU%u/RZ/CR3WriteHandled", "The number of times the Guest CR3 change was successfully handled.");
PGM_REG_COUNTER(&pCpuStats->StatRZGuestCR3WriteUnhandled, "/PGM/CPU%u/RZ/CR3WriteUnhandled", "The number of times the Guest CR3 change was passed back to the recompiler.");
PGM_REG_COUNTER(&pCpuStats->StatRZGuestCR3WriteConflict, "/PGM/CPU%u/RZ/CR3WriteConflict", "The number of times the Guest CR3 monitoring detected a conflict.");
PGM_REG_COUNTER(&pCpuStats->StatRZGuestROMWriteHandled, "/PGM/CPU%u/RZ/ROMWriteHandled", "The number of times the Guest ROM change was successfully handled.");
PGM_REG_COUNTER(&pCpuStats->StatRZGuestROMWriteUnhandled, "/PGM/CPU%u/RZ/ROMWriteUnhandled", "The number of times the Guest ROM change was passed back to the recompiler.");
/* Ring-0/RC dynamic page-mapping cache statistics. */
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapMigrateInvlPg, "/PGM/CPU%u/RZ/DynMap/MigrateInvlPg", "invlpg count in PGMR0DynMapMigrateAutoSet.");
PGM_REG_PROFILE(&pCpuStats->StatRZDynMapGCPageInl, "/PGM/CPU%u/RZ/DynMap/PageGCPageInl", "Calls to pgmR0DynMapGCPageInlined.");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlHits, "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/Hits", "Hash table lookup hits.");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlMisses, "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/Misses", "Misses that falls back to the code common.");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlRamHits, "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/RamHits", "1st ram range hits.");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapGCPageInlRamMisses, "/PGM/CPU%u/RZ/DynMap/PageGCPageInl/RamMisses", "1st ram range misses, takes slow path.");
PGM_REG_PROFILE(&pCpuStats->StatRZDynMapHCPageInl, "/PGM/CPU%u/RZ/DynMap/PageHCPageInl", "Calls to pgmRZDynMapHCPageInlined.");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapHCPageInlHits, "/PGM/CPU%u/RZ/DynMap/PageHCPageInl/Hits", "Hash table lookup hits.");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapHCPageInlMisses, "/PGM/CPU%u/RZ/DynMap/PageHCPageInl/Misses", "Misses that falls back to the code common.");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPage, "/PGM/CPU%u/RZ/DynMap/Page", "Calls to pgmR0DynMapPage");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetOptimize, "/PGM/CPU%u/RZ/DynMap/Page/SetOptimize", "Calls to pgmRZDynMapOptimizeAutoSet.");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetSearchFlushes, "/PGM/CPU%u/RZ/DynMap/Page/SetSearchFlushes", "Set search restorting to subset flushes.");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetSearchHits, "/PGM/CPU%u/RZ/DynMap/Page/SetSearchHits", "Set search hits.");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSetSearchMisses, "/PGM/CPU%u/RZ/DynMap/Page/SetSearchMisses", "Set search misses.");
PGM_REG_PROFILE(&pCpuStats->StatRZDynMapHCPage, "/PGM/CPU%u/RZ/DynMap/Page/HCPage", "Calls to pgmRZDynMapHCPageCommon (ring-0).");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageHits0, "/PGM/CPU%u/RZ/DynMap/Page/Hits0", "Hits at iPage+0");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageHits1, "/PGM/CPU%u/RZ/DynMap/Page/Hits1", "Hits at iPage+1");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageHits2, "/PGM/CPU%u/RZ/DynMap/Page/Hits2", "Hits at iPage+2");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageInvlPg, "/PGM/CPU%u/RZ/DynMap/Page/InvlPg", "invlpg count in pgmR0DynMapPageSlow.");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlow, "/PGM/CPU%u/RZ/DynMap/Page/Slow", "Calls to pgmR0DynMapPageSlow - subtract this from pgmR0DynMapPage to get 1st level hits.");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlowLoopHits, "/PGM/CPU%u/RZ/DynMap/Page/SlowLoopHits" , "Hits in the loop path.");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlowLoopMisses, "/PGM/CPU%u/RZ/DynMap/Page/SlowLoopMisses", "Misses in the loop path. NonLoopMisses = Slow - SlowLoopHit - SlowLoopMisses");
//PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPageSlowLostHits, "/PGM/CPU%u/R0/DynMap/Page/SlowLostHits", "Lost hits.");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapSubsets, "/PGM/CPU%u/RZ/DynMap/Subsets", "Times PGMRZDynMapPushAutoSubset was called.");
PGM_REG_COUNTER(&pCpuStats->StatRZDynMapPopFlushes, "/PGM/CPU%u/RZ/DynMap/SubsetPopFlushes", "Times PGMRZDynMapPopAutoSubset flushes the subset.");
/* Histogram of dynamic-mapping set fill level, 10% buckets plus 100%. */
PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[0], "/PGM/CPU%u/RZ/DynMap/SetFilledPct000..09", "00-09% filled (RC: min(set-size, dynmap-size))");
PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[1], "/PGM/CPU%u/RZ/DynMap/SetFilledPct010..19", "10-19% filled (RC: min(set-size, dynmap-size))");
PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[2], "/PGM/CPU%u/RZ/DynMap/SetFilledPct020..29", "20-29% filled (RC: min(set-size, dynmap-size))");
PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[3], "/PGM/CPU%u/RZ/DynMap/SetFilledPct030..39", "30-39% filled (RC: min(set-size, dynmap-size))");
PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[4], "/PGM/CPU%u/RZ/DynMap/SetFilledPct040..49", "40-49% filled (RC: min(set-size, dynmap-size))");
PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[5], "/PGM/CPU%u/RZ/DynMap/SetFilledPct050..59", "50-59% filled (RC: min(set-size, dynmap-size))");
PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[6], "/PGM/CPU%u/RZ/DynMap/SetFilledPct060..69", "60-69% filled (RC: min(set-size, dynmap-size))");
PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[7], "/PGM/CPU%u/RZ/DynMap/SetFilledPct070..79", "70-79% filled (RC: min(set-size, dynmap-size))");
PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[8], "/PGM/CPU%u/RZ/DynMap/SetFilledPct080..89", "80-89% filled (RC: min(set-size, dynmap-size))");
PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[9], "/PGM/CPU%u/RZ/DynMap/SetFilledPct090..99", "90-99% filled (RC: min(set-size, dynmap-size))");
PGM_REG_COUNTER(&pCpuStats->aStatRZDynMapSetFilledPct[10], "/PGM/CPU%u/RZ/DynMap/SetFilledPct100", "100% filled (RC: min(set-size, dynmap-size))");
/* RZ CR3-sync statistics.  NOTE(review): "DstChacheHit" in the sample path
   below is a historic typo in the statistics namespace; renaming it would
   break external consumers of the path, so it is left as-is. */
PGM_REG_PROFILE(&pCpuStats->StatRZSyncCR3, "/PGM/CPU%u/RZ/SyncCR3", "Profiling of the PGMSyncCR3() body.");
PGM_REG_PROFILE(&pCpuStats->StatRZSyncCR3Handlers, "/PGM/CPU%u/RZ/SyncCR3/Handlers", "Profiling of the PGMSyncCR3() update handler section.");
PGM_REG_COUNTER(&pCpuStats->StatRZSyncCR3Global, "/PGM/CPU%u/RZ/SyncCR3/Global", "The number of global CR3 syncs.");
PGM_REG_COUNTER(&pCpuStats->StatRZSyncCR3NotGlobal, "/PGM/CPU%u/RZ/SyncCR3/NotGlobal", "The number of non-global CR3 syncs.");
PGM_REG_COUNTER(&pCpuStats->StatRZSyncCR3DstCacheHit, "/PGM/CPU%u/RZ/SyncCR3/DstChacheHit", "The number of times we got some kind of a cache hit.");
PGM_REG_COUNTER(&pCpuStats->StatRZSyncCR3DstFreed, "/PGM/CPU%u/RZ/SyncCR3/DstFreed", "The number of times we've had to free a shadow entry.");
PGM_REG_COUNTER(&pCpuStats->StatRZSyncCR3DstFreedSrcNP, "/PGM/CPU%u/RZ/SyncCR3/DstFreedSrcNP", "The number of times we've had to free a shadow entry for which the source entry was not present.");
PGM_REG_COUNTER(&pCpuStats->StatRZSyncCR3DstNotPresent, "/PGM/CPU%u/RZ/SyncCR3/DstNotPresent", "The number of times we've encountered a not present shadow entry for a present guest entry.");
PGM_REG_COUNTER(&pCpuStats->StatRZSyncCR3DstSkippedGlobalPD, "/PGM/CPU%u/RZ/SyncCR3/DstSkippedGlobalPD", "The number of times a global page directory wasn't flushed.");
PGM_REG_COUNTER(&pCpuStats->StatRZSyncCR3DstSkippedGlobalPT, "/PGM/CPU%u/RZ/SyncCR3/DstSkippedGlobalPT", "The number of times a page table with only global entries wasn't flushed.");
/* RZ page-table sync and accessed/dirty bit emulation statistics. */
PGM_REG_PROFILE(&pCpuStats->StatRZSyncPT, "/PGM/CPU%u/RZ/SyncPT", "Profiling of the pfnSyncPT() body.");
PGM_REG_COUNTER(&pCpuStats->StatRZSyncPTFailed, "/PGM/CPU%u/RZ/SyncPT/Failed", "The number of times pfnSyncPT() failed.");
PGM_REG_COUNTER(&pCpuStats->StatRZSyncPagePDNAs, "/PGM/CPU%u/RZ/SyncPagePDNAs", "The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit.");
PGM_REG_COUNTER(&pCpuStats->StatRZSyncPagePDOutOfSync, "/PGM/CPU%u/RZ/SyncPagePDOutOfSync", "The number of time we've encountered an out-of-sync PD in SyncPage.");
PGM_REG_COUNTER(&pCpuStats->StatRZAccessedPage, "/PGM/CPU%u/RZ/AccessedPage", "The number of pages marked not present for accessed bit emulation.");
PGM_REG_PROFILE(&pCpuStats->StatRZDirtyBitTracking, "/PGM/CPU%u/RZ/DirtyPage", "Profiling the dirty bit tracking in CheckPageFault().");
PGM_REG_COUNTER(&pCpuStats->StatRZDirtyPage, "/PGM/CPU%u/RZ/DirtyPage/Mark", "The number of pages marked read-only for dirty bit tracking.");
PGM_REG_COUNTER(&pCpuStats->StatRZDirtyPageBig, "/PGM/CPU%u/RZ/DirtyPage/MarkBig", "The number of 4MB pages marked read-only for dirty bit tracking.");
PGM_REG_COUNTER(&pCpuStats->StatRZDirtyPageSkipped, "/PGM/CPU%u/RZ/DirtyPage/Skipped", "The number of pages already dirty or readonly.");
PGM_REG_COUNTER(&pCpuStats->StatRZDirtyPageTrap, "/PGM/CPU%u/RZ/DirtyPage/Trap", "The number of traps generated for dirty bit tracking.");
PGM_REG_COUNTER(&pCpuStats->StatRZDirtyPageStale, "/PGM/CPU%u/RZ/DirtyPage/Stale", "The number of traps generated for dirty bit tracking (stale tlb entries).");
PGM_REG_COUNTER(&pCpuStats->StatRZDirtiedPage, "/PGM/CPU%u/RZ/DirtyPage/SetDirty", "The number of pages marked dirty because of write accesses.");
PGM_REG_COUNTER(&pCpuStats->StatRZDirtyTrackRealPF, "/PGM/CPU%u/RZ/DirtyPage/RealPF", "The number of real pages faults during dirty bit tracking.");
PGM_REG_COUNTER(&pCpuStats->StatRZPageAlreadyDirty, "/PGM/CPU%u/RZ/DirtyPage/AlreadySet", "The number of pages already marked dirty because of write accesses.");
/* RZ PGMInvalidatePage() statistics. */
PGM_REG_PROFILE(&pCpuStats->StatRZInvalidatePage, "/PGM/CPU%u/RZ/InvalidatePage", "PGMInvalidatePage() profiling.");
PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePage4KBPages, "/PGM/CPU%u/RZ/InvalidatePage/4KBPages", "The number of times PGMInvalidatePage() was called for a 4KB page.");
PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePage4MBPages, "/PGM/CPU%u/RZ/InvalidatePage/4MBPages", "The number of times PGMInvalidatePage() was called for a 4MB page.");
PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePage4MBPagesSkip, "/PGM/CPU%u/RZ/InvalidatePage/4MBPagesSkip","The number of times PGMInvalidatePage() skipped a 4MB page.");
PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePagePDMappings, "/PGM/CPU%u/RZ/InvalidatePage/PDMappings", "The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict).");
PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePagePDNAs, "/PGM/CPU%u/RZ/InvalidatePage/PDNAs", "The number of times PGMInvalidatePage() was called for a not accessed page directory.");
PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePagePDNPs, "/PGM/CPU%u/RZ/InvalidatePage/PDNPs", "The number of times PGMInvalidatePage() was called for a not present page directory.");
PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePagePDOutOfSync, "/PGM/CPU%u/RZ/InvalidatePage/PDOutOfSync", "The number of times PGMInvalidatePage() was called for an out of sync page directory.");
PGM_REG_COUNTER(&pCpuStats->StatRZInvalidatePageSkipped, "/PGM/CPU%u/RZ/InvalidatePage/Skipped", "The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3.");
/* RZ out-of-sync page accesses and TLB-flush statistics. */
PGM_REG_COUNTER(&pCpuStats->StatRZPageOutOfSyncSupervisor, "/PGM/CPU%u/RZ/OutOfSync/SuperVisor", "Number of traps due to pages out of sync (P) and times VerifyAccessSyncPage calls SyncPage.");
PGM_REG_COUNTER(&pCpuStats->StatRZPageOutOfSyncUser, "/PGM/CPU%u/RZ/OutOfSync/User", "Number of traps due to pages out of sync (P) and times VerifyAccessSyncPage calls SyncPage.");
PGM_REG_COUNTER(&pCpuStats->StatRZPageOutOfSyncSupervisorWrite,"/PGM/CPU%u/RZ/OutOfSync/SuperVisorWrite", "Number of traps due to pages out of sync (RW) and times VerifyAccessSyncPage calls SyncPage.");
PGM_REG_COUNTER(&pCpuStats->StatRZPageOutOfSyncUserWrite, "/PGM/CPU%u/RZ/OutOfSync/UserWrite", "Number of traps due to pages out of sync (RW) and times VerifyAccessSyncPage calls SyncPage.");
PGM_REG_COUNTER(&pCpuStats->StatRZPageOutOfSyncBallloon, "/PGM/CPU%u/RZ/OutOfSync/Balloon", "The number of times a ballooned page was accessed (read).");
PGM_REG_PROFILE(&pCpuStats->StatRZPrefetch, "/PGM/CPU%u/RZ/Prefetch", "PGMPrefetchPage profiling.");
PGM_REG_PROFILE(&pCpuStats->StatRZFlushTLB, "/PGM/CPU%u/RZ/FlushTLB", "Profiling of the PGMFlushTLB() body.");
PGM_REG_COUNTER(&pCpuStats->StatRZFlushTLBNewCR3, "/PGM/CPU%u/RZ/FlushTLB/NewCR3", "The number of times PGMFlushTLB was called with a new CR3, non-global. (switch)");
PGM_REG_COUNTER(&pCpuStats->StatRZFlushTLBNewCR3Global, "/PGM/CPU%u/RZ/FlushTLB/NewCR3Global", "The number of times PGMFlushTLB was called with a new CR3, global. (switch)");
PGM_REG_COUNTER(&pCpuStats->StatRZFlushTLBSameCR3, "/PGM/CPU%u/RZ/FlushTLB/SameCR3", "The number of times PGMFlushTLB was called with the same CR3, non-global. (flush)");
PGM_REG_COUNTER(&pCpuStats->StatRZFlushTLBSameCR3Global, "/PGM/CPU%u/RZ/FlushTLB/SameCR3Global", "The number of times PGMFlushTLB was called with the same CR3, global. (flush)");
PGM_REG_PROFILE(&pCpuStats->StatRZGstModifyPage, "/PGM/CPU%u/RZ/GstModifyPage", "Profiling of the PGMGstModifyPage() body.");
/* Ring-3 (R3) mirrors of the RZ statistics above -- same sample namespace
   under /PGM/CPU%u/R3/.  The "DstChacheHit" typo is repeated here on purpose
   to keep the R3 and RZ trees parallel. */
PGM_REG_PROFILE(&pCpuStats->StatR3SyncCR3, "/PGM/CPU%u/R3/SyncCR3", "Profiling of the PGMSyncCR3() body.");
PGM_REG_PROFILE(&pCpuStats->StatR3SyncCR3Handlers, "/PGM/CPU%u/R3/SyncCR3/Handlers", "Profiling of the PGMSyncCR3() update handler section.");
PGM_REG_COUNTER(&pCpuStats->StatR3SyncCR3Global, "/PGM/CPU%u/R3/SyncCR3/Global", "The number of global CR3 syncs.");
PGM_REG_COUNTER(&pCpuStats->StatR3SyncCR3NotGlobal, "/PGM/CPU%u/R3/SyncCR3/NotGlobal", "The number of non-global CR3 syncs.");
PGM_REG_COUNTER(&pCpuStats->StatR3SyncCR3DstCacheHit, "/PGM/CPU%u/R3/SyncCR3/DstChacheHit", "The number of times we got some kind of a cache hit.");
PGM_REG_COUNTER(&pCpuStats->StatR3SyncCR3DstFreed, "/PGM/CPU%u/R3/SyncCR3/DstFreed", "The number of times we've had to free a shadow entry.");
PGM_REG_COUNTER(&pCpuStats->StatR3SyncCR3DstFreedSrcNP, "/PGM/CPU%u/R3/SyncCR3/DstFreedSrcNP", "The number of times we've had to free a shadow entry for which the source entry was not present.");
PGM_REG_COUNTER(&pCpuStats->StatR3SyncCR3DstNotPresent, "/PGM/CPU%u/R3/SyncCR3/DstNotPresent", "The number of times we've encountered a not present shadow entry for a present guest entry.");
PGM_REG_COUNTER(&pCpuStats->StatR3SyncCR3DstSkippedGlobalPD, "/PGM/CPU%u/R3/SyncCR3/DstSkippedGlobalPD", "The number of times a global page directory wasn't flushed.");
PGM_REG_COUNTER(&pCpuStats->StatR3SyncCR3DstSkippedGlobalPT, "/PGM/CPU%u/R3/SyncCR3/DstSkippedGlobalPT", "The number of times a page table with only global entries wasn't flushed.");
PGM_REG_PROFILE(&pCpuStats->StatR3SyncPT, "/PGM/CPU%u/R3/SyncPT", "Profiling of the pfnSyncPT() body.");
PGM_REG_COUNTER(&pCpuStats->StatR3SyncPTFailed, "/PGM/CPU%u/R3/SyncPT/Failed", "The number of times pfnSyncPT() failed.");
PGM_REG_COUNTER(&pCpuStats->StatR3SyncPagePDNAs, "/PGM/CPU%u/R3/SyncPagePDNAs", "The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit.");
PGM_REG_COUNTER(&pCpuStats->StatR3SyncPagePDOutOfSync, "/PGM/CPU%u/R3/SyncPagePDOutOfSync", "The number of time we've encountered an out-of-sync PD in SyncPage.");
PGM_REG_COUNTER(&pCpuStats->StatR3AccessedPage, "/PGM/CPU%u/R3/AccessedPage", "The number of pages marked not present for accessed bit emulation.");
PGM_REG_PROFILE(&pCpuStats->StatR3DirtyBitTracking, "/PGM/CPU%u/R3/DirtyPage", "Profiling the dirty bit tracking in CheckPageFault().");
PGM_REG_COUNTER(&pCpuStats->StatR3DirtyPage, "/PGM/CPU%u/R3/DirtyPage/Mark", "The number of pages marked read-only for dirty bit tracking.");
PGM_REG_COUNTER(&pCpuStats->StatR3DirtyPageBig, "/PGM/CPU%u/R3/DirtyPage/MarkBig", "The number of 4MB pages marked read-only for dirty bit tracking.");
PGM_REG_COUNTER(&pCpuStats->StatR3DirtyPageSkipped, "/PGM/CPU%u/R3/DirtyPage/Skipped", "The number of pages already dirty or readonly.");
PGM_REG_COUNTER(&pCpuStats->StatR3DirtyPageTrap, "/PGM/CPU%u/R3/DirtyPage/Trap", "The number of traps generated for dirty bit tracking.");
PGM_REG_COUNTER(&pCpuStats->StatR3DirtiedPage, "/PGM/CPU%u/R3/DirtyPage/SetDirty", "The number of pages marked dirty because of write accesses.");
PGM_REG_COUNTER(&pCpuStats->StatR3DirtyTrackRealPF, "/PGM/CPU%u/R3/DirtyPage/RealPF", "The number of real pages faults during dirty bit tracking.");
PGM_REG_COUNTER(&pCpuStats->StatR3PageAlreadyDirty, "/PGM/CPU%u/R3/DirtyPage/AlreadySet", "The number of pages already marked dirty because of write accesses.");
PGM_REG_PROFILE(&pCpuStats->StatR3InvalidatePage, "/PGM/CPU%u/R3/InvalidatePage", "PGMInvalidatePage() profiling.");
PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePage4KBPages, "/PGM/CPU%u/R3/InvalidatePage/4KBPages", "The number of times PGMInvalidatePage() was called for a 4KB page.");
PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePage4MBPages, "/PGM/CPU%u/R3/InvalidatePage/4MBPages", "The number of times PGMInvalidatePage() was called for a 4MB page.");
PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePage4MBPagesSkip, "/PGM/CPU%u/R3/InvalidatePage/4MBPagesSkip","The number of times PGMInvalidatePage() skipped a 4MB page.");
PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePagePDMappings, "/PGM/CPU%u/R3/InvalidatePage/PDMappings", "The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict).");
PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePagePDNAs, "/PGM/CPU%u/R3/InvalidatePage/PDNAs", "The number of times PGMInvalidatePage() was called for a not accessed page directory.");
PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePagePDNPs, "/PGM/CPU%u/R3/InvalidatePage/PDNPs", "The number of times PGMInvalidatePage() was called for a not present page directory.");
PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePagePDOutOfSync, "/PGM/CPU%u/R3/InvalidatePage/PDOutOfSync", "The number of times PGMInvalidatePage() was called for an out of sync page directory.");
PGM_REG_COUNTER(&pCpuStats->StatR3InvalidatePageSkipped, "/PGM/CPU%u/R3/InvalidatePage/Skipped", "The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3.");
PGM_REG_COUNTER(&pCpuStats->StatR3PageOutOfSyncSupervisor, "/PGM/CPU%u/R3/OutOfSync/SuperVisor", "Number of traps due to pages out of sync and times VerifyAccessSyncPage calls SyncPage.");
PGM_REG_COUNTER(&pCpuStats->StatR3PageOutOfSyncUser, "/PGM/CPU%u/R3/OutOfSync/User", "Number of traps due to pages out of sync and times VerifyAccessSyncPage calls SyncPage.");
PGM_REG_COUNTER(&pCpuStats->StatR3PageOutOfSyncBallloon, "/PGM/CPU%u/R3/OutOfSync/Balloon", "The number of times a ballooned page was accessed (read).");
PGM_REG_PROFILE(&pCpuStats->StatR3Prefetch, "/PGM/CPU%u/R3/Prefetch", "PGMPrefetchPage profiling.");
PGM_REG_PROFILE(&pCpuStats->StatR3FlushTLB, "/PGM/CPU%u/R3/FlushTLB", "Profiling of the PGMFlushTLB() body.");
PGM_REG_COUNTER(&pCpuStats->StatR3FlushTLBNewCR3, "/PGM/CPU%u/R3/FlushTLB/NewCR3", "The number of times PGMFlushTLB was called with a new CR3, non-global. (switch)");
PGM_REG_COUNTER(&pCpuStats->StatR3FlushTLBNewCR3Global, "/PGM/CPU%u/R3/FlushTLB/NewCR3Global", "The number of times PGMFlushTLB was called with a new CR3, global. (switch)");
PGM_REG_COUNTER(&pCpuStats->StatR3FlushTLBSameCR3, "/PGM/CPU%u/R3/FlushTLB/SameCR3", "The number of times PGMFlushTLB was called with the same CR3, non-global. (flush)");
PGM_REG_COUNTER(&pCpuStats->StatR3FlushTLBSameCR3Global, "/PGM/CPU%u/R3/FlushTLB/SameCR3Global", "The number of times PGMFlushTLB was called with the same CR3, global. (flush)");
PGM_REG_PROFILE(&pCpuStats->StatR3GstModifyPage, "/PGM/CPU%u/R3/GstModifyPage", "Profiling of the PGMGstModifyPage() body.");
/* Visible tail of the registration routine: all registrations succeeded. */
return VINF_SUCCESS;
int rc;
&& (pVM->pgm.s.pbDynPageMapBaseGC >> X86_PD_PAE_SHIFT) != ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> X86_PD_PAE_SHIFT))
AssertRelease((pVM->pgm.s.pbDynPageMapBaseGC >> X86_PD_PAE_SHIFT) == ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> X86_PD_PAE_SHIFT));
return rc;
int rc;
pVM->pgm.s.paDynPageMap32BitPTEsGC = pMapping->aPTs[iPT].pPTRC + iPG * sizeof(pMapping->aPTs[0].pPTR3->a[0]);
pVM->pgm.s.paDynPageMapPaePTEsGC = pMapping->aPTs[iPT].paPaePTsRC + iPG * sizeof(pMapping->aPTs[0].paPaePTsR3->a[0]);
pVCpu->pgm.s.fGst32BitMbzBigPdeMask = ((uint32_t)(fMbzPageFrameMask >> (32 - 13)) & X86_PDE4M_PG_HIGH_MASK)
return rc;
LogFlow(("PGMR3Relocate %RGv to %RGv\n", pVM->pgm.s.GCPtrCR3Mapping, pVM->pgm.s.GCPtrCR3Mapping + offDelta));
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysHandlers, true, pgmR3RelocatePhysHandler, &offDelta);
RTAvlroGCPtrDoWithAll(&pVM->pgm.s.pTreesR3->VirtHandlers, true, pgmR3RelocateVirtHandler, &offDelta);
RTAvlroGCPtrDoWithAll(&pVM->pgm.s.pTreesR3->HyperVirtHandlers, true, pgmR3RelocateHyperVirtHandler, &offDelta);
#ifdef VBOX_WITH_STATISTICS
int rc;
#ifdef DEBUG
#ifdef VBOX_STRICT
/**
 * VM-state change callback (signature only visible in this chunk; the body is
 * elided).  NOTE(review): judging solely from the name it presumably clears
 * the no-more-physical-writes flag on a state transition -- confirm against
 * the full source before relying on that.
 */
static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser)
if (pszArgs)
fGuest = true;
fShadow = true;
fHost = true;
if (fGuest)
if (fShadow)
pHlp->pfnPrintf(pHlp, "Shadow paging mode: %s\n", PGMGetModeName(pVM->aCpus[0].pgm.s.enmShadowMode));
if (fHost)
const char *psz;
pVM,
Assert(PGMPhysGCPhys2R3PtrAssert(pVM, (RTGCPHYS)(CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK), sizeof(*pPDSrc)) == pPDSrc);
iPD,
iPD,
return rc;
switch (pgmMode)
case PGMMODE_PAE:
case PGMMODE_AMD64:
int rc;
pVM->pgm.s.paModeData = (PPGMMODEDATA)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMMODEDATA) * pgmModeDataMaxIndex());
#ifdef VBOX_WITH_64_BITS_GUESTS
#ifdef VBOX_WITH_64_BITS_GUESTS
case SUPPAGINGMODE_32_BIT:
# ifdef VBOX_WITH_64_BITS_GUESTS
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
# ifdef VBOX_WITH_64_BITS_GUESTS
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
# ifdef VBOX_WITH_64_BITS_GUESTS
AssertFailed();
#ifdef VBOX_WITH_64_BITS_GUESTS
return VINF_SUCCESS;
#ifdef VBOX_STRICT
#ifdef VBOX_STRICT
#ifdef VBOX_STRICT
/**
 * Calculates the shadow paging mode (fragment).
 *
 * NOTE(review): only the switch skeleton of this function survives in this
 * chunk -- the mode-selection assignments between the case labels are not
 * visible, so no claim is made here about which shadow mode each
 * guest/host-mode combination yields.  What IS visible: the outer switch is
 * on enmGuestMode with a nested switch on enmHostMode per guest mode, an
 * impossible combination returns PGMMODE_INVALID, and the normal path
 * returns enmShadowMode.
 *
 * @returns The proposed shadow mode, or PGMMODE_INVALID on an impossible
 *          guest/host combination.
 * @param   pVM             The VM handle.
 * @param   enmGuestMode    The guest paging mode.
 * @param   enmHostMode     The host paging mode.
 * @param   enmShadowMode   The current shadow mode.
 * @param   penmSwitcher    Where to return the context switcher to use.
 */
static PGMMODE pgmR3CalcShadowMode(PVM pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode, VMMSWITCHER *penmSwitcher)
switch (enmGuestMode)
/* Unpaged guest modes: shadow mode is picked purely from the host mode. */
case PGMMODE_REAL:
case PGMMODE_PROTECTED:
switch (enmHostMode)
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
#ifdef DEBUG_bird
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
#ifdef DEBUG_bird
/* 32-bit paged guest. */
case PGMMODE_32_BIT:
switch (enmHostMode)
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
#ifdef DEBUG_bird
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
#ifdef DEBUG_bird
/* PAE guest. */
case PGMMODE_PAE:
switch (enmHostMode)
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
/* Long-mode guest (with and without NX). */
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
switch (enmHostMode)
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
return PGMMODE_INVALID;
return enmShadowMode;
Log(("PGMR3ChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
PGMMODE enmShadowMode = pgmR3CalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode, &enmSwitcher);
#ifdef VBOX_WITH_RAW_MODE
return rc;
/* The nested shadow paging mode for AMD-V does change when running 64 bits guests on 32 bits hosts; typically PAE <-> AMD64 */
const bool fForceShwEnterExit = false;
LogFlow(("PGMR3ChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
return rc;
LogFlow(("PGMR3ChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
return rc;
int rc;
switch (enmShadowMode)
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_REAL:
case PGMMODE_PROTECTED:
return VERR_INTERNAL_ERROR;
return rc;
switch (enmGuestMode)
case PGMMODE_REAL:
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
default: AssertFailed(); break;
case PGMMODE_PROTECTED:
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
default: AssertFailed(); break;
case PGMMODE_32_BIT:
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
default: AssertFailed(); break;
case PGMMODE_PAE_NX:
case PGMMODE_PAE:
N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (General/Advanced)"));
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_32_BIT:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
default: AssertFailed(); break;
#ifdef VBOX_WITH_64_BITS_GUESTS
case PGMMODE_AMD64_NX:
case PGMMODE_AMD64:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
default: AssertFailed(); break;
return rc;
/* Exit the current shadow paging mode as well; nested paging and EPT use a root CR3 which will get flushed here. */
return rc;
("%RHp != %RHp %s\n", (RTHCPHYS)CPUMGetHyperCR3(pVCpu), PGMGetHyperCR3(pVCpu), PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
return rc;
static int pgmR3DumpHierarchyHCPaePT(PVM pVM, PX86PTPAE pPT, uint64_t u64Address, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
return VINF_SUCCESS;
static int pgmR3DumpHierarchyHCPaePD(PVM pVM, RTHCPHYS HCPhys, uint64_t u64Address, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
if (!pPD)
pHlp->pfnPrintf(pHlp, "%0*llx error! Page directory at HCPhys=%RHp was not found in the page pool!\n",
return VERR_INVALID_PARAMETER;
pHlp->pfnPrintf(pHlp, "%0*llx error! Mapping error! PT %d has HCPhysPT=%RHp not %RHp is in the PD.\n",
if (pPT)
return rc;
static int pgmR3DumpHierarchyHCPaePDPT(PVM pVM, RTHCPHYS HCPhys, uint64_t u64Address, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
if (!pPDPT)
pHlp->pfnPrintf(pHlp, "%0*llx error! Page directory pointer table at HCPhys=%RHp was not found in the page pool!\n",
return VERR_INVALID_PARAMETER;
if (fLongMode)
i << X86_PDPT_SHIFT,
int rc2 = pgmR3DumpHierarchyHCPaePD(pVM, Pdpe.u & X86_PDPE_PG_MASK, u64Address + ((uint64_t)i << X86_PDPT_SHIFT),
return rc;
static int pgmR3DumpHierarchyHcPaePML4(PVM pVM, RTHCPHYS HCPhys, uint32_t cr4, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
if (!pPML4)
return VERR_INVALID_PARAMETER;
uint64_t u64Address = ((uint64_t)i << X86_PML4_SHIFT) | (((uint64_t)i >> (X86_PML4_SHIFT - X86_PDPT_SHIFT - 1)) * 0xffff000000000000ULL);
int rc2 = pgmR3DumpHierarchyHCPaePDPT(pVM, Pml4e.u & X86_PML4E_PG_MASK, u64Address, cr4, true, cMaxDepth - 1, pHlp);
return rc;
return VINF_SUCCESS;
int pgmR3DumpHierarchyHC32BitPD(PVM pVM, uint32_t cr3, uint32_t cr4, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
if (!pPD)
pHlp->pfnPrintf(pHlp, "Page directory at %#x was not found in the page pool!\n", cr3 & X86_CR3_PAGE_MASK);
return VERR_INVALID_PARAMETER;
pHlp->pfnPrintf(pHlp, "%08x error! Mapping error! PT %d has HCPhysPT=%RHp not %RHp is in the PD.\n",
if (pPT)
pHlp->pfnPrintf(pHlp, "%08x error! Page table at %#x was not found in the page pool!\n", u32Address, HCPhys);
return rc;
Log(("Found %RGp at %RGv -> flags=%llx\n", PhysSearch, (RTGCPTR)(u32Address + (i << X86_PT_SHIFT)), fPageShw));
return VINF_SUCCESS;
bool fLongMode = false;
|| !pPD)
return VERR_INVALID_PARAMETER;
if (pPT)
return rc;
VMMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint64_t cr3, uint64_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
if (!pHlp)
if (!cMaxDepth)
return VINF_SUCCESS;
if (fLongMode)
return pgmR3DumpHierarchyHCPaePDPT(pVM, cr3 & X86_CR3_PAE_PAGE_MASK, 0, cr4, false, cMaxDepth, pHlp);
#ifdef VBOX_WITH_DEBUGGER
static DECLCALLBACK(int) pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
if (!pVM)
return rc;
return VINF_SUCCESS;
static DECLCALLBACK(int) pgmR3CmdError(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
if (!pVM)
if (!cArgs)
return VINF_SUCCESS;
static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
if (!pVM)
return rc;
return VINF_SUCCESS;
#ifdef VBOX_STRICT
static DECLCALLBACK(int) pgmR3CmdAssertCR3(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
if (!pVM)
return rc;
return VINF_SUCCESS;
static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
if (!pVM)
static DECLCALLBACK(int) pgmR3CmdPhysToFile(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
if (!pVM)
return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: Invalid 2nd argument '%s', must be 'nozero'.\n", paArgs[1].u.pszString);
int rc = RTFileOpen(&hFile, paArgs[0].u.pszString, RTFILE_O_WRITE | RTFILE_O_CREATE_REPLACE | RTFILE_O_DENY_WRITE);
return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: RTFileOpen(,'%s',) -> %Rrc.\n", paArgs[0].u.pszString, rc);
if (fIncZeroPgs)
case PGMPAGETYPE_RAM:
case PGMPAGETYPE_ROM:
case PGMPAGETYPE_MMIO2:
void const *pvPage;
pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: PGMPhysGCPhys2CCPtrReadOnly -> %Rrc at GCPhys=%RGp.\n", rc, GCPhys);
AssertFailed();
case PGMPAGETYPE_MMIO:
if (fIncZeroPgs)
pPage++;
return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Successfully saved physical memory to '%s'.\n", paArgs[0].u.pszString);
return VINF_SUCCESS;
typedef struct PGMCHECKINTARGS
static DECLCALLBACK(int) pgmR3CheckIntegrityPhysHandlerNode(PAVLROGCPHYSNODECORE pNode, void *pvUser)
AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %RGp-%RGp %s\n", pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
|| (pArgs->fLeftToRight ? pArgs->pPrevPhys->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys->Core.KeyLast > pCur->Core.Key),
pArgs->pPrevPhys, pArgs->pPrevPhys->Core.Key, pArgs->pPrevPhys->Core.KeyLast, pArgs->pPrevPhys->pszDesc,
static DECLCALLBACK(int) pgmR3CheckIntegrityVirtHandlerNode(PAVLROGCPTRNODECORE pNode, void *pvUser)
AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %RGv-%RGv %s\n", pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
|| (pArgs->fLeftToRight ? pArgs->pPrevVirt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevVirt->Core.KeyLast > pCur->Core.Key),
pArgs->pPrevVirt, pArgs->pPrevVirt->Core.Key, pArgs->pPrevVirt->Core.KeyLast, pArgs->pPrevVirt->pszDesc,
AssertReleaseMsg(pCur->aPhysToVirt[iPage].offVirtHandler == -RT_OFFSETOF(PGMVIRTHANDLER, aPhysToVirt[iPage]),
static DECLCALLBACK(int) pgmR3CheckIntegrityPhysToVirtHandlerNode(PAVLROGCPHYSNODECORE pNode, void *pvUser)
AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %RGp-%RGp\n", pCur, pCur->Core.Key, pCur->Core.KeyLast));
|| (pArgs->fLeftToRight ? pArgs->pPrevPhys2Virt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys2Virt->Core.KeyLast > pCur->Core.Key),
|| (pArgs->fLeftToRight ? pArgs->pPrevPhys2Virt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys2Virt->Core.KeyLast > pCur->Core.Key),
AssertReleaseMsg((pCur->offNextAlias & (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD)) == (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD),
pCur2 = (PPGMPHYS2VIRTHANDLER)((intptr_t)pCur + (pCur->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
AssertReleaseMsg((pCur2->offNextAlias & (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD)) == PGMPHYS2VIRTHANDLER_IN_TREE,
int cErrors = 0;
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysHandlers, true, pgmR3CheckIntegrityPhysHandlerNode, &Args);
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysHandlers, false, pgmR3CheckIntegrityPhysHandlerNode, &Args);
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->VirtHandlers, true, pgmR3CheckIntegrityVirtHandlerNode, &Args);
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->VirtHandlers, false, pgmR3CheckIntegrityVirtHandlerNode, &Args);
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->HyperVirtHandlers, true, pgmR3CheckIntegrityVirtHandlerNode, &Args);
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->HyperVirtHandlers, false, pgmR3CheckIntegrityVirtHandlerNode, &Args);
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysToVirtHandlers, true, pgmR3CheckIntegrityPhysToVirtHandlerNode, &Args);
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysToVirtHandlers, false, pgmR3CheckIntegrityPhysToVirtHandlerNode, &Args);