/* PGM.cpp revision 9f1e0743f565b1975bb4efd7311b30cfa0d3e384 */

/*
 * PGM - Page Manager and Monitor. (Mixing stuff here, not good?)
 *
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */
/** @page pg_pgm PGM - The Page Manager and Monitor
 *
 * @see grp_pgm,
 * @ref pg_pgm_pool,
 * @ref pg_pgm_phys.
 *
 *
 * @section sec_pgm_modes Paging Modes
 *
 * There are three memory contexts: Host Context (HC), Guest Context (GC)
 * and intermediate context. When talking about paging, HC can also be
 * referred to as "host paging", and GC referred to as "shadow paging".
 *
 * We define three basic paging modes: 32-bit, PAE and AMD64. The host paging
 * mode is defined by the host operating system. The shadow paging mode depends
 * on the host paging mode and the mode the guest is currently in. The
 * following relation between the two is defined:
 *
 * @verbatim
     Host  > 32-bit | PAE    | AMD64  |
    Guest  |        |        |        |
    ==v================================
    32-bit   32-bit   PAE      PAE
    -------|--------|--------|--------|
    PAE      PAE      PAE      PAE
    -------|--------|--------|--------|
    AMD64    AMD64    AMD64    AMD64
    -------|--------|--------|--------| @endverbatim
 *
 * All configurations except those on the diagonal (upper left) are expected to
 * require special effort from the switcher (i.e. be a bit slower).
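 *
 * A minimal sketch of the table above as code; the enum names below match the
 * real PGM/SUP enums, but the function itself is illustrative only (the actual
 * logic lives in pgmR3CalcShadowMode and handles many more cases):
 * @verbatim
    static PGMMODE pgmSketchCalcShadowMode(PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode)
    {
        if (enmGuestMode == PGMMODE_AMD64)
            return PGMMODE_AMD64;       /* AMD64 guest -> AMD64 shadow, always. */
        if (enmGuestMode == PGMMODE_PAE)
            return PGMMODE_PAE;         /* PAE guest -> PAE shadow, always. */
        /* 32-bit guest: 32-bit shadow only on a 32-bit host, PAE otherwise. */
        return enmHostMode == SUPPAGINGMODE_32_BIT ? PGMMODE_32_BIT : PGMMODE_PAE;
    }
   @endverbatim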
 *
 *
 * @section sec_pgm_shw The Shadow Memory Context
 *
 * Because guest context mappings require PDPT and PML4 entries to allow
 * writing on AMD64, the two upper levels will have fixed flags whatever the
 * guest is thinking of using there. So, when shadowing the PD level we will
 * calculate the effective flags of the PD and all the higher levels. In
 * legacy PAE mode this only applies to the PWT and PCD bits (the rest are
 * ignored/reserved/MBZ). We will ignore those bits for the present.
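 *
 * A rough sketch of the flag merging this implies, given guest entries
 * u64Pml4e, u64Pdpe and u64Pde (which bits are ANDed versus collected is an
 * assumption for illustration; the real code has per-level quirks):
 * @verbatim
    /* Permission/status bits are effectively ANDed down the levels... */
    uint64_t fEffective  = (u64Pml4e & u64Pdpe & u64Pde)
                         & (X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_A);
    /* ...while the cache control bits (PWT/PCD) are collected from all levels. */
    fEffective          |= (u64Pml4e | u64Pdpe | u64Pde)
                         & (X86_PTE_PWT | X86_PTE_PCD);
   @endverbatim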
 *
 *
 * @section sec_pgm_int The Intermediate Memory Context
 *
 * The world switch goes through an intermediate memory context whose purpose
 * is to provide different mappings of the switcher code. All guest mappings
 * are also present in this context.
 *
 * The switcher code is mapped at the same location as on the host, at an
 * identity mapped location (physical equals virtual address), and at the
 * hypervisor location. The identity mapped location is for world switches
 * that involve disabling paging.
 *
 * PGM maintains page tables for 32-bit, PAE and AMD64 paging modes. This
 * simplifies switching guest CPU mode and consistency at the cost of more
 * code to do the work. All memory used for those page tables is located below
 * 4GB (this includes page tables for guest context mappings).
 *
 *
 * @subsection subsec_pgm_int_gc Guest Context Mappings
 *
 * During assignment and relocation of a guest context mapping the intermediate
 * memory context is used to verify the new location.
 *
 * Guest context mappings are currently restricted to below 4GB, for reasons
 * of simplicity. This may change when we implement AMD64 support.
 *
 *
 * @section sec_pgm_misc Misc
 *
 * @subsection subsec_pgm_misc_diff Differences Between Legacy PAE and Long Mode PAE
 *
 * The differences between legacy PAE and long mode PAE are:
 *      -# PDPE bits 1, 2, 5 and 6 are defined differently. In legacy mode they
 *         are all marked down as must-be-zero, while in long mode 1, 2 and 5
 *         have the usual meanings while 6 is ignored (AMD). This means that
 *         upon switching to legacy PAE mode we'll have to clear these bits and
 *         when going to long mode they must be set. This applies to both
 *         intermediate and shadow contexts, however we don't need to do it for
 *         the intermediate one since we're executing with CR0.WP at that time.
 *      -# CR3 allows a 32-byte aligned address in legacy mode, while in long
 *         mode a page aligned one is required.
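 *
 * A minimal sketch of the PDPE fix-up implied by point 1 (the mask name is
 * made up for illustration; the bit numbers are those from the text):
 * @verbatim
    #define SKETCH_PDPE_LEGACY_MBZ_MASK (RT_BIT_64(1) | RT_BIT_64(2) | RT_BIT_64(5) | RT_BIT_64(6))

    /* Entering legacy PAE mode: bits 1, 2, 5 and 6 are must-be-zero. */
    u64Pdpe &= ~SKETCH_PDPE_LEGACY_MBZ_MASK;

    /* Entering long mode: bits 1 (RW), 2 (US) and 5 (A) must be set again. */
    u64Pdpe |= RT_BIT_64(1) | RT_BIT_64(2) | RT_BIT_64(5);
   @endverbatim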
 *
 *
 * @section sec_pgm_handlers Access Handlers
 *
 * Placeholder.
 *
 *
 * @subsection sec_pgm_handlers_virt Virtual Access Handlers
 *
 * We currently implement three types of virtual access handlers: ALL, WRITE
 * and HYPERVISOR (WRITE). See PGMVIRTHANDLERTYPE for some more details.
 *
 * The HYPERVISOR access handlers are kept in a separate tree since they don't
 * apply to physical pages (PGMTREES::HyperVirtHandlers) and only need to be
 * consulted in a special \#PF case. The ALL and WRITE handlers are in the
 * PGMTREES::VirtHandlers tree; the rest of this section is about these
 * handlers.
 *
 * We'll go through the life cycle of a handler and try to make sense of it
 * all; don't know how successful this is going to be...
 *
 *      1. A handler is registered through the PGMR3HandlerVirtualRegister and
 *         PGMHandlerVirtualRegisterEx APIs (see the usage sketch after this
 *         list). We check for conflicting virtual handlers and create a new
 *         node that is inserted into the AVL tree (range key). Then a full PGM
 *         resync is flagged (clear pool, sync cr3, update virtual bit of
 *         PGMPAGE).
 *
 *      2. The following PGMSyncCR3/SyncCR3 operation will first invoke
 *         HandlerVirtualUpdate.
 *
 *      2a. HandlerVirtualUpdate will look up all the pages covered by virtual
 *          handlers via the current guest CR3 and update the physical
 *          page -> virtual handler translation. Needless to say, this doesn't
 *          exactly scale very well. If any changes are detected, it will flag
 *          a virtual bit update just like we did on registration. PGMPHYS
 *          pages with changes will have their virtual handler state reset to
 *          NONE.
 *
 *      2b. The virtual bit update process will iterate all the pages covered
 *          by all the virtual handlers and update the PGMPAGE virtual handler
 *          state to the max of all virtual handlers on that page.
 *
 *      2c. Back in SyncCR3 we will now flush the entire shadow page cache to
 *          make sure we don't miss any alias mappings of the monitored pages.
 *
 *      2d. SyncCR3 will then proceed with syncing the CR3 table.
 *
 *      3. \#PF(np,read) on a page in the range. This will cause it to be synced
 *         read-only and resumed if it's a WRITE handler. If it's an ALL handler
 *         we will call the handlers like in the next step. If the physical
 *         mapping has changed we will - some time in the future - perform a
 *         handler callback (optional) and update the physical -> virtual
 *         handler cache.
 *
 *      4. \#PF(,write) on a page in the range. This will cause the handler to
 *         be invoked.
 *
 *      5. The guest invalidates the page and changes the physical backing or
 *         unmaps it. This should cause the invalidation callback to be invoked
 *         (it might not yet be 100% perfect). Exactly what happens next... is
 *         this where we mess up and end up out of sync for a while?
 *
 *      6. The handler is deregistered by the client via
 *         PGMHandlerVirtualDeregister. We will then set all PGMPAGEs in the
 *         physical -> virtual handler cache for this handler to NONE and
 *         trigger a full PGM resync (basically the same as in step 1), which
 *         means 2 is executed again.
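 *
 * A hedged usage sketch of step 1 (the parameter list is abbreviated and
 * illustrative, not the exact API signature):
 * @verbatim
    /* Register a WRITE handler for a guest-virtual range; this flags the
       full PGM resync described in step 1. */
    rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE,
                                     GCPtrStart, GCPtrLast
                                     /*, invalidation + access callbacks, description, ... */);
    /* Steps 2..2d then happen on the next PGMSyncCR3; steps 3 and 4 fire when
       the guest touches the range. */
   @endverbatim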
 *
 *
 * @subsubsection sub_sec_pgm_handler_virt_todo TODOs
 *
 * There are a bunch of things that need to be done to make the virtual
 * handlers work 100% correctly and work more efficiently.
 *
 * The first bit hasn't been implemented yet because it's going to slow the
 * whole mess down even more, and besides it seems to be working reliably for
 * our current uses. OTOH, some of the optimizations might end up more or less
 * implementing the missing bits, so we'll see.
 *
 * On the optimization side, the first thing to do is to try to avoid
 * unnecessary cache flushing. Then try to team up with the shadowing code to
 * track changes in mappings by means of access to them (shadow in), updates
 * to shadow pages, invlpg, and shadow PT discarding (perhaps).
 *
 * Some ideas that have popped up for optimization of current and new features:
 *    - A bitmap indicating where there are virtual handlers installed.
 *      (4KB pages => 2**20 pages in the 32-bit address space, so a 2**20-bit
 *      bitmap covers it 1:1!)
 *    - Further optimize this by min/max (needs min/max AVL getters).
 *    - Shadow page table entry bit (if any left)?
 *
 */


/** @page pg_pgm_phys PGM Physical Guest Memory Management
 *
 * Objectives:
 *      - Guest RAM over-commitment using memory ballooning,
 *        zero pages and general page sharing.
 *      - Moving or mirroring a VM onto a different physical machine.
 *
 *
 * @subsection subsec_pgmPhys_Definitions Definitions
 *
 * Allocation chunk - A RTR0MemObjAllocPhysNC object and the tracking
 * machinery associated with it.
 *
 *
 * @subsection subsec_pgmPhys_AllocPage Allocating a page.
 *
 * Initially we map *all* guest memory to the (per VM) zero page, which
 * means that none of the read functions will cause pages to be allocated.
 *
 * Exception: the access bit in page tables that have been shared. This must
 * be handled, but we must also make sure PGMGst*Modify doesn't make
 * unnecessary modifications.
 *
 * Allocation points:
 *      - PGMPhysSimpleWriteGCPhys and PGMPhysWrite.
 *      - Replacing a zero page mapping at \#PF.
 *      - Replacing a shared page mapping at \#PF.
 *      - ROM registration (currently MMR3RomRegister).
 *      - VM restore (pgmR3Load).
 *
 * For the first three it would make sense to keep a few pages handy
 * until we've reached the max memory commitment for the VM.
 *
 * For the ROM registration, we know exactly how many pages we need
 * and will request these from ring-0. For restore, we will save
 * the number of non-zero pages in the saved state and allocate
 * them up front. This would allow the ring-0 component to refuse
 * the request if there isn't sufficient memory available for VM use.
 *
 * Btw. for both ROM and restore allocations we won't be requiring
 * zeroed pages as they are going to be filled instantly.
 *
 *
 * @subsection subsec_pgmPhys_FreePage Freeing a page
 *
 * There are a few points where a page can be freed:
 *      - After being replaced by the zero page.
 *      - After being replaced by a shared page.
 *      - After being ballooned by the guest additions.
 *      - At reset.
 *      - At restore.
 *
 * When freeing one or more pages they will be returned to the ring-0
 * component and replaced by the zero page.
 *
 * The reasoning for clearing out all the pages on reset is that it will
 * return us to the exact same state as on power on, and may thereby help
 * us reduce the memory load on the system. Further it might have a
 * (temporary) positive influence on memory fragmentation (@see subsec_pgmPhys_Fragmentation).
 *
 * On restore, as mentioned under the allocation topic, pages should be
 * freed / allocated depending on how many are actually required by the
 * new VM state. The simplest approach is to do like on reset, and free
 * all non-ROM pages and then allocate what we need.
 *
 * A measure to prevent some fragmentation would be to let each allocation
 * chunk have some affinity towards the VM having allocated the most pages
 * from it. Also, try to make sure to allocate from allocation chunks that
 * are almost full. Admittedly, both these measures might work counter to
 * our intentions and it's probably not worth putting a lot of effort,
 * cpu time or memory into this.
 *
 *
 * @subsection subsec_pgmPhys_SharePage Sharing a page
 *
 * The basic idea is that there will be an idle priority kernel
 * thread walking the non-shared VM pages, hashing them and looking for
 * pages with the same checksum. If such pages are found, it will compare
 * them byte-by-byte to see if they actually are identical. If found to be
 * identical it will allocate a shared page, copy the content, check that
 * the page didn't change while doing this, and finally request both the
 * VMs to use the shared page instead. If the page is all zeros (special
 * checksum and byte-by-byte check) it will request the VM that owns it
 * to replace it with the zero page.
 *
 * To make this efficient, we will have to make sure not to try to share a
 * page that will change its contents soon. This part requires the most work.
 * A simple idea would be to request the VM to write monitor the page for
 * a while to make sure it isn't modified any time soon. Also, it may
 * make sense to skip pages that are being write monitored since this
 * information is readily available to the thread if it works on the
 * per-VM guest memory structures (presently called PGMRAMRANGE).
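 *
 * A minimal sketch of that idle thread's inner loop (all helper names are
 * hypothetical; the checksum/compare/request machinery doesn't exist yet):
 * @verbatim
    /* Walk one PGMRAMRANGE worth of pages at idle priority. */
    for (uint32_t iPage = 0; iPage < cPages; iPage++)
    {
        if (sketchPageIsSharedOrWriteMonitored(pRam, iPage))
            continue;                                   /* skip, see above */
        uint32_t uChecksum = sketchChecksumPage(pRam, iPage);
        if (sketchIsAllZeros(pRam, iPage))              /* special checksum + byte check */
            sketchRequestZeroPage(pVM, pRam, iPage);
        else if (sketchFindAndCompareByteByByte(uChecksum, pRam, iPage))
            sketchRequestSharePage(pVM, pRam, iPage);   /* re-checks for changes */
    }
   @endverbatim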
 *
 *
 * @subsection subsec_pgmPhys_Fragmentation Fragmentation Concerns and Counter Measures
 *
 * The pages are organized in allocation chunks in ring-0; this is a necessity
 * if we wish to have an OS agnostic approach to this whole thing. (On Linux we
 * could easily work on a page-by-page basis if we liked. Whether this is possible
 * or efficient on NT I don't quite know.) Fragmentation within these chunks may
 * become a problem as part of the idea here is that we wish to return memory to
 * the host system.
 *
 * For instance, starting two VMs at the same time, they will both allocate the
 * guest memory on-demand and if permitted their page allocations will be
 * intermixed. Shut down one of the two VMs and it will be difficult to return
 * any memory to the host system because the page allocations for the two VMs
 * are mixed up in the same allocation chunks.
 *
 * To further complicate matters, when pages are freed because they have been
 * ballooned or become shared/zero the whole idea is that the page is supposed
 * to be reused by another VM or returned to the host system. This will cause
 * allocation chunks to contain pages belonging to different VMs and prevent
 * returning memory to the host when one of those VMs shuts down.
 *
 * The only way to really deal with this problem is to move pages. This can
 * either be done at VM shutdown and/or by the idle priority worker thread
 * that will be responsible for finding sharable/zero pages. The mechanisms
 * involved for coercing a VM to move a page (or to do it for it) will be
 * the same as when telling it to share/zero a page.
 *
 *
 * @subsection subsec_pgmPhys_Tracking Tracking Structures And Their Cost
 *
 * There's a difficult balance between keeping the per-page tracking structures
 * (global and guest page) easy to use and keeping them from eating too much
 * memory. We have limited virtual memory resources available when operating in
 * 32-bit kernel space (on 64-bit it's quite a different story). The
 * tracking structures will be designed such that we can deal with up
 * to 32GB of memory on a 32-bit system and essentially unlimited on 64-bit ones.
 *
 *
 * @subsubsection subsubsec_pgmPhys_Tracking_Kernel Kernel Space
 *
 * @see pg_GMM
 *
 *
 * @subsubsection subsubsec_pgmPhys_Tracking_PerVM Per-VM
 *
 * Fixed info is the physical address of the page (HCPhys) and the page id
 * (described above). Theoretically we'll need 48(-12) bits for the HCPhys part.
 * Today we're restricting ourselves to 40(-12) bits because this is the current
 * restriction of all AMD64 implementations (I think Barcelona will up this
 * to 48(-12) bits, not that it really matters) and I needed the bits for
 * tracking mappings of a page. 48-12 = 36. That leaves 28 bits, which means a
 * decent range for the page id: 2^(28+12) = 1TB.
 *
 * In addition to these, we'll have to keep maintaining the page flags as we
 * currently do. Although it wouldn't harm to optimize these quite a bit, like
 * for instance the ROM shouldn't depend on having a write handler installed
 * in order for it to become read-only. A RO/RW bit should be considered so
 * that the page syncing code doesn't have to mess about checking multiple
 * flag combinations (ROM || RW handler || write monitored) in order to
 * figure out how to set up a shadow PTE. But this, of course, is second
 * priority at present. Currently this requires 12 bits, but could probably
 * be optimized to ~8.
 *
 * Then there's the 24 bits used to track which shadow page tables are
 * currently mapping a page for the purpose of speeding up physical
 * access handlers, and thereby the page pool cache. More bits for this
 * purpose wouldn't hurt IIRC.
 *
 * Then there are a couple of new bits in which we need to record what kind of
 * page this is: shared, zero, normal or write-monitored-normal. This'll
 * require 2 bits. One bit might be needed for indicating whether a
 * write monitored page has been written to. And yet another one or
 * two for tracking migration status. 3-4 bits total then.
 *
 * Whatever is left can be used to record the shareability of a
 * page. The page checksum will not be stored in the per-VM table as
 * the idle thread will not be permitted to do modifications to it.
 * It will instead have to keep its own working set of potentially
 * shareable pages and their checksums and stuff.
 *
 * For the present we'll keep the current packing of the
 * PGMRAMRANGE::aHCPhys to keep the changes simple, except of course
 * we'll have to change it to a struct with a total of 128 bits at
 * our disposal.
 *
 * The initial layout will be like this:
 * @verbatim
    RTHCPHYS HCPhys;            The current stuff.
        63:40                   Current shadow PT tracking stuff.
        39:12                   The physical page frame number.
        11:0                    The current flags.
    uint32_t u28PageId : 28;    The page id.
    uint32_t u2State : 2;       The page state { zero, shared, normal, write monitored }.
    uint32_t fWrittenTo : 1;    Whether a write monitored page was written to.
    uint32_t u1Reserved : 1;    Reserved for later.
    uint32_t u32Reserved;       Reserved for later, mostly sharing stats.
 @endverbatim
 *
 * The final layout will be something like this:
 * @verbatim
    RTHCPHYS HCPhys;            The current stuff.
        63:48                   High page id (12+).
        47:12                   The physical page frame number.
        11:0                    Low page id.
    uint32_t fReadOnly : 1;     Whether it's a readonly page (rom or monitored in some way).
    uint32_t u3Type : 3;        The page type {RESERVED, MMIO, MMIO2, ROM, shadowed ROM, RAM}.
    uint32_t u2PhysMon : 2;     Physical access handler type {none, read, write, all}.
    uint32_t u2VirtMon : 2;     Virtual access handler type {none, read, write, all}.
    uint32_t u2State : 2;       The page state { zero, shared, normal, write monitored }.
    uint32_t fWrittenTo : 1;    Whether a write monitored page was written to.
    uint32_t u20Reserved : 20;  Reserved for later, mostly sharing stats.
    uint32_t u32Tracking;       The shadow PT tracking stuff, roughly.
 @endverbatim
 *
 * Cost wise, this means we'll double the cost for guest memory. There isn't
 * any way around that I'm afraid. It means that the cost of dealing out 32GB
 * of memory to one or more VMs is: (32GB >> PAGE_SHIFT) * 16 bytes, or 128MB.
 * Or another example, the VM heap cost when assigning 1GB to a VM will be: 4MB.
 *
 * A couple of cost examples for the total cost per-VM + kernel:
 *      - 32-bit Windows and 32-bit Linux:
 *           1GB guest ram, 256K pages:   4MB +  2MB(+) =   6MB
 *           4GB guest ram,   1M pages:  16MB +  8MB(+) =  24MB
 *          32GB guest ram,   8M pages: 128MB + 64MB(+) = 192MB
 *      - 64-bit Windows and 64-bit Linux:
 *           1GB guest ram, 256K pages:   4MB +  3MB(+) =   7MB
 *           4GB guest ram,   1M pages:  16MB + 12MB(+) =  28MB
 *          32GB guest ram,   8M pages: 128MB + 96MB(+) = 224MB
 *
 * UPDATE - 2007-09-27:
 * Will need a ballooned flag/state too because we cannot
 * trust the guest 100% and reporting the same page as ballooned more
 * than once will put the GMM off balance.
 *
 *
 * @subsection subsec_pgmPhys_Serializing Serializing Access
 *
 * Initially, we'll try a simple scheme:
 *
 *      - The per-VM RAM tracking structures (PGMRAMRANGE) are only modified
 *        by the EMT thread of that VM while in the pgm critsect.
 *      - Other threads in the VM process that need to make reliable use of
 *        the per-VM RAM tracking structures will enter the critsect.
 *      - No process external thread or kernel thread will ever try to enter
 *        the pgm critical section, as that just won't work.
 *      - The idle thread (and similar threads) doesn't need 100% reliable
 *        data when performing its tasks as the EMT thread will be the one to
 *        do the actual changes later anyway. So, as long as it only accesses
 *        the main ram range, it can do so by somehow preventing the VM from
 *        being destroyed while it works on it...
 *      - The over-commitment management, including the allocating/freeing
 *        chunks, is serialized by a ring-0 mutex lock (a fast one since the
 *        more mundane mutex implementation is broken on Linux).
 *      - A separate mutex protects the set of allocation chunks so
 *        that pages can be shared and/or freed up while some other VM is
 *        allocating more chunks. This mutex can be taken from under the other
 *        one, but not the other way around (see the sketch below).
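 *
 * The lock ordering of the last two points as a sketch (the mutex handle
 * names are invented for illustration):
 * @verbatim
    RTSemFastMutexRequest(hMtxGMM);         /* the over-commitment / chunk mgmt lock */
    RTSemFastMutexRequest(hMtxChunkSet);    /* OK: taken from under the other one... */
    RTSemFastMutexRelease(hMtxChunkSet);
    RTSemFastMutexRelease(hMtxGMM);         /* ...but never the other way around. */
   @endverbatim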
 *
 *
 * @subsection subsec_pgmPhys_Request VM Request interface
 *
 * When in ring-0 it will become necessary to send requests to a VM so it can,
 * for instance, move a page while defragmenting during VM destroy. The idle
 * thread will make use of this interface to request VMs to set up shared
 * pages and to perform write monitoring of pages.
 *
 * I would propose an interface similar to the current VMReq interface, similar
 * in that it doesn't require locking and that the one sending the request may
 * wait for completion if it wishes to. This shouldn't be very difficult to
 * realize.
 *
 * The requests themselves are also pretty simple. They are basically:
 *      -# Check that some precondition is still true.
 *      -# Do the update.
 *      -# Update all shadow page tables involved with the page.
 *
 * The 3rd step is identical to what we're already doing when updating a
 * physical handler, see pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs.
 *
 *
 * @section sec_pgmPhys_MappingCaches Mapping Caches
 *
 * In order to be able to map in and out memory and to be able to support
 * guests with more RAM than we've got virtual address space, we'll be
 * employing a mapping cache. There is already a tiny one for GC (see
 * PGMGCDynMapGCPageEx) and we'll create a similar one for ring-0 unless we
 * decide to set up a dedicated memory context for the HWACCM execution.
 *
 *
 * @subsection subsec_pgmPhys_MappingCaches_R3 Ring-3
 *
 * We've considered implementing the ring-3 mapping cache page based but found
 * that this was bothersome when one had to take into account TLBs+SMP and
 * portability (missing the necessary APIs on several platforms). There were
 * also some performance concerns with this approach which hadn't quite been
 * worked out.
 *
 * Instead, we'll be mapping allocation chunks into the VM process. This
 * simplifies matters quite a bit since we don't need to invent any new ring-0
 * stuff, only some minor RTR0MEMOBJ mapping stuff. The main concern here,
 * compared to the previous idea, is that mapping or unmapping a 1MB chunk is
 * more costly than a single page, although how much more costly is uncertain.
 * We'll try to address this by using a very big cache, preferably bigger than
 * the actual VM RAM size if possible. The current VM RAM sizes should give
 * some idea for 32-bit boxes, while on 64-bit we can probably get away with
 * employing an unlimited cache.
 *
 * The cache has two parts, as already indicated: the ring-3 side and the
 * ring-0 side.
 *
 * The ring-0 side will be tied to the page allocator since it will operate on
 * the memory objects it contains. It will therefore require the first ring-0
 * mutex discussed in @ref subsec_pgmPhys_Serializing. We'll need
 * some double housekeeping wrt who has mapped what, I think, since both
 * VMMR0.r0 and RTR0MemObj will keep track of mapping relations.
 *
 * The ring-3 part will be protected by the pgm critsect. For simplicity, we'll
 * require anyone that desires to do changes to the mapping cache to do that
 * from within this critsect. Alternatively, we could employ a separate critsect
 * for serializing changes to the mapping cache as this would reduce potential
 * contention with other threads accessing mappings unrelated to the changes
 * that are in process. We can see about this later; contention will show
 * up in the statistics anyway, so it'll be simple to tell.
 *
 * The organization of the ring-3 part will be very much like how the allocation
 * chunks are organized in ring-0, that is in an AVL tree by chunk id. To avoid
 * having to walk the tree all the time, we'll have a couple of lookaside entries
 * like we do for I/O ports and MMIO in IOM.
 *
 * The simplified flow of a PGMPhysRead/Write function (see the sketch after
 * this list):
 *      -# Enter the PGM critsect.
 *      -# Lookup GCPhys in the ram ranges and get the Page ID.
 *      -# Calc the Allocation Chunk ID from the Page ID.
 *      -# Check the lookaside entries and then the AVL tree for the Chunk ID.
 *         If not found in cache:
 *              -# Call ring-0 and request it to be mapped and supply
 *                 a chunk to be unmapped if the cache is maxed out already.
 *              -# Insert the new mapping into the AVL tree (id + R3 address).
 *      -# Update the relevant lookaside entry and return the mapping address.
 *      -# Do the read/write according to monitoring flags and everything.
 *      -# Leave the critsect.
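 *
 * The same flow as a compressed sketch (helper names invented; the chunk id
 * shift and the input variables are assumptions for illustration):
 * @verbatim
    pgmLock(pVM);
    uint32_t idPage  = sketchRamRangeLookup(pVM, GCPhys);       /* -> Page ID */
    uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;             /* -> Chunk ID */
    void    *pvChunk = sketchLookasideThenAvlTree(pVM, idChunk);
    if (!pvChunk)                                               /* not mapped yet */
        pvChunk = sketchRing0MapChunk(pVM, idChunk /*, victim chunk if maxed out */);
    sketchUpdateLookaside(pVM, idChunk, pvChunk);
    memcpy(pvBuf, (uint8_t *)pvChunk + offChunkPage, cbRead);   /* honoring monitoring flags */
    pgmUnlock(pVM);
   @endverbatim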
 *
 *
 * @section sec_pgmPhys_Fallback Fallback
 *
 * Currently all the "second tier" hosts will not support the RTR0MemObjAllocPhysNC
 * API and thus require a fallback.
 *
 * So, when RTR0MemObjAllocPhysNC returns VERR_NOT_SUPPORTED the page allocator
 * will return to the ring-3 caller (and later ring-0) and ask it to seed
 * the page allocator with some fresh pages (VERR_GMM_SEED_ME). Ring-3 will
 * then perform an SUPPageAlloc(cbChunk >> PAGE_SHIFT) call and make a
 * "SeededAllocPages" call to ring-0.
 *
 * The first time ring-0 sees the VERR_NOT_SUPPORTED failure it will disable
 * all page sharing (zero page detection will continue). It will also force
 * all allocations to come from the VM which seeded the page. Both these
 * measures are taken to make sure that there will never be any need for
 * mapping anything into ring-3 - everything will be mapped already.
 *
 * Whether we'll continue to use the current MM locked memory management
 * for this I don't quite know (I'd prefer not to and just ditch that all
 * together); we'll see what's simplest to do.
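 *
 * The seeding handshake as a sketch (the ring-0 entry point name is taken
 * from the quoted "SeededAllocPages" above; exact signatures are assumptions):
 * @verbatim
    /* ring-0: */
    int rc = RTR0MemObjAllocPhysNC(&hMemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
    if (rc == VERR_NOT_SUPPORTED)
        return VERR_GMM_SEED_ME;                    /* ask ring-3 for pages */

    /* ring-3, on seeing VERR_GMM_SEED_ME: */
    rc = SUPPageAlloc(cbChunk >> PAGE_SHIFT, &pvSeed);
    if (RT_SUCCESS(rc))
        rc = sketchCallRing0SeededAllocPages(pVM, pvSeed, cbChunk >> PAGE_SHIFT);
   @endverbatim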
 *
 *
 * @section sec_pgmPhys_Changes Changes
 *
 * Breakdown of the changes involved?
 */


/** Saved state data unit version. */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "PGMInternal.h"
#ifdef DEBUG_bird
#ifdef VBOX_STRICT
static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser);
static PGMMODE pgmR3CalcShadowMode(PVM pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode, VMMSWITCHER *penmSwitcher);
#ifdef VBOX_WITH_STATISTICS
#ifdef VBOX_WITH_DEBUGGER
static DECLCALLBACK(int) pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
static DECLCALLBACK(int) pgmR3CmdMap(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
# ifdef VBOX_STRICT
static DECLCALLBACK(int) pgmR3CmdAssertCR3(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
#ifdef VBOX_WITH_DEBUGGER
/* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, pResultDesc, fFlags, pfnHandler, pszSyntax, ....pszDescription */
#ifdef VBOX_STRICT
{ "pgmsyncalways", 0, 0, NULL, 0, NULL, 0, pgmR3CmdSyncAlways, "", "Toggle permanent CR3 syncing." },
#include "PGMShw.h"
#include "PGMGst.h"
#include "PGMBth.h"
#include "PGMGst.h"
#include "PGMBth.h"
#include "PGMGst.h"
#include "PGMBth.h"
#include "PGMShw.h"
#include "PGMBth.h"
#include "PGMBth.h"
#include "PGMBth.h"
#include "PGMGst.h"
#include "PGMBth.h"
#include "PGMShw.h"
#ifdef VBOX_WITH_64_BITS_GUESTS
# include "PGMGst.h"
# include "PGMBth.h"
#include "PGMShw.h"
#include "PGMBth.h"
#include "PGMBth.h"
#include "PGMBth.h"
#include "PGMBth.h"
#ifdef VBOX_WITH_64_BITS_GUESTS
# include "PGMBth.h"
#include "PGMShw.h"
#include "PGMBth.h"
#include "PGMBth.h"
#include "PGMBth.h"
#include "PGMBth.h"
#ifdef VBOX_WITH_64_BITS_GUESTS
# include "PGMBth.h"
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
#ifdef VBOX_STRICT
cbRam = 0;
return rc;
return rc;
"Recognizes 'all', 'guest', 'shadow' and 'host' as arguments, defaulting to 'all' if nothing's given.",
STAM_REL_REG(pVM, &pVM->pgm.s.cGuestModeChanges, STAMTYPE_COUNTER, "/PGM/cGuestModeChanges", STAMUNIT_OCCURENCES, "Number of guest mode changes.");
#ifdef VBOX_WITH_STATISTICS
#ifdef VBOX_WITH_DEBUGGER
static bool fRegisteredCmds = false;
if (!fRegisteredCmds)
fRegisteredCmds = true;
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
return rc;
return VERR_NO_PAGE_MEMORY;
AssertRelease(pVM->pgm.s.HCPhysInterPD != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPD & PAGE_OFFSET_MASK));
AssertRelease(pVM->pgm.s.HCPhysInterPaePDPT != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePDPT & PAGE_OFFSET_MASK));
AssertRelease(pVM->pgm.s.HCPhysInterPaePML4 != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePML4 & PAGE_OFFSET_MASK) && pVM->pgm.s.HCPhysInterPaePML4 < 0xffffffff);
pVM->pgm.s.pInterPaePDPT64->a[i].u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A | PGM_PLXFLAGS_PERMANENT
pVM->pgm.s.pInterPaePML4->a[i].u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A | PGM_PLXFLAGS_PERMANENT
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
AssertRelease((uintptr_t)pVM->pgm.s.apShwPaePDsR3[0] + PAGE_SIZE == (uintptr_t)pVM->pgm.s.apShwPaePDsR3[1]);
AssertRelease((uintptr_t)pVM->pgm.s.apShwPaePDsR3[1] + PAGE_SIZE == (uintptr_t)pVM->pgm.s.apShwPaePDsR3[2]);
AssertRelease((uintptr_t)pVM->pgm.s.apShwPaePDsR3[2] + PAGE_SIZE == (uintptr_t)pVM->pgm.s.apShwPaePDsR3[3]);
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
return VERR_NO_PAGE_MEMORY;
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
pVM->pgm.s.pShwPaePdptR3->a[i].u = X86_PDPE_P | PGM_PLXFLAGS_PERMANENT | pVM->pgm.s.aHCPhysPaePDs[i];
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_GLOBAL:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
#ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
AssertMsgFailed(("Host mode %d (64-bit) is not supported by non-64bit builds\n", pVM->pgm.s.enmHostMode));
# ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
pVM->pgm.s.aHCPhysPaePDs[0], pVM->pgm.s.aHCPhysPaePDs[1], pVM->pgm.s.aHCPhysPaePDs[2], pVM->pgm.s.aHCPhysPaePDs[3],
LogRel(("Debug: apInterPTs={%RHp,%RHp} apInterPaePTs={%RHp,%RHp} apInterPaePDs={%RHp,%RHp,%RHp,%RHp} pInterPaePDPT64=%RHp\n",
MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[0]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[1]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[2]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[3]),
return VINF_SUCCESS;
return rc;
#ifdef VBOX_WITH_STATISTICS
STAM_REG(pVM, &pPGM->cAllPages, STAMTYPE_U32, "/PGM/Page/cAllPages", STAMUNIT_OCCURENCES, "The total number of pages.");
STAM_REG(pVM, &pPGM->cPrivatePages, STAMTYPE_U32, "/PGM/Page/cPrivatePages", STAMUNIT_OCCURENCES, "The number of private pages.");
STAM_REG(pVM, &pPGM->cSharedPages, STAMTYPE_U32, "/PGM/Page/cSharedPages", STAMUNIT_OCCURENCES, "The number of shared pages.");
STAM_REG(pVM, &pPGM->cZeroPages, STAMTYPE_U32, "/PGM/Page/cZeroPages", STAMUNIT_OCCURENCES, "The number of zero backed pages.");
STAM_REG(pVM, &pPGM->ChunkR3Map.c, STAMTYPE_U32, "/PGM/ChunkR3Map/c", STAMUNIT_OCCURENCES, "Number of mapped chunks.");
STAM_REG(pVM, &pPGM->ChunkR3Map.cMax, STAMTYPE_U32, "/PGM/ChunkR3Map/cMax", STAMUNIT_OCCURENCES, "Maximum number of mapped chunks.");
#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
STAM_REG(pVM, &pPGM->StatTrackVirgin, STAMTYPE_COUNTER, "/PGM/Track/Virgin", STAMUNIT_OCCURENCES, "The number of first time shadowings");
STAM_REG(pVM, &pPGM->StatTrackAliased, STAMTYPE_COUNTER, "/PGM/Track/Aliased", STAMUNIT_OCCURENCES, "The number of times switching to cRef2, i.e. the page is being shadowed by two PTs.");
STAM_REG(pVM, &pPGM->StatTrackAliasedMany, STAMTYPE_COUNTER, "/PGM/Track/AliasedMany", STAMUNIT_OCCURENCES, "The number of times we're tracking using cRef2.");
STAM_REG(pVM, &pPGM->StatTrackAliasedLots, STAMTYPE_COUNTER, "/PGM/Track/AliasedLots", STAMUNIT_OCCURENCES, "The number of times we're hitting pages which has overflowed cRef2");
STAM_REG(pVM, &pPGM->StatTrackOverflows, STAMTYPE_COUNTER, "/PGM/Track/Overflows", STAMUNIT_OCCURENCES, "The number of times the extent list grows to long.");
STAM_REG(pVM, &pPGM->StatTrackDeref, STAMTYPE_PROFILE, "/PGM/Track/Deref", STAMUNIT_OCCURENCES, "Profiling of SyncPageWorkerTrackDeref (expensive).");
STAMR3RegisterF(pVM, &pPGM->StatSyncPtPD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
STAMR3RegisterF(pVM, &pPGM->StatSyncPagePD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
STAM_REG(pVM, &pPGM->StatR3DetectedConflicts, STAMTYPE_COUNTER, "/PGM/R3/DetectedConflicts", STAMUNIT_OCCURENCES, "The number of times PGMR3CheckMappingConflicts() detected a conflict.");
STAM_REG(pVM, &pPGM->StatR3ResolveConflict, STAMTYPE_PROFILE, "/PGM/R3/ResolveConflict", STAMUNIT_TICKS_PER_CALL, "pgmR3SyncPTResolveConflict() profiling (includes the entire relocation).");
STAM_REG(pVM, &pPGM->StatR3GuestPDWrite, STAMTYPE_COUNTER, "/PGM/R3/PDWrite", STAMUNIT_OCCURENCES, "The total number of times pgmHCGuestPDWriteHandler() was called.");
STAM_REG(pVM, &pPGM->StatR3GuestPDWriteConflict, STAMTYPE_COUNTER, "/PGM/R3/PDWriteConflict", STAMUNIT_OCCURENCES, "The number of times pgmHCGuestPDWriteHandler() detected a conflict.");
STAM_REG(pVM, &pPGM->StatR3DynRamTotal, STAMTYPE_COUNTER, "/PGM/DynAlloc/TotalAlloc", STAMUNIT_MEGABYTES, "Allocated MBs of guest ram.");
STAM_REG(pVM, &pPGM->StatR3DynRamGrow, STAMTYPE_COUNTER, "/PGM/DynAlloc/Grow", STAMUNIT_OCCURENCES, "Nr of pgmr3PhysGrowRange calls.");
STAM_REG(pVM, &pPGM->StatR0DynMapMigrateInvlPg, STAMTYPE_COUNTER, "/PGM/R0/DynMapMigrateInvlPg", STAMUNIT_OCCURENCES, "invlpg count in PGMDynMapMigrateAutoSet.");
STAM_REG(pVM, &pPGM->StatR0DynMapGCPageInl, STAMTYPE_PROFILE, "/PGM/R0/DynMapPageGCPageInl", STAMUNIT_TICKS_PER_CALL, "Calls to pgmR0DynMapGCPageInlined.");
STAM_REG(pVM, &pPGM->StatR0DynMapGCPageInlHits, STAMTYPE_COUNTER, "/PGM/R0/DynMapPageGCPageInl/Hits", STAMUNIT_OCCURENCES, "Hash table lookup hits.");
STAM_REG(pVM, &pPGM->StatR0DynMapGCPageInlMisses, STAMTYPE_COUNTER, "/PGM/R0/DynMapPageGCPageInl/Misses", STAMUNIT_OCCURENCES, "Misses that falls back to code common with PGMDynMapHCPage.");
STAM_REG(pVM, &pPGM->StatR0DynMapGCPageInlRamHits, STAMTYPE_COUNTER, "/PGM/R0/DynMapPageGCPageInl/RamHits", STAMUNIT_OCCURENCES, "1st ram range hits.");
STAM_REG(pVM, &pPGM->StatR0DynMapGCPageInlRamMisses, STAMTYPE_COUNTER, "/PGM/R0/DynMapPageGCPageInl/RamMisses", STAMUNIT_OCCURENCES, "1st ram range misses, takes slow path.");
STAM_REG(pVM, &pPGM->StatR0DynMapHCPageInl, STAMTYPE_PROFILE, "/PGM/R0/DynMapPageHCPageInl", STAMUNIT_TICKS_PER_CALL, "Calls to pgmR0DynMapHCPageInlined.");
STAM_REG(pVM, &pPGM->StatR0DynMapHCPageInlHits, STAMTYPE_COUNTER, "/PGM/R0/DynMapPageHCPageInl/Hits", STAMUNIT_OCCURENCES, "Hash table lookup hits.");
STAM_REG(pVM, &pPGM->StatR0DynMapHCPageInlMisses, STAMTYPE_COUNTER, "/PGM/R0/DynMapPageHCPageInl/Misses", STAMUNIT_OCCURENCES, "Misses that falls back to code common with PGMDynMapHCPage.");
STAM_REG(pVM, &pPGM->StatR0DynMapPage, STAMTYPE_COUNTER, "/PGM/R0/DynMapPage", STAMUNIT_OCCURENCES, "Calls to pgmR0DynMapPage");
STAM_REG(pVM, &pPGM->StatR0DynMapSetOptimize, STAMTYPE_COUNTER, "/PGM/R0/DynMapPage/SetOptimize", STAMUNIT_OCCURENCES, "Calls to pgmDynMapOptimizeAutoSet.");
STAM_REG(pVM, &pPGM->StatR0DynMapSetSearchFlushes, STAMTYPE_COUNTER, "/PGM/R0/DynMapPage/SetSearchFlushes",STAMUNIT_OCCURENCES, "Set search restorting to subset flushes.");
STAM_REG(pVM, &pPGM->StatR0DynMapSetSearchHits, STAMTYPE_COUNTER, "/PGM/R0/DynMapPage/SetSearchHits", STAMUNIT_OCCURENCES, "Set search hits.");
STAM_REG(pVM, &pPGM->StatR0DynMapSetSearchMisses, STAMTYPE_COUNTER, "/PGM/R0/DynMapPage/SetSearchMisses", STAMUNIT_OCCURENCES, "Set search misses.");
STAM_REG(pVM, &pPGM->StatR0DynMapHCPage, STAMTYPE_PROFILE, "/PGM/R0/DynMapPage/HCPage", STAMUNIT_TICKS_PER_CALL, "Calls to PGMDynMapHCPage (ring-0).");
STAM_REG(pVM, &pPGM->StatR0DynMapPageHits0, STAMTYPE_COUNTER, "/PGM/R0/DynMapPage/Hits0", STAMUNIT_OCCURENCES, "Hits at iPage+0");
STAM_REG(pVM, &pPGM->StatR0DynMapPageHits1, STAMTYPE_COUNTER, "/PGM/R0/DynMapPage/Hits1", STAMUNIT_OCCURENCES, "Hits at iPage+1");
STAM_REG(pVM, &pPGM->StatR0DynMapPageHits2, STAMTYPE_COUNTER, "/PGM/R0/DynMapPage/Hits2", STAMUNIT_OCCURENCES, "Hits at iPage+2");
STAM_REG(pVM, &pPGM->StatR0DynMapPageInvlPg, STAMTYPE_COUNTER, "/PGM/R0/DynMapPage/InvlPg", STAMUNIT_OCCURENCES, "invlpg count in pgmR0DynMapPageSlow.");
STAM_REG(pVM, &pPGM->StatR0DynMapPageSlow, STAMTYPE_COUNTER, "/PGM/R0/DynMapPage/Slow", STAMUNIT_OCCURENCES, "Calls to pgmR0DynMapPageSlow - subtract this from pgmR0DynMapPage to get 1st level hits.");
STAM_REG(pVM, &pPGM->StatR0DynMapPageSlowLoopHits, STAMTYPE_COUNTER, "/PGM/R0/DynMapPage/SlowLoopHits" , STAMUNIT_OCCURENCES, "Hits in the loop path.");
    STAM_REG(pVM, &pPGM->StatR0DynMapPageSlowLoopMisses,    STAMTYPE_COUNTER, "/PGM/R0/DynMapPage/SlowLoopMisses",  STAMUNIT_OCCURENCES,    "Misses in the loop path. NonLoopMisses = Slow - SlowLoopHits - SlowLoopMisses.");
//STAM_REG(pVM, &pPGM->StatR0DynMapPageSlowLostHits, STAMTYPE_COUNTER, "/PGM/R0/DynMapPage/SlowLostHits", STAMUNIT_OCCURENCES, "Lost hits.");
STAM_REG(pVM, &pPGM->StatR0DynMapSubsets, STAMTYPE_COUNTER, "/PGM/R0/Subsets", STAMUNIT_OCCURENCES, "Times PGMDynMapPushAutoSubset was called.");
STAM_REG(pVM, &pPGM->StatR0DynMapPopFlushes, STAMTYPE_COUNTER, "/PGM/R0/SubsetPopFlushes", STAMUNIT_OCCURENCES, "Times PGMDynMapPopAutoSubset flushes the subset.");
STAM_REG(pVM, &pPGM->aStatR0DynMapSetSize[0], STAMTYPE_COUNTER, "/PGM/R0/SetSize000..09", STAMUNIT_OCCURENCES, "00-09% filled");
STAM_REG(pVM, &pPGM->aStatR0DynMapSetSize[1], STAMTYPE_COUNTER, "/PGM/R0/SetSize010..19", STAMUNIT_OCCURENCES, "10-19% filled");
STAM_REG(pVM, &pPGM->aStatR0DynMapSetSize[2], STAMTYPE_COUNTER, "/PGM/R0/SetSize020..29", STAMUNIT_OCCURENCES, "20-29% filled");
STAM_REG(pVM, &pPGM->aStatR0DynMapSetSize[3], STAMTYPE_COUNTER, "/PGM/R0/SetSize030..39", STAMUNIT_OCCURENCES, "30-39% filled");
STAM_REG(pVM, &pPGM->aStatR0DynMapSetSize[4], STAMTYPE_COUNTER, "/PGM/R0/SetSize040..49", STAMUNIT_OCCURENCES, "40-49% filled");
STAM_REG(pVM, &pPGM->aStatR0DynMapSetSize[5], STAMTYPE_COUNTER, "/PGM/R0/SetSize050..59", STAMUNIT_OCCURENCES, "50-59% filled");
STAM_REG(pVM, &pPGM->aStatR0DynMapSetSize[6], STAMTYPE_COUNTER, "/PGM/R0/SetSize060..69", STAMUNIT_OCCURENCES, "60-69% filled");
STAM_REG(pVM, &pPGM->aStatR0DynMapSetSize[7], STAMTYPE_COUNTER, "/PGM/R0/SetSize070..79", STAMUNIT_OCCURENCES, "70-79% filled");
STAM_REG(pVM, &pPGM->aStatR0DynMapSetSize[8], STAMTYPE_COUNTER, "/PGM/R0/SetSize080..89", STAMUNIT_OCCURENCES, "80-89% filled");
STAM_REG(pVM, &pPGM->aStatR0DynMapSetSize[9], STAMTYPE_COUNTER, "/PGM/R0/SetSize090..99", STAMUNIT_OCCURENCES, "90-99% filled");
STAM_REG(pVM, &pPGM->aStatR0DynMapSetSize[10], STAMTYPE_COUNTER, "/PGM/R0/SetSize100", STAMUNIT_OCCURENCES, "100% filled");
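    /*
     * Note on the SetSize deciles above: a minimal sketch of how a sample is
     * expected to land in a bucket (the update site is not shown here, so this
     * arithmetic is an assumption, not the verified implementation):
     *
     *     uint32_t iBucket = cEntries >= cMax ? 10 : cEntries * 10 / cMax;
     *     STAM_COUNTER_INC(&pPGM->aStatR0DynMapSetSize[iBucket]);
     *
     * E.g. a mapping set that is 37% full increments the "30-39% filled" bucket.
     */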
STAM_REG(pVM, &pPGM->StatRCDynMapCacheHits, STAMTYPE_COUNTER, "/PGM/RC/DynMapCache/Hits" , STAMUNIT_OCCURENCES, "Number of dynamic page mapping cache hits.");
STAM_REG(pVM, &pPGM->StatRCDynMapCacheMisses, STAMTYPE_COUNTER, "/PGM/RC/DynMapCache/Misses" , STAMUNIT_OCCURENCES, "Number of dynamic page mapping cache misses.");
STAM_REG(pVM, &pPGM->StatRCInvlPgConflict, STAMTYPE_COUNTER, "/PGM/RC/InvlPgConflict", STAMUNIT_OCCURENCES, "Number of times PGMInvalidatePage() detected a mapping conflict.");
STAM_REG(pVM, &pPGM->StatRCInvlPgSyncMonCR3, STAMTYPE_COUNTER, "/PGM/RC/InvlPgSyncMonitorCR3", STAMUNIT_OCCURENCES, "Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3.");
STAM_REG(pVM, &pPGM->StatRZTrap0e, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMTrap0eHandler() body.");
STAM_REG(pVM, &pPGM->StatRZTrap0eTimeCheckPageFault, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e/Time/CheckPageFault", STAMUNIT_TICKS_PER_CALL, "Profiling of checking for dirty/access emulation faults.");
STAM_REG(pVM, &pPGM->StatRZTrap0eTimeSyncPT, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e/Time/SyncPT", STAMUNIT_TICKS_PER_CALL, "Profiling of lazy page table syncing.");
STAM_REG(pVM, &pPGM->StatRZTrap0eTimeMapping, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e/Time/Mapping", STAMUNIT_TICKS_PER_CALL, "Profiling of checking virtual mappings.");
STAM_REG(pVM, &pPGM->StatRZTrap0eTimeOutOfSync, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e/Time/OutOfSync", STAMUNIT_TICKS_PER_CALL, "Profiling of out of sync page handling.");
STAM_REG(pVM, &pPGM->StatRZTrap0eTimeHandlers, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e/Time/Handlers", STAMUNIT_TICKS_PER_CALL, "Profiling of checking handlers.");
STAM_REG(pVM, &pPGM->StatRZTrap0eTime2CSAM, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e/Time2/CSAM", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is CSAM.");
STAM_REG(pVM, &pPGM->StatRZTrap0eTime2DirtyAndAccessed, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e/Time2/DirtyAndAccessedBits", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation.");
STAM_REG(pVM, &pPGM->StatRZTrap0eTime2GuestTrap, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e/Time2/GuestTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is a guest trap.");
STAM_REG(pVM, &pPGM->StatRZTrap0eTime2HndPhys, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e/Time2/HandlerPhysical", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is a physical handler.");
STAM_REG(pVM, &pPGM->StatRZTrap0eTime2HndVirt, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e/Time2/HandlerVirtual", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is a virtual handler.");
STAM_REG(pVM, &pPGM->StatRZTrap0eTime2HndUnhandled, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e/Time2/HandlerUnhandled", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page.");
STAM_REG(pVM, &pPGM->StatRZTrap0eTime2Misc, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e/Time2/Misc", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is not known.");
STAM_REG(pVM, &pPGM->StatRZTrap0eTime2OutOfSync, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e/Time2/OutOfSync", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is an out-of-sync page.");
STAM_REG(pVM, &pPGM->StatRZTrap0eTime2OutOfSyncHndPhys, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e/Time2/OutOfSyncHndPhys", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page.");
STAM_REG(pVM, &pPGM->StatRZTrap0eTime2OutOfSyncHndVirt, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e/Time2/OutOfSyncHndVirt", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page.");
STAM_REG(pVM, &pPGM->StatRZTrap0eTime2OutOfSyncHndObs, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e/Time2/OutOfSyncObsHnd", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is an obsolete handler page.");
STAM_REG(pVM, &pPGM->StatRZTrap0eTime2SyncPT, STAMTYPE_PROFILE, "/PGM/RZ/Trap0e/Time2/SyncPT", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT.");
STAM_REG(pVM, &pPGM->StatRZTrap0eConflicts, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Conflicts", STAMUNIT_OCCURENCES, "The number of times #PF was caused by an undetected conflict.");
STAM_REG(pVM, &pPGM->StatRZTrap0eHandlersMapping, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Handlers/Mapping", STAMUNIT_OCCURENCES, "Number of traps due to access handlers in mappings.");
STAM_REG(pVM, &pPGM->StatRZTrap0eHandlersOutOfSync, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Handlers/OutOfSync", STAMUNIT_OCCURENCES, "Number of traps due to out-of-sync handled pages.");
STAM_REG(pVM, &pPGM->StatRZTrap0eHandlersPhysical, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Handlers/Physical", STAMUNIT_OCCURENCES, "Number of traps due to physical access handlers.");
STAM_REG(pVM, &pPGM->StatRZTrap0eHandlersVirtual, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Handlers/Virtual", STAMUNIT_OCCURENCES, "Number of traps due to virtual access handlers.");
STAM_REG(pVM, &pPGM->StatRZTrap0eHandlersVirtualByPhys, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Handlers/VirtualByPhys", STAMUNIT_OCCURENCES, "Number of traps due to virtual access handlers by physical address.");
STAM_REG(pVM, &pPGM->StatRZTrap0eHandlersVirtualUnmarked,STAMTYPE_COUNTER,"/PGM/RZ/Trap0e/Handlers/VirtualUnmarked",STAMUNIT_OCCURENCES, "Number of traps due to virtual access handlers by virtual address (without proper physical flags).");
STAM_REG(pVM, &pPGM->StatRZTrap0eHandlersUnhandled, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Handlers/Unhandled", STAMUNIT_OCCURENCES, "Number of traps due to access outside range of monitored page(s).");
STAM_REG(pVM, &pPGM->StatRZTrap0eHandlersInvalid, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Handlers/Invalid", STAMUNIT_OCCURENCES, "Number of traps due to access to invalid physical memory.");
STAM_REG(pVM, &pPGM->StatRZTrap0eUSNotPresentRead, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Err/User/NPRead", STAMUNIT_OCCURENCES, "Number of user mode not present read page faults.");
STAM_REG(pVM, &pPGM->StatRZTrap0eUSNotPresentWrite, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Err/User/NPWrite", STAMUNIT_OCCURENCES, "Number of user mode not present write page faults.");
STAM_REG(pVM, &pPGM->StatRZTrap0eUSWrite, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Err/User/Write", STAMUNIT_OCCURENCES, "Number of user mode write page faults.");
STAM_REG(pVM, &pPGM->StatRZTrap0eUSReserved, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Err/User/Reserved", STAMUNIT_OCCURENCES, "Number of user mode reserved bit page faults.");
STAM_REG(pVM, &pPGM->StatRZTrap0eUSNXE, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Err/User/NXE", STAMUNIT_OCCURENCES, "Number of user mode NXE page faults.");
STAM_REG(pVM, &pPGM->StatRZTrap0eUSRead, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Err/User/Read", STAMUNIT_OCCURENCES, "Number of user mode read page faults.");
STAM_REG(pVM, &pPGM->StatRZTrap0eSVNotPresentRead, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Err/Supervisor/NPRead", STAMUNIT_OCCURENCES, "Number of supervisor mode not present read page faults.");
STAM_REG(pVM, &pPGM->StatRZTrap0eSVNotPresentWrite, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Err/Supervisor/NPWrite", STAMUNIT_OCCURENCES, "Number of supervisor mode not present write page faults.");
STAM_REG(pVM, &pPGM->StatRZTrap0eSVWrite, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Err/Supervisor/Write", STAMUNIT_OCCURENCES, "Number of supervisor mode write page faults.");
STAM_REG(pVM, &pPGM->StatRZTrap0eSVReserved, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Err/Supervisor/Reserved", STAMUNIT_OCCURENCES, "Number of supervisor mode reserved bit page faults.");
STAM_REG(pVM, &pPGM->StatRZTrap0eSNXE, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/Err/Supervisor/NXE", STAMUNIT_OCCURENCES, "Number of supervisor mode NXE page faults.");
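    /*
     * The Err/User vs Err/Supervisor counters above mirror the #PF error code
     * bits. An illustrative decode (assuming the standard X86_TRAP_PF_* bits;
     * the actual counting happens in the trap handler, not here):
     *
     *     if (uErr & X86_TRAP_PF_US)              // user-mode access
     *         if (!(uErr & X86_TRAP_PF_P))        // page not present
     *             STAM_COUNTER_INC((uErr & X86_TRAP_PF_RW)
     *                              ? &pPGM->StatRZTrap0eUSNotPresentWrite
     *                              : &pPGM->StatRZTrap0eUSNotPresentRead);
     */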
STAM_REG(pVM, &pPGM->StatRZTrap0eGuestPF, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/GuestPF", STAMUNIT_OCCURENCES, "Number of real guest page faults.");
STAM_REG(pVM, &pPGM->StatRZTrap0eGuestPFUnh, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/GuestPF/Unhandled", STAMUNIT_OCCURENCES, "Number of real guest page faults from the 'unhandled' case.");
STAM_REG(pVM, &pPGM->StatRZTrap0eGuestPFMapping, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/GuestPF/InMapping", STAMUNIT_OCCURENCES, "Number of real guest page faults in a mapping.");
STAM_REG(pVM, &pPGM->StatRZTrap0eWPEmulInRZ, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/WP/InRZ", STAMUNIT_OCCURENCES, "Number of guest page faults due to X86_CR0_WP emulation.");
STAM_REG(pVM, &pPGM->StatRZTrap0eWPEmulToR3, STAMTYPE_COUNTER, "/PGM/RZ/Trap0e/WP/ToR3", STAMUNIT_OCCURENCES, "Number of guest page faults due to X86_CR0_WP emulation (forward to R3 for emulation).");
    for (unsigned i = 0; i < RT_ELEMENTS(pPGM->StatRZTrap0ePD); i++)
        STAMR3RegisterF(pVM, &pPGM->StatRZTrap0ePD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
                        "The number of traps in page directory n.", "/PGM/RZ/Trap0e/PD/%04X", i);
STAM_REG(pVM, &pPGM->StatRZGuestCR3WriteHandled, STAMTYPE_COUNTER, "/PGM/RZ/CR3WriteHandled", STAMUNIT_OCCURENCES, "The number of times the Guest CR3 change was successfully handled.");
STAM_REG(pVM, &pPGM->StatRZGuestCR3WriteUnhandled, STAMTYPE_COUNTER, "/PGM/RZ/CR3WriteUnhandled", STAMUNIT_OCCURENCES, "The number of times the Guest CR3 change was passed back to the recompiler.");
STAM_REG(pVM, &pPGM->StatRZGuestCR3WriteConflict, STAMTYPE_COUNTER, "/PGM/RZ/CR3WriteConflict", STAMUNIT_OCCURENCES, "The number of times the Guest CR3 monitoring detected a conflict.");
STAM_REG(pVM, &pPGM->StatRZGuestROMWriteHandled, STAMTYPE_COUNTER, "/PGM/RZ/ROMWriteHandled", STAMUNIT_OCCURENCES, "The number of times the Guest ROM change was successfully handled.");
STAM_REG(pVM, &pPGM->StatRZGuestROMWriteUnhandled, STAMTYPE_COUNTER, "/PGM/RZ/ROMWriteUnhandled", STAMUNIT_OCCURENCES, "The number of times the Guest ROM change was passed back to the recompiler.");
STAM_REG(pVM, &pPGM->StatRZSyncCR3, STAMTYPE_PROFILE, "/PGM/RZ/SyncCR3", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMSyncCR3() body.");
STAM_REG(pVM, &pPGM->StatRZSyncCR3Handlers, STAMTYPE_PROFILE, "/PGM/RZ/SyncCR3/Handlers", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMSyncCR3() update handler section.");
STAM_REG(pVM, &pPGM->StatRZSyncCR3HandlerVirtualUpdate, STAMTYPE_PROFILE, "/PGM/RZ/SyncCR3/Handlers/VirtualUpdate", STAMUNIT_TICKS_PER_CALL, "Profiling of the virtual handler updates.");
STAM_REG(pVM, &pPGM->StatRZSyncCR3HandlerVirtualReset, STAMTYPE_PROFILE, "/PGM/RZ/SyncCR3/Handlers/VirtualReset", STAMUNIT_TICKS_PER_CALL, "Profiling of the virtual handler resets.");
STAM_REG(pVM, &pPGM->StatRZSyncCR3Global, STAMTYPE_COUNTER, "/PGM/RZ/SyncCR3/Global", STAMUNIT_OCCURENCES, "The number of global CR3 syncs.");
STAM_REG(pVM, &pPGM->StatRZSyncCR3NotGlobal, STAMTYPE_COUNTER, "/PGM/RZ/SyncCR3/NotGlobal", STAMUNIT_OCCURENCES, "The number of non-global CR3 syncs.");
    STAM_REG(pVM, &pPGM->StatRZSyncCR3DstCacheHit,          STAMTYPE_COUNTER, "/PGM/RZ/SyncCR3/DstCacheHit",        STAMUNIT_OCCURENCES,    "The number of times we got some kind of cache hit.");
STAM_REG(pVM, &pPGM->StatRZSyncCR3DstFreed, STAMTYPE_COUNTER, "/PGM/RZ/SyncCR3/DstFreed", STAMUNIT_OCCURENCES, "The number of times we've had to free a shadow entry.");
STAM_REG(pVM, &pPGM->StatRZSyncCR3DstFreedSrcNP, STAMTYPE_COUNTER, "/PGM/RZ/SyncCR3/DstFreedSrcNP", STAMUNIT_OCCURENCES, "The number of times we've had to free a shadow entry for which the source entry was not present.");
STAM_REG(pVM, &pPGM->StatRZSyncCR3DstNotPresent, STAMTYPE_COUNTER, "/PGM/RZ/SyncCR3/DstNotPresent", STAMUNIT_OCCURENCES, "The number of times we've encountered a not present shadow entry for a present guest entry.");
STAM_REG(pVM, &pPGM->StatRZSyncCR3DstSkippedGlobalPD, STAMTYPE_COUNTER, "/PGM/RZ/SyncCR3/DstSkippedGlobalPD", STAMUNIT_OCCURENCES, "The number of times a global page directory wasn't flushed.");
STAM_REG(pVM, &pPGM->StatRZSyncCR3DstSkippedGlobalPT, STAMTYPE_COUNTER, "/PGM/RZ/SyncCR3/DstSkippedGlobalPT", STAMUNIT_OCCURENCES, "The number of times a page table with only global entries wasn't flushed.");
STAM_REG(pVM, &pPGM->StatRZSyncPT, STAMTYPE_PROFILE, "/PGM/RZ/SyncPT", STAMUNIT_TICKS_PER_CALL, "Profiling of the pfnSyncPT() body.");
STAM_REG(pVM, &pPGM->StatRZSyncPTFailed, STAMTYPE_COUNTER, "/PGM/RZ/SyncPT/Failed", STAMUNIT_OCCURENCES, "The number of times pfnSyncPT() failed.");
STAM_REG(pVM, &pPGM->StatRZSyncPT4K, STAMTYPE_COUNTER, "/PGM/RZ/SyncPT/4K", STAMUNIT_OCCURENCES, "Nr of 4K PT syncs");
STAM_REG(pVM, &pPGM->StatRZSyncPT4M, STAMTYPE_COUNTER, "/PGM/RZ/SyncPT/4M", STAMUNIT_OCCURENCES, "Nr of 4M PT syncs");
    STAM_REG(pVM, &pPGM->StatRZSyncPagePDNAs,               STAMTYPE_COUNTER, "/PGM/RZ/SyncPagePDNAs",              STAMUNIT_OCCURENCES,    "The number of times we've marked a PD not present from SyncPage to virtualize the accessed bit.");
    STAM_REG(pVM, &pPGM->StatRZSyncPagePDOutOfSync,         STAMTYPE_COUNTER, "/PGM/RZ/SyncPagePDOutOfSync",        STAMUNIT_OCCURENCES,    "The number of times we've encountered an out-of-sync PD in SyncPage.");
STAM_REG(pVM, &pPGM->StatRZAccessedPage, STAMTYPE_COUNTER, "/PGM/RZ/AccessedPage", STAMUNIT_OCCURENCES, "The number of pages marked not present for accessed bit emulation.");
STAM_REG(pVM, &pPGM->StatRZDirtyBitTracking, STAMTYPE_PROFILE, "/PGM/RZ/DirtyPage", STAMUNIT_TICKS_PER_CALL, "Profiling the dirty bit tracking in CheckPageFault().");
STAM_REG(pVM, &pPGM->StatRZDirtyPage, STAMTYPE_COUNTER, "/PGM/RZ/DirtyPage/Mark", STAMUNIT_OCCURENCES, "The number of pages marked read-only for dirty bit tracking.");
STAM_REG(pVM, &pPGM->StatRZDirtyPageBig, STAMTYPE_COUNTER, "/PGM/RZ/DirtyPage/MarkBig", STAMUNIT_OCCURENCES, "The number of 4MB pages marked read-only for dirty bit tracking.");
    STAM_REG(pVM, &pPGM->StatRZDirtyPageSkipped,            STAMTYPE_COUNTER, "/PGM/RZ/DirtyPage/Skipped",          STAMUNIT_OCCURENCES,    "The number of pages already dirty or read-only.");
STAM_REG(pVM, &pPGM->StatRZDirtyPageTrap, STAMTYPE_COUNTER, "/PGM/RZ/DirtyPage/Trap", STAMUNIT_OCCURENCES, "The number of traps generated for dirty bit tracking.");
STAM_REG(pVM, &pPGM->StatRZDirtiedPage, STAMTYPE_COUNTER, "/PGM/RZ/DirtyPage/SetDirty", STAMUNIT_OCCURENCES, "The number of pages marked dirty because of write accesses.");
    STAM_REG(pVM, &pPGM->StatRZDirtyTrackRealPF,            STAMTYPE_COUNTER, "/PGM/RZ/DirtyPage/RealPF",           STAMUNIT_OCCURENCES,    "The number of real page faults during dirty bit tracking.");
STAM_REG(pVM, &pPGM->StatRZPageAlreadyDirty, STAMTYPE_COUNTER, "/PGM/RZ/DirtyPage/AlreadySet", STAMUNIT_OCCURENCES, "The number of pages already marked dirty because of write accesses.");
STAM_REG(pVM, &pPGM->StatRZInvalidatePage, STAMTYPE_PROFILE, "/PGM/RZ/InvalidatePage", STAMUNIT_TICKS_PER_CALL, "PGMInvalidatePage() profiling.");
STAM_REG(pVM, &pPGM->StatRZInvalidatePage4KBPages, STAMTYPE_COUNTER, "/PGM/RZ/InvalidatePage/4KBPages", STAMUNIT_OCCURENCES, "The number of times PGMInvalidatePage() was called for a 4KB page.");
STAM_REG(pVM, &pPGM->StatRZInvalidatePage4MBPages, STAMTYPE_COUNTER, "/PGM/RZ/InvalidatePage/4MBPages", STAMUNIT_OCCURENCES, "The number of times PGMInvalidatePage() was called for a 4MB page.");
STAM_REG(pVM, &pPGM->StatRZInvalidatePage4MBPagesSkip, STAMTYPE_COUNTER, "/PGM/RZ/InvalidatePage/4MBPagesSkip",STAMUNIT_OCCURENCES, "The number of times PGMInvalidatePage() skipped a 4MB page.");
STAM_REG(pVM, &pPGM->StatRZInvalidatePagePDMappings, STAMTYPE_COUNTER, "/PGM/RZ/InvalidatePage/PDMappings", STAMUNIT_OCCURENCES, "The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict).");
STAM_REG(pVM, &pPGM->StatRZInvalidatePagePDNAs, STAMTYPE_COUNTER, "/PGM/RZ/InvalidatePage/PDNAs", STAMUNIT_OCCURENCES, "The number of times PGMInvalidatePage() was called for a not accessed page directory.");
STAM_REG(pVM, &pPGM->StatRZInvalidatePagePDNPs, STAMTYPE_COUNTER, "/PGM/RZ/InvalidatePage/PDNPs", STAMUNIT_OCCURENCES, "The number of times PGMInvalidatePage() was called for a not present page directory.");
STAM_REG(pVM, &pPGM->StatRZInvalidatePagePDOutOfSync, STAMTYPE_COUNTER, "/PGM/RZ/InvalidatePage/PDOutOfSync", STAMUNIT_OCCURENCES, "The number of times PGMInvalidatePage() was called for an out of sync page directory.");
    STAM_REG(pVM, &pPGM->StatRZInvalidatePageSkipped,       STAMTYPE_COUNTER, "/PGM/RZ/InvalidatePage/Skipped",     STAMUNIT_OCCURENCES,    "The number of times PGMInvalidatePage() was skipped because the shadow PT was not present or a SyncCR3 was pending.");
STAM_REG(pVM, &pPGM->StatRZVirtHandlerSearchByPhys, STAMTYPE_PROFILE, "/PGM/RZ/VirtHandlerSearchByPhys", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmHandlerVirtualFindByPhysAddr.");
STAM_REG(pVM, &pPGM->StatRZPhysHandlerReset, STAMTYPE_COUNTER, "/PGM/RZ/PhysHandlerReset", STAMUNIT_OCCURENCES, "The number of times PGMHandlerPhysicalReset is called.");
    STAM_REG(pVM, &pPGM->StatRZPageOutOfSyncSupervisor,     STAMTYPE_COUNTER, "/PGM/RZ/OutOfSync/Supervisor",       STAMUNIT_OCCURENCES,    "Number of traps due to out-of-sync pages, plus the number of times VerifyAccessSyncPage calls SyncPage.");
    STAM_REG(pVM, &pPGM->StatRZPageOutOfSyncUser,           STAMTYPE_COUNTER, "/PGM/RZ/OutOfSync/User",             STAMUNIT_OCCURENCES,    "Number of traps due to out-of-sync pages, plus the number of times VerifyAccessSyncPage calls SyncPage.");
STAM_REG(pVM, &pPGM->StatRZPrefetch, STAMTYPE_PROFILE, "/PGM/RZ/Prefetch", STAMUNIT_TICKS_PER_CALL, "PGMPrefetchPage profiling.");
STAM_REG(pVM, &pPGM->StatRZChunkR3MapTlbHits, STAMTYPE_COUNTER, "/PGM/ChunkR3Map/TlbHitsRZ", STAMUNIT_OCCURENCES, "TLB hits.");
STAM_REG(pVM, &pPGM->StatRZChunkR3MapTlbMisses, STAMTYPE_COUNTER, "/PGM/ChunkR3Map/TlbMissesRZ", STAMUNIT_OCCURENCES, "TLB misses.");
STAM_REG(pVM, &pPGM->StatRZPageMapTlbHits, STAMTYPE_COUNTER, "/PGM/RZ/Page/MapTlbHits", STAMUNIT_OCCURENCES, "TLB hits.");
STAM_REG(pVM, &pPGM->StatRZPageMapTlbMisses, STAMTYPE_COUNTER, "/PGM/RZ/Page/MapTlbMisses", STAMUNIT_OCCURENCES, "TLB misses.");
STAM_REG(pVM, &pPGM->StatRZPageReplaceShared, STAMTYPE_COUNTER, "/PGM/RZ/Page/ReplacedShared", STAMUNIT_OCCURENCES, "Times a shared page was replaced.");
STAM_REG(pVM, &pPGM->StatRZPageReplaceZero, STAMTYPE_COUNTER, "/PGM/RZ/Page/ReplacedZero", STAMUNIT_OCCURENCES, "Times the zero page was replaced.");
/// @todo STAM_REG(pVM, &pPGM->StatRZPageHandyAllocs, STAMTYPE_COUNTER, "/PGM/RZ/Page/HandyAllocs", STAMUNIT_OCCURENCES, "Number of times we've allocated more handy pages.");
    STAM_REG(pVM, &pPGM->StatRZFlushTLB,                    STAMTYPE_PROFILE, "/PGM/RZ/FlushTLB",                   STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMFlushTLB() body.");
STAM_REG(pVM, &pPGM->StatRZFlushTLBNewCR3, STAMTYPE_COUNTER, "/PGM/RZ/FlushTLB/NewCR3", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with a new CR3, non-global. (switch)");
STAM_REG(pVM, &pPGM->StatRZFlushTLBNewCR3Global, STAMTYPE_COUNTER, "/PGM/RZ/FlushTLB/NewCR3Global", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with a new CR3, global. (switch)");
STAM_REG(pVM, &pPGM->StatRZFlushTLBSameCR3, STAMTYPE_COUNTER, "/PGM/RZ/FlushTLB/SameCR3", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with the same CR3, non-global. (flush)");
STAM_REG(pVM, &pPGM->StatRZFlushTLBSameCR3Global, STAMTYPE_COUNTER, "/PGM/RZ/FlushTLB/SameCR3Global", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with the same CR3, global. (flush)");
STAM_REG(pVM, &pPGM->StatRZGstModifyPage, STAMTYPE_PROFILE, "/PGM/RZ/GstModifyPage", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMGstModifyPage() body.");
STAM_REG(pVM, &pPGM->StatR3SyncCR3, STAMTYPE_PROFILE, "/PGM/R3/SyncCR3", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMSyncCR3() body.");
STAM_REG(pVM, &pPGM->StatR3SyncCR3Handlers, STAMTYPE_PROFILE, "/PGM/R3/SyncCR3/Handlers", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMSyncCR3() update handler section.");
STAM_REG(pVM, &pPGM->StatR3SyncCR3HandlerVirtualUpdate, STAMTYPE_PROFILE, "/PGM/R3/SyncCR3/Handlers/VirtualUpdate", STAMUNIT_TICKS_PER_CALL, "Profiling of the virtual handler updates.");
STAM_REG(pVM, &pPGM->StatR3SyncCR3HandlerVirtualReset, STAMTYPE_PROFILE, "/PGM/R3/SyncCR3/Handlers/VirtualReset", STAMUNIT_TICKS_PER_CALL, "Profiling of the virtual handler resets.");
STAM_REG(pVM, &pPGM->StatR3SyncCR3Global, STAMTYPE_COUNTER, "/PGM/R3/SyncCR3/Global", STAMUNIT_OCCURENCES, "The number of global CR3 syncs.");
STAM_REG(pVM, &pPGM->StatR3SyncCR3NotGlobal, STAMTYPE_COUNTER, "/PGM/R3/SyncCR3/NotGlobal", STAMUNIT_OCCURENCES, "The number of non-global CR3 syncs.");
    STAM_REG(pVM, &pPGM->StatR3SyncCR3DstCacheHit,          STAMTYPE_COUNTER, "/PGM/R3/SyncCR3/DstCacheHit",        STAMUNIT_OCCURENCES,    "The number of times we got some kind of cache hit.");
STAM_REG(pVM, &pPGM->StatR3SyncCR3DstFreed, STAMTYPE_COUNTER, "/PGM/R3/SyncCR3/DstFreed", STAMUNIT_OCCURENCES, "The number of times we've had to free a shadow entry.");
STAM_REG(pVM, &pPGM->StatR3SyncCR3DstFreedSrcNP, STAMTYPE_COUNTER, "/PGM/R3/SyncCR3/DstFreedSrcNP", STAMUNIT_OCCURENCES, "The number of times we've had to free a shadow entry for which the source entry was not present.");
STAM_REG(pVM, &pPGM->StatR3SyncCR3DstNotPresent, STAMTYPE_COUNTER, "/PGM/R3/SyncCR3/DstNotPresent", STAMUNIT_OCCURENCES, "The number of times we've encountered a not present shadow entry for a present guest entry.");
STAM_REG(pVM, &pPGM->StatR3SyncCR3DstSkippedGlobalPD, STAMTYPE_COUNTER, "/PGM/R3/SyncCR3/DstSkippedGlobalPD", STAMUNIT_OCCURENCES, "The number of times a global page directory wasn't flushed.");
STAM_REG(pVM, &pPGM->StatR3SyncCR3DstSkippedGlobalPT, STAMTYPE_COUNTER, "/PGM/R3/SyncCR3/DstSkippedGlobalPT", STAMUNIT_OCCURENCES, "The number of times a page table with only global entries wasn't flushed.");
STAM_REG(pVM, &pPGM->StatR3SyncPT, STAMTYPE_PROFILE, "/PGM/R3/SyncPT", STAMUNIT_TICKS_PER_CALL, "Profiling of the pfnSyncPT() body.");
STAM_REG(pVM, &pPGM->StatR3SyncPTFailed, STAMTYPE_COUNTER, "/PGM/R3/SyncPT/Failed", STAMUNIT_OCCURENCES, "The number of times pfnSyncPT() failed.");
STAM_REG(pVM, &pPGM->StatR3SyncPT4K, STAMTYPE_COUNTER, "/PGM/R3/SyncPT/4K", STAMUNIT_OCCURENCES, "Nr of 4K PT syncs");
STAM_REG(pVM, &pPGM->StatR3SyncPT4M, STAMTYPE_COUNTER, "/PGM/R3/SyncPT/4M", STAMUNIT_OCCURENCES, "Nr of 4M PT syncs");
    STAM_REG(pVM, &pPGM->StatR3SyncPagePDNAs,               STAMTYPE_COUNTER, "/PGM/R3/SyncPagePDNAs",              STAMUNIT_OCCURENCES,    "The number of times we've marked a PD not present from SyncPage to virtualize the accessed bit.");
    STAM_REG(pVM, &pPGM->StatR3SyncPagePDOutOfSync,         STAMTYPE_COUNTER, "/PGM/R3/SyncPagePDOutOfSync",        STAMUNIT_OCCURENCES,    "The number of times we've encountered an out-of-sync PD in SyncPage.");
STAM_REG(pVM, &pPGM->StatR3AccessedPage, STAMTYPE_COUNTER, "/PGM/R3/AccessedPage", STAMUNIT_OCCURENCES, "The number of pages marked not present for accessed bit emulation.");
STAM_REG(pVM, &pPGM->StatR3DirtyBitTracking, STAMTYPE_PROFILE, "/PGM/R3/DirtyPage", STAMUNIT_TICKS_PER_CALL, "Profiling the dirty bit tracking in CheckPageFault().");
STAM_REG(pVM, &pPGM->StatR3DirtyPage, STAMTYPE_COUNTER, "/PGM/R3/DirtyPage/Mark", STAMUNIT_OCCURENCES, "The number of pages marked read-only for dirty bit tracking.");
STAM_REG(pVM, &pPGM->StatR3DirtyPageBig, STAMTYPE_COUNTER, "/PGM/R3/DirtyPage/MarkBig", STAMUNIT_OCCURENCES, "The number of 4MB pages marked read-only for dirty bit tracking.");
    STAM_REG(pVM, &pPGM->StatR3DirtyPageSkipped,            STAMTYPE_COUNTER, "/PGM/R3/DirtyPage/Skipped",          STAMUNIT_OCCURENCES,    "The number of pages already dirty or read-only.");
STAM_REG(pVM, &pPGM->StatR3DirtyPageTrap, STAMTYPE_COUNTER, "/PGM/R3/DirtyPage/Trap", STAMUNIT_OCCURENCES, "The number of traps generated for dirty bit tracking.");
STAM_REG(pVM, &pPGM->StatR3DirtiedPage, STAMTYPE_COUNTER, "/PGM/R3/DirtyPage/SetDirty", STAMUNIT_OCCURENCES, "The number of pages marked dirty because of write accesses.");
    STAM_REG(pVM, &pPGM->StatR3DirtyTrackRealPF,            STAMTYPE_COUNTER, "/PGM/R3/DirtyPage/RealPF",           STAMUNIT_OCCURENCES,    "The number of real page faults during dirty bit tracking.");
STAM_REG(pVM, &pPGM->StatR3PageAlreadyDirty, STAMTYPE_COUNTER, "/PGM/R3/DirtyPage/AlreadySet", STAMUNIT_OCCURENCES, "The number of pages already marked dirty because of write accesses.");
STAM_REG(pVM, &pPGM->StatR3InvalidatePage, STAMTYPE_PROFILE, "/PGM/R3/InvalidatePage", STAMUNIT_TICKS_PER_CALL, "PGMInvalidatePage() profiling.");
STAM_REG(pVM, &pPGM->StatR3InvalidatePage4KBPages, STAMTYPE_COUNTER, "/PGM/R3/InvalidatePage/4KBPages", STAMUNIT_OCCURENCES, "The number of times PGMInvalidatePage() was called for a 4KB page.");
STAM_REG(pVM, &pPGM->StatR3InvalidatePage4MBPages, STAMTYPE_COUNTER, "/PGM/R3/InvalidatePage/4MBPages", STAMUNIT_OCCURENCES, "The number of times PGMInvalidatePage() was called for a 4MB page.");
STAM_REG(pVM, &pPGM->StatR3InvalidatePage4MBPagesSkip, STAMTYPE_COUNTER, "/PGM/R3/InvalidatePage/4MBPagesSkip",STAMUNIT_OCCURENCES, "The number of times PGMInvalidatePage() skipped a 4MB page.");
STAM_REG(pVM, &pPGM->StatR3InvalidatePagePDMappings, STAMTYPE_COUNTER, "/PGM/R3/InvalidatePage/PDMappings", STAMUNIT_OCCURENCES, "The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict).");
STAM_REG(pVM, &pPGM->StatR3InvalidatePagePDNAs, STAMTYPE_COUNTER, "/PGM/R3/InvalidatePage/PDNAs", STAMUNIT_OCCURENCES, "The number of times PGMInvalidatePage() was called for a not accessed page directory.");
STAM_REG(pVM, &pPGM->StatR3InvalidatePagePDNPs, STAMTYPE_COUNTER, "/PGM/R3/InvalidatePage/PDNPs", STAMUNIT_OCCURENCES, "The number of times PGMInvalidatePage() was called for a not present page directory.");
STAM_REG(pVM, &pPGM->StatR3InvalidatePagePDOutOfSync, STAMTYPE_COUNTER, "/PGM/R3/InvalidatePage/PDOutOfSync", STAMUNIT_OCCURENCES, "The number of times PGMInvalidatePage() was called for an out of sync page directory.");
    STAM_REG(pVM, &pPGM->StatR3InvalidatePageSkipped,       STAMTYPE_COUNTER, "/PGM/R3/InvalidatePage/Skipped",     STAMUNIT_OCCURENCES,    "The number of times PGMInvalidatePage() was skipped because the shadow PT was not present or a SyncCR3 was pending.");
STAM_REG(pVM, &pPGM->StatR3VirtHandlerSearchByPhys, STAMTYPE_PROFILE, "/PGM/R3/VirtHandlerSearchByPhys", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmHandlerVirtualFindByPhysAddr.");
STAM_REG(pVM, &pPGM->StatR3PhysHandlerReset, STAMTYPE_COUNTER, "/PGM/R3/PhysHandlerReset", STAMUNIT_OCCURENCES, "The number of times PGMHandlerPhysicalReset is called.");
    STAM_REG(pVM, &pPGM->StatR3PageOutOfSyncSupervisor,     STAMTYPE_COUNTER, "/PGM/R3/OutOfSync/Supervisor",       STAMUNIT_OCCURENCES,    "Number of traps due to out-of-sync pages, plus the number of times VerifyAccessSyncPage calls SyncPage.");
    STAM_REG(pVM, &pPGM->StatR3PageOutOfSyncUser,           STAMTYPE_COUNTER, "/PGM/R3/OutOfSync/User",             STAMUNIT_OCCURENCES,    "Number of traps due to out-of-sync pages, plus the number of times VerifyAccessSyncPage calls SyncPage.");
STAM_REG(pVM, &pPGM->StatR3Prefetch, STAMTYPE_PROFILE, "/PGM/R3/Prefetch", STAMUNIT_TICKS_PER_CALL, "PGMPrefetchPage profiling.");
STAM_REG(pVM, &pPGM->StatR3ChunkR3MapTlbHits, STAMTYPE_COUNTER, "/PGM/ChunkR3Map/TlbHitsR3", STAMUNIT_OCCURENCES, "TLB hits.");
STAM_REG(pVM, &pPGM->StatR3ChunkR3MapTlbMisses, STAMTYPE_COUNTER, "/PGM/ChunkR3Map/TlbMissesR3", STAMUNIT_OCCURENCES, "TLB misses.");
STAM_REG(pVM, &pPGM->StatR3PageMapTlbHits, STAMTYPE_COUNTER, "/PGM/R3/Page/MapTlbHits", STAMUNIT_OCCURENCES, "TLB hits.");
STAM_REG(pVM, &pPGM->StatR3PageMapTlbMisses, STAMTYPE_COUNTER, "/PGM/R3/Page/MapTlbMisses", STAMUNIT_OCCURENCES, "TLB misses.");
STAM_REG(pVM, &pPGM->StatR3PageReplaceShared, STAMTYPE_COUNTER, "/PGM/R3/Page/ReplacedShared", STAMUNIT_OCCURENCES, "Times a shared page was replaced.");
STAM_REG(pVM, &pPGM->StatR3PageReplaceZero, STAMTYPE_COUNTER, "/PGM/R3/Page/ReplacedZero", STAMUNIT_OCCURENCES, "Times the zero page was replaced.");
/// @todo STAM_REG(pVM, &pPGM->StatR3PageHandyAllocs, STAMTYPE_COUNTER, "/PGM/R3/Page/HandyAllocs", STAMUNIT_OCCURENCES, "Number of times we've allocated more handy pages.");
    STAM_REG(pVM, &pPGM->StatR3FlushTLB,                    STAMTYPE_PROFILE, "/PGM/R3/FlushTLB",                   STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMFlushTLB() body.");
STAM_REG(pVM, &pPGM->StatR3FlushTLBNewCR3, STAMTYPE_COUNTER, "/PGM/R3/FlushTLB/NewCR3", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with a new CR3, non-global. (switch)");
STAM_REG(pVM, &pPGM->StatR3FlushTLBNewCR3Global, STAMTYPE_COUNTER, "/PGM/R3/FlushTLB/NewCR3Global", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with a new CR3, global. (switch)");
STAM_REG(pVM, &pPGM->StatR3FlushTLBSameCR3, STAMTYPE_COUNTER, "/PGM/R3/FlushTLB/SameCR3", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with the same CR3, non-global. (flush)");
STAM_REG(pVM, &pPGM->StatR3FlushTLBSameCR3Global, STAMTYPE_COUNTER, "/PGM/R3/FlushTLB/SameCR3Global", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with the same CR3, global. (flush)");
STAM_REG(pVM, &pPGM->StatR3GstModifyPage, STAMTYPE_PROFILE, "/PGM/R3/GstModifyPage", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMGstModifyPage() body.");
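    /*
     * The registrations above only create the sample points; the hot paths bump
     * them with the STAM macros, roughly like this (illustrative sketch):
     *
     *     STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0e, a);
     *     // ... handle the #PF ...
     *     STAM_PROFILE_STOP(&pVM->pgm.s.StatRZTrap0e, a);
     *     STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eGuestPF);
     *
     * The macros compile to no-ops unless VBOX_WITH_STATISTICS is defined.
     */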
int rc;
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
rc = MMR3HyperReserve(pVM, PAGE_SIZE * (2 + RT_ELEMENTS(pVM->pgm.s.apShwPaePDsR3) + 1 + 2 + 2), "Paging", &GCPtr);
    /* The dynamic mapping area must not cross a PAE page directory boundary;
       if the first reservation does, reserve a replacement range that doesn't. */
    if (    RT_SUCCESS(rc)
        &&  (pVM->pgm.s.pbDynPageMapBaseGC >> X86_PD_PAE_SHIFT) != ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> X86_PD_PAE_SHIFT))
        rc = MMR3HyperReserve(pVM, MM_HYPER_DYNAMIC_SIZE, "Dynamic mapping not crossing", &pVM->pgm.s.pbDynPageMapBaseGC);
    if (RT_SUCCESS(rc))
        AssertRelease((pVM->pgm.s.pbDynPageMapBaseGC >> X86_PD_PAE_SHIFT) == ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> X86_PD_PAE_SHIFT));
    return rc;
int rc;
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
pVM->pgm.s.paDynPageMap32BitPTEsGC = pMapping->aPTs[iPT].pPTRC + iPG * sizeof(pMapping->aPTs[0].pPTR3->a[0]);
pVM->pgm.s.paDynPageMapPaePTEsGC = pMapping->aPTs[iPT].paPaePTsRC + iPG * sizeof(pMapping->aPTs[0].paPaePTsR3->a[0]);
return rc;
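/*
 * The two PTE arrays set up above are the same dynamic-mapping window seen
 * through both page table formats, so a remap must update them in tandem.
 * Illustrative sketch of the raw-mode context update (the exact field usage
 * is an assumption):
 *
 *     pPTE32Bit[iPage].u = HCPhys | X86_PTE_P | X86_PTE_RW;
 *     pPTEPae[iPage].u   = HCPhys | X86_PTE_P | X86_PTE_RW;
 */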
AssertMsg(pVM->pgm.s.pShwNestedRootR3, ("Init order, no relocation before paging is initialized!\n"));
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysHandlers, true, pgmR3RelocatePhysHandler, &offDelta);
RTAvlroGCPtrDoWithAll(&pVM->pgm.s.pTreesR3->VirtHandlers, true, pgmR3RelocateVirtHandler, &offDelta);
RTAvlroGCPtrDoWithAll(&pVM->pgm.s.pTreesR3->HyperVirtHandlers, true, pgmR3RelocateHyperVirtHandler, &offDelta);
#ifdef DEBUG
#ifdef VBOX_WITH_NEW_PHYS_CODE
#ifdef VBOX_STRICT
static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser)
uint32_t i = 0;
SSMR3PutU16(pSSM, (uint16_t)(pRam->aPages[iPage].HCPhys & ~X86_PTE_PAE_PG_MASK)); /** @todo PAGE FLAGS */
return rc;
AssertMsgFailed(("pgmR3Load: Invalid version u32Version=%d (current %d)!\n", u32Version, PGM_SAVED_STATE_VERSION));
uint32_t u;
return rc;
return rc;
uint32_t i = 0;
return rc;
if (u32Sep == ~0U)
if (u32Sep != i)
return rc;
return rc;
if (!pMapping)
AssertFailed();
return VERR_SSM_LOAD_CONFIG_MISMATCH;
return rc;
if (u32Sep == ~0U)
if (u32Sep != i)
return rc;
while (cPages-- > 0)
// &= MM_RAM_FLAGS_DYNAMIC_ALLOC | MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2
pRam->aPages[iPage].HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) | (RTHCPHYS)u16; /** @todo PAGE FLAGS */
return rc;
if (fValidChunk)
return rc;
return rc;
/* Restore pVM->pgm.s.GCPhysCR3. */
return rc;
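/*
 * The u32Sep checks above implement the saved-state list framing: each record
 * is preceded by its index and the list ends with ~0U. A minimal sketch of the
 * reader side (the error code choice here is an assumption):
 *
 *     uint32_t u32Sep;
 *     rc = SSMR3GetU32(pSSM, &u32Sep);
 *     if (RT_FAILURE(rc))
 *         return rc;
 *     if (u32Sep == ~0U)   // end-of-list marker
 *         break;
 *     if (u32Sep != i)     // index mismatch -> corrupt saved state
 *         return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
 */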
if (pszArgs)
fGuest = true;
fShadow = true;
fHost = true;
if (fGuest)
if (fShadow)
if (fHost)
const char *psz;
pVM,
Assert(PGMPhysGCPhys2R3PtrAssert(pVM, (RTGCPHYS)(CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK), sizeof(*pPDSrc)) == pPDSrc);
iPD,
iPD,
return rc;
switch (pgmMode)
case PGMMODE_PAE:
case PGMMODE_AMD64:
int rc;
pVM->pgm.s.paModeData = (PPGMMODEDATA)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMMODEDATA) * pgmModeDataMaxIndex());
#ifdef VBOX_WITH_64_BITS_GUESTS
#ifdef VBOX_WITH_64_BITS_GUESTS
case SUPPAGINGMODE_32_BIT:
# ifdef VBOX_WITH_64_BITS_GUESTS
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
# ifdef VBOX_WITH_64_BITS_GUESTS
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
# ifdef VBOX_WITH_64_BITS_GUESTS
AssertFailed();
#ifdef VBOX_WITH_64_BITS_GUESTS
return VINF_SUCCESS;
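/*
 * The paModeData table allocated above holds one PGMMODEDATA entry per
 * (shadow, guest) mode pair. A lookup is by computed index, along these lines
 * (the index helper named here is an assumption):
 *
 *     PPGMMODEDATA pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(enmShw, enmGst)];
 */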
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
#ifdef VBOX_STRICT
static PGMMODE pgmR3CalcShadowMode(PVM pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode, VMMSWITCHER *penmSwitcher)
switch (enmGuestMode)
case PGMMODE_REAL:
case PGMMODE_PROTECTED:
switch (enmHostMode)
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
#ifdef DEBUG_bird
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
#ifdef DEBUG_bird
case PGMMODE_32_BIT:
switch (enmHostMode)
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
#ifdef DEBUG_bird
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
#ifdef DEBUG_bird
case PGMMODE_PAE:
switch (enmHostMode)
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
switch (enmHostMode)
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_PAE_GLOBAL:
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
return PGMMODE_INVALID;
return enmShadowMode;
Log(("PGMR3ChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVM->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
PGMMODE enmShadowMode = pgmR3CalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVM->pgm.s.enmShadowMode, &enmSwitcher);
return rc;
LogFlow(("PGMR3ChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVM->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
return rc;
return rc;
int rc;
switch (enmShadowMode)
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_REAL:
case PGMMODE_PROTECTED:
return VERR_INTERNAL_ERROR;
return rc;
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
switch (enmGuestMode)
case PGMMODE_REAL:
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
default: AssertFailed(); break;
case PGMMODE_PROTECTED:
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
default: AssertFailed(); break;
case PGMMODE_32_BIT:
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
default: AssertFailed(); break;
case PGMMODE_PAE_NX:
case PGMMODE_PAE:
N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (General/Advanced)"));
return VINF_SUCCESS;
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_32_BIT:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
default: AssertFailed(); break;
#ifdef VBOX_WITH_64_BITS_GUESTS
case PGMMODE_AMD64_NX:
case PGMMODE_AMD64:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
case PGMMODE_NESTED:
case PGMMODE_EPT:
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
default: AssertFailed(); break;
return rc;
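/*
 * Rough shape of the mode switch above (heavily condensed sketch; the exact
 * sequencing is an assumption, while PGM_SHW_PFN/PGM_GST_PFN are the real
 * mode-data dispatch macros):
 *
 *     PGMMODE enmShadowMode = pgmR3CalcShadowMode(pVM, enmGuestMode, ...);
 *     // select a new raw-mode switcher if needed, leave the old modes,
 *     // then enter the new shadow and guest modes:
 *     rc = PGM_SHW_PFN(Enter, pVM)(pVM);
 *     rc = PGM_GST_PFN(Enter, pVM)(pVM, GCPhysCR3);
 */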
static int pgmR3DumpHierarchyHCPaePT(PVM pVM, PX86PTPAE pPT, uint64_t u64Address, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
return VINF_SUCCESS;
static int pgmR3DumpHierarchyHCPaePD(PVM pVM, RTHCPHYS HCPhys, uint64_t u64Address, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
if (!pPD)
pHlp->pfnPrintf(pHlp, "%0*llx error! Page directory at HCPhys=%RHp was not found in the page pool!\n",
return VERR_INVALID_PARAMETER;
pHlp->pfnPrintf(pHlp, "%0*llx error! Mapping error! PT %d has HCPhysPT=%RHp not %RHp is in the PD.\n",
if (pPT)
return rc;
static int pgmR3DumpHierarchyHCPaePDPT(PVM pVM, RTHCPHYS HCPhys, uint64_t u64Address, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
if (!pPDPT)
pHlp->pfnPrintf(pHlp, "%0*llx error! Page directory pointer table at HCPhys=%RHp was not found in the page pool!\n",
return VERR_INVALID_PARAMETER;
if (fLongMode)
i << X86_PDPT_SHIFT,
int rc2 = pgmR3DumpHierarchyHCPaePD(pVM, Pdpe.u & X86_PDPE_PG_MASK, u64Address + ((uint64_t)i << X86_PDPT_SHIFT),
return rc;
static int pgmR3DumpHierarchyHcPaePML4(PVM pVM, RTHCPHYS HCPhys, uint32_t cr4, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
if (!pPML4)
return VERR_INVALID_PARAMETER;
uint64_t u64Address = ((uint64_t)i << X86_PML4_SHIFT) | (((uint64_t)i >> (X86_PML4_SHIFT - X86_PDPT_SHIFT - 1)) * 0xffff000000000000ULL);
int rc2 = pgmR3DumpHierarchyHCPaePDPT(pVM, Pml4e.u & X86_PML4E_PG_MASK, u64Address, cr4, true, cMaxDepth - 1, pHlp);
return rc;
return VINF_SUCCESS;
int pgmR3DumpHierarchyHC32BitPD(PVM pVM, uint32_t cr3, uint32_t cr4, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
if (!pPD)
pHlp->pfnPrintf(pHlp, "Page directory at %#x was not found in the page pool!\n", cr3 & X86_CR3_PAGE_MASK);
return VERR_INVALID_PARAMETER;
pHlp->pfnPrintf(pHlp, "%08x error! Mapping error! PT %d has HCPhysPT=%RHp not %RHp is in the PD.\n",
if (pPT)
pHlp->pfnPrintf(pHlp, "%08x error! Page table at %#x was not found in the page pool!\n", u32Address, HCPhys);
return rc;
Log(("Found %RGp at %RGv -> flags=%llx\n", PhysSearch, (RTGCPTR)(u32Address + (i << X86_PT_SHIFT)), fPageShw));
return VINF_SUCCESS;
bool fLongMode = false;
return VERR_INVALID_PARAMETER;
if (pPT)
return rc;
VMMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint64_t cr3, uint64_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
if (!pHlp)
if (!cMaxDepth)
return VINF_SUCCESS;
    if (fLongMode)
        return pgmR3DumpHierarchyHcPaePML4(pVM, cr3 & X86_CR3_AMD64_PAGE_MASK, cr4, cMaxDepth, pHlp);
    return pgmR3DumpHierarchyHCPaePDPT(pVM, cr3 & X86_CR3_PAE_PAGE_MASK, 0, cr4, false, cMaxDepth, pHlp);
#ifdef VBOX_WITH_DEBUGGER
static DECLCALLBACK(int) pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
if (!pVM)
return rc;
return VINF_SUCCESS;
static DECLCALLBACK(int) pgmR3CmdMap(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
if (!pVM)
int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, pVM->pgm.s.fMappingsFixed ? "The mappings are FIXED.\n" : "The mappings are FLOATING.\n");
return rc;
return rc;
return VINF_SUCCESS;
static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
if (!pVM)
return rc;
return VINF_SUCCESS;
#ifdef VBOX_STRICT
static DECLCALLBACK(int) pgmR3CmdAssertCR3(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
if (!pVM)
return rc;
return VINF_SUCCESS;
static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
if (!pVM)
typedef struct PGMCHECKINTARGS
{
    bool                    fLeftToRight;   /**< Set if enumerating left-to-right, clear for right-to-left. */
    PPGMPHYSHANDLER         pPrevPhys;      /**< The previous physical handler node. */
    PPGMVIRTHANDLER         pPrevVirt;      /**< The previous virtual handler node. */
    PPGMPHYS2VIRTHANDLER    pPrevPhys2Virt; /**< The previous phys-to-virt handler node. */
    PVM                     pVM;            /**< The VM handle. */
} PGMCHECKINTARGS, *PPGMCHECKINTARGS;
static DECLCALLBACK(int) pgmR3CheckIntegrityPhysHandlerNode(PAVLROGCPHYSNODECORE pNode, void *pvUser)
AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %RGp-%RGp %s\n", pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
|| (pArgs->fLeftToRight ? pArgs->pPrevPhys->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys->Core.KeyLast > pCur->Core.Key),
pArgs->pPrevPhys, pArgs->pPrevPhys->Core.Key, pArgs->pPrevPhys->Core.KeyLast, pArgs->pPrevPhys->pszDesc,
static DECLCALLBACK(int) pgmR3CheckIntegrityVirtHandlerNode(PAVLROGCPTRNODECORE pNode, void *pvUser)
AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %RGv-%RGv %s\n", pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
|| (pArgs->fLeftToRight ? pArgs->pPrevVirt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevVirt->Core.KeyLast > pCur->Core.Key),
pArgs->pPrevVirt, pArgs->pPrevVirt->Core.Key, pArgs->pPrevVirt->Core.KeyLast, pArgs->pPrevVirt->pszDesc,
AssertReleaseMsg(pCur->aPhysToVirt[iPage].offVirtHandler == -RT_OFFSETOF(PGMVIRTHANDLER, aPhysToVirt[iPage]),
static DECLCALLBACK(int) pgmR3CheckIntegrityPhysToVirtHandlerNode(PAVLROGCPHYSNODECORE pNode, void *pvUser)
AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %RGp-%RGp\n", pCur, pCur->Core.Key, pCur->Core.KeyLast));
|| (pArgs->fLeftToRight ? pArgs->pPrevPhys2Virt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys2Virt->Core.KeyLast > pCur->Core.Key),
|| (pArgs->fLeftToRight ? pArgs->pPrevPhys2Virt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys2Virt->Core.KeyLast > pCur->Core.Key),
AssertReleaseMsg((pCur->offNextAlias & (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD)) == (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD),
pCur2 = (PPGMPHYS2VIRTHANDLER)((intptr_t)pCur + (pCur->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
AssertReleaseMsg((pCur2->offNextAlias & (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD)) == PGMPHYS2VIRTHANDLER_IN_TREE,
int cErrors = 0;
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysHandlers, true, pgmR3CheckIntegrityPhysHandlerNode, &Args);
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysHandlers, false, pgmR3CheckIntegrityPhysHandlerNode, &Args);
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->VirtHandlers, true, pgmR3CheckIntegrityVirtHandlerNode, &Args);
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->VirtHandlers, false, pgmR3CheckIntegrityVirtHandlerNode, &Args);
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->HyperVirtHandlers, true, pgmR3CheckIntegrityVirtHandlerNode, &Args);
cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesR3->HyperVirtHandlers, false, pgmR3CheckIntegrityVirtHandlerNode, &Args);
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysToVirtHandlers, true, pgmR3CheckIntegrityPhysToVirtHandlerNode, &Args);
cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesR3->PhysToVirtHandlers, false, pgmR3CheckIntegrityPhysToVirtHandlerNode, &Args);
return VINF_SUCCESS;
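/*
 * PGMR3CheckIntegrity runs each callback twice, once per traversal direction,
 * so the pPrev* ordering asserts are exercised against both neighbours of
 * every node. A minimal callback of the expected shape (illustrative only;
 * returning 0 continues the enumeration):
 *
 *     static DECLCALLBACK(int) pgmR3CheckIntegrityExampleNode(PAVLROGCPHYSNODECORE pNode, void *pvUser)
 *     {
 *         PPGMCHECKINTARGS pArgs = (PPGMCHECKINTARGS)pvUser;
 *         AssertReleaseReturn(pNode->Key <= pNode->KeyLast, 1);
 *         NOREF(pArgs);
 *         return 0;
 *     }
 */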