PGMAllPhys.cpp revision 1b8df74d9f30c75a5e4a5dbfff7f2524c378e814
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * PGM - Page Manager and Monitor, Physical Memory Addressing.
c7814cf6e1240a519cbec0441e033d0e2470ed00vboxsync * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * This file is part of VirtualBox Open Source Edition (OSE), as
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * available from http://www.virtualbox.org. This file is free software;
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * you can redistribute it and/or modify it under the terms of the GNU
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * General Public License (GPL) as published by the Free Software
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * Foundation, in version 2 as it comes in the "COPYING" file of the
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * Clara, CA 95054 USA or visit http://www.sun.com if you need
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * additional information or have any questions.
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync/** @def PGM_IGNORE_RAM_FLAGS_RESERVED
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * Don't respect the MM_RAM_FLAGS_RESERVED flag when converting to HC addresses.
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * Since this flag is currently incorrectly kept set for ROM regions we will
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * have to ignore it for now so we don't break stuff.
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * @todo this has been fixed now I believe, remove this hack.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync/*******************************************************************************
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync* Header Files *
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync*******************************************************************************/
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * \#PF Handler callback for Guest ROM range write access.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @returns VBox status code (appropriate for trap handling and GC return).
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @param pVM VM Handle.
1c822ec4298d5d20b0fb1cc20346c5d4e4e596bfvboxsync * @param uErrorCode CPU Error code.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @param pRegFrame Trap register frame.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @param pvFault The fault address (cr2).
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @param GCPhysFault The GC physical address corresponding to pvFault.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @param pvUser User argument. Pointer to the ROM range structure.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsyncVMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser)
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * If it's a simple instruction which doesn't change the cpu state
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * we will simply skip it. Otherwise we'll have to defer it to REM.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync rc = EMInterpretDisasOne(pVM, pRegFrame, &Cpu, &cbOp);
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync && Cpu.mode == CPUMODE_32BIT /** @todo why does this matter? */
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync && !(Cpu.prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync /** @todo Find other instructions we can safely skip, possibly
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * adding this kind of detection to DIS or EM. */
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestROMWriteHandled);
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync /* Handle it in ring-3 because it's *way* easier there. */
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
7af218a7441de38fc9e814919db04bae3e917664vboxsync STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestROMWriteUnhandled);
7af218a7441de38fc9e814919db04bae3e917664vboxsync#endif /* IN_RING3 */
7af218a7441de38fc9e814919db04bae3e917664vboxsync * Checks if Address Gate 20 is enabled or not.
7af218a7441de38fc9e814919db04bae3e917664vboxsync * @returns true if enabled.
7af218a7441de38fc9e814919db04bae3e917664vboxsync * @returns false if disabled.
7af218a7441de38fc9e814919db04bae3e917664vboxsync * @param pVM VM handle.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync LogFlow(("PGMPhysIsA20Enabled %d\n", pVM->pgm.s.fA20Enabled));
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync return !!pVM->pgm.s.fA20Enabled ; /* stupid MS compiler doesn't trust me. */
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * Validates a GC physical address.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * @returns true if valid.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * @returns false if invalid.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @param pVM The VM handle.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @param GCPhys The physical address to validate.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsyncVMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * Checks if a GC physical address is a normal page,
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * i.e. not ROM, MMIO or reserved.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * @returns true if normal.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * @returns false if invalid, ROM, MMIO or reserved page.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * @param pVM The VM handle.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * @param GCPhys The physical address to check.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsyncVMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync && !(pPage->HCPhys & (MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO2));
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * Converts a GC physical address to a HC physical address.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * @returns VINF_SUCCESS on success.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * page but has no physical backing.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * GC physical address.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * @param pVM The VM handle.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * @param GCPhys The GC physical address to convert.
7af218a7441de38fc9e814919db04bae3e917664vboxsync * @param pHCPhys Where to store the HC physical address on success.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsyncVMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync if (RT_UNLIKELY(pPage->HCPhys & MM_RAM_FLAGS_RESERVED)) /** @todo PAGE FLAGS */
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * Invalidates the GC page mapping TLB.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @param pVM The VM handle.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsyncVMMDECL(void) PGMPhysInvalidatePageGCMapTLB(PVM pVM)
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync /* later */
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * Invalidates the ring-0 page mapping TLB.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @param pVM The VM handle.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsyncVMMDECL(void) PGMPhysInvalidatePageR0MapTLB(PVM pVM)
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * Invalidates the ring-3 page mapping TLB.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @param pVM The VM handle.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsyncVMMDECL(void) PGMPhysInvalidatePageR3MapTLB(PVM pVM)
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * Frees the specified RAM page.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * This is used by ballooning and remapping MMIO2.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @param pVM Pointer to the shared VM structure.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @param pPage Pointer to the page structure.
a438caaf732f7839dc66b4f8dad672527845a003vboxsync * @param GCPhys The guest physical address of the page, if applicable.
a438caaf732f7839dc66b4f8dad672527845a003vboxsyncvoid pgmPhysFreePage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
a438caaf732f7839dc66b4f8dad672527845a003vboxsync AssertFatal(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
7af218a7441de38fc9e814919db04bae3e917664vboxsync /** @todo implement this... */
7af218a7441de38fc9e814919db04bae3e917664vboxsync * Makes sure that there is at least one handy page ready for use.
7af218a7441de38fc9e814919db04bae3e917664vboxsync * This will also take the appropriate actions when reaching water-marks.
7af218a7441de38fc9e814919db04bae3e917664vboxsync * @returns The following VBox status codes.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * @retval VINF_SUCCESS on success.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @retval VERR_EM_NO_MEMORY if we're really out of memory.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @param pVM The VM handle.
7af218a7441de38fc9e814919db04bae3e917664vboxsync * @remarks Must be called from within the PGM critical section. It may
7af218a7441de38fc9e814919db04bae3e917664vboxsync * nip back to ring-3/0 in some cases.
7af218a7441de38fc9e814919db04bae3e917664vboxsync /** @remarks
7af218a7441de38fc9e814919db04bae3e917664vboxsync * low-water mark logic for R0 & GC:
7af218a7441de38fc9e814919db04bae3e917664vboxsync * - 75%: Set FF.
7af218a7441de38fc9e814919db04bae3e917664vboxsync * - 50%: Force return to ring-3 ASAP.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * For ring-3 there is a little problem wrt to the recompiler, so:
7af218a7441de38fc9e814919db04bae3e917664vboxsync * - 75%: Set FF.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * - 50%: Try allocate pages; on failure we'll force REM to quit ASAP.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * The basic idea is that we should be able to get out of any situation with
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * only 50% of handy pages remaining.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * At the moment we'll not adjust the number of handy pages relative to the
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * actual VM RAM commitment, that's too much work for now.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync Assert(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages));
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync || pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 2 /* 50% */
20593760b116c90f3e439552763eef632a3bbb17vboxsync Log(("PGM: cHandyPages=%u out of %u -> allocate more\n", pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync /** @todo call PGMR0PhysAllocateHandyPages directly - need to make sure we can call kernel code first and deal with the seeding fallback. */
20593760b116c90f3e439552763eef632a3bbb17vboxsync int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
20593760b116c90f3e439552763eef632a3bbb17vboxsync int rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync Assert(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages));
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync else if (pVM->pgm.s.cHandyPages - 1 <= (RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 4) * 3) /* 75% */
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync if (pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 2)
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * Replace a zero or shared page with new page that we can write to.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @returns The following VBox status codes.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @retval VINF_SUCCESS on success, pPage is modified.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @param pVM The VM address.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @param pPage The physical page tracking structure. This will
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * be modified on success.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @param GCPhys The address of the page.
9939c713bffcfc4305d99d994552aa2ad9bce097vboxsync * @remarks Must be called from within the PGM critical section. It may
aa4bcf0a4b2db3ac352b56a291d49cb8d4b66d32vboxsync * nip back to ring-3/0 in some cases.
db55f7b1060a6a72704b5369a8e776c59e5e4f64vboxsync * @remarks This function shouldn't really fail, however if it does
4e47bb772df0d04d1ded3e06354de547d52e2d06vboxsync * it probably means we've screwed up the size of the amount
4e47bb772df0d04d1ded3e06354de547d52e2d06vboxsync * and/or the low-water mark of handy pages. Or, that some
db55f7b1060a6a72704b5369a8e776c59e5e4f64vboxsync * device I/O is causing a lot of pages to be allocated while
4e47bb772df0d04d1ded3e06354de547d52e2d06vboxsync * the host is in a low-memory condition.
db55f7b1060a6a72704b5369a8e776c59e5e4f64vboxsyncint pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
db55f7b1060a6a72704b5369a8e776c59e5e4f64vboxsync * Ensure that we've got a page handy, take it and use it.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%d %RGp\n", PGM_PAGE_GET_STATE(pPage), GCPhys));
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync Assert(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages));
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * There are one or two actions to be taken the next time we allocate handy pages:
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * - Tell the GMM (global memory manager) what the page is being used for.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * (Speeds up replacement operations - sharing and defragmenting.)
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * - If the current backing is shared, it must be freed.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys;
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageReplaceShared));
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync/** @todo err.. what about copying the page content? */
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync/** @todo verify that the handy page is zero! */
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * Do the PGMPAGE modifications.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @returns VBox status code.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @retval VINF_SUCCESS on success.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @param pVM The VM address.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @param pPage The physical page tracking structure.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @param GCPhys The address of the page.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @remarks Called from within the PGM critical section.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsyncint pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync /* fall thru */
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync default: /* to shut up GCC */
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * Zero pages can be dummy pages for MMIO or reserved memory,
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * so we need to check the flags before joining cause with
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * shared page replacement.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync /* fall thru */
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * Maps a page into the current virtual address space so it can be accessed.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @returns VBox status code.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @retval VINF_SUCCESS on success.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @param pVM The VM address.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @param pPage The physical page tracking structure.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @param GCPhys The address of the page.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @param ppMap Where to store the address of the mapping tracking structure.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @param ppv Where to store the mapping address of the page. The page
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * offset is masked off!
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * @remarks Called from within the PGM critical section.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsyncint pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync * Just some sketchy GC/R0-darwin code.
c58c758d3642ac45d3f12356c406c631fcd8f538vboxsync#else /* IN_RING3 || IN_RING0 */
9939c713bffcfc4305d99d994552aa2ad9bce097vboxsync * Find/make Chunk TLB entry for the mapping chunk.
4e47bb772df0d04d1ded3e06354de547d52e2d06vboxsync const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * Find the chunk, map it if necessary.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_MAP_CHUNK, idChunk);
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * Enter it into the Chunk TLB.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync#endif /* IN_RING3 */
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync#if !defined(IN_GC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * Load a guest page into the ring-3 physical TLB.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * @returns VBox status code.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * @retval VINF_SUCCESS on success
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * @param pPGM The PGM instance pointer.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * @param GCPhys The guest physical address in question.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsyncint pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * Find the ram range.
72a6fe3989272cb2d409b50caca25e1edbca9398vboxsync * 99.8% of requests are expected to be in the first range.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * Map the page.
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * Make a special case for the zero page as it is kind of special.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync int rc = pgmPhysPageMap(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
de6e321f351aa489a6a62bed474390a0056e8093vboxsync Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync#endif /* !IN_GC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * Requests the mapping of a guest page into the current context.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * This API should only be used for very short term, as it will consume
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * scarce resources (R0 and GC) in the mapping cache. When you're done
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * This API will assume your intention is to write to the page, and will
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * therefore replace shared and zero pages. If you do not intend to modify
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @returns VBox status code.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @retval VINF_SUCCESS on success.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @param pVM The VM handle.
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * @param GCPhys The guest physical address of the page that should be mapped.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @param ppv Where to store the address corresponding to GCPhys.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * @remark Avoid calling this API from within critical sections (other than
aa4bcf0a4b2db3ac352b56a291d49cb8d4b66d32vboxsync * the PGM one) because of the deadlock risk.
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * @thread Any thread.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsyncVMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync# if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync/** @todo this needs to be fixed, it really ain't right. */
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync /* Until a physical TLB is implemented for GC or/and R0-darwin, let PGMDynMapGCPageEx handle it. */
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * Query the Physical TLB entry for the page (may fail).
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync int rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * If the page is shared, the zero page, or being write monitored
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync * it must be converted to a page that's writable if possible.
aa4bcf0a4b2db3ac352b56a291d49cb8d4b66d32vboxsync if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync /** @todo stuff is missing here! */
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * Now, just perform the locking and calculate the return address.
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
172ae196da38208e5f1e3485715a89f2d53c6880vboxsync if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync AssertMsgFailed(("%VGp is entering permanent locked state!\n", GCPhys));
aa4bcf0a4b2db3ac352b56a291d49cb8d4b66d32vboxsync pMap->cRefs++; /* Extra ref to prevent it from going away. */
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync#endif /* IN_RING3 || IN_RING0 */
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * Temporary fallback code.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync# if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync/** @todo @bugref{3202}: check up this path. */
ad27e1d5e48ca41245120c331cc88b50464813cevboxsync * Requests the mapping of a guest page into the current context.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * This API should only be used for very short term, as it will consume
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * scarce resources (R0 and GC) in the mapping cache. When you're done
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * @returns VBox status code.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @retval VINF_SUCCESS on success.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * @param pVM The VM handle.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @param GCPhys The guest physical address of the page that should be mapped.
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * @param ppv Where to store the address corresponding to GCPhys.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @remark Avoid calling this API from within critical sections (other than
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * the PGM one) because of the deadlock risk.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @thread Any thread.
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsyncVMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync /** @todo implement this */
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync return PGMPhysGCPhys2CCPtr(pVM, GCPhys, (void **)ppv, pLock);
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * Requests the mapping of a guest page given by virtual address into the current context.
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * This API should only be used for very short term, as it will consume
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * scarce resources (R0 and GC) in the mapping cache. When you're done
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * This API will assume your intention is to write to the page, and will
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * therefore replace shared and zero pages. If you do not intend to modify
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * @returns VBox status code.
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * @retval VINF_SUCCESS on success.
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * @param pVM The VM handle.
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * @param GCPhys The guest physical address of the page that should be mapped.
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * @param ppv Where to store the address corresponding to GCPhys.
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * @remark Avoid calling this API from within critical sections (other than
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * the PGM one) because of the deadlock risk.
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * @thread EMT
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsyncVMMDECL(int) PGMPhysGCPtr2CCPtr(PVM pVM, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
bbb4c0bfd5ea55e99591d8811771257a437053eevboxsync * Requests the mapping of a guest page given by virtual address into the current context.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * This API should only be used for very short term, as it will consume
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * scarse resources (R0 and GC) in the mapping cache. When you're done
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @returns VBox status code.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @retval VINF_SUCCESS on success.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @retval VERR_PGM_PHYS_PAGE_RESERVED it it's a valid page but has no physical backing.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @param pVM The VM handle.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @param GCPhys The guest physical address of the page that should be mapped.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @param ppv Where to store the address corresponding to GCPhys.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @remark Avoid calling this API from within critical sections (other than
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * the PGM one) because of the deadlock risk.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @thread EMT
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsyncVMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVM pVM, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, pLock);
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * Release the mapping of a guest page.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * This is the counter part of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @param pVM The VM handle.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @param pLock The lock structure initialized by the mapping function.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsyncVMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync /* currently nothing to do here. */
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync/* --- postponed
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync#elif defined(IN_RING0)
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync#else /* IN_RING3 */
aa4bcf0a4b2db3ac352b56a291d49cb8d4b66d32vboxsync PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pLock->pvChunk;
ad27e1d5e48ca41245120c331cc88b50464813cevboxsync#endif /* IN_RING3 */
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * Converts a GC physical address to a HC pointer.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @returns VINF_SUCCESS on success.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @returns VERR_PGM_PHYS_PAGE_RESERVED it it's a valid GC physical
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * page but has no physical backing.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * GC physical address.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * a dynamic ram chunk boundary
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @param pVM The VM handle.
aa4bcf0a4b2db3ac352b56a291d49cb8d4b66d32vboxsync * @param GCPhys The GC physical address to convert.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @param cbRange Physical range
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @param pHCPtr Where to store the HC pointer on success.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsyncVMMDECL(int) PGMPhysGCPhys2HCPtr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTHCPTR pHCPtr) /** @todo @bugref{1865}: HCPtr -> R3Ptr */
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync if ((GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK) != ((GCPhys+cbRange-1) & PGM_DYNAMIC_CHUNK_BASE_MASK))
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync AssertMsgFailed(("%VGp - %VGp crosses a chunk boundary!!\n", GCPhys, GCPhys+cbRange));
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync LogRel(("PGMPhysGCPhys2HCPtr %VGp - %VGp crosses a chunk boundary!!\n", GCPhys, GCPhys+cbRange));
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync AssertMsgFailed(("%VGp - %VGp crosses a chunk boundary!!\n", GCPhys, GCPhys + cbRange));
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync unsigned iChunk = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) /* ASSUMES this is a rare occurence */
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync PRTR3UINTPTR paChunkR3Ptrs = (PRTR3UINTPTR)MMHyperR3ToCC(pVM, pRam->paChunkR3Ptrs);
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync *pHCPtr = (RTHCPTR)(paChunkR3Ptrs[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync *pHCPtr = (RTHCPTR)(pRam->paChunkR3Ptrs[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync *pHCPtr = (RTHCPTR)((RTR3UINTPTR)pRam->pvR3 + off);
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * PGMPhysGCPhys2HCPtr convenience for use with assertions.
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * @returns The HCPtr, NIL_RTHCPTR on failure.
aa4bcf0a4b2db3ac352b56a291d49cb8d4b66d32vboxsync * @param pVM The VM handle.
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * @param GCPhys The GC Physical addresss.
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * @param cbRange Physical range.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsyncVMMDECL(RTHCPTR) PGMPhysGCPhys2HCPtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange) /** @todo @bugref{1865}: HCPtr -> R3Ptr */
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync int rc = PGMPhysGCPhys2HCPtr(pVM, GCPhys, cbRange, &HCPtr);
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * Converts a guest pointer to a GC physical address.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * This uses the current CR3/CR0/CR4 of the guest.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @returns VBox status code.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @param pVM The VM Handle
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsync * @param GCPtr The guest pointer to convert.
aa4bcf0a4b2db3ac352b56a291d49cb8d4b66d32vboxsync * @param pGCPhys Where to store the GC physical address.
c0e27f622f9bd6d9e77d2d959aab71d69dabf0d3vboxsyncVMMDECL(int) PGMPhysGCPtr2GCPhys(PVM pVM, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * Converts a guest pointer to a HC physical address.
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * This uses the current CR3/CR0/CR4 of the guest.
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * @returns VBox status code.
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * @param pVM The VM Handle
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * @param GCPtr The guest pointer to convert.
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * @param pHCPhys Where to store the HC physical address.
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsyncVMMDECL(int) PGMPhysGCPtr2HCPhys(PVM pVM, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
a1df400bbe9d64aad400442e56eb637019300a5evboxsync int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
a1df400bbe9d64aad400442e56eb637019300a5evboxsync rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * Converts a guest pointer to a HC pointer.
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * This uses the current CR3/CR0/CR4 of the guest.
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * @returns VBox status code.
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * @param pVM The VM Handle
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * @param GCPtr The guest pointer to convert.
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * @param pHCPtr Where to store the HC virtual address.
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsyncVMMDECL(int) PGMPhysGCPtr2HCPtr(PVM pVM, RTGCPTR GCPtr, PRTHCPTR pHCPtr) /** @todo @bugref{1865}: HCPtr -> R3Ptr */
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
a1df400bbe9d64aad400442e56eb637019300a5evboxsync int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync rc = PGMPhysGCPhys2HCPtr(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * Converts a guest virtual address to a HC pointer by specfied CR3 and flags.
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * @returns VBox status code.
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * @param pVM The VM Handle
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * @param GCPtr The guest pointer to convert.
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * @param cr3 The guest CR3.
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * @param fFlags Flags used for interpreting the PD correctly: X86_CR4_PSE and X86_CR4_PAE
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * @param pHCPtr Where to store the HC pointer.
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * @remark This function is used by the REM at a time where PGM could
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * potentially not be in sync. It could also be used by a
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * future DBGF API to cpu state independent conversions.
044af0d1e6474076366759db86f101778c5f20ccvboxsyncVMMDECL(int) PGMPhysGCPtr2HCPtrByGstCR3(PVM pVM, RTGCPTR GCPtr, uint64_t cr3, unsigned fFlags, PRTHCPTR pHCPtr) /** @todo @bugref{1865}: HCPtr -> R3Ptr */
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * PAE or 32-bit?
a1df400bbe9d64aad400442e56eb637019300a5evboxsync rc = PGM_GCPHYS_2_PTR(pVM, cr3 & X86_CR3_PAGE_MASK, &pPD);
a1df400bbe9d64aad400442e56eb637019300a5evboxsync X86PDE Pde = pPD->a[(RTGCUINTPTR)GCPtr >> X86_PD_SHIFT];
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync { /* (big page) */
a1df400bbe9d64aad400442e56eb637019300a5evboxsync rc = PGMPhysGCPhys2HCPtr(pVM, pgmGstGet4MBPhysPage(&pVM->pgm.s, Pde) | ((RTGCUINTPTR)GCPtr & X86_PAGE_4M_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
a1df400bbe9d64aad400442e56eb637019300a5evboxsync { /* (normal page) */
a1df400bbe9d64aad400442e56eb637019300a5evboxsync rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & X86_PDE_PG_MASK, &pPT);
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync X86PTE Pte = pPT->a[((RTGCUINTPTR)GCPtr >> X86_PT_SHIFT) & X86_PT_MASK];
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync return PGMPhysGCPhys2HCPtr(pVM, (Pte.u & X86_PTE_PG_MASK) | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
a1df400bbe9d64aad400442e56eb637019300a5evboxsync /** @todo long mode! */
a1df400bbe9d64aad400442e56eb637019300a5evboxsync rc = PGM_GCPHYS_2_PTR(pVM, cr3 & X86_CR3_PAE_PAGE_MASK, &pPdpt);
a1df400bbe9d64aad400442e56eb637019300a5evboxsync X86PDPE Pdpe = pPdpt->a[((RTGCUINTPTR)GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
a1df400bbe9d64aad400442e56eb637019300a5evboxsync rc = PGM_GCPHYS_2_PTR(pVM, Pdpe.u & X86_PDPE_PG_MASK, &pPD);
a1df400bbe9d64aad400442e56eb637019300a5evboxsync X86PDEPAE Pde = pPD->a[((RTGCUINTPTR)GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK];
a1df400bbe9d64aad400442e56eb637019300a5evboxsync { /* (big page) */
a1df400bbe9d64aad400442e56eb637019300a5evboxsync rc = PGMPhysGCPhys2HCPtr(pVM, (Pde.u & X86_PDE2M_PAE_PG_MASK) | ((RTGCUINTPTR)GCPtr & X86_PAGE_2M_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
a1df400bbe9d64aad400442e56eb637019300a5evboxsync { /* (normal page) */
a1df400bbe9d64aad400442e56eb637019300a5evboxsync rc = PGM_GCPHYS_2_PTR(pVM, (Pde.u & X86_PDE_PAE_PG_MASK), &pPT);
a1df400bbe9d64aad400442e56eb637019300a5evboxsync X86PTEPAE Pte = pPT->a[((RTGCUINTPTR)GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK];
a1df400bbe9d64aad400442e56eb637019300a5evboxsync return PGMPhysGCPhys2HCPtr(pVM, (Pte.u & X86_PTE_PAE_PG_MASK) | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
aa4bcf0a4b2db3ac352b56a291d49cb8d4b66d32vboxsync * Cache PGMPhys memory access
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * @param pVM VM Handle.
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * @param pCache Cache structure pointer
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * @param GCPhys GC physical address
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * @param pbHC HC pointer corresponding to physical page
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * @thread EMT.
a1df400bbe9d64aad400442e56eb637019300a5evboxsyncstatic void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
a1df400bbe9d64aad400442e56eb637019300a5evboxsync iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync * Read physical memory.
2d8870843ff566fee9bd3a6a5942414254106479vboxsync * This API respects access handlers and MMIO. Use PGMPhysReadGCPhys() if you
2d8870843ff566fee9bd3a6a5942414254106479vboxsync * want to ignore those.
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * @param pVM VM Handle.
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * @param GCPhys Physical address start reading from.
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * @param pvBuf Where to put the read bits.
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * @param cbRead How many bytes to read.
a1df400bbe9d64aad400442e56eb637019300a5evboxsyncVMMDECL(void) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
a1df400bbe9d64aad400442e56eb637019300a5evboxsync bool fGrabbedLock = false;
a1df400bbe9d64aad400442e56eb637019300a5evboxsync AssertMsg(cbRead > 0, ("don't even think about reading zero bytes!\n"));
a1df400bbe9d64aad400442e56eb637019300a5evboxsync LogFlow(("PGMPhysRead: %VGp %d\n", GCPhys, cbRead));
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * Copy loop on ram ranges.
a1df400bbe9d64aad400442e56eb637019300a5evboxsync PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync /* Find range. */
a1df400bbe9d64aad400442e56eb637019300a5evboxsync /* Inside range or not? */
ad27e1d5e48ca41245120c331cc88b50464813cevboxsync * Must work our way thru this page by page.
a1df400bbe9d64aad400442e56eb637019300a5evboxsync /* Physical chunk in dynamically allocated range not present? */
a1df400bbe9d64aad400442e56eb637019300a5evboxsync /* Treat it as reserved; return zeros */
a1df400bbe9d64aad400442e56eb637019300a5evboxsync /* temp hacks, will be reorganized. */
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * Physical handler.
a1df400bbe9d64aad400442e56eb637019300a5evboxsync else if ( RT_UNLIKELY(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) >= PGM_PAGE_HNDL_PHYS_STATE_ALL)
2d8870843ff566fee9bd3a6a5942414254106479vboxsync && !(pPage->HCPhys & MM_RAM_FLAGS_MMIO)) /// @todo PAGE FLAGS
a1df400bbe9d64aad400442e56eb637019300a5evboxsync#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
a1df400bbe9d64aad400442e56eb637019300a5evboxsync /* find and call the handler */
a1df400bbe9d64aad400442e56eb637019300a5evboxsync PPGMPHYSHANDLER pNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesR3->PhysHandlers, GCPhys);
aa4bcf0a4b2db3ac352b56a291d49cb8d4b66d32vboxsync /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync rc = pNode->pfnHandlerR3(pVM, GCPhys, pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pNode->pvUserR3);
a1df400bbe9d64aad400442e56eb637019300a5evboxsync#endif /* IN_RING3 */
a1df400bbe9d64aad400442e56eb637019300a5evboxsync#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
a1df400bbe9d64aad400442e56eb637019300a5evboxsync PGMDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK), &pvSrc);
a1df400bbe9d64aad400442e56eb637019300a5evboxsync * Virtual handlers.
a1df400bbe9d64aad400442e56eb637019300a5evboxsync else if ( RT_UNLIKELY(PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) >= PGM_PAGE_HNDL_VIRT_STATE_ALL)
a1df400bbe9d64aad400442e56eb637019300a5evboxsync && !(pPage->HCPhys & MM_RAM_FLAGS_MMIO)) /// @todo PAGE FLAGS
a1df400bbe9d64aad400442e56eb637019300a5evboxsync#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
a1df400bbe9d64aad400442e56eb637019300a5evboxsync /* Search the whole tree for matching physical addresses (rather expensive!) */
a1df400bbe9d64aad400442e56eb637019300a5evboxsync int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pNode, &iPage);
a1df400bbe9d64aad400442e56eb637019300a5evboxsync RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pNode->Core.Key & PAGE_BASE_GC_MASK)
a1df400bbe9d64aad400442e56eb637019300a5evboxsync + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);
a1df400bbe9d64aad400442e56eb637019300a5evboxsync /* Note: Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
a1df400bbe9d64aad400442e56eb637019300a5evboxsync rc = pNode->pfnHandlerR3(pVM, (RTGCPTR)GCPtr, pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, 0);
a1df400bbe9d64aad400442e56eb637019300a5evboxsync#endif /* IN_RING3 */
a1df400bbe9d64aad400442e56eb637019300a5evboxsync#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
a1df400bbe9d64aad400442e56eb637019300a5evboxsync PGMDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK), &pvSrc);
a1df400bbe9d64aad400442e56eb637019300a5evboxsync switch (pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_ROM)) /** @todo PAGE FLAGS */
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync * Normal memory or ROM.
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync //case MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2: /* = shadow */ - //MMIO2 isn't in the mask.
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync case MM_RAM_FLAGS_MMIO2: // MMIO2 isn't in the mask.
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync PGMDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK), &pvSrc);
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync#if defined(IN_RING3) && defined(PGM_PHYSMEMACCESS_CACHING)
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync pgmPhysCacheAdd(pVM, &pVM->pgm.s.pgmphysreadcache, GCPhys, (uint8_t*)pvSrc);
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync#endif /* IN_RING3 && PGM_PHYSMEMACCESS_CACHING */
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * All reserved, nothing there.
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync * The rest needs to be taken more carefully.
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync#if 1 /** @todo r=bird: Can you do this properly please. */
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync /** @todo Try MMIO; quick hack */
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync if (cbRead <= 8 && IOMMMIORead(pVM, GCPhys, (uint32_t *)pvBuf, cbRead) == VINF_SUCCESS)
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync /** @todo fix me later. */
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync AssertReleaseMsgFailed(("Unknown read at %VGp size %d implement the complex physical reading case %x\n",
ca3da10d05961c339b5180fbd40a54587d6bad35vboxsync pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_ROM))); /** @todo PAGE FLAGS */
aa4bcf0a4b2db3ac352b56a291d49cb8d4b66d32vboxsync LogFlow(("PGMPhysRead: Unassigned %VGp size=%d\n", GCPhys, cbRead));
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync * Unassigned address space.
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync * Write to physical memory.
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync * This API respects access handlers and MMIO. Use PGMPhysReadGCPhys() if you
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync * want to ignore those.
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync * @param pVM VM Handle.
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync * @param GCPhys Physical address to write to.
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync * @param pvBuf What to write.
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync * @param cbWrite How many bytes to write.
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsyncVMMDECL(void) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync bool fGrabbedLock = false;
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync AssertMsg(cbWrite > 0, ("don't even think about writing zero bytes!\n"));
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync LogFlow(("PGMPhysWrite: %VGp %d\n", GCPhys, cbWrite));
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync * Copy loop on ram ranges.
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync /* Find range. */
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync /* Inside range or not? */
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync * Must work our way thru this page by page.
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync /* Physical chunk in dynamically allocated range not present? */
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync PGMPhysWrite(pVM, GCPhys, pvBuf, cbWrite); /* try again; can't assume pRam is still valid (paranoia) */
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync rc = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync /* temporary hack, will reogranize is later. */
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync * Virtual handlers
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync && !(pPage->HCPhys & MM_RAM_FLAGS_MMIO)) /// @todo PAGE FLAGS
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync * Physical write handler + virtual write handler.
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync * Consider this a quick workaround for the CSAM + shadow caching problem.
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync * We hand it to the shadow caching first since it requires the unchanged
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync * data. CSAM will have to put up with it already being changed.
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync /* 1. The physical handler */
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync PPGMPHYSHANDLER pPhysNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesR3->PhysHandlers, GCPhys);
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync size_t cbRange = pPhysNode->Core.KeyLast - GCPhys + 1;
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync /* Note! Dangerous assumption that R3 handlers don't do anything that really requires an EMT lock! */
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync rc = pPhysNode->pfnHandlerR3(pVM, GCPhys, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, pPhysNode->pvUserR3);
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync /* 2. The virtual handler (will see incorrect data) */
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirtNode, &iPage);
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync size_t cbRange = pVirtNode->Core.KeyLast - GCPhys + 1;
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirtNode->Core.Key & PAGE_BASE_GC_MASK)
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync /* Note! Dangerous assumption that R3 handlers don't do anything that really requires an EMT lock! */
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync rc2 = pVirtNode->pfnHandlerR3(pVM, (RTGCPTR)GCPtr, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, 0);
ad27e1d5e48ca41245120c331cc88b50464813cevboxsync#endif /* IN_RING3 */
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync PGMDynMapHCPageOff(pVM, PGM_PAGE_GET_HCPHYS(pPage) + (off & PAGE_OFFSET_MASK), &pvDst);
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync/** @todo deal with this in GC and R0! */
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync /* Search the whole tree for matching physical addresses (rather expensive!) */
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pNode, &iPage);
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pNode->Core.Key & PAGE_BASE_GC_MASK)
df8e6a449f00e1884fbf4a1fc67143614d7d528dvboxsync + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);
8b4a8db7768e94d025f1216ecfcd50d727fa2b7cvboxsync /* Note! Dangerous assumption that R3 handlers don't do anything that really requires an EMT lock! */
aa4bcf0a4b2db3ac352b56a291d49cb8d4b66d32vboxsync rc = pNode->pfnHandlerR3(pVM, (RTGCPTR)GCPtr, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, 0);
goto end;
goto end;
PPGMPHYSHANDLER pNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesR3->PhysHandlers, GCPhys);
/** @todo Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
rc = pNode->pfnHandlerR3(pVM, GCPhys, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, pNode->pvUserR3);
goto end;
goto end;
switch (pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)) /** @todo PAGE FLAGS */
case MM_RAM_FLAGS_MMIO2:
goto end;
case MM_RAM_FLAGS_RESERVED:
goto end;
goto end;
AssertReleaseMsgFailed(("Unknown write at %VGp size %d implement the complex physical writing case %x\n",
(pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)))); /** @todo PAGE FLAGS */
if ( !pRam
goto end;
end:
#ifdef IN_RING3
if (fGrabbedLock)
if (!cb)
return VINF_SUCCESS;
void const *pvSrc;
return rc;
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
if (!cb)
return VINF_SUCCESS;
pRam;
void *pvSrc;
return rc;
return VINF_SUCCESS;
return VINF_SUCCESS;
return VERR_PGM_PHYS_PAGE_RESERVED;
if (!cb)
return VINF_SUCCESS;
void *pvDst;
return rc;
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
if (!cb)
return VINF_SUCCESS;
pRam;
void *pvDst;
return rc;
return VINF_SUCCESS;
return VINF_SUCCESS;
return VERR_PGM_PHYS_PAGE_RESERVED;
if (!cb)
return VINF_SUCCESS;
void const *pvSrc;
return rc;
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
if (!cb)
return VINF_SUCCESS;
void *pvSrc;
return rc;
return VINF_SUCCESS;
void *pvSrc;
return rc;
return VINF_SUCCESS;
if (!cb)
return VINF_SUCCESS;
void *pvDst;
return rc;
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
if (!cb)
return VINF_SUCCESS;
void *pvDst;
return rc;
return VINF_SUCCESS;
void *pvDst;
return rc;
return VINF_SUCCESS;
if (!cb)
return VINF_SUCCESS;
void *pvDst;
return rc;
rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
return VINF_SUCCESS;
rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
return rc;
rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
return VINF_SUCCESS;
rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
if (!cb)
return VINF_SUCCESS;
void *pvDst;
return rc;
rc = PGMGstModifyPage(pVM, GCPtrDst, cb, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
return VINF_SUCCESS;
void *pvDst;
return rc;
return VINF_SUCCESS;
int rc;
if (!cb)
return VINF_SUCCESS;
return VINF_SUCCESS;
return VINF_SUCCESS;
int rc;
if (!cb)
return VINF_SUCCESS;
return VINF_SUCCESS;
return VINF_SUCCESS;
VMMDECL(int) PGMPhysInterpretedRead(PVM pVM, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
int rc;
void *pvSrc;
switch (rc)
case VINF_SUCCESS:
Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
return rc;
return VINF_SUCCESS;
void *pvSrc1;
switch (rc)
case VINF_SUCCESS:
return rc;
void *pvSrc2;
switch (rc)
case VINF_SUCCESS:
return rc;
return VINF_SUCCESS;
switch (rc)
case VINF_SUCCESS:
case VERR_PAGE_NOT_PRESENT:
return rc;