PGMAllPhys.cpp revision af339f291411c3a74d52c1ea236ba9694154735b
5b281ba489ca18f0380d7efc7a5108b606cce449vboxsync * PGM - Page Manager and Monitor, Physical Memory Addressing.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * Copyright (C) 2006-2007 Oracle Corporation
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * This file is part of VirtualBox Open Source Edition (OSE), as
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * available from http://www.virtualbox.org. This file is free software;
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * you can redistribute it and/or modify it under the terms of the GNU
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * General Public License (GPL) as published by the Free Software
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * Foundation, in version 2 as it comes in the "COPYING" file of the
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync/*******************************************************************************
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync* Header Files *
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync*******************************************************************************/
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync/*******************************************************************************
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync* Defined Constants And Macros *
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync*******************************************************************************/
881b5ff6bc55e1fb0f4ef42f9782ccec79c0a138vboxsync/** Enable the physical TLB. */
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * \#PF Handler callback for physical memory accesses without a RC/R0 handler.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * This simply pushes everything to the HC handler.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @returns VBox status code (appropriate for trap handling and GC return).
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param pVM VM Handle.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param uErrorCode CPU Error code.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param pRegFrame Trap register frame.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param pvFault The fault address (cr2).
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param GCPhysFault The GC physical address corresponding to pvFault.
9621896680fea9b2078823e8ef2e64cec5bf2da0vboxsync * @param pvUser User argument.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsyncVMMDECL(int) pgmPhysHandlerRedirectToHC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * \#PF Handler callback for Guest ROM range write access.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @returns VBox status code (appropriate for trap handling and GC return).
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param pVM VM Handle.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param uErrorCode CPU Error code.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param pRegFrame Trap register frame.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param pvFault The fault address (cr2).
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param GCPhysFault The GC physical address corresponding to pvFault.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param pvUser User argument. Pointer to the ROM range structure.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsyncVMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * If it's a simple instruction which doesn't change the cpu state
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * we will simply skip it. Otherwise we'll have to defer it to REM.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, pDis, &cbOp);
590bfe12ce22cd3716448fbb9f4dc51664bfe5e2vboxsync && pDis->mode == CPUMODE_32BIT /** @todo why does this matter? */
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync && !(pDis->prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync /** @todo Find other instructions we can safely skip, possibly
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * adding this kind of detection to DIS or EM. */
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteHandled);
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync /* Handle it in ring-3 because it's *way* easier there. */
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteUnhandled);
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync#endif /* IN_RING3 */
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * Checks if Address Gate 20 is enabled or not.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @returns true if enabled.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @returns false if disabled.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param pVCpu VMCPU handle.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * Validates a GC physical address.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @returns true if valid.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @returns false if invalid.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param pVM The VM handle.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param GCPhys The physical address to validate.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsyncVMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * Checks if a GC physical address is a normal page,
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * i.e. not ROM, MMIO or reserved.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @returns true if normal.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @returns false if invalid, ROM, MMIO or reserved page.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param pVM The VM handle.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param GCPhys The physical address to check.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsyncVMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * Converts a GC physical address to a HC physical address.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @returns VINF_SUCCESS on success.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * page but has no physical backing.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * GC physical address.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param pVM The VM handle.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param GCPhys The GC physical address to convert.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param pHCPhys Where to store the HC physical address on success.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsyncVMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * Invalidates all page mapping TLBs.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param pVM The VM handle.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync STAM_COUNTER_INC(&pVM->pgm.s.StatPageMapTlbFlushes);
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync /* Clear the shared R0/R3 TLB completely. */
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync /* @todo clear the RC TLB whenever we add it. */
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * Invalidates a page mapping TLB entry
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param pVM The VM handle.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param GCPhys GCPhys entry to flush
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsyncVMMDECL(void) PGMPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync STAM_COUNTER_INC(&pVM->pgm.s.StatPageMapTlbFlushEntry);
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync /* Clear the shared R0/R3 TLB entry. */
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync /* @todo clear the RC TLB whenever we add it. */
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * Makes sure that there is at least one handy page ready for use.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * This will also take the appropriate actions when reaching water-marks.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @returns VBox status code.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @retval VINF_SUCCESS on success.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @retval VERR_EM_NO_MEMORY if we're really out of memory.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param pVM The VM handle.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @remarks Must be called from within the PGM critical section. It may
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * nip back to ring-3/0 in some cases.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * Do we need to do anything special?
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * Allocate pages only if we're out of them, or in ring-3, almost out.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
0f77dc54d7ec617480988ccdfcd080f480e79698vboxsync Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * Replace a zero or shared page with new page that we can write to.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @returns The following VBox status codes.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @retval VINF_SUCCESS on success, pPage is modified.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param pVM The VM address.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param pPage The physical page tracking structure. This will
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * be modified on success.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @param GCPhys The address of the page.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @remarks Must be called from within the PGM critical section. It may
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * nip back to ring-3/0 in some cases.
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * @remarks This function shouldn't really fail, however if it does
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * it probably means we've screwed up the size of handy pages and/or
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * the low-water mark. Or, that some device I/O is causing a lot of
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * pages to be allocated while the host is in a low-memory
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * condition. This latter should be handled elsewhere and in a more
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsync * controlled manner, it's on the @bugref{3170} todo list...
d1c5a03c19683c719b94496bb998fde2f2e5e622vboxsyncint pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
# ifdef PGM_WITH_LARGE_PAGES
return rc;
bool fFlushTLBs = false;
AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
if (fFlushTLBs)
return rc2;
AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
/* Mark this shared page for freeing/dereferencing. */
Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
if (pvSharedPage)
void *pvNewPage;
if ( fFlushTLBs
return rc;
#ifdef PGM_WITH_LARGE_PAGES
unsigned iPage;
|| PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ZERO) /* allocated, monitored or shared means we can't use a large page here */
LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_STATE(pPage), rc));
# ifdef IN_RING3
return VINF_SUCCESS;
/* If we fail once, it most likely means the host's memory is too fragmented; don't bother trying again. */
return rc;
LogFlow(("pgmPhysIsValidLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
LogFlow(("pgmPhysIsValidLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
return VINF_SUCCESS;
case PGM_PAGE_STATE_ALLOCATED:
return VINF_SUCCESS;
case PGM_PAGE_STATE_ZERO:
return VERR_PGM_PHYS_PAGE_RESERVED;
case PGM_PAGE_STATE_SHARED:
case PGM_PAGE_STATE_BALLOONED:
return VERR_PGM_PHYS_PAGE_BALLOONED;
#ifdef IN_RC
if (!pMap)
# ifdef IN_RING0
return rc;
return VINF_SUCCESS;
static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
return VINF_SUCCESS;
AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
AssertMsgReturn(pRam || !pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
*ppv = (void *)((uintptr_t)pRam->pvR3 + (uintptr_t)((GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK) - pRam->GCPhys));
return VINF_SUCCESS;
if (!pMap)
#ifdef IN_RING0
return rc;
return VINF_SUCCESS;
return rc;
if (!pRam)
void *pv;
return rc;
#ifdef PGM_WITH_PHYS_TLB
return VINF_SUCCESS;
void *pv;
return rc;
#ifdef PGM_WITH_PHYS_TLB
return VINF_SUCCESS;
int rc;
return rc;
return rc;
return VINF_SUCCESS;
*ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
return rc;
return VINF_SUCCESS;
*ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
if (pMap)
if (cLocks == 0)
if (pMap)
return rc;
VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
*ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
if (pMap)
if (cLocks == 0)
AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
if (pMap)
return rc;
return rc;
VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
return rc;
if (fWriteLock)
if (pMap)
#ifndef DEBUG_sandervl
Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): dont use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
return rc;
#ifdef VBOX_STRICT
return R3Ptr;
return NIL_RTR3PTR;
return rc;
return rc;
#ifdef IN_RING3
AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
return VINF_SUCCESS;
#ifdef IN_RING3
Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
# ifdef VBOX_WITH_STATISTICS
if (pPhys)
AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
return VERR_PGM_PHYS_WR_HIT_HANDLER;
unsigned iPage;
Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
#ifdef IN_RING3
if (!pPhys)
Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
return VERR_PGM_PHYS_WR_HIT_HANDLER;
return rc;
return rc;
const void *pvSrc;
AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
return VINF_SUCCESS;
if (!pRam)
return VINF_SUCCESS;
static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
int rc;
PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
if (pCur)
#ifndef IN_RING3
return VERR_PGM_PHYS_WR_HIT_HANDLER;
Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
# ifdef VBOX_WITH_STATISTICS
if (pCur)
AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
return VINF_SUCCESS;
unsigned iPage;
#ifndef IN_RING3
return VERR_PGM_PHYS_WR_HIT_HANDLER;
Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
return VINF_SUCCESS;
if (!pvDst)
unsigned iVirtPage = 0;
offVirt = 0;
offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
if ( pVirtPhys
fMoreVirt = false;
if (pPhys)
offPhys = 0;
if ( pPhys
fMorePhys = false;
Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
#ifdef IN_RING3
Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
# ifdef VBOX_WITH_STATISTICS
if (pPhys)
AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
return VERR_PGM_PHYS_WR_HIT_HANDLER;
#ifdef IN_RING3
Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
return VERR_PGM_PHYS_WR_HIT_HANDLER;
#ifdef IN_RING3
Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
# ifdef VBOX_WITH_STATISTICS
if (pPhys)
AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
return VERR_PGM_PHYS_WR_HIT_HANDLER;
return VINF_SUCCESS;
return rc;
void *pvDst;
return VINF_SUCCESS;
if (!pRam)
return VINF_SUCCESS;
if (!cb)
return VINF_SUCCESS;
void const *pvSrc;
return rc;
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
if (!cb)
return VINF_SUCCESS;
void *pvDst;
return rc;
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
if (!cb)
return VINF_SUCCESS;
/* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
void const *pvSrc;
return rc;
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
if (!cb)
return VINF_SUCCESS;
void *pvDst;
return rc;
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
if (!cb)
return VINF_SUCCESS;
void *pvDst;
return rc;
rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
return VINF_SUCCESS;
rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
return rc;
rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
return VINF_SUCCESS;
rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
int rc;
if (!cb)
return VINF_SUCCESS;
return rc;
int rc;
if (!cb)
return VINF_SUCCESS;
rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
return rc;
VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
int rc;
void *pvSrc;
switch (rc)
case VINF_SUCCESS:
Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
return rc;
return VINF_SUCCESS;
void *pvSrc1;
switch (rc)
case VINF_SUCCESS:
return rc;
void *pvSrc2;
switch (rc)
case VINF_SUCCESS:
return rc;
return VINF_SUCCESS;
switch (rc)
case VINF_SUCCESS:
case VERR_PAGE_NOT_PRESENT:
return rc;
VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
int rc;
const void *pvSrc;
switch (rc)
case VINF_SUCCESS:
return rc;
return VINF_SUCCESS;
const void *pvSrc;
switch (rc)
case VINF_SUCCESS:
return rc;
switch (rc)
case VINF_SUCCESS:
return rc;
return VINF_SUCCESS;
switch (rc)
case VINF_SUCCESS:
case VERR_PAGE_NOT_PRESENT:
return rc;
if (fRaiseTrap)
Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
return rc;
VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
int rc;
&& CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
void *pvDst;
switch (rc)
case VINF_SUCCESS:
return rc;
rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
return VINF_SUCCESS;
void *pvDst;
switch (rc)
case VINF_SUCCESS:
return rc;
switch (rc)
case VINF_SUCCESS:
return rc;
rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
return VINF_SUCCESS;
switch (rc)
case VINF_SUCCESS:
case VERR_ACCESS_DENIED:
case VERR_PAGE_NOT_PRESENT:
return rc;
if (fRaiseTrap)
Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
return rc;