PGMAllPhys.cpp revision 7563bf2391ce1d3ac358dcdbaa608c1401d2a12c
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * PGM - Page Manager and Monitor, Physical Memory Addressing.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * Copyright (C) 2006-2007 Oracle Corporation
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * This file is part of VirtualBox Open Source Edition (OSE), as
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * available from http://www.virtualbox.org. This file is free software;
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * you can redistribute it and/or modify it under the terms of the GNU
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * General Public License (GPL) as published by the Free Software
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * Foundation, in version 2 as it comes in the "COPYING" file of the
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync/*******************************************************************************
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync* Header Files *
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync*******************************************************************************/
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync/*******************************************************************************
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync* Defined Constants And Macros *
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync*******************************************************************************/
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync/** Enable the physical TLB. */
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * \#PF Handler callback for physical memory accesses without a RC/R0 handler.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * This simply pushes everything to the HC handler.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @returns VBox status code (appropriate for trap handling and GC return).
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param pVM VM Handle.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param uErrorCode CPU Error code.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param pRegFrame Trap register frame.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param pvFault The fault address (cr2).
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param GCPhysFault The GC physical address corresponding to pvFault.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param pvUser User argument.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsyncVMMDECL(int) pgmPhysHandlerRedirectToHC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * \#PF Handler callback for Guest ROM range write access.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @returns VBox status code (appropriate for trap handling and GC return).
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param pVM VM Handle.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param uErrorCode CPU Error code.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param pRegFrame Trap register frame.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param pvFault The fault address (cr2).
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param GCPhysFault The GC physical address corresponding to pvFault.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param pvUser User argument. Pointer to the ROM range structure.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsyncVMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * If it's a simple instruction which doesn't change the cpu state
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * we will simply skip it. Otherwise we'll have to defer it to REM.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, pDis, &cbOp);
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync && pDis->mode == CPUMODE_32BIT /** @todo why does this matter? */
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync && !(pDis->prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync /** @todo Find other instructions we can safely skip, possibly
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * adding this kind of detection to DIS or EM. */
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteHandled);
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync /* Handle it in ring-3 because it's *way* easier there. */
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteUnhandled);
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync#endif /* IN_RING3 */
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * Checks if Address Gate 20 is enabled or not.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @returns true if enabled.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @returns false if disabled.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param pVCpu VMCPU handle.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * Validates a GC physical address.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @returns true if valid.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @returns false if invalid.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param pVM The VM handle.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param GCPhys The physical address to validate.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsyncVMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * Checks if a GC physical address is a normal page,
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * i.e. not ROM, MMIO or reserved.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @returns true if normal.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @returns false if invalid, ROM, MMIO or reserved page.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param pVM The VM handle.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param GCPhys The physical address to check.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsyncVMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * Converts a GC physical address to a HC physical address.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @returns VINF_SUCCESS on success.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * page but has no physical backing.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * GC physical address.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param pVM The VM handle.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param GCPhys The GC physical address to convert.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param pHCPhys Where to store the HC physical address on success.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsyncVMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * Invalidates all page mapping TLBs.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param pVM The VM handle.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync /* Clear the shared R0/R3 TLB completely. */
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync /** @todo clear the RC TLB whenever we add it. */
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * Invalidates a page mapping TLB entry
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param pVM The VM handle.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param GCPhys GCPhys entry to flush
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsyncVMMDECL(void) PGMPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync /* Clear the shared R0/R3 TLB entry. */
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync /** @todo clear the RC TLB whenever we add it. */
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * Makes sure that there is at least one handy page ready for use.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * This will also take the appropriate actions when reaching water-marks.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @returns VBox status code.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @retval VINF_SUCCESS on success.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @retval VERR_EM_NO_MEMORY if we're really out of memory.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @param pVM The VM handle.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * @remarks Must be called from within the PGM critical section. It may
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * nip back to ring-3/0 in some cases.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * Do we need to do anything special?
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync * Allocate pages only if we're out of them, or in ring-3, almost out.
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
069b9101fbd3b049610c5511b1cc9534d01ea472vboxsync Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
#ifdef IN_RING3
return rc;
return VERR_EM_NO_MEMORY;
#ifdef IN_RING3
#ifndef IN_RING3
Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
return VINF_SUCCESS;
AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
# ifdef PGM_WITH_LARGE_PAGES
return rc;
bool fFlushTLBs = false;
AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
if (fFlushTLBs)
return rc2;
AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
/* Mark this shared page for freeing/dereferencing. */
Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
if (pvSharedPage)
void *pvNewPage;
if ( fFlushTLBs
return rc;
#ifdef PGM_WITH_LARGE_PAGES
unsigned iPage;
|| PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ZERO) /* allocated, monitored or shared means we can't use a large page here */
LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_STATE(pPage), rc));
# ifdef IN_RING3
return VINF_SUCCESS;
/* If we fail once, it most likely means the host's memory is too fragmented; don't bother trying again. */
return rc;
LogFlow(("pgmPhysIsValidLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
LogFlow(("pgmPhysIsValidLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
return VINF_SUCCESS;
case PGM_PAGE_STATE_ALLOCATED:
return VINF_SUCCESS;
case PGM_PAGE_STATE_ZERO:
return VERR_PGM_PHYS_PAGE_RESERVED;
case PGM_PAGE_STATE_SHARED:
case PGM_PAGE_STATE_BALLOONED:
return VERR_PGM_PHYS_PAGE_BALLOONED;
if (!pMap)
# ifdef IN_RING0
return rc;
return VINF_SUCCESS;
static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
return VINF_SUCCESS;
AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
AssertMsgReturn(pRam || !pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
*ppv = (void *)((uintptr_t)pRam->pvR3 + (uintptr_t)((GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK) - pRam->GCPhys));
return VINF_SUCCESS;
if (!pMap)
#ifdef IN_RING0
return rc;
return VINF_SUCCESS;
return rc;
if (!pRam)
void *pv;
return rc;
#ifdef PGM_WITH_PHYS_TLB
return VINF_SUCCESS;
int rc;
return rc;
void *pv;
&pv
return rc;
return rc;
return VINF_SUCCESS;
void *pv;
&pv
return rc;
return rc;
return VINF_SUCCESS;
void *pv;
&pv
/* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
/** @todo : This can be dangerous if abused for more than one page; the ring-3 mapping is only valid for ranges that do NOT cross a chunk boundary. */
/* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
if (pMap)
if (cLocks == 0)
if (pMap)
return rc;
VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
void *pv;
&pv
/* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
/** @todo : This can be dangerous if abused for more than one page; the ring-3 mapping is only valid for ranges that do NOT cross a chunk boundary. */
/* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
if (pMap)
if (cLocks == 0)
AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
if (pMap)
return rc;
return rc;
VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
return rc;
if (fWriteLock)
if (pMap)
#ifndef DEBUG_sandervl
Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): dont use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
return rc;
#ifdef VBOX_STRICT
return R3Ptr;
return NIL_RTR3PTR;
return rc;
return rc;
#ifdef IN_RING3
AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
return VINF_SUCCESS;
#ifdef IN_RING3
Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
# ifdef VBOX_WITH_STATISTICS
if (pPhys)
AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
return VERR_PGM_PHYS_WR_HIT_HANDLER;
unsigned iPage;
Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
#ifdef IN_RING3
if (!pPhys)
Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
return VERR_PGM_PHYS_WR_HIT_HANDLER;
return rc;
return rc;
const void *pvSrc;
AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
return VINF_SUCCESS;
if (!pRam)
return VINF_SUCCESS;
static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
int rc;
if (pCur)
#ifndef IN_RING3
return VERR_PGM_PHYS_WR_HIT_HANDLER;
Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
# ifdef VBOX_WITH_STATISTICS
if (pCur)
AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
return VINF_SUCCESS;
unsigned iPage;
#ifndef IN_RING3
return VERR_PGM_PHYS_WR_HIT_HANDLER;
Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
return VINF_SUCCESS;
if (!pvDst)
unsigned iVirtPage = 0;
offVirt = 0;
offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
if ( pVirtPhys
fMoreVirt = false;
if (pPhys)
offPhys = 0;
if ( pPhys
fMorePhys = false;
Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
#ifdef IN_RING3
Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
# ifdef VBOX_WITH_STATISTICS
if (pPhys)
AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
return VERR_PGM_PHYS_WR_HIT_HANDLER;
#ifdef IN_RING3
Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
return VERR_PGM_PHYS_WR_HIT_HANDLER;
#ifdef IN_RING3
Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
# ifdef VBOX_WITH_STATISTICS
if (pPhys)
AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
return VERR_PGM_PHYS_WR_HIT_HANDLER;
return VINF_SUCCESS;
return rc;
void *pvDst;
return VINF_SUCCESS;
if (!pRam)
return VINF_SUCCESS;
if (!cb)
return VINF_SUCCESS;
void const *pvSrc;
return rc;
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
if (!cb)
return VINF_SUCCESS;
void *pvDst;
return rc;
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
if (!cb)
return VINF_SUCCESS;
/* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
void const *pvSrc;
return rc;
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
if (!cb)
return VINF_SUCCESS;
void *pvDst;
return rc;
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
if (!cb)
return VINF_SUCCESS;
void *pvDst;
return rc;
rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
return VINF_SUCCESS;
rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
return rc;
rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
return VINF_SUCCESS;
rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
int rc;
if (!cb)
return VINF_SUCCESS;
return rc;
int rc;
if (!cb)
return VINF_SUCCESS;
rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
return rc;
VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
int rc;
void *pvSrc;
switch (rc)
case VINF_SUCCESS:
Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
return rc;
return VINF_SUCCESS;
void *pvSrc1;
switch (rc)
case VINF_SUCCESS:
return rc;
void *pvSrc2;
switch (rc)
case VINF_SUCCESS:
return rc;
return VINF_SUCCESS;
switch (rc)
case VINF_SUCCESS:
case VERR_PAGE_NOT_PRESENT:
return rc;
VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
int rc;
const void *pvSrc;
switch (rc)
case VINF_SUCCESS:
return rc;
return VINF_SUCCESS;
const void *pvSrc;
switch (rc)
case VINF_SUCCESS:
return rc;
switch (rc)
case VINF_SUCCESS:
return rc;
return VINF_SUCCESS;
switch (rc)
case VINF_SUCCESS:
case VERR_PAGE_NOT_PRESENT:
return rc;
if (fRaiseTrap)
Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
return rc;
VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
int rc;
&& CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
void *pvDst;
switch (rc)
case VINF_SUCCESS:
return rc;
rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
return VINF_SUCCESS;
void *pvDst;
switch (rc)
case VINF_SUCCESS:
return rc;
switch (rc)
case VINF_SUCCESS:
return rc;
rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
return VINF_SUCCESS;
switch (rc)
case VINF_SUCCESS:
case VERR_ACCESS_DENIED:
case VERR_PAGE_NOT_PRESENT:
return rc;
if (fRaiseTrap)
Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
return rc;