PGMPhys.cpp revision faeaad5b5e989170d6fe2fa87dfb03bf53bbc1e6
 * PGM - Page Manager and Monitor, Physical Memory Addressing.
 * Copyright (C) 2006-2007 Oracle Corporation
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of pages to free in one batch. */
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
static int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys);
 * PGMR3PhysReadU8-64
 * PGMR3PhysWriteU8-64
 * EMT worker for PGMR3PhysReadExternal.
static DECLCALLBACK(int) pgmR3PhysReadExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, void *pvBuf, size_t cbRead)
 * Read from physical memory, external users.
 * @returns VBox status code.
 * @retval VINF_SUCCESS.
 * @param pVM VM Handle.
 * @param GCPhys Physical address to read from.
 * @param pvBuf Where to put the bits that are read.
 * @param cbRead How many bytes to read.
 * @thread Any but EMTs.
VMMR3DECL(int) PGMR3PhysReadExternal(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
 LogFlow(("PGMR3PhysReadExternal: %RGp %d\n", GCPhys, cbRead));
 * Copy loop on ram ranges.
 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
 /* Find range. */
 /* Inside range or not? */
 * Must work our way through this page by page.
 * If the page has an ALL access handler, we'll have to
 * delegate the job to EMT.
 return VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysReadExternalEMT, 4,
 * Simple stuff, go ahead.
 const void *pvSrc;
 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
 /* next page */
 } /* walk pages in ram range. */
 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
 * Unassigned address space.
 } /* Ram range walk */
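/*
 * Usage sketch (editorial addition, not part of the original file): how a
 * non-EMT thread might read guest physical memory through
 * PGMR3PhysReadExternal. Only the call and its signature come from this file;
 * the function, buffer and GCPhysSrc below are hypothetical.
 */
#if 0 /* example only */
static int exampleReadGuestHeader(PVM pVM, RTGCPHYS GCPhysSrc)
{
    uint8_t abHdr[64];
    /* Pages covered by an ALL access handler are read via an EMT request
       internally, so calling this from any thread but an EMT is fine. */
    int rc = PGMR3PhysReadExternal(pVM, GCPhysSrc, abHdr, sizeof(abHdr));
    if (RT_FAILURE(rc))
        return rc;
    /* ... parse abHdr ... */
    return VINF_SUCCESS;
}
#endif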
 * EMT worker for PGMR3PhysWriteExternal.
static DECLCALLBACK(int) pgmR3PhysWriteExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, const void *pvBuf, size_t cbWrite)
 /** @todo VERR_EM_NO_MEMORY */
 * Write to physical memory, external users.
 * @returns VBox status code.
 * @retval VINF_SUCCESS.
 * @retval VERR_EM_NO_MEMORY.
 * @param pVM VM Handle.
 * @param GCPhys Physical address to write to.
 * @param pvBuf What to write.
 * @param cbWrite How many bytes to write.
 * @param pszWho Who is writing. For tracking down who is writing
 * after we've saved the state.
 * @thread Any but EMTs.
VMMDECL(int) PGMR3PhysWriteExternal(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, const char *pszWho)
 ("Calling PGMR3PhysWriteExternal after pgmR3Save()! GCPhys=%RGp cbWrite=%#x pszWho=%s\n",
 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
 LogFlow(("PGMR3PhysWriteExternal: %RGp %d\n", GCPhys, cbWrite));
 * Copy loop on ram ranges, stop when we hit something difficult.
 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
 /* Find range. */
 /* Inside range or not? */
 * Must work our way through this page by page.
 * If the page is problematic, we have to do the work on the EMT.
 * Allocating writable pages and access handlers are
 * problematic, write monitored pages are simple and can be
 * dealt with here.
 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
 if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
 return VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysWriteExternalEMT, 4,
 * Simple stuff, go ahead.
 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
 /* next page */
 } /* walk pages in ram range */
 * Unassigned address space, skip it.
 } /* Ram range walk */
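/*
 * Usage sketch (editorial addition): writing guest physical memory from a
 * non-EMT thread with PGMR3PhysWriteExternal. The pszWho tag exists to track
 * down writers after the state has been saved. The device name, function and
 * GCPhysDst below are hypothetical.
 */
#if 0 /* example only */
static int exampleWriteCompletionRecord(PVM pVM, RTGCPHYS GCPhysDst, uint32_t uStatus)
{
    /* Write-monitored pages are handled in place; trickier pages are
       delegated to an EMT by the API itself. */
    return PGMR3PhysWriteExternal(pVM, GCPhysDst, &uStatus, sizeof(uStatus), "exampleDevice");
}
#endif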
 * VMR3ReqCall worker for PGMR3PhysGCPhys2CCPtrExternal to make pages writable.
 * @returns see PGMR3PhysGCPhys2CCPtrExternal
 * @param pVM The VM handle.
 * @param pGCPhys Pointer to the guest physical address.
 * @param ppv Where to store the mapping address.
 * @param pLock Where to store the lock.
static DECLCALLBACK(int) pgmR3PhysGCPhys2CCPtrDelegated(PVM pVM, PRTGCPHYS pGCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
 * Just hand it to PGMPhysGCPhys2CCPtr and check that it's not a page with
 * an access handler after it succeeds.
 rc = PGMPhysGCPhys2CCPtr(pVM, *pGCPhys, ppv, pLock);
 int rc2 = pgmPhysPageQueryTlbe(&pVM->pgm.s, *pGCPhys, &pTlbe);
 /* We *must* flush any corresponding pgm pool page here, otherwise we'll
 * not be informed about writes and keep bogus gst->shw mappings around.
 /** @todo r=bird: return VERR_PGM_PHYS_PAGE_RESERVED here if it still has
 * active handlers, see the PGMR3PhysGCPhys2CCPtrExternal docs. */
 * Requests the mapping of a guest page into ring-3, external threads.
 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
 * release it.
 * This API will assume your intention is to write to the page, and will
 * therefore replace shared and zero pages. If you do not intend to modify the
 * page, use the PGMR3PhysGCPhys2CCPtrReadOnlyExternal() API.
 * @returns VBox status code.
 * @retval VINF_SUCCESS on success.
 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
 * backing or if the page has any active access handlers. The caller
 * must fall back on using PGMR3PhysWriteExternal.
 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 * @param pVM The VM handle.
 * @param GCPhys The guest physical address of the page that should be mapped.
 * @param ppv Where to store the address corresponding to GCPhys.
 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
 * @remark Avoid calling this API from within critical sections (other than the
 * PGM one) because of the deadlock risk when we have to delegate the
 * task to an EMT.
 * @thread Any.
VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrExternal(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
 * Query the Physical TLB entry for the page (may fail).
 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
 * If the page is shared, the zero page, or being write monitored
 * it must be converted to a page that's writable if possible.
 * We can only deal with write monitored pages here, the rest have
 * to be handled on an EMT.
 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
 if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
 return VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4,
 * Now, just perform the locking and calculate the return address.
 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
 pMap->cRefs++; /* Extra ref to prevent it from going away. */
 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
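/*
 * Usage sketch (editorial addition): mapping a guest page for writing from an
 * external thread and releasing the lock again, with the fall back to
 * PGMR3PhysWriteExternal that the documentation above requires. All names in
 * the example are hypothetical.
 */
#if 0 /* example only */
static int exampleWriteViaMapping(PVM pVM, RTGCPHYS GCPhys, const void *pvSrc, size_t cb)
{
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMR3PhysGCPhys2CCPtrExternal(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        memcpy(pv, pvSrc, RT_MIN(cb, PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK)));
        PGMPhysReleasePageMappingLock(pVM, &Lock); /* release ASAP */
        return VINF_SUCCESS;
    }
    if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        return PGMR3PhysWriteExternal(pVM, GCPhys, pvSrc, cb, "exampleWriteViaMapping");
    return rc;
}
#endif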
 * Requests the mapping of a guest page into ring-3, external threads.
 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
 * release it.
 * @returns VBox status code.
 * @retval VINF_SUCCESS on success.
 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
 * backing or if the page has an active ALL access handler. The caller
 * must fall back on using PGMPhysRead.
 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 * @param pVM The VM handle.
 * @param GCPhys The guest physical address of the page that should be mapped.
 * @param ppv Where to store the address corresponding to GCPhys.
 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
 * @remark Avoid calling this API from within critical sections (other than
 * the PGM one) because of the deadlock risk.
 * @thread Any.
VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
 * Query the Physical TLB entry for the page (may fail).
 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
 /* MMIO pages don't have any readable backing. */
 * Now, just perform the locking and calculate the return address.
 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
 pMap->cRefs++; /* Extra ref to prevent it from going away. */
 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
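/*
 * Usage sketch (editorial addition): the read-only variant. On
 * VERR_PGM_PHYS_PAGE_RESERVED the caller is expected to fall back on
 * PGMPhysRead, as the documentation above says (fallback not shown here).
 * Example names are hypothetical.
 */
#if 0 /* example only */
static int examplePeekGuestBytes(PVM pVM, RTGCPHYS GCPhys, uint8_t *pbDst, size_t cb)
{
    void const     *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMR3PhysGCPhys2CCPtrReadOnlyExternal(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        memcpy(pbDst, pv, RT_MIN(cb, PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK)));
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}
#endif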
 * Relinks the RAM ranges using the pSelfRC and pSelfR0 pointers.
 * Called when anything was relocated.
 * @param pVM Pointer to the shared VM structure.
 for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
 Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfR0 == MMHyperCCToR0(pVM, pCur));
 Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfRC == MMHyperCCToRC(pVM, pCur));
 Assert((pCur->GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
 Assert(pCur->cb == pCur->GCPhysLast - pCur->GCPhys + 1);
 for (PPGMRAMRANGE pCur2 = pVM->pgm.s.pRamRangesR3; pCur2; pCur2 = pCur2->pNextR3)
 || strcmp(pCur2->pszDesc, pCur->pszDesc)); /** @todo fix MMIO ranges!! */
 * Links a new RAM range into the list.
 * @param pVM Pointer to the shared VM structure.
 * @param pNew Pointer to the new list entry.
 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
 AssertMsg(pNew->pszDesc, ("%RGp-%RGp\n", pNew->GCPhys, pNew->GCPhysLast));
 Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfR0 == MMHyperCCToR0(pVM, pNew));
 Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfRC == MMHyperCCToRC(pVM, pNew));
 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesR3;
 pNew->pNextR0 = pRam ? pRam->pSelfR0 : NIL_RTR0PTR;
 pNew->pNextRC = pRam ? pRam->pSelfRC : NIL_RTRCPTR;
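/*
 * Illustrative sketch (editorial addition, not VirtualBox code): the linking
 * code above keeps several parallel 'next' pointers (R3, R0, RC) consistent by
 * copying the successor's self pointers. The simplified, self-contained
 * analogue below mirrors just one extra address space to show the pattern; the
 * head's R0/RC mirrors kept in pVM->pgm.s are omitted for brevity.
 */
#if 0 /* example only */
typedef struct EXAMPLERANGE
{
    struct EXAMPLERANGE *pNextR3;   /* next node, as seen from ring-3 */
    uintptr_t            pNextR0;   /* next node, as seen from ring-0 */
    uintptr_t            pSelfR0;   /* this node's own ring-0 address */
} EXAMPLERANGE;

/* Insert pNew after pPrev (or at the head when pPrev is NULL). */
static void exampleLinkAfter(EXAMPLERANGE **ppHeadR3, EXAMPLERANGE *pNew, EXAMPLERANGE *pPrev)
{
    EXAMPLERANGE *pNext = pPrev ? pPrev->pNextR3 : *ppHeadR3;
    pNew->pNextR3 = pNext;                          /* ring-3 view */
    pNew->pNextR0 = pNext ? pNext->pSelfR0 : 0;     /* ring-0 view */
    if (pPrev)
    {
        pPrev->pNextR3 = pNew;
        pPrev->pNextR0 = pNew->pSelfR0;
    }
    else
        *ppHeadR3 = pNew;
}
#endif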
 * Unlink an existing RAM range from the list.
 * @param pVM Pointer to the shared VM structure.
 * @param pRam Pointer to the RAM range to unlink.
 * @param pPrev Pointer to the previous list entry. If NULL, the range is at the head.
static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesR3 == pRam);
 Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfR0 == MMHyperCCToR0(pVM, pRam));
 Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfRC == MMHyperCCToRC(pVM, pRam));
 pPrev->pNextR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
 pPrev->pNextRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
 pVM->pgm.s.pRamRangesR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
 pVM->pgm.s.pRamRangesRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;
 * Unlink an existing RAM range from the list.
 * @param pVM Pointer to the shared VM structure.
 * @param pRam Pointer to the RAM range to unlink.
static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
 /* find prev. */
 * Frees a range of pages, replacing them with ZERO pages of the specified type.
 * @returns VBox status code.
 * @param pVM The VM handle.
 * @param pRam The RAM range in which the pages reside.
 * @param GCPhys The address of the first page.
 * @param GCPhysLast The address of the last page.
 * @param uType The page type to replace them with.
static int pgmR3PhysFreePageRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, uint8_t uType)
 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
 /* Iterate the pages. */
 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
 uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> PAGE_SHIFT) + 1;
 while (cPagesLeft-- > 0)
 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
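/*
 * Pattern sketch (editorial addition, not part of the original file): the
 * batched free flow used by pgmR3PhysFreePageRange, condensed from the
 * fragments above. pgmPhysFreePage queues a page on the GMM request and, once
 * the batch fills up, performs it and resets the pending counter (see the
 * pgmPhysFreePage fragments near the end of this file). Error handling and
 * the surrounding function body are omitted; treat this as an outline only.
 */
#if 0 /* outline only */
    PGMMFREEPAGESREQ pReq;
    uint32_t         cPendingPages = 0;
    int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    AssertLogRelRCReturn(rc, rc);

    /* pPageDst and cPagesLeft are set up as in the fragments above. */
    while (cPagesLeft-- > 0)
    {
        rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys);
        AssertLogRelRCReturn(rc, rc);
        pPageDst++;
        GCPhys += PAGE_SIZE;
    }

    /* Flush whatever is left in the final, partial batch. */
    if (cPendingPages)
        rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
#endif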
 * Rendezvous callback used by PGMR3PhysChangeMemBalloon that changes the memory balloon size.
 * This is only called on one of the EMTs while the other ones are waiting for
 * it to complete this function.
 * @returns VINF_SUCCESS (VBox strict status code).
 * @param pVM The VM handle.
 * @param pVCpu The VMCPU for the EMT we're being called on. Unused.
 * @param pvUser User parameter
static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysChangeMemBalloonRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
 Log(("pgmR3PhysChangeMemBalloonRendezvous: %s %x pages\n", (fInflate) ? "inflate" : "deflate", cPages));
 /* Flush the PGM pool cache as we might have stale references to pages that we just freed. */
 /* Replace pages with ZERO pages. */
 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
 /* Iterate the pages. */
 for (unsigned i = 0; i < cPages; i++)
 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, paPhysPage[i]);
 Log(("pgmR3PhysChangeMemBalloonRendezvous: invalid physical page %RGp pPage->u3Type=%d\n", paPhysPage[i], (pPage) ? pPage->uTypeY : 0));
 /* Flush the shadow PT if this page was previously used as a guest page table. */
 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, paPhysPage[i]);
 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_BALLOONED);
 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
 /* Iterate the pages. */
 for (unsigned i = 0; i < cPages; i++)
 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, paPhysPage[i]);
 AssertBreak(pPage && pPage->uTypeY == PGMPAGETYPE_RAM);
 LogFlow(("Free ballooned page: %RGp\n", paPhysPage[i]));
 /* Change back to zero page. */
 /* Note that we currently do not map any ballooned pages in our shadow page tables, so no need to flush the pgm pool. */
 /* Notify GMM about the balloon change. */
 rc = GMMR3BalloonedPages(pVM, (fInflate) ? GMMBALLOONACTION_INFLATE : GMMBALLOONACTION_DEFLATE, cPages);
 /* Flush the recompiler's TLB as well. */
 CPUMSetChangedFlags(&pVM->aCpus[i], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
 * Frees a range of ram pages, replacing them with ZERO pages; helper for PGMR3PhysFreeRamPages
 * @returns VBox status code.
 * @param pVM The VM handle.
 * @param fInflate Inflate or deflate memory balloon
 * @param cPages Number of pages to free
 * @param paPhysPage Array of guest physical addresses
static DECLCALLBACK(void) pgmR3PhysChangeMemBalloonHelper(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
 /* Made a copy in PGMR3PhysFreeRamPages; free it here. */
 * Inflate or deflate a memory balloon
 * @returns VBox status code.
 * @param pVM The VM handle.
 * @param fInflate Inflate or deflate memory balloon
 * @param cPages Number of pages to free
 * @param paPhysPage Array of guest physical addresses
VMMR3DECL(int) PGMR3PhysChangeMemBalloon(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
 /* Older additions (ancient non-functioning balloon code) pass wrong physical addresses. */
 AssertReturn(!(paPhysPage[0] & 0xfff), VERR_INVALID_PARAMETER);
 /* We own the IOM lock here and could cause a deadlock by waiting for another VCPU that is blocking on the IOM lock.
 * In the SMP case we post a request packet to postpone the job.
 unsigned cbPhysPage = cPages * sizeof(paPhysPage[0]);
 RTGCPHYS *paPhysPageCopy = (RTGCPHYS *)RTMemAlloc(cbPhysPage);
 rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysChangeMemBalloonHelper, 4, pVM, fInflate, cPages, paPhysPageCopy);
 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
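/*
 * Usage sketch (editorial addition): handing a batch of guest page addresses
 * to PGMR3PhysChangeMemBalloon. The caller and array contents are
 * hypothetical; the addresses must be page aligned (see the assertion above),
 * and the fragments above show the array being copied before the job is
 * deferred to a request packet on SMP configurations.
 */
#if 0 /* example only */
static int exampleInflateBalloon(PVM pVM, RTGCPHYS *paPages, unsigned cPages)
{
    return PGMR3PhysChangeMemBalloon(pVM, true /*fInflate*/, cPages, paPages);
}
#endif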
 * Query the amount of free memory inside VMMR0
 * @returns VBox status code.
 * @param pVM The VM handle.
 * @param puTotalAllocSize Pointer to total allocated memory inside VMMR0 (in bytes)
 * @param puTotalFreeSize Pointer to total free (allocated but not used yet) memory inside VMMR0 (in bytes)
 * @param puTotalBalloonSize Pointer to total ballooned memory inside VMMR0 (in bytes)
VMMR3DECL(int) PGMR3QueryVMMMemoryStats(PVM pVM, uint64_t *puTotalAllocSize, uint64_t *puTotalFreeSize, uint64_t *puTotalBalloonSize)
 uint64_t cAllocPages = 0, cFreePages = 0, cBalloonPages = 0;
 rc = GMMR3QueryHypervisorMemoryStats(pVM, &cAllocPages, &cFreePages, &cBalloonPages);
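/*
 * Usage sketch (editorial addition): querying the ring-0 memory statistics.
 * The byte counts are simply logged; purely illustrative.
 */
#if 0 /* example only */
static void exampleLogMemoryStats(PVM pVM)
{
    uint64_t cbAlloc = 0, cbFree = 0, cbBalloon = 0;
    int rc = PGMR3QueryVMMMemoryStats(pVM, &cbAlloc, &cbFree, &cbBalloon);
    if (RT_SUCCESS(rc))
        LogRel(("PGM: R0 alloc=%RU64 free=%RU64 ballooned=%RU64 (bytes)\n",
                cbAlloc, cbFree, cbBalloon));
}
#endif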
 * PGMR3PhysRegisterRam worker that initializes and links a RAM range.
 * @param pVM The VM handle.
 * @param pNew The new RAM range.
 * @param GCPhys The address of the RAM range.
 * @param GCPhysLast The last address of the RAM range.
 * @param RCPtrNew The RC address if the range is floating. NIL_RTRCPTR
 * if in HMA.
 * @param R0PtrNew Ditto for R0.
 * @param pszDesc The description.
 * @param pPrev The previous RAM range (for linking).
static void pgmR3PhysInitAndLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
 RTRCPTR RCPtrNew, RTR0PTR R0PtrNew, const char *pszDesc, PPGMRAMRANGE pPrev)
 * Initialize the range.
 pNew->pSelfR0 = R0PtrNew != NIL_RTR0PTR ? R0PtrNew : MMHyperCCToR0(pVM, pNew);
 pNew->pSelfRC = RCPtrNew != NIL_RTRCPTR ? RCPtrNew : MMHyperCCToRC(pVM, pNew);
 pNew->fFlags = RCPtrNew != NIL_RTRCPTR ? PGM_RAM_RANGE_FLAGS_FLOATING : 0;
 while (iPage-- > 0)
 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
 /* Update the page count stats. */
 * Relocate a floating RAM range.
 * @copydoc FNPGMRELOCATE.
static DECLCALLBACK(bool) pgmR3PhysRamRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
 Assert(pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);
 return true;
 /* Update myself and then relink all the ranges. */
 return true;
 * PGMR3PhysRegisterRam worker that registers a high chunk.
 * @returns VBox status code.
 * @param pVM The VM handle.
 * @param GCPhys The address of the RAM.
 * @param cRamPages The number of RAM pages to register.
 * @param cbChunk The size of the PGMRAMRANGE guest mapping.
 * @param iChunk The chunk number.
 * @param pszDesc The RAM range description.
 * @param ppPrev Previous RAM range pointer. In/Out.
static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages,
? pszDesc
size_t const cChunkPages = RT_ALIGN_Z(RT_UOFFSETOF(PGMRAMRANGE, aPages[cRamPages]), PAGE_SIZE) >> PAGE_SHIFT;
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
NULL,
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
rc = PGMR3MapPT(pVM, GCPtrChunkMap, cbChunk, 0 /*fFlags*/, pgmR3PhysRamRangeRelocate, pNew, pszDescChunk);
return rc;
AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
return rc;
while (cPagesLeft > 0)
rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, cbChunk, iChunk, pszDesc, &pPrev);
iChunk++;
pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, NIL_RTRCPTR, NIL_RTR0PTR, pszDesc, pPrev);
return VINF_SUCCESS;
while (cLeft-- > 0)
case PGM_PAGE_STATE_ZERO:
LogRel(("PGM: RAM Pre-allocation failed at %RGp (in %s) with rc=%Rrc\n", GCPhys, pRam->pszDesc, rc));
return rc;
cPages++;
case PGM_PAGE_STATE_BALLOONED:
case PGM_PAGE_STATE_ALLOCATED:
case PGM_PAGE_STATE_SHARED:
pPage++;
return VINF_SUCCESS;
AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
while (iPage-- > 0)
case PGMPAGETYPE_RAM:
void *pvPage;
rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
case PGMPAGETYPE_MMIO2:
case PGMPAGETYPE_ROM:
case PGMPAGETYPE_MMIO:
AssertFailed();
while (iPage-- > 0)
case PGMPAGETYPE_RAM:
case PGM_PAGE_STATE_ZERO:
case PGM_PAGE_STATE_BALLOONED:
case PGM_PAGE_STATE_SHARED:
case PGM_PAGE_STATE_ALLOCATED:
void *pvPage;
case PGMPAGETYPE_MMIO2:
case PGMPAGETYPE_ROM_SHADOW:
case PGMPAGETYPE_ROM:
case PGMPAGETYPE_MMIO:
AssertFailed();
if (cPendingPages)
return VINF_SUCCESS;
int rc;
bool fRamExists = false;
while (cLeft-- > 0)
pPage++;
fRamExists = true;
if (fRamExists)
Log(("PGMR3PhysMMIORegister: Adding ad hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));
rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), 16, MM_TAG_PGM_PHYS, (void **)&pNew);
while (iPage-- > 0)
&& !fRamExists)
return rc;
bool fAllMMIO = true;
while (cLeft-- > 0)
fAllMMIO = false;
pPage++;
if (fAllMMIO)
* Range match? It will all be within one range (see PGMAllHandler.cpp).
while (cLeft--)
AssertMsg(PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
AssertMsg(PGM_PAGE_IS_ZERO(pPage), ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
return rc;
return pCur;
return NULL;
VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS cb, uint32_t fFlags, void **ppv, const char *pszDesc)
if (!pszDesc)
return VERR_NO_MEMORY;
void *pvPages;
while (iPage-- > 0)
return VINF_SUCCESS;
return rc;
unsigned cFound = 0;
while (pCur)
cFound++;
if (pPrev)
#ifdef VBOX_STRICT
bool fRamExists = false;
fRamExists = true;
if (fRamExists)
while (cPagesLeft-- > 0)
pPage++;
if (fRamExists)
while (cPagesLeft-- > 0)
pPageSrc++;
pPageDst++;
if (cPendingPages)
return VINF_SUCCESS;
bool fInformREM;
while (cPagesLeft-- > 0)
pPageDst++;
fInformREM = false;
fInformREM = true;
if (fInformREM)
return VINF_SUCCESS;
VMMR3DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, PRTHCPHYS pHCPhys)
return VINF_SUCCESS;
VMMR3DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
return rc;
Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p fFlags=%#x pszDesc=%s\n",
AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAGS_SHADOWED | PGMPHYS_ROM_FLAGS_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
bool fRamExists = false;
fRamExists = true;
if (fRamExists)
while (cPagesLeft-- > 0)
pPage++;
if (cExtraBaseCost)
return rc;
return rc;
rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
if (!fRamExists)
rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
if (!fRamExists)
void *pvDstPage;
if (pRomPrev)
return VINF_SUCCESS;
if (!fRamExists)
return rc;
static DECLCALLBACK(int) pgmR3PhysRomWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
Log5(("pgmR3PhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
return VINF_PGM_HANDLER_DO_DEFAULT;
return VINF_SUCCESS;
void *pvDstPage;
return rc;
rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRom->aPages[iPage].Shadow, pRom->GCPhys + (iPage << PAGE_SHIFT));
if (cPendingPages)
Assert(!PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow));
void *pvDstPage;
#ifdef VBOX_STRICT
void const *pvDstPage;
return VINF_SUCCESS;
if (!cb)
return VINF_SUCCESS;
bool fFlushTLB = false;
bool fChanges = false;
iPage++)
fChanges = true;
if (fChanges)
return rc2;
if (fFlushTLB)
return rc;
RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
typedef struct PGMR3PHYSCHUNKUNMAPCB
if (pChunk)
if (pChunk)
/* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
} while (pNode);
if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, pVM))
return INT32_MAX;
int rc;
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3UkHeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk), NULL);
PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
return rc;
int rc;
return rc;
void *pv;
idPage++;
return rc;
* in EM.cpp and shouldn't be propagated outside TRPM, HWACCM, EM and
AssertMsgReturn(iClear <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d", iClear), VERR_INTERNAL_ERROR);
void *pvChunk;
void *pv;
AssertLogRelMsgBreak(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc", pPage->idPage, pPage->HCPhysGCPhys, rc));
iClear++;
Log3(("PGMR3PhysAllocateHandyPages: idPage=%#x HCPhys=%RGp\n", pPage->idPage, pPage->HCPhysGCPhys));
pRam;
return rc;
static int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys)
return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
return VINF_SUCCESS;
return VMSetError(pVM, VERR_PGM_PHYS_INVALID_PAGE_ID, RT_SRC_POS, "GCPhys=%RGp idPage=%#x", GCPhys, pPage);
return VINF_SUCCESS;
*pcPendingPages = 0;
return rc;
/** @todo Handle TLB loads of virtual handlers so ./test.sh can be made to work
if (fWritable)
int rc2;
case PGM_PAGE_STATE_ALLOCATED:
case PGM_PAGE_STATE_BALLOONED:
AssertFailed();
case PGM_PAGE_STATE_ZERO:
case PGM_PAGE_STATE_SHARED:
Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
return rc;