/* PGMPhys.cpp, revision 12fcc878a631e75b88a82cebc92d1cd57b09c8e7 */
/* $Id$ */
/** @file
* PGM - Page Manager and Monitor, Physical Memory Addressing.
*/
/*
* Copyright (C) 2006-2007 innotek GmbH
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include "PGMInternal.h"
/*
 * Instantiate the typed physical read access functions from the PGMPhys.h
 * template, once for each supported data size.
 */
#define PGMPHYSFN_READNAME PGMR3PhysReadByte
#define PGMPHYS_DATASIZE 1
#define PGMPHYS_DATATYPE uint8_t
#include "PGMPhys.h"
#define PGMPHYSFN_READNAME PGMR3PhysReadWord
#define PGMPHYS_DATASIZE 2
#define PGMPHYS_DATATYPE uint16_t
#include "PGMPhys.h"
#define PGMPHYSFN_READNAME PGMR3PhysReadDword
#define PGMPHYS_DATASIZE 4
#define PGMPHYS_DATATYPE uint32_t
#include "PGMPhys.h"
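/*
 * A rough sketch of the pattern above (illustrative only; the real template
 * body lives in PGMPhys.h and is not reproduced here): each inclusion is
 * assumed to expand into one typed read accessor along these lines,
 *
 *     PGMPHYS_DATATYPE PGMPHYSFN_READNAME(PVM pVM, RTGCPHYS GCPhys)
 *     {
 *         PGMPHYS_DATATYPE u;                          // 1, 2 or 4 bytes wide
 *         PGMPhysRead(pVM, GCPhys, &u, sizeof(u));     // hypothetical body
 *         return u;
 *     }
 *
 * so PGMR3PhysReadByte/Word/Dword end up as thin, size-specific wrappers
 * around the generic physical read path.
 */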
/**
 * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
 * registration APIs call to inform PGM about memory registrations.
 *
 * It registers the physical memory range with PGM. MM is responsible
 * for the top-level things - allocation and locking - while PGM takes
 * care of all the details and implements the physical address space virtualization.
*
* @returns VBox status.
* @param pVM The VM handle.
* @param pvRam HC virtual address of the RAM range. (page aligned)
* @param GCPhys GC physical address of the RAM range. (page aligned)
* @param cb Size of the RAM range. (page aligned)
* @param fFlags Flags, MM_RAM_*.
 * @param paPages Pointer to an array of physical page descriptors.
* @param pszDesc Description string.
*/
PGMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
{
/*
* Validate input.
* (Not so important because callers are only MMR3PhysRegister()
* and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
*/
Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
/*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
/*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
if (GCPhysLast < GCPhys)
{
return VERR_INVALID_PARAMETER;
}
/*
* Find range location and check for conflicts.
*/
while (pCur)
{
{
AssertMsgFailed(("Conflict! This cannot happen!\n"));
return VERR_PGM_RAM_CONFLICT;
}
break;
/* next */
}
/*
* Allocate RAM range.
* Small ranges are allocated from the heap, big ones have separate mappings.
*/
int rc;
{ /* large */
if (VBOX_SUCCESS(rc))
{
if (VBOX_SUCCESS(rc))
{
}
else
{
}
}
else
}
else
{ /* small */
if (VBOX_SUCCESS(rc))
else
}
if (VBOX_SUCCESS(rc))
{
/*
* Initialize the range.
*/
pNew->pavHCChunkGC = 0;
if (paPages)
{
while (iPage-- > 0)
{
pNew->aPages[iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
}
}
else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
{
/* Allocate memory for chunk to HC ptr lookup array. */
rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
/* Physical memory will be allocated on demand. */
while (iPage-- > 0)
{
}
}
else
{
RTHCPHYS HCPhysDummyPage = (MMR3PageDummyHCPhys(pVM) & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
while (iPage-- > 0)
{
}
}
/*
* Insert the new RAM range.
*/
if (pPrev)
{
}
else
{
}
}
return rc;
}
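/*
 * Hedged usage sketch (not part of this file): a caller such as MMR3PhysRegister()
 * is assumed to hand PGM an already allocated and locked, page-aligned range
 * together with its page descriptors, roughly:
 *
 *     // pvRam and paPages come from MM's allocation/locking of host memory.
 *     int rc = PGMR3PhysRegister(pVM, pvRam, GCPhys, cb, 0, paPages, "Main Memory");
 *     AssertRCReturn(rc, rc);
 *
 * PGM only records and virtualizes the range; the backing memory remains
 * MM's responsibility.
 */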
/**
 * Registers a chunk of the physical memory range with PGM. MM is responsible
 * for the top-level things - allocation and locking - while PGM takes
 * care of all the details and implements the physical address space virtualization.
 *
* @returns VBox status.
* @param pVM The VM handle.
* @param pvRam HC virtual address of the RAM range. (page aligned)
* @param GCPhys GC physical address of the RAM range. (page aligned)
* @param cb Size of the RAM range. (page aligned)
* @param fFlags Flags, MM_RAM_*.
 * @param paPages Pointer to an array of physical page descriptors.
* @param pszDesc Description string.
*/
PGMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
{
/*
* Validate input.
* (Not so important because callers are only MMR3PhysRegister()
* and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
*/
Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
if (GCPhysLast < GCPhys)
{
return VERR_INVALID_PARAMETER;
}
/*
* Find existing range location.
*/
while (pRam)
{
break;
}
if (paPages)
{
while (iPage-- > 0)
pRam->aPages[off + iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
}
/* Notify the recompiler. */
return VINF_SUCCESS;
}
/**
* Allocate missing physical pages for an existing guest RAM range.
*
* @returns VBox status.
* @param pVM The VM handle.
* @param GCPhys GC physical address of the RAM range. (page aligned)
*/
{
/*
* Walk range list.
*/
while (pRam)
{
{
bool fRangeExists = false;
/** @note A request made from another thread may end up in EMT after somebody else has already allocated the range. */
fRangeExists = true;
if (fRangeExists)
return VINF_SUCCESS;
}
}
}
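/*
 * Sketch of how a request from another thread (see the note above) is assumed
 * to reach EMT; pgmR3PhysGrowRangeReq is a hypothetical wrapper name used only
 * for illustration:
 *
 *     PVMREQ pReq;
 *     int rc = VMR3ReqCall(pVM, &pReq, RT_INDEFINITE_WAIT,
 *                          (PFNRT)pgmR3PhysGrowRangeReq, 2, pVM, &GCPhys);  // hypothetical target
 *     if (VBOX_SUCCESS(rc))
 *         rc = pReq->iStatus;
 *     VMR3ReqFree(pReq);
 *
 * By the time the request runs on EMT another thread may already have grown
 * the range, which is why the code above simply returns VINF_SUCCESS in that case.
 */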
#ifndef VBOX_WITH_NEW_PHYS_CODE
/**
* Allocate missing physical pages for an existing guest RAM range.
*
* @returns VBox status.
* @param pVM The VM handle.
* @param pRamRange RAM range
* @param GCPhys GC physical address of the RAM range. (page aligned)
*/
{
void *pvRam;
int rc;
/* We must execute this function in the EMT thread, otherwise we'll run into problems. */
{
AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));
if (VBOX_SUCCESS(rc))
{
}
return rc;
}
/* Round down to chunk boundary */
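/* A sketch of the rounding, assuming PGM_DYNAMIC_CHUNK_SIZE (used below) is a
 * power of two:
 *     GCPhys &= ~((RTGCPHYS)PGM_DYNAMIC_CHUNK_SIZE - 1);
 * e.g. with 1MB chunks a fault at 0x12345678 would grow the chunk starting
 * at 0x12300000. */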
for (;;)
{
if (VBOX_SUCCESS(rc))
{
rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
if (VBOX_SUCCESS(rc))
return rc;
}
if (enmVMState != VMSTATE_RUNNING)
{
LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %VGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
return rc;
}
LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));
/* Pause first, then inform Main. */
VMSetRuntimeError(pVM, false, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM.");
/* Wait for resume event; will only return in that case. If the VM is stopped, the EMT thread will be destroyed. */
/* Retry */
LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
}
}
#endif /* !VBOX_WITH_NEW_PHYS_CODE */
/**
 * Interface that the MMIO handler relocation code calls.
 *
 * It relocates an existing physical memory range with PGM.
*
* @returns VBox status.
* @param pVM The VM handle.
* @param GCPhysOld Previous GC physical address of the RAM range. (page aligned)
* @param GCPhysNew New GC physical address of the RAM range. (page aligned)
* @param cb Size of the RAM range. (page aligned)
*/
{
/*
* Validate input.
* (Not so important because callers are only MMR3PhysRelocate(),
* but anyway...)
*/
if (GCPhysLast < GCPhysOld)
{
return VERR_INVALID_PARAMETER;
}
if (GCPhysLast < GCPhysNew)
{
return VERR_INVALID_PARAMETER;
}
/*
* Find and remove old range location.
*/
while (pCur)
{
break;
/* next */
}
if (pPrev)
{
}
else
{
}
/*
* Update the range.
*/
/*
* Find range location and check for conflicts.
*/
while (pCur)
{
{
AssertMsgFailed(("Conflict! This cannot happen!\n"));
return VERR_PGM_RAM_CONFLICT;
}
break;
/* next */
}
/*
* Reinsert the RAM range.
*/
if (pPrev)
{
}
else
{
}
return VINF_SUCCESS;
}
/**
 * Interface that MMR3RomRegister() and MMR3PhysReserve() call to update the
 * flags of existing RAM ranges.
*
* @returns VBox status.
* @param pVM The VM handle.
* @param GCPhys GC physical address of the RAM range. (page aligned)
* @param cb Size of the RAM range. (page aligned)
 * @param fFlags The OR mask of flags to set, MM_RAM_* \#defines.
 * @param fMask The AND mask applied to the existing flags.
*/
PGMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
{
/*
* Validate input.
 * (Not so important because the only callers are MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
*/
Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
/*
* Lookup the range.
*/
if ( !pRam
{
return VERR_INVALID_PARAMETER;
}
/*
* Update the requested flags.
*/
RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
| fMask;
pRam->aPages[iPage].HCPhys = (pRam->aPages[iPage].HCPhys & fFullMask) | fFlags; /** @todo PAGE FLAGS */
return VINF_SUCCESS;
}
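/*
 * Worked example of the flag update above (the call values are hypothetical):
 * marking a range as reserved while clearing all other MM_RAM_FLAGS_* bits
 * would be
 *
 *     PGMR3PhysSetFlags(pVM, GCPhys, cb, MM_RAM_FLAGS_RESERVED, 0);
 *
 * For each page this evaluates HCPhys = (HCPhys & fFullMask) | fFlags, i.e.
 * the flag bits not kept by fMask are cleared first, fFlags is then OR'ed in,
 * and the physical address bits are left untouched.
 */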
/**
* Sets the Address Gate 20 state.
*
* @param pVM VM handle.
* @param fEnable True if the gate should be enabled.
* False if the gate should be disabled.
*/
{
{
}
}
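/*
 * Background sketch for the A20 gate handling above; GCPhysA20Mask is an
 * assumption about the PGM state layout, not taken from this revision. With
 * the gate disabled, physical address bit 20 is masked off before lookups, so
 * 0x00100000 aliases 0x00000000:
 *
 *     GCPhys &= pVM->pgm.s.GCPhysA20Mask;   // ~RT_BIT_32(20) when A20 is off, ~0 when on
 */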
/**
* Tree enumeration callback for dealing with age rollover.
* It will perform a simple compression of the current age.
*/
{
/* Age compression - ASSUMES iNow == 4. */
else /* iAge = 0 */
/* reinsert */
return 0;
}
/**
 * Tree enumeration callback that updates the chunks that have
 * been used since the last ageing pass.
*/
{
{
}
return 0;
}
/**
* Performs ageing of the ring-3 chunk mappings.
*
* @param pVM The VM handle.
*/
{
{
RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
}
else
RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
}
/**
* The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
*/
typedef struct PGMR3PHYSCHUNKUNMAPCB
{
/**
* Callback used to find the mapping that's been unused for
* the longest time.
*/
{
do
{
{
/*
* Check that it's not in any of the TLBs.
*/
{
break;
}
if (pChunk)
{
break;
}
if (pChunk)
{
return 1; /* done */
}
}
/* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
} while (pNode);
return 0;
}
/**
* Finds a good candidate for unmapping when the ring-3 mapping cache is full.
*
* The candidate will not be part of any TLBs, so no need to flush
* anything afterwards.
*
* @returns Chunk id.
* @param pVM The VM handle.
*/
{
/*
* Do tree ageing first?
*/
/*
* Enumerate the age tree starting with the left most node.
*/
if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, pVM))
return INT32_MAX;
}
/**
* Maps the given chunk into the ring-3 mapping cache.
*
* This will call ring-0.
*
* @returns VBox status code.
* @param pVM The VM handle.
* @param idChunk The chunk in question.
* @param ppChunk Where to store the chunk tracking structure.
*
* @remarks Called from within the PGM critical section.
*/
{
int rc;
/*
* Allocate a new tracking structure first.
*/
#if 0 /* for later when we've got a separate mapping method for ring-0. */
PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
#else
#endif
/*
* Request the ring-0 part to map the chunk in question and if
* necessary unmap another one to make space in the mapping cache.
*/
if (VBOX_SUCCESS(rc))
{
/*
* Update the tree.
*/
/* insert the new one. */
/* remove the unmapped one. */
{
PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
#if 0 /* for later when we've got a separate mapping method for ring-0. */
#else
#endif
}
}
else
{
#if 0 /* for later when we've got a separate mapping method for ring-0. */
#else
#endif
}
return rc;
}
/**
* For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
*
* @returns see pgmR3PhysChunkMap.
* @param pVM The VM handle.
* @param idChunk The chunk to map.
*/
{
}
/**
* Invalidates the TLB for the ring-3 mapping cache.
*
* @param pVM The VM handle.
*/
{
{
}
}
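/*
 * Minimal sketch of what the invalidation above amounts to, assuming the ring-3
 * chunk TLB is a small array of {idChunk, pChunk} entries (field names are
 * assumptions, not taken from this revision):
 *
 *     for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
 *     {
 *         pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
 *         pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk  = NULL;
 *     }
 *
 * Every subsequent lookup then misses and falls back to the AVL tree, which
 * repopulates the TLB lazily.
 */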
/**
* Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
*
* @returns The following VBox status codes.
* @retval VINF_SUCCESS on success. FF cleared.
* @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
*
* @param pVM The VM handle.
*/
{
if (rc == VERR_GMM_SEED_ME)
{
void *pvChunk;
if (VBOX_SUCCESS(rc))
if (VBOX_FAILURE(rc))
{
}
}
return rc;
}
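/*
 * Hedged sketch of the VERR_GMM_SEED_ME branch above; the allocation and
 * seeding calls are not shown in this revision, so the names below are
 * assumptions. Ring-3 allocates one chunk worth of pages itself, hands it to
 * GMM, and the handy-page allocation is then retried:
 *
 *     rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
 *     if (VBOX_SUCCESS(rc))
 *         rc = GMMR3SeedChunk(pVM, pvChunk);
 *     if (VBOX_FAILURE(rc))
 *         rc = VINF_EM_NO_MEMORY;    // out of host memory; the FF stays set
 */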