/* $Id$ */
/** @file
* PGM - Page Manager and Monitor - All context code.
*/
/*
* Copyright (C) 2006-2007 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include "PGMInternal.h"
/**
* Maps a range of physical pages at a given virtual address
* in the guest context.
*
* The GC virtual address range must be within an existing mapping.
*
* @returns VBox status code.
* @param pVM The virtual machine.
* @param GCPtr Where to map the page(s). Must be page aligned.
* @param HCPhys Start of the range of physical pages. Must be page aligned.
* @param cbPages Number of bytes to map. Must be page aligned.
* @param fFlags Page flags (X86_PTE_*).
*/
VMMDECL(int) PGMMap(PVM pVM, RTGCPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
{
/*
* Validate input.
*/
    AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
    AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
    AssertMsg(RT_ALIGN_T(HCPhys, PAGE_SIZE, RTHCPHYS) == HCPhys, ("Invalid alignment HCPhys=%RHp\n", HCPhys));
    /* hypervisor defaults */
    if (!fFlags)
        fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;
    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        /* Is the start of the requested range inside this mapping? */
        if (GCPtr - pCur->GCPtr < pCur->cb)
        {
            if (GCPtr + cbPages - 1 > pCur->GCPtrLast)
            {
                AssertMsgFailed(("Invalid range!!\n"));
                return VERR_INVALID_PARAMETER;
            }
            /*
             * Setup PTE.
             */
            X86PTEPAE Pte;
            Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);
            /*
             * Update the page tables.
             */
            for (;;)
            {
                RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
                const unsigned iPT     = off >> X86_PD_SHIFT;
                const unsigned iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;

                /* 32-bit */
                pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u;      /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */

                /* pae */
                pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512].u = Pte.u;

                /* next */
                cbPages -= PAGE_SIZE;
                if (!cbPages)
                    break;
                GCPtr += PAGE_SIZE;
                Pte.u += PAGE_SIZE;
            }
return VINF_SUCCESS;
}
        /* next */
        pCur = pCur->CTX_SUFF(pNext);
}
return VERR_INVALID_PARAMETER;
}
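/*
 * Illustrative usage sketch (not part of the original source): mapping a
 * single physical page into an existing hypervisor mapping. GCPtrHyp and
 * HCPhysPage are hypothetical; passing fFlags=0 selects the hypervisor
 * default flags set up at the top of PGMMap.
 */
#if 0
    int rc = PGMMap(pVM, GCPtrHyp, HCPhysPage, PAGE_SIZE, 0 /* hypervisor defaults */);
    AssertRCReturn(rc, rc);
#endif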
/**
* Sets (replaces) the page flags for a range of pages in a mapping.
*
* @returns VBox status.
* @param pVM VM handle.
* @param GCPtr Virtual address of the first page in the range.
* @param cb Size (in bytes) of the range to apply the modification to.
* @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
*/
VMMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags)
{
    return PGMMapModifyPage(pVM, GCPtr, cb, fFlags, 0);
}
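/*
 * Illustrative usage sketch (not part of the original source): replacing the
 * flags of one page so it becomes read-only. GCPtrHyp is hypothetical; note
 * that the physical page mask bits must not be included in fFlags.
 */
#if 0
    int rc = PGMMapSetPage(pVM, GCPtrHyp, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D);
    AssertRC(rc);
#endif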
/**
* Modify page flags for a range of pages in a mapping.
*
* The existing flags are ANDed with the fMask and ORed with the fFlags.
*
* @returns VBox status code.
* @param pVM VM handle.
* @param GCPtr Virtual address of the first page in the range.
* @param cb Size (in bytes) of the range to apply the modification to.
* @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
* @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
*/
VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#RX64\n", fFlags));
    Assert(cb);

    /*
     * Align the input.
     */
    cb    += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb     = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr  = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
        if (off < pCur->cb)
        {
            AssertMsg(off + cb <= pCur->cb,
                      ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
                       GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast));
            /*
             * Perform the requested operation.
             */
            while (cb > 0)
            {
                unsigned iPT  = off >> X86_PD_SHIFT;
                unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
                while (cb > 0 && iPTE < RT_ELEMENTS(pCur->aPTs[iPT].CTX_SUFF(pPT)->a))
                {
                    /* 32-Bit */
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u = (pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u & fMask) | fFlags;
                    /* PAE */
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u = (pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u & fMask) | fFlags;
                    /* invalidate tlb entry */
                    PGM_INVL_PG((RTGCUINTPTR)pCur->GCPtr + off);
                    /* next */
                    iPTE++;
                    cb  -= PAGE_SIZE;
                    off += PAGE_SIZE;
                }
            }
return VINF_SUCCESS;
}
        /* next */
        pCur = pCur->CTX_SUFF(pNext);
}
return VERR_INVALID_PARAMETER;
}
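/*
 * Illustrative usage sketch (not part of the original source): write
 * protecting a hypothetical two-page range. The AND mask keeps everything
 * except X86_PTE_RW and the OR mask adds nothing, so the pages become
 * read-only while all other attributes are preserved.
 */
#if 0
    int rc = PGMMapModifyPage(pVM, GCPtrHyp, 2 * PAGE_SIZE, 0 /* fFlags: OR in nothing */,
                              ~(uint64_t)X86_PTE_RW /* fMask: clear RW only */);
    AssertRC(rc);
#endif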
#ifndef IN_RING0
/**
* Sets all PDEs involved with the mapping in the shadow page table.
*
* @param pVM The VM handle.
* @param pMap Pointer to the mapping in question.
* @param iNewPDE The index of the 32-bit PDE corresponding to the base of the mapping.
*/
void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
{
    Log(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    if (!pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        return;    /* too early */
#endif
    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
    /*
     * Init the page tables and insert them into the page directories.
     */
    unsigned i = pMap->cPTs;
    iNewPDE += i;
    while (i-- > 0)
{
iNewPDE--;
        switch (enmShadowMode)
{
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
                AssertFatal(pShw32BitPd);
                /* Free any previous shadow page table backing this slot before taking it over. */
                if (    pShw32BitPd->a[iNewPDE].n.u1Present
                    &&  !(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING))
                {
                    pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);
                }
                X86PDE Pde;
                /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags. */
                Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
                pShw32BitPd->a[iNewPDE] = Pde;
                break;
            }
            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const unsigned iPdPt = iNewPDE / 256;
                unsigned       iPDE  = iNewPDE * 2 % 512;
                PX86PDPT  pShwPdpt  = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
                Assert(pShwPdpt);
                PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (RTGCPTR)iPdPt << X86_PDPT_SHIFT);
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
                if (!pShwPaePd)
                {
                    X86PDPE GstPdpe;
                    if (PGMGetGuestMode(pVM) < PGMMODE_PAE)
                    {
                        /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
                        GstPdpe.u = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
                    }
                    else
                    {
                        PX86PDPE pGstPdpe = pgmGstGetPaePDPEPtr(&pVM->pgm.s, (RTGCPTR)iPdPt << X86_PDPT_SHIFT);
                        if (pGstPdpe)
                            GstPdpe = *pGstPdpe;
                        else
                            GstPdpe.u = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
                    }
                    int rc = pgmShwSyncPaePDPtr(pVM, (RTGCPTR)iPdPt << X86_PDPT_SHIFT, &GstPdpe, &pShwPaePd);
                    if (rc != VINF_SUCCESS)
                    {
                        AssertRC(rc);
                        return;
                    }
                }
#endif
                AssertFatal(pShwPaePd);
                /* 1st 2 MB PDE of the 4 MB region; free any previous shadow page table first. */
                if (    pShwPaePd->a[iPDE].n.u1Present
                    &&  !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING))
                {
                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PAE_PG_MASK, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPDE);
                }
                X86PDEPAE PdePae0;
                PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
                pShwPaePd->a[iPDE] = PdePae0;

                /* 2nd 2 MB PDE of the 4 MB region */
                iPDE++;
                AssertFatal(iPDE < 512);
                if (    pShwPaePd->a[iPDE].n.u1Present
                    &&  !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING))
                {
                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PAE_PG_MASK, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPDE);
                }
                X86PDEPAE PdePae1;
                PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
                pShwPaePd->a[iPDE] = PdePae1;

                /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
                pShwPdpt->a[iPdPt].u |= PGM_PDFLAGS_MAPPING;
                break;
            }
default:
AssertFailed();
break;
}
}
}
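/*
 * Worked index example (not part of the original source): one 32-bit PDE
 * covers 4 MB, i.e. two 2 MB PAE PDEs, which is the arithmetic used above.
 * For a hypothetical mapping at 0xa0400000:
 */
#if 0
    unsigned iNewPDE = 0xa0400000 >> X86_PD_SHIFT;  /* = 0x281 */
    unsigned iPdPt   = iNewPDE / 256;               /* = 2, the PDPT entry */
    unsigned iPDE    = iNewPDE * 2 % 512;           /* = 0x102; 0x103 covers the second 2 MB half */
#endif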
/**
* Clears all PDEs involved with the mapping in the shadow page table.
*
* @param pVM The VM handle.
* @param pShwPageCR3 CR3 root page
* @param pMap Pointer to the mapping in question.
* @param iOldPDE The index of the 32-bit PDE corresponding to the base of the mapping.
*/
void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE)
{
    Log(("pgmMapClearShadowPDEs old pde %x (mappings enabled %d)\n", iOldPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    Assert(pShwPageCR3);
#endif
    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
    unsigned i = pMap->cPTs;
    iOldPDE += i;
    while (i-- > 0)
    {
        iOldPDE--;
        switch (enmShadowMode)
{
            case PGMMODE_32_BIT:
            {
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR(pVM, pShwPageCR3);
#else
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
#endif
                AssertFatal(pShw32BitPd);
                pShw32BitPd->a[iOldPDE].u = 0;
                break;
            }
            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                const unsigned iPdPt = iOldPDE / 256;
                unsigned       iPDE  = iOldPDE * 2 % 512;
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
                PX86PDPT pShwPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPageCR3);
#else
                PX86PDPT pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
#endif
                PX86PDPAE pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (RTGCPTR)iPdPt << X86_PDPT_SHIFT);
                pShwPaePd->a[iPDE].u = 0;
                iPDE++;
                pShwPaePd->a[iPDE].u = 0;
                /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
                pShwPdpt->a[iPdPt].u &= ~(uint64_t)PGM_PDFLAGS_MAPPING;
                break;
            }
default:
AssertFailed();
break;
}
}
}
#endif /* !IN_RING0 */
/**
* Apply the hypervisor mappings to the active CR3.
*
* @returns VBox status.
* @param pVM The virtual machine.
*/
VMMDECL(int) PGMMapActivateAll(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;
#ifdef IN_RING0
AssertFailed();
return VERR_INTERNAL_ERROR;
#else
# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif
    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapSetShadowPDEs(pVM, pCur, iPDE);
    }
return VINF_SUCCESS;
#endif /* IN_RING0 */
}
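/*
 * Illustrative caller sketch (not part of the original source): a CR3 sync
 * path would typically reapply the hypervisor mappings after installing a
 * new shadow root, along the lines of the call below.
 */
#if 0
    int rc = PGMMapActivateAll(pVM);
    AssertRCReturn(rc, rc);
#endif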
/**
* Remove the hypervisor mappings from the active CR3.
*
* @returns VBox status.
* @param pVM The virtual machine.
*/
VMMDECL(int) PGMMapDeactivateAll(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;
#ifdef IN_RING0
AssertFailed();
return VERR_INTERNAL_ERROR;
#else
# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif
    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapClearShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
    }
return VINF_SUCCESS;
#endif /* IN_RING0 */
}
/**
* Remove the hypervisor mappings from the specified CR3.
*
* @returns VBox status.
* @param pVM The virtual machine.
* @param pShwPageCR3 CR3 root page
*/
VMMDECL(int) PGMMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;
#ifdef IN_RING0
AssertFailed();
return VERR_INTERNAL_ERROR;
#else
# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    Assert(pShwPageCR3);
# endif
    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE);
    }
return VINF_SUCCESS;
#endif /* IN_RING0 */
}
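/*
 * Illustrative sketch (not part of the original source): the difference
 * between the two deactivation entry points. PGMMapDeactivateAll operates on
 * the currently active shadow CR3, while PGMMapDeactivateCR3 targets an
 * explicitly given root page, e.g. one being torn down while another CR3 is
 * active. pOldShwPageCR3 is hypothetical.
 */
#if 0
    int rc = PGMMapDeactivateCR3(pVM, pOldShwPageCR3);
    AssertRC(rc);
#endif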
#ifndef IN_RING0
/**
* Checks guest PD for conflicts with VMM GC mappings.
*
* @returns true if conflict detected.
* @returns false if not.
* @param pVM The virtual machine.
*/
VMMDECL(bool) PGMMapHasConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return false;

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);
/*
* Iterate mappings.
*/
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT  = pCur->cPTs;
            while (iPT-- > 0)
                if (    pPD->a[iPDE + iPT].n.u1Present
                    &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, (RTGCPHYS)pPD->a[iPDE + iPT].u));
                    return true;
                }
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR  GCPtr = pCur->GCPtr;
            unsigned iPT   = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
                if (    Pde.n.u1Present
                    &&  (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
                    return true;
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
        }
    }
else
AssertFailed();
return false;
}
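/*
 * Illustrative caller sketch (not part of the original source): a detected
 * conflict is typically handled by forcing a CR3 resync, which gives ring 3
 * a chance to relocate the offending mapping.
 */
#if 0
    if (PGMMapHasConflicts(pVM))
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
#endif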
# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
/**
* Checks and resolves (ring 3 only) guest conflicts with VMM GC mappings.
*
* @returns VBox status.
* @param pVM The virtual machine.
*/
VMMDECL(int) PGMMapResolveConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);
/*
* Iterate mappings.
*/
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT  = pCur->cPTs;
            while (iPT-- > 0)
            {
                if (    pPD->a[iPDE + iPT].n.u1Present
                    &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
                    Log(("PGMMapResolveConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, (RTGCPHYS)pPD->a[iPDE + iPT].u));
#ifdef IN_RING3
                    int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, (RTGCPTR)iPDE << X86_PD_SHIFT);
                    AssertRCReturn(rc, rc);

                    /*
                     * Update pCur.
                     */
                    pCur = pVM->pgm.s.CTX_SUFF(pMappings);
                    while (pCur && pCur->GCPtr < ((RTGCUINTPTR)iPDE << X86_PD_SHIFT))
                        pCur = pCur->CTX_SUFF(pNext);
                    break;
#else
                    return VINF_PGM_SYNC_CR3;
#endif
                }
            }
            if (!pCur)
                break;
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR  GCPtr = pCur->GCPtr;
            unsigned iPT   = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
                if (    Pde.n.u1Present
                    &&  (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    Log(("PGMMapResolveConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
#ifdef IN_RING3
                    int rc = pgmR3SyncPTResolveConflictPAE(pVM, pCur, GCPtr);
                    AssertRCReturn(rc, rc);

                    /*
                     * Update pCur.
                     */
                    pCur = pVM->pgm.s.CTX_SUFF(pMappings);
                    while (pCur && pCur->GCPtr < GCPtr)
                        pCur = pCur->CTX_SUFF(pNext);
                    break;
#else
                    return VINF_PGM_SYNC_CR3;
#endif
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
            if (!pCur)
                break;
        }
    }
else
AssertFailed();
return VINF_SUCCESS;
}
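/*
 * Illustrative caller sketch (not part of the original source): outside
 * ring 3 a conflict cannot be repaired in place, so the VINF_PGM_SYNC_CR3
 * status is propagated back until ring 3 can run the resolution above.
 */
#if 0
    int rc = PGMMapResolveConflicts(pVM);
    if (rc == VINF_PGM_SYNC_CR3)
        return rc;
    AssertRC(rc);
#endif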
# endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
#endif /* !IN_RING0 */