/* $Id$ */
/** @file
* PGM - Page Manager and Monitor, Ring-0.
*/
/*
* Copyright (C) 2007 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/gmm.h>
#include "../PGMInternal.h"
#include <VBox/vm.h>
#include "../PGMInline.h"
#include <VBox/log.h>
#include <VBox/err.h>

RT_C_DECLS_BEGIN
#define PGM_BTH_NAME(name)  PGM_BTH_NAME_32BIT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME
#define PGM_BTH_NAME(name)  PGM_BTH_NAME_PAE_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME
#define PGM_BTH_NAME(name)  PGM_BTH_NAME_AMD64_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME
#define PGM_BTH_NAME(name)  PGM_BTH_NAME_EPT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME
RT_C_DECLS_END

/**
* Worker function for PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage.
*
* @returns The following VBox status codes.
* @retval VINF_SUCCESS on success. FF cleared.
* @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
*
* @param pVM The VM handle.
* @param pVCpu The VMCPU handle.
*
* @remarks Must be called from within the PGM critical section. The caller
* must clear the new pages.
*/
VMMR0DECL(int) PGMR0PhysAllocateHandyPages(PVM pVM, PVMCPU pVCpu)
{
    /*
     * Check for error injection.
     */
    if (RT_UNLIKELY(pVM->pgm.s.fErrInjHandyPages))
        return VERR_NO_MEMORY;

    /*
     * Try allocate a full set of handy pages.
     */
    uint32_t iFirst = pVM->pgm.s.cHandyPages;
    AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_INTERNAL_ERROR);
    uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
    if (!cPages)
        return VINF_SUCCESS;
    int rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
    if (RT_SUCCESS(rc))
    {
#ifdef VBOX_STRICT
        /* Sanity check the handy page entries GMM returned. */
        for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
        {
            Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
        }
#endif
        pVM->pgm.s.cHandyPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages);
    }
    else if (rc != VERR_GMM_SEED_ME)
    {
        if (    (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                 || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
            &&  iFirst < PGM_HANDY_PAGES_MIN)
        {
#ifdef VBOX_STRICT
            /* We're ASSUMING that GMM has updated all the entries before failing us. */
            uint32_t i;
            for (i = iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
            {
                Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
            }
#endif

            /*
             * Reduce the number of pages until we hit the minimum limit.
             */
            do
            {
                cPages >>= 2;
                if (cPages + iFirst < PGM_HANDY_PAGES_MIN)
                    cPages = PGM_HANDY_PAGES_MIN - iFirst;
                rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, 0, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
            } while (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                         || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
                     && cPages + iFirst > PGM_HANDY_PAGES_MIN);
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_STRICT
                /* Entries up to iFirst + cPages must now be valid ... */
                i = iFirst + cPages;
                while (i-- > 0)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
                }
                /* ... while the remaining entries must still be unset. */
                for (i = cPages + iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
                }
#endif
                pVM->pgm.s.cHandyPages = iFirst + cPages;
            }
        }

        if (RT_FAILURE(rc) && rc != VERR_GMM_SEED_ME)
        {
            LogRel(("PGMR0PhysAllocateHandyPages: failed - rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));
            VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
        }
    }

    return rc;
}
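
/*
 * Calling sketch (illustrative, not part of this file): the ring-3 side is
 * expected to reach the worker above through the VMMR0 dispatcher.  Assuming
 * the usual VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES operation code, roughly:
 *
 *     rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu,
 *                           VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
 *
 * The caller must own the PGM lock and must clear the newly allocated pages,
 * as noted in the function documentation above.
 */
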
/**
* Worker function for PGMR3PhysAllocateLargeHandyPage
*
* @returns The following VBox status codes.
* @retval VINF_SUCCESS on success.
* @retval VINF_EM_NO_MEMORY if we're out of memory.
*
* @param pVM The VM handle.
* @param pVCpu The VMCPU handle.
*
* @remarks Must be called from within the PGM critical section. The caller
* must clear the new pages.
*/
VMMR0DECL(int) PGMR0PhysAllocateLargeHandyPage(PVM pVM, PVMCPU pVCpu)
{
    Assert(!pVM->pgm.s.cLargeHandyPages);
    int rc = GMMR0AllocateLargePage(pVM, pVCpu->idCpu, _2M, &pVM->pgm.s.aLargeHandyPage[0].idPage, &pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
    if (RT_SUCCESS(rc))
        pVM->pgm.s.cLargeHandyPages = 1;
    return rc;
}
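
/*
 * Usage sketch (illustrative assumption): the ring-3 counterpart
 * (PGMR3PhysAllocateLargeHandyPage) would request this worker through the
 * VMMR0 dispatcher in the same way as the regular handy page worker above,
 * then hand the single 2 MB page recorded in aLargeHandyPage[0] (idPage and
 * HCPhysGCPhys) to the physical memory code for mapping into the guest.
 */
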
/**
* #PF Handler for nested paging.
*
* @returns VBox status code (appropriate for trap handling and GC return).
* @param pVM VM Handle.
* @param pVCpu VMCPU Handle.
* @param enmShwPagingMode Paging mode for the nested page tables
* @param uErr The trap error code.
* @param pRegFrame Trap register frame.
* @param pvFault The fault address.
*/
VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS pvFault)
{
    int rc;
    LogFlow(("PGMTrap0eHandler: uErr=%#x pvFault=%RGp eip=%RGv\n", uErr, pvFault, (RTGCPTR)pRegFrame->rip));

    /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
    AssertMsg(enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE || enmShwPagingMode == PGMMODE_PAE_NX || enmShwPagingMode == PGMMODE_AMD64 || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT, ("enmShwPagingMode=%d\n", enmShwPagingMode));
#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
    }
#endif /* VBOX_WITH_STATISTICS */
    /*
     * Call the worker.
     *
     * We pretend the guest is in protected mode without paging, so we can use
     * existing code to build the nested page tables.
     */
    bool fLockTaken = false;
    switch (enmShwPagingMode)
    {
        case PGMMODE_32_BIT:
            rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
            break;
        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
            break;
        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
            break;
        case PGMMODE_EPT:
            rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
            break;
        default:
            AssertFailed();
            rc = VERR_INVALID_PARAMETER;
            break;
    }
    if (fLockTaken)
    {
        Assert(PGMIsLockOwner(pVM));
        pgmUnlock(pVM);
    }
    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    else
    /* Note: hack alert for difficult to reproduce problem. */
    if (    rc == VERR_PAGE_NOT_PRESENT                 /* SMP only; disassembly might fail. */
        ||  rc == VERR_PAGE_TABLE_NOT_PRESENT           /* seen with UNI & SMP */
        ||  rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT   /* seen with SMP */
        ||  rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)     /* precaution */
    {
        Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGp error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
        /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
        rc = VINF_SUCCESS;
    }

    return rc;
}
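
/*
 * Caller sketch (illustrative assumption): the hardware-assisted execution
 * code invokes the handler above from its nested page fault / EPT violation
 * exit path, along these lines:
 *
 *     rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode,
 *                                         CPUMCTX2CORE(pCtx), GCPhysFault);
 *     if (rc == VINF_SUCCESS)
 *         ... restart the faulting instruction ...
 *
 * VINF_SUCCESS means the nested page tables were (re)synced and guest
 * execution can simply resume; other status codes are propagated for
 * ring-3 handling.
 */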