/* PGMAllShw.h, revision 089df8ada78128cb620e6a9af2bcb5bcae7d318f */
/* $Id$ */
/** @file
* VBox - Page Manager, Shadow Paging Template - All context code.
*/
/*
* Copyright (C) 2006-2010 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*******************************************************************************
* Defined Constants And Macros *
*******************************************************************************/
#if PGM_SHW_TYPE == PGM_TYPE_32BIT
# define SHW_PDE_PG_MASK X86_PDE_PG_MASK
# define SHW_PD_SHIFT X86_PD_SHIFT
# define SHW_PD_MASK X86_PD_MASK
# define SHW_TOTAL_PD_ENTRIES X86_PG_ENTRIES
# define SHW_PTE_PG_MASK X86_PTE_PG_MASK
# define SHW_PT_SHIFT X86_PT_SHIFT
# define SHW_PT_MASK X86_PT_MASK
# define SHW_POOL_ROOT_IDX PGMPOOL_IDX_PD
#elif PGM_SHW_TYPE == PGM_TYPE_EPT
# define SHW_PDE_PG_MASK EPT_PDE_PG_MASK
# define SHW_PD_SHIFT EPT_PD_SHIFT
# define SHW_PD_MASK EPT_PD_MASK
# define SHW_PTE_PG_MASK EPT_PTE_PG_MASK
# define SHW_PTE_IS_US(Pte) ( true )
# define SHW_PTE_IS_A(Pte) ( true )
# define SHW_PTE_IS_D(Pte) ( true )
# define SHW_PTE_IS_TRACK_DIRTY(Pte) ( false )
# define SHW_PT_SHIFT EPT_PT_SHIFT
# define SHW_PT_MASK EPT_PT_MASK
# define SHW_PDPT_SHIFT EPT_PDPT_SHIFT
# define SHW_PDPT_MASK EPT_PDPT_MASK
# define SHW_PDPE_PG_MASK EPT_PDPE_PG_MASK
# define SHW_POOL_ROOT_IDX PGMPOOL_IDX_NESTED_ROOT /* do not use! exception is real mode & protected mode without paging. */
#else
# define SHWPT PGMSHWPTPAE
# define PSHWPT PPGMSHWPTPAE
# define SHWPTE PGMSHWPTEPAE
# define PSHWPTE PPGMSHWPTEPAE
# define PSHWPDE PX86PDEPAE
# define SHW_PDE_PG_MASK X86_PDE_PAE_PG_MASK_FULL
# define SHW_PD_SHIFT X86_PD_PAE_SHIFT
# define SHW_PD_MASK X86_PD_PAE_MASK
# define SHW_PTE_PG_MASK X86_PTE_PAE_PG_MASK
# define SHW_PT_SHIFT X86_PT_PAE_SHIFT
# define SHW_PT_MASK X86_PT_PAE_MASK
# if PGM_SHW_TYPE == PGM_TYPE_AMD64
# define SHW_PDPT_SHIFT X86_PDPT_SHIFT
# define SHW_PDPT_MASK X86_PDPT_MASK_AMD64
# define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK
# define SHW_POOL_ROOT_IDX PGMPOOL_IDX_AMD64_CR3
# else /* 32-bit PAE mode */
# define SHW_PDPT_SHIFT X86_PDPT_SHIFT
# define SHW_PDPT_MASK X86_PDPT_MASK_PAE
# define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK
# define SHW_POOL_ROOT_IDX PGMPOOL_IDX_PDPT
# endif
#endif
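/*
 * Illustrative note (not part of the original template): the SHW_* macros
 * above let the mode independent code below derive shadow table indexes from
 * a guest context address without knowing which shadow paging mode is active,
 * for example:
 *
 *     const unsigned iPd = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
 *     const unsigned iPt = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
 *
 * With 32-bit shadow paging this selects one of 1024 directory/table entries;
 * with PAE, AMD64 and EPT it selects one of 512, and SHW_PDPT_SHIFT /
 * SHW_PDPT_MASK pick the PDPT entry above that.
 */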
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
PGM_SHW_DECL(int, GetPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags);
/**
* Gets effective page information (from the VMM page directory).
*
* @returns VBox status code.
* @param pVCpu The VMCPU handle.
* @param GCPtr Guest Context virtual address of the page.
* @param pfFlags Where to store the flags. These are X86_PTE_*.
* @param pHCPhys Where to store the HC physical address of the page.
* This is page aligned.
* @remark You should use PGMMapGetPage() for pages in a mapping.
*/
PGM_SHW_DECL(int, GetPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
#if PGM_SHW_TYPE == PGM_TYPE_NESTED
return VERR_PAGE_TABLE_NOT_PRESENT;
#else /* PGM_SHW_TYPE != PGM_TYPE_NESTED */
/*
* Get the PDE.
*/
# if PGM_SHW_TYPE == PGM_TYPE_AMD64
/* PML4 */
return VERR_PAGE_TABLE_NOT_PRESENT;
/* PDPT */
if (RT_FAILURE(rc))
return rc;
return VERR_PAGE_TABLE_NOT_PRESENT;
/* PD */
if (RT_FAILURE(rc))
return rc;
/* Merge accessed, write, user and no-execute bits into the PDE. */
if (rc != VINF_SUCCESS) /** @todo this function isn't expected to return informational status codes. Check callers / fix. */
{
return rc;
}
# else /* PGM_TYPE_32BIT */
# endif
if (!Pde.n.u1Present)
return VERR_PAGE_TABLE_NOT_PRESENT;
/** Deal with large pages. */
if (Pde.b.u1Size)
{
/*
* Store the results.
* RW and US flags depend on the entire page translation hierarchy - except for
* legacy PAE which has a simplified PDPE.
*/
if (pfFlags)
{
*pfFlags = (Pde.u & ~SHW_PDE_PG_MASK);
# if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE) /** @todo why do we have to check the guest state here? */
if ((Pde.u & X86_PTE_PAE_NX) && CPUMIsGuestNXEnabled(pVCpu))
*pfFlags |= X86_PTE_PAE_NX;
# endif
}
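/* A large page PDE holds the base of the whole 2/4 MB page; the expression
* below adds the offset of the 4K page within it (the GCPtr bits below
* SHW_PD_SHIFT, truncated to a 4K boundary by X86_PAGE_4K_BASE_MASK). */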
if (pHCPhys)
*pHCPhys = (Pde.u & SHW_PDE_PG_MASK) + (GCPtr & (RT_BIT(SHW_PD_SHIFT) - 1) & X86_PAGE_4K_BASE_MASK);
return VINF_SUCCESS;
}
/*
* Get PT entry.
*/
PSHWPT pPT;
if (!(Pde.u & PGM_PDFLAGS_MAPPING))
{
if (RT_FAILURE(rc2))
return rc2;
}
else /* mapping: */
{
# if PGM_SHW_TYPE == PGM_TYPE_AMD64 \
|| PGM_SHW_TYPE == PGM_TYPE_EPT
AssertFailed(); /* can't happen */
# else
# if PGM_SHW_TYPE == PGM_TYPE_32BIT
# else /* PAE */
# endif
# endif
}
const unsigned iPt = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
SHWPTE Pte = pPT->a[iPt];
if (!SHW_PTE_IS_P(Pte))
return VERR_PAGE_NOT_PRESENT;
/*
* Store the results.
* RW and US flags depend on the entire page translation hierarchy - except for
* legacy PAE which has a simplified PDPE.
*/
if (pfFlags)
{
*pfFlags = (SHW_PTE_GET_U(Pte) & ~SHW_PTE_PG_MASK)
& ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));
# if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE) /** @todo why do we have to check the guest state here? */
/* The NX bit is determined by a bitwise OR between the PT and PD */
if (((SHW_PTE_GET_U(Pte) | Pde.u) & X86_PTE_PAE_NX) && CPUMIsGuestNXEnabled(pVCpu))
*pfFlags |= X86_PTE_PAE_NX;
# endif
}
if (pHCPhys)
*pHCPhys = SHW_PTE_GET_HCPHYS(Pte);
return VINF_SUCCESS;
#endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED */
}
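/*
 * Caller sketch (illustrative only; assumes the mode dispatching
 * PGMShwGetPage() wrapper that ends up in the instantiation of this
 * template): query the shadow translation of a guest context address and
 * check whether the page is currently writable in the shadow page tables.
 *
 *     uint64_t fFlags;
 *     RTHCPHYS HCPhys;
 *     int rc = PGMShwGetPage(pVCpu, GCPtr, &fFlags, &HCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *         ...the page is present and writable in the shadow tables...
 */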
/**
* Modify page flags for a range of pages in the shadow context.
*
* The existing flags are ANDed with the fMask and ORed with the fFlags.
*
* @returns VBox status code.
* @param pVCpu The VMCPU handle.
* @param GCPtr Virtual address of the first page in the range. Page aligned!
* @param cb Size (in bytes) of the range to apply the modification to. Page aligned!
* @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
* @param fMask The AND mask - page flags X86_PTE_*.
* Be extremely CAREFUL with ~'ing values because they can be 32-bit!
* @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
* @remark You must use PGMMapModifyPage() for pages in a mapping.
*/
PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
{
# if PGM_SHW_TYPE == PGM_TYPE_NESTED
return VERR_PAGE_TABLE_NOT_PRESENT;
# else /* PGM_SHW_TYPE != PGM_TYPE_NESTED */
int rc;
/*
* Walk page tables and pages till we're done.
*/
for (;;)
{
/*
* Get the PDE.
*/
# if PGM_SHW_TYPE == PGM_TYPE_AMD64
/* PML4 */
return VERR_PAGE_TABLE_NOT_PRESENT;
/* PDPT */
if (RT_FAILURE(rc))
return rc;
return VERR_PAGE_TABLE_NOT_PRESENT;
/* PD */
if (RT_FAILURE(rc))
return rc;
if (rc != VINF_SUCCESS)
{
return rc;
}
# else /* PGM_TYPE_32BIT */
# endif
if (!Pde.n.u1Present)
return VERR_PAGE_TABLE_NOT_PRESENT;
/*
* Map the page table.
*/
if (RT_FAILURE(rc))
return rc;
unsigned iPTE = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
while (iPTE < RT_ELEMENTS(pPT->a))
{
SHWPTE const OrgPte = pPT->a[iPTE];
if (SHW_PTE_IS_P(OrgPte))
{
SHWPTE NewPte;
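/* Apply the AND/OR masks to the flag bits only: OR'ing SHW_PTE_PG_MASK into
* fMask and masking it out of fFlags keeps the physical address bits of the
* entry untouched, so only the attributes change. */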
SHW_PTE_SET(NewPte, (SHW_PTE_GET_U(OrgPte) & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK));
if (!SHW_PTE_IS_P(NewPte))
{
/** @todo Some CSAM code path might end up here and upset
* the page pool. */
AssertFailed();
}
else if ( SHW_PTE_IS_RW(NewPte)
&& !SHW_PTE_IS_RW(OrgPte)
&& !(fOpFlags & PGM_MK_PG_IS_MMIO2) )
{
/** @todo Optimize \#PF handling by caching data. We can
* then use this when PGM_MK_PG_IS_WRITE_FAULT is
* set instead of resolving the guest physical
* address yet again. */
if (RT_SUCCESS(rc))
{
if (pPage)
{
Log(("%s: pgmPhysPageMakeWritable on %RGv / %RGp %R[pgmpage]\n", __PRETTY_FUNCTION__, GCPtr, GCPhys, pPage));
}
}
}
# if PGM_SHW_TYPE == PGM_TYPE_EPT
# else
# endif
}
/* next page */
cb -= PAGE_SIZE;
if (!cb)
return VINF_SUCCESS;
GCPtr += PAGE_SIZE;
iPTE++;
}
}
# endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED */
}
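/*
 * Usage sketch (illustrative only, not taken from the sources): with the
 * AND/OR semantics documented above, write protecting a single page means
 * clearing X86_PTE_RW through the mask while adding no flags. Assuming a
 * direct call to the instantiated template via PGM_SHW_NAME():
 *
 *     uint64_t const fFlags = 0;                      (OR mask, adds nothing)
 *     uint64_t const fMask  = ~(uint64_t)X86_PTE_RW;  (AND mask, clears RW)
 *     rc = PGM_SHW_NAME(ModifyPage)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, 0);
 *
 * The explicit uint64_t cast honours the fMask remark above about ~'ing
 * values that may only be 32-bit wide.
 */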