PGMAll.cpp revision 01a590b833f412c36f58cc2ccbbd6b2a83c2b10d
/* $Id$ */
/** @file
* PGM - Page Manager and Monitor - All context code.
*/
/*
* Copyright (C) 2006-2007 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/hwacc_vmx.h>
#include "PGMInternal.h"
/*******************************************************************************
* Structures and Typedefs *
*******************************************************************************/
/**
* State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
* passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
*/
typedef struct PGMHVUSTATE
{
/** The VM handle. */
/** The VMCPU handle. */
/** The todo flags. */
/** The CR4 register value. */
/* NOTE(review): the member declarations described by the doc comments above
 * (VM handle, VMCPU handle, todo flags, CR4 value) are not present in this
 * excerpt -- the struct body appears truncated.  Confirm against the full
 * file; as shown this is an empty struct. */
} PGMHVUSTATE, *PPGMHVUSTATE;
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
/*
* Shadow - 32-bit mode
*/
#define PGM_SHW_TYPE PGM_TYPE_32BIT
#include "PGMAllShw.h"
/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
/*
* Shadow - PAE mode
*/
#define PGM_SHW_TYPE PGM_TYPE_PAE
#include "PGMAllShw.h"
/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
/*
* Shadow - AMD64 mode
*/
# define PGM_SHW_TYPE PGM_TYPE_AMD64
# include "PGMAllShw.h"
/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
# define PGM_GST_TYPE PGM_TYPE_PROT
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# include "PGMGstDefs.h"
# include "PGMAllGst.h"
# include "PGMAllBth.h"
# endif /* VBOX_WITH_64_BITS_GUESTS */
/*
* Shadow - Nested paging mode
*/
# define PGM_SHW_TYPE PGM_TYPE_NESTED
# include "PGMAllShw.h"
/* Guest - real mode */
# define PGM_GST_TYPE PGM_TYPE_REAL
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
/* Guest - protected mode */
# define PGM_GST_TYPE PGM_TYPE_PROT
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
/* Guest - 32-bit mode */
# define PGM_GST_TYPE PGM_TYPE_32BIT
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
/* Guest - PAE mode */
# define PGM_GST_TYPE PGM_TYPE_PAE
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# endif /* VBOX_WITH_64_BITS_GUESTS */
/*
* Shadow - EPT
*/
# define PGM_SHW_TYPE PGM_TYPE_EPT
# include "PGMAllShw.h"
/* Guest - real mode */
# define PGM_GST_TYPE PGM_TYPE_REAL
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
/* Guest - protected mode */
# define PGM_GST_TYPE PGM_TYPE_PROT
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
/* Guest - 32-bit mode */
# define PGM_GST_TYPE PGM_TYPE_32BIT
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
/* Guest - PAE mode */
# define PGM_GST_TYPE PGM_TYPE_PAE
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# endif /* VBOX_WITH_64_BITS_GUESTS */
#endif /* !IN_RC */
#ifndef IN_RING3
/**
* #PF Handler.
*
* @returns VBox status code (appropriate for trap handling and GC return).
* @param pVCpu VMCPU handle.
* @param uErr The trap error code.
* @param pRegFrame Trap register frame.
* @param pvFault The fault address.
*/
{
/* NOTE(review): the function signature (per the doc comment: a #PF handler
 * taking pVCpu, uErr, pRegFrame, pvFault) is not visible in this excerpt,
 * and the statistics branches below are missing their controlled statements
 * (presumably STAM_COUNTER_INC calls -- elided).  The worker call that
 * assigns 'rc' is also elided.  Confirm against the full file. */
LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%RGv eip=%04x:%RGv\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip));
#ifdef VBOX_WITH_STATISTICS
/*
 * Error code stats.  Classify the fault by user/supervisor, present,
 * read/write, reserved-bit and instruction-fetch flags.
 */
if (uErr & X86_TRAP_PF_US)
{
if (!(uErr & X86_TRAP_PF_P))
{
if (uErr & X86_TRAP_PF_RW)
else
}
else if (uErr & X86_TRAP_PF_RW)
else if (uErr & X86_TRAP_PF_RSVD)
else if (uErr & X86_TRAP_PF_ID)
else
}
else
{ /* Supervisor */
if (!(uErr & X86_TRAP_PF_P))
{
if (uErr & X86_TRAP_PF_RW)
else
}
else if (uErr & X86_TRAP_PF_RW)
else if (uErr & X86_TRAP_PF_ID)
else if (uErr & X86_TRAP_PF_RSVD)
}
#endif /* VBOX_WITH_STATISTICS */
/*
 * Call the worker.
 */
/* A modified PDE during SyncPage is benign here; report success. */
if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
rc = VINF_SUCCESS;
STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eGuestPF); });
return rc;
}
#endif /* !IN_RING3 */
/**
* Prefetch a page
*
* Typically used to sync commonly used pages before entering raw mode
* after a CR3 reload.
*
* @returns VBox status code suitable for scheduling.
* @retval VINF_SUCCESS on success.
* @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
* @param pVCpu VMCPU handle.
* @param GCPtrPage Page to invalidate.
*/
{
return rc;
}
/**
* Gets the mapping corresponding to the specified address (if any).
*
* @returns Pointer to the mapping.
* @returns NULL if not found.
*
* @param pVM The virtual machine.
* @param GCPtr The guest context pointer.
*/
{
while (pMapping)
{
break;
return pMapping;
}
return NULL;
}
/**
* Verifies a range of pages for read or write access
*
* Only checks the guest's page tables
*
* @returns VBox status code.
* @param pVCpu VMCPU handle.
* @param Addr Guest virtual address to check
* @param cbSize Access size
* @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
* @remarks Currently not in use.
*/
{
/*
* Validate input.
*/
{
return VERR_INVALID_PARAMETER;
}
if (RT_FAILURE(rc))
{
return VINF_EM_RAW_GUEST_TRAP;
}
/*
* Check if the access would cause a page fault
*
* Note that hypervisor page directories are not present in the guest's tables, so this check
* is sufficient.
*/
{
Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
return VINF_EM_RAW_GUEST_TRAP;
}
if ( RT_SUCCESS(rc)
return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
return rc;
}
/**
* Verifies a range of pages for read or write access
*
* Supports handling of pages marked for dirty bit tracking and CSAM
*
* @returns VBox status code.
* @param pVCpu VMCPU handle.
* @param Addr Guest virtual address to check
* @param cbSize Access size
* @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
*/
{
AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
/*
* Get going.
*/
if (RT_FAILURE(rc))
{
return VINF_EM_RAW_GUEST_TRAP;
}
/*
* Check if the access would cause a page fault
*
* Note that hypervisor page directories are not present in the guest's tables, so this check
* is sufficient.
*/
{
Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
return VINF_EM_RAW_GUEST_TRAP;
}
if (!HWACCMIsNestedPagingActive(pVM))
{
/*
* Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
*/
if ( rc == VERR_PAGE_NOT_PRESENT
|| rc == VERR_PAGE_TABLE_NOT_PRESENT)
{
/*
* Page is not present in our page tables.
* Try to sync it!
*/
if (rc != VINF_SUCCESS)
return rc;
}
else
}
#if 0 /* def VBOX_STRICT; triggers too often now */
/*
* This check is a bit paranoid, but useful.
*/
/** @note this will assert when writing to monitored pages (a bit annoying actually) */
{
AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
return VINF_EM_RAW_GUEST_TRAP;
}
#endif
if ( RT_SUCCESS(rc)
{
/* Don't recursively call PGMVerifyAccess as we might run out of stack. */
for (;;)
{
else
cbSize = 1;
if (rc != VINF_SUCCESS)
break;
break;
}
}
return rc;
}
/**
* Emulation of the invlpg instruction (HC only actually).
*
* @returns VBox status code, special care required.
* @retval VINF_PGM_SYNC_CR3 - handled.
* @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
* @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
*
* @param pVCpu VMCPU handle.
* @param GCPtrPage Page to invalidate.
*
* @remark ASSUMES the page table entry or page directory is valid. Fairly
* safe, but there could be edge cases!
*
* @todo Flush page or page directory only if necessary!
*/
{
int rc;
#ifndef IN_RING3
/*
* Notify the recompiler so it can record this instruction.
* Failure happens when it's out of space. We'll return to HC in that case.
*/
if (rc != VINF_SUCCESS)
return rc;
#endif /* !IN_RING3 */
#ifdef IN_RC
/*
* Check for conflicts and pending CR3 monitoring updates.
*/
{
{
LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
return VINF_PGM_SYNC_CR3;
}
{
LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
return VINF_EM_RAW_EMULATE_INSTR;
}
}
#endif /* IN_RC */
/*
* Call paging mode specific worker.
*/
#ifdef IN_RING3
/*
* Check if we have a pending update of the CR3 monitoring.
*/
if ( RT_SUCCESS(rc)
{
}
/*
* Inform CSAM about the flush
*
* Note: This is to check if monitored pages have been changed; when we implement
* callbacks for virtual handlers, this is no longer required.
*/
#endif /* IN_RING3 */
return rc;
}
/**
* Executes an instruction using the interpreter.
*
* @returns VBox status code (appropriate for trap handling and GC return).
* @param pVM VM handle.
* @param pVCpu VMCPU handle.
* @param pRegFrame Register frame.
* @param pvFault Fault address.
*/
VMMDECL(int) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
/* NOTE(review): the statement that declares and assigns 'rc' (presumably the
 * call into the instruction interpreter) is not present in this excerpt, and
 * both 'if' statements below are missing their controlled statements.
 * Body appears truncated -- confirm against the full file. */
if (rc == VERR_EM_INTERPRETER)
if (rc != VINF_SUCCESS)
return rc;
}
/**
* Gets effective page information (from the VMM page directory).
*
* @returns VBox status.
* @param pVCpu VMCPU handle.
* @param GCPtr Guest Context virtual address of the page.
* @param pfFlags Where to store the flags. These are X86_PTE_*.
* @param pHCPhys Where to store the HC physical address of the page.
* This is page aligned.
* @remark You should use PGMMapGetPage() for pages in a mapping.
*/
{
}
/**
* Sets (replaces) the page flags for a range of pages in the shadow context.
*
* @returns VBox status.
* @param pVCpu VMCPU handle.
* @param GCPtr The address of the first page.
* @param cb The size of the range in bytes.
* @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
* @remark You must use PGMMapSetPage() for pages in a mapping.
*/
{
}
/**
* Modify page flags for a range of pages in the shadow context.
*
* The existing flags are ANDed with the fMask and ORed with the fFlags.
*
* @returns VBox status code.
* @param pVCpu VMCPU handle.
* @param GCPtr Virtual address of the first page in the range.
* @param cb Size (in bytes) of the range to apply the modification to.
* @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
* @param fMask The AND mask - page flags X86_PTE_*.
* Be very CAREFUL when ~'ing constants which could be 32-bit!
* @remark You must use PGMMapModifyPage() for pages in a mapping.
*/
VMMDECL(int) PGMShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
/* NOTE(review): only the section comments survive in this excerpt; the
 * page-alignment of GCPtr/cb and the call into the mode-specific worker
 * (whose status would be returned) are elided.  As shown, control falls off
 * the end of a non-void function -- a truncation artifact, not a real bug. */
/*
 * Align the input.
 */
/*
 * Call worker.
 */
}
/**
* Gets the shadow page directory for the specified address, PAE.
*
* @returns Pointer to the shadow PD.
* @param pVCpu The VMCPU handle.
* @param GCPtr The address.
* @param pGstPdpe Guest PDPT entry
* @param ppPD Receives address of page directory
*/
{
int rc;
/* Allocate page directory if not present. */
&& !(pPdpe->u & X86_PDPE_PG_MASK))
{
# if defined(IN_RC)
/* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
# endif
if (fNestedPaging || !fPaging)
{
}
else
{
{
{
/* PD not present; guest must reload CR3 to change it.
* No need to monitor anything in this case.
*/
}
else
{
}
}
else
{
}
}
/* Create a reference back to the PDPT by using the index in its shadow page. */
/* The PD was cached or created; hook it up now. */
# if defined(IN_RC)
/* In 32 bits PAE mode we *must* invalidate the TLB when changing a PDPT entry; the CPU fetches them only during cr3 load, so any
* non-present PDPT will continue to cause page faults.
*/
ASMReloadCR3();
# endif
}
else
{
}
return VINF_SUCCESS;
}
/**
* Gets the pointer to the shadow page directory entry for an address, PAE.
*
* @returns Pointer to the PDE.
* @param pPGM Pointer to the PGMCPU instance data.
* @param GCPtr The address.
* @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
*/
{
{
}
/* Fetch the pgm pool shadow descriptor. */
PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
return VINF_SUCCESS;
}
#ifndef IN_RC
/**
* Syncs the SHADOW page directory pointer for the specified address.
*
* Allocates backing pages in case the PDPT or PML4 entry is missing.
*
* The caller is responsible for making sure the guest has a valid PD before
* calling this function.
*
* @returns VBox status.
* @param pVCpu VMCPU handle.
* @param GCPtr The address.
* @param pGstPml4e Guest PML4 entry
* @param pGstPdpe Guest PDPT entry
* @param ppPD Receives address of page directory
*/
int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
{
int rc;
/* NOTE(review): this excerpt is heavily truncated.  The PML4E/PDPE lookups,
 * the pool allocation / cache-lookup calls inside the empty brace pairs, the
 * leading halves of the two '&& !(...)' present-bit conditions, and the
 * final store through ppPD are all elided.  'rc' is declared but never
 * visibly assigned or returned.  Confirm against the full file. */
/* Allocate page directory pointer table if not present. */
&& !(pPml4e->u & X86_PML4E_PG_MASK))
{
/* With nested paging or a non-paging guest there is no guest entry to
 * shadow; otherwise the guest PDPT entry drives the allocation. */
if (fNestedPaging || !fPaging)
{
}
else
{
}
/* Create a reference back to the PDPT by using the index in its shadow page. */
}
else
{
}
/* The PDPT was cached or created; hook it up now. */
/* Allocate page directory if not present. */
&& !(pPdpe->u & X86_PDPE_PG_MASK))
{
if (fNestedPaging || !fPaging)
{
}
else
{
}
/* Create a reference back to the PDPT by using the index in its shadow page. */
}
else
{
}
/* The PD was cached or created; hook it up now. */
return VINF_SUCCESS;
}
/**
* Gets the SHADOW page directory pointer for the specified address (long mode).
*
* @returns VBox status.
* @param pVCpu VMCPU handle.
* @param GCPtr The address.
* @param ppPdpt Receives address of pdpt
* @param ppPD Receives address of page directory
*/
DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
{
/* NOTE(review): the PML4 lookup, the optional store through ppPml4e, the
 * present-bit test that should guard the error return, and the PDPT/PD walk
 * filling *ppPdpt / *ppPD are elided in this excerpt.  As shown,
 * 'if (ppPml4e)' directly guarding the error return contradicts the
 * documented contract -- a truncation artifact, not the real control flow. */
if (ppPml4e)
return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
return VINF_SUCCESS;
}
/**
* Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
* backing pages in case the PDPT or PML4 entry is missing.
*
* @returns VBox status.
* @param pVCpu VMCPU handle.
* @param GCPtr The address.
* @param ppPdpt Receives address of pdpt
* @param ppPD Receives address of page directory
*/
{
int rc;
/* Allocate page directory pointer table if not present. */
&& !(pPml4e->u & EPT_PML4E_PG_MASK))
{
rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
}
else
{
}
/* The PDPT was cached or created; hook it up now and fill with the default value. */
if (ppPdpt)
/* Allocate page directory if not present. */
&& !(pPdpe->u & EPT_PDPTE_PG_MASK))
{
}
else
{
}
/* The PD was cached or created; hook it up now and fill with the default value. */
return VINF_SUCCESS;
}
#endif /* IN_RC */
/**
* Gets effective Guest OS page information.
*
* When GCPtr is in a big page, the function will return as if it was a normal
* 4KB page. If the need for distinguishing between big and normal page becomes
* necessary at a later point, a PGMGstGetPage() will be created for that
* purpose.
*
* @returns VBox status.
* @param pVCpu VMCPU handle.
* @param GCPtr Guest Context virtual address of the page.
* @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
* @param pGCPhys Where to store the GC physical address of the page.
* This is page aligned. The fact that the
*/
{
}
/**
* Checks if the page is present.
*
* @returns true if the page is present.
* @returns false if the page is not present.
* @param pVCpu VMCPU handle.
* @param GCPtr Address within the page.
*/
{
return RT_SUCCESS(rc);
}
/**
* Sets (replaces) the page flags for a range of pages in the guest's tables.
*
* @returns VBox status.
* @param pVCpu VMCPU handle.
* @param GCPtr The address of the first page.
* @param cb The size of the range in bytes.
* @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
*/
{
}
/**
* Modify page flags for a range of pages in the guest's tables
*
* The existing flags are ANDed with the fMask and ORed with the fFlags.
*
* @returns VBox status code.
* @param pVCpu VMCPU handle.
* @param GCPtr Virtual address of the first page in the range.
* @param cb Size (in bytes) of the range to apply the modification to.
* @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
* @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
* Be very CAREFUL when ~'ing constants which could be 32-bit!
*/
VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
/* NOTE(review): the validation asserts, the page-alignment adjustment of
 * GCPtr/cb, and the call into the mode-specific worker that produces 'rc'
 * are elided in this excerpt; 'rc' is returned without a visible
 * declaration.  Confirm against the full file. */
/*
 * Validate input.
 */
/*
 * Adjust input.
 */
/*
 * Call worker.
 */
return rc;
}
#ifdef IN_RING3
/**
* Performs the lazy mapping of the 32-bit guest PD.
*
* @returns Pointer to the mapping.
* @param pPGM The PGM instance data.
*/
{
int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3);
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# endif
}
/**
* Performs the lazy mapping of the PAE guest PDPT.
*
* @returns Pointer to the mapping.
* @param pPGM The PGM instance data.
*/
{
int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAE_PAGE_MASK, (void **)&HCPtrGuestCR3);
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# endif
}
#endif /* IN_RING3 */
/**
* Performs the lazy mapping / updating of a PAE guest PD.
*
* @returns Pointer to the mapping.
* @param pPGM The PGM instance data.
* @param iPdpt Which PD entry to map (0..3).
*/
{
{
int rc = VINF_SUCCESS;
#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
#endif
{
}
if (RT_SUCCESS(rc))
{
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# endif
if (fChanged)
{
}
}
}
/* Invalid page or some failure, invalidate the entry. */
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# endif
return NULL;
}
#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
/**
* Performs the lazy mapping of the 32-bit guest PD.
*
* @returns Pointer to the mapping.
* @param pPGM The PGM instance data.
*/
{
int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK, (void **)&HCPtrGuestCR3);
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
# endif
}
#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3 */
/**
* Gets the specified page directory pointer table entry.
*
* @returns PDP entry
* @param pVCpu VMCPU handle.
* @param iPdpt PDPT index
*/
{
}
/**
* Gets the current CR3 register value for the shadow memory context.
* @returns CR3 value.
* @param pVCpu VMCPU handle.
*/
{
AssertPtrReturn(pPoolPage, 0);
}
/**
* Gets the current CR3 register value for the nested memory context.
* @returns CR3 value.
* @param pVCpu VMCPU handle.
*/
{
}
/**
* Gets the current CR3 register value for the HC intermediate memory context.
* @returns CR3 value.
* @param pVM The VM handle.
*/
{
{
case SUPPAGINGMODE_32_BIT:
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_GLOBAL:
case SUPPAGINGMODE_PAE_NX:
case SUPPAGINGMODE_AMD64:
case SUPPAGINGMODE_AMD64_NX:
default:
return ~0;
}
}
/**
* Gets the current CR3 register value for the RC intermediate memory context.
* @returns CR3 value.
* @param pVM The VM handle.
* @param pVCpu VMCPU handle.
*/
{
{
case PGMMODE_32_BIT:
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
case PGMMODE_EPT:
case PGMMODE_NESTED:
return 0; /* not relevant */
default:
return ~0;
}
}
/**
* Gets the CR3 register value for the 32-Bit intermediate memory context.
* @returns CR3 value.
* @param pVM The VM handle.
*/
{
}
/**
* Gets the CR3 register value for the PAE intermediate memory context.
* @returns CR3 value.
* @param pVM The VM handle.
*/
{
}
/**
* Gets the CR3 register value for the AMD64 intermediate memory context.
* @returns CR3 value.
* @param pVM The VM handle.
*/
{
}
/**
* Performs and schedules necessary updates following a CR3 load or reload.
*
* This will normally involve mapping the guest PD or nPDPT
*
* @returns VBox status code.
* @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
* safely be ignored and overridden since the FF will be set too then.
* @param pVCpu VMCPU handle.
* @param cr3 The new cr3.
* @param fGlobal Indicates whether this is a global flush or not.
*/
{
/*
* Always flag the necessary updates; necessary for hardware acceleration
*/
/** @todo optimize this, it shouldn't always be necessary. */
if (fGlobal)
LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
/*
* Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
*/
int rc = VINF_SUCCESS;
{
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
break;
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
break;
default:
break;
}
{
{
{
}
}
else
{
}
if (fGlobal)
else
}
else
{
/*
* Check if we have a pending update of the CR3 monitoring.
*/
{
}
if (fGlobal)
else
}
return rc;
}
/**
* Performs and schedules necessary updates following a CR3 load or reload when
* using nested or extended paging.
*
* This API is an alternative to PGMFlushTLB that avoids actually flushing the
* TLB and triggering a SyncCR3.
*
* This will normally involve mapping the guest PD or nPDPT
*
* @returns VBox status code.
* @retval VINF_SUCCESS.
* @retval (If applied when not in nested mode: VINF_PGM_SYNC_CR3 if monitoring
* requires a CR3 sync. This can safely be ignored and overridden since
* the FF will be set too then.)
* @param pVCpu VMCPU handle.
* @param cr3 The new cr3.
*/
{
/* We assume we're only called in nested paging mode. */
/*
* Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
*/
int rc = VINF_SUCCESS;
{
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
break;
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
break;
default:
break;
}
{
AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
}
return rc;
}
/**
* Synchronize the paging structures.
*
* This function is called in response to the VM_FF_PGM_SYNC_CR3 and
* VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
* in several places, most importantly whenever the CR3 is loaded.
*
* @returns VBox status code.
* @param pVCpu VMCPU handle.
* @param cr0 Guest context CR0 register
* @param cr3 Guest context CR3 register
* @param cr4 Guest context CR4 register
* @param fGlobal Including global page directories or not
*/
{
int rc;
#ifdef PGMPOOL_WITH_MONITORING
/*
* The pool may have pending stuff and even require a return to ring-3 to
* clear the whole thing.
*/
if (rc != VINF_SUCCESS)
return rc;
#endif
/*
* We might be called when we shouldn't.
*
* The mode switching will ensure that the PD is resynced
* after every mode switch. So, if we find ourselves here
* when in protected or real mode we can safely disable the
* FF and return immediately.
*/
{
return VINF_SUCCESS;
}
/* If global pages are not supported, then all flushes are global. */
if (!(cr4 & X86_CR4_PGE))
fGlobal = true;
VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
/*
* Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
* This should be done before SyncCR3.
*/
{
{
case PGMMODE_PAE:
case PGMMODE_PAE_NX:
break;
case PGMMODE_AMD64:
case PGMMODE_AMD64_NX:
break;
default:
break;
}
{
}
#ifdef IN_RING3
if (rc == VINF_PGM_SYNC_CR3)
#else
if (rc == VINF_PGM_SYNC_CR3)
{
return rc;
}
#endif
}
/*
* Let the 'Bth' function do the work and we'll just keep track of the flags.
*/
if (rc == VINF_SUCCESS)
{
{
}
/*
* Check if we have a pending update of the CR3 monitoring.
*/
{
}
}
/*
* Now flush the CR3 (guest context).
*/
if (rc == VINF_SUCCESS)
return rc;
}
/**
* Called whenever CR0 or CR4 changes in a way which may change
* the paging mode.
*
* @returns VBox status code, with the following informational code for
* VM scheduling.
* @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
* @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
* (I.e. not in R3.)
* @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
*
* @param pVCpu VMCPU handle.
* @param cr0 The new cr0.
* @param cr4 The new cr4.
* @param efer The new extended feature enable register.
*/
{
/*
* Calc the new guest mode.
*/
if (!(cr0 & X86_CR0_PE))
else if (!(cr0 & X86_CR0_PG))
else if (!(cr4 & X86_CR4_PAE))
else if (!(efer & MSR_K6_EFER_LME))
{
if (!(efer & MSR_K6_EFER_NXE))
else
}
else
{
if (!(efer & MSR_K6_EFER_NXE))
else
}
/*
* Did it change?
*/
return VINF_SUCCESS;
/* Flush the TLB */
#ifdef IN_RING3
#else
LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
return VINF_PGM_CHANGE_MODE;
#endif
}
/**
* Gets the current guest paging mode.
*
*
* @returns The current paging mode.
* @param pVCpu VMCPU handle.
*/
{
}
/**
* Gets the current shadow paging mode.
*
* @returns The current paging mode.
* @param pVCpu VMCPU handle.
*/
{
}
/**
* Gets the current host paging mode.
*
* @returns The current paging mode.
* @param pVM The VM handle.
*/
{
/* NOTE(review): the function signature and the 'switch' head (presumably
 * switching on the host paging mode reported by the support driver) are not
 * visible in this excerpt; the name PGMGetHostMode is inferred from the doc
 * comment above.  Maps SUPPAGINGMODE_* to the corresponding PGMMODE_*. */
{
case SUPPAGINGMODE_32_BIT:
return PGMMODE_32_BIT;
case SUPPAGINGMODE_PAE:
case SUPPAGINGMODE_PAE_GLOBAL:
return PGMMODE_PAE;
case SUPPAGINGMODE_PAE_NX:
return PGMMODE_PAE_NX;
case SUPPAGINGMODE_AMD64:
return PGMMODE_AMD64;
case SUPPAGINGMODE_AMD64_NX:
return PGMMODE_AMD64_NX;
}
/* Unrecognised host paging mode. */
return PGMMODE_INVALID;
}
/**
* Get mode name.
*
* @returns read-only name string.
* @param enmMode The mode which name is desired.
*/
{
/* NOTE(review): the function signature (presumably PGMModeName(PGMMODE
 * enmMode), returning a read-only string per the doc comment above) is not
 * visible in this excerpt; the switch itself is complete. */
switch (enmMode)
{
case PGMMODE_REAL: return "Real";
case PGMMODE_PROTECTED: return "Protected";
case PGMMODE_32_BIT: return "32-bit";
case PGMMODE_PAE: return "PAE";
case PGMMODE_PAE_NX: return "PAE+NX";
case PGMMODE_AMD64: return "AMD64";
case PGMMODE_AMD64_NX: return "AMD64+NX";
case PGMMODE_NESTED: return "Nested";
case PGMMODE_EPT: return "EPT";
default: return "unknown mode value";
}
}
/**
* Check if the PGM lock is currently taken.
*
* @param pVM The VM to operate on.
*/
{
}
/**
* Check if this VCPU currently owns the PGM lock.
*
* @param pVM The VM to operate on.
*/
{
}
/**
* Acquire the PGM lock.
*
* @returns VBox status code
* @param pVM The VM to operate on.
*/
{
#ifdef IN_RC
if (rc == VERR_SEM_BUSY)
if (rc == VERR_SEM_BUSY)
#endif
return rc;
}
/**
* Release the PGM lock.
*
* @returns VBox status code
* @param pVM The VM to operate on.
*/
{
}
#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
/**
* Temporarily maps one guest page specified by GC physical address.
* These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
*
* Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
* reused after 8 mappings (or perhaps a few more if you score with the cache).
*
* @returns VBox status.
* @param pVM VM handle.
* @param GCPhys GC Physical address of the page.
* @param ppv Where to store the address of the mapping.
*/
{
/*
* Get the ram range.
*/
if (!pRam)
{
}
/*
* Pass it on to PGMDynMapHCPage.
*/
//Log(("PGMDynMapGCPage: GCPhys=%RGp HCPhys=%RHp\n", GCPhys, HCPhys));
#else
#endif
return VINF_SUCCESS;
}
/**
* Temporarily maps one guest page specified by unaligned GC physical address.
* These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
*
* Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
* reused after 8 mappings (or perhaps a few more if you score with the cache).
*
* The caller is aware that only the specified page is mapped and that really bad things
* will happen if writing beyond the page!
*
* @returns VBox status.
* @param pVM VM handle.
* @param GCPhys GC Physical address within the page to be mapped.
* @param ppv Where to store the address of the mapping address corresponding to GCPhys.
*/
{
/*
* Get the ram range.
*/
if (!pRam)
{
}
/*
* Pass it on to PGMDynMapHCPage.
*/
#else
#endif
return VINF_SUCCESS;
}
# ifdef IN_RC
/**
* Temporarily maps one host page specified by HC physical address.
*
* Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
* reused after 16 mappings (or perhaps a few more if you score with the cache).
*
* @returns VINF_SUCCESS, will bail out to ring-3 on failure.
* @param pVM VM handle.
* @param HCPhys HC Physical address of the page.
* @param ppv Where to store the address of the mapping. This is the
* address of the PAGE not the exact address corresponding
* to HCPhys. Use PGMDynMapHCPageOff if you care for the
* page offset.
*/
{
/*
* Check the cache.
*/
register unsigned iCache;
{
static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
{
{ 0, 9, 10, 11, 12, 13, 14, 15},
{ 0, 1, 10, 11, 12, 13, 14, 15},
{ 0, 1, 2, 11, 12, 13, 14, 15},
{ 0, 1, 2, 3, 12, 13, 14, 15},
{ 0, 1, 2, 3, 4, 13, 14, 15},
{ 0, 1, 2, 3, 4, 5, 14, 15},
{ 0, 1, 2, 3, 4, 5, 6, 15},
{ 0, 1, 2, 3, 4, 5, 6, 7},
{ 8, 1, 2, 3, 4, 5, 6, 7},
{ 8, 9, 2, 3, 4, 5, 6, 7},
{ 8, 9, 10, 3, 4, 5, 6, 7},
{ 8, 9, 10, 11, 4, 5, 6, 7},
{ 8, 9, 10, 11, 12, 5, 6, 7},
{ 8, 9, 10, 11, 12, 13, 6, 7},
{ 8, 9, 10, 11, 12, 13, 14, 7},
{ 8, 9, 10, 11, 12, 13, 14, 15},
};
{
/* The cache can get out of sync with locked entries. (10 locked, 2 overwrites its cache position, last = 11, lookup 2 -> page 10 instead of 2) */
{
return VINF_SUCCESS;
}
else
}
}
/*
* Update the page tables.
*/
unsigned i;
for (i=0;i<(MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT);i++)
{
break;
iPage++;
}
pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
return VINF_SUCCESS;
}
/**
* Temporarily lock a dynamic page to prevent it from being reused.
*
* @param pVM VM handle.
* @param GCPage GC address of page
*/
{
unsigned iPage;
Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
}
/**
* Unlock a dynamic page
*
* @param pVM VM handle.
* @param GCPage GC address of page
*/
{
unsigned iPage;
AssertCompile(RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache) == 2* RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache));
AssertCompileMemberSize(VM, pgm.s.aLockedDynPageMapCache, sizeof(uint32_t) * (MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)));
Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
}
# ifdef VBOX_STRICT
/**
* Check for lock leaks.
*
* @param pVM VM handle.
*/
{
}
# endif /* VBOX_STRICT */
# endif /* IN_RC */
#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
#if !defined(IN_R0) || defined(LOG_ENABLED)
/** Format handler for PGMPAGE.
* @copydoc FNRTSTRFORMATTYPE */
void *pvUser)
{
{
cch = 0;
/* The single char state stuff. */
#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
if (IS_PART_INCLUDED(5))
{
}
/* The type. */
if (IS_PART_INCLUDED(4))
{
static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
}
/* The numbers. */
if (IS_PART_INCLUDED(3))
{
cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
}
if (IS_PART_INCLUDED(2))
{
cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
}
if (IS_PART_INCLUDED(6))
{
cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
}
}
else
return cch;
}
/** Format handler for PGMRAMRANGE.
* @copydoc FNRTSTRFORMATTYPE */
static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
void *pvUser)
{
/* NOTE(review): the parameters between pvArgOutput and pvUser (the usual
 * FNRTSTRFORMATTYPE type/flags/precision arguments) are elided from the
 * split parameter list above, and the body is gutted: the argument check,
 * the formatting into szTmp, the output via pfnOutput, and the declaration
 * of 'cch' are all missing.  Confirm against the full file. */
{
char szTmp[80];
}
else
return cch;
}
/** Format type handlers to be registered/deregistered. */
static const struct
{
/** The format type name. */
char szType[24];
/* NOTE(review): the second member -- the format handler function pointer
 * that the initializers below supply (pgmFormatTypeHandlerPage /
 * pgmFormatTypeHandlerRamRange) -- is not visible in this excerpt.  As
 * shown the initializers have more fields than the struct declares. */
} g_aPgmFormatTypes[] =
{
{ "pgmpage", pgmFormatTypeHandlerPage },
{ "pgmramrange", pgmFormatTypeHandlerRamRange }
};
#endif /* !IN_R0 || LOG_ENABLED */
/**
* Registers the global string format types.
*
* This should be called at module load time or in some other manner that ensure
* that it's called exactly one time.
*
* @returns IPRT status code on RTStrFormatTypeRegister failure.
*/
VMMDECL(int) PGMRegisterStringFormatTypes(void)
{
#if !defined(IN_R0) || defined(LOG_ENABLED)
int rc = VINF_SUCCESS;
unsigned i;
/* NOTE(review): the 'for' head iterating g_aPgmFormatTypes and the
 * RTStrFormatTypeRegister call that assigns 'rc' (see the doc comment's
 * @returns) are elided in this excerpt, as are the statements inside the
 * VERR_ALREADY_EXISTS branch and the rollback call controlled by the
 * 'while' below. */
{
# ifdef IN_RING0
/* In ring-0 a previous instance may have left a stale registration behind
 * if its cleanup failed; tolerate VERR_ALREADY_EXISTS. */
if (rc == VERR_ALREADY_EXISTS)
{
/* in case of cleanup failure in ring-0 */
}
# endif
}
/* On failure, unwind whatever was registered so far. */
if (RT_FAILURE(rc))
while (i-- > 0)
return rc;
#else
return VINF_SUCCESS;
#endif
}
/**
* Deregisters the global string format types.
*
* This should be called at module unload time or in some other manner that
* ensure that it's called exactly one time.
*/
VMMDECL(void) PGMDeregisterStringFormatTypes(void)
{
#if !defined(IN_R0) || defined(LOG_ENABLED)
/* Deregister every entry registered by PGMRegisterStringFormatTypes().
 * NOTE(review): the loop body (the deregister call) is elided in this
 * excerpt -- as shown the 'for' controls the '#endif', which is a
 * truncation artifact. */
for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
#endif
}
#ifdef VBOX_STRICT
/**
* Asserts that there are no mapping conflicts.
*
* @returns Number of conflicts.
* @param pVM The VM Handle.
*/
{
unsigned cErrors = 0;
/* Only applies to raw mode -> 1 VPCU */
/*
* Check for mapping conflicts.
*/
{
/** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
{
if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
{
cErrors++;
break;
}
}
}
return cErrors;
}
/**
* Asserts that everything related to the guest CR3 is correctly shadowed.
*
* This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
* and assert the correctness of the guest CR3 mapping before asserting that the
* shadow page tables is in sync with the guest page tables.
*
* @returns Number of conflicts.
* @param pVM The VM Handle.
* @param pVCpu VMCPU handle.
* @param cr3 The current guest CR3 register value.
* @param cr4 The current guest CR4 register value.
*/
{
return cErrors;
}
#endif /* VBOX_STRICT */