/* $Id$ */
/** @file
* PGM - Inlined functions.
*/
/*
* Copyright (C) 2006-2010 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#ifndef ___PGMInline_h
#define ___PGMInline_h
#include <iprt/critsect.h>
/** @addtogroup grp_pgm_int Internals
* @internal
* @{
*/
/**
* Gets the PGMRAMRANGE structure for a guest page.
*
* @returns Pointer to the RAM range on success.
* @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
*
* @param pVM Pointer to the VM.
* @param GCPhys The GC physical address.
*/
DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PVM pVM, RTGCPHYS GCPhys)
{
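    /* Sketch of the lookup: a per-context RAM range TLB with an out-of-line
       fallback.  apRamRangesTlb, PGM_RAMRANGE_TLB_IDX and the *Slow helpers
       are assumptions based on PGM conventions of this era; the same
       assumption holds for the related lookup helpers below. */
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    if (   !pRam
        || GCPhys - pRam->GCPhys >= pRam->cb)
        return pgmPhysGetRangeSlow(pVM, GCPhys);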
return pRam;
}
/**
 * Gets the PGMRAMRANGE structure for a guest page; if the address is
 * unassigned, gets the RAM range above it.
*
* @returns Pointer to the RAM range on success.
* @returns NULL if the address is located after the last range.
*
* @param pVM Pointer to the VM.
* @param GCPhys The GC physical address.
*/
DECLINLINE(PPGMRAMRANGE) pgmPhysGetRangeAbove(PVM pVM, RTGCPHYS GCPhys)
{
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    if (   !pRam
        || GCPhys - pRam->GCPhys >= pRam->cb)
        return pgmPhysGetRangeAtOrAboveSlow(pVM, GCPhys); /* assumed slow-path helper */
return pRam;
}
/**
* Gets the PGMPAGE structure for a guest page.
*
* @returns Pointer to the page on success.
* @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
*
* @param pVM Pointer to the VM.
* @param GCPhys The GC physical address.
*/
DECLINLINE(PPGMPAGE) pgmPhysGetPage(PVM pVM, RTGCPHYS GCPhys)
{
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    RTGCPHYS     off;
    if (   !pRam
        || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
        return pgmPhysGetPageSlow(pVM, GCPhys); /* assumed slow-path helper */
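    /* Assumed page-array layout within the RAM range. */
    return &pRam->aPages[off >> PAGE_SHIFT];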
}
/**
* Gets the PGMPAGE structure for a guest page.
*
* Old Phys code: Will make sure the page is present.
*
* @returns VBox status code.
* @retval VINF_SUCCESS and a valid *ppPage on success.
* @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
*
* @param pVM Pointer to the VM.
* @param GCPhys The GC physical address.
* @param ppPage Where to store the page pointer on success.
*/
DECLINLINE(int) pgmPhysGetPageEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
{
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    RTGCPHYS     off;
    if (   !pRam
        || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
        return pgmPhysGetPageExSlow(pVM, GCPhys, ppPage); /* assumed slow-path helper */
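    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];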
return VINF_SUCCESS;
}
/**
* Gets the PGMPAGE structure for a guest page.
*
* Old Phys code: Will make sure the page is present.
*
* @returns VBox status code.
* @retval VINF_SUCCESS and a valid *ppPage on success.
* @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
*
* @param pVM Pointer to the VM.
* @param GCPhys The GC physical address.
* @param ppPage Where to store the page pointer on success.
* @param ppRamHint Where to read and store the ram list hint.
* The caller initializes this to NULL before the call.
*/
DECLINLINE(int) pgmPhysGetPageWithHintEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
{
    RTGCPHYS     off;
    PPGMRAMRANGE pRam = *ppRamHint;
    if (   !pRam
        || RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
{
        pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
        if (   !pRam
            || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
            return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRamHint); /* assumed slow path */
        *ppRamHint = pRam;
}
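    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];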
return VINF_SUCCESS;
}
/**
* Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
*
 * @returns VBox status code.
 * @retval VINF_SUCCESS and valid *ppPage / *ppRam on success.
 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
*
* @param pVM Pointer to the VM.
* @param GCPhys The GC physical address.
* @param ppPage Where to store the pointer to the PGMPAGE structure.
* @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
*/
DECLINLINE(int) pgmPhysGetPageAndRangeEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
{
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    RTGCPHYS     off;
    if (   !pRam
        || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
        return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRam); /* assumed slow path */
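    *ppRam  = pRam;
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];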
return VINF_SUCCESS;
}
/**
 * Converts a GC physical address to the corresponding HC physical address.
 *
 * @returns VBox status code.
* @param pVM Pointer to the VM.
* @param GCPhys The GC physical address.
* @param pHCPhys Where to store the corresponding HC physical address.
*
* @deprecated Doesn't deal with zero, shared or write monitored pages.
* Avoid when writing new code!
*/
DECLINLINE(int) pgmPhysGetHCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
{
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
if (RT_FAILURE(rc))
return rc;
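    *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);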
return VINF_SUCCESS;
}
#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
/**
* Inlined version of the ring-0 version of the host page mapping code
* that optimizes access to pages already in the set.
*
* @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
* @param pVCpu Pointer to the VMCPU.
* @param HCPhys The physical address of the page.
* @param ppv Where to store the mapping address.
*/
DECLINLINE(int) pgmRZDynMapHCPageInlined(PVMCPU pVCpu, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
{
    PPGMMAPSET pSet   = &pVCpu->pgm.s.AutoSet;  /* assumed per-VCPU auto mapping set */
    uint32_t   iHash  = PGMMAPSET_HASH(HCPhys);
    uint32_t   iEntry = pSet->aiHashTable[iHash];
    if (   iEntry < pSet->cEntries
        && pSet->aEntries[iEntry].HCPhys == HCPhys)
{
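        /* Hit: hand out the existing mapping (entry member names are assumptions). */
        pSet->aEntries[iEntry].cInlinedRefs++;
        *ppv = pSet->aEntries[iEntry].pvPage;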
}
else
{
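        /* Miss: fall back to the common ring-0/RC mapping code. */
        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);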
}
return VINF_SUCCESS;
}
/**
* Inlined version of the guest page mapping code that optimizes access to pages
* already in the set.
*
* @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param GCPhys The guest physical address of the page.
* @param ppv Where to store the mapping address.
*/
DECLINLINE(int) pgmRZDynMapGCPageV2Inlined(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
{
/*
* Get the ram range.
*/
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    RTGCPHYS     off;
    if (   !pRam
        || (off = GCPhys - pRam->GCPhys) >= pRam->cb
/** @todo || page state stuff */
)
{
/* This case is not counted into StatRZDynMapGCPageInl. */
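        return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);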
}
    /*
     * pgmRZDynMapHCPageInlined without stats.
     */
    RTHCPHYS   HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
    PPGMMAPSET pSet   = &pVCpu->pgm.s.AutoSet;  /* assumed, as in pgmRZDynMapHCPageInlined */
    uint32_t   iHash  = PGMMAPSET_HASH(HCPhys);
    uint32_t   iEntry = pSet->aiHashTable[iHash];
    if (   iEntry < pSet->cEntries
        && pSet->aEntries[iEntry].HCPhys == HCPhys)
    {
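        pSet->aEntries[iEntry].cInlinedRefs++;
        *ppv = pSet->aEntries[iEntry].pvPage;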
}
else
{
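        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);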
}
return VINF_SUCCESS;
}
/**
* Inlined version of the ring-0 version of guest page mapping that optimizes
* access to pages already in the set.
*
* @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
* @param pVCpu Pointer to the VMCPU.
* @param GCPhys The guest physical address of the page.
* @param ppv Where to store the mapping address.
*/
DECLINLINE(int) pgmRZDynMapGCPageInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
{
return pgmRZDynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
}
/**
* Inlined version of the ring-0 version of the guest byte mapping code
* that optimizes access to pages already in the set.
*
* @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
* @param pVCpu Pointer to the VMCPU.
 * @param GCPhys The guest physical address of the page.
* @param ppv Where to store the mapping address. The offset is
* preserved.
*/
DECLINLINE(int) pgmRZDynMapGCPageOffInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
{
/*
* Get the ram range.
*/
    PVM          pVM  = pVCpu->CTX_SUFF(pVM);
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    RTGCPHYS     off;
    if (   !pRam
        || (off = GCPhys - pRam->GCPhys) >= pRam->cb
/** @todo || page state stuff */
)
{
/* This case is not counted into StatRZDynMapGCPageInl. */
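        return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);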
}
    /*
     * pgmRZDynMapHCPageInlined without stats, preserving the page offset.
     */
    RTHCPHYS   HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
    PPGMMAPSET pSet   = &pVCpu->pgm.s.AutoSet;
    uint32_t   iHash  = PGMMAPSET_HASH(HCPhys);
    uint32_t   iEntry = pSet->aiHashTable[iHash];
    if (   iEntry < pSet->cEntries
        && pSet->aEntries[iEntry].HCPhys == HCPhys)
    {
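        pSet->aEntries[iEntry].cInlinedRefs++;
        *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));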
}
else
{
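        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
        *ppv = (void *)((uintptr_t)*ppv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));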
}
return VINF_SUCCESS;
}
#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
/**
* Maps the page into current context (RC and maybe R0).
*
* @returns pointer to the mapping.
* @param pVM Pointer to the PGM instance data.
* @param pPage The page.
*/
DECLINLINE(void *) pgmPoolMapPageInlined(PVM pVM, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
{
    if (pPage->idx >= PGMPOOL_IDX_FIRST)    /* assumed guard: only regular pool pages */
    {
void *pv;
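        /* The pool page is keyed by its host physical address (assumed Core.Key meaning). */
        pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);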
return pv;
}
    AssertFatalMsgFailed(("pgmPoolMapPageInlined: invalid page index %#x\n", pPage->idx));
}
/**
* Maps the page into current context (RC and maybe R0).
*
* @returns pointer to the mapping.
* @param pVM Pointer to the PGM instance data.
* @param pVCpu Pointer to the VMCPU.
* @param pPage The page.
*/
DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
{
    if (pPage->idx >= PGMPOOL_IDX_FIRST)    /* assumed guard, as above */
    {
void *pv;
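        pgmRZDynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);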
return pv;
}
    AssertFatalMsgFailed(("pgmPoolMapPageV2Inlined: invalid page index %#x\n", pPage->idx));
}
#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
#ifndef IN_RC
/**
* Queries the Physical TLB entry for a physical guest page,
* attempting to load the TLB entry if necessary.
*
* @returns VBox status code.
* @retval VINF_SUCCESS on success
* @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
*
* @param pVM Pointer to the VM.
* @param GCPhys The address of the guest page.
* @param ppTlbe Where to store the pointer to the TLB entry.
*/
DECLINLINE(int) pgmPhysPageQueryTlbe(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
{
int rc;
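    /* Sketch: the page-map TLB addressing (CTX_SUFF(PhysTlb), PGM_PAGEMAPTLB_IDX)
       follows this era's PGM internals and is an assumption here. */
    PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))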
{
rc = VINF_SUCCESS;
}
    else
        rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys);   /* assumed loader helper */
    *ppTlbe = pTlbe;
    return rc;
}
/**
* Queries the Physical TLB entry for a physical guest page,
* attempting to load the TLB entry if necessary.
*
* @returns VBox status code.
* @retval VINF_SUCCESS on success
* @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
*
* @param pVM Pointer to the VM.
* @param pPage Pointer to the PGMPAGE structure corresponding to
* GCPhys.
* @param GCPhys The address of the guest page.
* @param ppTlbe Where to store the pointer to the TLB entry.
*/
DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
{
int rc;
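    /* Same page-map TLB sketch as pgmPhysPageQueryTlbe above. */
    PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))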
{
rc = VINF_SUCCESS;
# if !defined(VBOX_WITH_2X_4GB_ADDR_SPACE) || defined(IN_RING3)
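        Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));  /* assumed debug-only sanity check */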
# endif
}
    else
        rc = pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);    /* assumed loader helper */
    *ppTlbe = pTlbe;
    return rc;
}
#endif /* !IN_RC */
/**
* Enables write monitoring for an allocated page.
*
* The caller is responsible for updating the shadow page tables.
*
* @param pVM Pointer to the VM.
* @param pPage The page to write monitor.
* @param GCPhysPage The address of the page.
*/
DECLINLINE(void) pgmPhysPageWriteMonitor(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
{
    Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_WRITE_MONITORED); /* assumed 3-arg macro form */

    /* Large pages must be disabled. */
    if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
{
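        /* Assumed bookkeeping: the first page of the 2/4 MB run carries the PDE type. */
        PPGMPAGE pFirstPage = pgmPhysGetPage(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK);
        AssertFatal(pFirstPage);
        if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE)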
{
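            PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);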
}
        else
            Assert(PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
}
}
/**
* Checks if the no-execute (NX) feature is active (EFER.NXE=1).
*
* Only used when the guest is in PAE or long mode. This is inlined so that we
* can perform consistency checks in debug builds.
*
* @returns true if it is, false if it isn't.
* @param pVCpu Pointer to the VMCPU.
*/
DECLINLINE(bool) pgmGstIsNoExecuteActive(PVMCPU pVCpu)
{
    Assert(pVCpu->pgm.s.fNoExecuteEnabled == CPUMIsGuestNXEnabled(pVCpu));  /* assumed cached flag */
    return pVCpu->pgm.s.fNoExecuteEnabled;
}
/**
* Checks if the page size extension (PSE) is currently enabled (CR4.PSE=1).
*
* Only used when the guest is in paged 32-bit mode. This is inlined so that
* we can perform consistency checks in debug builds.
*
* @returns true if it is, false if it isn't.
* @param pVCpu Pointer to the VMCPU.
*/
DECLINLINE(bool) pgmGst32BitIsPageSizeExtActive(PVMCPU pVCpu)
{
    Assert(pVCpu->pgm.s.fGst32BitPageSizeExtension == CPUMIsGuestPageSizeExtEnabled(pVCpu));
    return pVCpu->pgm.s.fGst32BitPageSizeExtension; /* assumed cached flag */
}
/**
 * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
* Takes PSE-36 into account.
*
* @returns guest physical address
* @param pVM Pointer to the VM.
* @param Pde Guest Pde
*/
DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PVM pVM, X86PDE Pde)
{
    RTGCPHYS GCPhys = (RTGCPHYS)(Pde.u & X86_PDE4M_PG_MASK);

    /* Fold in the PSE-36 high address bits; the bit-field name and the cached
       GCPhys4MBPSEMask (limits the result to the supported width) are assumptions. */
    GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32;
    return GCPhys & pVM->pgm.s.GCPhys4MBPSEMask;
}
/**
 * Gets the address of the guest page directory (32-bit paging).
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param ppPd Where to return the mapping. This is always set.
*/
DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPU pVCpu, PX86PD *ppPd)
{
#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd RTLOG_COMMA_SRC_POS);
if (RT_FAILURE(rc))
{
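        *ppPd = NULL;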
return rc;
}
#else
    *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd); /* assumed cached mapping member */
    if (RT_UNLIKELY(!*ppPd))
        return pgmGstLazyMap32BitPD(pVCpu, ppPd);   /* assumed lazy-map helper */
#endif
return VINF_SUCCESS;
}
/**
 * Gets the address of the guest page directory (32-bit paging).
*
* @returns Pointer to the page directory entry in question.
* @param pVCpu Pointer to the VMCPU.
*/
DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPU pVCpu)
{
#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
    PX86PD pGuestPD = NULL;
    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD RTLOG_COMMA_SRC_POS);
if (RT_FAILURE(rc))
{
return NULL;
}
#else
    PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
    if (RT_UNLIKELY(!pGuestPD))
{
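        int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);    /* assumed lazy-map helper */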
if (RT_FAILURE(rc))
return NULL;
}
#endif
return pGuestPD;
}
/**
* Gets the guest page directory pointer table.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param ppPdpt Where to return the mapping. This is always set.
*/
DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPU pVCpu, PX86PDPT *ppPdpt)
{
#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
    int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt RTLOG_COMMA_SRC_POS);
if (RT_FAILURE(rc))
{
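        *ppPdpt = NULL;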
return rc;
}
#else
    *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);   /* assumed cached mapping member */
    if (RT_UNLIKELY(!*ppPdpt))
        return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt); /* assumed lazy-map helper */
#endif
return VINF_SUCCESS;
}
/**
* Gets the guest page directory pointer table.
*
* @returns Pointer to the page directory in question.
* @returns NULL if the page directory is not present or on an invalid page.
* @param pVCpu Pointer to the VMCPU.
*/
DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PVMCPU pVCpu)
{
    PX86PDPT pGuestPdpt;
    int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
    AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
return pGuestPdpt;
}
/**
* Gets the guest page directory pointer table entry for the specified address.
*
* @returns Pointer to the page directory in question.
* @returns NULL if the page directory is not present or on an invalid page.
* @param pVCpu Pointer to the VMCPU.
* @param GCPtr The address.
*/
DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
{
#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
    PX86PDPT pGuestPDPT = NULL;
    int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT RTLOG_COMMA_SRC_POS);
    AssertRCReturn(rc, NULL);
#else
    PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
    if (RT_UNLIKELY(!pGuestPDPT))
{
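        int rc = pgmGstLazyMapPaePDPT(pVCpu, &pGuestPDPT);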
if (RT_FAILURE(rc))
return NULL;
}
#endif
    return &pGuestPDPT->a[((uint32_t)GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
}
/**
* Gets the page directory entry for the specified address.
*
* @returns The page directory entry in question.
* @returns A non-present entry if the page directory is not present or on an invalid page.
 * @param pVCpu Pointer to the VMCPU.
* @param GCPtr The address.
*/
DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
    if (RT_LIKELY(pGuestPDPT))
{
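        /* Index the PDPT and check the entry is present (bit-field names per x86.h). */
        const unsigned iPdpt = ((uint32_t)GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
        if (pGuestPDPT->a[iPdpt].n.u1Present)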
{
            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
            PX86PDPAE pGuestPD = NULL;
            int rc = pgmRZDynMapGCPageInlined(pVCpu,
                                              pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
                                              (void **)&pGuestPD
                                              RTLOG_COMMA_SRC_POS);
            if (RT_SUCCESS(rc))
                return pGuestPD->a[iPD];
            AssertRC(rc);
#else
            PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];  /* assumed cached PDs */
            if (   !pGuestPD
                || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
                pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
            if (pGuestPD)
                return pGuestPD->a[iPD];
#endif
}
}
    X86PDEPAE ZeroPde = { 0 };
    return ZeroPde;
}
/**
* Gets the page directory pointer table entry for the specified address
* and returns the index into the page directory
*
* @returns Pointer to the page directory in question.
* @returns NULL if the page directory is not present or on an invalid page.
* @param pVCpu Pointer to the VMCPU.
* @param GCPtr The address.
* @param piPD Receives the index into the returned page directory
* @param pPdpe Receives the page directory pointer entry. Optional.
*/
DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
{
    /* The PDPE. */
    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
    if (RT_UNLIKELY(!pGuestPDPT))
        return NULL;
    const unsigned iPdpt = ((uint32_t)GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    if (pPdpe)
        *pPdpe = pGuestPDPT->a[iPdpt];
    if (!pGuestPDPT->a[iPdpt].n.u1Present)
        return NULL;
    if (RT_UNLIKELY(pVCpu->pgm.s.fGstPaeMbzPdpeMask & pGuestPDPT->a[iPdpt].u)) /* assumed MBZ mask */
        return NULL;
/* The PDE. */
#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
    PX86PDPAE pGuestPD = NULL;
    int rc = pgmRZDynMapGCPageInlined(pVCpu,
                                      pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
                                      (void **)&pGuestPD
                                      RTLOG_COMMA_SRC_POS);
if (RT_FAILURE(rc))
{
return NULL;
}
#else
    PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
    if (   !pGuestPD
        || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
        pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
#endif
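    *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;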
return pGuestPD;
}
#ifndef IN_RC
/**
* Gets the page map level-4 pointer for the guest.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param ppPml4 Where to return the mapping. Always set.
*/
DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPU pVCpu, PX86PML4 *ppPml4)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4 RTLOG_COMMA_SRC_POS);
if (RT_FAILURE(rc))
{
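        *ppPml4 = NULL;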
return rc;
}
#else
    *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4); /* assumed cached mapping member */
    if (RT_UNLIKELY(!*ppPml4))
        return pgmGstLazyMapPml4(pVCpu, ppPml4);    /* assumed lazy-map helper */
#endif
return VINF_SUCCESS;
}
/**
* Gets the page map level-4 pointer for the guest.
*
* @returns Pointer to the PML4 page.
* @param pVCpu Pointer to the VMCPU.
*/
DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PVMCPU pVCpu)
{
    PX86PML4 pGuestPml4;
    int rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGuestPml4);
    AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
return pGuestPml4;
}
/**
* Gets the pointer to a page map level-4 entry.
*
* @returns Pointer to the PML4 entry.
* @param pVCpu Pointer to the VMCPU.
* @param iPml4 The index.
* @remarks Only used by AssertCR3.
*/
DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPU pVCpu, unsigned int iPml4)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PML4 pGuestPml4 = NULL;
    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPml4 RTLOG_COMMA_SRC_POS);
    AssertRCReturn(rc, NULL);
#else
    PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
    if (RT_UNLIKELY(!pGuestPml4))
{
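        int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
        AssertRCReturn(rc, NULL);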
}
#endif
return &pGuestPml4->a[iPml4];
}
/**
* Gets the page directory entry for the specified address.
*
* @returns The page directory entry in question.
* @returns A non-present entry if the page directory is not present or on an invalid page.
* @param pVCpu Pointer to the VMCPU.
* @param GCPtr The address.
*/
DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PVMCPU pVCpu, RTGCPTR64 GCPtr)
{
/*
* Note! To keep things simple, ASSUME invalid physical addresses will
* cause X86_TRAP_PF_RSVD. This isn't a problem until we start
* supporting 52-bit wide physical guest addresses.
*/
    PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    if (   RT_LIKELY(pGuestPml4)
        && pGuestPml4->a[iPml4].n.u1Present)
{
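        /* Map the PDPT; PGM_GCPHYS_2_PTR_BY_VMCPU is assumed to be this era's
           GC-phys-to-pointer helper.  Reserved-bit (MBZ) checks are omitted in this sketch. */
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);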
if (RT_SUCCESS(rc))
{
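            const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
            if (pPdptTemp->a[iPdpt].n.u1Present)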
{
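                PCX86PDPAE pPD;
                rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);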
if (RT_SUCCESS(rc))
{
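                    const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
                    return pPD->a[iPD];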
}
}
}
}
    X86PDEPAE ZeroPde = { 0 };
    return ZeroPde;
}
/**
* Gets the GUEST page directory pointer for the specified address.
*
* @returns The page directory in question.
* @returns NULL if the page directory is not present or on an invalid page.
* @param pVCpu Pointer to the VMCPU.
* @param GCPtr The address.
* @param ppPml4e Page Map Level-4 Entry (out)
* @param pPdpe Page directory pointer table entry (out)
* @param piPD Receives the index into the returned page directory
*/
DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
{
    /* The PML4E. */
    PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
    if (RT_UNLIKELY(!pGuestPml4))
        return NULL;
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
    if (!pPml4e->n.u1Present)
        return NULL;
    if (RT_UNLIKELY(pPml4e->u & pVCpu->pgm.s.fGstAmd64MbzPml4eMask))    /* assumed MBZ mask */
        return NULL;
    /* The PDPE. */
    PCX86PDPT pPdptTemp;
    int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
    if (RT_FAILURE(rc))
{
return NULL;
}
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    *pPdpe = pPdptTemp->a[iPdpt];
    if (!pPdpe->n.u1Present)
        return NULL;
    if (RT_UNLIKELY(pPdpe->u & pVCpu->pgm.s.fGstAmd64MbzPdpeMask))
        return NULL;
    /* The PDE. */
    PX86PDPAE pPD;
    rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
    if (RT_FAILURE(rc))
{
return NULL;
}
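    *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;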
return pPD;
}
#endif /* !IN_RC */
/**
* Gets the shadow page directory, 32-bit.
*
* @returns Pointer to the shadow 32-bit PD.
* @param pVCpu Pointer to the VMCPU.
*/
DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PVMCPU pVCpu)
{
return (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
}
/**
* Gets the shadow page directory entry for the specified address, 32-bit.
*
* @returns Shadow 32-bit PDE.
* @param pVCpu Pointer to the VMCPU.
* @param GCPtr The address.
*/
DECLINLINE(X86PDE) pgmShwGet32BitPDE(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    PX86PD pShwPde = pgmShwGet32BitPDPtr(pVCpu);
if (!pShwPde)
{
        X86PDE ZeroPde = { 0 };
        return ZeroPde;
}
    return pShwPde->a[((uint32_t)GCPtr >> X86_PD_SHIFT) & X86_PD_MASK];
}
/**
* Gets the pointer to the shadow page directory entry for the specified
* address, 32-bit.
*
* @returns Pointer to the shadow 32-bit PDE.
* @param pVCpu Pointer to the VMCPU.
* @param GCPtr The address.
*/
DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    PX86PD pPd = pgmShwGet32BitPDPtr(pVCpu);
    AssertReturn(pPd, NULL);
    return &pPd->a[((uint32_t)GCPtr >> X86_PD_SHIFT) & X86_PD_MASK];
}
/**
* Gets the shadow page pointer table, PAE.
*
* @returns Pointer to the shadow PAE PDPT.
* @param pVCpu Pointer to the VMCPU.
*/
DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PVMCPU pVCpu)
{
return (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
}
/**
* Gets the shadow page directory for the specified address, PAE.
*
* @returns Pointer to the shadow PD.
* @param pVCpu Pointer to the VMCPU.
* @param GCPtr The address.
*/
DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    const unsigned iPdpt = ((uint32_t)GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT       pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
    PVM            pVM   = pVCpu->CTX_SUFF(pVM);
    if (!pPdpt->a[iPdpt].n.u1Present)
        return NULL;
/* Fetch the pgm pool shadow descriptor. */
PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
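    AssertReturn(pShwPde, NULL);
    return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);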
}
/**
* Gets the shadow page directory for the specified address, PAE.
*
* @returns Pointer to the shadow PD.
* @param pVCpu Pointer to the VMCPU.
 * @param pPdpt Pointer to the shadow page directory pointer table.
 * @param GCPtr The address.
*/
DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPU pVCpu, PX86PDPT pPdpt, RTGCPTR GCPtr)
{
    const unsigned iPdpt = ((uint32_t)GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PVM            pVM   = pVCpu->CTX_SUFF(pVM);
    if (!pPdpt->a[iPdpt].n.u1Present)
        return NULL;
/* Fetch the pgm pool shadow descriptor. */
PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
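    AssertReturn(pShwPde, NULL);
    return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);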
}
/**
* Gets the shadow page directory entry, PAE.
*
* @returns PDE.
* @param pVCpu Pointer to the VMCPU.
* @param GCPtr The address.
*/
DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
if (!pShwPde)
{
        X86PDEPAE ZeroPde = { 0 };
        return ZeroPde;
}
    return pShwPde->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK];
}
/**
* Gets the pointer to the shadow page directory entry for an address, PAE.
*
* @returns Pointer to the PDE.
* @param pVCpu Pointer to the VMCPU.
* @param GCPtr The address.
* @remarks Only used by AssertCR3.
*/
DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    PX86PDPAE pPd = pgmShwGetPaePDPtr(pVCpu, GCPtr);
    AssertReturn(pPd, NULL);
    return &pPd->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK];
}
#ifndef IN_RC
/**
* Gets the shadow page map level-4 pointer.
*
* @returns Pointer to the shadow PML4.
* @param pVCpu Pointer to the VMCPU.
*/
DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PVMCPU pVCpu)
{
return (PX86PML4)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
}
/**
* Gets the shadow page map level-4 entry for the specified address.
*
* @returns The entry.
* @param pVCpu Pointer to the VMCPU.
* @param GCPtr The address.
*/
DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
if (!pShwPml4)
{
        X86PML4E ZeroPml4e = { 0 };
        return ZeroPml4e;
}
    return pShwPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];
}
/**
* Gets the pointer to the specified shadow page map level-4 entry.
*
* @returns The entry.
* @param pVCpu Pointer to the VMCPU.
* @param iPml4 The PML4 index.
*/
DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PVMCPU pVCpu, unsigned int iPml4)
{
    PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
if (!pShwPml4)
return NULL;
    return &pShwPml4->a[iPml4];
}
#endif /* !IN_RC */
/**
* Cached physical handler lookup.
*
 * @returns Pointer to the physical handler covering @a GCPhys, NULL if none.
* @param pVM Pointer to the VM.
* @param GCPhys The lookup address.
*/
DECLINLINE(PPGMPHYSHANDLER) pgmHandlerPhysicalLookup(PVM pVM, RTGCPHYS GCPhys)
{
    PPGMPHYSHANDLER pHandler = pVM->pgm.s.CTX_SUFF(pLastPhysHandler);   /* assumed one-entry cache */
    if (   pHandler
        && GCPhys >= pHandler->Core.Key
        && GCPhys <= pHandler->Core.KeyLast)
{
return pHandler;
}
pHandler = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
if (pHandler)
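        pVM->pgm.s.CTX_SUFF(pLastPhysHandler) = pHandler;   /* refresh the cache (assumed member) */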
return pHandler;
}
/**
* Gets the page state for a physical handler.
*
* @returns The physical handler page state.
* @param pCur The physical handler in question.
*/
DECLINLINE(unsigned) pgmHandlerPhysicalCalcState(PPGMPHYSHANDLER pCur)
{
    switch (pCur->enmType)
    {
        case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
return PGM_PAGE_HNDL_PHYS_STATE_WRITE;
        case PGMPHYSHANDLERTYPE_MMIO:
        case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
return PGM_PAGE_HNDL_PHYS_STATE_ALL;
        default:
            AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
}
}
/**
* Gets the page state for a virtual handler.
*
* @returns The virtual handler page state.
* @param pCur The virtual handler in question.
* @remarks This should never be used on a hypervisor access handler.
*/
DECLINLINE(unsigned) pgmHandlerVirtualCalcState(PPGMVIRTHANDLER pCur)
{
    Assert(pCur->enmType != PGMVIRTHANDLERTYPE_HYPERVISOR); /* per the remark above */
    switch (pCur->enmType)
    {
case PGMVIRTHANDLERTYPE_WRITE:
return PGM_PAGE_HNDL_VIRT_STATE_WRITE;
case PGMVIRTHANDLERTYPE_ALL:
return PGM_PAGE_HNDL_VIRT_STATE_ALL;
        default:
            AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
}
}
/**
* Clears one physical page of a virtual handler.
*
* @param pVM Pointer to the VM.
* @param pCur Virtual handler structure.
* @param iPage Physical page index.
*
* @remark Only used when PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL is being set, so no
* need to care about other handlers in the same page.
*/
DECLINLINE(void) pgmHandlerVirtualClearPage(PVM pVM, PPGMVIRTHANDLER pCur, unsigned iPage)
{
    const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];  /* assumed member name */
    RTGCPHYS GCPhys = pPhys2Virt->Core.Key; NOREF(GCPhys);
/*
* Remove the node from the tree (it's supposed to be in the tree if we get here!).
*/
("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
#endif
    if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_MASTER)
    {
/* We're the head of the alias chain. */
PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
("wanted: pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
" got: pRemove=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
#endif
        if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
        {
/* Insert the next list in the alias chain into the tree. */
PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
("pNext=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
#endif
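            pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_MASTER;
            bool fRc = RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
            AssertRelease(fRc);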
}
}
else
{
/* Locate the previous node in the alias chain. */
PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
#endif
for (;;)
{
PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
if (pNext == pPhys2Virt)
{
/* unlink. */
LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%RGp-%RGp]\n",
pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
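                if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
                    pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;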
else
{
PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
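                    pPrev->offNextAlias = (uint32_t)((intptr_t)pNewNext - (intptr_t)pPrev)
                                        | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);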
}
break;
}
/* next */
            if (pNext == pPrev)
            {
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                AssertReleaseMsg(pNext != pPrev,
("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
#endif
break;
}
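            pPrev = pNext;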
}
}
Log2(("PHYS2VIRT: Removing %RGp-%RGp %#RX32 %s\n",
pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
pPhys2Virt->offNextAlias = 0;
/*
* Clear the ram flags for this page.
*/
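    PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
    AssertReturnVoid(pPage);
    PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, PGM_PAGE_HNDL_VIRT_STATE_NONE);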
}
/**
 * Internal worker for finding an 'in-use' shadow page given by its index.
*
* @returns Pointer to the shadow page structure.
* @param pPool The pool.
* @param idx The pool page index.
*/
DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
{
    AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
    return &pPool->aPages[idx];
}
/**
* Clear references to guest physical memory.
*
* @param pPool The pool.
* @param pPoolPage The pool page.
* @param pPhysPage The physical guest page tracking structure.
* @param iPte Shadow PTE index
*/
DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
{
/*
* Just deal with the simple case here.
*/
# ifdef VBOX_STRICT
# endif
# ifdef LOG_ENABLED
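    uint16_t const uOrg = PGM_PAGE_GET_TRACKING(pPhysPage); /* consumed by the Log2 below */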
# endif
    uint16_t const cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
    if (cRefs == 1)
{
/* Invalidate the tracking data. */
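        Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
        PGM_PAGE_SET_TRACKING(pPool->CTX_SUFF(pVM), pPhysPage, 0);  /* assumed 3-arg macro form */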
}
    else
        pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);  /* assumed extent helper */
Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
}
/**
* Moves the page to the head of the age list.
*
* This is done when the cached page is used in one way or another.
*
* @param pPool The pool.
* @param pPage The cached page.
*/
DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
/*
* Move to the head of the age list.
*/
    if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
    {
/* unlink */
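        pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
        if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
            pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;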
        else
            pPool->iAgeTail = pPage->iAgePrev;
/* insert at head */
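        pPage->iAgePrev = NIL_PGMPOOL_IDX;
        pPage->iAgeNext = pPool->iAgeHead;
        Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* an empty list would have made us head already */
        pPool->iAgeHead = pPage->idx;
        pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;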
}
}
/**
* Locks a page to prevent flushing (important for cr3 root pages or shadow pae pd pages).
*
 * @param pPool The pool.
 * @param pPage The PGM pool page.
*/
DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    NOREF(pPool);
    pPage->cLocked++;   /* assumed lock counter member */
}
/**
* Unlocks a page to allow flushing again
*
 * @param pPool The pool.
 * @param pPage The PGM pool page.
*/
DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    NOREF(pPool);
    Assert(pPage->cLocked > 0);
    pPage->cLocked--;
}
/**
 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT).
 *
 * @returns true if locked, false otherwise.
* @param pPage PGM pool page
*/
DECLINLINE(bool) pgmPoolIsPageLocked(PPGMPOOLPAGE pPage)
{
    if (pPage->cLocked)
    {
if (pPage->cModifications)
pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
return true;
}
return false;
}
/**
* Tells if mappings are to be put into the shadow page table or not.
*
* @returns boolean result
* @param pVM Pointer to the VM.
*/
DECLINLINE(bool) pgmMapAreMappingsEnabled(PVM pVM)
{
#ifdef PGM_WITHOUT_MAPPINGS
/* There are no mappings in VT-x and AMD-V mode. */
return false;
#else
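    return !pVM->pgm.s.fMappingsDisabled;   /* assumed flag */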
#endif
}
/**
* Checks if the mappings are floating and enabled.
*
* @returns true / false.
* @param pVM Pointer to the VM.
*/
DECLINLINE(bool) pgmMapAreMappingsFloating(PVM pVM)
{
#ifdef PGM_WITHOUT_MAPPINGS
/* There are no mappings in VT-x and AMD-V mode. */
return false;
#else
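    return !pVM->pgm.s.fMappingsDisabled
        && !pVM->pgm.s.fMappingsFixed;  /* assumed flags */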
#endif
}
/** @} */
#endif /* !___PGMInline_h */