PGMAllGst.h revision 4bf2f9c1daa19585b72908e822321a6b710cd961
/* $Id$ */
/** @file
* VBox - Page Manager, Guest Paging Template - All context code.
*/
/*
* Copyright (C) 2006-2007 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
/*******************************************************************************
* Defined Constants And Macros *
*******************************************************************************/
#if PGM_GST_TYPE == PGM_TYPE_REAL \
|| PGM_GST_TYPE == PGM_TYPE_PROT
# define GST_PTE_PG_MASK SHW_PTE_PG_MASK
#elif PGM_GST_TYPE == PGM_TYPE_32BIT
# define GST_BIG_PAGE_SIZE X86_PAGE_4M_SIZE
# define GST_PDE_PG_MASK X86_PDE_PG_MASK
# define GST_PDE_BIG_PG_MASK X86_PDE4M_PG_MASK
# define GST_PD_SHIFT X86_PD_SHIFT
# define GST_PD_MASK X86_PD_MASK
# define GST_TOTAL_PD_ENTRIES X86_PG_ENTRIES
# define GST_PTE_PG_MASK X86_PTE_PG_MASK
# define GST_PT_SHIFT X86_PT_SHIFT
# define GST_PT_MASK X86_PT_MASK
# define GST_CR3_PAGE_MASK X86_CR3_PAGE_MASK
#elif PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64
# define PGSTPTE PX86PTEPAE
# define PGSTPDE PX86PDEPAE
# define GST_BIG_PAGE_SIZE X86_PAGE_2M_SIZE
# define GST_PDE_PG_MASK X86_PDE_PAE_PG_MASK_FULL
# define GST_PDE_BIG_PG_MASK X86_PDE2M_PAE_PG_MASK
# define GST_PD_SHIFT X86_PD_PAE_SHIFT
# define GST_PD_MASK X86_PD_PAE_MASK
# if PGM_GST_TYPE == PGM_TYPE_PAE
# define GST_PDPE_ENTRIES X86_PG_PAE_PDPE_ENTRIES
# define GST_PDPE_PG_MASK X86_PDPE_PG_MASK_FULL
# define GST_PDPT_SHIFT X86_PDPT_SHIFT
# define GST_PDPT_MASK X86_PDPT_MASK_PAE
# define GST_PTE_PG_MASK X86_PTE_PAE_PG_MASK
# define GST_CR3_PAGE_MASK X86_CR3_PAE_PAGE_MASK
# else
# define GST_PDPT_SHIFT X86_PDPT_SHIFT
# define GST_PDPE_PG_MASK X86_PDPE_PG_MASK_FULL
# define GST_PDPT_MASK X86_PDPT_MASK_AMD64
# define GST_PTE_PG_MASK X86_PTE_PAE_PG_MASK_FULL
# define GST_CR3_PAGE_MASK X86_CR3_AMD64_PAGE_MASK
# endif
# define GST_PT_SHIFT X86_PT_PAE_SHIFT
# define GST_PT_MASK X86_PT_PAE_MASK
#endif
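/*
 * Illustrative sketch, not part of the original source: how the GST_*
 * geometry macros above combine to decode a guest linear address. The
 * helper name is hypothetical; the template code below open-codes these
 * expressions instead of using a helper.
 */
#if 0 /* example only */
DECLINLINE(void) pgmExampleDecodeGCPtr(RTGCUINTPTR GCPtr, unsigned *piPD, unsigned *piPT)
{
    /* Page-directory index: the bits selected by GST_PD_SHIFT/GST_PD_MASK. */
    *piPD = (unsigned)((GCPtr >> GST_PD_SHIFT) & GST_PD_MASK);
    /* Page-table index: the bits selected by GST_PT_SHIFT/GST_PT_MASK. */
    *piPT = (unsigned)((GCPtr >> GST_PT_SHIFT) & GST_PT_MASK);
}
#endif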
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
#ifndef IN_RING3
PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
# if PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64
PGM_GST_DECL(int, PAEWriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
# endif
#endif
/**
* Gets effective Guest OS page information.
*
 * When GCPtr is in a big page, the function will return as if it were a normal
 * 4KB page. Should distinguishing between big and normal pages become
 * necessary at a later point, a PGMGstGetPageEx() will be created for that
 * purpose.
*
* @returns VBox status.
* @param pVM VM Handle.
* @param GCPtr Guest Context virtual address of the page. Page aligned!
* @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
* @param pGCPhys Where to store the GC physical address of the page.
 * This is page aligned.
 */
PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
{
#if PGM_GST_TYPE == PGM_TYPE_REAL \
|| PGM_GST_TYPE == PGM_TYPE_PROT
/*
* Fake it.
*/
    if (pfFlags)
        *pfFlags = X86_PTE_P | X86_PTE_RW | X86_PTE_US; /* real/prot mode: everything is present, writable and user accessible */
    if (pGCPhys)
        *pGCPhys = GCPtr & PAGE_BASE_GC_MASK;           /* identity mapping */
return VINF_SUCCESS;
#elif PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
/*
* Get the PDE.
*/
# if PGM_GST_TYPE == PGM_TYPE_32BIT
# elif PGM_GST_TYPE == PGM_TYPE_PAE
    /* pgmGstGetPaePDE will return 0 if the PDPTE is marked as not present;
     * all the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx).
     */
    if (!Pde.u)
        return VERR_PAGE_TABLE_NOT_PRESENT;
/* Merge accessed, write, user and no-execute bits into the PDE. */
# endif
/*
* Lookup the page.
*/
    if (!Pde.n.u1Present)
        return VERR_PAGE_TABLE_NOT_PRESENT;
    if (    !Pde.b.u1Size
# if PGM_GST_TYPE != PGM_TYPE_AMD64
        ||  !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE)
# endif
       )
{
if (VBOX_FAILURE(rc))
return rc;
/*
* Get PT entry and check presence.
*/
        if (!Pte.n.u1Present)
            return VERR_PAGE_NOT_PRESENT;
/*
* Store the result.
* RW and US flags depend on all levels (bitwise AND) - except for legacy PAE
* where the PDPE is simplified.
*/
if (pfFlags)
{
# if PGM_WITH_NX(PGM_GST_TYPE)
            /* The NX bit is determined by a bitwise OR between the PT and PD. */
            if (fNoExecuteBitValid)
                *pfFlags |= (Pte.u | Pde.u) & X86_PTE_PAE_NX;
# endif
}
        if (pGCPhys)
            *pGCPhys = Pte.u & GST_PTE_PG_MASK;
}
else
{
/*
* Map big to 4k PTE and store the result
*/
if (pfFlags)
{
# if PGM_WITH_NX(PGM_GST_TYPE)
            /* For big pages there is no PT; the NX bit comes from the PDE alone. */
            if (fNoExecuteBitValid)
                *pfFlags |= Pde.u & X86_PTE_PAE_NX;
# endif
}
if (pGCPhys)
*pGCPhys = (Pde.u & GST_PDE_BIG_PG_MASK) | (GCPtr & (~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK)); /** @todo pse36 */
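            /* Note on the mask arithmetic above: ~GST_PDE_BIG_PG_MASK ^ ~GST_PTE_PG_MASK
             * equals GST_PDE_BIG_PG_MASK ^ GST_PTE_PG_MASK, i.e. exactly the GCPtr bits
             * between 4KB and big-page granularity. ANDing GCPtr with it selects the 4KB
             * frame within the big page while keeping the result page aligned. */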
}
return VINF_SUCCESS;
#else
# error "shouldn't be here!"
/* something else... */
return VERR_NOT_SUPPORTED;
#endif
}
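/*
 * Hedged usage sketch, not in the original source: how a caller typically
 * consumes this worker through the public PGMGstGetPage API. The helper
 * name pgmExampleQueryPage is hypothetical.
 */
#if 0 /* example only */
static int pgmExampleQueryPage(PVM pVM, RTGCPTR GCPtr)
{
    uint64_t fFlags;
    RTGCPHYS GCPhys;
    int rc = PGMGstGetPage(pVM, GCPtr, &fFlags, &GCPhys);
    if (VBOX_SUCCESS(rc))
        Log(("page at %VGv -> %VGp writable=%d\n", GCPtr, GCPhys, !!(fFlags & X86_PTE_RW)));
    else if (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT)
        Log(("page at %VGv is not present\n", GCPtr));
    return rc;
}
#endif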
/**
* Modify page flags for a range of pages in the guest's tables
*
* The existing flags are ANDed with the fMask and ORed with the fFlags.
*
* @returns VBox status code.
* @param pVM VM handle.
* @param GCPtr Virtual address of the first page in the range. Page aligned!
* @param cb Size (in bytes) of the page range to apply the modification to. Page aligned!
* @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
* @param fMask The AND mask - page flags X86_PTE_*.
*/
PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
|| PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64
for (;;)
{
/*
* Get the PD entry.
*/
# if PGM_GST_TYPE == PGM_TYPE_32BIT
# elif PGM_GST_TYPE == PGM_TYPE_PAE
        /* pgmGstGetPaePDEPtr will return 0 if the PDPTE is marked as not present;
         * all the other bits in the PDPTE are only valid in long mode (r/w, u/s, nx).
         */
        PX86PDEPAE pPde = pgmGstGetPaePDEPtr(&pVM->pgm.s, GCPtr);
if (!pPde)
return VERR_PAGE_TABLE_NOT_PRESENT;
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
        /** @todo Setting the r/w, u/s & nx bits might have no effect depending on the pdpte & pml4 values */
        PX86PDEPAE pPde = pgmGstGetLongModePDEPtr(&pVM->pgm.s, GCPtr);
if (!pPde)
return VERR_PAGE_TABLE_NOT_PRESENT;
# endif
        if (!pPde->n.u1Present)
            return VERR_PAGE_TABLE_NOT_PRESENT;
        if (    !pPde->b.u1Size
# if PGM_GST_TYPE != PGM_TYPE_AMD64
            ||  !(CPUMGetGuestCR4(pVM) & X86_CR4_PSE)
# endif
           )
{
/*
* 4KB Page table
*
* Walk page tables and pages till we're done.
*/
if (VBOX_FAILURE(rc))
return rc;
{
                pPT->a[iPTE].u = (pPT->a[iPTE].u & (fMask | GST_PTE_PG_MASK))
                               | (fFlags & ~GST_PTE_PG_MASK);
                /* next page */
                cb -= PAGE_SIZE;
                if (!cb)
                    return VINF_SUCCESS;
                GCPtr += PAGE_SIZE;
                iPTE++;
}
}
else
{
/*
* 4MB Page table
*/
Pde.u = (Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS)) /** @todo pse36 */
                  | (fFlags & ~GST_PTE_PG_MASK)
                  | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
            *pPde = Pde;

            /* advance (finish the remainder of this big page first) */
            const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & (GST_BIG_PAGE_SIZE - 1));
            if (cbDone >= cb)
                return VINF_SUCCESS;
            cb    -= cbDone;
            GCPtr += cbDone;
}
}
#else
/* real / protected mode: ignore. */
return VINF_SUCCESS;
#endif
}
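/*
 * Hedged usage sketch, not in the original source: write-protecting a page
 * through the public PGMGstModifyPage wrapper around the worker above.
 * New PTE value = (old & fMask) | fFlags, so clearing X86_PTE_RW from the
 * mask while adding no flags removes write access.
 */
#if 0 /* example only */
static int pgmExampleWriteProtectPage(PVM pVM, RTGCPTR GCPtr)
{
    /* fFlags = 0 (set nothing), fMask = everything except RW (clear RW). */
    return PGMGstModifyPage(pVM, GCPtr, PAGE_SIZE, 0, ~(uint64_t)X86_PTE_RW);
}
#endif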
/**
* Retrieve guest PDE information
*
* @returns VBox status code.
* @param pVM The virtual machine.
* @param GCPtr Guest context pointer
* @param pPDE Pointer to guest PDE structure
 */
PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPDE)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
|| PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64
# if PGM_GST_TYPE == PGM_TYPE_32BIT
# endif
return VINF_SUCCESS;
#else
AssertFailed();
return VERR_NOT_IMPLEMENTED;
#endif
}
/**
 * Maps the CR3 into the HMA in GC and locates it in HC.
*
* Note that a MapCR3 call is usually not followed by an UnmapCR3 call; whenever
* CR3 is updated we simply call MapCR3 again.
*
* @returns VBox status, no specials.
* @param pVM VM handle.
* @param GCPhysCR3 The physical address in the CR3 register.
 */
PGM_GST_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
|| PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64
/*
* Map the page CR3 points at.
*/
    RTHCPTR  HCPtrGuestCR3;
    RTHCPHYS HCPhysGuestCR3;
    int rc = pgmRamGCPhys2HCPtrAndHCPhysWithFlags(&pVM->pgm.s, GCPhysCR3 & GST_CR3_PAGE_MASK, &HCPtrGuestCR3, &HCPhysGuestCR3);
if (VBOX_SUCCESS(rc))
{
if (VBOX_SUCCESS(rc))
{
# if PGM_GST_TYPE == PGM_TYPE_32BIT
# elif PGM_GST_TYPE == PGM_TYPE_PAE
            unsigned offset = GCPhysCR3 & GST_CR3_PAGE_MASK & PAGE_OFFSET_MASK;
            pVM->pgm.s.pGstPaePDPTGC = (RCPTRTYPE(PX86PDPT))((RCPTRTYPE(uint8_t *))pVM->pgm.s.GCPtrCR3Mapping + offset);
/*
* Map the 4 PDs too.
*/
{
{
if (VBOX_SUCCESS(rc2))
{
continue;
}
}
}
# elif PGM_GST_TYPE == PGM_TYPE_AMD64
            if (!HWACCMIsNestedPagingActive(pVM))
            {
                PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
                if (pVM->pgm.s.pHCShwAmd64CR3)
                {
pgmPoolFreeByPage(pPool, pVM->pgm.s.pHCShwAmd64CR3, PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.pHCShwAmd64CR3->GCPhys >> PAGE_SHIFT);
}
rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_64BIT_PML4_FOR_64BIT_PML4, PGMPOOL_IDX_AMD64_CR3, GCPhysCR3 >> PAGE_SHIFT, &pVM->pgm.s.pHCShwAmd64CR3);
if (rc == VERR_PGM_POOL_FLUSHED)
{
AssertFailed(); /* check if we handle this properly!! */
return VINF_PGM_SYNC_CR3;
}
pVM->pgm.s.pHCPaePML4 = (PX86PML4)PGMPOOL_PAGE_2_PTR(pPool->CTXSUFF(pVM), pVM->pgm.s.pHCShwAmd64CR3);
}
# endif
}
        else
            AssertRC(rc);
    }
    else
        AssertRC(rc);
#else /* real / protected mode: nothing to map. */
    int rc = VINF_SUCCESS;
#endif
return rc;
}
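/*
 * Illustrative note, not in the original source: GST_CR3_PAGE_MASK differs
 * per guest mode because PAE only requires CR3 (the PDPT pointer) to be
 * 32-byte aligned, while the 32-bit and AMD64 roots are page aligned. The
 * masking step used above reduces to:
 */
#if 0 /* example only */
static RTGCPHYS pgmExampleMaskCR3(RTGCPHYS GCPhysCR3)
{
    /* Strip the mode-dependent control/ignored low bits from CR3. */
    return GCPhysCR3 & GST_CR3_PAGE_MASK;
}
#endif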
/**
* Unmaps the CR3.
*
* @returns VBox status, no specials.
* @param pVM VM handle.
 */
PGM_GST_DECL(int, UnmapCR3)(PVM pVM)
{
LogFlow(("UnmapCR3\n"));
int rc = VINF_SUCCESS;
#if PGM_GST_TYPE == PGM_TYPE_32BIT
#elif PGM_GST_TYPE == PGM_TYPE_PAE
    for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
{
}
#elif PGM_GST_TYPE == PGM_TYPE_AMD64
    if (!HWACCMIsNestedPagingActive(pVM))
    {
        PPGMPOOL pPool = pVM->pgm.s.CTXSUFF(pPool);
        if (pVM->pgm.s.pHCShwAmd64CR3)
        {
pgmPoolFreeByPage(pPool, pVM->pgm.s.pHCShwAmd64CR3, PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.pHCShwAmd64CR3->GCPhys >> PAGE_SHIFT);
}
}
#else
    /* nothing to do */
#endif
return rc;
}
#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_PGM_POOL
/**
* Registers physical page monitors for the necessary paging
* structures to detect conflicts with our guest mappings.
*
* This is always called after mapping CR3.
* This is never called with fixed mappings.
*
* @returns VBox status, no specials.
* @param pVM VM handle.
* @param GCPhysCR3 The physical address in the CR3 register.
 */
PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
{
int rc = VINF_SUCCESS;
    /*
     * Register/Modify write access handler for the guest CR3 if it changed.
     */
#if PGM_GST_TYPE == PGM_TYPE_32BIT
    if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
    {
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
        if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
            rc = PGMHandlerPhysicalModify(pVM, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1);
else
rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1,
# else /* PGMPOOL_WITH_MIXED_PT_CR3 */
# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
if (VBOX_FAILURE(rc))
{
AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
return rc;
}
}
#elif PGM_GST_TYPE == PGM_TYPE_PAE
    /* Monitor the PDPT page */
    /*
     * Register/Modify write access handler for the guest CR3 if it changed.
     */
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
AssertFailed();
# endif
    if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
    {
if (VBOX_FAILURE(rc))
{
AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
return rc;
}
}
/*
* Do the 4 PDs.
*/
for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
{
{
{
}
if (VBOX_FAILURE(rc))
{
AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
return rc;
}
}
{
}
}
#else
#endif
return rc;
}
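/*
 * Hedged sketch, not in the original source, of the relocation pattern used
 * above: when the monitored guest CR3 moves, the existing physical write
 * handler is shifted with PGMHandlerPhysicalModify instead of being
 * deregistered and re-registered. cbMonitored stands in for cbCR3Stuff.
 */
#if 0 /* example only */
static int pgmExampleMoveMonitor(PVM pVM, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, uint32_t cbMonitored)
{
    /* Move the handler from the old CR3 range to the new one. */
    return PGMHandlerPhysicalModify(pVM, GCPhysOld, GCPhysNew, GCPhysNew + cbMonitored - 1);
}
#endif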
/**
* Deregisters any physical page monitors installed by MonitorCR3.
*
* @returns VBox status code, no specials.
* @param pVM The VM handle.
 */
PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM)
{
int rc = VINF_SUCCESS;
/*
* Deregister the access handlers.
*
* PGMSyncCR3 will reinstall it if required and PGMSyncCR3 will be executed
* before we enter GC again.
*/
#if PGM_GST_TYPE == PGM_TYPE_32BIT
    if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
    {
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
# else /* PGMPOOL_WITH_MIXED_PT_CR3 */
: PGMPOOL_IDX_PD);
# endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
}
#elif PGM_GST_TYPE == PGM_TYPE_PAE
    /* The PDPT page */
# ifndef PGMPOOL_WITH_MIXED_PT_CR3
AssertFailed();
# endif
    if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
    {
}
/* The 4 PDs. */
for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
{
{
            if (VBOX_FAILURE(rc2))
                rc = rc2;
        }
}
#else
#endif
return rc;
}
#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_PGM
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
|| PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64
/**
* Updates one virtual handler range.
*
* @returns 0
* @param pNode Pointer to a PGMVIRTHANDLER.
* @param pvUser Pointer to a PGMVHUARGS structure (see PGM.cpp).
*/
static DECLCALLBACK(int) PGM_GST_NAME(VirtHandlerUpdateOne)(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT
#endif
#if PGM_GST_TYPE != PGM_TYPE_AMD64
/* skip all stuff above 4GB if not AMD64 mode. */
return 0;
#endif
unsigned iPage = 0;
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT
#endif
{
# if PGM_GST_TYPE != PGM_TYPE_AMD64
# endif
)
{
/*
* Normal page table.
*/
if (VBOX_SUCCESS(rc))
{
{
else
{
("{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%VGp\n",
#endif
}
}
}
else
{
/* not-present. */
offPage = 0;
{
{
("{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
#endif
}
}
}
}
else
{
/*
* 2/4MB page.
*/
{
{
("{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} GCPhysNew=%VGp\n",
#endif
}
}
} /* pde type */
}
else
{
/* not-present. */
{
{
}
}
offPage = 0;
}
} /* for pages in virtual mapping. */
return 0;
}
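/*
 * Hedged sketch, not in the original source: the enumeration pattern used
 * with VirtHandlerUpdateOne. RTAvlroGCPtrDoWithAll visits every node of the
 * virtual-handler tree and passes each PAVLROGCPTRNODECORE to the callback,
 * exactly as HandlerVirtualUpdate does below. The counting callback is
 * hypothetical.
 */
#if 0 /* example only */
static DECLCALLBACK(int) pgmExampleCountNode(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    /* A real callback would inspect the handler range; we only count. */
    (*(unsigned *)pvUser)++;
    return 0; /* 0 = continue enumeration */
}

static unsigned pgmExampleCountVirtHandlers(PVM pVM)
{
    unsigned cNodes = 0;
    RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTXSUFF(pTrees)->VirtHandlers, true /*fFromLeft*/,
                          pgmExampleCountNode, &cNodes);
    return cNodes;
}
#endif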
#endif /* 32BIT, PAE and AMD64 */
/**
* Updates the virtual page access handlers.
*
* @returns true if bits were flushed.
* @returns false if bits weren't flushed.
* @param pVM VM handle.
* @param pPDSrc The page directory.
* @param cr4 The cr4 register value.
 */
PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
|| PGM_GST_TYPE == PGM_TYPE_PAE \
|| PGM_GST_TYPE == PGM_TYPE_AMD64
/** @todo
* In theory this is not sufficient: the guest can change a single page in a range with invlpg
*/
/*
* Resolve any virtual address based access handlers to GC physical addresses.
* This should be fairly quick.
*/
RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTXSUFF(pTrees)->VirtHandlers, true, PGM_GST_NAME(VirtHandlerUpdateOne), &State);
/*
* Set / reset bits?
*/
{
Log(("pgmR3VirtualHandlersUpdate: resets bits\n"));
RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTXSUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualResetOne, pVM);
    }
    return !!(State.fTodo & PGM_SYNC_CLEAR_PGM_POOL);
#else /* real / protected */
return false;
#endif
}
#if PGM_GST_TYPE == PGM_TYPE_32BIT && !defined(IN_RING3)

/**
* Write access handler for the Guest CR3 page in 32-bit mode.
*
 * This will try to interpret the instruction; if that fails, it falls back
 * to the recompiler. It checks whether the changed PDEs are marked present
 * and conflict with our mappings; on a conflict we switch to the host
 * context and resolve it there.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
* @param pVM VM Handle.
* @param uErrorCode CPU Error code.
* @param pRegFrame Trap register frame.
* @param pvFault The fault address (cr2).
* @param GCPhysFault The GC physical address corresponding to pvFault.
* @param pvUser User argument.
*/
PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
/*
* Try interpret the instruction.
*/
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (VBOX_SUCCESS(rc))
    {
/*
         * Check if the modified PDEs are present and conflict with our mappings.
*/
#ifdef DEBUG
Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%VGv\n", iPD1, iPD1 << X86_PD_SHIFT));
Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%VGv\n", iPD2, iPD2 << X86_PD_SHIFT));
#endif
{
)
{
                if (rc == VINF_SUCCESS)
                    rc = VINF_PGM_SYNC_CR3;
Log(("pgmXXGst32BitWriteHandlerCR3: detected conflict iPD1=%#x iPD2=%#x - returns %Rrc\n", iPD1, iPD2, rc));
return rc;
}
}
}
else
{
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
}
return rc;
}
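/*
 * Hedged sketch, not in the original source: how the handler above derives
 * which PDEs a faulting write touched. The write landed at GCPhysFault
 * inside the monitored CR3 page, so the page offset divided by the PDE size
 * yields the first and last affected directory indices (cb = write size).
 */
#if 0 /* example only */
static void pgmExampleFaultToPdRange(RTGCPHYS GCPhysFault, uint32_t cb, unsigned *piPD1, unsigned *piPD2)
{
    const RTGCPHYS offPD = GCPhysFault & PAGE_OFFSET_MASK;
    *piPD1 = (unsigned)(offPD / sizeof(X86PDE));            /* first PDE written */
    *piPD2 = (unsigned)((offPD + cb - 1) / sizeof(X86PDE)); /* last PDE written */
}
#endif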
#endif /* PGM_TYPE_32BIT && !IN_RING3 */
#if PGM_GST_TYPE == PGM_TYPE_PAE && !defined(IN_RING3)

/**
* Write access handler for the Guest CR3 page in PAE mode.
*
 * This will try to interpret the instruction; if that fails, it falls back
 * to the recompiler. It checks whether the changed PDEs are marked present
 * and conflict with our mappings; on a conflict we switch to the host
 * context and resolve it there.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
* @param pVM VM Handle.
* @param uErrorCode CPU Error code.
* @param pRegFrame Trap register frame.
* @param pvFault The fault address (cr2).
* @param GCPhysFault The GC physical address corresponding to pvFault.
* @param pvUser User argument.
*/
PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
/*
* Try interpret the instruction.
*/
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (VBOX_SUCCESS(rc))
    {
/*
* Check if any of the PDs have changed.
*/
for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
{
{
/*
* The PDPE has changed.
* We will schedule a monitoring update for the next TLB Flush,
* InvalidatePage or SyncCR3.
*
 * This isn't perfect, because a lazy page sync might be dealing with a half-
* updated PDPE. However, we assume that the guest OS is disabling interrupts
* and being extremely careful (cmpxchg8b) when updating a PDPE where it's
* executing.
*/
Log(("pgmXXGstPaeWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%VGp\n",
}
}
}
else
{
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
}
return rc;
}
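/*
 * Hedged sketch, not in the original source: the shape of the PDPE change
 * check described above. The PDPT accessor and the monitored-address array
 * name are assumptions about this tree; the point is comparing the PD
 * address currently in the PDPE against the one being monitored.
 */
#if 0 /* example only; field names are assumptions */
static bool pgmExamplePdpeChanged(PVM pVM, unsigned iPdpe)
{
    PX86PDPT pPdpt = CTXSUFF(pVM->pgm.s.pGstPaePDPT); /* assumed field */
    return pPdpt->a[iPdpe].n.u1Present
        && (pPdpt->a[iPdpe].u & X86_PDPE_PG_MASK) != pVM->pgm.s.aGCPhysGstPaePDsMonitored[iPdpe]; /* assumed field */
}
#endif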
/**
* Write access handler for the Guest PDs in PAE mode.
*
 * This will try to interpret the instruction; if that fails, it falls back
 * to the recompiler. It checks whether the changed PDEs are marked present
 * and conflict with our mappings; on a conflict we switch to the host
 * context and resolve it there.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
* @param pVM VM Handle.
* @param uErrorCode CPU Error code.
* @param pRegFrame Trap register frame.
* @param pvFault The fault address (cr2).
* @param GCPhysFault The GC physical address corresponding to pvFault.
* @param pvUser User argument.
*/
PGM_GST_DECL(int, WriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
/*
* Try interpret the instruction.
*/
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (VBOX_SUCCESS(rc))
    {
/*
* Figure out which of the 4 PDs this is.
*/
RTGCUINTPTR i;
for (i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
{
#ifdef DEBUG
Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD1=%#05x (%VGv)\n",
Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD2=%#05x (%VGv)\n",
#endif
{
)
{
return VINF_PGM_SYNC_CR3;
}
}
break; /* ASSUMES no duplicate entries... */
}
Assert(i < 4);
}
else
{
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
        else
}
return rc;
}
#endif /* PGM_TYPE_PAE && !IN_RING3 */
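/*
 * Hedged sketch, not in the original source: the "which of the 4 PDs" test
 * used by WriteHandlerPD above. Each of the X86_PG_PAE_PDPE_ENTRIES page
 * directories is compared against the faulting physical page; the PD
 * address array name is an assumption about this tree.
 */
#if 0 /* example only; field name is an assumption */
static int pgmExampleWhichPaePD(PVM pVM, RTGCPHYS GCPhysFault)
{
    for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
        if (   (pVM->pgm.s.aGCPhysGstPaePDs[i] & X86_PTE_PAE_PG_MASK) /* assumed field */
            == (GCPhysFault & X86_PTE_PAE_PG_MASK))
            return (int)i; /* ASSUMES no duplicate entries, like the handler above */
    return -1;
}
#endif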