/* SELM.cpp revision 87fe4d73d7e6e53fbcec40dc6be2372479851cd4 */
/* $Id$ */
/** @file
* SELM - The Selector Manager.
*/
/*
* Copyright (C) 2006-2007 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
/** @page pg_selm SELM - The Selector Manager
*
* SELM takes care of GDT, LDT and TSS shadowing in raw-mode, and the injection
* of a few hyper selectors for the raw-mode context. In the hardware assisted
* virtualization mode its only task is to decode entries in the guest GDT or
* LDT once in a while.
*
* @see grp_selm
*
*
* @section seg_selm_shadowing Shadowing
*
* SELMR3UpdateFromCPUM() and SELMR3SyncTSS() does the bulk synchronization
* work. The three structures (GDT, LDT, TSS) are all shadowed wholesale atm.
* The idea is to do it in a more on-demand fashion when we get time. There
* also a whole bunch of issues with the current synchronization of all three
* tables, see notes and todos in the code.
*
* When the guest makes changes to the GDT we will try to update the shadow copy
* without involving SELMR3UpdateFromCPUM(), see selmGCSyncGDTEntry().
*
* When the guest makes LDT changes we'll trigger a full resync of the LDT
* (SELMR3UpdateFromCPUM()), which, needless to say, isn't optimal.
*
* The TSS shadowing is limited to the fields we need to care about, namely SS0
* and ESP0. The Patch Manager makes use of these. We monitor updates to the
* guest TSS and will try to keep our SS0 and ESP0 copies up to date this way
* rather than go the SELMR3SyncTSS() route.
*
* When in raw-mode SELM also injects a few extra GDT selectors which are used
* by the raw-mode (hyper) context. These start their life at the high end of
* the table and will be relocated when the guest tries to make use of them...
* Well, that was that idea at least, only the code isn't quite there yet which
* is why we have trouble with guests which actually have a full sized GDT.
*
* So, the summary of the current GDT, LDT and TSS shadowing is that there is a
* lot of relatively simple and enjoyable work to be done, see @bugref{3267}.
*
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_SELM
#include "SELMInternal.h"
/**
* @{
*/
#define SELM_TRACK_GUEST_GDT_CHANGES
#define SELM_TRACK_GUEST_LDT_CHANGES
#define SELM_TRACK_GUEST_TSS_CHANGES
/** @} */
/**
* @{
*/
#define SELM_TRACK_SHADOW_GDT_CHANGES
#define SELM_TRACK_SHADOW_LDT_CHANGES
#define SELM_TRACK_SHADOW_TSS_CHANGES
/** @} */
/** SELM saved state version. */
#define SELM_SAVED_STATE_VERSION 5
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
static DECLCALLBACK(int) selmR3GuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
static DECLCALLBACK(int) selmR3GuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
static DECLCALLBACK(int) selmR3GuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
//static DECLCALLBACK(void) selmR3InfoTss(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
//static DECLCALLBACK(void) selmR3InfoTssGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
/**
* Initializes the SELM.
*
* @returns VBox status code.
* @param pVM The VM to operate on.
*/
{
LogFlow(("SELMR3Init\n"));
/*
* Assert alignment and sizes.
* (The TSS block requires contiguous backing memory.)
*/
AssertCompile(sizeof(pVM->selm.s) <= sizeof(pVM->selm.padding)); AssertRelease(sizeof(pVM->selm.s) <= sizeof(pVM->selm.padding));
#if 0 /* doesn't work */
AssertCompile((RT_OFFSETOF(VM, selm.s.Tss) & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(pVM->selm.s.Tss));
AssertCompile((RT_OFFSETOF(VM, selm.s.TssTrap08) & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(pVM->selm.s.TssTrap08));
#endif
AssertRelease((RT_OFFSETOF(VM, selm.s.Tss) & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(pVM->selm.s.Tss));
AssertRelease((RT_OFFSETOF(VM, selm.s.TssTrap08) & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(pVM->selm.s.TssTrap08));
/*
* Init the structure.
*/
/*
* Allocate GDT table.
*/
/*
* Allocate LDT area.
*/
/*
* Init Guest's and Shadow GDT, LDT, TSS changes control variables.
*/
/* The I/O bitmap starts right after the virtual interrupt redirection bitmap. Outside the TSS on purpose; the CPU will not check it
* for I/O operations. */
/* bit set to 1 means no redirection */
/*
* Register the saved state data unit.
*/
if (RT_FAILURE(rc))
return rc;
/*
* Statistics.
*/
STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest GDT.");
STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest GDT.");
STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestLDT, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/LDT", STAMUNIT_OCCURENCES, "The number of writes to the Guest LDT was detected.");
STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS.");
STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSRedir, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSRedir",STAMUNIT_OCCURENCES, "The number of handled redir bitmap writes to the Guest TSS.");
STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandledChanged,STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSIntChg", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS where the R0 stack changed.");
STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest TSS.");
STAM_REG(pVM, &pVM->selm.s.StatTSSSync, STAMTYPE_PROFILE, "/PROF/SELM/TSSSync", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3SyncTSS() body.");
STAM_REG(pVM, &pVM->selm.s.StatUpdateFromCPUM, STAMTYPE_PROFILE, "/PROF/SELM/UpdateFromCPUM", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3UpdateFromCPUM() body.");
STAM_REG(pVM, &pVM->selm.s.StatHyperSelsChanged, STAMTYPE_COUNTER, "/SELM/HyperSels/Changed", STAMUNIT_OCCURENCES, "The number of times we had to relocate our hypervisor selectors.");
STAM_REG(pVM, &pVM->selm.s.StatScanForHyperSels, STAMTYPE_COUNTER, "/SELM/HyperSels/Scan", STAMUNIT_OCCURENCES, "The number of times we had find free hypervisor selectors.");
/*
* Default action when entering raw mode for the first time
*/
/*
* Register info handlers.
*/
DBGFR3InfoRegisterInternal(pVM, "gdtguest", "Displays the guest GDT. No arguments.", &selmR3InfoGdtGuest);
DBGFR3InfoRegisterInternal(pVM, "ldtguest", "Displays the guest LDT. No arguments.", &selmR3InfoLdtGuest);
//DBGFR3InfoRegisterInternal(pVM, "tss", "Displays the shadow TSS. No arguments.", &selmR3InfoTss);
//DBGFR3InfoRegisterInternal(pVM, "tssguest", "Displays the guest TSS. No arguments.", &selmR3InfoTssGuest);
return rc;
}
/**
* Finalizes HMA page attributes.
*
* @returns VBox status code.
* @param pVM The VM handle.
*/
{
/** @cfgm{/DoubleFault,bool,false}
* Enables catching of double faults in the raw-mode context VMM code. This can
* be used when triple faults or hangs occur and one suspects an unhandled
* double fault. This is not enabled by default because it means making the
* hyper selectors writeable for all supervisor code, including the guest's.
* The double fault is a task switch and thus requires write access to the GDT
* of the TSS (to set it busy), to the old TSS (to store state), and to the Trap
* 8 TSS for the back link.
*/
bool f;
#if defined(DEBUG_bird)
#else
#endif
if (f)
{
rc = PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] >> 3]), sizeof(paGdt[0]),
rc = PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] >> 3]), sizeof(paGdt[0]),
rc = PGMMapSetPage(pVM, VM_RC_ADDR(pVM, &pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]), sizeof(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]),
rc = PGMMapSetPage(pVM, VM_RC_ADDR(pVM, &pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08]), sizeof(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08]),
}
return VINF_SUCCESS;
}
/**
* Setup the hypervisor GDT selectors in our shadow table
*
* @param pVM The VM handle.
*/
{
/*
* Set up global code and data descriptors for use in the guest context.
* Both are wide open (base 0, limit 4GB)
*/
/* data */
/* 64-bit mode code (& data?) */
/*
* TSS descriptor
*/
/*
* TSS descriptor for trap 08
*/
}
/**
* Applies relocations to data and code managed by this
* component. This function will be called at init and
* whenever the VMM need to relocate it self inside the GC.
*
* @param pVM The VM.
*/
{
LogFlow(("SELMR3Relocate\n"));
{
/*
* Update GDTR and selector.
*/
/** @todo selector relocations should be a seperate operation? */
}
/** @todo SELM must be called when any of the CR3s changes during a cpu mode change. */
/** @todo PGM knows the proper CR3 values these days, not CPUM. */
/*
* Update the TSSes.
*/
/* Only applies to raw mode which supports only 1 VCPU */
/* Current TSS */
/* trap 08 */
pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu); /* this should give use better survival chances. */
pVM->selm.s.TssTrap08.esp0 = VMMGetStackRC(pVM) - PAGE_SIZE / 2; /* upper half can be analysed this way. */
pVM->selm.s.TssTrap08.ecx = VM_RC_ADDR(pVM, &pVM->selm.s.Tss); /* setup ecx to normal Hypervisor TSS address. */
/* TRPM will be updating the eip */
{
/*
*/
int rc;
{
}
0, 0, "selmRCShadowGDTWriteHandler", 0, "Shadow GDT write access handler");
#endif
{
}
0, 0, "selmRCShadowTSSWriteHandler", 0, "Shadow TSS write access handler");
#endif
/*
* Update the GC LDT region handler and address.
*/
{
}
#endif
0, 0, "selmRCShadowLDTWriteHandler", 0, "Shadow LDT write access handler");
#endif
}
}
/**
* Terminates the SELM.
*
* Termination means cleaning up and freeing all resources,
* the VM it self is at this point powered off or suspended.
*
* @returns VBox status code.
* @param pVM The VM to operate on.
*/
{
return 0;
}
/**
* The VM is being reset.
*
* needs to be removed.
*
* @param pVM VM handle.
*/
{
LogFlow(("SELMR3Reset:\n"));
/*
*/
int rc;
#ifdef SELM_TRACK_GUEST_GDT_CHANGES
{
}
#endif
#ifdef SELM_TRACK_GUEST_LDT_CHANGES
{
}
#endif
#ifdef SELM_TRACK_GUEST_TSS_CHANGES
{
}
#endif
/*
* Re-initialize other members.
*/
/*
* Default action when entering raw mode for the first time
*/
}
/**
*
* @param pVM The VM to operate on.
*/
{
/*
*/
int rc;
#ifdef SELM_TRACK_GUEST_GDT_CHANGES
{
}
#endif
#ifdef SELM_TRACK_GUEST_LDT_CHANGES
{
}
#endif
#ifdef SELM_TRACK_GUEST_TSS_CHANGES
{
}
#endif
/*
*/
{
}
#endif
{
}
#endif
{
}
#endif
}
/**
* Execute state save operation.
*
* @returns VBox status code.
* @param pVM VM Handle.
* @param pSSM SSM operation handle.
*/
{
LogFlow(("selmR3Save:\n"));
/*
* Save the basic bits - fortunately all the other things can be resynced on load.
*/
}
/**
* Execute state load operation.
*
* @returns VBox status code.
* @param pVM VM Handle.
* @param pSSM SSM operation handle.
* @param u32Version Data layout version.
*/
{
LogFlow(("selmR3Load:\n"));
/*
* Validate version.
*/
if (u32Version != SELM_SAVED_STATE_VERSION)
{
}
/*
* Do a reset.
*/
/* Get the monitoring flag. */
/* Get the TSS state flag. */
/*
* Get the selectors.
*/
/* Copy the selectors; they will be checked during relocation. */
return VINF_SUCCESS;
}
/**
* Sync the GDT, LDT and TSS after loading the state.
*
* Just to play safe, we set the FFs to force syncing before
* executing GC code.
*
* @returns VBox status code.
* @param pVM VM Handle.
* @param pSSM SSM operation handle.
*/
{
LogFlow(("selmR3LoadDone:\n"));
/*
* Don't do anything if it's a load failure.
*/
if (RT_FAILURE(rc))
return VINF_SUCCESS;
/*
* Do the syncing if we're in protected mode.
*/
{
}
/*
* Flag everything for resync on next raw mode entry.
*/
return VINF_SUCCESS;
}
/**
* Updates the Guest GDT & LDT virtualization based on current CPU state.
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pVCpu The VMCPU to operate on.
*/
{
int rc = VINF_SUCCESS;
{
return VINF_SUCCESS;
}
/*
* GDT sync
*/
{
/*
* Always assume the best
*/
/* If the GDT was changed, then make sure the LDT is checked too */
/** @todo only do this if the actual ldtr selector was changed; this is a bit excessive */
/* Same goes for the TSS selector */
/*
* Get the GDTR and check if there is anything to do (there usually is).
*/
{
Log(("No GDT entries...\n"));
return VINF_SUCCESS;
}
/*
* Read the Guest GDT.
* ASSUMES that the entire GDT is in memory.
*/
rc = PGMPhysSimpleReadGCPtr(pVCpu, pGDTE, GDTR.pGdt + sizeof(X86DESC), cbEffLimit + 1 - sizeof(X86DESC));
if (RT_FAILURE(rc))
{
/*
* Read it page by page.
*
* Keep track of the last valid page and delay memsets and
* adjust cbEffLimit to reflect the effective size. The latter
* is something we do in the belief that the guest will probably
* never actually commit the last page, thus allowing us to keep
* our selectors in the high end of the GDT.
*/
while (cbLeft)
{
if (RT_SUCCESS(rc))
{
if (pu8DstInvalid != pu8Dst)
}
else if ( rc == VERR_PAGE_NOT_PRESENT
|| rc == VERR_PAGE_TABLE_NOT_PRESENT)
{
}
else
{
return VERR_NOT_IMPLEMENTED;
}
}
/* any invalid pages at the end? */
if (pu8DstInvalid != pu8Dst)
{
/* If any GDTEs was invalidated, zero them. */
}
/* keep track of the effective limit. */
{
Log(("SELMR3UpdateFromCPUM: cbEffGuestGdtLimit=%#x -> %#x (actual %#x)\n",
}
}
/*
* Check if the Guest GDT intrudes on our GDT entries.
*/
/** @todo we should try to minimize relocations by making sure our current selectors can be reused. */
if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE)
{
int iGDT = 0;
Log(("Internal SELM GDT conflict: use non-present entries\n"));
while (pGDTE > pGDTEStart)
{
/* We can reuse non-present entries */
{
iGDT++;
if (iGDT >= SELM_HYPER_SEL_MAX)
break;
}
pGDTE--;
}
if (iGDT != SELM_HYPER_SEL_MAX)
{
AssertReleaseMsgFailed(("Internal SELM GDT conflict.\n"));
return VERR_NOT_IMPLEMENTED;
}
}
else
{
}
/*
* Work thru the copied GDT entries adjusting them for correct virtualization.
*/
{
{
/*
* Code and data selectors are generally 1:1, with the
* 'little' adjustment we do for DPL 0 selectors.
*/
{
/*
* Hack for A-bit against Trap E on read-only GDT.
*/
/** @todo Fix this by loading ds and cs before turning off WP. */
/*
* All DPL 0 code and data segments are squeezed into DPL 1.
*
* We're skipping conforming segments here because those
* cannot give us any trouble.
*/
!= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
}
else
{
/*
* System type selectors are marked not present.
* Recompiler or special handling is required for these.
*/
/** @todo what about interrupt gates and rawr0? */
}
}
/* Next GDT entry. */
pGDTE++;
}
/*
* Check if our hypervisor selectors were changed.
*/
{
/* Reinitialize our hypervisor GDTs */
/*
* Do the relocation callbacks to let everyone update their hyper selector dependencies.
* (SELMR3Relocate will call selmR3SetupHyperGDTSelectors() for us.)
*/
VMR3Relocate(pVM, 0);
}
else if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE)
/* We overwrote all entries above, so we have to save them again. */
/*
* Adjust the cached GDT limit.
* Any GDT entries which have been removed must be cleared.
*/
{
#ifndef SELM_TRACK_GUEST_GDT_CHANGES
#endif
}
#ifdef SELM_TRACK_GUEST_GDT_CHANGES
/*
* Check if Guest's GDTR is changed.
*/
{
Log(("SELMR3UpdateFromCPUM: Guest's GDT is changed to pGdt=%016RX64 cbGdt=%08X\n", GDTR.pGdt, GDTR.cbGdt));
/*
* [Re]Register write virtual handler for guest's GDT.
*/
{
}
rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GDTR.pGdt, GDTR.pGdt + GDTR.cbGdt /* already inclusive */,
if (RT_FAILURE(rc))
return rc;
/* Update saved Guest GDTR. */
}
#endif
}
/*
* TSS sync
*/
{
}
/*
* LDT sync
*/
{
/*
* Always assume the best
*/
/*
* LDT handling is done similarly to the GDT handling with a shadow
* array. However, since the LDT is expected to be swappable (at least
* some ancient OSes makes it swappable) it must be floating and
* synced on a per-page basis.
*
* Eventually we will change this to be fully on demand. Meaning that
* we will only sync pages containing LDT selectors actually used and
* let the #PF handler lazily sync pages as they are used.
* (This applies to GDT too, when we start making OS/2 fast.)
*/
/*
* First, determine the current LDT selector.
*/
if ((SelLdt & X86_SEL_MASK) == 0)
{
/* ldtr = 0 - update hyper LDTR and deregister any active handler. */
CPUMSetHyperLDTR(pVCpu, 0);
#ifdef SELM_TRACK_GUEST_LDT_CHANGES
{
}
#endif
return VINF_SUCCESS;
}
/*
* Get the LDT selector.
*/
/*
* Validate it.
*/
if ( !cbLdt
{
/* cbLdt > 0:
* This is quite impossible, so we do as most people do when faced with
* the impossible, we simply ignore it.
*/
CPUMSetHyperLDTR(pVCpu, 0);
#ifdef SELM_TRACK_GUEST_LDT_CHANGES
{
}
#endif
return VINF_SUCCESS;
}
/** @todo check what intel does about odd limits. */
AssertMsg(RT_ALIGN(cbLdt + 1, sizeof(X86DESC)) == cbLdt + 1 && cbLdt <= 0xffff, ("cbLdt=%d\n", cbLdt));
/*
* Use the cached guest ldt address if the descriptor has already been modified (see below)
* (this is necessary due to redundant LDT updates; see todo above at GDT sync)
*/
#ifdef SELM_TRACK_GUEST_LDT_CHANGES
/** @todo Handle only present LDT segments. */
// if (pDesc->Gen.u1Present)
{
/*
*/
{
Log(("SELMR3UpdateFromCPUM: Guest LDT changed to from %RGv:%04x to %RGv:%04x. (GDTR=%016RX64:%04x)\n",
pVM->selm.s.GCPtrGuestLdt, pVM->selm.s.cbLdtLimit, GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
/*
* [Re]Register write virtual handler for guest's GDT.
* In the event of LDT overlapping something, don't install it just assume it's being updated.
*/
{
}
#ifdef DEBUG
Log(("LDT selector marked not present!!\n"));
#endif
rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrLdt, GCPtrLdt + cbLdt /* already inclusive */,
if (rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT)
{
/** @todo investigate the various cases where conflicts happen and try avoid them by enh. the instruction emulation. */
Log(("WARNING: Guest LDT (%RGv:%04x) conflicted with existing access range!! Assumes LDT is begin updated. (GDTR=%016RX64:%04x)\n",
}
else if (RT_SUCCESS(rc))
else
{
CPUMSetHyperLDTR(pVCpu, 0);
return rc;
}
}
}
#else
#endif
/*
* Calc Shadow LDT base.
*/
unsigned off;
/*
* Enable the LDT selector in the shadow GDT.
*/
if (cbLdt > 0xffff)
{
cbLdt = 0xffff;
}
/*
* Set Hyper LDTR and notify TRPM.
*/
/*
* Loop synchronising the LDT page by page.
*/
/** @todo investigate how intel handle various operations on half present cross page entries. */
/* Note: Do not skip the first selector; unlike the GDT, a zero LDT selector is perfectly valid. */
while (cbLeft)
{
/*
* Read a chunk.
*/
if (RT_SUCCESS(rc))
{
/*
* Mark page
*/
rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D);
/*
* Loop thru the available LDT entries.
* Figure out where to start and end and the potential cross pageness of
* things adds a little complexity. pLDTE is updated there and not in the
* 'next' part of the loop. The pLDTEEnd is inclusive.
*/
{
{
/*
* Code and data selectors are generally 1:1, with the
* 'little' adjustment we do for DPL 0 selectors.
*/
{
/*
* Hack for A-bit against Trap E on read-only GDT.
*/
/** @todo Fix this by loading ds and cs before turning off WP. */
/*
* All DPL 0 code and data segments are squeezed into DPL 1.
*
* We're skipping conforming segments here because those
* cannot give us any trouble.
*/
!= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
}
else
{
/*
* System type selectors are marked not present.
* Recompiler or special handling is required for these.
*/
/** @todo what about interrupt gates and rawr0? */
}
}
/* Next LDT entry. */
pLDTE++;
}
}
else
{
}
/*
* Advance to the next page.
*/
}
}
return VINF_SUCCESS;
}
/**
 * \#PF Handler callback for virtual access handler ranges.
 *
 * Important to realize that a physical page in a range can have aliases, and
 * for ALL and WRITE handlers these will also trigger.
 *
 * @returns VINF_SUCCESS if the handler has carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             VM Handle.
 * @param   GCPtr           The virtual address the guest is writing to. (not correct if it's an alias!)
 * @param   pvPtr           The HC mapping of that address.
 * @param   pvBuf           What the guest is writing.
 * @param   cbBuf           Number of bytes being written.
 * @param   enmAccessType   The access type.
 * @param   pvUser          User argument.
 */
static DECLCALLBACK(int) selmR3GuestGDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
/* Let the caller (PGM) carry out the guest's write to its own GDT.
   NOTE(review): the handler body appears stripped in this revision — the
   complete source presumably flags/performs a shadow GDT resync here;
   confirm against the full file before relying on this behavior. */
return VINF_PGM_HANDLER_DO_DEFAULT;
}
/**
 * \#PF Handler callback for virtual access handler ranges.
 *
 * Important to realize that a physical page in a range can have aliases, and
 * for ALL and WRITE handlers these will also trigger.
 *
 * @returns VINF_SUCCESS if the handler has carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             VM Handle.
 * @param   GCPtr           The virtual address the guest is writing to. (not correct if it's an alias!)
 * @param   pvPtr           The HC mapping of that address.
 * @param   pvBuf           What the guest is writing.
 * @param   cbBuf           Number of bytes being written.
 * @param   enmAccessType   The access type.
 * @param   pvUser          User argument.
 */
static DECLCALLBACK(int) selmR3GuestLDTWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
/* Let the caller (PGM) carry out the guest's write to its LDT.
   NOTE(review): the handler body appears stripped in this revision — the
   complete source presumably triggers an LDT resync here (see the page
   comment: LDT writes cause a full SELMR3UpdateFromCPUM resync); confirm
   against the full file. */
return VINF_PGM_HANDLER_DO_DEFAULT;
}
/**
 * \#PF Handler callback for virtual access handler ranges.
 *
 * Important to realize that a physical page in a range can have aliases, and
 * for ALL and WRITE handlers these will also trigger.
 *
 * @returns VINF_SUCCESS if the handler has carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             VM Handle.
 * @param   GCPtr           The virtual address the guest is writing to. (not correct if it's an alias!)
 * @param   pvPtr           The HC mapping of that address.
 * @param   pvBuf           What the guest is writing.
 * @param   cbBuf           Number of bytes being written.
 * @param   enmAccessType   The access type.
 * @param   pvUser          User argument.
 */
static DECLCALLBACK(int) selmR3GuestTSSWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
/* Trace the write for debugging; at most the first 8 bytes are dumped. */
Log(("selmR3GuestTSSWriteHandler: write %.*Rhxs to %RGv size %d\n", RT_MIN(8, cbBuf), pvBuf, GCPtr, cbBuf));
/** @todo This can be optimized by checking for the ESP0 offset and tracking TR
 *        reloads in REM (setting VM_FF_SELM_SYNC_TSS if TR is reloaded).
 *        NOTE(review): the original sentence here was garbled ("We changes
 *        while we're in REM"); presumably it referred to catching TSS
 *        changes made while executing in REM — confirm against the full
 *        source. */
/* Let the caller (PGM) carry out the write; sync of the shadowed SS0/ESP0
   fields is handled by SELMR3SyncTSS (see its doc header above). */
return VINF_PGM_HANDLER_DO_DEFAULT;
}
/**
* Synchronize the shadowed fields in the TSS.
*
* At present we're shadowing the ring-0 stack selector & pointer, and the
* interrupt redirection bitmap (if present). We take the lazy approach wrt to
* REM and this function is called both if REM made any changes to the TSS or
* loaded TR.
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pVCpu The VMCPU to operate on.
*/
{
int rc;
{
return VINF_SUCCESS;
}
/*
* Get TR and extract and store the basic info.
*
* Note! The TSS limit is not checked by the LTR code, so we
* have to be a bit careful with it. We make sure cbTss
* won't be zero if TR is valid and if it's NULL we'll
* make sure cbTss is 0.
*/
|| (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY /* RESET */));
if (SelTss & X86_SEL_MASK)
{
if (!++cbTss)
cbTss = UINT32_MAX;
}
else
{
|| (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY /* RESET */));
cbTss = 0; /* the reset case. */
}
/*
* Figure out the size of what need to monitor.
*/
/* We're not interested in any 16-bit TSSes. */
cbMonitoredTss = 0;
bool fNoRing1Stack = true;
if (cbMonitoredTss)
{
/*
* 32-bit TSS. What we're really keen on is the SS0 and ESP0 fields.
* If VME is enabled we also want to keep an eye on the interrupt
* redirection bitmap.
*/
if ( !(cr4 & X86_CR4_VME)
|| ( VBOX_SUCCESS(rc)
&& Tss.offIoBitmap > cbTss) /* beyond the end */ /** @todo not sure how the partial case is handled; probably not allowed. */
)
/* No interrupt redirection bitmap, just ESP0 and SS0. */
else if (RT_SUCCESS(rc))
{
/*
* Everything up to and including the interrupt redirection bitmap. Unfortunately
* this can be quite a large chunk. We use to skip it earlier and just hope it
* was kind of static...
*
* Update the virtual interrupt redirection bitmap while we're here.
* (It is located in the 32 bytes before TR:offIoBitmap.)
*/
/** @todo memset the bitmap on failure? */
Log2(("Redirection bitmap:\n"));
}
else
{
/** @todo memset the bitmap? */
}
/*
* Update the ring 0 stack selector and base address.
*/
if (RT_SUCCESS(rc))
{
#ifdef LOG_ENABLED
if (LogIsEnabled())
{
{
Log(("SELMR3SyncTSS: Updating TSS ring 0 stack to %04X:%08X from %04X:%08X; TSS Phys=%VGp)\n",
("ring-1 leak into TSS.SS0! %04X:%08X from %04X:%08X; TSS Phys=%VGp)\n",
}
}
#endif /* LOG_ENABLED */
/* Update our TSS structure for the guest's ring 1 stack */
}
}
/*
* Flush the ring-1 stack and the direct syscall dispatching if we
* cannot obtain SS0:ESP0.
*/
if (fNoRing1Stack)
{
/** @todo handle these dependencies better! */
}
/*
* Check for monitor changes and apply them.
*/
{
Log(("SELMR3SyncTSS: Guest's TSS is changed to pTss=%RGv cbMonitoredTss=%08X cbGuestTss=%#08x\n",
/* Release the old range first. */
{
}
/* Register the write handler if TS != 0. */
if (cbMonitoredTss != 0)
{
rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrTss, GCPtrTss + cbMonitoredTss - 1,
"selmRCGuestTSSWriteHandler", 0, "Guest TSS write access handler");
if (RT_FAILURE(rc))
{
return rc;
}
/* Update saved Guest TSS info. */
}
else
{
}
}
return VINF_SUCCESS;
}
/**
* Compares the Guest GDT and LDT with the shadow tables.
* This is a VBOX_STRICT only function.
*
* @returns VBox status code.
* @param pVM The VM Handle.
*/
{
#ifdef VBOX_STRICT
/*
* Get GDTR and check for conflict.
*/
return VINF_SUCCESS;
Log(("SELMR3DebugCheck: guest GDT size forced us to look for unused selectors.\n"));
Log(("SELMR3DebugCheck: limits have changed! new=%d old=%d\n", GDTR.cbGdt, pVM->selm.s.GuestGdtr.cbGdt));
/*
* Loop thru the GDT checking each entry.
*/
{
if (RT_SUCCESS(rc))
{
{
{
}
}
}
/* Advance to the next descriptor. */
GCPtrGDTEGuest += sizeof(X86DESC);
pGDTE++;
}
/*
* LDT?
*/
if ((SelLdt & X86_SEL_MASK) == 0)
return VINF_SUCCESS;
{
return VERR_INTERNAL_ERROR;
}
int rc = PGMPhysSimpleReadGCPtr(pVCpu, &LDTDesc, GDTR.pGdt + (SelLdt & X86_SEL_MASK), sizeof(LDTDesc));
if (RT_FAILURE(rc))
{
return rc;
}
/*
* Validate it.
*/
if (!cbLdt)
return VINF_SUCCESS;
/** @todo check what intel does about odd limits. */
AssertMsg(RT_ALIGN(cbLdt + 1, sizeof(X86DESC)) == cbLdt + 1 && cbLdt <= 0xffff, ("cbLdt=%d\n", cbLdt));
{
return VERR_INTERNAL_ERROR;
}
/*
* Loop thru the LDT checking each entry.
*/
{
if (RT_SUCCESS(rc))
{
{
}
}
/* Advance to the next descriptor. */
GCPtrLDTEGuest += sizeof(X86DESC);
pLDTE++;
}
#else /* !VBOX_STRICT */
#endif /* !VBOX_STRICT */
return VINF_SUCCESS;
}
/**
* Validates the RawR0 TSS values against the one in the Guest TSS.
*
* @returns true if it matches.
* @returns false and assertions on mismatch..
* @param pVM VM Handle.
*/
{
#ifdef VBOX_STRICT
return true;
/*
* Get TR and extract the basic info.
*/
|| (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY /* RESET */));
if (SelTss & X86_SEL_MASK)
{
false);
if (!++cbTss)
cbTss = UINT32_MAX;
}
else
{
|| (cbTss == 0xffff && GCPtrTss == 0 && trHid.Attr.n.u1Present && trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY /* RESET */),
false);
cbTss = 0; /* the reset case. */
}
AssertMsgReturn(pVM->selm.s.cbGuestTss == cbTss, ("%#x %#x\n", pVM->selm.s.cbGuestTss, cbTss), false);
AssertMsgReturn(pVM->selm.s.fGuestTss32Bit == ( trHid.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
false);
false);
false);
/*
* Figure out the size of what need to monitor.
*/
bool fNoRing1Stack = true;
/* We're not interested in any 16-bit TSSes. */
cbMonitoredTss = 0;
if (cbMonitoredTss)
{
/* Happens early in XP boot during page table switching. */
false);
if ( !(cr4 & X86_CR4_VME)
|| ( VBOX_SUCCESS(rc)
)
else if (RT_SUCCESS(rc))
{
false);
/* check the bitmap */
AssertRCReturn(rc, false);
AssertMsgReturn(!memcmp(&Tss.IntRedirBitmap[0], &pVM->selm.s.Tss.IntRedirBitmap[0], sizeof(Tss.IntRedirBitmap)),
("offIoBitmap=%#x cbTss=%#x\n"
" Guest: %.32Rhxs\n"
"Shadow: %.32Rhxs\n",
&Tss.IntRedirBitmap[0],
false);
}
else
/*
* Check SS0 and ESP0.
*/
&& RT_SUCCESS(rc))
{
{
AssertMsgFailed(("TSS out of sync!! (%04X:%08X vs %04X:%08X (guest)) Tss=%RGv Phys=%RGp\n",
return false;
}
}
AssertMsgReturn(pVM->selm.s.cbMonitoredGuestTss == cbMonitoredTss, ("%#x %#x\n", pVM->selm.s.cbMonitoredGuestTss, cbMonitoredTss), false);
}
else
{
AssertMsgReturn(pVM->selm.s.Tss.ss1 == 0 && pVM->selm.s.Tss.esp1 == 0, ("%04x:%08x\n", pVM->selm.s.Tss.ss1, pVM->selm.s.Tss.esp1), false);
AssertMsgReturn(pVM->selm.s.cbMonitoredGuestTss == cbMonitoredTss, ("%#x %#x\n", pVM->selm.s.cbMonitoredGuestTss, cbMonitoredTss), false);
}
return true;
#else /* !VBOX_STRICT */
return true;
#endif /* !VBOX_STRICT */
}
/**
 * Returns flat address and limit of LDT by LDT selector from guest GDTR.
 *
 * Fully validate selector.
 *
 * @returns VBox status.
 * @param pVM VM Handle.
 * @param SelLdt LDT selector.
 * @param ppvLdt Where to store the flat address of LDT.
 * @param pcbLimit Where to store LDT limit.
 */
/* NOTE(review): this block appears truncated — the function declarator and many
 * body lines (GDTR fetch, limit check, descriptor-present/type tests, base/limit
 * computation) are missing. The surviving lines are orphaned fragments; restore
 * the full function from the upstream SELM.cpp before building. */
{
/* Get guest GDTR. */
/* Check selector TI and GDT limit. */
/* NOTE(review): condition truncated — the GDT-limit half of the test is gone. */
if ( SelLdt & X86_SEL_LDT
return VERR_INVALID_SELECTOR;
/* Read descriptor from GC. */
/* Reads the raw descriptor bytes from guest memory at GDT base + selector offset. */
int rc = PGMPhysSimpleReadGCPtr(pVCpu, (void *)&Desc, (RTGCPTR)(GDTR.pGdt + (SelLdt & X86_SEL_MASK)), sizeof(Desc));
if (RT_FAILURE(rc))
{
/* fatal */
return VERR_SELECTOR_NOT_PRESENT;
}
/* Check if LDT descriptor is not present. */
return VERR_SELECTOR_NOT_PRESENT;
/* Check LDT descriptor type. */
return VERR_INVALID_SELECTOR;
/* LDT descriptor is ok. */
if (ppvLdt)
{
}
return VINF_SUCCESS;
}
/**
 * Gets information about a 64-bit selector, SELMR3GetSelectorInfo helper.
 *
 * See SELMR3GetSelectorInfo for details.
 *
 * @returns VBox status code, see SELMR3GetSelectorInfo for details.
 *
 * @param pVM VM handle.
 * @param pVCpu VMCPU handle.
 * @param Sel The selector to get info about.
 * @param pSelInfo Where to store the information.
 */
/* NOTE(review): block is truncated — declarator, local declarations (GDTR/LDTR
 * reads, rc, SelLdt, Desc) and most statement lines are missing. The control-flow
 * skeleton (GDT vs. LDT path, then a final descriptor read) survives, but the
 * guard conditions for most early returns are gone. Restore from upstream. */
{
/*
 * Read it from the guest descriptor table.
 */
if (!(Sel & X86_SEL_LDT))
{
/* GDT */
return VERR_INVALID_SELECTOR;
}
else
{
/*
 * LDT - must locate the LDT first...
 */
if ( (unsigned)(SelLdt & X86_SEL_MASK) < sizeof(X86DESC) /* the first selector is invalid, right? */
return VERR_INVALID_SELECTOR;
if (RT_FAILURE(rc))
return rc;
/* validate the LDT descriptor. */
/* NOTE(review): the three returns below lost their conditions (present bit,
 * descriptor type, limit checks — order presumed, confirm upstream). */
return VERR_SELECTOR_NOT_PRESENT;
return VERR_INVALID_SELECTOR;
return VERR_INVALID_SELECTOR;
/* calc the descriptor location. */
}
/* read the descriptor. */
if (RT_FAILURE(rc))
return rc;
/*
 * Extract the base and limit
 */
return VINF_SUCCESS;
}
/**
 * Gets information about a 64-bit selector, SELMR3GetSelectorInfo helper.
 *
 * NOTE(review): this doc header looks copy-pasted from the 64-bit helper above;
 * this variant also handles real mode and hypervisor selectors, so it is
 * presumably the legacy/32-bit helper — confirm against the upstream file.
 *
 * See SELMR3GetSelectorInfo for details.
 *
 * @returns VBox status code, see SELMR3GetSelectorInfo for details.
 *
 * @param pVM VM handle.
 * @param pVCpu VMCPU handle.
 * @param Sel The selector to get info about.
 * @param pSelInfo Where to store the information.
 */
/* NOTE(review): block is truncated — declarator, locals, and most condition
 * lines are missing. Three paths survive in outline: hypervisor descriptor,
 * protected-mode guest tables (GDT/LDT), and real mode. Restore from upstream. */
{
/*
 * Read the descriptor entry
 */
if ( !(Sel & X86_SEL_LDT)
)
{
/*
 * Hypervisor descriptor.
 */
}
else if (CPUMIsGuestInProtectedMode(pVCpu))
{
/*
 * Read it from the guest descriptor table.
 */
if (!(Sel & X86_SEL_LDT))
{
/* GDT */
return VERR_INVALID_SELECTOR;
}
else
{
/*
 * LDT - must locate the LDT first...
 */
if ( (unsigned)(SelLdt & X86_SEL_MASK) < sizeof(X86DESC) /* the first selector is invalid, right? */
return VERR_INVALID_SELECTOR;
if (RT_FAILURE(rc))
return rc;
/* validate the LDT descriptor. */
/* NOTE(review): the three returns below lost their guard conditions. */
return VERR_SELECTOR_NOT_PRESENT;
return VERR_INVALID_SELECTOR;
return VERR_INVALID_SELECTOR;
/* calc the descriptor location. */
}
/* read the descriptor. */
if (RT_FAILURE(rc))
return rc;
}
else
{
/*
 * We're in real mode.
 */
return VINF_SUCCESS;
}
/*
 * Extract the base and limit
 */
return VINF_SUCCESS;
}
/**
 * Gets information about a selector.
 * Intended for the debugger mostly and will prefer the guest
 * descriptor tables over the shadow ones.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_INVALID_SELECTOR if the selector isn't fully inside the descriptor table.
 * @returns VERR_SELECTOR_NOT_PRESENT if the selector wasn't present.
 * @returns VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the pagetable or page
 * backing the selector table wasn't present.
 * @returns Other VBox status code on other errors.
 *
 * @param pVM VM handle.
 * @param Sel The selector to get info about.
 * @param pSelInfo Where to store the information.
 */
/* NOTE(review): block is truncated — the declarator and the helper-dispatch
 * bodies are missing. Presumably this forwarded to the 64-bit helper when the
 * guest is in long mode and to the legacy helper otherwise — confirm upstream. */
{
if (CPUMIsGuestInLongMode(pVCpu))
}
/**
 * Gets information about a selector from the shadow tables.
 *
 * This is intended to be faster than the SELMR3GetSelectorInfo() method, but requires
 * that the caller ensures that the shadow tables are up to date.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_INVALID_SELECTOR if the selector isn't fully inside the descriptor table.
 * @returns VERR_SELECTOR_NOT_PRESENT if the selector wasn't present.
 * @returns VERR_PAGE_TABLE_NOT_PRESENT or VERR_PAGE_NOT_PRESENT if the pagetable or page
 * backing the selector table wasn't present.
 * @returns Other VBox status code on other errors.
 *
 * @param pVM VM handle.
 * @param Sel The selector to get info about.
 * @param pSelInfo Where to store the information.
 */
/* NOTE(review): block is truncated — declarator and the actual shadow-table
 * reads are missing; only the GDT/LDT branch skeleton and the original todos
 * remain. Restore from upstream before use. */
{
/*
 * Read the descriptor entry
 */
if (!(Sel & X86_SEL_LDT))
{
/*
 * Global descriptor.
 */
/** @todo check that the GDT offset is valid. */
}
else
{
/*
 * Local Descriptor.
 */
/** @todo check if the LDT page is actually available. */
/** @todo check that the LDT offset is valid. */
}
/*
 * Extract the base and limit
 */
return VINF_SUCCESS;
}
/**
 * Formats a descriptor.
 *
 * @param Desc Descriptor to format.
 * @param Sel Selector number.
 * @param pszOutput Output buffer.
 * @param cchOutput Size of output buffer.
 */
/* NOTE(review): block is truncated — the declarator, the 32 entries of the
 * type-name table, and the formatting logic itself are missing. Only the table
 * shell and two orphaned 'else' fragments remain. Restore from upstream. */
{
/*
 * Make variable description string.
 */
static struct
{
unsigned cch;
const char *psz;
} const aTypes[32] =
{
/* system */
/* non system */
};
char szMsg[128];
else
else
*psz = '\0';
/*
 * Limit and Base and format the output.
 */
}
/**
 * Dumps a descriptor.
 *
 * @param Desc Descriptor to dump.
 * @param Sel Selector number.
 * @param pszMsg Message to prepend the log entry with.
 */
/* NOTE(review): truncated — declarator and the format-and-log calls are gone;
 * only the output buffer declaration survives. Presumably this formatted via
 * selmR3FormatDescriptor() and logged the result — confirm upstream. */
{
char szOutput[128];
}
/**
 * Display the shadow gdt.
 *
 * @param pVM VM Handle.
 * @param pHlp The info helpers.
 * @param pszArgs Arguments, ignored.
 */
/* NOTE(review): block is truncated — declarator, the loop header over the
 * shadow GDT (iGDT), the hyper-selector comparisons guarding each 'psz ='
 * assignment, and the print call are missing. Restore from upstream. */
{
{
{
char szOutput[128];
selmR3FormatDescriptor(pVM->selm.s.paGdtR3[iGDT], iGDT << X86_SEL_SHIFT, &szOutput[0], sizeof(szOutput));
const char *psz = "";
psz = " HyperCS";
psz = " HyperDS";
psz = " HyperCS64";
psz = " HyperTSS";
psz = " HyperTSSTrap08";
}
}
}
/**
 * Display the guest gdt.
 *
 * @param pVM VM Handle.
 * @param pHlp The info helpers.
 * @param pszArgs Arguments, ignored.
 */
/* NOTE(review): block is truncated — declarator, GDTR fetch, the per-entry
 * loop header, the guest-memory read that sets rc, and the success-path print
 * are missing. The error handling tail (page-not-present vs. read error)
 * survives. Restore from upstream. */
{
/** @todo SMP support! */
{
if (RT_SUCCESS(rc))
{
{
char szOutput[128];
}
}
else if (rc == VERR_PAGE_NOT_PRESENT)
{
}
else
pHlp->pfnPrintf(pHlp, "%04x - read error rc=%Rrc GCAddr=%RGv\n", iGDT << X86_SEL_SHIFT, rc, GCPtrGDT);
}
}
/**
 * Display the shadow ldt.
 *
 * @param pVM VM Handle.
 * @param pHlp The info helpers.
 * @param pszArgs Arguments, ignored.
 */
/* NOTE(review): block is truncated — declarator, the paLDT pointer setup, the
 * loop header over shadow LDT entries, and the per-entry print are missing.
 * Restore from upstream. */
{
pHlp->pfnPrintf(pHlp, "Shadow LDT (GCAddr=%RRv limit=%#x):\n", pVM->selm.s.pvLdtRC + pVM->selm.s.offLdtHyper, pVM->selm.s.cbLdtLimit);
{
{
char szOutput[128];
selmR3FormatDescriptor(paLDT[iLDT], (iLDT << X86_SEL_SHIFT) | X86_SEL_LDT, &szOutput[0], sizeof(szOutput));
}
}
}
/**
 * Display the guest ldt.
 *
 * @param pVM VM Handle.
 * @param pHlp The info helpers.
 * @param pszArgs Arguments, ignored.
 */
/* NOTE(review): block is truncated — declarator, the LDTR/SelLdt fetch, the
 * LDT base/limit lookup that sets rc and cbLdt, the per-entry loop header, the
 * guest-memory read of LdtE, and the success-path print are missing. The empty
 * early returns below lost their diagnostic prints. Restore from upstream. */
{
/** @todo SMP support! */
/* An LDT selector with a null index means no LDT is active. */
if (!(SelLdt & X86_SEL_MASK))
{
return;
}
unsigned cbLdt;
if (RT_FAILURE(rc))
{
return;
}
{
if (RT_SUCCESS(rc))
{
{
char szOutput[128];
selmR3FormatDescriptor(LdtE, (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, &szOutput[0], sizeof(szOutput));
}
}
else if (rc == VERR_PAGE_NOT_PRESENT)
{
pHlp->pfnPrintf(pHlp, "%04x - page not present (GCAddr=%RGv)\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, GCPtrLdt);
}
else
pHlp->pfnPrintf(pHlp, "%04x - read error rc=%Rrc GCAddr=%RGv\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, rc, GCPtrLdt);
}
}
/**
 * Dumps the hypervisor GDT
 *
 * @param pVM VM handle.
 */
/* NOTE(review): truncated — declarator and body gone; presumably a thin
 * wrapper invoking the shadow-GDT info handler. Confirm upstream. */
{
}
/**
 * Dumps the hypervisor LDT
 *
 * @param pVM VM handle.
 */
/* NOTE(review): truncated — declarator and body gone; presumably a thin
 * wrapper invoking the shadow-LDT info handler. Confirm upstream. */
{
}
/**
 * Dumps the guest GDT
 *
 * @param pVM VM handle.
 */
/* NOTE(review): truncated — declarator and body gone; presumably a thin
 * wrapper invoking the guest-GDT info handler. Confirm upstream. */
{
}
/**
 * Dumps the guest LDT
 *
 * @param pVM VM handle.
 */
/* NOTE(review): truncated — declarator and body gone; presumably a thin
 * wrapper invoking the guest-LDT info handler. Confirm upstream. */
{
}