HMInternal.h revision 5dcf1b054c00164e2d91bde08d4cdee674de21c3
/* $Id$ */
/** @file
* HM - Internal header file.
*/
/*
* Copyright (C) 2006-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#ifndef ___HMInternal_h
#define ___HMInternal_h
#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined (VBOX_WITH_64_BITS_GUESTS)
/* Enable 64-bit guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif
#ifdef VBOX_WITH_OLD_VTX_CODE
# define VMX_USE_CACHED_VMCS_ACCESSES
#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
# define VMX_USE_CACHED_VMCS_ACCESSES
#endif
/** @def HM_PROFILE_EXIT_DISPATCH
* Enables profiling of the VM exit handler dispatching. */
#if 0
# define HM_PROFILE_EXIT_DISPATCH
#endif
/** @def VBOX_WITH_AUTO_MSR_LOAD_RESTORE
 * Enables the automatic VM-entry MSR-load and VM-exit MSR-store/load areas
 * for guest MSRs such as TSC_AUX. There was a noticeable performance hit when we
 * used to handle this MSR manually. See @bugref{6208}. This was clearly visible while
 * booting Solaris 11 (11.1 b19) VMs with 2 CPUs. This is no longer the case and we
 * use the automatic load/store areas now.
 *
 * Note: don't forget to update the assembly files while modifying this!
 */
/** @todo This define should always be in effect and the define itself removed
after 'sufficient' testing. */
# define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
/** @defgroup grp_hm_int Internal
* @ingroup grp_hm
* @internal
* @{
*/
/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT 0x100
/** Mask for indexing the exit reason statistics counters. */
#define MASK_EXITREASON_STAT 0xff
/** Mask for indexing the injected IRQ statistics counters. */
#define MASK_INJECT_IRQ_STAT 0xff
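/* Illustrative sketch of how the masks above are typically applied: an
 * arbitrary exit reason is clamped into the fixed-size counter array. The
 * paStatExitReason member name is an assumption, not taken from this file:
 *
 *     STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReason[uExitReason & MASK_EXITREASON_STAT]);
 */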
/** @name HM changed flags.
 * These flags are used to keep track of which important registers have
 * changed since they were last reset.
* @{
*/
#ifdef VBOX_WITH_OLD_VTX_CODE
# define HM_CHANGED_GUEST_FPU RT_BIT(0)
# define HM_CHANGED_ALL_GUEST ( HM_CHANGED_GUEST_SEGMENT_REGS \
#else
# define HM_CHANGED_GUEST_RIP RT_BIT(0)
/* VT-x specific state. */
/* AMD-V specific state. */
# define HM_CHANGED_ALL_GUEST ( HM_CHANGED_GUEST_RIP \
#endif
/** @} */
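/* Illustrative sketch of how the HM_CHANGED_* flags are consumed: writers set
 * a bit when they touch guest state, and the world-switch code syncs and
 * clears it. The fContextUseFlags member name and the VMCS write are
 * assumptions here, not taken from this file:
 *
 *     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;      // mark RIP dirty
 *     ...
 *     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)   // sync + clear
 *     {
 *         rc = VMXWriteVmcs64(VMX_VMCS_GUEST_RIP, pCtx->rip);
 *         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
 *     }
 */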
/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
#define HM_MAX_TLB_SHOOTDOWN_PAGES 8
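/* Illustrative sketch: individual pages are queued for shootdown until the
 * limit above is hit, after which a full flush is cheaper. Member names
 * follow the TlbShootdown / fForceTLBFlush members later in this file but
 * are assumptions here:
 *
 *     if (pVCpu->hm.s.TlbShootdown.cPages >= HM_MAX_TLB_SHOOTDOWN_PAGES)
 *         pVCpu->hm.s.fForceTLBFlush = true;     // give up, flush everything
 *     else
 *         pVCpu->hm.s.TlbShootdown.aPages[pVCpu->hm.s.TlbShootdown.cPages++] = GCVirt;
 */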
/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
/** Total guest mapped memory needed. */
/** Enable for TPR guest patching. */
#define VBOX_HM_WITH_GUEST_PATCHING
/** HM SSM version. */
#ifdef VBOX_HM_WITH_GUEST_PATCHING
# define HM_SSM_VERSION 5
# define HM_SSM_VERSION_NO_PATCHING 4
#else
# define HM_SSM_VERSION 4
# define HM_SSM_VERSION_NO_PATCHING 4
#endif
#define HM_SSM_VERSION_2_0_X 3
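/* Illustrative sketch: a saved-state load callback would accept all of the
 * versions above and reject anything else; this condensed body is an
 * assumption, not the actual hmR3Load implementation:
 *
 *     if (   uVersion != HM_SSM_VERSION
 *         && uVersion != HM_SSM_VERSION_NO_PATCHING
 *         && uVersion != HM_SSM_VERSION_2_0_X)
 *         return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
 */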
/**
* Global per-cpu information. (host)
*/
typedef struct HMGLOBLCPUINFO
{
/** The CPU ID. */
/** The memory object */
/** Current ASID (AMD-V) / VPID (Intel). */
/** TLB flush count. */
/** Whether to flush each new ASID/VPID before use. */
bool fFlushAsidBeforeUse;
/** Configured for VT-x or AMD-V. */
bool fConfigured;
/** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
bool fIgnoreAMDVInUseError;
/** In use by our code. (for power suspend) */
volatile bool fInUse;
} HMGLOBLCPUINFO;
/** Pointer to the per-cpu global information. */
typedef HMGLOBLCPUINFO *PHMGLOBLCPUINFO;
/** Pending guest I/O operation type. */
typedef enum
{
HMPENDINGIO_INVALID = 0,
/** The usual 32-bit paranoia. */
HMPENDINGIO_32BIT_HACK = 0x7fffffff
} HMPENDINGIO;
/** TPR access instruction type (used for guest TPR patching). */
typedef enum
{
/** The usual 32-bit paranoia. */
HMTPRINSTR_32BIT_HACK = 0x7fffffff
} HMTPRINSTR;
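/* The 0x7fffffff members above exist only to force the compiler to give these
 * enums a full 32-bit representation, keeping their size ABI-stable. A
 * compile-time check with the IPRT macro would look like:
 *
 *     AssertCompileSize(HMPENDINGIO, 4);
 *     AssertCompileSize(HMTPRINSTR, 4);
 */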
/** TPR patch information. */
typedef struct
{
/** The key is the address of the patched instruction (32-bit GC pointer). */
/** Original opcode. */
/** Instruction size. */
/** Replacement opcode. */
/** Replacement instruction size. */
/** Instruction type. */
/** Source operand. */
/** Destination operand. */
/** Number of times the instruction caused a fault. */
/** Patch address of the jump replacement. */
} HMTPRPATCH;
/** Pointer to HMTPRPATCH. */
typedef HMTPRPATCH *PHMTPRPATCH;
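/* Illustrative sketch: patches are stored in an AVL tree keyed by the guest
 * instruction address (see the PatchTree comment further down in struct HM),
 * so a faulting guest RIP can be matched in O(log n). The PatchTree member
 * name is assumed from that comment; RTAvloU32Get is the IPRT accessor:
 *
 *     PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree,
 *                                                    (AVLOU32KEY)pCtx->eip);
 */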
/**
* Switcher function, HC to the special 64-bit RC.
*
* @param pVM Pointer to the VM.
* @param offCpumVCpu Offset from pVM->cpum to pVM->aCpus[idCpu].cpum.
* @returns Return code indicating the action to take.
*/
typedef DECLCALLBACK(int) FNHMSWITCHERHC(PVM pVM, uint32_t offCpumVCpu);
/** Pointer to switcher function. */
typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;
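/* Illustrative sketch: on 32-bit hosts the 64-bit-only code paths are entered
 * through this switcher, passing the per-VCPU CPUM offset so the 64-bit side
 * can locate the right context. The pfnHost32ToGuest64R0 member name is an
 * assumption matching the "32 to 64 bits switcher entrypoint" member below:
 *
 *     int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM,
 *                  RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
 */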
/**
* HM VM Instance data.
 * Changes to this must be checked against the padding of the hm union in VM!
*/
typedef struct HM
{
/** Set when we've initialized VMX or SVM. */
bool fInitialized;
/** Set if nested paging is enabled. */
bool fNestedPaging;
/** Set if nested paging is allowed. */
bool fAllowNestedPaging;
/** Set if large pages are enabled (requires nested paging). */
bool fLargePages;
/** Set if we can support 64-bit guests or not. */
bool fAllow64BitGuests;
/** Set if an IO-APIC is configured for this VM. */
bool fHasIoApic;
/** Set when TPR patching is allowed. */
bool fTRPPatchingAllowed;
/** Set when we initialize VT-x or AMD-V once for all CPUs. */
bool fGlobalInit;
/** Set when TPR patching is active. */
bool fTPRPatchingActive;
bool u8Alignment[7];
/** Maximum ASID allowed. */
/** The maximum number of resume loops allowed in ring-0 (safety precaution).
 * This number is set much higher when RTThreadPreemptIsPending is reliable. */
/** Guest allocated memory for patching purposes. */
/** Current free pointer inside the patch block. */
/** Size of the guest patch memory block. */
#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
/** 32 to 64 bits switcher entrypoint. */
#endif
struct
{
/** Set by the ring-0 side of HM to indicate VMX is supported by the
* CPU. */
bool fSupported;
/** Set when we've enabled VMX. */
bool fEnabled;
/** Set if VPID is supported. */
bool fVpid;
/** Set if VT-x VPID is allowed. */
bool fAllowVpid;
/** Set if unrestricted guest execution is in use (real and protected mode without paging). */
bool fUnrestrictedGuest;
/** Set if unrestricted guest execution is allowed to be used. */
bool fAllowUnrestricted;
/** Whether we're using the preemption timer or not. */
bool fUsePreemptTimer;
/** The shift mask employed by the VMX-Preemption timer. */
/** Virtual address of the TSS page used for real mode emulation. */
/** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
/** R0 memory object for the APIC-access page. */
/** Physical address of the APIC-access page. */
/** Virtual address of the APIC-access page. */
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
#endif
#ifndef VBOX_WITH_OLD_VTX_CODE
/** Which tagged-TLB flush handler to use (pre-computed). */
unsigned uFlushTaggedTlb;
#else
/** Ring 0 handlers for VT-x. */
#endif
#endif
/** Host CR4 value (set by ring-0 VMX init) */
/** Host EFER value (set by ring-0 VMX init) */
/** VMX MSR values */
struct
{
} msr;
/** Flush types for invept & invvpid; they depend on capabilities. */
} vmx;
struct
{
/** Set by the ring-0 side of HM to indicate SVM is supported by the
* CPU. */
bool fSupported;
/** Set when we've enabled SVM. */
bool fEnabled;
/** Set if erratum 170 affects the AMD CPU. */
bool fAlwaysFlushTLB;
/** Set when the hack to ignore VERR_SVM_IN_USE is active. */
bool fIgnoreInUseError;
/** R0 memory object for the IO bitmap (12 KB). */
/** Physical address of the IO bitmap (12 KB). */
/** Virtual address of the IO bitmap. */
R0PTRTYPE(void *) pvIOBitmap;
/* HWCR MSR (for diagnostics) */
/** SVM revision. */
/** SVM feature bits from cpuid 0x8000000a */
} svm;
/**
* AVL tree with all patches (active or disabled) sorted by guest instruction address
*/
struct
{
} cpuid;
/** Saved error from detection */
/** HMR0Init was run */
bool fHMR0Init;
bool u8Alignment1[7];
} HM;
/** Pointer to HM VM instance data. */
typedef HM *PHM;
/* Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY 128
/* Structure for storing read and write VMCS actions. */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
/* Magic marker for searching in crash dumps. */
#endif
/* CR2 is saved here for EPT syncing. */
struct
{
} Write;
struct
{
} Read;
#ifdef VBOX_STRICT
struct
{
} TestIn;
struct
{
} TestOut;
struct
{
} ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
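/* Illustrative sketch: with VMX_USE_CACHED_VMCS_ACCESSES, 64-bit VMCS writes
 * issued from 32-bit ring-0 code are queued in the cache and replayed with
 * VMWRITE once the 64-bit switcher is active. The Write.aField / aFieldVal /
 * cValidEntries member names are elided above and assumed here:
 *
 *     uint32_t const i = pCache->Write.cValidEntries;
 *     pCache->Write.aField[i]    = idxField;   // VMCS field encoding
 *     pCache->Write.aFieldVal[i] = u64Val;     // value to VMWRITE later
 *     pCache->Write.cValidEntries = i + 1;
 */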
/** VMX StartVM function. */
typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function. */
typedef FNHMVMXSTARTVM *PFNHMVMXSTARTVM;
/** SVM VMRun function. */
typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function. */
typedef FNHMSVMVMRUN *PFNHMSVMVMRUN;
/**
* HM VMCPU Instance data.
*/
typedef struct HMCPU
{
/** Set if we don't have to flush the TLB on VM entry. */
bool fResumeVM;
/** Set if we need to flush the TLB during the world switch. */
bool fForceTLBFlush;
/** Set when we're using VT-x or AMD-V at that moment. */
bool fActive;
/** Set when the TLB has been checked until we return from the world switch. */
volatile bool fCheckedTLBFlush;
/** World switch exit counter. */
volatile uint32_t cWorldSwitchExits;
/** HM_CHANGED_* flags. */
/** ID of the last CPU we were executing code on (NIL_RTCPUID for the first time). */
/** TLB flush count */
/** Current ASID in use by the VM */
/** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
struct
{
/** Physical address of the VM control structure (VMCS). */
/** R0 memory object for the VM control structure (VMCS). */
/** Virtual address of the VM control structure (VMCS). */
/** Ring 0 handlers for VT-x. */
#if HC_ARCH_BITS == 32
#endif
/** Current VMX_VMCS32_CTRL_PIN_EXEC. */
/** Current VMX_VMCS32_CTRL_PROC_EXEC. */
/** Current VMX_VMCS32_CTRL_PROC_EXEC2. */
/** Current VMX_VMCS32_CTRL_EXIT. */
/** Current VMX_VMCS32_CTRL_ENTRY. */
/** Physical address of the virtual APIC page for TPR caching. */
/** R0 memory object for the virtual APIC page for TPR caching. */
/** Virtual address of the virtual APIC page for TPR caching. */
#if HC_ARCH_BITS == 32
#endif
/** Current CR0 mask. */
/** Current CR4 mask. */
/** Current exception bitmap. */
/** The updated-guest-state mask. */
/** Current EPTP. */
/** Physical address of the MSR bitmap. */
/** R0 memory object for the MSR bitmap. */
/** Virtual address of the MSR bitmap. */
R0PTRTYPE(void *) pvMsrBitmap;
/** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
* for guest MSRs). */
/** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
* (used for guest MSRs). */
/** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
* for guest MSRs). */
R0PTRTYPE(void *) pvGuestMsr;
/** Physical address of the VM-exit MSR-load area (used for host MSRs). */
/** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
/** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
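/* Illustrative sketch: with VBOX_WITH_AUTO_MSR_LOAD_RESTORE each slot in the
 * guest MSR area is a VMXAUTOMSR entry that the CPU loads on VM-entry and
 * stores on VM-exit. The field names follow the VMXAUTOMSR layout in
 * hm_vmx.h; the value source is an assumption:
 *
 *     PVMXAUTOMSR pGuestMsr  = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
 *     pGuestMsr->u32Msr      = MSR_K8_TSC_AUX;
 *     pGuestMsr->u32Reserved = 0;
 *     pGuestMsr->u64Value    = u64GuestTscAux;   // guest TSC_AUX from CPUM
 */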
/** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
/** Last used TSC offset value (cached). */
/** VMCS cache. */
/** Real-mode emulation state. */
struct
{
} RealMode;
struct
{
} lasterror;
#ifdef VBOX_WITH_OLD_VTX_CODE
/** The last seen guest paging mode (by VT-x). */
/** Current guest paging mode (as seen by HMR3PagingModeChanged). */
/** Previous guest paging mode (as seen by HMR3PagingModeChanged). */
#else
/** Which host-state bits to restore before being preempted. */
/** The host-state restoration structure. */
/** Set if guest was executing in real mode (extra checks). */
bool fWasInRealMode;
#endif
} vmx;
struct
{
/** R0 memory object for the host VMCB which holds additional host-state. */
/** Physical address of the host VMCB which holds additional host-state. */
/** Virtual address of the host VMCB which holds additional host-state. */
R0PTRTYPE(void *) pvVmcbHost;
/** R0 memory object for the guest VMCB. */
/** Physical address of the guest VMCB. */
/** Virtual address of the guest VMCB. */
/** Ring 0 handlers for AMD-V. */
/** R0 memory object for the MSR bitmap (8 KB). */
/** Physical address of the MSR bitmap (8 KB). */
/** Virtual address of the MSR bitmap. */
R0PTRTYPE(void *) pvMsrBitmap;
/** Whether VTPR with V_INTR_MASKING set is in effect, indicating
* we should check if the VTPR changed on every VM-exit. */
bool fSyncVTpr;
/** Alignment padding. */
} svm;
/** Event injection state. */
struct
{
} Event;
/** IO Block emulation state. */
struct
{
bool fEnabled;
/** RIP at the start of the I/O code we wish to emulate in the recompiler. */
struct
{
/** Pending IO operation type. */
union
{
struct
{
} Port;
} s;
} PendingIO;
/** The PAE PDPEs used with Nested Paging (only valid when
* VMCPU_FF_HM_UPDATE_PAE_PDPES is set). */
/** Current shadow paging mode. */
/** The CPU ID of the CPU currently owning the VMCS. Set in
* HMR0Enter and cleared in HMR0Leave. */
/** To keep track of pending TLB shootdown pages. (SMP guest only) */
struct
{
} TlbShootdown;
/** For saving stack space, the disassembler state is allocated here instead of
* on the stack. */
#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
#endif
#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
#endif
#ifdef VBOX_WITH_STATISTICS
#endif
#ifdef HM_PROFILE_EXIT_DISPATCH
#endif
} HMCPU;
/** Pointer to HM VMCPU instance data. */
typedef HMCPU *PHMCPU;
#ifdef IN_RING0
#ifdef VBOX_STRICT
#else
# define HMDumpRegs(a, b, c) do { } while (0)
# define HMR0DumpDescriptor(a, b, c) do { } while (0)
#endif
# ifdef VBOX_WITH_KERNEL_USING_XMM
DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
# endif
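/* Why the wrappers above exist: with VBOX_WITH_KERNEL_USING_XMM the host
 * kernel owns the XMM registers, so the assembly wrappers save and restore
 * them around the inner world-switch call. Illustrative invocation; the
 * VMCSCache member name is an assumption:
 *
 *     rc = HMR0VMXStartVMWrapXMM(fResume, pCtx, &pVCpu->hm.s.vmx.VMCSCache,
 *                                pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
 */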
# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/**
 * Gets 64-bit GDTR and IDTR on Darwin.
 * @param pGdtr Where to store the 64-bit GDTR.
 * @param pIdtr Where to store the 64-bit IDTR.
 */
DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86DTR64 pGdtr, PX86DTR64 pIdtr);
/**
 * Gets 64-bit CR3 on Darwin.
 * @returns CR3
 */
DECLASM(uint64_t) HMR0Get64bitCR3(void);
# endif
#endif /* IN_RING0 */
/** @} */
#endif /* !___HMInternal_h */