/* HMInternal.h revision 784b2c1767a5e96b6ac8987b3b85b9ddbca9b306 */
/* $Id$ */
/** @file
* HM - Internal header file.
*/
/*
* Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#ifndef ___HMInternal_h
#define ___HMInternal_h
/* 64-bit guest support is enabled on 64-bit hosts, on hybrid 32-bit
 * kernels, and when the build explicitly requests 64-bit guests. */
#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined (VBOX_WITH_64_BITS_GUESTS)
/* Enable 64 bits guest support. */
# define VBOX_ENABLE_64_BITS_GUESTS
#endif
/* Cache VMCS field accesses instead of issuing VMREAD/VMWRITE each time.
 * NOTE(review): semantics inferred from the macro name - confirm against users. */
#define VMX_USE_CACHED_VMCS_ACCESSES
/* Emulate real-mode guests (presumably when unrestricted guest execution is
 * unavailable). NOTE(review): inferred from the macro name - confirm. */
#define HM_VMX_EMULATE_REALMODE
/* NOTE(review): the opening lines of this comment were lost in extraction;
 * the surviving text explains why the auto MSR-load/restore code must
 * handle this MSR manually. See @bugref{6208}. This is clearly visible while
* booting Solaris 11 (11.1 b19) VMs with 2 Cpus.
*
* Note: don't forget to update the assembly files while modifying this!
*/
/* Use the VT-x automatic MSR load/restore area (see the comment fragment above
 * about manual MSR handling and @bugref{6208}).
 * NOTE(review): the '#if' that originally guarded this indented '# define'
 * appears to have been lost in extraction. */
# define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
/** @defgroup grp_hm_int Internal
 * @ingroup grp_hm
 * @internal
 * @{
 */
/** Maximum number of exit reason statistics counters. */
#define MAX_EXITREASON_STAT 0x100
/** Mask applied to an exit reason before indexing the exit-reason statistics
 *  counters (MAX_EXITREASON_STAT - 1). */
#define MASK_EXITREASON_STAT 0xff
/** Mask applied to an injected interrupt number for statistics indexing. */
#define MASK_INJECT_IRQ_STAT 0xff
/** @name Changed flags
 * These flags are used to keep track of which important registers
 * have been changed since last they were reset.
 * @{
 */
#define HM_CHANGED_GUEST_FPU RT_BIT(0)
/* NOTE(review): the remaining HM_CHANGED_GUEST_* bit defines and the bodies
 * of the two aggregate masks below were lost in extraction; both of the
 * following lines end in a dangling '\' continuation and must be restored
 * from upstream before this header can compile. */
#define HM_CHANGED_ALL ( HM_CHANGED_GUEST_SEGMENT_REGS \
#define HM_CHANGED_ALL_GUEST ( HM_CHANGED_GUEST_SEGMENT_REGS \
/** @} */
/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
#define HM_MAX_TLB_SHOOTDOWN_PAGES 8
/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
/** Total guest mapped memory needed. */
/* NOTE(review): the three #define lines documented by the comments above were
 * lost in extraction. */
/** Enable for TPR guest patching. */
#define VBOX_HM_WITH_GUEST_PATCHING
/** HM saved-state (SSM) unit version; bumped to 5 when TPR guest patching
 * state is included.
 */
#ifdef VBOX_HM_WITH_GUEST_PATCHING
# define HM_SSM_VERSION 5
# define HM_SSM_VERSION_NO_PATCHING 4
#else
# define HM_SSM_VERSION 4
# define HM_SSM_VERSION_NO_PATCHING 4
#endif
/** SSM version used by the VirtualBox 2.0.x series. */
#define HM_SSM_VERSION_2_0_X 3
/**
 * Global per-cpu information. (host)
 *
 * NOTE(review): extraction dropped the data members described by the orphaned
 * comments below (CPU id, memory object, current ASID, flush count) and the
 * struct's closing '} HMGLOBLCPUINFO;' line - restore from upstream.
 */
typedef struct HMGLOBLCPUINFO
{
/** The CPU ID. */
/** The memory object */
/** Current ASID (AMD-V) / VPID (Intel). */
/** TLB flush count. */
/** Set if the ASID/VPID must be flushed before (re)use. */
bool fFlushAsidBeforeUse;
/** Configured for VT-x or AMD-V. */
bool fConfigured;
/** Set if the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active. */
bool fIgnoreAMDVInUseError;
/** In use by our code. (for power suspend) */
volatile bool fInUse;
/** Pointer to the per-cpu global information. */
typedef HMGLOBLCPUINFO *PHMGLOBLCPUINFO;
/**
 * Pending I/O operation state (presumably used with HMCPU's PendingIO
 * substructure - NOTE(review): confirm; the enumeration values between
 * INVALID and the 32-bit hack were lost in extraction).
 */
typedef enum
{
HMPENDINGIO_INVALID = 0,
/** The usual 32-bit paranoia. */
HMPENDINGIO_32BIT_HACK = 0x7fffffff
} HMPENDINGIO;
/**
 * TPR-access instruction type used by the TPR patching code (see HMTPRPATCH).
 * NOTE(review): all real HMTPRINSTR_* values were lost in extraction; only
 * the size-forcing member survives.
 */
typedef enum
{
/** The usual 32-bit paranoia. */
HMTPRINSTR_32BIT_HACK = 0x7fffffff
} HMTPRINSTR;
/**
 * TPR patch record, keyed by guest instruction address (stored in the AVL
 * patch tree in struct HM).
 * NOTE(review): every data member described by the comments below was lost
 * in extraction, leaving an empty struct body - restore from upstream.
 */
typedef struct
{
/** The key is the address of patched instruction. (32 bits GC ptr) */
/** Original opcode. */
/** Instruction size. */
/** Replacement opcode. */
/** Replacement instruction size. */
/** Instruction type. */
/** Source operand. */
/** Destination operand. */
/** Number of times the instruction caused a fault. */
/** Patch address of the jump replacement. */
} HMTPRPATCH;
/** Pointer to HMTPRPATCH. */
typedef HMTPRPATCH *PHMTPRPATCH;
/**
 * Switcher function, HC to RC.
 *
 * @param pVM Pointer to the VM.
 * @param uOffsetVMCPU VMCPU offset from pVM
 * @returns Return code indicating the action to take.
 *
 * NOTE(review): the 'typedef ... FNHMSWITCHERHC(...)' declaration this
 * comment documented was lost in extraction; the pointer typedef below now
 * references an undeclared type name.
 */
/** Pointer to switcher function. */
typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;
/**
 * HM VM Instance data.
 * Changes to this must be checked against the padding of the hm union in VM!
 *
 * NOTE(review): a large number of non-bool members (the declarations the
 * orphaned comments below describe) were stripped in extraction; restore
 * from upstream before use.
 */
typedef struct HM
{
/** Set when we've initialized VMX or SVM. */
bool fInitialized;
/** Set when hardware acceleration is allowed. */
bool fAllowed;
/** Set if nested paging is enabled. */
bool fNestedPaging;
/** Set if nested paging is allowed. */
bool fAllowNestedPaging;
/** Set if large pages are enabled (requires nested paging). */
bool fLargePages;
/** Set if we can support 64-bit guests or not. */
bool fAllow64BitGuests;
/** Set if an IO-APIC is configured for this VM. */
bool fHasIoApic;
/** Set when TPR patching is allowed. */
bool fTRPPatchingAllowed;
/** Set when we initialize VT-x or AMD-V once for all CPUs. */
bool fGlobalInit;
/** Set when TPR patching is active. */
bool fTPRPatchingActive;
/** Explicit padding. */
bool u8Alignment[6];
/** And mask for copying register contents. */
/** Maximum ASID allowed. */
/** The maximum number of resumes loops allowed in ring-0 (safety precaution).
 * This number is set much higher when RTThreadPreemptIsPending is reliable. */
/** Guest allocated memory for patching purposes. */
/** Current free pointer inside the patch block. */
/** Size of the guest patch memory block. */
#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
/** 32 to 64 bits switcher entrypoint. */
/* AMD-V 64 bits vmrun handler */
/* VT-x 64 bits vmlaunch handler */
/* RC handler to setup the 64 bits FPU state. */
/* RC handler to setup the 64 bits debug state. */
/* Test handler */
/*#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
uint32_t u32Alignment[1]; */
#endif
/** VT-x specific state. */
struct
{
/** Set by the ring-0 side of HM to indicate VMX is supported by the
 * CPU. */
bool fSupported;
/** Set when we've enabled VMX. */
bool fEnabled;
/** Set if VPID is supported. */
bool fVpid;
/** Set if VT-x VPID is allowed. */
bool fAllowVpid;
/** Set if unrestricted guest execution is allowed (real and protected mode without paging). */
bool fUnrestrictedGuest;
/** Whether we're using the preemption timer or not. */
bool fUsePreemptTimer;
/** The shift mask employed by the VMX-Preemption timer. */
bool uAlignment[1];
/** Virtual address of the TSS page used for real mode emulation. */
/** Virtual address of the identity page table used for real mode and protected mode without paging emulation in EPT mode. */
/** R0 memory object for the APIC-access page. */
/** Physical address of the APIC-access page. */
/** Virtual address of the APIC-access page. */
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
#endif
/** Ring 0 handlers for VT-x. */
/* NOTE(review): unbalanced '#endif' below - its matching '#if' was lost in
 * extraction. */
#endif
/** Host CR4 value (set by ring-0 VMX init) */
/** Host EFER value (set by ring-0 VMX init) */
/** VMX MSR values */
struct
{
} msr;
/** Flush types for invept & invvpid; they depend on capabilities. */
} vmx;
/** AMD-V specific state. */
struct
{
/** Set by the ring-0 side of HM to indicate SVM is supported by the
 * CPU. */
bool fSupported;
/** Set when we've enabled SVM. */
bool fEnabled;
/** Set if erratum 170 affects the AMD cpu. */
bool fAlwaysFlushTLB;
/** Set when the hack to ignore VERR_SVM_IN_USE is active. */
bool fIgnoreInUseError;
/** R0 memory object for the IO bitmap (12kb). */
/** Physical address of the IO bitmap (12kb). */
/** Virtual address of the IO bitmap. */
R0PTRTYPE(void *) pvIOBitmap;
/* HWCR MSR (for diagnostics) */
/** SVM revision. */
/** SVM feature bits from cpuid 0x8000000a */
} svm;
/**
 * AVL tree with all patches (active or disabled) sorted by guest instruction address.
 * NOTE(review): the tree member itself was stripped in extraction.
 */
/** CPUID-related data (members stripped in extraction). */
struct
{
} cpuid;
/** Saved error from detection */
/** HMR0Init was run */
bool fHMR0Init;
/** Explicit padding. */
bool u8Alignment1[7];
} HM;
/** Pointer to HM VM instance data.
 * NOTE(review): the pointer typedef this comment documented was lost in
 * extraction. */
/* Maximum number of cached entries. */
#define VMCSCACHE_MAX_ENTRY 128
/* Structure for storing read and write VMCS actions.
 * NOTE(review): all data members were stripped in extraction, leaving the
 * nested structs empty - restore from upstream. */
typedef struct VMCSCACHE
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
/* Magic marker for searching in crash dumps. */
#endif
/* CR2 is saved here for EPT syncing. */
/** Cached VMCS write actions. */
struct
{
} Write;
/** Cached VMCS read actions. */
struct
{
} Read;
#ifdef DEBUG
/** Debug-only instrumentation. */
struct
{
} TestIn;
struct
{
} TestOut;
struct
{
} ScratchPad;
#endif
} VMCSCACHE;
/** Pointer to VMCSCACHE. */
typedef VMCSCACHE *PVMCSCACHE;
/** VMX StartVM function.
 * @param fResume Presumably whether to VMRESUME rather than VMLAUNCH -
 *                NOTE(review): confirm against the assembly implementations.
 * @param pCtx    Guest CPU context.
 * @param pCache  VMCS cache.
 * @param pVM     Pointer to the VM.
 * @param pVCpu   Pointer to the virtual CPU. */
typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
/** Pointer to a VMX StartVM function.
 * NOTE(review): the actual pointer typedef line was lost in extraction. */
/** SVM VMRun function.
 * @param pVMCBHostPhys Physical address of the host VMCB.
 * @param pVMCBPhys     Physical address of the guest VMCB.
 * @param pCtx          Guest CPU context.
 * @param pVM           Pointer to the VM.
 * @param pVCpu         Pointer to the virtual CPU. */
typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
/** Pointer to a SVM VMRun function.
 * NOTE(review): the actual pointer typedef line was lost in extraction. */
/**
 * HM VMCPU Instance data.
 *
 * NOTE(review): many members (the declarations the orphaned comments below
 * describe) were stripped in extraction; restore from upstream before use.
 */
typedef struct HMCPU
{
/** Old style FPU reporting trap mask override performed (optimization) */
bool fFPUOldStyleOverride;
/** Set if we don't have to flush the TLB on VM entry. */
bool fResumeVM;
/** Set if we need to flush the TLB during the world switch. */
bool fForceTLBFlush;
/** Set when we're using VT-x or AMD-V at that moment. */
bool fActive;
/** Set when the TLB has been checked until we return from the world switch. */
volatile bool fCheckedTLBFlush;
/** World switch exit counter. */
volatile uint32_t cWorldSwitchExits;
/** HM_CHANGED_* flags. */
/** Id of the last cpu we were executing code on (NIL_RTCPUID for the first time) */
/** TLB flush count */
/** Current ASID in use by the VM */
/* Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
/** VT-x per-VCPU state. */
struct
{
/** Physical address of the VM control structure (VMCS). */
/** R0 memory object for the VM control structure (VMCS). */
/** Virtual address of the VM control structure (VMCS). */
/** Ring 0 handlers for VT-x. */
#if HC_ARCH_BITS == 32
#endif
/** Current VMX_VMCS_CTRL_PROC_EXEC_CONTROLS. */
/** Current VMX_VMCS_CTRL_PROC_EXEC2_CONTROLS. */
/** Physical address of the virtual APIC page for TPR caching. */
/** R0 memory object for the virtual APIC page for TPR caching. */
/** Virtual address of the virtual APIC page for TPR caching. */
/** Current CR0 mask. */
/** Current CR4 mask. */
/** Current EPTP. */
/** Physical address of the MSR bitmap. */
/** R0 memory object for the MSR bitmap. */
/** Virtual address of the MSR bitmap. */
R0PTRTYPE(void *) pvMsrBitmap;
/** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
 * for guest MSRs). */
/** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
 * (used for guest MSRs). */
/** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
 * for guest MSRs). */
R0PTRTYPE(void *) pvGuestMsr;
/** Physical address of the VM-exit MSR-load area (used for host MSRs). */
/** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
/** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
/* NOTE(review): unbalanced '#endif' below - the matching
 * '#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE' was lost in extraction. */
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
/* Last use TSC offset value. (cached) */
/** VMCS cache. */
/** Real-mode emulation state. */
struct
{
} RealMode;
/** Last-error record (members stripped in extraction). */
struct
{
} lasterror;
/** The last seen guest paging mode (by VT-x). */
/** Current guest paging mode (as seen by HMR3PagingModeChanged). */
/** Previous guest paging mode (as seen by HMR3PagingModeChanged). */
} vmx;
/** AMD-V per-VCPU state. */
struct
{
/** R0 memory object for the host VM control block (VMCB). */
/** Physical address of the host VM control block (VMCB). */
/** Virtual address of the host VM control block (VMCB). */
R0PTRTYPE(void *) pvVMCBHost;
/** R0 memory object for the VM control block (VMCB). */
/** Physical address of the VM control block (VMCB). */
/** Virtual address of the VM control block (VMCB). */
/** Ring 0 handlers for VT-x. */
/** R0 memory object for the MSR bitmap (8kb). */
/** Physical address of the MSR bitmap (8kb). */
/** Virtual address of the MSR bitmap. */
R0PTRTYPE(void *) pvMsrBitmap;
} svm;
/** Event injection state. */
struct
{
} Event;
/** IO Block emulation state.
 * NOTE(review): the closing brace/name line of the struct opened below was
 * lost in extraction, so the following members now nest in the wrong scope. */
struct
{
bool fEnabled;
/** RIP at the start of the io code we wish to emulate in the recompiler. */
/** Pending I/O operation to hand to the recompiler. */
struct
{
/* Pending IO operation type. */
union
{
struct
{
unsigned uPort;
unsigned uAndVal;
unsigned cbSize;
} Port;
} s;
} PendingIO;
/** Currently shadow paging mode. */
/** The CPU ID of the CPU currently owning the VMCS. Set in
 * HMR0Enter and cleared in HMR0Leave. */
/** To keep track of pending TLB shootdown pages. (SMP guest only) */
struct
{
unsigned cPages;
} TlbShootdown;
/** For saving stack space, the disassembler state is allocated here instead of
 * on the stack. */
#if 1 /* temporary for tracking down darwin issues. */
#endif
#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
#endif
#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
#endif
#ifdef VBOX_WITH_STATISTICS
#endif
} HMCPU;
/** Pointer to HM VM instance data.
 * NOTE(review): the pointer typedef this comment documented was lost in
 * extraction. */
#ifdef IN_RING0
/* NOTE(review): the VBOX_STRICT branch originally contained the real
 * HMDumpRegs / HMR0DumpDescriptor declarations; they were lost in
 * extraction, leaving only the no-op stubs in the #else branch. */
#ifdef VBOX_STRICT
#else
# define HMDumpRegs(a, b ,c) do { } while (0)
# define HMR0DumpDescriptor(a, b, c) do { } while (0)
#endif
# ifdef VBOX_WITH_KERNEL_USING_XMM
/** Wrapper around the VT-x StartVM call used when the kernel uses XMM
 * registers (NOTE(review): presumably saves/restores XMM state - confirm
 * against the assembly implementation). */
DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
/** Wrapper around the SVM VMRun call used when the kernel uses XMM
 * registers (same caveat as above). */
DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
# endif
# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/**
 * Gets 64-bit GDTR and IDTR on darwin.
 * @param pGdtr Where to store the 64-bit GDTR.
 * @param pIdtr Where to store the 64-bit IDTR.
 *
 * NOTE(review): the DECLASM prototype this comment documented was lost in
 * extraction.
 */
/**
 * Gets 64-bit CR3 on darwin.
 * @returns CR3
 *
 * NOTE(review): the DECLASM prototype this comment documented was lost in
 * extraction.
 */
# endif
#endif /* IN_RING0 */
/** @} */
#endif