HMSVMR0.cpp revision a5d217f05d3d5d1c0cb583890149af86564d074d
/* $Id$ */
/** @file
* HM SVM (AMD-V) - Host Context Ring-0.
*/
/*
* Copyright (C) 2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#ifdef DEBUG_ramshankar
# define HMSVM_ALWAYS_TRAP_ALL_XCPTS
# define HMSVM_ALWAYS_TRAP_PF
#endif
/*******************************************************************************
* Defined Constants And Macros *
*******************************************************************************/
#ifdef VBOX_WITH_STATISTICS
# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
        if ((u64ExitCode) == SVM_EXIT_NPF) \
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); \
        else \
            STAM_COUNTER_INC(&pVCpu->hm.s.aStatExitReason[(u64ExitCode) & MASK_EXITREASON_STAT]); \
    } while (0)
#else
# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
#endif
/** If we decide to use a function table approach this can be useful to
* switch to a "static DECLCALLBACK(int)". */
#define HMSVM_EXIT_DECL static int
/** @name Segment attribute conversion between CPU and AMD-V VMCB format.
*
* The CPU format of the segment attribute is described in X86DESCATTRBITS
* which is 16-bits (i.e. includes 4 bits of the segment limit).
*
 * In the AMD-V VMCB format the segment attribute is a compact 12 bits (strictly
 * only the attribute bits and nothing else); the upper 4 bits are unused.
*
* @{ */
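/*
 * A minimal illustrative sketch of the conversion (the macro names below are assumptions and may
 * differ from the helpers used elsewhere in this file). The CPU format keeps the limit bits 19:16
 * in attribute bits 11:8, so the VMCB form drops them and packs AVL/L/D/G from bits 15:12 down
 * into bits 11:8; the reverse conversion leaves the limit bits zero.
 */
#define HMSVM_SKETCH_CPU_2_VMCB_SEG_ATTR(a)     ( ((a) & 0xff) | (((a) & 0xf000) >> 4) )
#define HMSVM_SKETCH_VMCB_2_CPU_SEG_ATTR(a)     ( ((a) & 0xff) | (((a) & 0x0f00) << 4) )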
/** @} */
/** @name Macros for loading and storing segment registers to/from the VMCB.
 * @{ */
do \
{ \
} while (0)
do \
{ \
} while (0)
/** @} */
/** @name VMCB Clean Bits.
*
* These flags are used for VMCB-state caching. A set VMCB Clean Bit indicates
* AMD-V doesn't need to reload the corresponding value(s) from the VMCB in
* memory.
*
* @{ */
/** All intercepts vectors, TSC offset, PAUSE filter counter. */
#define HMSVM_VMCB_CLEAN_INTERCEPTS RT_BIT(0)
/** I/O permission bitmap, MSR permission bitmap. */
/** ASID. */
/** TRP: V_TPR, V_IRQ, V_INTR_PRIO, V_IGN_TPR, V_INTR_MASKING,
V_INTR_VECTOR. */
/** Nested Paging: Nested CR3 (nCR3), PAT. */
/** Control registers (CR0, CR3, CR4, EFER). */
/** Debug registers (DR6, DR7). */
/** GDT, IDT limit and base. */
/** Segment register: CS, SS, DS, ES limit and base. */
/** CR2.*/
/** Last-branch record (DbgCtlMsr, br_from, br_to, lastint_from, lastint_to) */
/** AVIC (AVIC APIC_BAR; AVIC APIC_BACKING_PAGE, AVIC
PHYSICAL_TABLE and AVIC LOGICAL_TABLE Pointers). */
/** Mask of all valid VMCB Clean bits. */
/** @} */
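/*
 * A compact sketch of the architectural clean-bit positions documented above (per the AMD APM,
 * "VMCB State Caching"). The enumerator names are purely illustrative; the defines actually used
 * elsewhere in this file may be named differently.
 */
enum
{
    kSvmVmcbCleanSketch_IopmMsrpm = RT_BIT(1),   /* I/O and MSR permission bitmaps. */
    kSvmVmcbCleanSketch_Asid      = RT_BIT(2),   /* ASID. */
    kSvmVmcbCleanSketch_Tpr       = RT_BIT(3),   /* V_TPR, V_IRQ, V_INTR_PRIO, V_IGN_TPR, V_INTR_MASKING, V_INTR_VECTOR. */
    kSvmVmcbCleanSketch_Np        = RT_BIT(4),   /* Nested paging: nCR3, PAT. */
    kSvmVmcbCleanSketch_Crx       = RT_BIT(5),   /* CR0, CR3, CR4, EFER. */
    kSvmVmcbCleanSketch_Drx       = RT_BIT(6),   /* DR6, DR7. */
    kSvmVmcbCleanSketch_Dt        = RT_BIT(7),   /* GDT, IDT limit and base. */
    kSvmVmcbCleanSketch_Seg       = RT_BIT(8),   /* CS, SS, DS, ES limit and base. */
    kSvmVmcbCleanSketch_Cr2       = RT_BIT(9),   /* CR2. */
    kSvmVmcbCleanSketch_Lbr       = RT_BIT(10),  /* Last-branch record state. */
    kSvmVmcbCleanSketch_Avic      = RT_BIT(11)   /* AVIC APIC_BAR, backing page, physical and logical table pointers. */
};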
/** @name SVM transient.
*
* A state structure for holding miscellaneous information across AMD-V
 * VMRUN/#VMEXIT operations, restored after the transition.
*
* @{ */
typedef struct SVMTRANSIENT
{
#if HC_ARCH_BITS == 32
#endif
/** The #VMEXIT exit code (the EXITCODE field in the VMCB). */
/** The guest's TPR value used for TPR shadowing. */
/** @} */
/**
* MSRPM (MSR permission bitmap) read permissions (for guest RDMSR).
*/
typedef enum SVMMSREXITREAD
{
/** Reading this MSR causes a VM-exit. */
SVMMSREXIT_INTERCEPT_READ = 0xb,
/** Reading this MSR does not cause a VM-exit. */
/**
* MSRPM (MSR permission bitmap) write permissions (for guest WRMSR).
*/
typedef enum SVMMSREXITWRITE
{
/** Writing to this MSR causes a VM-exit. */
SVMMSREXIT_INTERCEPT_WRITE = 0xd,
/** Writing to this MSR does not cause a VM-exit. */
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite);
/*******************************************************************************
* Global Variables *
*******************************************************************************/
/** Ring-0 memory object for the IO bitmap. */
RTR0MEMOBJ  g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
/** Physical address of the IO bitmap. */
RTHCPHYS    g_HCPhysIOBitmap  = 0;
/** Virtual address of the IO bitmap. */
void       *g_pvIOBitmap      = NULL;
/**
* Sets up and activates AMD-V on the current CPU.
*
* @returns VBox status code.
* @param pCpu Pointer to the CPU info struct.
* @param pVM Pointer to the VM (can be NULL after a resume!).
* @param pvCpuPage Pointer to the global CPU page.
* @param HCPhysCpuPage Physical address of the global CPU page.
 * @param fEnabledByHost Whether the host OS has already enabled AMD-V.
*/
VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
{
/*
* We must turn on AMD-V and setup the host state physical address, as those MSRs are per CPU.
*/
    uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
    if (u64HostEfer & MSR_K6_EFER_SVME)
{
/* If the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE is active, then we blindly use AMD-V. */
if ( pVM
{
pCpu->fIgnoreAMDVInUseError = true;
}
if (!pCpu->fIgnoreAMDVInUseError)
return VERR_SVM_IN_USE;
}
    /* Turn on AMD-V in the EFER MSR. */
    ASMWrMsr(MSR_K6_EFER, u64HostEfer | MSR_K6_EFER_SVME);
    /* Write the physical page address where the CPU will store the host state while executing the VM. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);
/*
 * Theoretically, other hypervisors may have used ASIDs; ideally we should flush all non-zero ASIDs
 * when enabling SVM. AMD doesn't have an SVM instruction to flush all ASIDs (flushing is done
 * upon VMRUN). Therefore, just set the fFlushAsidBeforeUse flag which instructs hmR0SvmSetupTLB()
 * to flush the TLB before using a new ASID.
*/
pCpu->fFlushAsidBeforeUse = true;
/*
 * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
*/
++pCpu->cTlbFlushes;
return VINF_SUCCESS;
}
/**
* Deactivates AMD-V on the current CPU.
*
* @returns VBox status code.
* @param pCpu Pointer to the CPU info struct.
* @param pvCpuPage Pointer to the global CPU page.
* @param HCPhysCpuPage Physical address of the global CPU page.
*/
{
    /* Turn off AMD-V in the EFER MSR if AMD-V is active. */
    uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
    if (u64HostEfer & MSR_K6_EFER_SVME)
    {
        ASMWrMsr(MSR_K6_EFER, u64HostEfer & ~MSR_K6_EFER_SVME);

        /* Invalidate host state physical address. */
        ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
    }
return VINF_SUCCESS;
}
/**
* Does global AMD-V initialization (called during module initialization).
*
* @returns VBox status code.
*/
VMMR0DECL(int) SVMR0GlobalInit(void)
{
/*
* Allocate 12 KB for the IO bitmap. Since this is non-optional and we always intercept all IO accesses, it's done
* once globally here instead of per-VM.
*/
    int rc = RTR0MemObjAllocCont(&g_hMemObjIOBitmap, 3 << PAGE_SHIFT, false /* fExecutable */);
    if (RT_FAILURE(rc))
        return rc;
    g_pvIOBitmap     = RTR0MemObjAddress(g_hMemObjIOBitmap);
    g_HCPhysIOBitmap = RTR0MemObjGetPagePhysAddr(g_hMemObjIOBitmap, 0 /* iPage */);

    /* Set all bits to intercept all IO accesses. */
    ASMMemFill32(g_pvIOBitmap, 3 << PAGE_SHIFT, UINT32_C(0xffffffff));
    return VINF_SUCCESS;
}
/**
 * Does global AMD-V termination (called during module termination).
*/
VMMR0DECL(void) SVMR0GlobalTerm(void)
{
if (g_hMemObjIOBitmap != NIL_RTR0MEMOBJ)
{
        RTR0MemObjFree(g_hMemObjIOBitmap, true /* fFreeMappings */);
        g_pvIOBitmap      = NULL;
        g_HCPhysIOBitmap  = 0;
        g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
}
}
/**
* Frees any allocated per-VCPU structures for a VM.
*
* @param pVM Pointer to the VM.
*/
{
{
{
}
{
}
{
}
}
}
/**
* Does per-VM AMD-V initialization.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
{
int rc = VERR_INTERNAL_ERROR_5;
/*
* Check for an AMD CPU erratum which requires us to flush the TLB before every world-switch.
*/
{
Log4(("SVMR0InitVM: AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
}
/*
* Initialize the R0 memory objects up-front so we can properly cleanup on allocation failures.
*/
{
}
{
/*
* Allocate one page for the host-context VM control block (VMCB). This is used for additional host-state (such as
* FS, GS, Kernel GS Base, etc.) apart from the host-state save area specified in MSR_K8_VM_HSAVE_PA.
*/
rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcbHost, 1 << PAGE_SHIFT, false /* fExecutable */);
if (RT_FAILURE(rc))
goto failure_cleanup;
pVCpu->hm.s.svm.HCPhysVmcbHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcbHost, 0 /* iPage */);
/*
* Allocate one page for the guest-state VMCB.
*/
if (RT_FAILURE(rc))
goto failure_cleanup;
/*
* Allocate two pages (8 KB) for the MSR permission bitmap. There doesn't seem to be a way to convince
* SVM to not require one.
*/
rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
if (RT_FAILURE(rc))
pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0 /* iPage */);
/* Set all bits to intercept all MSR accesses (changed later on). */
}
return VINF_SUCCESS;
return rc;
}
/**
* Does per-VM AMD-V termination.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
{
return VINF_SUCCESS;
}
/**
* Sets the permission bits for the specified MSR in the MSRPM.
*
* @param pVCpu Pointer to the VMCPU.
* @param uMsr The MSR for which the access permissions are being set.
* @param enmRead MSR read permissions.
* @param enmWrite MSR write permissions.
*/
static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
{
    uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
    unsigned ulBit;
/*
* Layout:
* Byte offset MSR range
* 0x000 - 0x7ff 0x00000000 - 0x00001fff
* 0x800 - 0xfff 0xc0000000 - 0xc0001fff
* 0x1000 - 0x17ff 0xc0010000 - 0xc0011fff
* 0x1800 - 0x1fff Reserved
*/
    if (uMsr <= 0x00001FFF)
    {
        /* Pentium-compatible MSRs. */
        ulBit = uMsr * 2;
    }
else if ( uMsr >= 0xC0000000
&& uMsr <= 0xC0001FFF)
{
        /* AMD Sixth Generation x86 Processor MSRs. */
        ulBit = (uMsr - 0xC0000000) * 2;
        pbMsrBitmap += 0x800;
}
else if ( uMsr >= 0xC0010000
&& uMsr <= 0xC0011FFF)
{
        /* AMD Seventh and Eighth Generation Processor MSRs. */
        ulBit = (uMsr - 0xC0010000) * 2;
        pbMsrBitmap += 0x1000;
}
else
{
AssertFailed();
return;
}
    if (enmRead == SVMMSREXIT_INTERCEPT_READ)
        ASMBitSet(pbMsrBitmap, ulBit);
    else
        ASMBitClear(pbMsrBitmap, ulBit);
    if (enmWrite == SVMMSREXIT_INTERCEPT_WRITE)
        ASMBitSet(pbMsrBitmap, ulBit + 1);
    else
        ASMBitClear(pbMsrBitmap, ulBit + 1);
}
/**
* Sets up AMD-V for the specified VM.
 * This function is only called once per-VM during initialization.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
*/
{
int rc = VINF_SUCCESS;
{
/* Trap exceptions unconditionally (debug purposes). */
#ifdef HMSVM_ALWAYS_TRAP_PF
#endif
#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
| RT_BIT(X86_XCPT_MF);
#endif
/* Set up unconditional intercepts and conditions. */
                                | SVM_CTRL1_INTERCEPT_VINTR         /* When the guest enables interrupts, cause a VM-exit. */
                                | SVM_CTRL1_INTERCEPT_NMI           /* Non-maskable interrupts cause a VM-exit. */
                                | SVM_CTRL1_INTERCEPT_SMI           /* System Management Interrupts cause a VM-exit. */
| SVM_CTRL1_INTERCEPT_INIT /* INIT signal causes a VM-exit. */
| SVM_CTRL1_INTERCEPT_RDPMC /* RDPMC causes a VM-exit. */
| SVM_CTRL1_INTERCEPT_CPUID /* CPUID causes a VM-exit. */
| SVM_CTRL1_INTERCEPT_RSM /* RSM causes a VM-exit. */
| SVM_CTRL1_INTERCEPT_HLT /* HLT causes a VM-exit. */
| SVM_CTRL1_INTERCEPT_INOUT_BITMAP /* Use the IOPM to cause IOIO VM-exits. */
| SVM_CTRL1_INTERCEPT_MSR_SHADOW /* MSR access not covered by MSRPM causes a VM-exit.*/
| SVM_CTRL1_INTERCEPT_INVLPGA /* INVLPGA causes a VM-exit. */
                                | SVM_CTRL1_INTERCEPT_SHUTDOWN      /* Shutdown events cause a VM-exit. */
| SVM_CTRL1_INTERCEPT_FERR_FREEZE; /* Intercept "freezing" during legacy FPU handling. */
| SVM_CTRL2_INTERCEPT_VMMCALL /* VMMCALL causes a VM-exit. */
| SVM_CTRL2_INTERCEPT_VMLOAD /* VMLOAD causes a VM-exit. */
| SVM_CTRL2_INTERCEPT_VMSAVE /* VMSAVE causes a VM-exit. */
| SVM_CTRL2_INTERCEPT_STGI /* STGI causes a VM-exit. */
| SVM_CTRL2_INTERCEPT_CLGI /* CLGI causes a VM-exit. */
| SVM_CTRL2_INTERCEPT_SKINIT /* SKINIT causes a VM-exit. */
| SVM_CTRL2_INTERCEPT_WBINVD /* WBINVD causes a VM-exit. */
| SVM_CTRL2_INTERCEPT_MONITOR /* MONITOR causes a VM-exit. */
| SVM_CTRL2_INTERCEPT_MWAIT; /* MWAIT causes a VM-exit. */
/* CR0, CR4 reads must be intercepted, our shadow values are not necessarily the same as the guest's. */
/* CR0, CR4 writes must be intercepted for the same reasons as above. */
/* Intercept all DRx reads and writes by default. Changed later on. */
/* Ignore the priority in the TPR; we take into account the guest TPR anyway while delivering interrupts. */
/* Set IO and MSR bitmap permission bitmap physical addresses. */
/* No LBR virtualization. */
/* Initially set all VMCB clean bits to 0 indicating that everything should be loaded from memory. */
        /* The guest ASID must be non-zero (MBNZ); set it to 1. The host uses 0. */
/*
* Setup the PAT MSR (applicable for Nested Paging only).
* The default value should be 0x0007040600070406ULL, but we want to treat all guest memory as WB,
* so choose type 6 for all PAT slots.
*/
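        /* A minimal sketch, assuming the usual VMCB guest-state PAT field (the member name u64GPAT
           is an assumption): with all eight PAT slots set to type 6 (write-back) the value is: */
        pVmcb->guest.u64GPAT = UINT64_C(0x0006060606060606);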
        /* Without Nested Paging, we need additional intercepts. */
{
/* Intercept INVLPG and task switches (may change CR3, EFLAGS, LDT). */
/* Page faults must be intercepted to implement shadow paging. */
}
        /*
         * The following MSRs are saved/restored automatically during the world-switch, so we don't
         * intercept guest read/write accesses to them.
         */
hmR0SvmSetMsrPermission(pVCpu, MSR_K8_SF_MASK, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
hmR0SvmSetMsrPermission(pVCpu, MSR_K8_FS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
hmR0SvmSetMsrPermission(pVCpu, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
hmR0SvmSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
}
return rc;
}
/**
* Flushes the appropriate tagged-TLB entries.
*
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
*/
{
/*
* Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
* This can happen both for start & resume due to long jumps back to ring-3.
* If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB,
* so we cannot reuse the ASIDs without flushing.
*/
bool fNewAsid = false;
{
fNewAsid = true;
}
/* Set TLB flush state as checked until we return from the world switch. */
/* Check for explicit TLB shootdowns. */
{
}
{
/*
* This is the AMD erratum 170. We need to flush the entire TLB for each world switch. Sad.
*/
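        /* A sketch of the flush itself, assuming the usual VMCB TLB-control field and flush-type
           constant (both the member and constant names here are assumptions): */
        pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;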
}
{
if (fNewAsid)
{
++pCpu->uCurrentAsid;
bool fHitASIDLimit = false;
{
fHitASIDLimit = true;
{
pCpu->fFlushAsidBeforeUse = true;
}
else
{
pCpu->fFlushAsidBeforeUse = false;
}
}
if ( !fHitASIDLimit
&& pCpu->fFlushAsidBeforeUse)
{
else
{
pCpu->fFlushAsidBeforeUse = false;
}
}
}
else
{
else
}
}
else
{
/** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
* not be executed. See hmQueueInvlPage() where it is commented
* out. Support individual entry flushing someday. */
{
/* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
}
}
/* Update VMCB with the ASID. */
{
}
("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
#ifdef VBOX_WITH_STATISTICS
{
}
else
#endif
}
/** @name 64-bit guest on 32-bit host OS helper functions.
*
 * The host CPU is still 64-bit capable but the host OS is running in 32-bit
 * mode, so we use the 32->64 switcher to execute 64-bit guests.
*
* @{ */
#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
/**
* Prepares for and executes VMRUN (64-bit guests on a 32-bit host).
*
* @returns VBox status code.
* @param HCPhysVmcbHost Physical address of host VMCB.
* @param HCPhysVmcb Physical address of the VMCB.
* @param pCtx Pointer to the guest-CPU context.
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
*/
DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)
{
}
/**
* Executes the specified VMRUN handler in 64-bit mode.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
* @param enmOp The operation to perform.
* @param cbParam Number of parameters.
* @param paParam Array of 32-bit parameters.
*/
VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
                                         uint32_t *paParam)
{
/* Disable interrupts. */
#endif
    for (int i = (int)cbParam - 1; i >= 0; i--)
        CPUMPushHyper(pVCpu, paParam[i]);
/* Call the switcher. */
int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
/* Restore interrupts. */
return rc;
}
#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) */
/** @} */
{
{
}
}
{
#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
{
}
#endif
}
/**
* Loads the guest control registers (CR0, CR2, CR3, CR4) into the VMCB.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pVmcb Pointer to the VMCB.
* @param pCtx Pointer the guest-CPU context.
*
* @remarks No-long-jump zone!!!
*/
{
/*
* Guest CR0.
*/
{
        uint64_t u64GuestCR0 = pCtx->cr0;
        /* Always enable caching. */
        u64GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW);
/*
* When Nested Paging is not available use shadow page tables and intercept #PFs (the latter done in SVMR0SetupVM()).
*/
{
u64GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
}
/*
* Guest FPU bits.
*/
bool fInterceptNM = false;
bool fInterceptMF = false;
u64GuestCR0 |= X86_CR0_NE; /* Use internal x87 FPU exceptions handling rather than external interrupts. */
{
/* Catch floating point exceptions if we need to report them to the guest in a different way. */
if (!(u64GuestCR0 & X86_CR0_NE))
{
Log4(("hmR0SvmLoadGuestControlRegs: Intercepting Guest CR0.MP Old-style FPU handling!!!\n"));
fInterceptMF = true;
}
}
else
{
fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
        u64GuestCR0 |= X86_CR0_TS       /* Lazy FPU loading; trap #NM when the guest uses the FPU. */
                     | X86_CR0_MP;      /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
}
/*
* Update the exception intercept bitmap.
*/
if (fInterceptNM)
else
if (fInterceptMF)
else
}
/*
* Guest CR2.
*/
{
}
/*
* Guest CR3.
*/
{
{
#if HC_ARCH_BITS == 32
if (CPUMIsGuestInLongModeEx(pCtx))
else
#endif
}
else
}
/*
* Guest CR4.
*/
{
{
{
case PGMMODE_REAL:
case PGMMODE_PROTECTED: /* Protected mode, no paging. */
                AssertFailed();
                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
case PGMMODE_32_BIT: /* 32-bit paging. */
u64GuestCR4 &= ~X86_CR4_PAE;
break;
case PGMMODE_PAE: /* PAE paging. */
case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */
                /* Must use PAE paging as we could use physical memory > 4 GB. */
break;
case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
#ifdef VBOX_ENABLE_64_BITS_GUESTS
break;
#else
                AssertFailed();
                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
#endif
default: /* shut up gcc */
                AssertFailed();
                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
}
}
}
return VINF_SUCCESS;
}
/**
* Loads the guest segment registers into the VMCB.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pVmcb Pointer to the VMCB.
* @param pCtx Pointer to the guest-CPU context.
*
* @remarks No-long-jump zone!!!
*/
{
/* Guest Segment registers: CS, SS, DS, ES, FS, GS. */
{
}
/* Guest TR. */
{
}
/* Guest LDTR. */
{
}
/* Guest GDTR. */
{
}
/* Guest IDTR. */
{
}
}
/**
* Loads the guest MSRs into the VMCB.
*
* @param pVCpu Pointer to the VMCPU.
* @param pVmcb Pointer to the VMCB.
* @param pCtx Pointer to the guest-CPU context.
*
* @remarks No-long-jump zone!!!
*/
{
/* Guest Sysenter MSRs. */
/*
* Guest EFER MSR.
     * AMD-V requires guest EFER.SVME to be set. Weird.
* See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
*/
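    /* A one-line sketch, assuming the usual VMCB guest EFER field name (an assumption): */
    pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;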
/* 64-bit MSRs. */
if (CPUMIsGuestInLongModeEx(pCtx))
{
}
else
{
/* If the guest isn't in 64-bit mode, clear MSR_K6_LME bit from guest EFER otherwise AMD-V expects amd64 shadow paging. */
}
* be writable in 32-bit mode. Clarify with AMD spec. */
}
/**
* Loads the guest debug registers into the VMCB.
*
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
*
* @remarks No-long-jump zone!!!
* @remarks Requires EFLAGS to be up-to-date in the VMCB!
*/
{
return;
/** @todo Turn these into assertions if possible. */
/* Update DR6, DR7 with the guest values. */
bool fInterceptDB = false;
bool fInterceptMovDRx = false;
if (DBGFIsStepping(pVCpu))
{
/* AMD-V doesn't have any monitor-trap flag equivalent. Instead, enable tracing in the guest and trap #DB. */
fInterceptDB = true;
}
{
if (!CPUMIsHyperDebugStateActive(pVCpu))
{
/* Update DR6, DR7 with the hypervisor values. */
}
fInterceptMovDRx = true;
}
{
if (!CPUMIsGuestDebugStateActive(pVCpu))
{
}
Assert(fInterceptMovDRx == false);
}
else if (!CPUMIsGuestDebugStateActive(pVCpu))
{
/* For the first time we would need to intercept MOV DRx accesses even when the guest debug registers aren't loaded. */
fInterceptMovDRx = true;
}
if (fInterceptDB)
else
if (fInterceptMovDRx)
{
{
}
}
else
{
{
}
}
}
/**
* Loads the guest APIC state (currently just the TPR).
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pVmcb Pointer to the VMCB.
* @param pCtx Pointer to the guest-CPU context.
*/
{
return VINF_SUCCESS;
bool fPendingIntr;
/** Assume that we need to trap all TPR accesses and thus need not check on
* every #VMEXIT if we should update the TPR. */
    /* 32-bit guests use the LSTAR MSR for patching guest code that touches the TPR. */
{
/* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
if (fPendingIntr)
else
{
}
}
else
{
/* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
/* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
        if (fPendingIntr)
else
{
}
}
return rc;
}
/**
* Sets up the appropriate function to run guest code.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
*
* @remarks No-long-jump zone!!!
*/
{
if (CPUMIsGuestInLongModeEx(pCtx))
{
#ifndef VBOX_ENABLE_64_BITS_GUESTS
#endif
#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
#else
/* 64-bit host or hybrid host. */
#endif
}
else
{
/* Guest is not in long mode, use the 32-bit handler. */
}
return VINF_SUCCESS;
}
/**
* Enters the AMD-V session.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pCpu Pointer to the CPU info struct.
*/
{
/* Nothing to do here. */
return VINF_SUCCESS;
}
/**
* Leaves the AMD-V session.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
*/
{
/* Nothing to do here. Everything is taken care of in hmR0SvmLongJmpToRing3(). */
return VINF_SUCCESS;
}
/**
* Saves the host state.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
*
* @remarks No-long-jump zone!!!
*/
{
/* Nothing to do here. AMD-V does this for us automatically during the world-switch. */
return VINF_SUCCESS;
}
/**
* Loads the guest state.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
*
* @remarks No-long-jump zone!!!
*/
{
AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestControlRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
/* hmR0SvmLoadGuestDebugRegs() must be called -after- updating guest RFLAGS as the RFLAGS may need to be changed. */
AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0SvmSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
/* Clear any unused and reserved bits. */
("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
return rc;
}
/**
* Saves the entire guest state from the VMCB into the
* guest-CPU context. Currently there is no residual state left in the CPU that
* is not updated in the VMCB.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*/
{
/*
* Guest interrupt shadow.
*/
else
/*
* Guest Control registers: CR2, CR3 (handled at the end) - accesses to other control registers are always intercepted.
*/
/*
* Guest MSRs.
*/
/*
* Guest segment registers (includes FS, GS base MSRs for 64-bit guests).
*/
/*
* Correct the hidden CS granularity flag. Haven't seen it being wrong in any other
* register (yet).
*/
/** @todo Verify this. */
{
}
#ifdef VBOX_STRICT
# define HMSVM_ASSERT_SEL_GRANULARITY(reg) \
#endif
/*
     * Sync the hidden SS DPL field. AMD CPUs have a separate CPL field in the VMCB and use that
     * rather than the SS DPL, so it's possible that when the CPL changes during guest execution
     * the SS DPL isn't updated by AMD-V. Observed on some AMD Fusion CPUs with 64-bit guests.
     * See AMD spec. 15.5.1 "Basic operation".
*/
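    /* A one-line sketch of the fix-up, assuming the VMCB guest area exposes the current CPL as
       u8CPL and that a pVmcb local is in scope (both are assumptions): */
    pMixedCtx->ss.Attr.n.u2Dpl = pVmcb->guest.u8CPL & 0x3;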
/*
* Guest Descriptor-Table registers.
*/
/*
* Guest Debug registers.
*/
/*
* With Nested Paging, CR3 changes are not intercepted. Therefore, sync. it now.
* This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmp's to ring-3.
*/
{
}
}
/**
* Does the necessary state syncing before doing a longjmp to ring-3.
*
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
* @param rcExit The reason for exiting to ring-3. Can be
* VINF_VMM_UNKNOWN_RING3_CALL.
*
* @remarks No-long-jmp zone!!!
*/
{
    /* Restore host FPU state if necessary and resync on next R0 reentry. */
{
}
/* Restore host debug registers if necessary and resync on next R0 reentry. */
{
}
else if (CPUMIsHyperDebugStateActive(pVCpu))
{
}
}
/**
* VMMRZCallRing3() callback wrapper which saves the guest state (or restores
* any remaining host state) before we longjump to ring-3 and possibly get
* preempted.
*
* @param pVCpu Pointer to the VMCPU.
* @param enmOperation The operation causing the ring-3 longjump.
* @param pvUser The user argument (pointer to the possibly
* out-of-date guest-CPU context).
*
* @remarks Must never be called with @a enmOperation ==
* VMMCALLRING3_VM_R0_ASSERTION.
*/
{
    /* VMMRZCallRing3() already makes sure we never get called as a result of a longjmp due to an assertion. */
Log4(("hmR0SvmCallRing3Callback->hmR0SvmLongJmpToRing3\n"));
}
/**
 * An action requires us to go back to ring-3. This function does the necessary
 * steps before we can safely return to ring-3. Unlike a longjmp to ring-3,
 * this is a voluntary exit.
*
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
* @param rcExit The reason for exiting to ring-3. Can be
* VINF_VMM_UNKNOWN_RING3_CALL.
*/
{
{
/* We don't need to do any syncing here, we're not going to come back to execute anything again. */
return;
}
/* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
/* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
{
}
/* Sync. the guest state. */
/* On our way back from ring-3 the following needs to be done. */
/** @todo This can change with preemption hooks. */
if (rcExit == VINF_EM_RAW_INTERRUPT)
else
}
/**
* Sets up the usage of TSC offsetting for the VCPU.
*
* @param pVCpu Pointer to the VMCPU.
*
* @remarks No-long-jump zone!!!
*/
{
{
{
}
else
{
}
}
else
{
}
}
/**
* Sets an event as a pending event to be injected into the guest.
*
* @param pVCpu Pointer to the VMCPU.
* @param pEvent Pointer to the SVM event.
* @param GCPtrFaultAddress The fault-address (CR2) in case it's a
* page-fault.
*/
DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPU pVCpu, PSVMEVENT pEvent, RTGCUINTPTR GCPtrFaultAddress)
{
#ifdef VBOX_STRICT
if (GCPtrFaultAddress)
{
("hmR0SvmSetPendingEvent: Setting fault-address for non-#PF. u8Vector=%#x Type=%#RX32 GCPtrFaultAddr=%#RGx\n",
}
#endif
Log4(("hmR0SvmSetPendingEvent: u=%#RX64 u8Vector=%#x ErrorCodeValid=%#x ErrorCode=%#RX32\n", pEvent->u,
pEvent->n.u8Vector, pEvent->n.u3Type, (uint8_t)pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
}
/**
* Injects an event into the guest upon VMRUN by updating the relevant field
* in the VMCB.
*
* @param pVCpu Pointer to the VMCPU.
* @param pVmcb Pointer to the guest VMCB.
* @param pCtx Pointer to the guest-CPU context.
* @param pEvent Pointer to the event.
*
* @remarks No-long-jump zone!!!
* @remarks Requires CR0!
*/
DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx, PSVMEVENT pEvent)
{
}
/**
* Converts any TRPM trap into a pending SVM event. This is typically used when
* entering from ring-3 (not longjmp returns).
*
* @param pVCpu Pointer to the VMCPU.
*/
{
pEvent->u = 0;
/* Refer AMD spec. 15.20 "Event Injection" for the format. */
if (enmTrpmEvent == TRPM_TRAP)
{
switch (uVector)
{
case X86_XCPT_PF:
case X86_XCPT_DF:
case X86_XCPT_TS:
case X86_XCPT_NP:
case X86_XCPT_SS:
case X86_XCPT_GP:
case X86_XCPT_AC:
{
break;
}
}
}
else if (enmTrpmEvent == TRPM_HARDWARE_INT)
{
if (uVector == X86_XCPT_NMI)
else
}
else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
else
Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%#x uErrorCode=%#RX32\n", pEvent->u, pEvent->n.u8Vector,
}
/**
* Converts any pending SVM event into a TRPM trap. Typically used when leaving
* AMD-V to execute any instruction.
*
 * @param pVCpu Pointer to the VMCPU.
*/
{
Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
switch (uVectorType)
{
case SVM_EVENT_NMI:
break;
case SVM_EVENT_SOFTWARE_INT:
break;
case SVM_EVENT_EXCEPTION:
break;
default:
break;
}
if (pEvent->n.u1ErrorCodeValid)
if ( uVectorType == SVM_EVENT_EXCEPTION
&& uVector == X86_XCPT_PF)
{
}
else if (uVectorType == SVM_EVENT_SOFTWARE_INT)
{
}
}
/**
* Gets the guest's interrupt-shadow.
*
* @returns The guest's interrupt-shadow.
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
*
* @remarks No-long-jump zone!!!
* @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
*/
{
/*
* Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
* inhibit interrupts or clear any existing interrupt-inhibition.
*/
uint32_t uIntrState = 0;
{
{
/*
* We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
* AMD-V, the flag's condition to be cleared is met and thus the cleared state is correct.
*/
}
        else
            uIntrState = SVM_INTERRUPT_SHADOW_ACTIVE;
}
return uIntrState;
}
/**
* Sets the virtual interrupt intercept control in the VMCB which
* instructs AMD-V to cause a #VMEXIT as soon as the guest is in a state to
* receive interrupts.
*
* @param pVmcb Pointer to the VMCB.
*/
{
{
pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0; /* Not necessary as we #VMEXIT for delivering the interrupt. */
}
}
/**
* Injects any pending events into the guest if the guest is in a state to
* receive them.
*
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
*/
{
Event.u = 0;
{
bool fInject = true;
if ( fIntShadow
{
fInject = false;
}
if ( fInject
{
}
else
} /** @todo SMI. SMIs take priority over NMIs. */
    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))   /* NMI. NMIs take priority over regular interrupts. */
{
if (!fIntShadow)
{
Log4(("Injecting NMI\n"));
}
else
}
{
/* Check if there are guest external interrupts (PIC/APIC) pending and inject them if the guest can receive them. */
if ( !fBlockInt
&& !fIntShadow)
{
if (RT_SUCCESS(rc))
{
}
else
{
/** @todo Does this actually happen? If not turn it into an assertion. */
}
}
else
}
/* Update the guest interrupt shadow in the VMCB. */
}
/**
* Reports world-switch error and dumps some useful debug info.
*
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param rcVMRun The return code from VMRUN (or
* VERR_SVM_INVALID_GUEST_STATE for invalid
* guest-state).
* @param pCtx Pointer to the guest-CPU context.
*/
{
if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
{
#ifdef VBOX_STRICT
#endif
}
else
}
/**
* Check per-VM and per-VCPU force flag actions that require us to go back to
* ring-3 for one reason or another.
*
* @returns VBox status code (information status code included).
* @retval VINF_SUCCESS if we don't have any actions that require going back to
* ring-3.
* @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
* @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
* interrupts)
* @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
* all EMTs to be in ring-3.
* @retval VINF_EM_RAW_TO_R3 if there is pending DMA requests.
* @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
* to the EM loop.
*
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
*/
{
if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
|| VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
{
/* Pending HM CR3 sync. No PAE PDPEs (VMCPU_FF_HM_UPDATE_PAE_PDPES) on AMD-V. */
{
}
        /* Pending PGM CR3 sync. */
{
rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
if (rc != VINF_SUCCESS)
{
return rc;
}
}
/* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
/* -XXX- what was that about single stepping? */
{
rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
return rc;
}
/* Pending VM request packets, such as hardware interrupts. */
{
Log4(("hmR0SvmCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
return VINF_EM_PENDING_REQUEST;
}
/* Pending PGM pool flushes. */
{
Log4(("hmR0SvmCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
return VINF_PGM_POOL_FLUSH_PENDING;
}
/* Pending DMA requests. */
{
Log4(("hmR0SvmCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
return VINF_EM_RAW_TO_R3;
}
}
/* Paranoia. */
return VINF_SUCCESS;
}
/**
* Does the preparations before executing guest code in AMD-V.
*
* This may cause longjmps to ring-3 and may even result in rescheduling to the
 * recompiler. We must be cautious about what we do here regarding committing
 * guest-state information into the VMCB on the assumption that we will definitely
 * execute the guest in AMD-V. If we fall back to the recompiler after updating the VMCB and
* clearing the common-state (TRPM/forceflags), we must undo those changes so
* that the recompiler can (and should) use them when it resumes guest
* execution. Otherwise such operations must be done when we can no longer
* exit to ring-3.
*
* @returns VBox status code (informational status codes included).
* @retval VINF_SUCCESS if we can proceed with running the guest.
* @retval VINF_* scheduling changes, we have to go back to ring-3.
*
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
* @param pSvmTransient Pointer to the SVM transient structure.
*/
{
/* Check force flag actions that might require us to go back to ring-3. */
if (rc != VINF_SUCCESS)
return rc;
/* We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.) */
{
/* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
return VINF_EM_RAW_INTERRUPT;
}
#endif
/* Convert any pending TRPM traps to HM events for injection. */
/** @todo Optimization: move this before disabling interrupts, restore state
* using pVmcb->ctrl.EventInject.u. */
if (TRPMHasTrap(pVCpu))
return VINF_SUCCESS;
}
/**
 * Prepares to run guest code in AMD-V and we've committed to doing so. This
* means there is no backing out to ring-3 or anywhere else at this
* point.
*
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
* @param pSvmTransient Pointer to the SVM transient structure.
*
* @remarks Called with preemption disabled.
* @remarks No-long-jump zone!!!
*/
DECLINLINE(void) hmR0SvmPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
{
/** @todo I don't see the point of this, VMMR0EntryFast() already disables interrupts for the entire period. */
#endif
/*
* Re-enable nested paging (automatically disabled on every VM-exit). See AMD spec. 15.25.3 "Enabling Nested Paging".
* We avoid changing the corresponding VMCB Clean Bit as we're not changing it to a different value since the previous run.
*/
/** @todo The above assumption could be wrong. It's not documented what
* should be done wrt to the VMCB Clean Bit, but we'll find out the
* hard way. */
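    /* A one-line sketch of the re-enable, assuming the usual VMCB control field name and that a
       pVmcb local is in scope (both are assumptions): */
    pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;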
/* Load the guest state. */
AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
/*
* If we're not intercepting TPR changes in the guest, save the guest TPR before the world-switch
* so we can update it on the way back if the guest changed the TPR.
*/
{
else
}
/* Flush the appropriate tagged-TLB entries. */
ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
to start executing. */
/*
* Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
     * RDTSCPs (that don't cause exits) read the guest MSR. See @bugref{3324}.
*
* This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
*/
{
uint64_t u64GuestTscAux = 0;
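        /* A sketch of the swap described above, assuming the guest value is kept by CPUM and the
           host value is stashed in the transient structure (the u64HostTscAux member is an assumption): */
        pSvmTransient->u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
        int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
        AssertRC(rc2);
        ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);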
}
}
/**
* Wrapper for running the guest code in AMD-V.
*
* @returns VBox strict status code.
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
*
* @remarks No-long-jump zone!!!
*/
{
/*
* 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
* using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper.
* Refer MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
*/
#ifdef VBOX_WITH_KERNEL_USING_XMM
return HMR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
#else
return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
#endif
}
/**
* Performs some essential restoration of state after running guest code in
* AMD-V.
*
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
* @param pSvmTransient Pointer to the SVM transient structure.
* @param rcVMRun Return code of VMRUN.
*
* @remarks Called with interrupts disabled.
* @remarks No-long-jump zone!!! This function will however re-enable longjmps
* unconditionally when it is safe to do so.
*/
DECLINLINE(void) hmR0SvmPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
{
ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
pVmcb->ctrl.u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL; /* Mark the VMCB-state cache as unmodified by VMM. */
/* Restore host's TSC_AUX if required. */
{
/** @todo Find a way to fix hardcoding a guestimate. */
}
/* If VMRUN failed, we can bail out early. This does -not- cover SVM_EXIT_INVALID. */
{
return;
}
hmR0SvmSaveGuestState(pVCpu, pMixedCtx); /* Save the guest state from the VMCB to the guest-CPU context. */
{
{
/* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
{
}
{
}
}
/* -XXX- premature interruption during event injection */
}
}
/**
* Runs the guest code using AMD-V.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
*/
{
int rc = VERR_INTERNAL_ERROR_5;
for (;; cLoops++)
{
Assert(!HMR0SuspendPending());
/* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
if (rc != VINF_SUCCESS)
break;
/*
* No longjmps to ring-3 from this point on!!!
* Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
* This also disables flushing of the R0-logger instance (if any).
*/
/*
* Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
* This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
*/
|| SvmTransient.u64ExitCode == SVM_EXIT_INVALID)) /* Check for errors due to invalid guest state. */
{
            if (rc == VINF_SUCCESS)
                rc = VERR_SVM_INVALID_GUEST_STATE;
return rc;
}
/* Handle the #VMEXIT. */
if (rc != VINF_SUCCESS)
break;
{
break;
}
}
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;
    else if (rc == VINF_EM_RESET)
        rc = VINF_EM_TRIPLE_FAULT;
return rc;
}
/**
* Handles a #VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID).
*
* @returns VBox status code (informational status codes included).
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
* @param pSvmTransient Pointer to the SVM transient structure.
*/
{
int rc;
switch (pSvmTransient->u64ExitCode)
{
case SVM_EXIT_CPUID:
case SVM_EXIT_RDTSC:
case SVM_EXIT_RDTSCP:
case SVM_EXIT_MONITOR:
case SVM_EXIT_MWAIT:
case SVM_EXIT_WRITE_CR0:
case SVM_EXIT_WRITE_CR3:
case SVM_EXIT_WRITE_CR4:
case SVM_EXIT_WRITE_CR8:
case SVM_EXIT_READ_CR0:
case SVM_EXIT_READ_CR3:
case SVM_EXIT_READ_CR4:
case SVM_EXIT_MSR:
case SVM_EXIT_INTR:
case SVM_EXIT_FERR_FREEZE:
case SVM_EXIT_NMI:
case SVM_EXIT_INIT:
case SVM_EXIT_WBINVD:
case SVM_EXIT_INVD:
case SVM_EXIT_RDPMC:
case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:
case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
default:
{
case SVM_EXIT_INVLPGA:
case SVM_EXIT_RSM:
case SVM_EXIT_VMRUN:
case SVM_EXIT_VMLOAD:
case SVM_EXIT_VMSAVE:
case SVM_EXIT_STGI:
case SVM_EXIT_CLGI:
case SVM_EXIT_SKINIT:
default:
{
break;
}
}
}
return rc;
}
#ifdef DEBUG
# define HMSVM_ASSERT_PREEMPT_CPUID_VAR() \
# define HMSVM_ASSERT_PREEMPT_CPUID() \
do \
{ \
RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
} while (0)
# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() \
do { \
Assert(ASMIntAreEnabled()); \
if (VMMR0IsLogFlushDisabled(pVCpu)) \
} while (0)
#else /* Release builds */
# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() do { } while(0)
#endif
/**
* Worker for hmR0SvmInterpretInvlpg().
*
* @return VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pCpu Pointer to the disassembler state.
* @param pRegFrame Pointer to the register frame.
*/
{
if (RT_FAILURE(rc))
return VERR_EM_INTERPRETER;
{
return VERR_EM_INTERPRETER;
}
else
{
}
return rc;
}
/**
* Interprets INVLPG.
*
* @returns VBox status code.
* @retval VINF_* Scheduling instructions.
* @retval VERR_EM_INTERPRETER Something we can't cope with.
* @retval VERR_* Fatal errors.
*
* @param pVM Pointer to the VM.
* @param pRegFrame Pointer to the register frame.
*
* @remarks Updates the RIP if the instruction was executed successfully.
*/
{
/* Only allow 32 & 64 bit code. */
{
if ( RT_SUCCESS(rc)
{
if (RT_SUCCESS(rc))
return rc;
}
else
Log4(("hmR0SvmInterpretInvlpg: EMInterpretDisasCurrent returned %Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode));
}
return VERR_EM_INTERPRETER;
}
/**
* Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
*
* @param pVCpu Pointer to the VMCPU.
*/
{
Event.u = 0;
}
/**
 * Sets a debug (#DB) exception as pending-for-injection into the VM.
*
* @param pVCpu Pointer to the VMCPU.
*/
{
Event.u = 0;
}
/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #VMEXIT handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
/**
* #VMEXIT handler for external interrupts, NMIs, FPU assertion freeze and INIT
* signals (SVM_EXIT_INTR, SVM_EXIT_NMI, SVM_EXIT_FERR_FREEZE, SVM_EXIT_INIT).
*/
{
    /* 32-bit Windows hosts (4 cores) have trouble with this on Intel; it causes higher interrupt latency. Assuming the
       same for AMD-V. */
return VINF_SUCCESS;
#else
return VINF_EM_RAW_INTERRUPT;
#endif
}
/**
* #VMEXIT handler for WBINVD (SVM_EXIT_WBINVD). Conditional #VMEXIT.
*/
{
return VINF_SUCCESS;
}
/**
* #VMEXIT handler for INVD (SVM_EXIT_INVD). Unconditional #VMEXIT.
*/
{
return VINF_SUCCESS;
}
/**
 * #VMEXIT handler for CPUID (SVM_EXIT_CPUID). Conditional #VMEXIT.
*/
{
else
{
}
return rc;
}
/**
* #VMEXIT handler for RDTSC (SVM_EXIT_RDTSC). Conditional #VMEXIT.
*/
{
else
{
}
return rc;
}
/**
* #VMEXIT handler for RDTSCP (SVM_EXIT_RDTSCP). Conditional #VMEXIT.
*/
{
else
{
}
return rc;
}
/**
* #VMEXIT handler for RDPMC (SVM_EXIT_RDPMC). Conditional #VMEXIT.
*/
{
else
{
}
return rc;
}
/**
* #VMEXIT handler for INVLPG (SVM_EXIT_INVLPG). Conditional #VMEXIT.
*/
{
/** @todo Decode Assist. */
return rc;
}
/**
* #VMEXIT handler for HLT (SVM_EXIT_HLT). Conditional #VMEXIT.
*/
{
return rc;
}
/**
* #VMEXIT handler for MONITOR (SVM_EXIT_MONITOR). Conditional #VMEXIT.
*/
{
else
{
AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
}
return rc;
}
/**
* #VMEXIT handler for MWAIT (SVM_EXIT_MWAIT). Conditional #VMEXIT.
*/
{
if ( rc == VINF_EM_HALT
|| rc == VINF_SUCCESS)
{
if ( rc == VINF_EM_HALT
{
rc = VINF_SUCCESS;
}
}
else
{
AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
}
("hmR0SvmExitMwait: EMInterpretMWait failed rc=%Rrc\n", rc));
return rc;
}
/**
* #VMEXIT handler for shutdown (triple-fault) (SVM_EXIT_SHUTDOWN).
* Conditional #VMEXIT.
*/
{
return VINF_EM_RESET;
}
/**
* #VMEXIT handler for CRx reads (SVM_EXIT_READ_CR*). Conditional #VMEXIT.
*/
{
/** @todo Decode Assist. */
return rc;
}
/**
* #VMEXIT handler for CRx writes (SVM_EXIT_WRITE_CR*). Conditional #VMEXIT.
*/
{
/** @todo Decode Assist. */
    if (rc == VINF_SUCCESS)
{
/* RIP has been updated by EMInterpretInstruction(). */
{
case 0: /* CR0. */
break;
case 3: /* CR3. */
break;
case 4: /* CR4. */
break;
case 8: /* CR8 (TPR). */
break;
default:
                AssertMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. u64ExitCode=%#RX64 %#x CRx=%#RX64\n",
break;
}
}
else
return rc;
}
/**
* #VMEXIT handler for instructions that result in a #UD exception delivered to
* the guest.
*/
HMSVM_EXIT_DECL hmR0SvmExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
{
    hmR0SvmSetPendingXcptUD(pVCpu);
    return VINF_SUCCESS;
}
/**
* #VMEXIT handler for MSR read and writes (SVM_EXIT_MSR). Conditional #VMEXIT.
*/
{
int rc;
{
/* Handle TPR patching; intercepted LSTAR write. */
{
{
/* Our patch code uses LSTAR for TPR caching for 32-bit guests. */
}
return VINF_SUCCESS;
}
AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: EMInterpretWrmsr failed rc=%Rrc\n", rc));
}
else
{
/* MSR Read access. */
AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: EMInterpretRdmsr failed rc=%Rrc\n", rc));
}
/* RIP has been updated by EMInterpret[Rd|Wr]msr(). */
return rc;
}
/**
* #VMEXIT handler for DRx read (SVM_EXIT_READ_DRx). Conditional #VMEXIT.
*/
{
/* We should -not- get this VM-exit if the guest is debugging. */
{
return VERR_SVM_UNEXPECTED_EXIT;
}
if ( !DBGFIsStepping(pVCpu)
{
/* Don't intercept DRx read and writes. */
/* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
return VINF_SUCCESS;
}
/** @todo Decode assist. */
{
/* Not necessary for read accesses but whatever doesn't hurt for now, will be fixed with decode assist. */
}
else
        Assert(rc == VERR_EM_INTERPRETER);
return rc;
}
/**
* #VMEXIT handler for DRx write (SVM_EXIT_WRITE_DRx). Conditional #VMEXIT.
*/
{
/* For now it's the same since we interpret the instruction anyway. Will change when using of Decode Assist is implemented. */
return rc;
}
/**
* #VMEXIT handler for I/O instructions (SVM_EXIT_IOIO). Conditional #VMEXIT.
*/
{
/* I/O operation lookup arrays. */
static uint32_t const s_aIOSize[8] = { 0, 1, 2, 0, 4, 0, 0, 0 }; /* Size of the I/O accesses in bytes. */
static uint32_t const s_aIOOpAnd[8] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 }; /* AND masks for saving
/* Refer AMD spec. 15.10.2 "IN and OUT Behaviour" and Figure 15-2. "EXITINFO1 for IOIO Intercept" for the format. */
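    /* A sketch of how the lookup tables above are typically indexed; extracting the size field from
       EXITINFO1 like this (bits 6:4 select byte/word/dword access) is an assumption: */
    SVMIOIOEXIT IoExitInfo;
    IoExitInfo.u      = (uint32_t)pVmcb->ctrl.u64ExitInfo1;
    uint32_t uIOWidth = (IoExitInfo.u >> 4) & 0x7;
    uint32_t uIOSize  = s_aIOSize[uIOWidth];
    uint32_t uAndVal  = s_aIOOpAnd[uIOWidth];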
if (RT_UNLIKELY(!uIOSize))
{
return VERR_EM_INTERPRETER;
}
int rc;
if (IoExitInfo.n.u1STR)
{
    /** @todo Huh? Why can't we use the segment prefix information given by AMD-V
* in EXITINFO1? Investigate once this thing is up and running. */
if (rc == VINF_SUCCESS)
{
{
}
else
{
}
}
else
}
else
{
{
if (rc == VINF_IOM_R3_IOPORT_WRITE)
HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, uIOSize);
}
else
{
if (IOM_SUCCESS(rc))
{
}
else if (rc == VINF_IOM_R3_IOPORT_READ)
HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, uIOSize);
}
}
if (IOM_SUCCESS(rc))
{
/* AMD-V saves the RIP of the instruction following the IO instruction in EXITINFO2. */
{
/* If any IO breakpoints are armed, then we should check if a debug trap needs to be generated. */
{
/* I/O breakpoint length, in bytes. */
for (unsigned i = 0; i < 4; i++)
{
{
/* Clear all breakpoint status flags and set the one we just hit. */
/*
* Note: AMD64 Architecture Programmer's Manual 13.1:
                     * Bits 15:13 of the DR6 register are never cleared by the processor and must be cleared
* by software after the contents have been read.
*/
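                    /* A sketch of the status update (DR6 layout per the AMD64 APM; 'i' is the
                       breakpoint index from the enclosing loop): */
                    pCtx->dr[6] &= ~(X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3);
                    pCtx->dr[6] |= RT_BIT_64(i);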
/* X86_DR7_GD will be cleared if drx accesses should be trapped inside the guest. */
/* Paranoia. */
/* Inject the debug exception. */
break;
}
}
}
}
}
#ifdef DEBUG
if (rc == VINF_IOM_R3_IOPORT_READ)
else if (rc == VINF_IOM_R3_IOPORT_WRITE)
else
{
|| rc == VINF_SUCCESS
|| rc == VINF_EM_RAW_EMULATE_INSTR
|| rc == VINF_EM_RAW_GUEST_TRAP
}
#endif
return rc;
}