/* HMSVMR0.cpp, revision 2f4030b34103977b721649e8f793f7f139788e5a */
/** @file
 * HM SVM (AMD-V) - Host Context Ring-0.
 */

/*
 * Copyright (C) 2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */
/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#ifdef VBOX_WITH_STATISTICS
# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { \
        if ((u64ExitCode) == SVM_EXIT_NPF) \
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); \
        else \
            STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[(u64ExitCode) & MASK_EXITREASON_STAT]); \
    } while (0)
#else
# define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0)
#endif
/** If we decide to use a function table approach this can be useful to
 *  switch to a "static DECLCALLBACK(int)". */
#define HMSVM_EXIT_DECL static int
/** @name Segment attribute conversion between CPU and AMD-V VMCB format.
 *
 * The CPU format of the segment attribute is described in X86DESCATTRBITS,
 * which is 16 bits (i.e. it includes 4 bits of the segment limit).
 *
 * In the AMD-V VMCB format the segment attribute is a compact 12 bits
 * (strictly speaking, only the attribute bits and nothing else); the upper
 * 4 bits are unused.
 *
 * @{ */
#define HMSVM_CPU_2_VMCB_SEG_ATTR(a)    ( ((a) & 0xff) | (((a) & 0xf000) >> 4) )
#define HMSVM_VMCB_2_CPU_SEG_ATTR(a)    ( ((a) & 0xff) | (((a) & 0x0f00) << 4) )
/** @} */
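/* A quick round-trip sanity check of the two macros above (an illustrative
   addition, not from the original file): 0xc09b is a sample CPU-format
   code-segment attribute (G=1, D=1, P=1, DPL=0, S=1, Type=0xb). The CPU
   format's limit bits 11:8 are dropped by design; the VMCB carries the
   segment limit in a separate field. */
AssertCompile(HMSVM_CPU_2_VMCB_SEG_ATTR(UINT16_C(0xc09b)) == UINT16_C(0x0c9b));
AssertCompile(HMSVM_VMCB_2_CPU_SEG_ATTR(UINT16_C(0x0c9b)) == UINT16_C(0xc09b));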
/** @name Macros for loading, storing segment registers to/from the VMCB.
 * @{ */
#define HMSVM_LOAD_SEG_REG(REG, reg) \
    do \
    { \
        Assert(pCtx->reg.fFlags & CPUMSELREG_FLAGS_VALID); \
        pVmcb->guest.REG.u16Sel   = pCtx->reg.Sel; \
        pVmcb->guest.REG.u32Limit = pCtx->reg.u32Limit; \
        pVmcb->guest.REG.u64Base  = pCtx->reg.u64Base; \
        pVmcb->guest.REG.u16Attr  = HMSVM_CPU_2_VMCB_SEG_ATTR(pCtx->reg.Attr.u); \
    } while (0)

#define HMSVM_SAVE_SEG_REG(REG, reg) \
    do \
    { \
        pMixedCtx->reg.Sel      = pVmcb->guest.REG.u16Sel; \
        pMixedCtx->reg.ValidSel = pVmcb->guest.REG.u16Sel; \
        pMixedCtx->reg.fFlags   = CPUMSELREG_FLAGS_VALID; \
        pMixedCtx->reg.u32Limit = pVmcb->guest.REG.u32Limit; \
        pMixedCtx->reg.u64Base  = pVmcb->guest.REG.u64Base; \
        pMixedCtx->reg.Attr.u   = HMSVM_VMCB_2_CPU_SEG_ATTR(pVmcb->guest.REG.u16Attr); \
    } while (0)
/** @} */
/** Macro for checking and returning from the calling function for
 *  #VMEXIT intercepts that may be caused during delivery of another
 *  event in the guest. */
#define HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY() \
    do \
    { \
        int rc = hmR0SvmCheckExitDueToEventDelivery(pVCpu, pCtx, pSvmTransient); \
        if (RT_UNLIKELY(rc != VINF_SUCCESS)) \
            return rc; \
    } while (0)
/** @name Exception bitmap mask for all contributory exceptions.
 *
 * Page fault is deliberately excluded here as whether it is contributory or
 * benign depends on context; it's handled separately.
 *
 * @{ */
#define HMSVM_CONTRIBUTORY_XCPT_MASK  (  RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
                                       | RT_BIT(X86_XCPT_DE))
/** @} */
/** @name VMCB Clean Bits.
 * These flags are used for VMCB-state caching. A set VMCB Clean Bit indicates
 * AMD-V doesn't need to reload the corresponding value(s) from the VMCB in
 * memory.
 * @{ */
/** All intercepts vectors, TSC offset, PAUSE filter counter. */
#define HMSVM_VMCB_CLEAN_INTERCEPTS      RT_BIT(0)
/** I/O permission bitmap, MSR permission bitmap. */
#define HMSVM_VMCB_CLEAN_IOPM_MSRPM      RT_BIT(1)
/** ASID. */
#define HMSVM_VMCB_CLEAN_ASID            RT_BIT(2)
/** TPR: V_TPR, V_IRQ, V_INTR_PRIO, V_IGN_TPR, V_INTR_MASKING, V_INTR_VECTOR. */
#define HMSVM_VMCB_CLEAN_TPR             RT_BIT(3)
/** Nested Paging: Nested CR3 (nCR3), PAT. */
#define HMSVM_VMCB_CLEAN_NP              RT_BIT(4)
/** Control registers (CR0, CR3, CR4, EFER). */
#define HMSVM_VMCB_CLEAN_CRX_EFER        RT_BIT(5)
/** Debug registers (DR6, DR7). */
#define HMSVM_VMCB_CLEAN_DRX             RT_BIT(6)
/** GDT, IDT limit and base. */
#define HMSVM_VMCB_CLEAN_DT              RT_BIT(7)
/** Segment register: CS, SS, DS, ES limit and base. */
#define HMSVM_VMCB_CLEAN_SEG             RT_BIT(8)
/** CR2. */
#define HMSVM_VMCB_CLEAN_CR2             RT_BIT(9)
/** Last-branch record (DbgCtlMsr, br_from, br_to, lastint_from, lastint_to). */
#define HMSVM_VMCB_CLEAN_LBR             RT_BIT(10)
/** AVIC (AVIC APIC_BAR; AVIC APIC_BACKING_PAGE, AVIC
 *  PHYSICAL_TABLE and AVIC LOGICAL_TABLE pointers). */
#define HMSVM_VMCB_CLEAN_AVIC            RT_BIT(11)
/** Mask of all valid VMCB Clean bits. */
#define HMSVM_VMCB_CLEAN_ALL             (  HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_IOPM_MSRPM | HMSVM_VMCB_CLEAN_ASID \
                                          | HMSVM_VMCB_CLEAN_TPR | HMSVM_VMCB_CLEAN_NP | HMSVM_VMCB_CLEAN_CRX_EFER \
                                          | HMSVM_VMCB_CLEAN_DRX | HMSVM_VMCB_CLEAN_DT | HMSVM_VMCB_CLEAN_SEG \
                                          | HMSVM_VMCB_CLEAN_CR2 | HMSVM_VMCB_CLEAN_LBR | HMSVM_VMCB_CLEAN_AVIC)
/** @} */
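/* A minimal usage sketch (illustrative, not from the original file; VMCB
   field names as assumed here): after the VMM modifies a cached VMCB field,
   the matching clean bit must be cleared so the CPU reloads that state on
   the next VMRUN. */
DECLINLINE(void) hmR0SvmExampleSetVTpr(PSVMVMCB pVmcb, uint8_t u8Tpr)
{
    pVmcb->ctrl.IntCtrl.n.u8VTPR  = u8Tpr >> 4;               /* VTPR bits 3:0 hold TPR bits 7:4. */
    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_TPR;    /* Tell the CPU to re-read the TPR fields. */
}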
/** @name SVM transient.
 *
 * A state structure for holding miscellaneous information across AMD-V
 * VMRUN/#VMEXIT operation, restored after the transition.
 *
 * @{ */
typedef struct SVMTRANSIENT
{
    /** The host's rflags/eflags. */
    RTCCUINTREG     uEflags;
    /** The #VMEXIT exit code (the EXITCODE field in the VMCB). */
    uint64_t        u64ExitCode;
    /** The guest's TPR value used for TPR shadowing. */
    uint8_t         u8GuestTpr;
    /** Whether the #VMEXIT was caused by a page-fault during delivery of a
     *  contributory exception or a page-fault. */
    bool            fVectoringPF;
} SVMTRANSIENT, *PSVMTRANSIENT;
/** @} */
/**
 * MSRPM (MSR permission bitmap) read permissions (for guest RDMSR).
 */
typedef enum SVMMSREXITREAD
{
    /** Reading this MSR causes a VM-exit. */
    SVMMSREXIT_INTERCEPT_READ,
    /** Reading this MSR does not cause a VM-exit. */
    SVMMSREXIT_PASSTHRU_READ
} SVMMSREXITREAD;

/**
 * MSRPM (MSR permission bitmap) write permissions (for guest WRMSR).
 */
typedef enum SVMMSREXITWRITE
{
    /** Writing to this MSR causes a VM-exit. */
    SVMMSREXIT_INTERCEPT_WRITE,
    /** Writing to this MSR does not cause a VM-exit. */
    SVMMSREXIT_PASSTHRU_WRITE
} SVMMSREXITWRITE;
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite);
static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu);

HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitWbinvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitInvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitCpuid(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitRdtsc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitRdtscp(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitRdpmc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitInvlpg(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitHlt(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitMonitor(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitMwait(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitShutdown(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitReadCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitWriteCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitMsr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitReadDRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitWriteDRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitIOInstr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitNestedPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitVIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitVmmCall(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitXcptPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitXcptNM(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitXcptMF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
HMSVM_EXIT_DECL hmR0SvmExitXcptDB(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);

DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient);
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Ring-0 memory object for the IO bitmap. */
static RTR0MEMOBJ           g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
/** Physical address of the IO bitmap. */
static RTHCPHYS             g_HCPhysIOBitmap  = 0;
/** Virtual address of the IO bitmap. */
static void                *g_pvIOBitmap      = NULL;
/**
 * Sets up and activates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            Pointer to the CPU info struct.
 * @param   pVM             Pointer to the VM (can be NULL after a resume!).
 * @param   pvCpuPage       Pointer to the global CPU page.
 * @param   HCPhysCpuPage   Physical address of the global CPU page.
 * @param   fEnabledByHost  Whether the host OS has already enabled AMD-V (not supported here).
 */
VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
{
    AssertReturn(!fEnabledByHost, VERR_INVALID_PARAMETER);
    AssertReturn(   HCPhysCpuPage
                 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
    AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);

    /*
     * We must turn on AMD-V and setup the host state physical address, as those MSRs are per CPU.
     * If the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE flag is active, then we blindly use AMD-V.
     */

    /* Turn on AMD-V in the EFER MSR. */
    uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
    ASMWrMsr(MSR_K6_EFER, u64HostEfer | MSR_K6_EFER_SVME);

    /* Write the physical page address where the CPU will store the host state while executing the VM. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);

    /*
     * Theoretically, other hypervisors may have used ASIDs, ideally we should flush all non-zero ASIDs
     * when enabling SVM. AMD doesn't have an SVM instruction to flush all ASIDs (flushing is done
     * upon VMRUN). Therefore, just set the fFlushAsidBeforeUse flag which instructs hmR0SvmSetupTLB()
     * to flush the TLB before using a new ASID.
     */
    pCpu->fFlushAsidBeforeUse = true;

    /*
     * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
     */
    ++pCpu->cTlbFlushes;

    return VINF_SUCCESS;
}
/**
 * Deactivates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            Pointer to the CPU info struct.
 * @param   pvCpuPage       Pointer to the global CPU page.
 * @param   HCPhysCpuPage   Physical address of the global CPU page.
 */
VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
{
    AssertReturn(   HCPhysCpuPage
                 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
    AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
    NOREF(pCpu);

    /* Turn off AMD-V in the EFER MSR if AMD-V is active. */
    uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
    if (u64HostEfer & MSR_K6_EFER_SVME)
    {
        ASMWrMsr(MSR_K6_EFER, u64HostEfer & ~MSR_K6_EFER_SVME);

        /* Invalidate host state physical address. */
        ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
    }

    return VINF_SUCCESS;
}
/**
 * Does global AMD-V initialization (called during module initialization).
 *
 * @returns VBox status code.
 */
VMMR0DECL(int) SVMR0GlobalInit(void)
{
    /*
     * Allocate 12 KB (3 pages) for the IO bitmap. Since this is non-optional and we always intercept
     * all IO accesses, it's done once globally here instead of per-VM.
     */
    int rc = RTR0MemObjAllocCont(&g_hMemObjIOBitmap, 3 << PAGE_SHIFT, false /* fExecutable */);
    if (RT_FAILURE(rc))
        return rc;

    g_pvIOBitmap     = RTR0MemObjAddress(g_hMemObjIOBitmap);
    g_HCPhysIOBitmap = RTR0MemObjGetPagePhysAddr(g_hMemObjIOBitmap, 0 /* iPage */);

    /* Set all bits to intercept all IO accesses. */
    ASMMemFill32(g_pvIOBitmap, 3 << PAGE_SHIFT, UINT32_C(0xffffffff));
    return VINF_SUCCESS;
}
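/* An illustrative helper (not from the original file): the IOPM holds one bit
   per I/O port, so with every bit set above any IN/OUT to any port causes a
   #VMEXIT. Testing an individual port's bit would look like this: */
DECLINLINE(bool) hmR0SvmExampleIsPortIntercepted(uint16_t uPort)
{
    uint8_t const *pbIopm = (uint8_t const *)g_pvIOBitmap;
    return RT_BOOL(pbIopm[uPort / 8] & RT_BIT(uPort & 7));
}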
/**
 * Does global AMD-V termination (called during module termination).
 */
VMMR0DECL(void) SVMR0GlobalTerm(void)
{
    if (g_hMemObjIOBitmap != NIL_RTR0MEMOBJ)
    {
        RTR0MemObjFree(g_hMemObjIOBitmap, false /* fFreeMappings */);
        g_pvIOBitmap      = NULL;
        g_HCPhysIOBitmap  = 0;
        g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
    }
}
/**
 * Frees any allocated per-VCPU structures for a VM.
 *
 * @param   pVM     Pointer to the VM.
 */
Log4(("SVMR0InitVM: AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
* Allocate one page for the host-context VM control block (VMCB). This is used for additional host-state (such as
* FS, GS, Kernel GS Base, etc.) apart from the host-state save area specified in MSR_K8_VM_HSAVE_PA.
rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcbHost, 1 << PAGE_SHIFT, false /* fExecutable */);
goto failure_cleanup;
pVCpu->hm.s.svm.HCPhysVmcbHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcbHost, 0 /* iPage */);
goto failure_cleanup;
* Allocate two pages (8 KB) for the MSR permission bitmap. There doesn't seem to be a way to convince
rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
goto failure_cleanup;
pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0 /* iPage */);
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
unsigned ulBit;
AssertFailed();
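/* A condensed sketch of the MSRPM lookup this function performs (assumed
   reconstruction, not the original body): the 8 KB bitmap spends 2 bits per
   MSR (even bit = read intercept, odd bit = write intercept) across three
   architectural MSR ranges; anything outside them must stay intercepted. */
DECLINLINE(bool) hmR0SvmExampleMsrpmLookup(uint32_t uMsr, uint32_t *pulBit, uint32_t *pulByteOff)
{
    if (uMsr <= 0x00001fff)                                   /* Pentium-compatible MSRs. */
    {
        *pulByteOff = 0;
        *pulBit     = uMsr * 2;
    }
    else if (uMsr >= 0xc0000000 && uMsr <= 0xc0001fff)        /* AMD 6th generation MSRs. */
    {
        *pulByteOff = 0x800;
        *pulBit     = (uMsr - 0xc0000000) * 2;
    }
    else if (uMsr >= 0xc0010000 && uMsr <= 0xc0011fff)        /* AMD 7th/8th generation MSRs. */
    {
        *pulByteOff = 0x1000;
        *pulBit     = (uMsr - 0xc0010000) * 2;
    }
    else
        return false;                                         /* Not covered by the bitmap; keep intercepting. */
    return true;
}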
#ifdef HMSVM_ALWAYS_TRAP_PF
#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
/* CR0, CR4 reads must be intercepted, our shadow values are not necessarily the same as the guest's. */
/* Ignore the priority in the TPR; we take into account the guest TPR anyway while delivering interrupts. */
/* Initially set all VMCB clean bits to 0 indicating that everything should be loaded from memory. */
hmR0SvmSetMsrPermission(pVCpu, MSR_K8_SF_MASK, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
hmR0SvmSetMsrPermission(pVCpu, MSR_K8_FS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
hmR0SvmSetMsrPermission(pVCpu, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
hmR0SvmSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
return rc;
if (!fFlushPending)
/* If we get a flush in 64-bit guest mode, then force a full TLB flush. INVLPGA takes only 32-bit addresses. */
return VINF_SUCCESS;
* Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
* If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB,
bool fNewAsid = false;
fNewAsid = true;
if (fNewAsid)
bool fHitASIDLimit = false;
fHitASIDLimit = true;
if ( !fHitASIDLimit
("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
#ifdef VBOX_WITH_STATISTICS
#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)
VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
return rc;
#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
* When Nested Paging is not available use shadow page tables and intercept #PFs (the latter done in SVMR0SetupVM()).
u64GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
bool fInterceptNM = false;
bool fInterceptMF = false;
u64GuestCR0 |= X86_CR0_NE; /* Use internal x87 FPU exceptions handling rather than external interrupts. */
fInterceptMF = true;
if (fInterceptNM)
if (fInterceptMF)
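/* Illustrative completion of the two branches above (assumed, not the
   original body): the computed flags toggle the #NM/#MF bits in the VMCB
   exception-intercept bitmap. */
if (fInterceptNM)
    pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_NM);
else
    pVmcb->ctrl.u32InterceptException &= ~RT_BIT(X86_XCPT_NM);
if (fInterceptMF)
    pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_MF);
else
    pVmcb->ctrl.u32InterceptException &= ~RT_BIT(X86_XCPT_MF);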
case PGMMODE_REAL:
AssertFailed();
#ifdef VBOX_ENABLE_64_BITS_GUESTS
AssertFailed();
AssertFailed();
return VINF_SUCCESS;
/* If the guest isn't in 64-bit mode, clear the MSR_K6_LME bit from the guest EFER, otherwise AMD-V expects AMD64 shadow paging. */
bool fInterceptDB = false;
bool fInterceptMovDRx = false;
/* AMD-V doesn't have any monitor-trap flag equivalent. Instead, enable tracing in the guest and trap #DB. */
fInterceptDB = true;
fInterceptMovDRx = true;
/* For the first time we would need to intercept MOV DRx accesses even when the guest debug registers aren't loaded. */
fInterceptMovDRx = true;
if (fInterceptDB)
if (fInterceptMovDRx)
return VINF_SUCCESS;
bool fPendingIntr;
/* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
if (fPendingIntr)
/* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
/* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
if (fPendingIntr)
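/* Illustrative completion (assumed field names, not the original body): with
   interrupts pending, intercept CR8 writes so a TPR drop causes an immediate
   #VMEXIT and lets us re-evaluate interrupt delivery. */
if (fPendingIntr)
    pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
else
    pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;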
return rc;
#ifndef VBOX_ENABLE_64_BITS_GUESTS
return VINF_SUCCESS;
return VINF_SUCCESS;
return VINF_SUCCESS;
return VINF_SUCCESS;
AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestControlRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
/* hmR0SvmLoadGuestDebugRegs() must be called -after- updating guest RFLAGS as the RFLAGS may need to be changed. */
AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0SvmSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
return rc;
* Guest Control registers: CR2, CR3 (handled at the end) - accesses to other control registers are always intercepted.
#ifdef VBOX_STRICT
* This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmp's to ring-3.
#ifdef VBOX_STRICT
/* VMMRZCallRing3() already makes sure we never get called as a result of a longjmp due to an assertion, */
/* Please, no longjumps here (a logging flush must not jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
/* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPU pVCpu, PSVMEVENT pEvent, RTGCUINTPTR GCPtrFaultAddress)
#ifdef VBOX_STRICT
if (GCPtrFaultAddress)
("hmR0SvmSetPendingEvent: Setting fault-address for non-#PF. u8Vector=%#x Type=%#RX32 GCPtrFaultAddr=%#RGx\n",
Log4(("hmR0SvmSetPendingEvent: u=%#RX64 u8Vector=%#x ErrorCodeValid=%#x ErrorCode=%#RX32\n", pEvent->u,
pEvent->n.u8Vector, pEvent->n.u3Type, (uint8_t)pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx, PSVMEVENT pEvent)
Event.u = 0;
switch (uVector)
case X86_XCPT_PF:
case X86_XCPT_DF:
case X86_XCPT_TS:
case X86_XCPT_NP:
case X86_XCPT_SS:
case X86_XCPT_GP:
case X86_XCPT_AC:
Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%RTbool uErrorCode=%#RX32\n", Event.u, Event.n.u8Vector,
Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
switch (uVectorType)
case SVM_EVENT_EXTERNAL_IRQ:
case SVM_EVENT_NMI:
case SVM_EVENT_SOFTWARE_INT:
case SVM_EVENT_EXCEPTION:
* Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
* We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
return uIntrState;
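/* A sketch of the interrupt-shadow evaluation described above (assumed
   reconstruction; VMCB field and flag names as assumed here): the shadow is
   only kept armed while RIP still equals the instruction that raised it. */
if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
{
    if (EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
        pVmcb->ctrl.u64IntShadow |= SVM_INTERRUPT_SHADOW_ACTIVE;   /* Delay interrupts by one instruction. */
    else
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);        /* We moved past it; the shadow is stale. */
}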
pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0; /* Not necessary as we #VMEXIT for delivering the interrupt. */
Event.u = 0;
bool fInject = true;
if ( fIntShadow
fInject = false;
if (fInject)
else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
if (!fIntShadow)
/* Check if there are guest external interrupts (PIC/APIC) pending and inject them, if the guest can receive them. */
if ( !fBlockInt
&& !fIntShadow)
#ifdef VBOX_STRICT
if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
|| VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
return rc;
int rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
return rc;
return VINF_EM_PENDING_REQUEST;
return VINF_PGM_POOL_FLUSH_PENDING;
return VINF_EM_RAW_TO_R3;
return VINF_SUCCESS;
* clearing the common-state (TRPM/forceflags), we must undo those changes so
DECLINLINE(int) hmR0SvmPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
return rc;
/* We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.) */
/* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
return VINF_EM_RAW_INTERRUPT;
return VINF_SUCCESS;
DECLINLINE(void) hmR0SvmPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
/** @todo I don't see the point of this, VMMR0EntryFast() already disables interrupts for the entire period. */
* Re-enable nested paging (automatically disabled on every VM-exit). See AMD spec. 15.25.3 "Enabling Nested Paging".
* We avoid changing the corresponding VMCB Clean Bit as we're not changing it to a different value since the previous run.
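/* An illustrative one-liner for the comment above (VMCB field name as assumed
   here): nested paging is re-asserted before every run since the CPU clears
   it on #VMEXIT; the clean bit is deliberately left alone. */
pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;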
AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
/* If VMCB Clean Bits isn't supported by the CPU, simply mark all state-bits as dirty, indicating (re)load-from-VMCB. */
ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
* 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
* using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
* See the MSDN docs, "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage", for details.
#ifdef VBOX_WITH_KERNEL_USING_XMM
return HMR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
DECLINLINE(void) hmR0SvmPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
pVmcb->ctrl.u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL; /* Mark the VMCB-state cache as unmodified by VMM. */
hmR0SvmSaveGuestState(pVCpu, pMixedCtx); /* Save the guest state from the VMCB to the guest-CPU context. */
/* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
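/* A sketch of the TPR write-back described above (assumed reconstruction,
   not the original body): if the guest changed its TPR during the run,
   propagate it to the virtual APIC; with TPR patching the value would live
   in the LSTAR MSR instead of the VTPR field. */
if (pSvmTransient->u8GuestTpr != pVmcb->ctrl.IntCtrl.n.u8VTPR)
{
    int rc2 = PDMApicSetTPR(pVCpu, pVmcb->ctrl.IntCtrl.n.u8VTPR << 4);   /* VTPR bits 3:0 -> TPR bits 7:4. */
    AssertRC(rc2);
}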
for (;; cLoops++)
/* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
* Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
* Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
|| SvmTransient.u64ExitCode == (uint64_t)SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
return rc;
AssertMsg(SvmTransient.u64ExitCode != (uint64_t)SVM_EXIT_INVALID, ("%#RX64\n", SvmTransient.u64ExitCode));
return rc;
* The ordering of the case labels is based on most-frequently-occurring VM-exits for most guests under
case SVM_EXIT_NPF:
case SVM_EXIT_IOIO:
case SVM_EXIT_RDTSC:
case SVM_EXIT_RDTSCP:
case SVM_EXIT_CPUID:
case SVM_EXIT_MONITOR:
case SVM_EXIT_MWAIT:
case SVM_EXIT_READ_CR0:
case SVM_EXIT_READ_CR3:
case SVM_EXIT_READ_CR4:
case SVM_EXIT_WRITE_CR0:
case SVM_EXIT_WRITE_CR3:
case SVM_EXIT_WRITE_CR4:
case SVM_EXIT_WRITE_CR8:
case SVM_EXIT_VINTR:
case SVM_EXIT_INTR:
case SVM_EXIT_FERR_FREEZE:
case SVM_EXIT_NMI:
case SVM_EXIT_INIT:
case SVM_EXIT_MSR:
case SVM_EXIT_INVLPG:
case SVM_EXIT_WBINVD:
case SVM_EXIT_INVD:
case SVM_EXIT_RDPMC:
case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11: case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13:
case SVM_EXIT_TASK_SWITCH:
case SVM_EXIT_VMMCALL:
case SVM_EXIT_INVLPGA:
case SVM_EXIT_RSM:
case SVM_EXIT_VMRUN:
case SVM_EXIT_VMLOAD:
case SVM_EXIT_VMSAVE:
case SVM_EXIT_STGI:
case SVM_EXIT_CLGI:
case SVM_EXIT_SKINIT:
#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
Event.u = 0;
case X86_XCPT_GP:
case X86_XCPT_BP:
case X86_XCPT_DE:
case X86_XCPT_UD:
case X86_XCPT_SS:
case X86_XCPT_NP:
Log4(("#Xcpt: Vector=%#x at CS:RIP=%04x:%RGv\n", Event.n.u8Vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
return VINF_SUCCESS;
return VERR_SVM_UNEXPECTED_EXIT;
#ifdef DEBUG
# define HMSVM_ASSERT_PREEMPT_CPUID_VAR() \
# define HMSVM_ASSERT_PREEMPT_CPUID() \
RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() \
Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (uint32_t)pVCpu->idCpu)); \
# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() do { } while(0)
return VERR_EM_INTERPRETER;
return VERR_EM_INTERPRETER;
return rc;
return rc;
Log4(("hmR0SvmInterpretInvlpg: EMInterpretDisasCurrent returned %Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode));
return VERR_EM_INTERPRETER;
Event.u = 0;
Event.u = 0;
DECLINLINE(void) hmR0SvmSetPendingXcptPF(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t u32ErrCode, RTGCUINTPTR uFaultAddress)
Event.u = 0;
Event.u = 0;
Event.u = 0;
Event.u = 0;
bool fPending;
if (!pPatch)
case HMTPRINSTR_READ:
case HMTPRINSTR_WRITE_REG:
case HMTPRINSTR_WRITE_IMM:
AssertMsgFailedReturn(("Unexpected patch type %d\n", pPatch->enmType), VERR_SVM_UNEXPECTED_PATCH_TYPE);
return VINF_SUCCESS;
switch (uVector)
case X86_XCPT_GP:
case X86_XCPT_SS:
case X86_XCPT_NP:
case X86_XCPT_TS:
case X86_XCPT_DE:
static int hmR0SvmCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
/* See AMD spec. 15.7.3 "EXITINFO Pseudo-Code". The EXITINTINFO (if valid) contains the prior event (IDT vector)
 * that was being delivered to the guest when the intercepted #VMEXIT (exit vector) occurred. */
uint8_t uExitVector = UINT8_MAX; /* Start off with an invalid vector, updated when it's valid. See below. */
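/* A condensed sketch of the classification this function performs (assumed
   reconstruction, not the original body; SVMREFLECTXCPT is the enum used by
   the switch below, its typedef being elided here), following AMD's EXITINFO
   pseudo-code: two contributory events in a row, or a #PF while vectoring,
   escalate to #DF; everything else re-injects the original event. */
DECLINLINE(SVMREFLECTXCPT) hmR0SvmExampleClassifyReflect(uint8_t uIdtVector, uint8_t uExitVector, bool fVectoringPF)
{
    if (   fVectoringPF
        || (   (HMSVM_CONTRIBUTORY_XCPT_MASK & RT_BIT(uIdtVector))
            && (HMSVM_CONTRIBUTORY_XCPT_MASK & RT_BIT(uExitVector))))
        return SVMREFLECTXCPT_DF;
    return SVMREFLECTXCPT_XCPT;
}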
Log4(("IDT: Pending vectoring #DF %#RX64 uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo, uExitVector));
* If event delivery caused a #VMEXIT that is not an exception (e.g. #NPF) then reflect the original
switch (enmReflect)
case SVMREFLECTXCPT_XCPT:
/* If uExitVector is #PF, CR2 value will be updated from the VMCB if it's a guest #PF. See hmR0SvmExitXcptPF(). */
Log4(("IDT: Pending vectoring event %#RX64 ErrValid=%RTbool Err=%#RX32\n", pVmcb->ctrl.ExitIntInfo.u,
case SVMREFLECTXCPT_DF:
Log4(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo,
case SVMREFLECTXCPT_TF:
return rc;
/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #VMEXIT handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
/* 32-bit Windows hosts (4 cores) have trouble with this on Intel; it causes higher interrupt latency. Assuming the
return VINF_SUCCESS;
return VINF_EM_RAW_INTERRUPT;
return VINF_SUCCESS;
return VINF_SUCCESS;
return rc;
return rc;
return rc;
return rc;
return rc;
return rc;
AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
return rc;
AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
return rc;
return VINF_EM_RESET;
return rc;
AssertMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. u64ExitCode=%#RX64 %#x CRx=%#RX64\n",
return rc;
HMSVM_EXIT_DECL hmR0SvmExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
return VINF_SUCCESS;
int rc;
return VINF_SUCCESS;
AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: EMInterpretWrmsr failed rc=%Rrc\n", rc));
AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: EMInterpretRdmsr failed rc=%Rrc\n", rc));
return rc;
return VERR_SVM_UNEXPECTED_EXIT;
return rc;
/* Not necessary for read accesses, but it doesn't hurt for now; will be fixed with decode assist. */
return rc;
/* For now it's the same since we interpret the instruction anyway. Will change when using of Decode Assist is implemented. */
return rc;
static uint32_t const s_aIOSize[8] = { 0, 1, 2, 0, 4, 0, 0, 0 }; /* Size of the I/O accesses in bytes. */
static uint32_t const s_aIOOpAnd[8] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 }; /* AND masks for saving
/* Refer AMD spec. 15.10.2 "IN and OUT Behaviour" and Figure 15-2. "EXITINFO1 for IOIO Intercept" for the format. */
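/* A decoding sketch for EXITINFO1 using the tables above (assumed
   reconstruction, not the original body; union/field names as assumed): */
SVMIOIOEXIT IoExitInfo;
IoExitInfo.u            = pVmcb->ctrl.u64ExitInfo1;
uint32_t const uIOWidth = (IoExitInfo.u >> 4) & 0x7;       /* One-hot SZ8/SZ16/SZ32 bits. */
uint32_t const uIOSize  = s_aIOSize[uIOWidth];             /* Access size in bytes (0 = invalid encoding). */
uint32_t const uAndVal  = s_aIOOpAnd[uIOWidth];            /* Mask for merging a partial-register result. */
bool     const fIOWrite = (IoExitInfo.n.u1Type == 0);      /* TYPE bit: 0 = OUT, 1 = IN. */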
return VERR_EM_INTERPRETER;
int rc;
VBOXSTRICTRC rc2 = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
VBOXSTRICTRC rc2 = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, uIOSize);
HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, uIOSize);
#ifdef DEBUG
return rc;
/* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */
Log4(("#NPF at CS:RIP=%04x:%#RX64 faultaddr=%RGp errcode=%#x \n", pCtx->cs.Sel, pCtx->rip, GCPhysFaultAddr, u32ErrCode));
#ifdef VBOX_HM_WITH_GUEST_PATCHING
|| (u32ErrCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) /* MMIO page. */
if (!pPatch)
return VINF_EM_HM_PATCH_TPR_INSTR;
int rc;
VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr,
* of the page containing the instruction via the guest's page tables (we would invalidate the guest page
* in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
return rc;
rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmNestedPagingMode, u32ErrCode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr);
return rc;
pVmcb->ctrl.IntCtrl.n.u1VIrqValid = 0; /* No virtual interrupts pending, we'll inject the current one before reentry. */
/* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
/* Deliver the pending interrupt via hmR0SvmPreRunGuest()->hmR0SvmInjectEventVmcb() and resume guest execution. */
return VINF_SUCCESS;
Log4(("hmR0SvmExitTaskSwitch: TS occurred during event delivery. Kept pending u8Vector=%#x\n", Event.n.u8Vector));
return VERR_EM_INTERPRETER;
return VINF_SUCCESS;
Log4(("#PF: Guest page fault at %04X:%RGv FaultAddr=%RGv ErrCode=%#x\n", pCtx->cs, (RTGCPTR)pCtx->rip, uFaultAddress,
u32ErrCode));
return VINF_SUCCESS;
#ifdef VBOX_HM_WITH_GUEST_PATCHING
if (!pPatch)
return VINF_EM_HM_PATCH_TPR_INSTR;
Log4(("#PF: uFaultAddress=%#RX64 cs:rip=%#04x:%#RX64 u32ErrCode %#RX32 cr3=%#RX64\n", uFaultAddress, pCtx->cs.Sel,
return rc;
return VINF_SUCCESS;
return rc;
#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
return VINF_SUCCESS;
return VINF_SUCCESS;
int rc;
return rc;
/* This can be a fault-type #DB (instruction breakpoint) or a trap-type #DB (data breakpoint). However, for both cases
DR6 and DR7 are updated to what the exception handler expects. See AMD spec. 15.12.2 "#DB (Debug)". */
Event.u = 0;
return rc;