/* HMVMXR0.cpp revision 99a9c374a950f1d39dc3aa49dc2e5e6d11520eae */
/** @file
 * HM VMX (Intel VT-x) - Host Context Ring-0.
 */

/*
 * Copyright (C) 2012-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */
/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#if defined(RT_ARCH_AMD64)
# define HMVMX_IS_64BIT_HOST_MODE()    (true)
#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
# define HMVMX_IS_64BIT_HOST_MODE()    (g_fVMXIs64bitHost != 0)
#else
# define HMVMX_IS_64BIT_HOST_MODE()    (false)
#endif
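
/* Note: HMVMX_IS_64BIT_HOST_MODE() lets code shared between host widths pick
   the right path: it folds to a compile-time constant on pure 64-bit and pure
   32-bit hosts and becomes a runtime check only on hybrid 32-bit kernels. */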
/** Use the function table. */
#define HMVMX_USE_FUNCTION_TABLE

/** This bit indicates the segment selector is unusable in VT-x. */

/** Determine which tagged-TLB flush handler to use. */
#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID          0
#define HMVMX_FLUSH_TAGGED_TLB_EPT               1
#define HMVMX_FLUSH_TAGGED_TLB_VPID              2
#define HMVMX_FLUSH_TAGGED_TLB_NONE              3

/** Updated-guest-state flags. */
#define HMVMX_UPDATED_GUEST_RIP                  RT_BIT(0)
#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(17)
#define HMVMX_UPDATED_GUEST_ALL                  (  HMVMX_UPDATED_GUEST_RIP \
/**
 * Flags to skip redundant reads of some common VMCS fields that are not part of
 * the guest-CPU state but are in the transient structure.
 */
#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO            RT_BIT(0)
#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE      RT_BIT(1)
#define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION            RT_BIT(2)
#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN                RT_BIT(3)
#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO        RT_BIT(4)
#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE  RT_BIT(5)
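
/* Each flag above guards one cached VMCS field in the VMXTRANSIENT structure;
   the hmR0VmxRead*Vmcs() helpers below set the flag after a successful VMREAD
   so that repeated reads within a single VM-exit become no-ops. */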
/**
 * Exception bitmap mask for real-mode guests (real-on-v86). We need to intercept all exceptions manually (except #PF).
 * #NM is also handled separately, see hmR0VmxLoadGuestControlRegs(). #PF need not be intercepted even in real-mode if
 * we have Nested Paging support.
 */
#define HMVMX_REAL_MODE_XCPT_MASK    (  RT_BIT(X86_XCPT_DE)             | RT_BIT(X86_XCPT_DB)    | RT_BIT(X86_XCPT_NMI)   \
                                      | RT_BIT(X86_XCPT_BP)             | RT_BIT(X86_XCPT_OF)    | RT_BIT(X86_XCPT_BR)    \
                                      | RT_BIT(X86_XCPT_UD)  /* RT_BIT(X86_XCPT_NM) */           | RT_BIT(X86_XCPT_DF)    \
                                      | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS)    | RT_BIT(X86_XCPT_NP)    \
                                      | RT_BIT(X86_XCPT_SS)             | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */   \
                                      | RT_BIT(X86_XCPT_MF)             | RT_BIT(X86_XCPT_AC)    | RT_BIT(X86_XCPT_MC)    \
/**
 * Exception bitmap mask for all contributory exceptions.
 */
#define HMVMX_CONTRIBUTORY_XCPT_MASK  (  RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
                                       | RT_BIT(X86_XCPT_DE))

/** Maximum VM-instruction error number. */
#define HMVMX_INSTR_ERROR_MAX     28
/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * A state structure for holding miscellaneous information across
 * VMX non-root operation and restored after the transition.
 */
typedef struct VMXTRANSIENT
{
    /** The host's rflags/eflags. */
    RTCCUINTREG     uEFlags;
    /** The guest's LSTAR MSR value used for TPR patching for 32-bit guests. */
    uint64_t        u64LStarMsr;
    /** The guest's TPR value used for TPR shadowing. */
    uint8_t         u8GuestTpr;
    /** Alignment. */
    uint8_t         abAlignment0[7];

    /** The basic VM-exit reason. */
    uint16_t        uExitReason;
    /** Alignment. */
    uint16_t        u16Alignment0;
    /** The VM-exit interruption error code. */
    uint32_t        uExitIntrErrorCode;
    /** The VM-exit exit qualification. */
    RTGCUINTPTR     uExitQualification;
#if GC_ARCH_BITS == 32
    /** Alignment. */
    uint32_t        u32Alignment1;
#endif
    /** The VM-exit interruption-information field. */
    uint32_t        uExitIntrInfo;
    /** The VM-exit instruction-length field. */
    uint32_t        cbInstr;
    /** Whether the VM-entry failed or not. */
    bool            fVMEntryFailed;
    /** Alignment. */
    uint8_t         abAlignment1[7];

    /** The VM-entry interruption-information field. */
    uint32_t        uEntryIntrInfo;
    /** The VM-entry exception error code field. */
    uint32_t        uEntryXcptErrorCode;
    /** The VM-entry instruction length field. */
    uint32_t        cbEntryInstr;

    /** IDT-vectoring information field. */
    uint32_t        uIdtVectoringInfo;
    /** IDT-vectoring error code. */
    uint32_t        uIdtVectoringErrorCode;

    /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
    uint32_t        fVmcsFieldsRead;
    /** Whether TSC-offsetting should be setup before VM-entry. */
    bool            fUpdateTscOffsettingAndPreemptTimer;
    /** Whether the VM-exit was caused by a page-fault during delivery of a
     *  contributory exception or a page-fault. */
    bool            fVectoringPF;
} VMXTRANSIENT, *PVMXTRANSIENT;
AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntrInfo, sizeof(uint64_t));
AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntrInfo, sizeof(uint64_t));
/**
 * MSR-bitmap read permissions.
 */
typedef enum VMXMSREXITREAD
{
    /** Reading this MSR causes a VM-exit. */
    VMXMSREXIT_INTERCEPT_READ = 0xb,
    /** Reading this MSR does not cause a VM-exit. */
    VMXMSREXIT_PASSTHRU_READ
} VMXMSREXITREAD;

/**
 * MSR-bitmap write permissions.
 */
typedef enum VMXMSREXITWRITE
{
    /** Writing to this MSR causes a VM-exit. */
    VMXMSREXIT_INTERCEPT_WRITE = 0xd,
    /** Writing to this MSR does not cause a VM-exit. */
    VMXMSREXIT_PASSTHRU_WRITE
} VMXMSREXITWRITE;
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
static int  hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
                                   uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState);
#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
static int  hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
#endif
DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
static DECLCALLBACK(int) hmR0VmxExitXcptNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/**
 * VM-exit handler.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
 *                          out-of-sync. Make sure to update the required
 *                          fields before using them.
 * @param   pVmxTransient   Pointer to the VMX-transient structure.
 */
typedef DECLCALLBACK(int) FNVMEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
/** Pointer to VM-exit handler. */
typedef FNVMEXITHANDLER *PFNVMEXITHANDLER;
#ifdef HMVMX_USE_FUNCTION_TABLE
/**
 * VMX_EXIT dispatch table.
 */
static const PFNVMEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
{
 /* 33  VMX_EXIT_ERR_INVALID_GUEST_STATE */  hmR0VmxExitErrInvalidGuestState,
 /* 41  VMX_EXIT_ERR_MACHINE_CHECK       */  hmR0VmxExitErrMachineCheck,
 /* 42  UNDEFINED                        */  hmR0VmxExitErrUndefined,
 /* 43  VMX_EXIT_TPR_BELOW_THRESHOLD     */  hmR0VmxExitTprBelowThreshold,
};
#endif /* HMVMX_USE_FUNCTION_TABLE */
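
/* Illustrative dispatch (a sketch; the actual invocation site is elided from
   this excerpt): with HMVMX_USE_FUNCTION_TABLE defined, a VM-exit is routed
   through the table by its basic exit reason, e.g.:
       rc = g_apfnVMExitHandlers[pVmxTransient->uExitReason](pVCpu, pMixedCtx, pVmxTransient);
 */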
#ifdef VBOX_STRICT
static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
{
    /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
    /* 16 */ "VM entry with invalid executive-VMCS pointer.",
    /* 18 */ "VM entry with executive-VMCS pointer not VMXON pointer.",
    /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
    /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
    /* 25 */ "VM entry with invalid VM-execution control fields in executive VMCS.",
};
#endif /* VBOX_STRICT */
/**
 * Updates the VM's last error record. If there was a VMX instruction error,
 * reads the error data from the VMCS and updates VCPU's last error record as
 * well.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU (can be NULL if @a rc is not
 *                  VERR_VMX_UNABLE_TO_START_VM or
 *                  VERR_VMX_INVALID_VMCS_FIELD).
 * @param   rc      The error code.
 */
static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
{
    AssertPtr(pVM);
    if (   rc == VERR_VMX_INVALID_VMCS_FIELD
        || rc == VERR_VMX_UNABLE_TO_START_VM)
    {
        AssertPtrReturnVoid(pVCpu);
        VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.lasterror.u32InstrError);
    }
    pVM->hm.s.lLastError = rc;
}
/**
 * Reads the VM-entry interruption-information field from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 *
 * @remarks No-long-jump zone!!!
 */
DECLINLINE(int) hmR0VmxReadEntryIntrInfoVmcs(PVMXTRANSIENT pVmxTransient)
{
    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntrInfo);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}
/**
 * Reads the VM-entry exception error code field from the VMCS into
 * the VMX transient structure.
 *
 * @returns VBox status code.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 *
 * @remarks No-long-jump zone!!!
 */
DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
{
    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}
/**
 * Reads the VM-entry instruction length field from the VMCS into
 * the VMX transient structure.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 *
 * @remarks No-long-jump zone!!!
 */
DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}
/**
 * Reads the VM-exit interruption-information field from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 */
DECLINLINE(int) hmR0VmxReadExitIntrInfoVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
    {
        int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntrInfo);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
    }
    return VINF_SUCCESS;
}
/**
 * Reads the VM-exit interruption error code from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 */
DECLINLINE(int) hmR0VmxReadExitIntrErrorCodeVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
    {
        int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntrErrorCode);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
    }
    return VINF_SUCCESS;
}
/**
 * Reads the VM-exit instruction length field from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 */
DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
    {
        int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
    }
    return VINF_SUCCESS;
}
/**
 * Reads the exit qualification from the VMCS into the VMX transient structure.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 */
DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
    {
        int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
    }
    return VINF_SUCCESS;
}
/**
 * Reads the IDT-vectoring information field from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 *
 * @remarks No-long-jump zone!!!
 */
DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
    {
        int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
    }
    return VINF_SUCCESS;
}
/**
 * Reads the IDT-vectoring error code from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 */
DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
    {
        int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
    }
    return VINF_SUCCESS;
}
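
/* Typical usage in an exit handler (a sketch, not taken from this excerpt):
   the helpers are idempotent thanks to fVmcsFieldsRead, so handlers may call
   them freely without incurring redundant VMREADs, e.g.:
       int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
       rc    |= hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
       AssertRCReturn(rc, rc);
 */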
/**
 * Enters VMX root mode operation on the current CPU.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM (optional, can be NULL, after
 *                          a resume).
 * @param   HCPhysCpuPage   Physical address of the VMXON region.
 * @param   pvCpuPage       Pointer to the VMXON region.
 */
static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
{
    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);

    if (pVM)
    {
        /* Write the VMCS revision dword to the VMXON region. */
        *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
    }

    /* Enable the VMX bit in CR4 if necessary. */
    RTCCUINTREG uCr4 = ASMGetCR4();
    if (!(uCr4 & X86_CR4_VMXE))
        ASMSetCR4(uCr4 | X86_CR4_VMXE);

    /* Enter VMX root mode. */
    int rc = VMXEnable(HCPhysCpuPage);
    if (RT_FAILURE(rc))
        ASMSetCR4(uCr4);
    return rc;
}
/**
 * Exits VMX root mode operation on the current CPU.
 *
 * @returns VBox status code.
 */
static int hmR0VmxLeaveRootMode(void)
{
    /* If we're for some reason not in VMX root mode, then don't leave it. */
    RTCCUINTREG uHostCR4 = ASMGetCR4();
    if (uHostCR4 & X86_CR4_VMXE)
    {
        /* Exit VMX root mode and clear the VMX bit in CR4. */
        VMXDisable();
        ASMSetCR4(uHostCR4 & ~X86_CR4_VMXE);
        return VINF_SUCCESS;
    }
    return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
}
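
/* hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode() bracket all VMX operation
   on a CPU: VMXON requires CR4.VMXE to be set and a VMXON region starting with
   the VMCS revision identifier, and VMXOFF must be executed before CR4.VMXE is
   cleared again (see the Intel spec. on entering and leaving VMX operation). */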
/**
 * Allocates and maps one physically contiguous page. The allocated page is
 * zeroed out (used by various VT-x structures).
 *
 * @returns IPRT status code.
 * @param   pMemObj     Pointer to the ring-0 memory object.
 * @param   ppVirt      Where to store the virtual address of the
 *                      allocation.
 * @param   pHCPhys     Where to store the physical address of the
 *                      allocation.
 */
DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
{
    int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
    if (RT_FAILURE(rc))
        return rc;
    *ppVirt  = RTR0MemObjAddress(*pMemObj);
    *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
    ASMMemZero32(*ppVirt, PAGE_SIZE);
    return VINF_SUCCESS;
}
/**
 * Frees and unmaps an allocated physical page.
 *
 * @param   pMemObj     Pointer to the ring-0 memory object.
 * @param   ppVirt      Where to re-initialize the virtual address of
 *                      allocation as 0.
 * @param   pHCPhys     Where to re-initialize the physical address of the
 *                      allocation as 0.
 */
DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
{
    AssertPtr(pMemObj);
    AssertPtr(ppVirt);
    AssertPtr(pHCPhys);
    if (*pMemObj != NIL_RTR0MEMOBJ)
    {
        int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
        AssertRC(rc);
        *pMemObj = NIL_RTR0MEMOBJ;
        *ppVirt  = 0;
        *pHCPhys = 0;
    }
}
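
/* A sketch of the intended pairing (the hypothetical local names are for
   illustration only; the real allocation sites follow later in this file):
       RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
       RTR0PTR    pvPage  = NIL_RTR0PTR;
       RTHCPHYS   HCPhys  = 0;
       int rc = hmR0VmxPageAllocZ(&hMemObj, &pvPage, &HCPhys);
       ...
       hmR0VmxPageFree(&hMemObj, &pvPage, &HCPhys);
 */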
/**
 * Worker function to free VT-x related structures.
 *
 * @param   pVM     Pointer to the VM.
 */
static void hmR0VmxStructsFree(PVM pVM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
        hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
            hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
        hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
        hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
    }
    hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
    hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
}
/**
 * Worker function to allocate VT-x related VM structures.
 *
 * @returns IPRT status code.
 * @param   pVM     Pointer to the VM.
 */
static int hmR0VmxStructsAlloc(PVM pVM)
{
    /*
     * Initialize members up-front so we can cleanup properly on allocation failure.
     */
#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \

    /*
     * Allocate all the VT-x structures.
     */
    int rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
    if (RT_FAILURE(rc))
        goto cleanup;
    strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
    *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xDEADBEEFDEADBEEF);

    /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
    if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
    {
        rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
                               &pVM->hm.s.vmx.HCPhysApicAccess);
        if (RT_FAILURE(rc))
            goto cleanup;
    }

    /*
     * Initialize per-VCPU VT-x structures.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        /* Allocate the VM control structure (VMCS). */
        AssertReturn(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.vmx_basic_info) <= PAGE_SIZE, VERR_INTERNAL_ERROR);
        rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
        if (RT_FAILURE(rc))
            goto cleanup;

        /* Allocate the Virtual-APIC page for transparent TPR accesses. */
        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
        {
            rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
                                   &pVCpu->hm.s.vmx.HCPhysVirtApic);
            if (RT_FAILURE(rc))
                goto cleanup;
        }

        /* Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for transparent accesses of specific MSRs. */
        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
        {
            rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
                                   &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
            if (RT_FAILURE(rc))
                goto cleanup;
        }

        /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
        rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
        if (RT_FAILURE(rc))
            goto cleanup;

        /* Allocate the VM-exit MSR-load page for the host MSRs. */
        rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
        if (RT_FAILURE(rc))
            goto cleanup;
    }

    return VINF_SUCCESS;

cleanup:
    hmR0VmxStructsFree(pVM);
    return rc;
}
/**
 * Does global VT-x initialization (called during module initialization).
 *
 * @returns VBox status code.
 */
VMMR0DECL(int) VMXR0GlobalInit(void)
{
#ifdef HMVMX_USE_FUNCTION_TABLE
    /* Paranoia: ensure the VM-exit dispatch table is fully populated. */
    AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
    for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
        Assert(g_apfnVMExitHandlers[i]);
#endif
    return VINF_SUCCESS;
}

/**
 * Does global VT-x termination (called during module termination).
 */
VMMR0DECL(void) VMXR0GlobalTerm(void)
{
    /* Nothing to do currently. */
}
/**
 * Sets up and activates VT-x on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            Pointer to the global CPU info struct.
 * @param   pVM             Pointer to the VM (can be NULL after a host resume
 *                          operation).
 * @param   pvCpuPage       Pointer to the VMXON region (can be NULL if @a
 *                          fEnabledByHost is true).
 * @param   HCPhysCpuPage   Physical address of the VMXON region (can be 0 if
 *                          @a fEnabledByHost is true).
 * @param   fEnabledByHost  Set if SUPR0EnableVTx() or similar was used to
 *                          enable VT-x/AMD-V on the host.
 */
VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
{
    int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);

    /*
     * Flush all VPIDs (in case we or any other hypervisor have been using VPIDs) so that
     * we can avoid an explicit flush while using new VPIDs. We would still need to flush
     * each time while reusing a VPID after hitting the MaxASID limit once.
     */
    if (   pVM
        && (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS))
        hmR0VmxFlushVpid(pVM, NULL /* pvCpu */, VMX_FLUSH_VPID_ALL_CONTEXTS, 0 /* GCPtr */);

    /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
    ++pCpu->cTlbFlushes;
    return rc;
}
/**
 * Deactivates VT-x on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            Pointer to the global CPU info struct.
 * @param   pvCpuPage       Pointer to the VMXON region.
 * @param   HCPhysCpuPage   Physical address of the VMXON region.
 */
VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
{
    NOREF(pCpu); NOREF(pvCpuPage); NOREF(HCPhysCpuPage);
    return hmR0VmxLeaveRootMode();
}
/**
 * Sets the permission bits for the specified MSR in the MSR bitmap.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   uMsr        The MSR value.
 * @param   enmRead     Whether reading this MSR causes a VM-exit.
 * @param   enmWrite    Whether writing this MSR causes a VM-exit.
 */
static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
{
    int32_t iBit;
    uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;

    /*
     * Layout:
     * 0x000 - 0x3ff - Low MSR read bits
     * 0x400 - 0x7ff - High MSR read bits
     * 0x800 - 0xbff - Low MSR write bits
     * 0xc00 - 0xfff - High MSR write bits
     */
    if (uMsr <= 0x00001FFF)
        iBit = uMsr;
    else if (   uMsr >= 0xC0000000
             && uMsr <= 0xC0001FFF)
    {
        iBit = (uMsr - 0xC0000000);
        pbMsrBitmap += 0x400;
    }
    else
    {
        AssertMsgFailed(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
        return;
    }

    Assert(iBit <= 0x1fff);
    if (enmRead == VMXMSREXIT_INTERCEPT_READ)
        ASMBitSet(pbMsrBitmap, iBit);
    else
        ASMBitClear(pbMsrBitmap, iBit);

    if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
        ASMBitSet(pbMsrBitmap + 0x800, iBit);
    else
        ASMBitClear(pbMsrBitmap + 0x800, iBit);
}
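
/* Example (a sketch): let the guest access an MSR such as MSR_IA32_SYSENTER_CS
   directly, without VM-exits, assuming the MSR-bitmap is in use:
       hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
 */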
/**
 * Flushes the TLB using EPT.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   enmFlush    Type of flush.
 */
static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
{
    LogFlowFunc(("pVM=%p pVCpu=%p enmFlush=%d\n", pVM, pVCpu, enmFlush));

    uint64_t descriptor[2];
    descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
    descriptor[1] = 0;                          /* MBZ. Intel spec. 33.3 "VMX Instructions" */

    int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
    AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu->hm.s.vmx.HCPhysEPTP, rc));
}
/**
 * Flushes the TLB using VPID.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU (can be NULL depending on @a
 *                      enmFlush).
 * @param   enmFlush    Type of flush.
 * @param   GCPtr       Virtual address of the page to flush (can be 0 depending
 *                      on @a enmFlush).
 */
static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr)
{
    uint64_t descriptor[2];
    if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
    {
        descriptor[0] = 0;
        descriptor[1] = 0;
    }
    else
    {
        AssertPtr(pVCpu);
        AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
        AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
        descriptor[0] = pVCpu->hm.s.uCurrentAsid;
        descriptor[1] = GCPtr;
    }

    int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
    AssertMsg(rc == VINF_SUCCESS,
              ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
}
/**
 * Invalidates a guest page by guest virtual address. Only relevant for
 * EPT/VPID, otherwise there is nothing really to invalidate.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCVirt      Guest virtual address of the page to invalidate.
 */
VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
{
    LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));

    bool fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    if (!fFlushPending)
    {
        /*
         * We must invalidate the guest TLB entry in either case, we cannot ignore it even for the EPT case.
         * See @bugref{6043} and @bugref{6177}.
         *
         * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() as this
         * function may be called in a loop with individual addresses.
         */
        if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
            hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
        else
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    }

    return VINF_SUCCESS;
}
/**
 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
 * otherwise there is nothing really to invalidate.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPhys      Guest physical address of the page to invalidate.
 */
VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
{
    /*
     * We cannot flush a page by guest-physical address. invvpid takes only a linear address while invept only flushes
     * by EPT, not individual addresses. We update the force flag here and flush before the next VM-entry in hmR0VmxFlushTLB*().
     * This function might be called in a loop.
     */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    return VINF_SUCCESS;
}
/**
 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
 * case where neither EPT nor VPID is supported by the CPU.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 *
 * @remarks Called with interrupts disabled.
 */
static DECLCALLBACK(void) hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu)
/**
 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 *
 * @remarks All references to "ASID" in this function pertain to "VPID" in
 *          Intel's nomenclature. The reason is to avoid confusion in compare
 *          statements, since the host-CPU copies are named "ASID".
 * @remarks Called with interrupts disabled.
 */
static DECLCALLBACK(void) hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu)
{
    AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
              ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
               "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));

    /*
     * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
     * This can happen both for start & resume due to long jumps back to ring-3.
     * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
     * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
     */
    bool fNewASID = false;

    /*
     * Check for explicit TLB shootdowns.
     */
    hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);

    if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
        hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_SINGLE_CONTEXT, 0 /* GCPtr */);

    hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);

    AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
              ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
               pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes, pCpu->uCurrentAsid, pCpu->cTlbFlushes));

    /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
     *        not be executed. See hmQueueInvlPage() where it is commented
     *        out. Support individual entry flushing someday. */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
    {
        /*
         * Flush individual guest entries using VPID from the TLB or as little as possible with EPT
         * as supported by the CPU.
         */
        if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
        {
            for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
                hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
        }
    }
    else
        STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);

    AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
              ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
    AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
              ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
              ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));

    /* Update VMCS with the VPID. */
    int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
    AssertRC(rc);
}
/**
 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 *
 * @remarks Called with interrupts disabled.
 */
static DECLCALLBACK(void) hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu)
{
    AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
    AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));

    /*
     * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
     * This can happen both for start & resume due to long jumps back to ring-3.
     * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
     */
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);

    /* Check for explicit TLB shootdown flushes. */
    hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);

    /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
     *        not be executed. See hmQueueInvlPage() where it is commented
     *        out. Support individual entry flushing someday. */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
    {
        /* We cannot flush individual entries without VPID support. Flush using EPT. */
        hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
    }
    else
        STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
}
601c2e1ed5ec8de33296fed3938598da99915e7adhain * Flushes the tagged-TLB entries for VPID CPUs as necessary.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @returns VBox status code.
601c2e1ed5ec8de33296fed3938598da99915e7adhain * @param pVM Pointer to the VM.
601c2e1ed5ec8de33296fed3938598da99915e7adhain * @param pVCpu Pointer to the VMCPU.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @remarks Called with interrupts disabled.
601c2e1ed5ec8de33296fed3938598da99915e7adhainstatic DECLCALLBACK(void) hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu)
    AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked with VPID disabled."));
    AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with nested paging enabled."));

     * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
     * This can happen both for start & resume due to long jumps back to ring-3.
     * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB,
     * or the host CPU has come back online after a suspend/resume, so we cannot reuse the current ASID anymore.
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);

    /* Check for explicit TLB shootdown flushes. */
     * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb()),
     * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
     * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush as well) - an obscure corner case.
    hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
    AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
              ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
    /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
     *        not be executed. See hmQueueInvlPage() where it is commented
     *        out. Support individual entry flushing someday. */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
        /* Flush individual guest entries using VPID, or as little as possible with EPT, as supported by the CPU. */
        if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
            for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
                hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
        hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
              ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
    AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
              ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
              ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
    int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
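/*
 * Illustrative sketch (not part of the original sources): the per-CPU ASID
 * reuse decision described in the comments above, reduced to plain integers.
 * The type and function names are invented for illustration; the real logic
 * lives in the hmR0VmxFlushTaggedTlb* functions.
 */
typedef struct MYSKETCHHOSTCPU
{
    uint32_t idCpu;         /* Host CPU id. */
    uint32_t cTlbFlushes;   /* Per-CPU TLB-flush generation count. */
} MYSKETCHHOSTCPU;

/* Returns true when the VCPU must take a fresh ASID and flush. */
static bool mySketchNeedTlbFlush(const MYSKETCHHOSTCPU *pCpu, uint32_t idLastCpu, uint32_t cVCpuTlbFlushes)
{
    /* World switch onto a different host CPU: its TLB may hold stale entries tagged with our ASID. */
    if (pCpu->idCpu != idLastCpu)
        return true;
    /* The CPU's flush generation moved (ASID wrap-around or suspend/resume): our old ASID is no longer valid. */
    if (pCpu->cTlbFlushes != cVCpuTlbFlushes)
        return true;
    return false;
}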
 * Flushes the guest TLB entry based on CPU capabilities.
 *
 * @param   pVCpu     Pointer to the VMCPU.
        case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu); break;
        case HMVMX_FLUSH_TAGGED_TLB_EPT:      hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu); break;
        case HMVMX_FLUSH_TAGGED_TLB_VPID:     hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu); break;
        case HMVMX_FLUSH_TAGGED_TLB_NONE:     hmR0VmxFlushTaggedTlbNone(pVM, pVCpu); break;
            AssertMsgFailed(("Invalid flush-tag function identifier\n"));

 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
 * TLB entries from the host TLB before VM-entry.
 *
 * @returns VBox status code.
 * @param   pVM       Pointer to the VM.
     * Determine the optimal flush type for nested paging.
     * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we've already set up unrestricted
     * guest execution (see hmR3InitFinalizeR0()).
    if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
        if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
            pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
        else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
            pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
            /* Shouldn't happen. EPT is supported but no suitable flush type is. */
            pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
        /* Make sure the write-back cacheable memory type for EPT is supported. */
        if (!(pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
            LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.msr.vmx_ept_vpid_caps));
        /* Shouldn't happen. EPT is supported but the INVEPT instruction is not. */
        pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;

     * Determine the optimal flush type for VPID.
    if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
        if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
            pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT;
        else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
            pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS;
            /* Neither SINGLE nor ALL-context flush types for VPID are supported by the CPU. Ignore the VPID capability. */
            if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
                LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
            if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
                LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
            pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
        /* Shouldn't happen. VPID is supported but the INVVPID instruction is not. Ignore the VPID capability. */
        Log(("hmR0VmxSetupTaggedTlb: VPID supported without INVVPID support. Ignoring VPID.\n"));
        pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;

     * Set up the handler for flushing tagged TLBs.
        pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
        pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
        pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
        pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;
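/*
 * Sketch (invented helper, same constants as above): the tagged-TLB handler
 * choice is a pure function of the two capability flags, as the four
 * assignments above show.
 */
static uint32_t mySketchPickTlbHandler(bool fNestedPagingEpt, bool fVpid)
{
    if (fNestedPagingEpt && fVpid)
        return HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
    if (fNestedPagingEpt)
        return HMVMX_FLUSH_TAGGED_TLB_EPT;
    if (fVpid)
        return HMVMX_FLUSH_TAGGED_TLB_VPID;
    return HMVMX_FLUSH_TAGGED_TLB_NONE;
}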
 * Sets up pin-based VM-execution controls in the VMCS.
 *
 * @returns VBox status code.
 * @param   pVM       Pointer to the VM.
 * @param   pVCpu     Pointer to the VMCPU.
    uint32_t val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;   /* Bits set here must always be set. */
    uint32_t zap = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1;      /* Bits cleared here must always be cleared. */

    val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT            /* External interrupts cause a VM-exit. */
         | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT;               /* Non-maskable interrupts (NMIs) cause a VM-exit. */
    Assert(!(val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI));

    /* Enable the VMX preemption timer. */
    Assert(pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER);

        LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
                pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0, val, zap));
    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, val);

    /* Update the VCPU with the currently set pin-based VM-execution controls. */
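/*
 * Sketch of the control-field validity pattern used for every VMX control in
 * this file (helper name invented). Each capability MSR yields "disallowed0"
 * (bits that must be 1) and "allowed1" (bits that may be 1); the LogRel paths
 * above fire when the computed value violates either constraint, i.e. when
 * (val & zap) != val after OR-ing in disallowed0.
 */
static bool mySketchVmxCtlsValid(uint32_t fDisallowed0, uint32_t fAllowed1, uint32_t fVal)
{
    if ((fVal & fDisallowed0) != fDisallowed0)   /* A must-be-1 bit was cleared. */
        return false;
    if (fVal & ~fAllowed1)                       /* A must-be-0 bit was set. */
        return false;
    return true;
}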
 * Sets up processor-based VM-execution controls in the VMCS.
 *
 * @returns VBox status code.
 * @param   pVM       Pointer to the VM.
 * @param   pVCpu     Pointer to the VMCPU.
    uint32_t val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;  /* Bits set here must be set in the VMCS. */
    uint32_t zap = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1;     /* Bits cleared here must be cleared in the VMCS. */

    val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT               /* HLT causes a VM-exit. */
         | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING     /* Use TSC-offsetting. */
         | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT            /* MOV DRx causes a VM-exit. */
         | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT         /* All IO instructions cause a VM-exit. */
         | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT             /* RDPMC causes a VM-exit. */
         | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT           /* MONITOR causes a VM-exit. */
         | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT;            /* MWAIT causes a VM-exit. */

    /* We toggle VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT later; check that it is not required to be always set or always clear. */
    if (   !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
        ||  (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT))
        LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT combo!\n"));

    /* Without nested paging, the INVLPG (which also covers INVPCID) and MOV CR3 instructions should cause VM-exits. */
    Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);                     /* Paranoia. */

    /* Use TPR shadowing if supported by the CPU. */
    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
        Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff));         /* Bits 11:0 MBZ. */
        rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
        val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW;    /* CR8 reads from the Virtual-APIC page; CR8 writes cause a VM-exit based on the TPR threshold. */
        Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT));
        Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT));
        val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT     /* CR8 reads cause a VM-exit. */
             | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT;     /* CR8 writes cause a VM-exit. */

    /* Use MSR bitmaps if supported by the CPU. */
    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
        Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff));        /* Bits 11:0 MBZ. */
        rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
         * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
         * automatically (either as part of the MSR-load/store areas or via dedicated fields in the VMCS).
        hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
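/*
 * Sketch of the MSR-bitmap layout that hmR0VmxSetMsrPermission() manipulates
 * (per the Intel SDM: one 4K page; read bitmaps at offsets 0x000/0x400 and
 * write bitmaps at 0x800/0xC00 for the low 0x00000000..0x1FFF and high
 * 0xC0000000..0xC0001FFF MSR ranges). A set bit intercepts, a clear bit
 * passes the access through. The helper below is an assumed, simplified
 * stand-in, not the real function.
 */
static void mySketchMsrPassthru(uint8_t *pbMsrBitmap, uint32_t idMsr, bool fRead, bool fWrite)
{
    uint32_t offBase;
    if (idMsr <= UINT32_C(0x00001fff))
        offBase = 0;                             /* Low MSR range. */
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
    {
        offBase = 0x400;                         /* High MSR range. */
        idMsr &= UINT32_C(0x1fff);
    }
    else
        return;                                  /* Outside both ranges: always intercepted. */
    if (fRead)
        pbMsrBitmap[offBase + (idMsr >> 3)] &= ~(uint8_t)(1 << (idMsr & 7));           /* Clear read-intercept bit. */
    if (fWrite)
        pbMsrBitmap[offBase + 0x800 + (idMsr >> 3)] &= ~(uint8_t)(1 << (idMsr & 7));   /* Clear write-intercept bit. */
}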
    /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
        LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
                pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0, val, zap));
    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, val);

    /* Update the VCPU with the currently set processor-based VM-execution controls. */

     * Secondary processor-based VM-execution controls.
    if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
        val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;      /* Bits set here must be set in the VMCS. */
        zap = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;         /* Bits cleared here must be cleared in the VMCS. */

        if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
            val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;           /* WBINVD causes a VM-exit. */

         * Without nested paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
         * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT when INVPCID is executed by the guest.
         * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
        if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
            val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;               /* Enable INVPCID support. */
        if (pVM->hm.s.vmx.fUnrestrictedGuest)
            val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST;    /* Enable Unrestricted Execution. */

        /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
        /** @todo VIRT_X2APIC support; it's mutually exclusive with this, so it must be
         *        done dynamically. */
        if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
            Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff));     /* Bits 11:0 MBZ. */
            val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC;             /* Virtualize APIC accesses. */
            rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);

        if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
            val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;                /* Enable RDTSCP support. */
        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
            hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);

        LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! "
                "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0, val, zap));
        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2, val);

        /* Update the VCPU with the currently set secondary processor-based VM-execution controls. */

 * Sets up miscellaneous (everything other than pin- and processor-based
 * VM-execution) control fields in the VMCS.
 *
 * @returns VBox status code.
 * @param   pVM       Pointer to the VM.
 * @param   pVCpu     Pointer to the VMCPU.
    /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestControlRegs()). */
    rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);
    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);

     * Set MASK & MATCH to 0. VMX checks whether GuestPFErrCode & MASK == MATCH. If they are equal (which in our
     * case they always are) and the X86_XCPT_PF bit in the exception bitmap is set, a VM-exit occurs; if the bit
     * is clear, no exit occurs. We thus use the exception bitmap alone to control #PF VM-exits rather than both.
    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0);
    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);
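/*
 * Sketch of the #PF filtering rule the two writes above rely on (Intel SDM:
 * a guest page fault causes a VM-exit iff ((PFEC & MASK) == MATCH) equals the
 * X86_XCPT_PF bit in the exception bitmap). With MASK = MATCH = 0 the
 * comparison is always true, so the exception-bitmap bit alone decides.
 * Hypothetical helper for illustration:
 */
static bool mySketchPageFaultCausesVmExit(uint32_t uPfErrCode, uint32_t uPfecMask, uint32_t uPfecMatch, bool fXcptPfBitSet)
{
    bool fEqual = (uPfErrCode & uPfecMask) == uPfecMatch;
    return fEqual == fXcptPfBitSet;   /* Equal & bit set, or unequal & bit clear => VM-exit. */
}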
    /** @todo Explore the possibility of using IO-bitmaps. */
    /* All I/O instructions cause VM-exits. */
    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);
    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);

    /* Set up MSR auto-loading/auto-storing. */
    Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf));               /* Lower 4 bits MBZ. */
    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
    Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf));                /* Lower 4 bits MBZ. */
    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);

    /* The MSR counts start out as zero; they are updated when MSRs are actually added to the areas. */
    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);

    /* Set the VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
    rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));

    /* Set up debug controls. */
    rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);       /** @todo think about this. */
    rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
 * Sets up the initial exception bitmap in the VMCS based on static conditions
 * (i.e. conditions that cannot ever change at runtime).
 *
 * @returns VBox status code.
 * @param   pVM       Pointer to the VM.
 * @param   pVCpu     Pointer to the VMCPU.
    /* Without nested paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);

 * Sets up the initial guest-state mask. The guest-state mask is consulted
 * before reading guest-state fields from the VMCS, as VMREADs can be expensive
 * in the nested virtualization case (where each one causes a VM-exit).
 *
 * @param   pVCpu     Pointer to the VMCPU.
    /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
    pVCpu->hm.s.vmx.fUpdatedGuestState = HMVMX_UPDATED_GUEST_ALL;

 * Does per-VM VT-x initialization.
 *
 * @returns VBox status code.
 * @param   pVM       Pointer to the VM.
        LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));

 * Does per-VM VT-x termination.
 *
 * @returns VBox status code.
 * @param   pVM       Pointer to the VM.

 * Sets up the VM for execution under VT-x.
 * This function is only called once per-VM during initialization.
 *
 * @returns VBox status code.
 * @param   pVM       Pointer to the VM.
     * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
     * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0().
    /* -XXX- change hmR3InitFinalizeR0() to fail if the pRealModeTSS allocation fails. */

    /* Initialize these always, see hmR3InitFinalizeR0(). */

    /* Set up the tagged-TLB flush handlers. */
        LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));

        /* Set the revision dword at the beginning of the VMCS structure. */
        *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);

        /* Initialize our VMCS region in memory; set the VMCS launch state to "clear". */
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),

        /* Load this VMCS as the current VMCS. */
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),

        /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
 * the VMCS.
 *
 * @returns VBox status code.
 * @param   pVM       Pointer to the VM.
 * @param   pVCpu     Pointer to the VMCPU.
DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
    /* For the darwin 32-bit hybrid kernel, we need the 64-bit CR3 as it uses 64-bit paging. */

 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
 * the host-state area in the VMCS.
 *
 * @returns VBox status code.
 * @param   pVM       Pointer to the VM.
 * @param   pVCpu     Pointer to the VMCPU.
DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
     * Host selector registers.
    /* Darwin seems to use the LDT (the TI flag is set) in the CS & SS selectors, which VT-x doesn't like. */
    /* Note: VT-x is picky about the RPL of the selectors here; we'll restore them manually. */

    /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers". */
    /** @todo Verify if we have any platform that actually runs with DS or ES with
     *        RPL != 0 in kernel space. */
    Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
    Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
    Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
    Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
    Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
    Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));

    /* The assertion is correct, but u32ExitCtls has not been updated yet at this point. */
    if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE))

    /* Write these host selector fields into the host-state area in the VMCS. */
    rc  = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS);
    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS);
    /* Avoid the VMWRITEs as we set the following segments to 0 and the VMCS fields are already 0 (since g_HvmR0 is static). */
    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS);
    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES);
    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS);
    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS);
    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR);
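/*
 * Sketch of the host-selector rules asserted above (Intel spec. 26.2.3),
 * using the selector-layout constants already present in this file; the
 * helper itself is invented. CS and TR additionally must not be null.
 */
static bool mySketchHostSelectorOk(uint16_t uSel, bool fCanBeNull)
{
    if (uSel & X86_SEL_RPL)                      /* RPL must be 0. */
        return false;
    if (uSel & X86_SEL_LDT)                      /* TI must be 0: the selector must reference the GDT. */
        return false;
    if (!fCanBeNull && !(uSel & X86_SEL_MASK))   /* CS and TR must not be null selectors. */
        return false;
    return true;
}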
     * Host GDTR and IDTR.
    /** @todo Despite VT-x -not- restoring the limits on GDTR and IDTR, it should
     *        be safe to -not- save and restore GDTR and IDTR in the assembly
     *        code and just do it here, and not care if the limits are zapped on
     *        VM-exit. */
    rc  = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr);
    rc |= VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr);
    rc  = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);
    rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);

     * Host TR base. Verify that the TR selector doesn't point past the GDT. Masking off the TI and RPL bits
     * is effectively what the CPU does for "scaling by 8". TI is always 0, and RPL should be too in most cases.
        AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel Gdtr.cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
    PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
    /* We need the 64-bit TR base for the hybrid darwin kernel. */
    rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, u64TRBase);
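/*
 * Sketch (raw-byte layout per the Intel SDM, not VBox types): extracting a
 * 64-bit TSS base from the 16-byte system descriptor that the pDesc lookup
 * above points at, i.e. GDT base + (TR & X86_SEL_MASK).
 */
static uint64_t mySketchTssBaseFromDesc(const uint8_t *pbDesc)
{
    uint64_t uBase;
    uBase  = (uint64_t)pbDesc[2] | ((uint64_t)pbDesc[3] << 8);   /* Base 15:0 (descriptor bytes 2-3). */
    uBase |= (uint64_t)pbDesc[4] << 16;                          /* Base 23:16 (byte 4). */
    uBase |= (uint64_t)pbDesc[7] << 24;                          /* Base 31:24 (byte 7). */
    uBase |= (uint64_t)pbDesc[8]  << 32;                         /* Base 63:32 (bytes 8-11; present only in */
    uBase |= (uint64_t)pbDesc[9]  << 40;                         /* 16-byte long-mode system descriptors). */
    uBase |= (uint64_t)pbDesc[10] << 48;
    uBase |= (uint64_t)pbDesc[11] << 56;
    return uBase;
}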
     * Host FS base and GS base.
     * For 32-bit hosts the bases are handled by the assembly code, where we push/pop FS and GS, which
     * takes care of the bases. In 64-bit mode, the MSRs come into play.
#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_GS_BASE, u64GSBase);

 * Saves certain host MSRs in the VM-exit MSR-load area and some in the
 * host-state area of the VMCS. These MSRs will be automatically restored on
 * the host after every successful VM-exit.
 *
 * @returns VBox status code.
 * @param   pVM       Pointer to the VM.
 * @param   pVCpu     Pointer to the VMCPU.
DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
    uint32_t u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
    if (u32HostExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        /* Must match the EFER value in our 64-bit switcher. */
        pHostMsr->u64Value = ASMRdMsr(MSR_K6_EFER) | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    pHostMsr->u64Value = ASMRdMsr(MSR_K6_STAR);                    /* Legacy syscall eip, cs & ss. */
    pHostMsr->u64Value = ASMRdMsr(MSR_K8_LSTAR);                   /* 64-bit mode syscall rip. */
    pHostMsr->u64Value = ASMRdMsr(MSR_K8_SF_MASK);                 /* Syscall flag mask. */
    pHostMsr->u64Value = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);          /* SWAPGS exchange value. */

    /* Shouldn't ever happen, but there -is- a limit. We're well within the recommended 512. */
    if (RT_UNLIKELY(cHostMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)))
        LogRel(("cHostMsrs=%u Cpu=%u\n", cHostMsrs, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)));
    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
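/*
 * Sketch of the auto-load/store area format filled in above (Intel SDM:
 * 16-byte entries of MSR index, reserved dword, 64-bit value). The struct
 * name and the u32Msr/u32Reserved field names are assumptions; only u64Value
 * matches the field actually used above.
 */
typedef struct MYSKETCHAUTOMSR
{
    uint32_t u32Msr;        /* MSR index, e.g. MSR_K8_LSTAR. */
    uint32_t u32Reserved;   /* Must be zero. */
    uint64_t u64Value;      /* Value loaded on VM-exit (host area) or VM-entry (guest area). */
} MYSKETCHAUTOMSR;
/* The area itself must be 16-byte aligned (hence the low-4-bits asserts earlier), and the entry
   count must stay within the limit advertised by MSR_IA32_VMX_MISC, checked right above. */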
     * Host Sysenter MSRs.
    rc |= VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
    /* 64-bit and hybrid 32-bit kernels write the full 64-bit MSR values... */
    rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
    rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
    /* ...while pure 32-bit hosts only need the low dword. */
    rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
    rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));

    /** @todo IA32_PERF_GLOBAL_CTRL, IA32_PAT, IA32_EFER, also see
     *        hmR0VmxSetupExitCtls()!! */

 * Sets up VM-entry controls in the VMCS. These controls can affect things done
 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
 * controls".
 *
 * @returns VBox status code.
 * @param   pVCpu     Pointer to the VMCPU.
 * @param   pMixedCtx Pointer to the guest-CPU context. The data may be
 *                    out-of-sync. Make sure to update the required fields
 *                    before using them.
 *
 * @remarks No-long-jump zone!!!
DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_ENTRY_CTLS)
        uint32_t val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0;  /* Bits set here must be set in the VMCS. */
        uint32_t zap = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1;     /* Bits cleared here must be cleared in the VMCS. */

        /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */

        /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
        Assert(!(val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST));

         * The following should not be set (since we're not in SMM mode):
         * - VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM
         * - VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON

        /** @todo VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR,
         *        VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR,
         *        VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR */

            LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_CONTROLS, val);

        /* Update the VCPU with the currently set VM-entry controls. */
        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_ENTRY_CTLS;
 * Sets up the VM-exit controls in the VMCS.
 *
 * @returns VBox status code.
 * @param   pVM       Pointer to the VM.
 * @param   pVCpu     Pointer to the VMCPU.
 * @param   pMixedCtx Pointer to the guest-CPU context. The data may be
 *                    out-of-sync. Make sure to update the required fields
 *                    before using them.
 *
 * @remarks Requires EFER.
DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_EXIT_CTLS)
        uint32_t val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0;   /* Bits set here must be set in the VMCS. */
        uint32_t zap = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1;      /* Bits cleared here must be cleared in the VMCS. */

        /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */

        /* Set the host long mode active (EFER.LMA) bit (which Intel calls "host address-space size") if necessary. */
#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE));
#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
            val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE;   /* The switcher goes to long mode. */
            Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE));

        /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
        Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT));

        /** @todo VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_PERF_MSR,
         *        VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR,
         *        VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR,
         *        VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR,
         *        VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR. */

        if (pVM->hm.s.vmx.msr.vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
            val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER;

            LogRel(("hmR0VmxLoadGuestExitCtls: invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",

        /* Update the VCPU with the currently set VM-exit controls. */
        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_EXIT_CTLS;
 * Loads the guest APIC and related state.
 *
 * @returns VBox status code.
 * @param   pVM       Pointer to the VM.
 * @param   pVCpu     Pointer to the VMCPU.
 * @param   pMixedCtx Pointer to the guest-CPU context. The data may be
 *                    out-of-sync. Make sure to update the required fields
 *                    before using them.
DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_APIC_STATE)
        /* Set up TPR shadowing. Also set up TPR patching for 32-bit guests. */
        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
            bool fPendingIntr = false;
             * If there are external interrupts pending but masked by the TPR value, apply the threshold so that if the guest
             * lowers the TPR, it causes a VM-exit and we can deliver the interrupt.
             * If there are no external interrupts pending, set the threshold to 0 to not cause a VM-exit. We will eventually
             * deliver the interrupt when we VM-exit for other reasons.
            pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8GuestTpr;         /* Offset 0x80 is the TPR in the APIC MMIO range. */
            /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
            uint32_t u32TprThreshold = fPendingIntr ? (u8GuestTpr >> 4) : 0;
            Assert(!(u32TprThreshold & 0xfffffff0));               /* Bits 31:4 MBZ. */
            rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);

            /* 32-bit guests use the LSTAR MSR for patching guest code that touches the TPR. */
            Assert(!CPUMIsGuestInLongModeEx(pMixedCtx));           /* EFER is always up-to-date. */
            if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
                /* If there are interrupts pending, intercept LSTAR writes; otherwise don't intercept LSTAR reads or writes. */
                hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_INTERCEPT_WRITE);
                hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_APIC_STATE;
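/*
 * Sketch of the threshold rule applied above (invented helper): CR8 is the
 * task-priority class, i.e. bits 7:4 of the 8-bit TPR kept at offset 0x80 of
 * the virtual-APIC page. A VM-exit fires when the guest lowers CR8 below the
 * threshold, which is exactly the moment a pending interrupt becomes
 * deliverable.
 */
static uint32_t mySketchTprThreshold(uint8_t u8GuestTpr, bool fPendingIntr)
{
    /* No pending interrupt: a threshold of 0 can never be above CR8, so no spurious exits. */
    return fPendingIntr ? (uint32_t)(u8GuestTpr >> 4) : 0;
}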
 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
 *
 * @param   pVCpu     Pointer to the VMCPU.
 * @param   pMixedCtx Pointer to the guest-CPU context. The data may be
 *                    out-of-sync. Make sure to update the required fields
 *                    before using them.
 *
 * @remarks No-long-jump zone!!!
 * @remarks Has side effects with the VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     * Instructions like STI and MOV SS inhibit interrupts until the next instruction completes. Check if we should
     * inhibit interrupts or clear any existing interrupt inhibition.
    /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
    AssertMsg((pVCpu->hm.s.vmx.fUpdatedGuestState & (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS))
               == (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS), ("%#x\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
         * We can clear the inhibit force-flag here: even if we go back to the recompiler without executing guest code in
         * VT-x, the condition for clearing the flag has been met, so the cleared state is correct.
        uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
        uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
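/*
 * Sketch of the mapping computed above (simplified, invented inputs): the
 * interruptibility-state encodes the one-instruction interrupt shadow, bit 0
 * for blocking-by-STI and bit 1 for blocking-by-MOV SS. At most one of the
 * two is produced here, matching the "(uIntrState & 0x3) != 0x3" assertion
 * further down.
 */
static uint32_t mySketchIntrState(bool fInhibited, bool fByStiInstr)
{
    if (!fInhibited)
        return 0;                                /* No interrupt shadow. */
    return fByStiInstr ? VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
                       : VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
}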
 * Loads the guest's interruptibility-state into the guest-state area in the
 * VMCS.
 *
 * @returns VBox status code.
 * @param   pVCpu      Pointer to the VMCPU.
 * @param   uIntrState The interruptibility-state to set.
static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
    AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState));  /* Bits 31:4 MBZ. */
    Assert((uIntrState & 0x3) != 0x3);                             /* Block-by-STI and MOV SS cannot be simultaneously set. */
    int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);

 * Loads the guest's RIP into the guest-state area in the VMCS.
 *
 * @returns VBox status code.
 * @param   pVCpu     Pointer to the VMCPU.
 * @param   pMixedCtx Pointer to the guest-CPU context. The data may be
 *                    out-of-sync. Make sure to update the required fields
 *                    before using them.
 *
 * @remarks No-long-jump zone!!!
static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
        Log(("Load: VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));

 * Loads the guest's RSP into the guest-state area in the VMCS.
 *
 * @returns VBox status code.
 * @param   pVCpu     Pointer to the VMCPU.
 * @param   pMixedCtx Pointer to the guest-CPU context. The data may be
 *                    out-of-sync. Make sure to update the required fields
 *                    before using them.
 *
 * @remarks No-long-jump zone!!!
static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);

 * Loads the guest's RFLAGS into the guest-state area in the VMCS.
 *
 * @returns VBox status code.
 * @param   pVCpu     Pointer to the VMCPU.
 * @param   pMixedCtx Pointer to the guest-CPU context. The data may be
 *                    out-of-sync. Make sure to update the required fields
 *                    before using them.
 *
 * @remarks No-long-jump zone!!!
static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
        /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32 bits of RFLAGS are reserved (MBZ).
           Let us assert it as such and use a 32-bit VMWRITE. */
        uEFlags.u32 &= VMX_EFLAGS_RESERVED_0;                      /* Bits 22-31, 15, 5 & 3 MBZ. */

         * If we're emulating real mode using virtual-8086 mode, save the real-mode eflags so we can restore them on VM-exit.
         * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under virtual-8086 mode.
        pVCpu->hm.s.vmx.RealMode.eflags.u32 = uEFlags.u32;         /* Save the original eflags of the real-mode guest. */
        uEFlags.Bits.u1VM = 1;                                     /* Set the virtual-8086 mode bit. */
        uEFlags.Bits.u2IOPL = 0;                                   /* Change IOPL to 0; otherwise certain instructions won't fault. */
        Log(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", uEFlags.u32));
        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
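/*
 * Sketch of the real-on-v86 EFLAGS mangling above, using a plain uint32_t
 * instead of the eflags union (X86_EFL_VM and X86_EFL_IOPL are the standard
 * EFLAGS bit masks; the helper is invented). The saved value is what the
 * VM-exit path restores.
 */
static uint32_t mySketchV86EFlags(uint32_t fEFlags, uint32_t *pfSavedEFlags)
{
    *pfSavedEFlags = fEFlags;                    /* Keep the guest's real eflags for the VM-exit path. */
    fEFlags |= X86_EFL_VM;                       /* Run the code under virtual-8086 mode... */
    fEFlags &= ~X86_EFL_IOPL;                    /* ...with IOPL 0 so privileged instructions fault. */
    return fEFlags;
}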
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @returns VBox status code.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pVCpu Pointer to the VMCPU.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pMixedCtx Pointer to the guest-CPU context. The data may be
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * out-of-sync. Make sure to update the required fields
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * before using them.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @remarks No-long-jump zone!!!
0cc8ae8667155d352d327b5c92b62899a7e05bcdavstatic int hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Loads the guest control registers (CR0, CR3, CR4) into the guest-state area
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * in the VMCS.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @returns VBox status code.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pVM Pointer to the VM.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pVCpu Pointer to the VMCPU.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pMixedCtx Pointer to the guest-CPU context. The data may be
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * out-of-sync. Make sure to update the required fields
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * before using them.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @remarks No-long-jump zone!!!
0cc8ae8667155d352d327b5c92b62899a7e05bcdavstatic int hmR0VmxLoadGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Guest CR0.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Guest FPU.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* The guest's view (read access) of its CR0 is unblemished. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("Load: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", u32GuestCR0));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Setup VT-x's view of the guest CR0. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* The guest has paging enabled, let it access CR3 without causing a VM exit if supported. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* The guest doesn't have paging enabled, make CR3 access to cause VM exits to update our shadow. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
601c2e1ed5ec8de33296fed3938598da99915e7adhain rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a VM-exit. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Guest FPU bits.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be set on the first
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * CPUs to support VT-x and no mention of with regards to UX in VM-entry checks.
601c2e1ed5ec8de33296fed3938598da99915e7adhain bool fInterceptNM = false;
601c2e1ed5ec8de33296fed3938598da99915e7adhain fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
601c2e1ed5ec8de33296fed3938598da99915e7adhain /* The guest should still get #NM exceptions when it expects it to, so we should not clear TS & MP bits here.
601c2e1ed5ec8de33296fed3938598da99915e7adhain We're only concerned about -us- not intercepting #NMs when the guest-FPU is active. Not the guest itself! */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl u32GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
    /* Catch floating point exceptions if we need to report them to the guest in a different way. */
    bool fInterceptMF = false;

    /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
        pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
        pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;

    /* Additional intercepts for debugging, define these yourself explicitly. */
    Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));

    /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
    uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 & pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
    uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 | pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
    if (pVM->hm.s.vmx.fUnrestrictedGuest)       /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
    Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));

    u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW);  /* Always enable caching. */
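    /* Sketch of how the fixed bits are applied (the uSetCR0/uZapCR0 semantics follow from the
       fixed0/fixed1 MSR definitions; these two lines are an assumption, not the elided original): */
    u32GuestCR0 |= uSetCR0;                     /* Force the must-be-one bits. */
    u32GuestCR0 &= uZapCR0;                     /* Clear the must-be-zero bits. */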
    /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
    Log(("Load: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", u32GuestCR0, uSetCR0, uZapCR0));
    /*
     * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
     * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW), and for certain other bits
     * we want to be notified immediately of guest CR0 changes (e.g. PG, to update our shadow page tables).
     */
    uint64_t u64CR0Mask =  X86_CR0_PE   /* Leading term reconstructed; the elided original may own further bits. */
                         | X86_CR0_ET   /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET. */
                         | X86_CR0_CD   /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD. */
                         | X86_CR0_NW;  /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW. */

    /* If the guest FPU state is active, we don't need to VM-exit on writes to the FPU-related bits in CR0. */
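    /* Sketch of the adjustment the comment above describes (an assumption, not the original lines): */
    if (fInterceptNM)
        u64CR0Mask |=  (X86_CR0_TS | X86_CR0_MP);   /* Own TS/MP so lazy FPU state loading sees guest writes. */
    else
        u64CR0Mask &= ~(X86_CR0_TS | X86_CR0_MP);   /* Guest FPU active: let the guest flip TS/MP without exits. */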
    /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
    rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, u64CR0Mask);
    /*
     * Guest CR2.
     * It's always loaded in the assembler code. Nothing to do here.
     */

    /*
     * Guest CR3.
     */
        /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)". */
        Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));

        /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
                                   | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);

        /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls". */
        AssertMsg(   ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3    /* Bits 3:5 (EPT page-walk length - 1) must be 3. */
                  && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x3f) == 0,   /* Bits 6:11 MBZ. */

        rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
        Log(("Load: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
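        /*
         * Sketch of the EPTP composition elided above (the walk-length term is visible in the
         * fragment; the root-address source and the memory-type shift are assumptions):
         *     pVCpu->hm.s.vmx.HCPhysEPTP = <nested-paging root table physical address>
         *                                | (VMX_EPT_MEMTYPE_WB << VMX_EPT_MEMTYPE_SHIFT)
         *                                | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
         */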
        /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
        rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);
        rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);
        rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);
        rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);

        /* The guest's view of its CR3 is unblemished with nested paging when the guest is using paging or we
           have unrestricted execution to handle the guest when it's not using paging. */
            /*
             * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
             * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
             * EPT takes care of translating it to host-physical addresses.
             */
            /* We obtain it here every time as the guest could have relocated this PCI region. */
            rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);

        /* Non-nested paging case, just use the hypervisor's CR3. */
        Log(("Load: VMX_VMCS_GUEST_CR3=%#RGv\n", GCPhysGuestCR3));
        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
    /*
     * Guest CR4.
     */
    /* The guest's view of its CR4 is unblemished. */
    rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
    Log(("Load: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", u32GuestCR4));

    /* Set up VT-x's view of the guest CR4. */
    /*
     * If we're emulating real mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
     * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0()).
     * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
     */
            /* We use 4 MB pages in our identity-mapping page table when the guest doesn't have paging. */
            /* Our identity mapping is a 32-bit page directory. */
    /* else: use guest CR4. */
    /*
     * The shadow paging modes and guest paging modes are different. The shadow is in accordance with the host
     * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
     */
        case PGMMODE_PROTECTED:     /* Protected mode without paging. */
        case PGMMODE_AMD64_NX:      /* 64-bit AMD paging (long mode) with NX enabled. */

    /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
    uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 & pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
    uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 | pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
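    /* Applying the fixed bits, mirroring the CR0 handling earlier (sketch of the elided lines): */
    u32GuestCR4 |= (uint32_t)uSetCR4;           /* Must-be-one bits, notably X86_CR4_VMXE. */
    u32GuestCR4 &= (uint32_t)uZapCR4;           /* Must-be-zero bits. */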
    /* Write VT-x's view of the guest CR4 into the VMCS. */
    Log(("Load: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", u32GuestCR4, uSetCR4, uZapCR4));

    /* Set up the CR4 mask. CR4 flags owned by the host; if the guest attempts to change them, that would cause a VM-exit. */
    rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, u64CR4Mask);
/**
 * Loads the guest debug registers into the guest-state area in the VMCS.
 * This also sets up whether #DB and MOV DRx accesses cause VM-exits.
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
 *                      out-of-sync. Make sure to update the required fields
 *                      before using them.
 *
 * @remarks No-long-jump zone!!!
 */
static int hmR0VmxLoadGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))

    /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs". */
    if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
        Assert(!(pMixedCtx->dr[7] >> 32));          /* Upper 32 bits are reserved (MBZ). */
    /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
    Assert((pMixedCtx->dr[7] & 0xd800) == 0);       /* Bits 15, 14, 12, 11 are reserved (MBZ). */
    Assert((pMixedCtx->dr[7] & 0x400) == 0x400);    /* Bit 10 is reserved (MB1). */
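    /* A tolerant caller could sanitize instead of asserting (sketch, not in the original):
     *     pMixedCtx->dr[7] &= UINT32_MAX;                              - upper 32 bits MBZ
     *     pMixedCtx->dr[7] = (pMixedCtx->dr[7] & ~0xd800) | 0x400;     - clear bits 15,14,12,11; set bit 10
     */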
    bool fInterceptDB     = false;
    bool fInterceptMovDRx = false;

    /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
        pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);

    if (CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
        rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
    else if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
        rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
    /* For the first time we would need to intercept MOV DRx accesses even when the guest debug registers aren't loaded. */

    /* Update the exception bitmap regarding intercepting #DB generated by the guest. */
    /* Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions. */
        pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
        pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
    rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);

    /* The guest's view of its DR7 is unblemished. */
    rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);

    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
/**
 * Strict function to validate segment registers.
 */
static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
    Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
    Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);

    /* Protected mode checks. */
    Assert(pCtx->cs.Attr.u && pCtx->cs.Attr.u != HMVMX_SEL_UNUSABLE);   /* CS cannot be loaded with NULL in protected mode. */
    if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
    else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
        AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
    if (pCtx->ss.Attr.u && pCtx->ss.Attr.u != HMVMX_SEL_UNUSABLE)
        Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
        Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
        Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
    /* CR0 might not be up-to-date here always, hence disabled. */
    /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
    if (pCtx->ds.Attr.u && pCtx->ds.Attr.u != HMVMX_SEL_UNUSABLE)
        Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
    if (pCtx->es.Attr.u && pCtx->es.Attr.u != HMVMX_SEL_UNUSABLE)
        Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
    if (pCtx->fs.Attr.u && pCtx->fs.Attr.u != HMVMX_SEL_UNUSABLE)
        Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
    if (pCtx->gs.Attr.u && pCtx->gs.Attr.u != HMVMX_SEL_UNUSABLE)
        Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));

    /* 64-bit capable CPUs. */
# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /* Real and v86 mode checks. */
    /* hmR0VmxWriteSegmentReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
    uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
        u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
        u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
        u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
    AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));

    /* 64-bit capable CPUs. */
# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
#endif /* VBOX_STRICT */
/**
 * Writes a guest segment register into the guest-state area in the VMCS.
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   idxSel      Index of the selector in the VMCS.
 * @param   idxLimit    Index of the segment limit in the VMCS.
 * @param   idxBase     Index of the segment base in the VMCS.
 * @param   idxAccess   Index of the access rights of the segment in the VMCS.
 * @param   pSelReg     Pointer to the segment selector.
 * @param   pCtx        Pointer to the guest-CPU context.
 *
 * @remarks No-long-jump zone!!!
 */
static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
                                  uint32_t idxAccess, PCPUMSELREG pSelReg, PCPUMCTX pCtx)
    rc  = VMXWriteVmcs32(idxSel, pSelReg->Sel);         /* 16-bit guest selector field. */
    rc |= VMXWriteVmcs32(idxLimit, pSelReg->u32Limit);  /* 32-bit guest segment limit field. */
    rc |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base);  /* Natural-width guest segment base field. */

    /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
    /*
     * The way to differentiate between whether this is really a null selector or was just a selector loaded with 0 in
     * real-mode is using the segment attributes. A selector loaded in real-mode with the value 0 is valid and usable in
     * protected-mode and we should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure NULL selectors
     * loaded in protected-mode have their attributes as 0.
     */
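    /* Sketch of the derivation this comment describes (the branch structure and the
       fRealOnV86Active member are assumptions; the 0xf3 attribute and HMVMX_SEL_UNUSABLE
       usage match the rest of this file): */
    uint32_t u32Access = pSelReg->Attr.u;
    if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
        u32Access = 0xf3;                       /* Real-on-v86: present, ring-3, read/write, accessed. */
    else if (!u32Access)
        u32Access = HMVMX_SEL_UNUSABLE;         /* Null selector loaded in protected mode: mark unusable. */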
    /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
    AssertMsg((u32Access == HMVMX_SEL_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
              ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg->Sel, pSelReg->Attr.u));

    rc = VMXWriteVmcs32(idxAccess, u32Access);          /* 32-bit guest segment access-rights field. */
/**
 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
 * into the guest-state area in the VMCS.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
 *                      out-of-sync. Make sure to update the required fields
 *                      before using them.
 *
 * @remarks No-long-jump zone!!!
 */
static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    /*
     * Guest segment registers: CS, SS, DS, ES, FS, GS.
     */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
            /* Save the segment attributes for the real-on-v86 mode hack, so we can restore them on VM-exit. */
            pVCpu->hm.s.vmx.RealMode.uAttrCS.u = pMixedCtx->cs.Attr.u;
            pVCpu->hm.s.vmx.RealMode.uAttrSS.u = pMixedCtx->ss.Attr.u;
            pVCpu->hm.s.vmx.RealMode.uAttrDS.u = pMixedCtx->ds.Attr.u;
            pVCpu->hm.s.vmx.RealMode.uAttrES.u = pMixedCtx->es.Attr.u;
            pVCpu->hm.s.vmx.RealMode.uAttrFS.u = pMixedCtx->fs.Attr.u;
            pVCpu->hm.s.vmx.RealMode.uAttrGS.u = pMixedCtx->gs.Attr.u;
            && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
            /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
               in real-mode (e.g. OpenBSD 4.0). */

        rc  = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
                                     VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs, pMixedCtx);
        rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
                                     VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss, pMixedCtx);
        rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
                                     VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds, pMixedCtx);
        rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
                                     VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es, pMixedCtx);
        rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
                                     VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs, pMixedCtx);
        rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
                                     VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs, pMixedCtx);

        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
    /*
     * Guest TR.
     */
        /*
         * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
         * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
         * See hmR3InitFinalizeR0() to see how pRealModeTSS is set up.
         */
        Assert(PDMVmmDevHeapIsEnabled(pVM));    /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
        /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
        rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);

    /* Validate. */
    AssertMsg(   (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
              || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
    AssertMsg(!(u32AccessRights & HMVMX_SEL_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
    Assert(!(u32AccessRights & 0xfffe0000));        /* Bits 31:17 MBZ. */
              || !(u32AccessRights & RT_BIT(15)));  /* Granularity MBZ. */
              || (u32AccessRights & RT_BIT(15)));   /* Granularity MB1. */

    rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit);
    rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);
    /*
     * Guest GDTR.
     */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);
        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt);

        Assert(!(pMixedCtx->gdtr.cbGdt & UINT64_C(0xffff0000)));    /* Bits 31:16 MBZ. */
        Log(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt));
    /*
     * Guest LDTR.
     */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
        /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
        rc  = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel);
        rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit);
        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base);
        rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);

        /* Validate. */
            Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2)));             /* TI MBZ. */
            Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2);             /* Type MB2 (LDT). */
            Assert(!pMixedCtx->ldtr.Attr.n.u1DescType);             /* System MBZ. */
            Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1);          /* Present MB1. */
            Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh);            /* 11:8 MBZ. */
            Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000));         /* 31:17 MBZ. */
                      || !pMixedCtx->ldtr.Attr.n.u1Granularity);    /* Granularity MBZ. */
                      || pMixedCtx->ldtr.Attr.n.u1Granularity);     /* Granularity MB1. */

        Log(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base));
    /*
     * Guest IDTR.
     */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);
        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt);

        Assert(!(pMixedCtx->idtr.cbIdt & UINT64_C(0xffff0000)));    /* Bits 31:16 MBZ. */
        Log(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt));
/**
 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
 * areas. These MSRs will automatically be loaded to the host CPU on every
 * successful VM-entry and stored from the host CPU on every successful VM-exit.
 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
 *                      out-of-sync. Make sure to update the required fields
 *                      before using them.
 *
 * @remarks No-long-jump zone!!!
 */
static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    /*
     * MSRs covered by auto-load/store: EFER, LSTAR, STAR, SF_MASK, TSC_AUX (RDTSCP).
     */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
        /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
        const bool fSupportsNX       = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
        const bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
        /** @todo support saving IA32_EFER, i.e.
         *        VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR, in which case the
         *        guest EFER need not be part of the VM-entry MSR-load area. */
            /* VT-x will complain if only MSR_K6_EFER_LME is set. See Intel spec. 26.4 "Loading MSRs" for details. */
            pGuestMsr->u64Value &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
            pGuestMsr->u64Value = pMixedCtx->msrLSTAR;          /* 64-bit mode syscall rip */
            pGuestMsr->u64Value = pMixedCtx->msrSTAR;           /* legacy syscall eip, cs & ss */
            pGuestMsr->u64Value = pMixedCtx->msrSFMASK;         /* syscall flag mask */
            pGuestMsr->u64Value = pMixedCtx->msrKERNELGSBASE;   /* swapgs exchange value */
        /*
         * RDTSCP requires the TSC_AUX MSR. Host and guest share the physical MSR, so we have to
         * load the guest's copy if the guest can execute RDTSCP without causing VM-exits.
         */
        if (   CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP)
            && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP))
            rc = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pGuestMsr->u64Value);

        /* Shouldn't ever happen, but there -is- a number. We're well within the recommended 512. */
        if (cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc))
            LogRel(("CPU auto-load/store MSR count in VMCS exceeded cGuestMsrs=%u.\n", cGuestMsrs));

        /* Update the VCPU's copy of the guest MSR count. */
        rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cGuestMsrs);
        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs);
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_AUTO_MSRS;
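        /*
         * Sketch of how one auto-load/store entry above is filled in (an assumption; the entry
         * layout follows the VMX MSR-entry format: MSR index, reserved, 64-bit value):
         *     pGuestMsr->u32IndexMSR = MSR_K8_LSTAR;
         *     pGuestMsr->u32Reserved = 0;
         *     pGuestMsr->u64Value    = pMixedCtx->msrLSTAR;
         *     pGuestMsr++; cGuestMsrs++;
         */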
    /*
     * Guest sysenter MSRs.
     * These flags are only set when MSR-bitmaps are not supported by the CPU and we cause
     * VM-exits on WRMSRs for these MSRs.
     */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);
        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_CS_MSR;
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);
        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR;
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);
        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR;
/**
 * Loads the guest activity state into the guest-state area in the VMCS.
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
 *                      out-of-sync. Make sure to update the required fields
 *                      before using them.
 *
 * @remarks No-long-jump zone!!!
 */
static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx)
    /** @todo See if we can make use of other states, e.g.
     *        VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_ACTIVITY_STATE)
        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_ACTIVITY_STATE;
/**
 * Sets up the appropriate function to run guest code.
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
 *                      out-of-sync. Make sure to update the required fields
 *                      before using them.
 *
 * @remarks No-long-jump zone!!!
 */
static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
        Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);   /* Guaranteed by hmR3InitFinalizeR0(). */
#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
        /* 64-bit host or hybrid host. */
        /* Guest is not in long mode, use the 32-bit handler. */
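    /* A minimal sketch of the selection described above (VMXR0StartVM32/VMXR0StartVM64 are
       assumed assembly entry points; VMXR0SwitcherStartVM64 appears later in this file): */
    if (CPUMIsGuestInLongModeEx(pMixedCtx))
    {
#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;    /* Switch to 64-bit before entering the guest. */
#else
        pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;            /* Assumed symbol. */
#endif
    }
    else
        pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;            /* Assumed symbol. */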
/**
 * Wrapper for running the guest code in VT-x.
 *
 * @returns VBox strict status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pCtx    Pointer to the guest-CPU context.
 *
 * @remarks No-long-jump zone!!!
 */
DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    /*
     * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
     * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
     * See the MSDN docs, "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage", for details.
     */
    return hmR0VMXStartVMWrapXMM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
    return pVCpu->hm.s.vmx.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
/**
 * Reports a world-switch error and dumps some useful debug info.
 *
 * @param   pVM             Pointer to the VM.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   rcVMRun         The return code from VMLAUNCH/VMRESUME.
 * @param   pCtx            Pointer to the guest-CPU context.
 * @param   pVmxTransient   Pointer to the VMX transient structure (only
 *                          exitReason updated).
 */
static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
        case VINF_SUCCESS:                  /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
        case VERR_VMX_UNABLE_TO_START_VM:   /* VMLAUNCH/VMRESUME itself failed. */
            int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.lasterror.u32ExitReason);
            rc    |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.lasterror.u32InstrError);
            rc    |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
            Log(("uExitReason %#x (VmxTransient %#x)\n", pVCpu->hm.s.vmx.lasterror.u32ExitReason,
            Log(("Exit Qualification %#x\n", pVmxTransient->uExitQualification));
            Log(("InstrError %#x\n", pVCpu->hm.s.vmx.lasterror.u32InstrError));
            if (pVCpu->hm.s.vmx.lasterror.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
                Log(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.lasterror.u32InstrError]));
                Log(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
            /* VMX control bits. */
            rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, &u32Val);         AssertRC(rc);
            Log(("VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS %#RX32\n", u32Val));
            rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, &u32Val);        AssertRC(rc);
            Log(("VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS %#RX32\n", u32Val));
            rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2, &u32Val);       AssertRC(rc);
            Log(("VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2 %#RX32\n", u32Val));
            rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_CONTROLS, &u32Val);            AssertRC(rc);
            Log(("VMX_VMCS32_CTRL_ENTRY_CONTROLS %#RX32\n", u32Val));
            rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_CONTROLS, &u32Val);             AssertRC(rc);
            rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val);          AssertRC(rc);
            Log(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
            rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val);   AssertRC(rc);
            Log(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
            rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val);   AssertRC(rc);
            Log(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
            rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val);        AssertRC(rc);
            Log(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
            rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val);             AssertRC(rc);
            rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val);      AssertRC(rc);
            Log(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
            rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val);       AssertRC(rc);
            Log(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
            rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val);      AssertRC(rc);
            Log(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
            rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);          AssertRC(rc);
            Log(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
            rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val);      AssertRC(rc);
            Log(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
            rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val);     AssertRC(rc);
            Log(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
            rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg);                  AssertRC(rc);
            rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg);           AssertRC(rc);
            rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg);                  AssertRC(rc);
            rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg);           AssertRC(rc);
            rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val);                 AssertRC(rc);
            /* Guest bits. */
            rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &uGCReg);          AssertRC(rc);
            Log(("Old Guest Rip %#RGv New %#RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)uGCReg));
            rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &uGCReg);          AssertRC(rc);
            Log(("Old Guest Rsp %#RGv New %#RGv\n", (RTGCPTR)pCtx->rsp, (RTGCPTR)uGCReg));
            rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val);         AssertRC(rc);
            Log(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
            rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val);   AssertRC(rc);
            /* Host bits. */
            rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg);           AssertRC(rc);
            rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg);           AssertRC(rc);
            rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg);           AssertRC(rc);

            pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
            rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val);      AssertRC(rc);
            pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
            rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val);      AssertRC(rc);
            pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
            rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val);      AssertRC(rc);
            pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
            rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val);      AssertRC(rc);
            pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
            rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val);      AssertRC(rc);
            pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
            rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val);      AssertRC(rc);
            pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));

            rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg);       AssertRC(rc);
            rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg);     AssertRC(rc);
            rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg);     AssertRC(rc);
            rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val);   AssertRC(rc);
            rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg);  AssertRC(rc);
            rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg);  AssertRC(rc);
            rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg);           AssertRC(rc);
            rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg);           AssertRC(rc);

# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
            Log(("MSR_K8_LSTAR          = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
            Log(("MSR_K8_CSTAR          = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
            Log(("MSR_K8_SF_MASK        = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
            Log(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
#endif /* VBOX_STRICT */

        /* Impossible. */
            AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
            return true;
    return false;
        /* Read-only fields. */
            return true;
    /* All readable fields should also be part of the VMCS write cache. */
#endif /* VBOX_STRICT */
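/*
 * Sketch of the shape of the strict-build validators referenced below (a hypothetical reduced
 * version; the real hmR0VmxIsValidWriteField() whitelists many more field encodings):
 */
static bool hmR0VmxIsValidWriteFieldSketch(uint32_t idxField)
{
    switch (idxField)
    {
        case VMX_VMCS_GUEST_RIP:
        case VMX_VMCS_GUEST_RSP:
        case VMX_VMCS_GUEST_CR3:
            return true;        /* Writable guest-state fields cached for the 32/64 switcher. */
        default:
            return false;       /* Read-only and unknown fields must never be queued for writing. */
    }
}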
/**
 * Executes the specified handler in 64-bit mode.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCtx        Pointer to the guest CPU context.
 * @param   enmOp       The operation to perform.
 * @param   cbParam     Number of parameters.
 * @param   paParam     Array of 32-bit parameters.
 */
VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
                                         uint32_t *paParam)
    AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
    Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
    Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
    Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
    for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
        Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
    for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
        Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
    /* Disable interrupts. */
    HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);

    /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
    /* Leave VMX root mode. */

    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
    /* Call the switcher. */
    rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);

    /** @todo replace with hmR0VmxEnterRootMode() and LeaveRootMode(). */
    /* Make sure the VMX instructions don't cause #UD faults. */
    /* Re-enter VMX root mode. */
/**
 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
 * supporting 64-bit guests.
 *
 * @returns VBox status code.
 * @param   fResume     Whether to VMLAUNCH or VMRESUME.
 * @param   pCtx        Pointer to the guest-CPU context.
 * @param   pCache      Pointer to the VMCS cache.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 */
DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
#ifdef VBOX_STRICT
aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
#ifdef VBOX_STRICT
AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
return rc;
++cReadFields; \
/* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
return VINF_SUCCESS;
int rc;
switch (idxField)
* These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
* values). When we switch the host to 64-bit mode for running 64-bit guests, these VMWRITEs get executed then.
case VMX_VMCS_GUEST_CR3:
case VMX_VMCS_GUEST_ES_BASE:
case VMX_VMCS_GUEST_CS_BASE:
case VMX_VMCS_GUEST_SS_BASE:
case VMX_VMCS_GUEST_DS_BASE:
case VMX_VMCS_GUEST_FS_BASE:
case VMX_VMCS_GUEST_GS_BASE:
case VMX_VMCS_GUEST_LDTR_BASE:
case VMX_VMCS_GUEST_TR_BASE:
case VMX_VMCS_GUEST_GDTR_BASE:
case VMX_VMCS_GUEST_IDTR_BASE:
case VMX_VMCS_GUEST_DR7:
case VMX_VMCS_GUEST_RSP:
case VMX_VMCS_GUEST_RIP:
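            /* Sketch of the queueing these cases share (VMCSCACHE member names are assumptions
               apart from Write.aField/Write.cValidEntries, which appear in the asserts earlier): */
            {
                PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
                AssertReturn(pCache->Write.cValidEntries < RT_ELEMENTS(pCache->Write.aField), VERR_TOO_MUCH_DATA);
                pCache->Write.aField[pCache->Write.cValidEntries]    = idxField;
                pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;      /* Assumed member name. */
                pCache->Write.cValidEntries++;
                rc = VINF_SUCCESS;
            }
            break;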
        default:
            AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
return rc;
return VINF_SUCCESS;
return VINF_SUCCESS;
#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
bool fOffsettedTsc = false;
uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset);
if (fOffsettedTsc)
/* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
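    {
        /* Sketch of the elided body (an assumption; VMX_VMCS64_CTRL_TSC_OFFSET_FULL is the
           standard VMCS field name): apply the offset and stop intercepting RDTSC. */
        int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset);
        pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
        AssertRC(rc);
    }
    else
    {
        /* No usable offset: make guest RDTSC/RDTSCP exit so we can virtualize the TSC. */
        pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
        AssertRC(rc);
    }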
    /* These are the contributory exceptions considered when deciding whether a second fault
       raised during event delivery must be promoted to a double fault (#DF). */
    switch (uVector)
        case X86_XCPT_GP:
        case X86_XCPT_SS:
        case X86_XCPT_NP:
        case X86_XCPT_TS:
        case X86_XCPT_DE:
DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntrInfo, uint32_t cbInstr, uint32_t u32ErrCode,
hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
static int hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
#ifdef VBOX_STRICT
* Ignore software interrupts (INT n), software exceptions (#BP, #OF) and privileged software exception
switch (enmReflect)
case VMXREFLECTXCPT_XCPT:
/* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INTR_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
case VMXREFLECTXCPT_DF:
Log(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo,
case VMXREFLECTXCPT_TF:
return rc;
return rc;
return rc;
return rc;
return rc;
return rc;
return rc;
if (!uIntrState)
/* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
return VINF_SUCCESS;
return rc;
return rc;
return rc;
return VINF_SUCCESS;
pMsr += i;
AssertFailed();
return VERR_HM_UNEXPECTED_LD_ST_MSR;
return VINF_SUCCESS;
/* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
/* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
return rc;
DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
return rc;
#ifdef VMX_USE_CACHED_VMCS_ACCESSES
/* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR, don't save the fake one. */
return rc;
return rc;
return VINF_SUCCESS;
return VINF_SUCCESS;
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestFSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
("Missed guest state bits while saving state; residue %RX32\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
return rc;
if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
|| VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
rc = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
return rc;
rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
return rc;
return VINF_EM_PENDING_REQUEST;
return VINF_PGM_POOL_FLUSH_PENDING;
return VINF_EM_RAW_TO_R3;
return VINF_SUCCESS;
/* See Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntrInfo. */
switch (uVector)
case X86_XCPT_BP:
case X86_XCPT_OF:
case X86_XCPT_PF:
case X86_XCPT_DF:
case X86_XCPT_TS:
case X86_XCPT_NP:
case X86_XCPT_SS:
case X86_XCPT_GP:
case X86_XCPT_AC:
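                /* The vectors in this group push an error code. A sketch of what they
                   funnel into (bit 11 = deliver-error-code per Intel spec. 24.8.3; the
                   macro name is an assumption of this sketch): */
                u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
                break;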
Log(("Converting TRPM trap: u32IntrInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
switch (uVectorType)
Log(("Converting pending HM event to TRPM trap uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
if (fErrorCodeValid)
/* We want to see what the guest-state was before VM-entry, don't resync here, as we won't continue guest execution. */
/* Please, no longjumps here (any logging could trigger a flush that longjmps back to ring-3). NO LOGGING BEFORE THIS POINT! */
/* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
/* VMMRZCallRing3() already makes sure we never get called as a result of a longjmp due to an assertion, */
if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT))
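    {
        /* Sketch of the then-branch: enable interrupt-window exiting so VT-x exits
           the instant the guest becomes able to accept the interrupt (u32ProcCtls
           mirroring the VMCS controls is an assumption of this sketch). */
        pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
        int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
        AssertRC(rc2);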
} /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
Assert( !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) /* We don't support block-by-NMI and SMI yet.*/
Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
bool fInject = true;
if ( fBlockInt
|| fBlockSti
|| fBlockMovSS)
fInject = false;
&& ( fBlockMovSS
|| fBlockSti))
/* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
fInject = false;
if (fInject)
rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.cbInstr,
else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
/* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
if ( !fBlockMovSS
&& !fBlockSti)
/* Check if there are guest external interrupts (PIC/APIC) pending and inject them if the guest can receive them. */
if ( !fBlockInt
&& !fBlockSti
&& !fBlockMovSS)
 * Deliver a pending debug exception if the guest is single-stepping. The interruptibility-state could have been changed by
* hmR0VmxInjectEventVmcs() (e.g. real-on-v86 injecting software interrupts), re-evaluate it and set the BS bit.
if ( fBlockSti
|| fBlockMovSS)
if (pMixedCtx->eflags.Bits.u1TF) /* We don't have any IA32_DEBUGCTL MSR for guests. Treat as all bits 0. */
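        {
            /* Sketch (field names assumed): queue a single-step (BS) debug exception
               so it is delivered after the next guest instruction completes. */
            int rc2 = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
            AssertRC(rc2);
        }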
 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD, VMX_EXIT_MTF,
 * VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI. See Intel spec. 27.3.4 "Saving Non-Register State".
/* We are single-stepping in the hypervisor debugger, clear interrupt inhibition as setting the BS bit would mean
uIntrState = 0;
* There's no need to clear the VM entry-interruption information field here if we're not injecting anything.
* VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
return rc;
hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
if (fErrorCodeValid)
return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in virtual-8086 mode.
return VINF_EM_RESET;
int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
return rc;
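/*
 * A simplified sketch of the 16-bit real-mode stack push used above (the helper
 * name is hypothetical and segment-wrap handling is trimmed):
 */
DECLINLINE(int) hmR0VmxRealModeStackPushU16(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
{
    if (pMixedCtx->sp < sizeof(uint16_t))   /* Out of stack: treat like a shutdown/reset condition. */
        return VINF_EM_RESET;
    pMixedCtx->sp -= sizeof(uint16_t);      /* Grow the stack downwards by one word. */
    return PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
}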
static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
/* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
* Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
* See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
return VINF_EM_RESET;
/* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
/* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, puIntrState);
Log(("Injecting real-mode: u32IntrInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntrInfo, u32ErrCode, cbInstr));
return rc;
 * When unrestricted guest execution is enabled and the guest is in real mode, we must not set the deliver-error-code bit.
Assert(VMX_EXIT_INTERRUPTION_INFO_VALID(u32IntrInfo)); /* Bit 31 (Valid bit) must be set by caller. */
Log(("Injecting u32IntrInfo=%#x u32ErrCode=%#x cbInstr=%#x uCR2=%#RGv\n", u32IntrInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
return rc;
return VERR_VMX_X86_CR4_VMXE_CLEARED;
return rc;
return VINF_SUCCESS;
return rc;
/* Nothing to do if the host-state-changed flag isn't set. This will later be optimized when preemption hooks are in place. */
return VINF_SUCCESS;
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
return rc;
AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestControlRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestDebugRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestGprs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
return rc;
* clearing the common-state (TRPM/forceflags), we must undo those changes so
DECLINLINE(int) hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return rc;
/* Setup the Virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
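    /* Sketch: isolate the guest-physical page of the APIC base (mask name assumed). */
    RTGCPHYS GCPhysApicBase = pMixedCtx->msrApicBase & PAGE_BASE_GC_MASK;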
/* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
/* We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.) */
/* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
return VINF_EM_RAW_INTERRUPT;
* This clears force-flags, TRPM traps & pending HM events. We cannot safely restore the state if we exit to ring-3
* (before running guest code) after calling this function (e.g. how do we reverse the effects of calling PDMGetInterrupt()?)
return rc;
DECLINLINE(void) hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
/** @todo I don't see the point of this, VMMR0EntryFast() already disables interrupts for the entire period. */
#ifdef HMVMX_SYNC_FULL_GUEST_STATE
AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
 * TPR patching (only active for 32-bit guests on 64-bit capable CPUs) when the CPU does not support virtualizing
/* Need guest's LSTAR MSR (which is part of the auto load/store MSRs in the VMCS), ensure we have the updated one. */
/* The patch code uses LSTAR because a 32-bit guest implicitly leaves it unused (SYSCALL is 64-bit only). */
#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
DECLINLINE(void) hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
pVCpu->hm.s.vmx.fUpdatedGuestState = 0; /* Exits/longjmps to ring-3 requires saving the guest state. */
#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
Log(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
* If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
 * we eventually get a VM-exit for any reason. This may be expensive as PDMApicSetTPR() can longjmp to ring-3; that is also why it is done on this VM-exit path.
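    /* Sketch of the lazy TPR write-back; that the TPR shadow lives at offset 0x80 of
       the virtual-APIC page is an assumption of this illustration. */
    if (   (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
        && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
    {
        int rc2 = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
        AssertRC(rc2);
        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
    }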
for (;; cLoops++)
/* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
* Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
/* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
* Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
if (RT_UNLIKELY(rc != VINF_SUCCESS)) /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
return rc;
#ifdef HM_PROFILE_EXIT_DISPATCH
#ifdef HMVMX_USE_FUNCTION_TABLE
return rc;
#ifndef HMVMX_USE_FUNCTION_TABLE
DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
int rc;
switch (rcReason)
case VMX_EXIT_TPR_BELOW_THRESHOLD: rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_VMCALL:
case VMX_EXIT_VMCLEAR:
case VMX_EXIT_VMLAUNCH:
case VMX_EXIT_VMPTRLD:
case VMX_EXIT_VMPTRST:
case VMX_EXIT_VMREAD:
case VMX_EXIT_VMRESUME:
case VMX_EXIT_VMWRITE:
case VMX_EXIT_VMXOFF:
case VMX_EXIT_VMXON:
case VMX_EXIT_INVEPT:
case VMX_EXIT_INVVPID:
case VMX_EXIT_VMFUNC:
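            /* We don't expose VMX to the guest, so each of these raises #UD. Sketch
               using the handler defined further down in this file: */
            rc = hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
            break;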
return rc;
#ifdef HM_PROFILE_EXIT_DISPATCH
# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
#ifdef DEBUG
# define VMX_ASSERT_PREEMPT_CPUID_VAR() \
# define VMX_ASSERT_PREEMPT_CPUID() \
RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
# define VMX_VALIDATE_EXIT_HANDLER_PARAMS() \
# define VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
# define VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while (0)
DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
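{
    /* Sketch of the body (helpers as declared elsewhere in this file): read the
       VM-exit instruction length from the VMCS and step RIP past the instruction. */
    int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    rc    |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    if (RT_SUCCESS(rc))
    {
        pMixedCtx->rip += pVmxTransient->cbInstr;
        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
    }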
return rc;
/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
static DECLCALLBACK(int) hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return VINF_SUCCESS;
return VINF_EM_RAW_INTERRUPT;
static DECLCALLBACK(int) hmR0VmxExitXcptNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return VINF_EM_RAW_INTERRUPT;
/* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
return VINF_SUCCESS;
return rc;
switch (uIntrType)
switch (uVector)
#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
AssertMsgFailed(("Unexpected interruption code %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntrInfo)));
return rc;
static DECLCALLBACK(int) hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
/* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
/* Deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and resume guest execution. */
return VINF_SUCCESS;
static DECLCALLBACK(int) hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return VERR_VMX_UNEXPECTED_EXIT_CODE;
static DECLCALLBACK(int) hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
static DECLCALLBACK(int) hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
static DECLCALLBACK(int) hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return rc;
static DECLCALLBACK(int) hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return VINF_EM_RAW_EMULATE_INSTR;
return VERR_VMX_UNEXPECTED_EXIT_CODE;
static DECLCALLBACK(int) hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
/* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
return rc;
static DECLCALLBACK(int) hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
/* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
return rc;
static DECLCALLBACK(int) hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
return rc;
static DECLCALLBACK(int) hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RGv failed with %Rrc\n",
return rc;
static DECLCALLBACK(int) hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
return rc;
static DECLCALLBACK(int) hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
return rc;
static DECLCALLBACK(int) hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
 * Execution of RSM outside SMM causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
* get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
return VERR_VMX_UNEXPECTED_EXIT_CODE;
static DECLCALLBACK(int) hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
* This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits"
return VERR_VMX_UNEXPECTED_EXIT_CODE;
static DECLCALLBACK(int) hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return VERR_VMX_UNEXPECTED_EXIT_CODE;
static DECLCALLBACK(int) hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
* SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
* don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
return VERR_VMX_UNEXPECTED_EXIT_CODE;
static DECLCALLBACK(int) hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM. See Intel spec. 33.14.1 "Default Treatment
 * of SMI Delivery" and Intel spec. 29.3 "VMX Instructions" for VMXON. It is -NOT- blocked in VMX non-root operation so we can potentially
static DECLCALLBACK(int) hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return VINF_EM_RESET;
static DECLCALLBACK(int) hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return rc;
static DECLCALLBACK(int) hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return VINF_SUCCESS;
static DECLCALLBACK(int) hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
static DECLCALLBACK(int) hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return VERR_EM_INTERPRETER;
static DECLCALLBACK(int) hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return VERR_EM_INTERPRETER;
static DECLCALLBACK(int) hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return VERR_VMX_INVALID_GUEST_STATE;
static DECLCALLBACK(int) hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return VERR_VMX_UNEXPECTED_EXIT_CODE;
static DECLCALLBACK(int) hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
return VERR_VMX_UNEXPECTED_EXIT_CODE;
static DECLCALLBACK(int) hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
return VERR_VMX_UNDEFINED_EXIT_CODE;
static DECLCALLBACK(int) hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return VERR_EM_INTERPRETER;
return VERR_VMX_UNEXPECTED_EXIT_CODE;
static DECLCALLBACK(int) hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return VERR_EM_INTERPRETER;
return VERR_VMX_UNEXPECTED_EXIT_CODE;
static DECLCALLBACK(int) hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return rc;
static DECLCALLBACK(int) hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
/* If TPR patching is active, LSTAR holds the guest TPR, writes to it must be propagated to the APIC. */
return VINF_SUCCESS;
AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
else if (pMixedCtx->ecx == MSR_K6_EFER) /* EFER is the only MSR we auto-load but don't allow write-passthrough. */
else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
/* MSR_K8_KERNEL_GS_BASE: Nothing to do as it's not part of the VMCS. Manually loaded each time on VM-entry. */
#ifdef VBOX_STRICT
case MSR_IA32_SYSENTER_CS:
case MSR_IA32_SYSENTER_EIP:
case MSR_IA32_SYSENTER_ESP:
case MSR_K8_FS_BASE:
case MSR_K8_GS_BASE:
return VERR_VMX_UNEXPECTED_EXIT_CODE;
case MSR_K8_LSTAR:
case MSR_K6_STAR:
case MSR_K8_SF_MASK:
case MSR_K8_TSC_AUX:
case MSR_K8_KERNEL_GS_BASE:
AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
return VERR_VMX_UNEXPECTED_EXIT_CODE;
return rc;
static DECLCALLBACK(int) hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return VERR_EM_INTERPRETER;
return VERR_VMX_UNEXPECTED_EXIT_CODE;
static DECLCALLBACK(int) hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
 * The TPR has already been updated, see hmR0VmxPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
* the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and
return VINF_SUCCESS;
static DECLCALLBACK(int) hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
switch (uAccessType)
/* EMInterpretCRxWrite() references a lot of guest state (EFER, RFLAGS, segment registers, etc.). Sync the entire state. */
Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
/* We don't need to update HM_CHANGED_VMX_GUEST_APIC_STATE here as this -cannot- happen with TPR shadowing. */
AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
Assert(rc == VINF_SUCCESS || rc == VINF_PGM_CHANGE_MODE || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_SYNC_CR3
return rc;
static DECLCALLBACK(int) hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
/* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
static const uint32_t s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
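    /* Sketch: the exit qualification encodes the access width as 0 (byte), 1 (word)
       or 3 (dword); index 2 is invalid, hence the 0 mask above. Variable names here
       are illustrative. */
    uint32_t uAndVal = s_aIOOpAnd[uIOWidth & 3];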
if (fIOString)
if (fIOWrite)
if (fIOWrite)
HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbSize);
HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbSize);
#ifdef DEBUG
return rc;
static DECLCALLBACK(int) hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
/* Software interrupts and exceptions will be regenerated when the recompiler restarts the instruction. */
bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
if (fErrorCodeValid)
return VERR_EM_INTERPRETER;
static DECLCALLBACK(int) hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return VINF_EM_DBG_STOP;
static DECLCALLBACK(int) hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
/* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
return VINF_SUCCESS;
return rc;
/* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
switch (uAccessType)
RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
return rc;
static DECLCALLBACK(int) hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
return VERR_VMX_UNEXPECTED_EXIT_CODE;
#ifdef VBOX_WITH_STATISTICS
if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
return VINF_SUCCESS;
* EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date, see
if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
return rc;
static DECLCALLBACK(int) hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
/* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
return VINF_SUCCESS;
return rc;
* of the page containing the instruction via the guest's page tables (we would invalidate the guest page
 * in the host TLB), and resume execution, which would cause a guest page fault to let the guest handle this
VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
return VINF_SUCCESS;
return rc;
static DECLCALLBACK(int) hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
/* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
return VINF_SUCCESS;
return rc;
AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RGv", pVmxTransient->uExitQualification));
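    /* Sketch: fold the EPT-violation exit qualification bits into a #PF-style error
       code for PGM (the VMX_EXIT_QUALIFICATION_EPT_* names are assumptions): */
    RTGCUINT uErrorCode = 0;
    if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
        uErrorCode |= X86_TRAP_PF_ID;       /* Instruction fetch. */
    if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
        uErrorCode |= X86_TRAP_PF_RW;       /* Write access. */
    if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
        uErrorCode |= X86_TRAP_PF_P;        /* The EPT entry was present. */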
Log(("EPT violation %#x at %#RGv ErrorCode %#x CS:EIP=%04x:%#RX64\n", (uint32_t)pVmxTransient->uExitQualification, GCPhys,
rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
return VINF_SUCCESS;
return rc;
return VERR_EM_INTERPRETER;
hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
return rc;
hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
return rc;
/* DR6, DR7.GD and IA32_DEBUGCTL.LBR are not updated yet. See Intel spec. 27.1 "Architectural State before a VM-Exit". */
hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
return rc;
#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
return VINF_SUCCESS;
hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
return rc;
#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
/* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
Log(("#GP Gst: RIP %#RX64 ErrorCode=%#x CR0=%#RGv CPL=%u\n", pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode,
hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
return rc;
return VERR_VMX_UNEXPECTED_EXCEPTION;
Log(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
case OP_CLI:
case OP_STI:
case OP_HLT:
case OP_POPF:
rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
&GCPtrStack);
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
case OP_PUSHF:
/* The RF and VM bits are cleared in the image stored on the stack; see the Intel instruction reference for PUSHF. */
case OP_IRET:
rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
&GCPtrStack);
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_SEGMENT_REGS | HM_CHANGED_GUEST_RSP
case OP_INT:
case OP_INTO:
VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
return rc;
/* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
return VINF_SUCCESS;
pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
return rc;
#ifdef VBOX_HM_WITH_GUEST_PATCHING
if (!pPatch)
return VINF_EM_HM_PATCH_TPR_INSTR;
Log(("#PF: cr2=%#RGv cs:rip=%#04x:%#RGv uErrCode %#RX32 cr3=%#RGv\n", pVmxTransient->uExitQualification, pMixedCtx->cs.Sel,
TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntrErrorCode);
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
return rc;
pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
return VINF_SUCCESS;
return rc;