/* $Id$ */
/** @file
 * HM VMX (Intel VT-x) - Host Context Ring-0.
 */

/*
 * Copyright (C) 2012-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#include <iprt/asm-amd64-x86.h>
#include <iprt/thread.h>
#include <iprt/string.h>

#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include "HMVMXR0.h"
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/tm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#ifdef DEBUG_ramshankar
#define HMVMX_SAVE_FULL_GUEST_STATE
#define HMVMX_SYNC_FULL_GUEST_STATE
#define HMVMX_ALWAYS_TRAP_ALL_XCPTS
#define HMVMX_ALWAYS_TRAP_PF
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
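/** Host-context register type used by this code; redefined to be 64-bit wide
 *  below for hybrid 32-bit kernels, where VMX operates with a 64-bit host. */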
#define HMVMXHCUINTREG                  RTHCUINTREG
#if defined(RT_ARCH_AMD64)
# define HMVMX_IS_64BIT_HOST_MODE()     (true)
#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
extern "C" uint32_t g_fVMXIs64bitHost;
# define HMVMX_IS_64BIT_HOST_MODE()     (g_fVMXIs64bitHost != 0)
# undef  HMVMXHCUINTREG
# define HMVMXHCUINTREG                 uint64_t
#else
# define HMVMX_IS_64BIT_HOST_MODE()     (false)
#endif

/** Use the function table. */
#define HMVMX_USE_FUNCTION_TABLE

/** This bit indicates the segment selector is unusable in VT-x. */
#define HMVMX_SEL_UNUSABLE              RT_BIT(16)

/** Determine which tagged-TLB flush handler to use. */
#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
#define HMVMX_FLUSH_TAGGED_TLB_EPT      1
#define HMVMX_FLUSH_TAGGED_TLB_VPID     2
#define HMVMX_FLUSH_TAGGED_TLB_NONE     3

/** Updated-guest-state flags. */
#define HMVMX_UPDATED_GUEST_RIP                   RT_BIT(0)
#define HMVMX_UPDATED_GUEST_RSP                   RT_BIT(1)
#define HMVMX_UPDATED_GUEST_RFLAGS                RT_BIT(2)
#define HMVMX_UPDATED_GUEST_CR0                   RT_BIT(3)
#define HMVMX_UPDATED_GUEST_CR3                   RT_BIT(4)
#define HMVMX_UPDATED_GUEST_CR4                   RT_BIT(5)
#define HMVMX_UPDATED_GUEST_GDTR                  RT_BIT(6)
#define HMVMX_UPDATED_GUEST_IDTR                  RT_BIT(7)
#define HMVMX_UPDATED_GUEST_LDTR                  RT_BIT(8)
#define HMVMX_UPDATED_GUEST_TR                    RT_BIT(9)
#define HMVMX_UPDATED_GUEST_SEGMENT_REGS          RT_BIT(10)
#define HMVMX_UPDATED_GUEST_DEBUG                 RT_BIT(11)
#define HMVMX_UPDATED_GUEST_FS_BASE_MSR           RT_BIT(12)
#define HMVMX_UPDATED_GUEST_GS_BASE_MSR           RT_BIT(13)
#define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR       RT_BIT(14)
#define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR      RT_BIT(15)
#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR      RT_BIT(16)
#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS  RT_BIT(17)
#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE        RT_BIT(18)
#define HMVMX_UPDATED_GUEST_APIC_STATE            RT_BIT(19)
#define HMVMX_UPDATED_GUEST_ALL                   (  HMVMX_UPDATED_GUEST_RIP                  \
                                                   | HMVMX_UPDATED_GUEST_RSP                  \
                                                   | HMVMX_UPDATED_GUEST_RFLAGS               \
                                                   | HMVMX_UPDATED_GUEST_CR0                  \
                                                   | HMVMX_UPDATED_GUEST_CR3                  \
                                                   | HMVMX_UPDATED_GUEST_CR4                  \
                                                   | HMVMX_UPDATED_GUEST_GDTR                 \
                                                   | HMVMX_UPDATED_GUEST_IDTR                 \
                                                   | HMVMX_UPDATED_GUEST_LDTR                 \
                                                   | HMVMX_UPDATED_GUEST_TR                   \
                                                   | HMVMX_UPDATED_GUEST_SEGMENT_REGS         \
                                                   | HMVMX_UPDATED_GUEST_DEBUG                \
                                                   | HMVMX_UPDATED_GUEST_FS_BASE_MSR          \
                                                   | HMVMX_UPDATED_GUEST_GS_BASE_MSR          \
                                                   | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR      \
                                                   | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR     \
                                                   | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR     \
                                                   | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
                                                   | HMVMX_UPDATED_GUEST_ACTIVITY_STATE       \
                                                   | HMVMX_UPDATED_GUEST_APIC_STATE)

/**
 * Flags to skip redundant reads of some common VMCS fields that are not part of
 * the guest-CPU state but are in the transient structure.
 */
#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO            RT_BIT(0)
#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE      RT_BIT(1)
#define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION            RT_BIT(2)
#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN                RT_BIT(3)
#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO        RT_BIT(4)
#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE  RT_BIT(5)

/**
 * Exception bitmap mask for real-mode guests (real-on-v86). We need to
 * intercept all exceptions manually (except #PF). #NM is also handled
 * separately, see hmR0VmxLoadGuestControlRegs(). #PF need not be intercepted
 * even in real-mode if we have Nested Paging support.
 */
#define HMVMX_REAL_MODE_XCPT_MASK    (  RT_BIT(X86_XCPT_DE)             | RT_BIT(X86_XCPT_DB)    | RT_BIT(X86_XCPT_NMI)   \
                                      | RT_BIT(X86_XCPT_BP)             | RT_BIT(X86_XCPT_OF)    | RT_BIT(X86_XCPT_BR)    \
                                      | RT_BIT(X86_XCPT_UD)            /* RT_BIT(X86_XCPT_NM) */ | RT_BIT(X86_XCPT_DF)    \
                                      | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS)    | RT_BIT(X86_XCPT_NP)    \
                                      | RT_BIT(X86_XCPT_SS)             | RT_BIT(X86_XCPT_GP)   /* RT_BIT(X86_XCPT_PF) */ \
                                      | RT_BIT(X86_XCPT_MF)             | RT_BIT(X86_XCPT_AC)    | RT_BIT(X86_XCPT_MC)    \
                                      | RT_BIT(X86_XCPT_XF))

/**
 * Exception bitmap mask for all contributory exceptions.
 */
#define HMVMX_CONTRIBUTORY_XCPT_MASK (  RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
                                      | RT_BIT(X86_XCPT_DE))

/** Maximum VM-instruction error number. */
#define HMVMX_INSTR_ERROR_MAX        28


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * A state structure for holding miscellaneous information across
 * VMX non-root operation and restored after the transition.
 */
typedef struct VMXTRANSIENT
{
    /** The host's rflags/eflags. */
    RTCCUINTREG     uEFlags;
#if HC_ARCH_BITS == 32
    uint32_t        u32Alignment0;
#endif
    /** The guest's LSTAR MSR value used for TPR patching for 32-bit guests. */
    uint64_t        u64LStarMsr;
    /** The guest's TPR value used for TPR shadowing. */
    uint8_t         u8GuestTpr;
    /** Alignment. */
    uint8_t         abAlignment0[7];

    /** The basic VM-exit reason. */
    uint16_t        uExitReason;
    /** Alignment. */
    uint16_t        u16Alignment0;
    /** The VM-exit interruption error code. */
    uint32_t        uExitIntrErrorCode;
    /** The VM-exit exit qualification. */
    RTGCUINTPTR     uExitQualification;
#if GC_ARCH_BITS == 32
    /** Alignment. */
    uint32_t        u32Alignment1;
#endif

    /** The VM-exit interruption-information field. */
    uint32_t        uExitIntrInfo;
    /** The VM-exit instruction-length field. */
    uint32_t        cbInstr;
    /** Whether the VM-entry failed or not. */
    bool            fVMEntryFailed;
    /** Alignment. */
    uint8_t         abAlignment1[7];

    /** The VM-entry interruption-information field. */
    uint32_t        uEntryIntrInfo;
    /** The VM-entry exception error code field. */
    uint32_t        uEntryXcptErrorCode;
    /** The VM-entry instruction length field. */
    uint32_t        cbEntryInstr;

    /** IDT-vectoring information field. */
    uint32_t        uIdtVectoringInfo;
    /** IDT-vectoring error code. */
    uint32_t        uIdtVectoringErrorCode;

    /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
    uint32_t        fVmcsFieldsRead;
    /** Whether TSC-offsetting should be setup before VM-entry. */
    bool            fUpdateTscOffsettingAndPreemptTimer;
    /** Whether the VM-exit was caused by a page-fault during delivery of a
     *  contributory exception or a page-fault. */
    bool            fVectoringPF;
} VMXTRANSIENT, *PVMXTRANSIENT;
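/* Compile-time checks that the explicit alignment members above keep these
   fields on 64-bit boundaries for both 32-bit and 64-bit hosts. */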
AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason,    sizeof(uint64_t));
AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntrInfo,  sizeof(uint64_t));
AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntrInfo, sizeof(uint64_t));


/**
 * MSR-bitmap read permissions.
 */
typedef enum VMXMSREXITREAD
{
    /** Reading this MSR causes a VM-exit. */
    VMXMSREXIT_INTERCEPT_READ = 0xb,
    /** Reading this MSR does not cause a VM-exit. */
    VMXMSREXIT_PASSTHRU_READ
} VMXMSREXITREAD;

/**
 * MSR-bitmap write permissions.
 */
typedef enum VMXMSREXITWRITE
{
    /** Writing to this MSR causes a VM-exit. */
    VMXMSREXIT_INTERCEPT_WRITE = 0xd,
    /** Writing to this MSR does not cause a VM-exit. */
    VMXMSREXIT_PASSTHRU_WRITE
} VMXMSREXITWRITE;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
static int  hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
                                   uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState);
#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
static int  hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
#endif
#ifndef HMVMX_USE_FUNCTION_TABLE
DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
#endif

static DECLCALLBACK(int) hmR0VmxExitXcptNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static DECLCALLBACK(int) hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);

static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifdef HMVMX_USE_FUNCTION_TABLE
/**
 * VM-exit handler.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
 *                          out-of-sync. Make sure to update the required
 *                          fields before using them.
 * @param   pVmxTransient   Pointer to the VMX-transient structure.
 */
typedef DECLCALLBACK(int) FNVMEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
/** Pointer to VM-exit handler. */
typedef FNVMEXITHANDLER *const PFNVMEXITHANDLER;

/**
 * VMX_EXIT dispatch table.
 */
static const PFNVMEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
{
 /* 00  VMX_EXIT_XCPT_NMI                */  hmR0VmxExitXcptNmi,
 /* 01  VMX_EXIT_EXT_INT                 */  hmR0VmxExitExtInt,
 /* 02  VMX_EXIT_TRIPLE_FAULT            */  hmR0VmxExitTripleFault,
 /* 03  VMX_EXIT_INIT_SIGNAL             */  hmR0VmxExitInitSignal,
 /* 04  VMX_EXIT_SIPI                    */  hmR0VmxExitSipi,
 /* 05  VMX_EXIT_IO_SMI                  */  hmR0VmxExitIoSmi,
 /* 06  VMX_EXIT_SMI                     */  hmR0VmxExitSmi,
 /* 07  VMX_EXIT_INT_WINDOW              */  hmR0VmxExitIntWindow,
 /* 08  VMX_EXIT_NMI_WINDOW              */  hmR0VmxExitNmiWindow,
 /* 09  VMX_EXIT_TASK_SWITCH             */  hmR0VmxExitTaskSwitch,
 /* 10  VMX_EXIT_CPUID                   */  hmR0VmxExitCpuid,
 /* 11  VMX_EXIT_GETSEC                  */  hmR0VmxExitGetsec,
 /* 12  VMX_EXIT_HLT                     */  hmR0VmxExitHlt,
 /* 13  VMX_EXIT_INVD                    */  hmR0VmxExitInvd,
 /* 14  VMX_EXIT_INVLPG                  */  hmR0VmxExitInvlpg,
 /* 15  VMX_EXIT_RDPMC                   */  hmR0VmxExitRdpmc,
 /* 16  VMX_EXIT_RDTSC                   */  hmR0VmxExitRdtsc,
 /* 17  VMX_EXIT_RSM                     */  hmR0VmxExitRsm,
 /* 18  VMX_EXIT_VMCALL                  */  hmR0VmxExitSetPendingXcptUD,
 /* 19  VMX_EXIT_VMCLEAR                 */  hmR0VmxExitSetPendingXcptUD,
 /* 20  VMX_EXIT_VMLAUNCH                */  hmR0VmxExitSetPendingXcptUD,
 /* 21  VMX_EXIT_VMPTRLD                 */  hmR0VmxExitSetPendingXcptUD,
 /* 22  VMX_EXIT_VMPTRST                 */  hmR0VmxExitSetPendingXcptUD,
 /* 23  VMX_EXIT_VMREAD                  */  hmR0VmxExitSetPendingXcptUD,
 /* 24  VMX_EXIT_VMRESUME                */  hmR0VmxExitSetPendingXcptUD,
 /* 25  VMX_EXIT_VMWRITE                 */  hmR0VmxExitSetPendingXcptUD,
 /* 26  VMX_EXIT_VMXOFF                  */  hmR0VmxExitSetPendingXcptUD,
 /* 27  VMX_EXIT_VMXON                   */  hmR0VmxExitSetPendingXcptUD,
 /* 28  VMX_EXIT_MOV_CRX                 */  hmR0VmxExitMovCRx,
 /* 29  VMX_EXIT_MOV_DRX                 */  hmR0VmxExitMovDRx,
 /* 30  VMX_EXIT_IO_INSTR                */  hmR0VmxExitIoInstr,
 /* 31  VMX_EXIT_RDMSR                   */  hmR0VmxExitRdmsr,
 /* 32  VMX_EXIT_WRMSR                   */  hmR0VmxExitWrmsr,
 /* 33  VMX_EXIT_ERR_INVALID_GUEST_STATE */  hmR0VmxExitErrInvalidGuestState,
 /* 34  VMX_EXIT_ERR_MSR_LOAD            */  hmR0VmxExitErrMsrLoad,
 /* 35  UNDEFINED                        */  hmR0VmxExitErrUndefined,
 /* 36  VMX_EXIT_MWAIT                   */  hmR0VmxExitMwait,
 /* 37  VMX_EXIT_MTF                     */  hmR0VmxExitMtf,
 /* 38  UNDEFINED                        */  hmR0VmxExitErrUndefined,
 /* 39  VMX_EXIT_MONITOR                 */  hmR0VmxExitMonitor,
 /* 40  VMX_EXIT_PAUSE                   */  hmR0VmxExitPause,
 /* 41  VMX_EXIT_ERR_MACHINE_CHECK       */  hmR0VmxExitErrMachineCheck,
 /* 42  UNDEFINED                        */  hmR0VmxExitErrUndefined,
 /* 43  VMX_EXIT_TPR_BELOW_THRESHOLD     */  hmR0VmxExitTprBelowThreshold,
 /* 44  VMX_EXIT_APIC_ACCESS             */  hmR0VmxExitApicAccess,
 /* 45  UNDEFINED                        */  hmR0VmxExitErrUndefined,
 /* 46  VMX_EXIT_XDTR_ACCESS             */  hmR0VmxExitXdtrAccess,
 /* 47  VMX_EXIT_TR_ACCESS               */  hmR0VmxExitXdtrAccess,
 /* 48  VMX_EXIT_EPT_VIOLATION           */  hmR0VmxExitEptViolation,
 /* 49  VMX_EXIT_EPT_MISCONFIG           */  hmR0VmxExitEptMisconfig,
 /* 50  VMX_EXIT_INVEPT                  */  hmR0VmxExitSetPendingXcptUD,
 /* 51  VMX_EXIT_RDTSCP                  */  hmR0VmxExitRdtscp,
 /* 52  VMX_EXIT_PREEMPT_TIMER           */  hmR0VmxExitPreemptTimer,
 /* 53  VMX_EXIT_INVVPID                 */  hmR0VmxExitSetPendingXcptUD,
 /* 54  VMX_EXIT_WBINVD                  */  hmR0VmxExitWbinvd,
 /* 55  VMX_EXIT_XSETBV                  */  hmR0VmxExitXsetbv,
 /* 56  UNDEFINED                        */  hmR0VmxExitErrUndefined,
 /* 57  VMX_EXIT_RDRAND                  */  hmR0VmxExitRdrand,
 /* 58  VMX_EXIT_INVPCID                 */  hmR0VmxExitInvpcid,
 /* 59  VMX_EXIT_VMFUNC                  */  hmR0VmxExitSetPendingXcptUD
};
#endif /* HMVMX_USE_FUNCTION_TABLE */

#ifdef VBOX_STRICT
static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
{
    /* 0 */  "(Not Used)",
    /* 1 */  "VMCALL executed in VMX root operation.",
    /* 2 */  "VMCLEAR with invalid physical address.",
    /* 3 */  "VMCLEAR with VMXON pointer.",
    /* 4 */  "VMLAUNCH with non-clear VMCS.",
    /* 5 */  "VMRESUME with non-launched VMCS.",
    /* 6 */  "VMRESUME after VMXOFF.",
    /* 7 */  "VM entry with invalid control fields.",
    /* 8 */  "VM entry with invalid host state fields.",
    /* 9 */  "VMPTRLD with invalid physical address.",
    /* 10 */ "VMPTRLD with VMXON pointer.",
    /* 11 */ "VMPTRLD with incorrect revision identifier.",
    /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
    /* 13 */ "VMWRITE to read-only VMCS component.",
    /* 14 */ "(Not Used)",
    /* 15 */ "VMXON executed in VMX root operation.",
    /* 16 */ "VM entry with invalid executive-VMCS pointer.",
    /* 17 */ "VM entry with non-launched executive VMCS.",
    /* 18 */ "VM entry with executive-VMCS pointer not VMXON pointer.",
    /* 19 */ "VMCALL with non-clear VMCS.",
    /* 20 */ "VMCALL with invalid VM-exit control fields.",
    /* 21 */ "(Not Used)",
    /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
    /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
    /* 24 */ "VMCALL with invalid SMM-monitor features.",
    /* 25 */ "VM entry with invalid VM-execution control fields in executive VMCS.",
    /* 26 */ "VM entry with events blocked by MOV SS.",
    /* 27 */ "(Not Used)",
    /* 28 */ "Invalid operand to INVEPT/INVVPID."
};
#endif /* VBOX_STRICT */


/**
 * Updates the VM's last error record. If there was a VMX instruction error,
 * reads the error data from the VMCS and updates VCPU's last error record as
 * well.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU (can be NULL if @a rc is not
 *                  VERR_VMX_UNABLE_TO_START_VM or
 *                  VERR_VMX_INVALID_VMCS_FIELD).
 * @param   rc      The error code.
 */
static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
{
    AssertPtr(pVM);
    if (   rc == VERR_VMX_INVALID_VMCS_FIELD
        || rc == VERR_VMX_UNABLE_TO_START_VM)
    {
        AssertPtrReturnVoid(pVCpu);
        VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.lasterror.u32InstrError);
    }
    pVM->hm.s.lLastError = rc;
}


/**
 * Reads the VM-entry interruption-information field from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 *
 * @remarks No-long-jump zone!!!
 */
DECLINLINE(int) hmR0VmxReadEntryIntrInfoVmcs(PVMXTRANSIENT pVmxTransient)
{
    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntrInfo);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}


/**
 * Reads the VM-entry exception error code field from the VMCS into
 * the VMX transient structure.
 *
 * @returns VBox status code.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 *
 * @remarks No-long-jump zone!!!
 */
DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
{
    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}


/**
 * Reads the VM-entry instruction length field from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 *
 * @remarks No-long-jump zone!!!
 */
DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}


/**
 * Reads the VM-exit interruption-information field from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 */
DECLINLINE(int) hmR0VmxReadExitIntrInfoVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
    {
        int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntrInfo);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
    }
    return VINF_SUCCESS;
}


/**
 * Reads the VM-exit interruption error code from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 */
DECLINLINE(int) hmR0VmxReadExitIntrErrorCodeVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
    {
        int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntrErrorCode);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
    }
    return VINF_SUCCESS;
}


/**
 * Reads the VM-exit instruction length field from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 */
DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
    {
        int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
    }
    return VINF_SUCCESS;
}


/**
 * Reads the exit qualification from the VMCS into the VMX transient structure.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 */
DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
    {
        int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
    }
    return VINF_SUCCESS;
}


/**
 * Reads the IDT-vectoring information field from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 *
 * @remarks No-long-jump zone!!!
 */
DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
    {
        int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
    }
    return VINF_SUCCESS;
}


/**
 * Reads the IDT-vectoring error code from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 */
DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
    {
        int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
    }
    return VINF_SUCCESS;
}


/**
 * Enters VMX root mode operation on the current CPU.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM (optional, can be NULL, after
 *                          a resume).
 * @param   HCPhysCpuPage   Physical address of the VMXON region.
 * @param   pvCpuPage       Pointer to the VMXON region.
 */
static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
{
    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
    AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    if (pVM)
    {
        /* Write the VMCS revision dword to the VMXON region. */
        *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
    }

    /* Enable the VMX bit in CR4 if necessary. */
    RTCCUINTREG uCr4 = ASMGetCR4();
    if (!(uCr4 & X86_CR4_VMXE))
        ASMSetCR4(uCr4 | X86_CR4_VMXE);

    /* Enter VMX root mode. */
    int rc = VMXEnable(HCPhysCpuPage);
    if (RT_FAILURE(rc))
        ASMSetCR4(uCr4);

    return rc;
}


/**
 * Exits VMX root mode operation on the current CPU.
 *
 * @returns VBox status code.
 */
static int hmR0VmxLeaveRootMode(void)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    /* If we're for some reason not in VMX root mode, then don't leave it. */
    if (ASMGetCR4() & X86_CR4_VMXE)
    {
        /* Exit VMX root mode and clear the VMX bit in CR4. */
        VMXDisable();
        ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
        return VINF_SUCCESS;
    }

    return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
}


/**
 * Allocates and maps one physically contiguous page. The allocated page is
 * zero'd out. (Used by various VT-x structures).
 *
 * @returns IPRT status code.
 * @param   pMemObj         Pointer to the ring-0 memory object.
 * @param   ppVirt          Where to store the virtual address of the
 *                          allocation.
 * @param   pHCPhys         Where to store the physical address of the
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * allocation.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jlDECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl{
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
0cc8ae8667155d352d327b5c92b62899a7e05bcdav int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (RT_FAILURE(rc))
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return rc;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *ppVirt = RTR0MemObjAddress(*pMemObj);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl ASMMemZero32(*ppVirt, PAGE_SIZE);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return VINF_SUCCESS;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav}
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl/**
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Frees and unmaps an allocated physical page.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param pMemObj Pointer to the ring-0 memory object.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param ppVirt Where to re-initialize the virtual address of
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * allocation as 0.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param pHCPhys Where to re-initialize the physical address of the
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * allocation as 0.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jlDECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh{
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh AssertPtr(pMemObj);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertPtr(ppVirt);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertPtr(pHCPhys);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh if (*pMemObj != NIL_RTR0MEMOBJ)
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh {
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh AssertRC(rc);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *pMemObj = NIL_RTR0MEMOBJ;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *ppVirt = 0;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *pHCPhys = 0;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl}
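
/*
 * Illustrative usage sketch (hypothetical local variables): the two helpers
 * above are always used as a pair on the same (memory object, virtual
 * address, physical address) triple, as the allocation loops below do for
 * the VMCS, virtual-APIC and MSR pages:
 *
 *     RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
 *     RTR0PTR    pvPage  = 0;
 *     RTHCPHYS   HCPhys  = 0;
 *     int rc = hmR0VmxPageAllocZ(&hMemObj, &pvPage, &HCPhys);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... hand HCPhys to the VMCS, access the page via pvPage ...
 *         hmR0VmxPageFree(&hMemObj, &pvPage, &HCPhys);
 *     }
 */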
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl/**
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Worker function to free VT-x related structures.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @returns IPRT status code.
738dd1949fabecbe3a63d62def16a5d521e85911hyw * @param pVM Pointer to the VM.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jlstatic void hmR0VmxStructsFree(PVM pVM)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl{
25cf1a301a396c38e8adf52c15f537b80d2483f7jl for (VMCPUID i = 0; i < pVM->cCpus; i++)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl PVMCPU pVCpu = &pVM->aCpus[i];
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertPtr(pVCpu);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
25cf1a301a396c38e8adf52c15f537b80d2483f7jl hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#endif
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#ifdef VBOX_WITH_CRASHDUMP_MAGIC
25cf1a301a396c38e8adf52c15f537b80d2483f7jl hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#endif
25cf1a301a396c38e8adf52c15f537b80d2483f7jl}
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl/**
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Worker function to allocate VT-x related VM structures.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *
738dd1949fabecbe3a63d62def16a5d521e85911hyw * @returns IPRT status code.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param pVM Pointer to the VM.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jlstatic int hmR0VmxStructsAlloc(PVM pVM)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl{
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /*
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Initialize members up-front so we can cleanup properly on allocation failure.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVM->hm.s.vmx.HCPhys##a_Name = 0;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#ifdef VBOX_WITH_CRASHDUMP_MAGIC
25cf1a301a396c38e8adf52c15f537b80d2483f7jl VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#endif
25cf1a301a396c38e8adf52c15f537b80d2483f7jl VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl for (VMCPUID i = 0; i < pVM->cCpus; i++)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl PVMCPU pVCpu = &pVM->aCpus[i];
25cf1a301a396c38e8adf52c15f537b80d2483f7jl VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
25cf1a301a396c38e8adf52c15f537b80d2483f7jl VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#endif
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#undef VMXLOCAL_INIT_VM_MEMOBJ
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /*
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Allocate all the VT-x structures.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl int rc = VINF_SUCCESS;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#ifdef VBOX_WITH_CRASHDUMP_MAGIC
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (RT_FAILURE(rc))
25cf1a301a396c38e8adf52c15f537b80d2483f7jl goto cleanup;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xDEADBEEFDEADBEEF);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#endif
738dd1949fabecbe3a63d62def16a5d521e85911hyw
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
25cf1a301a396c38e8adf52c15f537b80d2483f7jl &pVM->hm.s.vmx.HCPhysApicAccess);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (RT_FAILURE(rc))
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav goto cleanup;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh /*
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh * Initialize per-VCPU VT-x structures.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl for (VMCPUID i =0; i < pVM->cCpus; i++)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav PVMCPU pVCpu = &pVM->aCpus[i];
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertPtr(pVCpu);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Allocate the VM control structure (VMCS). */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertReturn(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.vmx_basic_info) <= PAGE_SIZE, VERR_INTERNAL_ERROR);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (RT_FAILURE(rc))
0cc8ae8667155d352d327b5c92b62899a7e05bcdav goto cleanup;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Allocate the Virtual-APIC page for transparent TPR accesses. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
0cc8ae8667155d352d327b5c92b62899a7e05bcdav &pVCpu->hm.s.vmx.HCPhysVirtApic);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (RT_FAILURE(rc))
0cc8ae8667155d352d327b5c92b62899a7e05bcdav goto cleanup;
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl }
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl /* Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for transparent accesses of specific MSRs. */
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
0cc8ae8667155d352d327b5c92b62899a7e05bcdav &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav if (RT_FAILURE(rc))
0cc8ae8667155d352d327b5c92b62899a7e05bcdav goto cleanup;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav memset(pVCpu->hm.s.vmx.pvMsrBitmap, 0xff, PAGE_SIZE);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (RT_FAILURE(rc))
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav goto cleanup;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl /* Allocate the VM-exit MSR-load page for the host MSRs. */
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
056c948b50f079598d6121c0aeabf1de50fabd4etsien if (RT_FAILURE(rc))
056c948b50f079598d6121c0aeabf1de50fabd4etsien goto cleanup;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#endif
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return VINF_SUCCESS;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdavcleanup:
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav hmR0VmxStructsFree(pVM);
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki return rc;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav}


/**
 * Does global VT-x initialization (called during module initialization).
 *
 * @returns VBox status code.
 */
VMMR0DECL(int) VMXR0GlobalInit(void)
{
#ifdef HMVMX_USE_FUNCTION_TABLE
    AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
# ifdef VBOX_STRICT
    for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
        Assert(g_apfnVMExitHandlers[i]);
# endif
#endif
    return VINF_SUCCESS;
}


/**
 * Does global VT-x termination (called during module termination).
 */
VMMR0DECL(void) VMXR0GlobalTerm()
{
    /* Nothing to do currently. */
}


/**
 * Sets up and activates VT-x on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            Pointer to the global CPU info struct.
 * @param   pVM             Pointer to the VM (can be NULL after a host resume
 *                          operation).
 * @param   pvCpuPage       Pointer to the VMXON region (can be NULL if @a
 *                          fEnabledByHost is true).
 * @param   HCPhysCpuPage   Physical address of the VMXON region (can be 0 if
 *                          @a fEnabledByHost is true).
 * @param   fEnabledByHost  Set if SUPR0EnableVTx() or similar was used to
 *                          enable VT-x/AMD-V on the host.
 */
VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
{
    AssertReturn(pCpu, VERR_INVALID_PARAMETER);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    if (!fEnabledByHost)
    {
        int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Flush all VPIDs (in case we or any other hypervisor have been using VPIDs) so that
     * we can avoid an explicit flush while using new VPIDs. We would still need to flush
     * each time while reusing a VPID after hitting the MaxASID limit once.
     */
    if (   pVM
        && pVM->hm.s.vmx.fVpid
        && (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS))
    {
        hmR0VmxFlushVpid(pVM, NULL /* pVCpu */, VMX_FLUSH_VPID_ALL_CONTEXTS, 0 /* GCPtr */);
        pCpu->fFlushAsidBeforeUse = false;
    }
    else
        pCpu->fFlushAsidBeforeUse = true;

    /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
    ++pCpu->cTlbFlushes;

    return VINF_SUCCESS;
}
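
/*
 * Illustrative call patterns (arguments hypothetical): during VM startup the
 * HM ring-0 glue passes a real pVM along with the per-CPU VMXON page, while
 * after a host suspend/resume pVM can be NULL and only root mode is
 * re-entered on this CPU:
 *
 *     rc = VMXR0EnableCpu(pCpu, pVM,  pvCpuPage, HCPhysCpuPage, false); // fEnabledByHost
 *     rc = VMXR0EnableCpu(pCpu, NULL, pvCpuPage, HCPhysCpuPage, false); // fEnabledByHost
 */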


/**
 * Deactivates VT-x on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            Pointer to the global CPU info struct.
 * @param   pvCpuPage       Pointer to the VMXON region.
 * @param   HCPhysCpuPage   Physical address of the VMXON region.
 */
VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
{
    NOREF(pCpu);
    NOREF(pvCpuPage);
    NOREF(HCPhysCpuPage);

    hmR0VmxLeaveRootMode();
    return VINF_SUCCESS;
}


/**
 * Sets the permission bits for the specified MSR in the MSR bitmap.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   uMsr        The MSR value.
 * @param   enmRead     Whether reading this MSR causes a VM-exit.
 * @param   enmWrite    Whether writing this MSR causes a VM-exit.
 */
static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
{
    int32_t iBit;
    uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;

    /*
     * Layout:
     * 0x000 - 0x3ff - Low MSR read bits
     * 0x400 - 0x7ff - High MSR read bits
     * 0x800 - 0xbff - Low MSR write bits
     * 0xc00 - 0xfff - High MSR write bits
     */
    if (uMsr <= 0x00001FFF)
        iBit = uMsr;
    else if (   uMsr >= 0xC0000000
             && uMsr <= 0xC0001FFF)
    {
        iBit = (uMsr - 0xC0000000);
        pbMsrBitmap += 0x400;
    }
    else
    {
        AssertMsgFailed(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
        return;
    }

    Assert(iBit <= 0x1fff);
    if (enmRead == VMXMSREXIT_INTERCEPT_READ)
        ASMBitSet(pbMsrBitmap, iBit);
    else
        ASMBitClear(pbMsrBitmap, iBit);

    if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
        ASMBitSet(pbMsrBitmap + 0x800, iBit);
    else
        ASMBitClear(pbMsrBitmap + 0x800, iBit);
}
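
/*
 * Worked example (illustrative; the PASSTHRU/INTERCEPT enum values are
 * assumed from HMInternal.h): to let the guest read IA32_EFER (0xC0000080)
 * directly but trap its writes, the MSR falls in the high range, so
 * iBit = 0xC0000080 - 0xC0000000 = 0x80; the read bit is cleared at bitmap
 * offset 0x400 and the write bit is set at 0x400 + 0x800 = 0xC00:
 *
 *     hmR0VmxSetMsrPermission(pVCpu, 0xC0000080, VMXMSREXIT_PASSTHRU_READ,
 *                             VMXMSREXIT_INTERCEPT_WRITE);
 */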


/**
 * Flushes the TLB using EPT.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   enmFlush    Type of flush.
 */
static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
{
    AssertPtr(pVM);
    Assert(pVM->hm.s.fNestedPaging);

    LogFlowFunc(("pVM=%p pVCpu=%p enmFlush=%d\n", pVM, pVCpu, enmFlush));

    uint64_t descriptor[2];
    descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
    descriptor[1] = 0;                          /* MBZ. Intel spec. 33.3 "VMX Instructions" */

    int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
    AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu->hm.s.vmx.HCPhysEPTP, rc));
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
}


/**
 * Flushes the TLB using VPID.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU (can be NULL depending on @a
 *                      enmFlush).
 * @param   enmFlush    Type of flush.
 * @param   GCPtr       Virtual address of the page to flush (can be 0 depending
 *                      on @a enmFlush).
 */
static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr)
{
    AssertPtr(pVM);
    Assert(pVM->hm.s.vmx.fVpid);

    uint64_t descriptor[2];
    if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
    {
        descriptor[0] = 0;
        descriptor[1] = 0;
    }
    else
    {
        AssertPtr(pVCpu);
        AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
        AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
        descriptor[0] = pVCpu->hm.s.uCurrentAsid;
        descriptor[1] = GCPtr;
    }

    int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
    AssertMsg(rc == VINF_SUCCESS,
              ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
    if (   RT_SUCCESS(rc)
        && pVCpu)
    {
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
    }
}
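
/*
 * Illustrative call sites (both occur in this file): a NULL pVCpu is only
 * valid for the all-contexts flush, where the INVVPID descriptor is all
 * zeroes; single-address flushes need the VCPU's current VPID:
 *
 *     hmR0VmxFlushVpid(pVM, NULL,  VMX_FLUSH_VPID_ALL_CONTEXTS, 0);      // e.g. VMXR0EnableCpu()
 *     hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR,   GCVirt); // e.g. VMXR0InvalidatePage()
 */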


/**
 * Invalidates a guest page by guest virtual address. Only relevant for
 * EPT/VPID, otherwise there is nothing really to invalidate.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCVirt      Guest virtual address of the page to invalidate.
 */
VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
{
    AssertPtr(pVM);
    AssertPtr(pVCpu);
    LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));

    bool fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    if (!fFlushPending)
    {
        /*
         * We must invalidate the guest TLB entry in either case; we cannot ignore it even for
         * the EPT case. See @bugref{6043} and @bugref{6177}.
         *
         * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*()
         * as this function may be called in a loop with individual addresses.
         */
        if (pVM->hm.s.vmx.fVpid)
        {
            if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
            {
                hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
                STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
            }
            else
                VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
        }
        else if (pVM->hm.s.fNestedPaging)
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    }

    return VINF_SUCCESS;
}
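
/*
 * Illustrative caller sketch (hypothetical range variables): invalidating a
 * linear range one page at a time; when INVVPID individual-address flushing
 * is unavailable, the force flag set above batches everything into a single
 * flush at the next VM-entry:
 *
 *     for (RTGCPTR GCPtr = GCPtrFirst; GCPtr < GCPtrLast; GCPtr += PAGE_SIZE)
 *         VMXR0InvalidatePage(pVM, pVCpu, GCPtr);
 */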


/**
 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
 * otherwise there is nothing really to invalidate.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPhys      Guest physical address of the page to invalidate.
 */
VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
{
    LogFlowFunc(("%RGp\n", GCPhys));

    /*
     * We cannot flush a page by guest-physical address: INVVPID takes only a linear address
     * and INVEPT flushes by EPT context, not by individual address. We update the force flag
     * here and flush before the next VM-entry in hmR0VmxFlushTLB*(). This function might be
     * called in a loop.
     */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
    return VINF_SUCCESS;
}


/**
 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
 * case where neither EPT nor VPID is supported by the CPU.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 *
 * @remarks Called with interrupts disabled.
 */
static DECLCALLBACK(void) hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu)
{
    NOREF(pVM);
    AssertPtr(pVCpu);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);

    PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
    AssertPtr(pCpu);

    pVCpu->hm.s.TlbShootdown.cPages = 0;
    pVCpu->hm.s.idLastCpu = pCpu->idCpu;
    pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
    pVCpu->hm.s.fForceTLBFlush = false;
    return;
}


/**
 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 *
 * @remarks All references to "ASID" in this function pertain to "VPID" in
 *          Intel's nomenclature. The reason is to avoid confusion in compare
 *          statements, since the host-CPU copies are named "ASID".
 *
 * @remarks Called with interrupts disabled.
 */
static DECLCALLBACK(void) hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu)
{
    AssertPtr(pVM);
    AssertPtr(pVCpu);
    AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
              ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled. "
               "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));

    PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
    AssertPtr(pCpu);

    /*
     * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
     * This can happen both for start & resume due to long jumps back to ring-3.
     * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
     * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
     */
    bool fNewASID = false;
    if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
        || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
    {
        pVCpu->hm.s.fForceTLBFlush = true;
        fNewASID = true;
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
    }

    /*
     * Check for explicit TLB shootdowns.
     */
    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
    {
        pVCpu->hm.s.fForceTLBFlush = true;
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
    }

    pVCpu->hm.s.idLastCpu = pCpu->idCpu;
    if (pVCpu->hm.s.fForceTLBFlush)
    {
        if (fNewASID)
        {
            ++pCpu->uCurrentAsid;
            if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
            {
                pCpu->uCurrentAsid = 1;             /* start at 1; host uses 0 */
                pCpu->cTlbFlushes++;
                pCpu->fFlushAsidBeforeUse = true;
            }

            pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
            if (pCpu->fFlushAsidBeforeUse)
                hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
        }
        else
        {
            if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
                hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_SINGLE_CONTEXT, 0 /* GCPtr */);
            else
                hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
        }

        pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
        pVCpu->hm.s.fForceTLBFlush = false;
    }
    else
    {
        AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
                  ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
                   pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
                   pCpu->uCurrentAsid, pCpu->cTlbFlushes));

        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
         *        not be executed. See hmQueueInvlPage() where it is commented
         *        out. Support individual entry flushing someday. */
        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
        {
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);

            /*
             * Flush individual guest entries using VPID from the TLB or as little as possible with EPT
             * as supported by the CPU.
             */
            if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
            {
                for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
                    hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
            }
            else
                hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
        }
        else
            STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
    }
    pVCpu->hm.s.TlbShootdown.cPages = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);

    AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
              ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
    AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
              ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
              ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));

    /* Update VMCS with the VPID. */
    int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
    AssertRC(rc);
}
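
/*
 * Worked example (illustrative numbers): with pVM->hm.s.uMaxAsid = 4, the
 * fourth distinct guest scheduled onto a host CPU pushes uCurrentAsid to the
 * limit, so it wraps back to 1 (0 is reserved for the host), cTlbFlushes is
 * bumped to invalidate every VCPU's cached ASID, and fFlushAsidBeforeUse
 * forces a VPID flush before the recycled ASID is trusted again.
 */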


/**
 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 *
 * @remarks Called with interrupts disabled.
 */
static DECLCALLBACK(void) hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu)
{
    AssertPtr(pVM);
    AssertPtr(pVCpu);
    AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
    AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));

    PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
    AssertPtr(pCpu);

    /*
     * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
     * This can happen both for start & resume due to long jumps back to ring-3.
     * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
     */
    if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
        || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
    {
        pVCpu->hm.s.fForceTLBFlush = true;
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
    }

    /* Check for explicit TLB shootdown flushes. */
    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
    {
        pVCpu->hm.s.fForceTLBFlush = true;
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
    }

    pVCpu->hm.s.idLastCpu = pCpu->idCpu;
    pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;

    if (pVCpu->hm.s.fForceTLBFlush)
    {
        hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
        pVCpu->hm.s.fForceTLBFlush = false;
    }
    else
    {
        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
         *        not be executed. See hmQueueInvlPage() where it is commented
         *        out. Support individual entry flushing someday. */
        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
        {
            /* We cannot flush individual entries without VPID support. Flush using EPT. */
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
            hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
        }
        else
            STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
    }

    pVCpu->hm.s.TlbShootdown.cPages = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
}
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl/**
601c2e1ed5ec8de33296fed3938598da99915e7adhain * Flushes the tagged-TLB entries for VPID CPUs as necessary.
601c2e1ed5ec8de33296fed3938598da99915e7adhain *
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @returns VBox status code.
601c2e1ed5ec8de33296fed3938598da99915e7adhain * @param pVM Pointer to the VM.
601c2e1ed5ec8de33296fed3938598da99915e7adhain * @param pVCpu Pointer to the VMCPU.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @remarks Called with interrupts disabled.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
601c2e1ed5ec8de33296fed3938598da99915e7adhainstatic DECLCALLBACK(void) hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl{
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertPtr(pVM);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertPtr(pVCpu);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked with VPID disabled."));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging enabled"));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
601c2e1ed5ec8de33296fed3938598da99915e7adhain /*
601c2e1ed5ec8de33296fed3938598da99915e7adhain * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
601c2e1ed5ec8de33296fed3938598da99915e7adhain * This can happen both for start & resume due to long jumps back to ring-3.
601c2e1ed5ec8de33296fed3938598da99915e7adhain * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
601c2e1ed5ec8de33296fed3938598da99915e7adhain * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
601c2e1ed5ec8de33296fed3938598da99915e7adhain */
601c2e1ed5ec8de33296fed3938598da99915e7adhain if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
601c2e1ed5ec8de33296fed3938598da99915e7adhain || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
601c2e1ed5ec8de33296fed3938598da99915e7adhain {
601c2e1ed5ec8de33296fed3938598da99915e7adhain pVCpu->hm.s.fForceTLBFlush = true;
601c2e1ed5ec8de33296fed3938598da99915e7adhain STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
601c2e1ed5ec8de33296fed3938598da99915e7adhain }
601c2e1ed5ec8de33296fed3938598da99915e7adhain
601c2e1ed5ec8de33296fed3938598da99915e7adhain /* Check for explicit TLB shootdown flushes. */
601c2e1ed5ec8de33296fed3938598da99915e7adhain if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
601c2e1ed5ec8de33296fed3938598da99915e7adhain {
601c2e1ed5ec8de33296fed3938598da99915e7adhain /*
601c2e1ed5ec8de33296fed3938598da99915e7adhain * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb())
601c2e1ed5ec8de33296fed3938598da99915e7adhain * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
601c2e1ed5ec8de33296fed3938598da99915e7adhain * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush's too) - an obscure corner case.
601c2e1ed5ec8de33296fed3938598da99915e7adhain */
601c2e1ed5ec8de33296fed3938598da99915e7adhain pVCpu->hm.s.fForceTLBFlush = true;
601c2e1ed5ec8de33296fed3938598da99915e7adhain STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
    }

    pVCpu->hm.s.idLastCpu = pCpu->idCpu;
    if (pVCpu->hm.s.fForceTLBFlush)
    {
        ++pCpu->uCurrentAsid;
        if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
        {
            pCpu->uCurrentAsid = 1;            /* start at 1; host uses 0 */
            pCpu->fFlushAsidBeforeUse = true;
            pCpu->cTlbFlushes++;
        }

        pVCpu->hm.s.fForceTLBFlush = false;
        pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
        pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
        if (pCpu->fFlushAsidBeforeUse)
            hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
    }
    else
    {
        AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
                  ("hm->uCurrentAsid=%u hm->cTlbFlushes=%u cpu->uCurrentAsid=%u cpu->cTlbFlushes=%u\n",
                   pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
                   pCpu->uCurrentAsid, pCpu->cTlbFlushes));

        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
         *        not be executed. See hmQueueInvlPage() where it is commented
         *        out. Support individual entry flushing someday. */
        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
        {
            /* Flush individual guest entries using VPID or as little as possible with EPT as supported by the CPU. */
            if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
            {
                for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
                    hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
            }
            else
                hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
        }
        else
            STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
    }

    pVCpu->hm.s.TlbShootdown.cPages = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);

    AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
              ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
    AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
              ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
              ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));

    int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
    AssertRC(rc);
}
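/*
 * Illustrative note (not part of the original logic): with a hypothetical
 * uMaxAsid of 4, the per-CPU ASID allocator above hands out 1, 2, 3 and then
 * wraps back to 1, bumping cTlbFlushes so that any VCPU still carrying an
 * older cTlbFlushes value knows its tagged TLB entries are stale:
 *
 *   uCurrentAsid: 1 -> 2 -> 3 -> (wrap) 1, cTlbFlushes++, fFlushAsidBeforeUse
 *
 * ASID 0 is never handed out as it is reserved for the host.
 */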


/**
 * Flushes the guest TLB entry based on CPU capabilities.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 */
DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    switch (pVM->hm.s.vmx.uFlushTaggedTlb)
    {
        case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu); break;
        case HMVMX_FLUSH_TAGGED_TLB_EPT:      hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu);  break;
        case HMVMX_FLUSH_TAGGED_TLB_VPID:     hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu); break;
        case HMVMX_FLUSH_TAGGED_TLB_NONE:     hmR0VmxFlushTaggedTlbNone(pVM, pVCpu); break;
        default:
            AssertMsgFailed(("Invalid flush-tag function identifier\n"));
            break;
    }
}


/**
 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
 * TLB entries from the host TLB before VM-entry.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 */
static int hmR0VmxSetupTaggedTlb(PVM pVM)
{
    /*
     * Determine the optimal flush type for nested paging.
     * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we've already set up
     * unrestricted guest execution (see hmR3InitFinalizeR0()).
     */
    if (pVM->hm.s.fNestedPaging)
    {
        if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
        {
            if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
                pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
            else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
                pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
            else
            {
                /* Shouldn't happen. EPT is supported but no suitable flush types are supported. */
                pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
                return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
            }

            /* Make sure the write-back cacheable memory type for EPT is supported. */
            if (!(pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
            {
                LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.msr.vmx_ept_vpid_caps));
                pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
                return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
            }
        }
        else
        {
            /* Shouldn't happen. EPT is supported but the INVEPT instruction is not supported. */
            pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
            return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
        }
    }

    /*
     * Determine the optimal flush type for VPID.
     */
    if (pVM->hm.s.vmx.fVpid)
    {
        if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
        {
            if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
                pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT;
            else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
                pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS;
            else
            {
                /* Neither SINGLE- nor ALL-context flush types for VPID are supported by the CPU. Ignore VPID capability. */
                if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
                    LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
                if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
                    LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
                pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
                pVM->hm.s.vmx.fVpid = false;
            }
        }
        else
        {
            /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
            Log(("hmR0VmxSetupTaggedTlb: VPID supported without INVVPID support. Ignoring VPID.\n"));
            pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
            pVM->hm.s.vmx.fVpid = false;
        }
    }

    /*
     * Set up the handler for flushing tagged TLBs.
     */
    if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
        pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
    else if (pVM->hm.s.fNestedPaging)
        pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
    else if (pVM->hm.s.vmx.fVpid)
        pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
    else
        pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;
    return VINF_SUCCESS;
}
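/*
 * Summary of the handler selection above (a restatement for readability, not
 * new logic): the tagged-TLB flush handler is picked from the two feature
 * flags that survived the capability checks.
 *
 *   fNestedPaging  fVpid   uFlushTaggedTlb
 *   -------------  -----   -------------------------------
 *   true           true    HMVMX_FLUSH_TAGGED_TLB_EPT_VPID
 *   true           false   HMVMX_FLUSH_TAGGED_TLB_EPT
 *   false          true    HMVMX_FLUSH_TAGGED_TLB_VPID
 *   false          false   HMVMX_FLUSH_TAGGED_TLB_NONE
 */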


/**
 * Sets up pin-based VM-execution controls in the VMCS.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 */
static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
{
    AssertPtr(pVM);
    AssertPtr(pVCpu);

    uint32_t val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;     /* Bits set here must always be set. */
    uint32_t zap = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1;        /* Bits cleared here must always be cleared. */

    val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT              /* External interrupts cause a VM-exit. */
         | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT;                 /* Non-maskable interrupts cause a VM-exit. */
    Assert(!(val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI));

    /* Enable the VMX preemption timer. */
    if (pVM->hm.s.vmx.fUsePreemptTimer)
    {
        Assert(pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER);
        val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER;
    }

    if ((val & zap) != val)
    {
        LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
                pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0, val, zap));
        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    }
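    /*
     * Worked example with made-up MSR values (illustration only): if
     * disallowed0 = 0x00000016 those bits are forced on in 'val', and if
     * allowed1 = 0x0000007f then 'zap' clears any bit above bit 6.  Requesting
     * a control outside allowed1, say bit 7, gives (val & zap) != val, so we
     * bail out rather than program a combination the CPU would reject on
     * VM-entry.
     */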

    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, val);
    AssertRCReturn(rc, rc);

    /* Update VCPU with the currently set pin-based VM-execution controls. */
    pVCpu->hm.s.vmx.u32PinCtls = val;
    return rc;
}


/**
 * Sets up processor-based VM-execution controls in the VMCS.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 */
static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
{
    AssertPtr(pVM);
    AssertPtr(pVCpu);

    int rc = VERR_INTERNAL_ERROR_5;
    uint32_t val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;    /* Bits set here must be set in the VMCS. */
    uint32_t zap = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1;       /* Bits cleared here must be cleared in the VMCS. */

    val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT                 /* HLT causes a VM-exit. */
         | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING       /* Use TSC-offsetting. */
         | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT              /* MOV DRx causes a VM-exit. */
         | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT           /* All IO instructions cause a VM-exit. */
         | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT               /* RDPMC causes a VM-exit. */
         | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT             /* MONITOR causes a VM-exit. */
         | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT;              /* MWAIT causes a VM-exit. */

    /* We toggle VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT later; verify that it isn't forced to be always set or always clear. */
    if (   !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
        ||  (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT))
    {
        LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT combo!"));
        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    }

    /* Without nested paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
    if (!pVM->hm.s.fNestedPaging)
    {
        Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);                   /* Paranoia. */
        val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
             | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
             | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
    }

    /* Use TPR shadowing if supported by the CPU. */
    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
    {
        Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
        Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff));           /* Bits 11:0 MBZ. */
        rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
        rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
        AssertRCReturn(rc, rc);

        val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW;      /* CR8 reads from the Virtual-APIC page. */
        /* CR8 writes cause a VM-exit based on the TPR threshold. */
        Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT));
        Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT));
    }
    else
    {
        val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT       /* CR8 reads cause a VM-exit. */
             | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT;       /* CR8 writes cause a VM-exit. */
    }
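    /*
     * Note on the TPR threshold (clarifying comment, not original): with the
     * threshold programmed to 0 above, a VM-exit would only occur when the
     * guest's TPR drops below 0, which can never happen; so CR8 accesses are
     * served entirely from the virtual-APIC page until the threshold is
     * raised elsewhere.
     */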

    /* Use MSR-bitmaps if supported by the CPU. */
    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
    {
        val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS;

        Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
        Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff));          /* Bits 11:0 MBZ. */
        rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
        AssertRCReturn(rc, rc);

        /*
         * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
         * automatically (either as part of the MSR-load/store areas or dedicated fields in the VMCS).
         */
        hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS,  VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR,          VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR,           VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK,        VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE,        VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE,        VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    }
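    /*
     * Layout reminder for the MSR bitmap (per the Intel SDM, added here as a
     * reading aid): it is a single 4K page split into four 1K regions - read
     * bits for MSRs 0x0-0x1fff, read bits for 0xc0000000-0xc0001fff, then the
     * corresponding two write regions.  A clear bit means "no VM-exit", so a
     * pass-through MSR such as MSR_K8_LSTAR (0xc0000082) has its bit cleared
     * in both the second read region and the second write region.
     */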

    /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
        val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;

    if ((val & zap) != val)
    {
        LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
                pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0, val, zap));
        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    }

    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, val);
    AssertRCReturn(rc, rc);

    /* Update VCPU with the currently set processor-based VM-execution controls. */
    pVCpu->hm.s.vmx.u32ProcCtls = val;

    /*
     * Secondary processor-based VM-execution controls.
     */
    if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
    {
        val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;        /* Bits set here must be set in the VMCS. */
        zap = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;           /* Bits cleared here must be cleared in the VMCS. */

        if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
            val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;             /* WBINVD causes a VM-exit. */

        if (pVM->hm.s.fNestedPaging)
            val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;                     /* Enable EPT. */
        else
        {
            /*
             * Without nested paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
             * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT when INVPCID is executed by the guest.
             * See Intel spec. 25.4 "Changes to Instruction Behavior in VMX Non-Root Operation".
             */
            if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
                val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
        }

        if (pVM->hm.s.vmx.fVpid)
            val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID;                    /* Enable VPID. */

        if (pVM->hm.s.vmx.fUnrestrictedGuest)
            val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST;      /* Enable Unrestricted Execution. */

        /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
        /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
         *        done dynamically. */
        if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
        {
            Assert(pVM->hm.s.vmx.HCPhysApicAccess);
            Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff));       /* Bits 11:0 MBZ. */
            val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC;               /* Virtualize APIC accesses. */
            rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
            AssertRCReturn(rc, rc);
        }

        if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
        {
            val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;                  /* Enable RDTSCP support. */
            if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
                hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
        }

        if ((val & zap) != val)
        {
            LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! "
                    "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0, val, zap));
            return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
        }

        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2, val);
        AssertRCReturn(rc, rc);

        /* Update VCPU with the currently set secondary processor-based VM-execution controls. */
        pVCpu->hm.s.vmx.u32ProcCtls2 = val;
    }

    return VINF_SUCCESS;
}


/**
 * Sets up miscellaneous (everything other than Pin & Processor-based
 * VM-execution) control fields in the VMCS.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 */
static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
{
    AssertPtr(pVM);
    AssertPtr(pVCpu);

    int rc = VERR_GENERAL_FAILURE;

    /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestControlRegs()). */
    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);

    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);

    /*
     * Set MASK & MATCH to 0. VMX checks whether GuestPFErrCode & MASK == MATCH. If equal (in our case it always is)
     * and the X86_XCPT_PF bit in the exception bitmap is set, a #PF causes a VM-exit; if the bit is clear, it doesn't.
     * We thus use the exception bitmap to control this rather than use both.
     */
    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0);
    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);
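    /*
     * Worked example (illustration, not original code): a guest write fault in
     * user mode yields a GuestPFErrCode with P|W|U set; with MASK = 0 the
     * check degenerates to 0 == MATCH, i.e. 0 == 0, true for every #PF.
     * Whether the fault actually exits is then decided solely by bit 14
     * (X86_XCPT_PF) of the exception bitmap set up in hmR0VmxInitXcptBitmap().
     */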

    /** @todo Explore possibility of using IO-bitmaps. */
    /* All I/O & string I/O instructions cause VM-exits. */
    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);
    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);

#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    /* Set up MSR autoloading/autostoring. */
    Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
    Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf));                 /* Lower 4 bits MBZ. */
    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);

    Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
    Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf));                  /* Lower 4 bits MBZ. */
    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
#else
    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
#endif

    /* Set the VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
    rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
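    /*
     * Clarifying note: -1 (all bits set) marks the link pointer as unused;
     * anything else would tie this VMCS to a shadow VMCS, a feature this code
     * does not use, and would fail the guest-state checks on VM-entry.
     */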

    /* Set up the debug controls. */
    rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);         /** @todo think about this. */
    rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
    AssertRCReturn(rc, rc);
    return rc;
}


/**
 * Sets up the initial exception bitmap in the VMCS based on static conditions
 * (i.e. conditions that cannot ever change at runtime).
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 */
static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
{
    AssertPtr(pVM);
    AssertPtr(pVCpu);

    LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));

    uint32_t u32XcptBitmap = 0;

    /* Without nested paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
    if (!pVM->hm.s.fNestedPaging)
        u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
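    /* Reading aid: bit n of the bitmap intercepts exception vector n, so the
       line above sets bit 14 (X86_XCPT_PF), i.e. u32XcptBitmap = 0x4000. */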

    pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
    AssertRCReturn(rc, rc);
    return rc;
}


/**
 * Sets up the initial guest-state mask. The guest-state mask is consulted
 * before reading guest-state fields from the VMCS as VMREADs can be expensive
 * for the nested virtualization case (where they would cause a VM-exit).
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 */
static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
{
    /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
    pVCpu->hm.s.vmx.fUpdatedGuestState = HMVMX_UPDATED_GUEST_ALL;
    return VINF_SUCCESS;
}


/**
 * Does per-VM VT-x initialization.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 */
VMMR0DECL(int) VMXR0InitVM(PVM pVM)
{
    LogFlowFunc(("pVM=%p\n", pVM));

    int rc = hmR0VmxStructsAlloc(pVM);
    if (RT_FAILURE(rc))
    {
        LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
        return rc;
    }

    return VINF_SUCCESS;
}


/**
 * Does per-VM VT-x termination.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 */
VMMR0DECL(int) VMXR0TermVM(PVM pVM)
{
    LogFlowFunc(("pVM=%p\n", pVM));

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
        ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
#endif
    hmR0VmxStructsFree(pVM);
    return VINF_SUCCESS;
}


/**
 * Sets up the VM for execution under VT-x.
 * This function is only called once per VM during initialization.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 */
VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
{
    AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    LogFlowFunc(("pVM=%p\n", pVM));

    /*
     * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
     * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0().
     */
    /* -XXX- change hmR3InitFinalizeR0() to fail if pRealModeTSS alloc fails. */
    if (   !pVM->hm.s.vmx.fUnrestrictedGuest
        && (   !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
            || !pVM->hm.s.vmx.pRealModeTSS))
    {
        LogRel(("VMXR0SetupVM: invalid real-on-v86 state.\n"));
        return VERR_INTERNAL_ERROR;
    }

    /* Initialize these always, see hmR3InitFinalizeR0(). */
    pVM->hm.s.vmx.enmFlushEpt  = VMX_FLUSH_EPT_NONE;
    pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NONE;

    /* Set up the tagged-TLB flush handlers. */
    int rc = hmR0VmxSetupTaggedTlb(pVM);
    if (RT_FAILURE(rc))
    {
        LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
        return rc;
    }

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        AssertPtr(pVCpu);
        AssertPtr(pVCpu->hm.s.vmx.pvVmcs);

        /* Set the revision dword at the beginning of the VMCS structure. */
        *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);

        /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
        rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);

        /* Load this VMCS as the current VMCS. */
        rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);

        rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);

        rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);

        rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);

        rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);

        rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);

#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
#endif

        /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
        rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
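        /*
         * Why the trailing VMXClearVMCS (explanatory note, per the Intel SDM's
         * VMCLEAR/VMLAUNCH descriptions): a VMCS in the "clear" launch state
         * must be entered with VMLAUNCH (VMRESUME is only legal once it has
         * been launched), and VMCLEAR also flushes any processor-side VMCS
         * cache back to the memory region initialized above.
         */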

        hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
    }

    return VINF_SUCCESS;
}


/**
 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
 * the VMCS.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 */
DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
{
    RTCCUINTREG uReg = ASMGetCR0();
    int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);

#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    /* For the darwin 32-bit hybrid kernel, we need the 64-bit CR3 as it uses 64-bit paging. */
    if (HMVMX_IS_64BIT_HOST_MODE())
    {
        uint64_t uRegCR3 = hmR0Get64bitCR3();
        rc |= VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uRegCR3);
    }
    else
#endif
    {
        uReg = ASMGetCR3();
        rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
    }

    uReg = ASMGetCR4();
    rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
    AssertRCReturn(rc, rc);
    return rc;
}


/**
 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
 * the host-state area in the VMCS.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 */
DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
{
    int rc = VERR_INTERNAL_ERROR_5;
    RTSEL uSelCS = 0;
    RTSEL uSelSS = 0;
    RTSEL uSelDS = 0;
    RTSEL uSelES = 0;
    RTSEL uSelFS = 0;
    RTSEL uSelGS = 0;
    RTSEL uSelTR = 0;

    /*
     * Host selector registers.
     */
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    if (HMVMX_IS_64BIT_HOST_MODE())
    {
        uSelCS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelCS;
        uSelSS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelSS;
    }
    else
    {
        /* darwin seems to use the LDT (the TI flag is set) for the CS & SS selectors, which VT-x doesn't like. */
        uSelCS = (RTSEL)(uintptr_t)&SUPR0AbsKernelCS;
        uSelSS = (RTSEL)(uintptr_t)&SUPR0AbsKernelSS;
    }
#else
    uSelCS = ASMGetCS();
    uSelSS = ASMGetSS();
#endif

    /* Note: VT-x is picky about the RPL of the selectors here; we'll restore them manually. */
    uSelTR = ASMGetTR();

    /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
    /** @todo Verify if we have any platform that actually runs with DS or ES with
     *        RPL != 0 in kernel space. */
    Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
    Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
    Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
    Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
    Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
    Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
    Assert(uSelCS != 0);
    Assert(uSelTR != 0);

    /* The assertion is right but we would not have updated u32ExitCtls yet. */
#if 0
    if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE))
        Assert(uSelSS != 0);
#endif

    /* Write these host selector fields into the host-state area in the VMCS. */
    rc  = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS);
    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS);
    /* Avoid the VMWRITEs as we set the following segments to 0 and the VMCS fields are already 0 (since g_HvmR0 is static). */
#if 0
    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS);
    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES);
    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS);
    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS);
#endif
    rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR);
    AssertRCReturn(rc, rc);
601c2e1ed5ec8de33296fed3938598da99915e7adhain
601c2e1ed5ec8de33296fed3938598da99915e7adhain /*
601c2e1ed5ec8de33296fed3938598da99915e7adhain * Host GDTR and IDTR.
601c2e1ed5ec8de33296fed3938598da99915e7adhain */
601c2e1ed5ec8de33296fed3938598da99915e7adhain /** @todo Despite VT-x -not- restoring the limits on GDTR and IDTR it should
601c2e1ed5ec8de33296fed3938598da99915e7adhain * be safe to -not- save and restore GDTR and IDTR in the assembly
601c2e1ed5ec8de33296fed3938598da99915e7adhain * code and just do it here and don't care if the limits are zapped on
601c2e1ed5ec8de33296fed3938598da99915e7adhain * VM-exit. */
601c2e1ed5ec8de33296fed3938598da99915e7adhain RTGDTR Gdtr;
601c2e1ed5ec8de33296fed3938598da99915e7adhain RT_ZERO(Gdtr);
601c2e1ed5ec8de33296fed3938598da99915e7adhain#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
601c2e1ed5ec8de33296fed3938598da99915e7adhain if (HMVMX_IS_64BIT_HOST_MODE())
601c2e1ed5ec8de33296fed3938598da99915e7adhain {
601c2e1ed5ec8de33296fed3938598da99915e7adhain X86XDTR64 Gdtr64;
601c2e1ed5ec8de33296fed3938598da99915e7adhain X86XDTR64 Idtr64;
601c2e1ed5ec8de33296fed3938598da99915e7adhain hmR0Get64bitGdtrAndIdtr(&Gdtr64, &Idtr64);
601c2e1ed5ec8de33296fed3938598da99915e7adhain rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr);
601c2e1ed5ec8de33296fed3938598da99915e7adhain rc |= VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr);
601c2e1ed5ec8de33296fed3938598da99915e7adhain Gdtr.cbGdt = Gdtr64.cb;
601c2e1ed5ec8de33296fed3938598da99915e7adhain Gdtr.pGdt = (uintptr_t)Gdtr64.uAddr;
601c2e1ed5ec8de33296fed3938598da99915e7adhain }
601c2e1ed5ec8de33296fed3938598da99915e7adhain else
601c2e1ed5ec8de33296fed3938598da99915e7adhain#endif
601c2e1ed5ec8de33296fed3938598da99915e7adhain {
601c2e1ed5ec8de33296fed3938598da99915e7adhain RTIDTR Idtr;
601c2e1ed5ec8de33296fed3938598da99915e7adhain ASMGetGDTR(&Gdtr);
601c2e1ed5ec8de33296fed3938598da99915e7adhain ASMGetIDTR(&Idtr);
601c2e1ed5ec8de33296fed3938598da99915e7adhain rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);
601c2e1ed5ec8de33296fed3938598da99915e7adhain rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);
601c2e1ed5ec8de33296fed3938598da99915e7adhain }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertRCReturn(rc, rc);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /*
738dd1949fabecbe3a63d62def16a5d521e85911hyw * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI and RPL bits
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * is effectively what the CPU does for "scaling by 8". TI is always 0 and RPL should be too in most cases.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
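    /* Illustration (values are examples, not from the code): a TR selector of 0x0040 (TI=0, RPL=0)
       masks to byte offset 0x40 into the GDT, i.e. descriptor index 8 -- clearing the low three
       bits is the "scaling by 8" mentioned above. */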
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if ((uSelTR & X86_SEL_MASK) > Gdtr.cbGdt)
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw {
        AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel Gdtr.cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl return VERR_VMX_INVALID_HOST_STATE;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (HMVMX_IS_64BIT_HOST_MODE())
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* We need the 64-bit TR base for hybrid darwin. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl uint64_t u64TRBase = X86DESC64_BASE((PX86DESC64)pDesc);
601c2e1ed5ec8de33296fed3938598da99915e7adhain rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, u64TRBase);
601c2e1ed5ec8de33296fed3938598da99915e7adhain }
601c2e1ed5ec8de33296fed3938598da99915e7adhain else
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#endif
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uintptr_t uTRBase;
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw#if HC_ARCH_BITS == 64
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uTRBase = X86DESC64_BASE(pDesc);
37afe445f2ac4e360ddb647505aa7deb929fe5e3hyw#else
37afe445f2ac4e360ddb647505aa7deb929fe5e3hyw uTRBase = X86DESC_BASE(pDesc);
37afe445f2ac4e360ddb647505aa7deb929fe5e3hyw#endif
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw }
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh AssertRCReturn(rc, rc);
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw /*
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * Host FS base and GS base.
     * For 32-bit hosts the bases are handled by the assembly code, where pushing and popping
     * FS and GS takes care of them. For 64-bit hosts, the FS and GS base MSRs come into play.
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw */
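    /* E.g. 64-bit host kernels typically keep their per-CPU data pointer in GS.base (MSR_K8_GS_BASE);
       were it not reloaded from the VMCS host-state fields on every VM-exit, the host kernel would
       be toast. */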
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (HMVMX_IS_64BIT_HOST_MODE())
37afe445f2ac4e360ddb647505aa7deb929fe5e3hyw {
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_FS_BASE, u64FSBase);
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_GS_BASE, u64GSBase);
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl AssertRCReturn(rc, rc);
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh }
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh#endif
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh return rc;
37afe445f2ac4e360ddb647505aa7deb929fe5e3hyw}
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw/**
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * Saves certain host MSRs in the VM-Exit MSR-load area and some in the
 * host-state area of the VMCS. These MSRs will be automatically restored on
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * the host after every successful VM exit.
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw *
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * @returns VBox status code.
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * @param pVM Pointer to the VM.
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * @param pVCpu Pointer to the VMCPU.
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw */
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hywDECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw{
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw AssertPtr(pVCpu);
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw int rc = VINF_SUCCESS;
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw PVMXMSR pHostMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMsr;
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw uint32_t cHostMsrs = 0;
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw uint32_t u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw if (u32HostExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw {
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw pHostMsr->u32IndexMSR = MSR_K6_EFER;
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw pHostMsr->u32Reserved = 0;
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw if (CPUMIsGuestInLongMode(pVCpu))
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw {
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw /* Must match the EFER value in our 64 bits switcher. */
37afe445f2ac4e360ddb647505aa7deb929fe5e3hyw pHostMsr->u64Value = ASMRdMsr(MSR_K6_EFER) | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh }
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh else
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh# endif
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr->u64Value = ASMRdMsr(MSR_K6_EFER);
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr++; cHostMsrs++;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh }
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh if (HMVMX_IS_64BIT_HOST_MODE())
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh {
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr->u32IndexMSR = MSR_K6_STAR;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr->u32Reserved = 0;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr->u64Value = ASMRdMsr(MSR_K6_STAR); /* legacy syscall eip, cs & ss */
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr++; cHostMsrs++;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr->u32IndexMSR = MSR_K8_LSTAR;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr->u32Reserved = 0;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr->u64Value = ASMRdMsr(MSR_K8_LSTAR); /* 64-bit mode syscall rip */
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr++; cHostMsrs++;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr->u32IndexMSR = MSR_K8_SF_MASK;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr->u32Reserved = 0;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr->u64Value = ASMRdMsr(MSR_K8_SF_MASK); /* syscall flag mask */
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr++; cHostMsrs++;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr->u32Reserved = 0;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr->u64Value = ASMRdMsr(MSR_K8_KERNEL_GS_BASE); /* swapgs exchange value */
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pHostMsr++; cHostMsrs++;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh }
37afe445f2ac4e360ddb647505aa7deb929fe5e3hyw# endif
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw if (RT_UNLIKELY(cHostMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)))
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw {
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw LogRel(("cHostMsrs=%u Cpu=%u\n", cHostMsrs, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)));
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw }
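    /* For reference: MSR_IA32_VMX_MISC_MAX_MSR decodes bits 27:25 of the IA32_VMX_MISC MSR as N and
       yields a recommended maximum of 512 * (N + 1) MSR-area entries; N=0 gives the 512 mentioned
       above. */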
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
37afe445f2ac4e360ddb647505aa7deb929fe5e3hyw /*
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Host Sysenter MSRs.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw rc |= VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw if (HMVMX_IS_64BIT_HOST_MODE())
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw {
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
37afe445f2ac4e360ddb647505aa7deb929fe5e3hyw }
738dd1949fabecbe3a63d62def16a5d521e85911hyw else
738dd1949fabecbe3a63d62def16a5d521e85911hyw {
738dd1949fabecbe3a63d62def16a5d521e85911hyw rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh }
738dd1949fabecbe3a63d62def16a5d521e85911hyw# elif HC_ARCH_BITS == 32
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
738dd1949fabecbe3a63d62def16a5d521e85911hyw rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw# else
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl# endif
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw AssertRCReturn(rc, rc);
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT, IA32_EFER, also see
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * hmR0VmxSetupExitCtls() !! */
37afe445f2ac4e360ddb647505aa7deb929fe5e3hyw return rc;
37afe445f2ac4e360ddb647505aa7deb929fe5e3hyw}
37afe445f2ac4e360ddb647505aa7deb929fe5e3hyw
37afe445f2ac4e360ddb647505aa7deb929fe5e3hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw/**
37afe445f2ac4e360ddb647505aa7deb929fe5e3hyw * Sets up VM-entry controls in the VMCS. These controls can affect things done
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * controls".
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @returns VBox status code.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param pVCpu Pointer to the VMCPU.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param pMixedCtx Pointer to the guest-CPU context. The data may be
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * out-of-sync. Make sure to update the required fields
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * before using them.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @remarks No-long-jump zone!!!
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdavDECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav{
0cc8ae8667155d352d327b5c92b62899a7e05bcdav int rc = VINF_SUCCESS;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_ENTRY_CTLS)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav PVM pVM = pVCpu->CTX_SUFF(pVM);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uint32_t val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0; /* Bits set here must be set in the VMCS. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uint32_t zap = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
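        /* The fixed-bits convention: 'val' starts out with every bit the CPU insists on having set,
           while 'zap' is zero for every bit the CPU cannot have set. Any feature bit we OR into
           'val' that is absent from 'zap' is caught by the (val & zap) != val check below. */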
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
        /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
1039f409262fcc394c002cfbadf60149156d2bcbav val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (CPUMIsGuestInLongModeEx(pMixedCtx))
0cc8ae8667155d352d327b5c92b62899a7e05bcdav val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(!(val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /*
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * The following should not be set (since we're not in SMM mode):
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * - VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * - VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /** @todo VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR,
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR,
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if ((val & zap) != val)
738dd1949fabecbe3a63d62def16a5d521e85911hyw {
738dd1949fabecbe3a63d62def16a5d521e85911hyw LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
738dd1949fabecbe3a63d62def16a5d521e85911hyw pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0, val, zap));
738dd1949fabecbe3a63d62def16a5d521e85911hyw return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_CONTROLS, val);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertRCReturn(rc, rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
        /* Update VCPU with the currently set VM-entry controls. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.vmx.u32EntryCtls = val;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_ENTRY_CTLS;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl return rc;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl}
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl/**
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Sets up the VM-exit controls in the VMCS.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @returns VBox status code.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param pVCpu Pointer to the VMCPU.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param pMixedCtx Pointer to the guest-CPU context. The data may be
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * out-of-sync. Make sure to update the required fields
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * before using them.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *
 * @remarks Requires EFER.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jlDECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl{
25cf1a301a396c38e8adf52c15f537b80d2483f7jl int rc = VINF_SUCCESS;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_EXIT_CTLS)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl PVM pVM = pVCpu->CTX_SUFF(pVM);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl uint32_t val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0; /* Bits set here must be set in the VMCS. */
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh uint32_t zap = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (HMVMX_IS_64BIT_HOST_MODE())
25cf1a301a396c38e8adf52c15f537b80d2483f7jl val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (CPUMIsGuestInLongModeEx(pMixedCtx))
25cf1a301a396c38e8adf52c15f537b80d2483f7jl val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE; /* The switcher goes to long mode. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#endif
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /** @todo VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_PERF_MSR,
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR,
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR,
738dd1949fabecbe3a63d62def16a5d521e85911hyw * VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR,
738dd1949fabecbe3a63d62def16a5d521e85911hyw * VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR. */
738dd1949fabecbe3a63d62def16a5d521e85911hyw
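        /* When the CPU supports it, have VM-exits save the current VMX-preemption timer value so
           the countdown can be resumed accurately instead of being restarted from scratch. */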
738dd1949fabecbe3a63d62def16a5d521e85911hyw if (pVM->hm.s.vmx.msr.vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
738dd1949fabecbe3a63d62def16a5d521e85911hyw val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
738dd1949fabecbe3a63d62def16a5d521e85911hyw if ((val & zap) != val)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
            LogRel(("hmR0VmxLoadGuestExitCtls: invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0, val, zap));
738dd1949fabecbe3a63d62def16a5d521e85911hyw return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
738dd1949fabecbe3a63d62def16a5d521e85911hyw }
738dd1949fabecbe3a63d62def16a5d521e85911hyw
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_CONTROLS, val);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertRCReturn(rc, rc);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* Update VCPU with the currently set VM-exit controls. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVCpu->hm.s.vmx.u32ExitCtls = val;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_EXIT_CTLS;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl return rc;
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams}
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl/**
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Loads the guest APIC and related state.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @returns VBox status code.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param pVCpu Pointer to the VMCPU.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param pMixedCtx Pointer to the guest-CPU context. The data may be
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * out-of-sync. Make sure to update the required fields
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * before using them.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jlDECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl{
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams int rc = VINF_SUCCESS;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_APIC_STATE)
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams {
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl bool fPendingIntr = false;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl uint8_t u8GuestTpr = 0;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc = PDMApicGetTPR(pVCpu, &u8GuestTpr, &fPendingIntr);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertRCReturn(rc, rc);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /*
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * If there are external interrupts pending but masked by the TPR value, apply the threshold so that if the guest
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * lowers the TPR, it would cause a VM-exit and we can deliver the interrupt.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * the interrupt when we VM-exit for other reasons.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8GuestTpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* Bits 3-0 of the TPR threshold field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl uint32_t u32TprThreshold = fPendingIntr ? (u8GuestTpr >> 4) : 0;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
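            /* Worked example (illustrative): a guest TPR of 0x50 (priority class 5) with an interrupt
               pending yields u32TprThreshold = 5; the guest then causes a VM-exit only once it lowers
               its TPR below 0x50, at which point the pending interrupt can be delivered. */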
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertRCReturn(rc, rc);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
            /* 32-bit guests use the LSTAR MSR for patching guest code which touches the TPR. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams Assert(!CPUMIsGuestInLongModeEx(pMixedCtx)); /* EFER always up-to-date. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pMixedCtx->msrLSTAR = u8GuestTpr;
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* If there are interrupts pending, intercept CR8 writes, otherwise don't intercept CR8 reads or writes. */
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams if (fPendingIntr)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_INTERCEPT_WRITE);
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams else
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_APIC_STATE;
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl return rc;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl}
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl/**
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh *
 * @returns The guest's interruptibility-state.
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams * @param pVCpu Pointer to the VMCPU.
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams * @param pMixedCtx Pointer to the guest-CPU context. The data may be
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams * out-of-sync. Make sure to update the required fields
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams * before using them.
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams *
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @remarks No-long-jump zone!!!
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
56f33205c9ed776c3c909e07d52e94610a675740Jonathan AdamsDECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl{
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /*
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * inhibit interrupts or clear any existing interrupt-inhibition.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
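    /* For instance, after a guest STI (with interrupts previously disabled) the inhibition
       force-flag records the address of the following instruction; as long as RIP below still
       matches it, we report block-by-STI (IF set) or block-by-MOV-SS, and no interrupt gets
       injected before that instruction completes. */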
25cf1a301a396c38e8adf52c15f537b80d2483f7jl uint32_t uIntrState = 0;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
56f33205c9ed776c3c909e07d52e94610a675740Jonathan Adams AssertMsg((pVCpu->hm.s.vmx.fUpdatedGuestState & (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS))
25cf1a301a396c38e8adf52c15f537b80d2483f7jl == (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS), ("%#x\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pMixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /*
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * VT-x the flag's condition to be cleared is met and thus the cleared state is correct.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else if (pMixedCtx->eflags.Bits.u1IF)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else
25cf1a301a396c38e8adf52c15f537b80d2483f7jl uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl return uIntrState;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl}
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl/**
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Loads the guest's interruptibility-state into the guest-state area in the
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * VMCS.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @returns VBox status code.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param pVCpu Pointer to the VMCPU.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param uIntrState The interruptibility-state to set.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jlstatic int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl{
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertRCReturn(rc, rc);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl return rc;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl}
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl/**
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Loads the guest's RIP into the guest-state area in the VMCS.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @returns VBox status code.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param pVCpu Pointer to the VMCPU.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param pMixedCtx Pointer to the guest-CPU context. The data may be
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * out-of-sync. Make sure to update the required fields
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * before using them.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
738dd1949fabecbe3a63d62def16a5d521e85911hyw * @remarks No-long-jump zone!!!
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
0cc8ae8667155d352d327b5c92b62899a7e05bcdavstatic int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav{
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh int rc = VINF_SUCCESS;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertRCReturn(rc, rc);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Log(("Load: VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl return rc;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl}
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh
25cf1a301a396c38e8adf52c15f537b80d2483f7jl/**
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Loads the guest's RSP into the guest-state area in the VMCS.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @returns VBox status code.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pVCpu Pointer to the VMCPU.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param pMixedCtx Pointer to the guest-CPU context. The data may be
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * out-of-sync. Make sure to update the required fields
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * before using them.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @remarks No-long-jump zone!!!
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jlstatic int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl{
25cf1a301a396c38e8adf52c15f537b80d2483f7jl int rc = VINF_SUCCESS;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RSP)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh AssertRCReturn(rc, rc);
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl return rc;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl}
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl/**
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Loads the guest's RFLAGS into the guest-state area in the VMCS.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @returns VBox status code.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param pVCpu Pointer to the VMCPU.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param pMixedCtx Pointer to the guest-CPU context. The data may be
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh * out-of-sync. Make sure to update the required fields
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * before using them.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl *
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @remarks No-long-jump zone!!!
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jlstatic int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav{
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh int rc = VINF_SUCCESS;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Let us assert it as such and use 32-bit VMWRITE. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(!(pMixedCtx->rflags.u64 >> 32));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav X86EFLAGS uEFlags = pMixedCtx->eflags;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uEFlags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav uEFlags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */
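        /* E.g. an all-zero eflags value would come out of this masking as 0x00000002: bit 1 is the
           only must-be-one bit, while bits 3, 5, 15 and 22-31 are forced to zero. */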
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav /*
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM exit.
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav */
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav {
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.vmx.RealMode.eflags.u32 = uEFlags.u32; /* Save the original eflags of the real-mode guest. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, uEFlags.u32);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertRCReturn(rc, rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", uEFlags.u32));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return rc;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav}
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav/**
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @returns VBox status code.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pVCpu Pointer to the VMCPU.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pMixedCtx Pointer to the guest-CPU context. The data may be
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * out-of-sync. Make sure to update the required fields
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * before using them.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @remarks No-long-jump zone!!!
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdavstatic int hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav{
0cc8ae8667155d352d327b5c92b62899a7e05bcdav int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc |= hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc |= hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return rc;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav}
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav/**
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Loads the guest control registers (CR0, CR3, CR4) into the guest-state area
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * in the VMCS.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCtx        Pointer to the guest-CPU context. The data may be
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * out-of-sync. Make sure to update the required fields
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * before using them.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @remarks No-long-jump zone!!!
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdavstatic int hmR0VmxLoadGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav{
0cc8ae8667155d352d327b5c92b62899a7e05bcdav int rc = VINF_SUCCESS;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav PVM pVM = pVCpu->CTX_SUFF(pVM);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /*
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Guest CR0.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Guest FPU.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(!(pCtx->cr0 >> 32));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uint32_t u32GuestCR0 = pCtx->cr0;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* The guest's view (read access) of its CR0 is unblemished. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertRCReturn(rc, rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("Load: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", u32GuestCR0));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Setup VT-x's view of the guest CR0. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (pVM->hm.s.fNestedPaging)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (CPUMIsGuestPagingEnabledEx(pCtx))
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* The guest has paging enabled, let it access CR3 without causing a VM exit if supported. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
0cc8ae8667155d352d327b5c92b62899a7e05bcdav | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav else
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
                /* The guest doesn't have paging enabled; make CR3 accesses cause VM-exits so we can update our shadow. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
0cc8ae8667155d352d327b5c92b62899a7e05bcdav | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
601c2e1ed5ec8de33296fed3938598da99915e7adhain }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
601c2e1ed5ec8de33296fed3938598da99915e7adhain rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertRCReturn(rc, rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav else
0cc8ae8667155d352d327b5c92b62899a7e05bcdav u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a VM-exit. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /*
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Guest FPU bits.
         * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be set on the first
         * CPUs to support VT-x; it makes no mention of UX with regard to the VM-entry checks.
601c2e1ed5ec8de33296fed3938598da99915e7adhain */
601c2e1ed5ec8de33296fed3938598da99915e7adhain u32GuestCR0 |= X86_CR0_NE;
601c2e1ed5ec8de33296fed3938598da99915e7adhain bool fInterceptNM = false;
601c2e1ed5ec8de33296fed3938598da99915e7adhain if (CPUMIsGuestFPUStateActive(pVCpu))
601c2e1ed5ec8de33296fed3938598da99915e7adhain {
601c2e1ed5ec8de33296fed3938598da99915e7adhain fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
            /* The guest should still get #NM exceptions when it expects them, so we should not clear TS & MP bits here.
601c2e1ed5ec8de33296fed3938598da99915e7adhain We're only concerned about -us- not intercepting #NMs when the guest-FPU is active. Not the guest itself! */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav else
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl u32GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* Catch floating point exceptions if we need to report them to the guest in a different way. */
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh bool fInterceptMF = false;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh if (!(pCtx->cr0 & X86_CR0_NE))
25cf1a301a396c38e8adf52c15f537b80d2483f7jl fInterceptMF = true;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(PDMVmmDevHeapIsEnabled(pVM));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pVM->hm.s.vmx.pRealModeTSS);
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl fInterceptNM = true;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl fInterceptMF = true;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (fInterceptNM)
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh else
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
738dd1949fabecbe3a63d62def16a5d521e85911hyw
738dd1949fabecbe3a63d62def16a5d521e85911hyw if (fInterceptMF)
738dd1949fabecbe3a63d62def16a5d521e85911hyw pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki else
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh /* Additional intercepts for debugging, define these yourself explicitly. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_BP)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl | RT_BIT(X86_XCPT_DB)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav | RT_BIT(X86_XCPT_DE)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav | RT_BIT(X86_XCPT_NM)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav | RT_BIT(X86_XCPT_UD)
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh | RT_BIT(X86_XCPT_NP)
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh | RT_BIT(X86_XCPT_SS)
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh | RT_BIT(X86_XCPT_GP)
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh | RT_BIT(X86_XCPT_PF)
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh | RT_BIT(X86_XCPT_MF);
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh#elif defined(HMVMX_ALWAYS_TRAP_PF)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#endif
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 & pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 | pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl u32GuestCR0 |= uSetCR0;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl u32GuestCR0 &= uZapCR0;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
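        /* Illustration with typical (not guaranteed) values: IA32_VMX_CR0_FIXED0=0x80000021 and
           IA32_VMX_CR0_FIXED1=0xffffffff make uSetCR0 force PE, NE and PG while uZapCR0 clears
           nothing; the unrestricted-guest case above exempts PE and PG from this. */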
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Log(("Load: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", u32GuestCR0, uSetCR0, uZapCR0));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /*
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
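        /* Example: with PG in the mask below, a guest "mov cr0, eax" that flips PG causes a VM-exit
           so we can update our shadow page tables, whereas writes to unmasked bits (e.g. TS while
           the guest FPU is active) go straight through without exiting. */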
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uint64_t u64CR0Mask = 0;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl u64CR0Mask = X86_CR0_PE
25cf1a301a396c38e8adf52c15f537b80d2483f7jl | X86_CR0_NE
25cf1a301a396c38e8adf52c15f537b80d2483f7jl | X86_CR0_WP
25cf1a301a396c38e8adf52c15f537b80d2483f7jl | X86_CR0_PG
25cf1a301a396c38e8adf52c15f537b80d2483f7jl | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
738dd1949fabecbe3a63d62def16a5d521e85911hyw if (pVM->hm.s.vmx.fUnrestrictedGuest)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl u64CR0Mask &= ~X86_CR0_PE;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pVM->hm.s.fNestedPaging)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav u64CR0Mask &= ~X86_CR0_WP;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh
        /* If the guest FPU state is active, we don't need to VM-exit on writes to FPU-related bits in CR0. */
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh if (fInterceptNM)
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh u64CR0Mask |= (X86_CR0_TS | X86_CR0_MP);
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh else
25cf1a301a396c38e8adf52c15f537b80d2483f7jl u64CR0Mask &= ~(X86_CR0_TS | X86_CR0_MP);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVCpu->hm.s.vmx.cr0_mask = u64CR0Mask;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, u64CR0Mask);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertRCReturn(rc, rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /*
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Guest CR2.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * It's always loaded in the assembler code. Nothing to do here.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /*
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Guest CR3.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (pVM->hm.s.fNestedPaging)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
738dd1949fabecbe3a63d62def16a5d521e85911hyw
738dd1949fabecbe3a63d62def16a5d521e85911hyw /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
68ac2337c38c8af06edcf32a72e42de36ec72a9djl | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
68ac2337c38c8af06edcf32a72e42de36ec72a9djl
68ac2337c38c8af06edcf32a72e42de36ec72a9djl /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
            AssertMsg(   ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3      /* Bits 5:3 (EPT page walk length - 1) must be 3. */
                      && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x3f) == 0,     /* Bits 11:6 MBZ. */
68ac2337c38c8af06edcf32a72e42de36ec72a9djl ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
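            /* Worked example (illustrative address): an EPT PML4 table at host-physical 0x12345000
               yields an EPTP of 0x1234501e -- memtype WB (6) in bits 2:0 and the page-walk length
               minus one (3) in bits 5:3. */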
68ac2337c38c8af06edcf32a72e42de36ec72a9djl
68ac2337c38c8af06edcf32a72e42de36ec72a9djl rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
68ac2337c38c8af06edcf32a72e42de36ec72a9djl AssertRCReturn(rc, rc);
68ac2337c38c8af06edcf32a72e42de36ec72a9djl Log(("Load: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
68ac2337c38c8af06edcf32a72e42de36ec72a9djl
68ac2337c38c8af06edcf32a72e42de36ec72a9djl if ( pVM->hm.s.vmx.fUnrestrictedGuest
68ac2337c38c8af06edcf32a72e42de36ec72a9djl || CPUMIsGuestPagingEnabledEx(pCtx))
68ac2337c38c8af06edcf32a72e42de36ec72a9djl {
68ac2337c38c8af06edcf32a72e42de36ec72a9djl /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
68ac2337c38c8af06edcf32a72e42de36ec72a9djl if (CPUMIsGuestInPAEModeEx(pCtx))
68ac2337c38c8af06edcf32a72e42de36ec72a9djl {
68ac2337c38c8af06edcf32a72e42de36ec72a9djl rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
68ac2337c38c8af06edcf32a72e42de36ec72a9djl rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);
68ac2337c38c8af06edcf32a72e42de36ec72a9djl rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);
68ac2337c38c8af06edcf32a72e42de36ec72a9djl rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);
68ac2337c38c8af06edcf32a72e42de36ec72a9djl rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);
68ac2337c38c8af06edcf32a72e42de36ec72a9djl AssertRCReturn(rc, rc);
68ac2337c38c8af06edcf32a72e42de36ec72a9djl }
68ac2337c38c8af06edcf32a72e42de36ec72a9djl
68ac2337c38c8af06edcf32a72e42de36ec72a9djl            /* With Nested Paging, the guest's view of its CR3 is unblemished while it's using paging, or while we
68ac2337c38c8af06edcf32a72e42de36ec72a9djl               have Unrestricted Execution to handle the guest when it's not using paging. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl GCPhysGuestCR3 = pCtx->cr3;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /*
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * EPT takes care of translating it to host-physical addresses.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
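25cf1a301a396c38e8adf52c15f537b80d2483f7jl                /* E.g. (illustrative): a guest access to linear 0x000b8000 goes through the identity-mapped
25cf1a301a396c38e8adf52c15f537b80d2483f7jl                   page table unchanged (linear 0x000b8000 -> guest-physical 0x000b8000), and EPT then remaps
25cf1a301a396c38e8adf52c15f537b80d2483f7jl                   that guest-physical address to wherever the host actually backs it. */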
0cc8ae8667155d352d327b5c92b62899a7e05bcdav RTGCPHYS GCPhys;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(PDMVmmDevHeapIsEnabled(pVM));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* We obtain it here every time as the guest could have relocated this PCI region. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertRCReturn(rc, rc);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl GCPhysGuestCR3 = GCPhys;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* Non-nested paging case, just use the hypervisor's CR3. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl GCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl        Log(("Load: VMX_VMCS_GUEST_CR3=%#RGp\n", GCPhysGuestCR3));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertRCReturn(rc, rc);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /*
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Guest CR4.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
601c2e1ed5ec8de33296fed3938598da99915e7adhain Assert(!(pCtx->cr4 >> 32));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl uint32_t u32GuestCR4 = pCtx->cr4;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* The guest's view of its CR4 is unblemished. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertRCReturn(rc, rc);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Log(("Load: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", u32GuestCR4));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* Setup VT-x's view of the guest CR4. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /*
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
25cf1a301a396c38e8adf52c15f537b80d2483f7jl         * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0()).
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(pVM->hm.s.vmx.pRealModeTSS);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(PDMVmmDevHeapIsEnabled(pVM));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav u32GuestCR4 &= ~X86_CR4_VME;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh if (pVM->hm.s.fNestedPaging)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
738dd1949fabecbe3a63d62def16a5d521e85911hyw if ( !CPUMIsGuestPagingEnabledEx(pCtx)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav && !pVM->hm.s.vmx.fUnrestrictedGuest)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl u32GuestCR4 |= X86_CR4_PSE;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl                /* Our identity mapping is a 32-bit page directory. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl u32GuestCR4 &= ~X86_CR4_PAE;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* else use guest CR4.*/
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /*
25cf1a301a396c38e8adf52c15f537b80d2483f7jl             * The shadow paging mode can differ from the guest paging mode: the shadow is in accordance with the host
25cf1a301a396c38e8adf52c15f537b80d2483f7jl             * paging mode, and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl switch (pVCpu->hm.s.enmShadowMode)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl case PGMMODE_REAL: /* Real-mode. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl case PGMMODE_PROTECTED: /* Protected mode without paging. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl case PGMMODE_32_BIT: /* 32-bit paging. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl u32GuestCR4 &= ~X86_CR4_PAE;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl break;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
0cc8ae8667155d352d327b5c92b62899a7e05bcdav case PGMMODE_PAE: /* PAE paging. */
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav case PGMMODE_PAE_NX: /* PAE paging with NX. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav u32GuestCR4 |= X86_CR4_PAE;
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav break;
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#ifdef VBOX_ENABLE_64_BITS_GUESTS
25cf1a301a396c38e8adf52c15f537b80d2483f7jl break;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#endif
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh default:
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertFailed();
25cf1a301a396c38e8adf52c15f537b80d2483f7jl return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 & pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 | pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl u32GuestCR4 |= uSetCR4;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl u32GuestCR4 &= uZapCR4;
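25cf1a301a396c38e8adf52c15f537b80d2483f7jl        /*
25cf1a301a396c38e8adf52c15f537b80d2483f7jl         * Worked example with assumed, illustrative MSR values: if vmx_cr4_fixed0 = 0x2000 (only
25cf1a301a396c38e8adf52c15f537b80d2483f7jl         * CR4.VMXE must be 1) and vmx_cr4_fixed1 = 0x3767ff (bits that may be 1), then
25cf1a301a396c38e8adf52c15f537b80d2483f7jl         *     uSetCR4 = 0x2000 & 0x3767ff = 0x2000   (OR'ed in, forcing CR4.VMXE on) and
25cf1a301a396c38e8adf52c15f537b80d2483f7jl         *     uZapCR4 = 0x2000 | 0x3767ff = 0x3767ff (AND'ed in, clearing bits the CPU can't have set).
25cf1a301a396c38e8adf52c15f537b80d2483f7jl         */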
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav /* Write VT-x's view of the guest CR4 into the VMCS. */
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav Log(("Load: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", u32GuestCR4, uSetCR4, uZapCR4));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl        /* Set up the CR4 mask. These are the CR4 bits owned by the host; if the guest attempts to change them, a VM-exit occurs. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl        uint64_t u64CR4Mask = X86_CR4_VME
25cf1a301a396c38e8adf52c15f537b80d2483f7jl                            | X86_CR4_PAE
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl                            | X86_CR4_PGE
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl                            | X86_CR4_PSE
0cc8ae8667155d352d327b5c92b62899a7e05bcdav                            | X86_CR4_VMXE;
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav pVCpu->hm.s.vmx.cr4_mask = u64CR4Mask;
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, u64CR4Mask);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertRCReturn(rc, rc);
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl }
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl return rc;
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl}
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav/**
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki * Loads the guest debug registers into the guest-state area in the VMCS.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * This also sets up whether #DB and MOV DRx accesses cause VM exits.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav * @returns VBox status code.
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav * @param pVCpu Pointer to the VMCPU.
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl * @param pMixedCtx Pointer to the guest-CPU context. The data may be
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl * out-of-sync. Make sure to update the required fields
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl * before using them.
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl *
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki * @remarks No-long-jump zone!!!
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl */
78ed97a7b79b59ef2ef41f190c9be35c54d90119jlstatic int hmR0VmxLoadGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl{
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl return VINF_SUCCESS;
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#ifdef VBOX_STRICT
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!(pMixedCtx->dr[7] >> 32)); /* upper 32 bits are reserved (MBZ). */
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert((pMixedCtx->dr[7] & 0xd800) == 0); /* bits 15, 14, 12, 11 are reserved (MBZ). */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert((pMixedCtx->dr[7] & 0x400) == 0x400); /* bit 10 is reserved (MB1). */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#endif
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl int rc = VERR_INTERNAL_ERROR_5;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav PVM pVM = pVCpu->CTX_SUFF(pVM);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl bool fInterceptDB = false;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl bool fInterceptMovDRx = false;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (DBGFIsStepping(pVCpu))
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertRCReturn(rc, rc);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(fInterceptDB == false);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else
0cc8ae8667155d352d327b5c92b62899a7e05bcdav fInterceptDB = true;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (!CPUMIsHyperDebugStateActive(pVCpu))
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertRC(rc);
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(CPUMIsHyperDebugStateActive(pVCpu));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl fInterceptMovDRx = true;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (!CPUMIsGuestDebugStateActive(pVCpu))
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh Assert(CPUMIsGuestDebugStateActive(pVCpu));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(fInterceptMovDRx == false);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else if (!CPUMIsGuestDebugStateActive(pVCpu))
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl        /* The guest debug state isn't loaded; intercept MOV DRx accesses even now, so the first access can be caught and the state loaded lazily. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl fInterceptMovDRx = true;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
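25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl    /*
25cf1a301a396c38e8adf52c15f537b80d2483f7jl     * Summary of the intercept decisions above:
25cf1a301a396c38e8adf52c15f537b80d2483f7jl     *   - Hypervisor DR7 armed         -> load the hyper debug state, intercept MOV DRx.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl     *   - Guest DR7 armed              -> load the guest debug state, let MOV DRx through.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl     *   - Guest debug state not active -> intercept MOV DRx to load the state on first access.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl     *   - DBGF single-stepping         -> use the monitor trap flag if available, else intercept #DB.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl     */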
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Update the exception bitmap regarding intercepting #DB generated by the guest. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (fInterceptDB)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw#endif
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw }
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw /* Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions. */
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw if (fInterceptMovDRx)
738dd1949fabecbe3a63d62def16a5d521e85911hyw pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh else
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw /* The guest's view of its DR7 is unblemished. */
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw return rc;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl}
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#ifdef VBOX_STRICT
25cf1a301a396c38e8adf52c15f537b80d2483f7jl/**
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Strict function to validate segment registers.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jlstatic void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
738dd1949fabecbe3a63d62def16a5d521e85911hyw{
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw if ( !pVM->hm.s.vmx.fUnrestrictedGuest
25cf1a301a396c38e8adf52c15f537b80d2483f7jl && ( !CPUMIsGuestInRealModeEx(pCtx)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl && !CPUMIsGuestInV86ModeEx(pCtx)))
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* Protected mode checks */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* CS */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->cs.Attr.n.u1Present);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!(pCtx->cs.Attr.u & 0xf00));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
25cf1a301a396c38e8adf52c15f537b80d2483f7jl || !(pCtx->cs.Attr.n.u1Granularity));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert( !(pCtx->cs.u32Limit & 0xfff00000)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl || (pCtx->cs.Attr.n.u1Granularity));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->cs.Attr.u && pCtx->cs.Attr.u != HMVMX_SEL_UNUSABLE); /* CS cannot be loaded with NULL in protected mode. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else
25cf1a301a396c38e8adf52c15f537b80d2483f7jl            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* SS */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pCtx->ss.Attr.u && pCtx->ss.Attr.u != HMVMX_SEL_UNUSABLE)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->ss.Attr.n.u1Present);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!(pCtx->ss.Attr.u & 0xf00));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
25cf1a301a396c38e8adf52c15f537b80d2483f7jl || !(pCtx->ss.Attr.n.u1Granularity));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert( !(pCtx->ss.u32Limit & 0xfff00000)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl || (pCtx->ss.Attr.n.u1Granularity));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* CR0 might not be up-to-date here always, hence disabled. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#if 0
25cf1a301a396c38e8adf52c15f537b80d2483f7jl        if (!(pCtx->cr0 & X86_CR0_PE))
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!pCtx->ss.Attr.n.u2Dpl);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#endif
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pCtx->ds.Attr.u && pCtx->ds.Attr.u != HMVMX_SEL_UNUSABLE)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->ds.Attr.n.u1Present);
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh Assert(!(pCtx->ds.Attr.u & 0xf00));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
25cf1a301a396c38e8adf52c15f537b80d2483f7jl || !(pCtx->ds.Attr.n.u1Granularity));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert( !(pCtx->ds.u32Limit & 0xfff00000)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl || (pCtx->ds.Attr.n.u1Granularity));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pCtx->es.Attr.u && pCtx->es.Attr.u != HMVMX_SEL_UNUSABLE)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->es.Attr.n.u1Present);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!(pCtx->es.Attr.u & 0xf00));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!(pCtx->es.Attr.u & 0xfffe0000));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh || !(pCtx->es.Attr.n.u1Granularity));
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh Assert( !(pCtx->es.u32Limit & 0xfff00000)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl || (pCtx->es.Attr.n.u1Granularity));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw if (pCtx->fs.Attr.u && pCtx->fs.Attr.u != HMVMX_SEL_UNUSABLE)
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw {
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->fs.Attr.n.u1Present);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh Assert(!(pCtx->fs.Attr.u & 0xf00));
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh || !(pCtx->fs.Attr.n.u1Granularity));
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh Assert( !(pCtx->fs.u32Limit & 0xfff00000)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl || (pCtx->fs.Attr.n.u1Granularity));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pCtx->gs.Attr.u && pCtx->gs.Attr.u != HMVMX_SEL_UNUSABLE)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh Assert(pCtx->gs.Attr.n.u1Present);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh Assert(!(pCtx->gs.Attr.u & 0xf00));
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw || !(pCtx->gs.Attr.n.u1Granularity));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert( !(pCtx->gs.u32Limit & 0xfff00000)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl || (pCtx->gs.Attr.n.u1Granularity));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh /* 64-bit capable CPUs. */
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!(pCtx->cs.u64Base >> 32));
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl# endif
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else if ( CPUMIsGuestInV86ModeEx(pCtx)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl || ( CPUMIsGuestInRealModeEx(pCtx)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl && !pVM->hm.s.vmx.fUnrestrictedGuest))
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* Real and v86 mode checks. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl        /* hmR0VmxWriteSegmentReg() writes the modified attributes into the VMCS; here we want to check what we're actually feeding to VT-x. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
738dd1949fabecbe3a63d62def16a5d521e85911hyw if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl else
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
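25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl        /* A note on the 0xf3 attribute value used by the real-on-v86 hack: decoding the access-right
25cf1a301a396c38e8adf52c15f537b80d2483f7jl           bits gives type=3 (read/write, accessed data), S=1 (code/data), DPL=3, P=1 - i.e. a
25cf1a301a396c38e8adf52c15f537b80d2483f7jl           present ring-3 data segment, which is exactly what virtual-8086 mode expects. */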
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* CS */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl        AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 sel %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->cs.u32Limit == 0xffff);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(u32CSAttr == 0xf3);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* SS */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->ss.u32Limit == 0xffff);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(u32SSAttr == 0xf3);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* DS */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh Assert(pCtx->ds.u32Limit == 0xffff);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(u32DSAttr == 0xf3);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* ES */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->es.u32Limit == 0xffff);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(u32ESAttr == 0xf3);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* FS */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->fs.u32Limit == 0xffff);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(u32FSAttr == 0xf3);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* GS */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pCtx->gs.u32Limit == 0xffff);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(u32GSAttr == 0xf3);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /* 64-bit capable CPUs. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!(pCtx->cs.u64Base >> 32));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw# endif
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw }
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw}
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw#endif /* VBOX_STRICT */
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw/**
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * Writes a guest segment register into the guest-state area in the VMCS.
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw *
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * @returns VBox status code.
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * @param pVCpu Pointer to the VMCPU.
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * @param idxSel Index of the selector in the VMCS.
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * @param idxLimit Index of the segment limit in the VMCS.
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * @param idxBase Index of the segment base in the VMCS.
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh * @param idxAccess Index of the access rights of the segment in the VMCS.
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * @param pSelReg Pointer to the segment selector.
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * @param pCtx Pointer to the guest-CPU context.
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw *
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * @remarks No-long-jump zone!!!
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw */
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hywstatic int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw uint32_t idxAccess, PCPUMSELREG pSelReg, PCPUMCTX pCtx)
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw{
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw int rc;
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw rc |= VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw rc |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw AssertRCReturn(rc, rc);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw uint32_t u32Access = pSelReg->Attr.u;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl        /* VT-x requires our real-on-v86 mode hack to override the segment access-right bits. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl u32Access = 0xf3;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw else
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw {
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw /*
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw         * The way to differentiate whether this is really a null selector or just a selector loaded with 0 in
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw         * real-mode is by using the segment attributes. A selector loaded with 0 in real-mode is valid and usable in
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw         * protected-mode and we should -not- mark it as an unusable segment. Both the recompiler and VT-x ensure that
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw         * null selectors loaded in protected-mode have their attributes as 0.
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw */
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw if (!u32Access)
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw u32Access = HMVMX_SEL_UNUSABLE;
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw }
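cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw    /* Illustrative example of the distinction above: a selector register loaded with 0 in real mode
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw       keeps real-mode attributes (e.g. Attr.u = 0x93, a present read/write data segment) and stays
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw       usable, while a selector loaded with 0 in protected mode has Attr.u == 0 and is marked
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw       HMVMX_SEL_UNUSABLE here so VT-x treats it as a true null selector. */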
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh AssertMsg((u32Access == HMVMX_SEL_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw                     ("Access bit not set for usable segment. idx=%#x sel=%#x attr=%#x\n", idxSel, pSelReg->Sel, pSelReg->Attr.u));
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh AssertRCReturn(rc, rc);
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh return rc;
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh}
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw/**
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw * into the guest-state area in the VMCS.
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw *
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @returns VBox status code.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param   pVCpu           Pointer to the VMCPU.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * @param pMixedCtx Pointer to the guest-CPU context. The data may be
738dd1949fabecbe3a63d62def16a5d521e85911hyw * out-of-sync. Make sure to update the required fields
738dd1949fabecbe3a63d62def16a5d521e85911hyw * before using them.
738dd1949fabecbe3a63d62def16a5d521e85911hyw *
738dd1949fabecbe3a63d62def16a5d521e85911hyw * @remarks No-long-jump zone!!!
738dd1949fabecbe3a63d62def16a5d521e85911hyw */
07d06da50d310a325b457d6330165aebab1e0064Surya Prakkistatic int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl{
25cf1a301a396c38e8adf52c15f537b80d2483f7jl int rc = VERR_INTERNAL_ERROR_5;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl PVM pVM = pVCpu->CTX_SUFF(pVM);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
37afe445f2ac4e360ddb647505aa7deb929fe5e3hyw /*
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Guest Segment registers: CS, SS, DS, ES, FS, GS.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw {
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw pVCpu->hm.s.vmx.RealMode.uAttrCS.u = pMixedCtx->cs.Attr.u;
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw pVCpu->hm.s.vmx.RealMode.uAttrSS.u = pMixedCtx->ss.Attr.u;
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw pVCpu->hm.s.vmx.RealMode.uAttrDS.u = pMixedCtx->ds.Attr.u;
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw pVCpu->hm.s.vmx.RealMode.uAttrES.u = pMixedCtx->es.Attr.u;
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw pVCpu->hm.s.vmx.RealMode.uAttrFS.u = pMixedCtx->fs.Attr.u;
37afe445f2ac4e360ddb647505aa7deb929fe5e3hyw pVCpu->hm.s.vmx.RealMode.uAttrGS.u = pMixedCtx->gs.Attr.u;
738dd1949fabecbe3a63d62def16a5d521e85911hyw }
738dd1949fabecbe3a63d62def16a5d521e85911hyw
738dd1949fabecbe3a63d62def16a5d521e85911hyw#ifdef VBOX_WITH_REM
738dd1949fabecbe3a63d62def16a5d521e85911hyw if (!pVM->hm.s.vmx.fUnrestrictedGuest)
738dd1949fabecbe3a63d62def16a5d521e85911hyw {
738dd1949fabecbe3a63d62def16a5d521e85911hyw Assert(pVM->hm.s.vmx.pRealModeTSS);
738dd1949fabecbe3a63d62def16a5d521e85911hyw AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw if ( pVCpu->hm.s.vmx.fWasInRealMode
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw {
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw in real-mode (e.g. OpenBSD 4.0) */
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw REMFlushTBs(pVM);
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw Log(("Load: Switch to protected mode detected!\n"));
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw pVCpu->hm.s.vmx.fWasInRealMode = false;
37afe445f2ac4e360ddb647505aa7deb929fe5e3hyw }
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw }
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki#endif
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
25cf1a301a396c38e8adf52c15f537b80d2483f7jl VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs, pMixedCtx);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
25cf1a301a396c38e8adf52c15f537b80d2483f7jl VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss, pMixedCtx);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
25cf1a301a396c38e8adf52c15f537b80d2483f7jl VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds, pMixedCtx);
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
25cf1a301a396c38e8adf52c15f537b80d2483f7jl VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es, pMixedCtx);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
25cf1a301a396c38e8adf52c15f537b80d2483f7jl VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs, pMixedCtx);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
25cf1a301a396c38e8adf52c15f537b80d2483f7jl VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs, pMixedCtx);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertRCReturn(rc, rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw#ifdef VBOX_STRICT
cfb9e06246189a19958ae6c1a6f3bcb07f06c191hyw hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
25cf1a301a396c38e8adf52c15f537b80d2483f7jl#endif
25cf1a301a396c38e8adf52c15f537b80d2483f7jl pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
25cf1a301a396c38e8adf52c15f537b80d2483f7jl }
25cf1a301a396c38e8adf52c15f537b80d2483f7jl
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /*
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Guest TR.
25cf1a301a396c38e8adf52c15f537b80d2483f7jl */
25cf1a301a396c38e8adf52c15f537b80d2483f7jl if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
25cf1a301a396c38e8adf52c15f537b80d2483f7jl {
25cf1a301a396c38e8adf52c15f537b80d2483f7jl /*
25cf1a301a396c38e8adf52c15f537b80d2483f7jl * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
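0cc8ae8667155d352d327b5c92b62899a7e05bcdav        /* For reference (per Intel spec. 20.2, illustrative of the bitmap's role): when CR4.VME is set
0cc8ae8667155d352d327b5c92b62899a7e05bcdav           and the redirection bitmap bit for vector n is clear, INT n is redirected to the 8086
0cc8ae8667155d352d327b5c92b62899a7e05bcdav           program's handler (CS:IP fetched from its IVT at linear address n*4); when the bit is set,
0cc8ae8667155d352d327b5c92b62899a7e05bcdav           normal protected-mode delivery is used instead. */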
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uint16_t u16Sel = 0;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uint32_t u32Limit = 0;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uint64_t u64Base = 0;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uint32_t u32AccessRights = 0;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav u16Sel = pMixedCtx->tr.Sel;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav u32Limit = pMixedCtx->tr.u32Limit;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav u64Base = pMixedCtx->tr.u64Base;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav u32AccessRights = pMixedCtx->tr.Attr.u;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav else
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(pVM->hm.s.vmx.pRealModeTSS);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav RTGCPHYS GCPhys;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertRCReturn(rc, rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav X86DESCATTR DescAttr;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav DescAttr.u = 0;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav DescAttr.n.u1Present = 1;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav u16Sel = 0;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav u32Limit = HM_VTX_TSS_SIZE;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav u64Base = GCPhys; /* in real-mode phys = virt. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav u32AccessRights = DescAttr.u;
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Validate. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(!(u16Sel & RT_BIT(2)));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl AssertMsg(!(u32AccessRights & HMVMX_SEL_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl Assert( (u32Limit & 0xfff) == 0xfff
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel);
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertRCReturn(rc, rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("Load: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /*
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Guest GDTR.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertRCReturn(rc, rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(!(pMixedCtx->gdtr.cbGdt & UINT64_C(0xffff0000))); /* Bits 31:16 MBZ. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /*
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Guest LDTR.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uint32_t u32Access = 0;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (!pMixedCtx->ldtr.Attr.u)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav u32Access = HMVMX_SEL_UNUSABLE;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav else
0cc8ae8667155d352d327b5c92b62899a7e05bcdav u32Access = pMixedCtx->ldtr.Attr.u;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);
feb5832b942e462df2101f763387f2416fb45f84Mary Beale AssertRCReturn(rc, rc);
feb5832b942e462df2101f763387f2416fb45f84Mary Beale
feb5832b942e462df2101f763387f2416fb45f84Mary Beale /* Validate. */
feb5832b942e462df2101f763387f2416fb45f84Mary Beale if (!(u32Access & HMVMX_SEL_UNUSABLE))
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
0cc8ae8667155d352d327b5c92b62899a7e05bcdav || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki Log(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /*
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Guest IDTR.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt);
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav AssertRCReturn(rc, rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(!(pMixedCtx->idtr.cbIdt & UINT64_C(0xffff0000))); /* Bits 31:16 MBZ. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return VINF_SUCCESS;
056c948b50f079598d6121c0aeabf1de50fabd4etsien}
056c948b50f079598d6121c0aeabf1de50fabd4etsien
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl/**
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl * areas. These MSRs will automatically be loaded to the host CPU on every
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * successful VM entry and stored from the host CPU on every successful VM exit.
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav * Also loads the sysenter MSRs into the guest-state area in the VMCS.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl * @returns VBox status code.
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl * @param pVCpu Pointer to the VMCPU.
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl * @param pMixedCtx Pointer to the guest-CPU context. The data may be
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * out-of-sync. Make sure to update the required fields
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav * before using them.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @remarks No-long-jump zone!!!
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdavstatic int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav{
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertPtr(pVCpu);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /*
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * MSRs covered by Auto-load/store: EFER, LSTAR, STAR, SF_MASK, TSC_AUX (RDTSCP).
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav int rc = VINF_SUCCESS;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
0cc8ae8667155d352d327b5c92b62899a7e05bcdav PVM pVM = pVCpu->CTX_SUFF(pVM);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav PVMXMSR pGuestMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uint32_t cGuestMsrs = 0;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav const bool fSupportsNX = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav const bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (fSupportsNX || fSupportsLongMode)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /** @todo support save IA32_EFER, i.e.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR, in which case the
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * guest EFER need not be part of the VM-entry MSR-load area. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pGuestMsr->u32IndexMSR = MSR_K6_EFER;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pGuestMsr->u32Reserved = 0;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pGuestMsr->u64Value = pMixedCtx->msrEFER;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* VT-x will complain if only MSR_K6_EFER_LME is set. See Intel spec. 26.4 "Loading MSRs" for details. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (!CPUMIsGuestInLongModeEx(pMixedCtx))
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pGuestMsr->u64Value &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pGuestMsr++; cGuestMsrs++;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (fSupportsLongMode)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pGuestMsr->u32IndexMSR = MSR_K8_LSTAR;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pGuestMsr->u32Reserved = 0;
1039f409262fcc394c002cfbadf60149156d2bcbav                pGuestMsr->u64Value      = pMixedCtx->msrLSTAR;           /* 64-bit mode SYSCALL RIP. */
1039f409262fcc394c002cfbadf60149156d2bcbav pGuestMsr++; cGuestMsrs++;
1039f409262fcc394c002cfbadf60149156d2bcbav pGuestMsr->u32IndexMSR = MSR_K6_STAR;
1039f409262fcc394c002cfbadf60149156d2bcbav pGuestMsr->u32Reserved = 0;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pGuestMsr->u64Value = pMixedCtx->msrSTAR; /* legacy syscall eip, cs & ss */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pGuestMsr++; cGuestMsrs++;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pGuestMsr->u32IndexMSR = MSR_K8_SF_MASK;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pGuestMsr->u32Reserved = 0;
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav pGuestMsr->u64Value = pMixedCtx->msrSFMASK; /* syscall flag mask */
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav pGuestMsr++; cGuestMsrs++;
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav pGuestMsr->u32Reserved = 0;
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav pGuestMsr->u64Value = pMixedCtx->msrKERNELGSBASE; /* swapgs exchange value */
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav pGuestMsr++; cGuestMsrs++;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /*
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * RDTSCP requires the TSC_AUX MSR. Host and guest share the physical MSR. So we have to
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * load the guest's copy if the guest can execute RDTSCP without causing VM-exits.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP))
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pGuestMsr->u32IndexMSR = MSR_K8_TSC_AUX;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pGuestMsr->u32Reserved = 0;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pGuestMsr->u64Value);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertRCReturn(rc, rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pGuestMsr++; cGuestMsrs++;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav        /* Shouldn't ever happen, but the CPU does enumerate a maximum; we're well within the recommended 512. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc))
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav LogRel(("CPU autoload/store MSR count in VMCS exceeded cGuestMsrs=%u.\n", cGuestMsrs));
c964b0e6c778331eb72036bb4607ce574c2500a2raghuram return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Update the VCPU's copy of the guest MSR count. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.vmx.cGuestMsrs = cGuestMsrs;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cGuestMsrs);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertRCReturn(rc, rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_AUTO_MSRS;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /*
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Guest Sysenter MSRs.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * These flags are only set when MSR-bitmaps are not supported by the CPU and we cause
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * VM-exits on WRMSRs for these MSRs.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_CS_MSR;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR;
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl }
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl AssertRCReturn(rc, rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return rc;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav}
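
/*
 * Illustrative sketch (not part of the build): hmR0VmxLoadGuestMsrs() above
 * appends entries to the auto-load/store area one by one. A hypothetical
 * helper capturing that pattern, assuming only the PVMXMSR layout used above:
 */
#if 0
static void hmR0VmxSketchAddAutoMsr(PVMXMSR *ppGuestMsr, uint32_t *pcGuestMsrs, uint32_t idMsr, uint64_t u64Value)
{
    (*ppGuestMsr)->u32IndexMSR = idMsr;      /* E.g. MSR_K8_LSTAR. */
    (*ppGuestMsr)->u32Reserved = 0;          /* Reserved, must be zero. */
    (*ppGuestMsr)->u64Value    = u64Value;   /* Loaded on VM-entry, stored back on VM-exit. */
    (*ppGuestMsr)++;
    (*pcGuestMsrs)++;
}
#endif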
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl
78ed97a7b79b59ef2ef41f190c9be35c54d90119jl/**
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Loads the guest activity state into the guest-state area in the VMCS.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @returns VBox status code.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pVCpu Pointer to the VMCPU.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param   pCtx        Pointer to the guest-CPU context. The data may be
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * out-of-sync. Make sure to update the required fields
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * before using them.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @remarks No-long-jump zone!!!
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdavstatic int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav{
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /** @todo See if we can make use of other states, e.g.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav int rc = VINF_SUCCESS;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_ACTIVITY_STATE)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertRCReturn(rc, rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_ACTIVITY_STATE;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return rc;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav}
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav/**
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Sets up the appropriate function to run guest code.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @returns VBox status code.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pVCpu Pointer to the VMCPU.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pMixedCtx Pointer to the guest-CPU context. The data may be
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * out-of-sync. Make sure to update the required fields
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * before using them.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @remarks No-long-jump zone!!!
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdavstatic int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav{
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (CPUMIsGuestInLongModeEx(pMixedCtx))
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#ifndef VBOX_ENABLE_64_BITS_GUESTS
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#endif
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#else
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* 64-bit host or hybrid host. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#endif
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav else
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Guest is not in long mode, use the 32-bit handler. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(pVCpu->hm.s.vmx.pfnStartVM);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return VINF_SUCCESS;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav}
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav/**
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav * Wrapper for running the guest code in VT-x.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @returns VBox strict status code.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pVM Pointer to the VM.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pVCpu Pointer to the VMCPU.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pCtx Pointer to the guest-CPU context.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @remarks No-long-jump zone!!!
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh */
0b240fcdeb4772e65fed050aee3e3dc63308ae72whDECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav{
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav /*
aeb241b2bcd5321bd5e76ba2b5c9a8370d81a6deav * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
0cc8ae8667155d352d327b5c92b62899a7e05bcdav     * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav     * Refer to the MSDN docs "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#ifdef VBOX_WITH_KERNEL_USING_XMM
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return hmR0VMXStartVMWrapXMM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#else
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return pVCpu->hm.s.vmx.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#endif
0cc8ae8667155d352d327b5c92b62899a7e05bcdav}
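
/*
 * Usage sketch (not compiled; surrounding run-loop context is assumed): the
 * first entry on a VMCS uses VMLAUNCH and later entries use VMRESUME; the
 * fResumeVM flag passed above selects between them inside the assembly stub
 * chosen by hmR0VmxSetupVMRunHandler().
 */
#if 0
int rcRun = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
if (RT_SUCCESS(rcRun))
    pVCpu->hm.s.fResumeVM = true;    /* Use VMRESUME for subsequent entries. */
#endif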
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav/**
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Reports world-switch errors and dumps some useful debug info.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pVM Pointer to the VM.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pVCpu Pointer to the VMCPU.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pCtx Pointer to the guest-CPU context.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pVmxTransient Pointer to the VMX transient structure (only
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * exitReason updated).
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdavstatic void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav{
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(pVM);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(pVCpu);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(pCtx);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(pVmxTransient);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("VM-entry failure: %Rrc\n", rcVMRun));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav switch (rcVMRun)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav case VERR_VMX_INVALID_VMXON_PTR:
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertFailed();
0cc8ae8667155d352d327b5c92b62899a7e05bcdav break;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.lasterror.u32ExitReason);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.lasterror.u32InstrError);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#ifdef VBOX_STRICT
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("uExitReason %#x (VmxTransient %#x)\n", pVCpu->hm.s.vmx.lasterror.u32ExitReason,
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pVmxTransient->uExitReason));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("Exit Qualification %#x\n", pVmxTransient->uExitQualification));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("InstrError %#x\n", pVCpu->hm.s.vmx.lasterror.u32InstrError));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (pVCpu->hm.s.vmx.lasterror.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.lasterror.u32InstrError]));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav else
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* VMX control bits. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uint32_t u32Val;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uint64_t u64Val;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav HMVMXHCUINTREG uHCReg;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, &u32Val); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS %#RX32\n", u32Val));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, &u32Val); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS %#RX32\n", u32Val));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2, &u32Val); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2 %#RX32\n", u32Val));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_CONTROLS, &u32Val); AssertRC(rc);
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki Log(("VMX_VMCS32_CTRL_ENTRY_CONTROLS %#RX32\n", u32Val));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_CONTROLS, &u32Val); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("VMX_VMCS32_CTRL_EXIT_CONTROLS %#RX32\n", u32Val));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
1039f409262fcc394c002cfbadf60149156d2bcbav rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
1039f409262fcc394c002cfbadf60149156d2bcbav Log(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm Log(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm            Log(("VMX_VMCS_CTRL_CR0_READ_SHADOW        %#RHr\n", uHCReg));
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm /* Guest bits. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav RTGCUINTREG uGCReg;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &uGCReg); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("Old Guest Rip %#RGv New %#RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)uGCReg));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &uGCReg); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("Old Guest Rsp %#RGv New %#RGv\n", (RTGCPTR)pCtx->rsp, (RTGCPTR)uGCReg));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Log(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh /* Host bits. */
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host CR0 %#RHr\n", uHCReg));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host CR3 %#RHr\n", uHCReg));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host CR4 %#RHr\n", uHCReg));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh RTGDTR HostGdtr;
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh PCX86DESCHC pDesc;
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh ASMGetGDTR(&HostGdtr);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh            rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_CS, &u32Val);         AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host CS %#08x\n", u32Val));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh if (u32Val < HostGdtr.cbGdt)
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh {
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh HMR0DumpDescriptor(pDesc, u32Val, "CS: ");
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh }
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host DS %#08x\n", u32Val));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh if (u32Val < HostGdtr.cbGdt)
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh {
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh HMR0DumpDescriptor(pDesc, u32Val, "DS: ");
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh }
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host ES %#08x\n", u32Val));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh if (u32Val < HostGdtr.cbGdt)
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh {
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh HMR0DumpDescriptor(pDesc, u32Val, "ES: ");
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh }
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host FS %#08x\n", u32Val));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh if (u32Val < HostGdtr.cbGdt)
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh {
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh HMR0DumpDescriptor(pDesc, u32Val, "FS: ");
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh }
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host GS %#08x\n", u32Val));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh if (u32Val < HostGdtr.cbGdt)
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh {
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh HMR0DumpDescriptor(pDesc, u32Val, "GS: ");
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh }
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host SS %#08x\n", u32Val));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh if (u32Val < HostGdtr.cbGdt)
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh {
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh HMR0DumpDescriptor(pDesc, u32Val, "SS: ");
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh }
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host TR %#08x\n", u32Val));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh if (u32Val < HostGdtr.cbGdt)
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh {
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh HMR0DumpDescriptor(pDesc, u32Val, "TR: ");
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh }
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host TR Base %#RHv\n", uHCReg));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host GDTR Base %#RHv\n", uHCReg));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host IDTR Base %#RHv\n", uHCReg));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host SYSENTER CS %#08x\n", u32Val));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host SYSENTER EIP %#RHv\n", uHCReg));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host SYSENTER ESP %#RHv\n", uHCReg));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host RSP %#RHv\n", uHCReg));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("Host RIP %#RHv\n", uHCReg));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh if (HMVMX_IS_64BIT_HOST_MODE())
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh {
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh Log(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh }
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh# endif
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh#endif /* VBOX_STRICT */
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh break;
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh }
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh default:
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh /* Impossible */
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh break;
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh }
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh NOREF(pVM);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav}
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#ifndef VMX_USE_CACHED_VMCS_ACCESSES
0cc8ae8667155d352d327b5c92b62899a7e05bcdav# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#endif
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#ifdef VBOX_STRICT
0cc8ae8667155d352d327b5c92b62899a7e05bcdavstatic bool hmR0VmxIsValidWriteField(uint32_t idxField)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav{
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm switch (idxField)
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm {
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm case VMX_VMCS_GUEST_RIP:
0cc8ae8667155d352d327b5c92b62899a7e05bcdav case VMX_VMCS_GUEST_RSP:
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm case VMX_VMCS_GUEST_DR7:
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm case VMX_VMCS_GUEST_SYSENTER_EIP:
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm case VMX_VMCS_GUEST_SYSENTER_ESP:
0cc8ae8667155d352d327b5c92b62899a7e05bcdav case VMX_VMCS_GUEST_GDTR_BASE:
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm case VMX_VMCS_GUEST_IDTR_BASE:
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm case VMX_VMCS_GUEST_CS_BASE:
ad59b69d8d8982a0588be30a82c602e8cb5c67fbbm case VMX_VMCS_GUEST_DS_BASE:
0cc8ae8667155d352d327b5c92b62899a7e05bcdav case VMX_VMCS_GUEST_ES_BASE:
0cc8ae8667155d352d327b5c92b62899a7e05bcdav case VMX_VMCS_GUEST_FS_BASE:
0cc8ae8667155d352d327b5c92b62899a7e05bcdav case VMX_VMCS_GUEST_GS_BASE:
0cc8ae8667155d352d327b5c92b62899a7e05bcdav case VMX_VMCS_GUEST_SS_BASE:
0cc8ae8667155d352d327b5c92b62899a7e05bcdav case VMX_VMCS_GUEST_LDTR_BASE:
0cc8ae8667155d352d327b5c92b62899a7e05bcdav case VMX_VMCS_GUEST_TR_BASE:
0cc8ae8667155d352d327b5c92b62899a7e05bcdav case VMX_VMCS_GUEST_CR3:
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return true;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return false;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav}
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdavstatic bool hmR0VmxIsValidReadField(uint32_t idxField)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav{
0cc8ae8667155d352d327b5c92b62899a7e05bcdav switch (idxField)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Read-only fields. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav case VMX_VMCS_RO_EXIT_QUALIFICATION:
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return true;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* All readable fields should also be part of the VMCS write cache. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return hmR0VmxIsValidWriteField(idxField);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav}
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#endif /* VBOX_STRICT */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav/**
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Executes the specified handler in 64-bit mode.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @returns VBox status code.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pVM Pointer to the VM.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pVCpu Pointer to the VMCPU.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pCtx Pointer to the guest CPU context.
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh * @param enmOp The operation to perform.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param cbParam Number of parameters.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param paParam Array of 32-bit parameters.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdavVMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uint32_t *paParam)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav{
0cc8ae8667155d352d327b5c92b62899a7e05bcdav int rc, rc2;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav PHMGLOBLCPUINFO pCpu;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav RTHCPHYS HCPhysCpuPage;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav RTCCUINTREG uOldEFlags;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
d8a0cca90e67ec5d7e06686e9747f250a1b7d14ewh
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#ifdef VBOX_STRICT
0cc8ae8667155d352d327b5c92b62899a7e05bcdav for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav    for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#endif
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Disable interrupts. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uOldEFlags = ASMIntDisableFlags();
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
0cc8ae8667155d352d327b5c92b62899a7e05bcdav RTCPUID idHostCpu = RTMpCpuId();
0cc8ae8667155d352d327b5c92b62899a7e05bcdav CPUMR0SetLApic(pVM, idHostCpu);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav#endif
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav pCpu = HMR0GetCurrentCpu();
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav    /* Clear the VMCS, marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Leave VMX Root Mode. */
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki VMXDisable();
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki
0cc8ae8667155d352d327b5c92b62899a7e05bcdav CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav CPUMSetHyperEIP(pVCpu, enmOp);
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki for (int i = (int)cbParam - 1; i >= 0; i--)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav CPUMPushHyper(pVCpu, paParam[i]);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Call the switcher. */
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
0cc8ae8667155d352d327b5c92b62899a7e05bcdav STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
07d06da50d310a325b457d6330165aebab1e0064Surya Prakki /** @todo replace with hmR0VmxEnterRootMode() and LeaveRootMode(). */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Make sure the VMX instructions don't cause #UD faults. */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav /* Re-enter VMX Root Mode */
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc2 = VMXEnable(HCPhysCpuPage);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav if (RT_FAILURE(rc2))
0cc8ae8667155d352d327b5c92b62899a7e05bcdav {
0cc8ae8667155d352d327b5c92b62899a7e05bcdav ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav ASMSetFlags(uOldEFlags);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return rc2;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav }
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav rc2 = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh AssertRC(rc2);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav Assert(!(ASMGetFlags() & X86_EFL_IF));
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh ASMSetFlags(uOldEFlags);
0cc8ae8667155d352d327b5c92b62899a7e05bcdav return rc;
0cc8ae8667155d352d327b5c92b62899a7e05bcdav}
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav
0cc8ae8667155d352d327b5c92b62899a7e05bcdav/**
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * supporting 64-bit guests.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav *
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @returns VBox status code.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param fResume Whether to VMLAUNCH or VMRESUME.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav * @param pCtx Pointer to the guest-CPU context.
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh * @param pCache Pointer to the VMCS cache.
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh * @param pVM Pointer to the VM.
0b240fcdeb4772e65fed050aee3e3dc63308ae72wh * @param pVCpu Pointer to the VMCPU.
0cc8ae8667155d352d327b5c92b62899a7e05bcdav */
0cc8ae8667155d352d327b5c92b62899a7e05bcdavDECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
0cc8ae8667155d352d327b5c92b62899a7e05bcdav{
0cc8ae8667155d352d327b5c92b62899a7e05bcdav uint32_t aParam[6];
PHMGLOBLCPUINFO pCpu = NULL;
RTHCPHYS HCPhysCpuPage = 0;
int rc = VERR_INTERNAL_ERROR_5;
AssertReturn(pVM->hm.s.pfnVMXGCStartVM64, VERR_HM_IPE_5);
pCpu = HMR0GetCurrentCpu();
HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
pCache->uPos = 1;
pCache->interPD = PGMGetInterPaeCR3(pVM);
pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
#endif
#ifdef VBOX_STRICT
pCache->TestIn.HCPhysCpuPage = 0;
pCache->TestIn.HCPhysVmcs = 0;
pCache->TestIn.pCache = 0;
pCache->TestOut.HCPhysVmcs = 0;
pCache->TestOut.pCache = 0;
pCache->TestOut.pCtx = 0;
pCache->TestOut.eflags = 0;
#endif
aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
aParam[5] = 0;
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
#endif
rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, 6, &aParam[0]);
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
Assert(pCtx->dr[4] == 10);
*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
#endif
#ifdef VBOX_STRICT
AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
pVCpu->hm.s.vmx.HCPhysVmcs));
AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
pCache->TestOut.HCPhysVmcs));
AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
pCache->TestOut.pCache));
AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
pCache->TestOut.pCtx));
Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
#endif
return rc;
}
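
/*
 * Illustrative sketch (not compiled): how the aParam[] packing above splits a
 * 64-bit physical address into the two 32-bit parameters consumed by the
 * 64-bit handler; the helper name is hypothetical.
 */
#if 0
static void sketchSplitPhysAddr(RTHCPHYS HCPhys, uint32_t *pu32Lo, uint32_t *pu32Hi)
{
    *pu32Lo = (uint32_t)HCPhys;            /* Physical address - Lo. */
    *pu32Hi = (uint32_t)(HCPhys >> 32);    /* Physical address - Hi. */
}
#endif
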
/**
 * Initializes the VMCS read cache. The VMCS cache is used for 32-bit hosts
* running 64-bit guests (except 32-bit Darwin which runs with 64-bit paging in
* 32-bit mode) for 64-bit fields that cannot be accessed in 32-bit mode. Some
* 64-bit fields -can- be accessed (those that have a 32-bit FULL & HIGH part).
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
*/
static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
{
#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
{ \
Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
++cReadFields; \
}
AssertPtr(pVM);
AssertPtr(pVCpu);
PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
uint32_t cReadFields = 0;
/* Guest-natural selector base fields */
#if 0
/* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
#endif
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DR7);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
#if 0
/* Unused natural width guest-state fields. */
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
#endif
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
/* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
#if 0
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
#endif
/* Natural width guest-state fields. */
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
#if 0
/* Currently unused field. */
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
#endif
if (pVM->hm.s.fNestedPaging)
{
VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
}
else
{
AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
}
#undef VMXLOCAL_INIT_READ_CACHE_FIELD
return VINF_SUCCESS;
}
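
/*
 * Illustrative sketch (not compiled): looking up a value cached by the
 * VMXLOCAL_INIT_READ_CACHE_FIELD() entries above once the read cache has been
 * filled by VMREADs; the helper name and index parameter are hypothetical.
 */
#if 0
static uint64_t sketchGetCachedReadField(PVMCSCACHE pCache, uint32_t idxCacheField)
{
    Assert(idxCacheField < pCache->Read.cValidEntries);
    return pCache->Read.aFieldVal[idxCacheField];
}
#endif
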
/**
* Writes a field into the VMCS. This can either directly invoke a VMWRITE or
* queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
* darwin, running 64-bit guests).
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param idxField The VMCS field encoding.
 * @param   u64Val          The 16-, 32- or 64-bit value to write.
*/
VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
{
int rc;
switch (idxField)
{
/*
         * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
*/
/* 64-bit Control fields. */
case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
case VMX_VMCS64_CTRL_EPTP_FULL:
case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
/* 64-bit Guest-state fields. */
case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
case VMX_VMCS64_GUEST_PAT_FULL:
case VMX_VMCS64_GUEST_EFER_FULL:
case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
case VMX_VMCS64_GUEST_PDPTE0_FULL:
case VMX_VMCS64_GUEST_PDPTE1_FULL:
case VMX_VMCS64_GUEST_PDPTE2_FULL:
case VMX_VMCS64_GUEST_PDPTE3_FULL:
/* 64-bit Host-state fields. */
case VMX_VMCS64_HOST_FIELD_PAT_FULL:
case VMX_VMCS64_HOST_FIELD_EFER_FULL:
case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
{
rc = VMXWriteVmcs32(idxField, u64Val);
rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32));
break;
}
/*
* These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
* values). When we switch the host to 64-bit mode for running 64-bit guests, these VMWRITEs get executed then.
*/
/* Natural-width Guest-state fields. */
case VMX_VMCS_GUEST_CR3:
case VMX_VMCS_GUEST_ES_BASE:
case VMX_VMCS_GUEST_CS_BASE:
case VMX_VMCS_GUEST_SS_BASE:
case VMX_VMCS_GUEST_DS_BASE:
case VMX_VMCS_GUEST_FS_BASE:
case VMX_VMCS_GUEST_GS_BASE:
case VMX_VMCS_GUEST_LDTR_BASE:
case VMX_VMCS_GUEST_TR_BASE:
case VMX_VMCS_GUEST_GDTR_BASE:
case VMX_VMCS_GUEST_IDTR_BASE:
case VMX_VMCS_GUEST_DR7:
case VMX_VMCS_GUEST_RSP:
case VMX_VMCS_GUEST_RIP:
case VMX_VMCS_GUEST_SYSENTER_ESP:
case VMX_VMCS_GUEST_SYSENTER_EIP:
{
if (!(u64Val >> 32))
{
/* If this field is 64-bit, VT-x will zero out the top bits. */
rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
}
else
{
/* Assert that only the 32->64 switcher case should ever come here. */
Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
}
break;
}
default:
{
AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
rc = VERR_INVALID_PARAMETER;
break;
}
}
AssertRCReturn(rc, rc);
return rc;
}
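
/*
 * Usage sketch (not compiled; pVCpu, pMixedCtx and the computed u64TscOffset
 * are assumed from the surrounding context): a FULL/HIGH field is written as
 * two 32-bit halves at idxField and idxField + 1, while a natural-width field
 * with bits set above 31 gets queued in the write cache instead.
 */
#if 0
rc = VMXWriteVmcs64Ex(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, u64TscOffset);  /* Two VMWRITEs. */
rc = VMXWriteVmcs64Ex(pVCpu, VMX_VMCS_GUEST_RIP, pMixedCtx->rip);             /* Cached when rip > 4G. */
#endif
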
/**
 * Queues up a VMWRITE by using the VMCS write cache. This is only used on
 * 32-bit hosts (except darwin) for 64-bit guests.
 *
 * @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param idxField The VMCS field encoding.
 * @param   u64Val          The 16-, 32- or 64-bit value to write.
*/
VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
{
AssertPtr(pVCpu);
PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
/* Make sure there are no duplicates. */
for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
{
if (pCache->Write.aField[i] == idxField)
{
pCache->Write.aFieldVal[i] = u64Val;
return VINF_SUCCESS;
}
}
pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
pCache->Write.cValidEntries++;
return VINF_SUCCESS;
}
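
/*
 * Usage sketch (not compiled; pVCpu and pMixedCtx assumed): queueing a
 * natural-width write from 32-bit host code. The queued VMWRITEs are executed
 * once the CPU is in 64-bit mode, see VMXWriteCachedVmcsLoad() below.
 */
#if 0
int rc = VMXWriteCachedVmcsEx(pVCpu, VMX_VMCS_GUEST_FS_BASE, pMixedCtx->fs.u64Base);
#endif
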
/* Enable later when the assembly code uses these as callbacks. */
#if 0
/**
* Loads the VMCS write-cache into the CPU (by executing VMWRITEs).
*
* @param pVCpu Pointer to the VMCPU.
* @param pCache Pointer to the VMCS cache.
*
* @remarks No-long-jump zone!!!
*/
VMMR0DECL(void) VMXWriteCachedVmcsLoad(PVMCPU pVCpu, PVMCSCACHE pCache)
{
AssertPtr(pCache);
for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
{
int rc = VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
AssertRC(rc);
}
pCache->Write.cValidEntries = 0;
}

/**
* Stores the VMCS read-cache from the CPU (by executing VMREADs).
*
* @param pVCpu Pointer to the VMCPU.
* @param pCache Pointer to the VMCS cache.
*
* @remarks No-long-jump zone!!!
*/
VMMR0DECL(void) VMXReadCachedVmcsStore(PVMCPU pVCpu, PVMCSCACHE pCache)
{
AssertPtr(pCache);
for (uint32_t i = 0; i < pCache->Read.cValidEntries; i++)
{
int rc = VMXReadVmcs64(pCache->Read.aField[i], &pCache->Read.aFieldVal[i]);
AssertRC(rc);
}
}
#endif
#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */


/**
* Sets up the usage of TSC-offsetting and updates the VMCS. If offsetting is
 * not possible, causes VM-exits on RDTSC(P). Also sets up the VMX preemption
* timer.
*
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
* @remarks No-long-jump zone!!!
*/
static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
int rc = VERR_INTERNAL_ERROR_5;
bool fOffsettedTsc = false;
PVM pVM = pVCpu->CTX_SUFF(pVM);
if (pVM->hm.s.vmx.fUsePreemptTimer)
{
uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset);
/* Make sure the returned values have sane upper and lower boundaries. */
uint64_t u64CpuHz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);
}
else
fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset);
if (fOffsettedTsc)
{
uint64_t u64CurTSC = ASMReadTSC();
if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
{
/* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
}
else
{
/* VM-exit on RDTSC(P) as we would otherwise pass decreasing TSC values to the guest. */
pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
}
}
else
{
/* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
}
}
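
/*
 * Worked example (illustrative figures only): on a 3 GHz host with
 * cPreemptTimerShift = 5, the deadline computed above is clamped to
 * [u64CpuHz / 2048, u64CpuHz / 64] ticks, i.e. roughly 0.5 ms to 15.6 ms,
 * and the programmed timer value is the clamped tick count shifted right
 * by the shift from MSR_IA32_VMX_MISC.
 */
#if 0
uint64_t const u64CpuHz          = UINT64_C(3000000000);              /* 3 GHz. */
uint64_t const cTicksToDeadline  = u64CpuHz / 64;                     /* Upper clamp: 46875000 ticks. */
uint32_t const cPreemptTickCount = (uint32_t)(cTicksToDeadline >> 5); /* 46875000 >> 5 = 1464843. */
#endif
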
/**
* Determines if an exception is a contributory exception. Contributory
* exceptions are ones which can cause double-faults. Page-fault is
* intentionally not included here as it's a conditional contributory exception.
*
* @returns true if the exception is contributory, false otherwise.
* @param uVector The exception vector.
*/
DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
{
switch (uVector)
{
case X86_XCPT_GP:
case X86_XCPT_SS:
case X86_XCPT_NP:
case X86_XCPT_TS:
case X86_XCPT_DE:
return true;
default:
break;
}
return false;
}
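
/*
 * Illustrative example (not compiled): a #GP raised while delivering a #NP is
 * the contributory-on-contributory case that escalates to a #DF, which is how
 * hmR0VmxCheckExitDueToEventDelivery() below uses this predicate.
 */
#if 0
Assert(hmR0VmxIsContributoryXcpt(X86_XCPT_GP));   /* Exit vector. */
Assert(hmR0VmxIsContributoryXcpt(X86_XCPT_NP));   /* IDT-vectoring vector => reflect a #DF. */
#endif
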
/**
* Sets an event as a pending event to be injected into the guest.
*
* @param pVCpu Pointer to the VMCPU.
* @param u32IntrInfo The VM-entry interruption-information field.
* @param cbInstr The VM-entry instruction length in bytes (for software
* interrupts, exceptions and privileged software
* exceptions).
* @param u32ErrCode The VM-entry exception error code.
* @param GCPtrFaultAddress The fault-address (CR2) in case it's a
* page-fault.
*/
DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntrInfo, uint32_t cbInstr, uint32_t u32ErrCode,
RTGCUINTPTR GCPtrFaultAddress)
{
Assert(!pVCpu->hm.s.Event.fPending);
pVCpu->hm.s.Event.fPending = true;
pVCpu->hm.s.Event.u64IntrInfo = u32IntrInfo;
pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
pVCpu->hm.s.Event.cbInstr = cbInstr;
pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
}
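
/*
 * Usage sketch (not compiled; pVCpu assumed): queueing a #GP with a zero error
 * code. The interruption-information encoding mirrors what
 * hmR0VmxSetPendingXcptDF() below builds for #DF.
 */
#if 0
uint32_t u32IntrInfo = X86_XCPT_GP | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
u32IntrInfo         |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
u32IntrInfo         |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
#endif
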
/**
* Sets a double-fault (#DF) exception as pending-for-injection into the VM.
*
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*/
DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
/* Inject the double-fault. */
uint32_t u32IntrInfo = X86_XCPT_DF | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
}


/**
 * Handles a condition that occurred while delivering an event through the guest
* IDT.
*
* @returns VBox status code (informational error codes included).
* @retval VINF_SUCCESS if we should continue handling the VM-exit.
* @retval VINF_VMX_DOUBLE_FAULT if a #DF condition was detected and we ought to
* continue execution of the guest, which will deliver the #DF.
* @retval VINF_EM_RESET if we detected a triple-fault condition.
*
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
* @param pVmxTransient Pointer to the VMX transient structure.
*
* @remarks No-long-jump zone!!!
*/
static int hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
AssertRC(rc);
if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
{
rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
AssertRCReturn(rc, rc);
uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntrInfo);
uint32_t uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
typedef enum
{
VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by the VMM. */
VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
VMXREFLECTXCPT_TF, /* Indicate a triple-faulted state to the VMM. */
VMXREFLECTXCPT_NONE /* Nothing to reflect. */
} VMXREFLECTXCPT;
/* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
if (uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
{
enmReflect = VMXREFLECTXCPT_XCPT;
#ifdef VBOX_STRICT
if ( hmR0VmxIsContributoryXcpt(uIdtVector)
&& uExitVector == X86_XCPT_PF)
{
Log(("IDT: Contributory #PF uCR2=%#RGv\n", pMixedCtx->cr2));
}
#endif
if ( uExitVector == X86_XCPT_PF
&& uIdtVector == X86_XCPT_PF)
{
pVmxTransient->fVectoringPF = true;
Log(("IDT: Vectoring #PF uCR2=%#RGv\n", pMixedCtx->cr2));
}
else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
&& hmR0VmxIsContributoryXcpt(uExitVector)
&& ( hmR0VmxIsContributoryXcpt(uIdtVector)
|| uIdtVector == X86_XCPT_PF))
{
enmReflect = VMXREFLECTXCPT_DF;
}
else if (uIdtVector == X86_XCPT_DF)
enmReflect = VMXREFLECTXCPT_TF;
}
else if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
&& uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
&& uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
{
/*
* Ignore software interrupts (INT n), software exceptions (#BP, #OF) and privileged software exceptions
* (whatever they are) as they reoccur when restarting the instruction.
*/
enmReflect = VMXREFLECTXCPT_XCPT;
}
switch (enmReflect)
{
case VMXREFLECTXCPT_XCPT:
{
uint32_t u32ErrCode = 0;
if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo))
{
rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
AssertRCReturn(rc, rc);
u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
}
/* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INTR_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
rc = VINF_SUCCESS;
Log(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntrInfo,
pVCpu->hm.s.Event.u32ErrCode));
break;
}
case VMXREFLECTXCPT_DF:
{
hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
rc = VINF_VMX_DOUBLE_FAULT;
Log(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo,
uIdtVector, uExitVector));
break;
}
case VMXREFLECTXCPT_TF:
{
Log(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
rc = VINF_EM_RESET;
break;
}
default:
Assert(rc == VINF_SUCCESS);
break;
}
}
Assert(rc == VINF_SUCCESS || rc == VINF_VMX_DOUBLE_FAULT || rc == VINF_EM_RESET);
return rc;
}
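
/*
 * Illustrative sketch (not part of the build): how a VM-exit handler would typically
 * consume the informational status codes documented above.
 */
#if 0
int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
    return VINF_SUCCESS;    /* Re-enter the guest; the queued #DF is injected on the next VM-entry. */
if (RT_UNLIKELY(rc == VINF_EM_RESET))
    return rc;              /* Triple-fault: hand VINF_EM_RESET up to ring-3. */
#endif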
/**
* Saves the guest's CR0 register from the VMCS into the guest-CPU context.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*
* @remarks No-long-jump zone!!!
*/
static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
int rc = VINF_SUCCESS;
if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0))
{
uint32_t uVal = 0;
uint32_t uShadow = 0;
rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
AssertRCReturn(rc, rc);
uVal = (uShadow & pVCpu->hm.s.vmx.cr0_mask) | (uVal & ~pVCpu->hm.s.vmx.cr0_mask);
CPUMSetGuestCR0(pVCpu, uVal);
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR0;
}
return rc;
}
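
/*
 * Illustrative sketch (not part of the build): the guest/shadow combining rule used
 * above. For host-owned CR0 bits (those set in cr0_mask) the guest sees the read
 * shadow; for guest-owned bits the VMCS guest value is authoritative. The same rule
 * is applied to CR4 below.
 */
#if 0
uint32_t const fMask  = pVCpu->hm.s.vmx.cr0_mask;
uint32_t const uGuest = (uShadow & fMask)       /* Host-owned bits: what the guest was told. */
                      | (uVal    & ~fMask);     /* Guest-owned bits: the real value. */
#endif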
/**
* Saves the guest's CR4 register from the VMCS into the guest-CPU context.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*
* @remarks No-long-jump zone!!!
*/
static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
int rc = VINF_SUCCESS;
if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4))
{
uint32_t uVal = 0;
uint32_t uShadow = 0;
rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
AssertRCReturn(rc, rc);
uVal = (uShadow & pVCpu->hm.s.vmx.cr4_mask) | (uVal & ~pVCpu->hm.s.vmx.cr4_mask);
CPUMSetGuestCR4(pVCpu, uVal);
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR4;
}
return rc;
}
/**
* Saves the guest's RIP register from the VMCS into the guest-CPU context.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*
* @remarks No-long-jump zone!!!
*/
static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
int rc = VINF_SUCCESS;
if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP))
{
RTGCUINTREG uVal = 0;
rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &uVal);
AssertRCReturn(rc, rc);
pMixedCtx->rip = uVal;
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RIP;
}
return rc;
}
/**
* Saves the guest's RSP register from the VMCS into the guest-CPU context.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*
* @remarks No-long-jump zone!!!
*/
static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
int rc = VINF_SUCCESS;
if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RSP))
{
RTGCUINTREG uVal = 0;
rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &uVal);
AssertRCReturn(rc, rc);
pMixedCtx->rsp = uVal;
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RSP;
}
return rc;
}
/**
* Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*
* @remarks No-long-jump zone!!!
*/
static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
int rc = VINF_SUCCESS;
if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS))
{
uint32_t uVal = 0;
rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
AssertRCReturn(rc, rc);
pMixedCtx->eflags.u32 = uVal;
/* Undo our real-on-v86-mode changes to eflags if necessary. */
if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
{
PVM pVM = pVCpu->CTX_SUFF(pVM);
Assert(pVM->hm.s.vmx.pRealModeTSS);
Log(("Saving real-mode RFLAGS VT-x view=%#RX64\n", pMixedCtx->rflags.u64));
pMixedCtx->eflags.Bits.u1VM = 0;
pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.eflags.Bits.u2IOPL;
}
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RFLAGS;
}
return rc;
}
/**
* Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
* guest-CPU context.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context.
*/
DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
return rc;
}
/**
* Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
* from the guest-state area in the VMCS.
*
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*
* @remarks No-long-jump zone!!!
*/
static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
uint32_t uIntrState = 0;
int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
AssertRC(rc);
if (!uIntrState)
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
else
{
Assert( uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
|| uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). */
AssertRC(rc);
EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
}
}
/**
* Saves the guest's activity state.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*
* @remarks No-long-jump zone!!!
*/
static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
/* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_ACTIVITY_STATE;
return VINF_SUCCESS;
}
/**
* Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
* the current VMCS into the guest-CPU context.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*
* @remarks No-long-jump zone!!!
*/
static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
int rc = VINF_SUCCESS;
if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
{
uint32_t u32Val = 0;
rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc);
pMixedCtx->SysEnter.cs = u32Val;
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR;
}
RTGCUINTREG uGCVal = 0;
if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
{
rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &uGCVal); AssertRCReturn(rc, rc);
pMixedCtx->SysEnter.eip = uGCVal;
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR;
}
if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
{
rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &uGCVal); AssertRCReturn(rc, rc);
pMixedCtx->SysEnter.esp = uGCVal;
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR;
}
return rc;
}
/**
* Saves the guest FS_BASE MSR from the current VMCS into the guest-CPU
* context.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*
* @remarks No-long-jump zone!!!
*/
static int hmR0VmxSaveGuestFSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
int rc = VINF_SUCCESS;
if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_FS_BASE_MSR))
{
RTGCUINTREG uVal = 0;
rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_FS_BASE, &uVal); AssertRCReturn(rc, rc);
pMixedCtx->fs.u64Base = uVal;
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_FS_BASE_MSR;
}
return rc;
}
/**
* Saves the guest GS_BASE MSR from the current VMCS into the guest-CPU
* context.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*
* @remarks No-long-jump zone!!!
*/
static int hmR0VmxSaveGuestGSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
int rc = VINF_SUCCESS;
if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GS_BASE_MSR))
{
RTGCUINTREG uVal = 0;
rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GS_BASE, &uVal); AssertRCReturn(rc, rc);
pMixedCtx->gs.u64Base = uVal;
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GS_BASE_MSR;
}
return rc;
}
/**
* Saves the auto load/store'd guest MSRs from the current VMCS into the
* guest-CPU context. Currently these are LSTAR, STAR, SFMASK and TSC_AUX.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*
* @remarks No-long-jump zone!!!
*/
static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
if (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)
return VINF_SUCCESS;
#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
{
PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
pMsr += i;
switch (pMsr->u32IndexMSR)
{
case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;
case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;
case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;
case MSR_K8_TSC_AUX: CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value); break;
case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
case MSR_K6_EFER: /* EFER can't be changed without causing a VM-exit. */ break;
default:
{
AssertFailed();
return VERR_HM_UNEXPECTED_LD_ST_MSR;
}
}
}
#endif
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS;
return VINF_SUCCESS;
}
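
/*
 * Illustrative sketch (not part of the build): the auto load/store area is a packed
 * array of VMXMSR entries (MSR index + 64-bit value). The same layout serves as the
 * VM-entry MSR-load and VM-exit MSR-store area, which is why the loop above can walk
 * it with the single cGuestMsrs count.
 */
#if 0
PVMXMSR paMsrs = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
    Log(("Auto MSR[%u]: idx=%#RX32 val=%#RX64\n", i, paMsrs[i].u32IndexMSR, paMsrs[i].u64Value));
#endif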
/**
* Saves the guest control registers from the current VMCS into the guest-CPU
* context.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*
* @remarks No-long-jump zone!!!
*/
static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
/* Guest CR0. Guest FPU. */
int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
/* Guest CR4. */
rc |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
/* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR3))
{
PVM pVM = pVCpu->CTX_SUFF(pVM);
if ( pVM->hm.s.fNestedPaging
&& CPUMIsGuestPagingEnabledEx(pMixedCtx))
{
RTGCUINTREG uVal = 0;
rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &uVal);
if (pMixedCtx->cr3 != uVal)
{
CPUMSetGuestCR3(pVCpu, uVal);
/* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/
VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
}
/* We require EFER to check PAE mode. */
rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
/* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR. */
{
rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
/* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
}
AssertRCReturn(rc, rc);
}
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR3;
}
return rc;
}
/**
* Reads a guest segment register from the current VMCS into the guest-CPU
* context.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param idxSel Index of the selector in the VMCS.
* @param idxLimit Index of the segment limit in the VMCS.
* @param idxBase Index of the segment base in the VMCS.
* @param idxAccess Index of the access rights of the segment in the VMCS.
* @param pSelReg Pointer to the segment selector.
*
* @remarks No-long-jump zone!!!
* @remarks Never call this function directly. Use the VMXLOCAL_READ_SEG() macro
* as that takes care of whether to read from the VMCS cache or not.
*/
DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
PCPUMSELREG pSelReg)
{
uint32_t u32Val = 0;
int rc = VMXReadVmcs32(idxSel, &u32Val);
pSelReg->Sel = (uint16_t)u32Val;
pSelReg->ValidSel = (uint16_t)u32Val;
pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
rc |= VMXReadVmcs32(idxLimit, &u32Val);
pSelReg->u32Limit = u32Val;
RTGCUINTREG uGCVal = 0;
rc |= VMXReadVmcsGstNByIdxVal(idxBase, &uGCVal);
pSelReg->u64Base = uGCVal;
rc |= VMXReadVmcs32(idxAccess, &u32Val);
pSelReg->Attr.u = u32Val;
AssertRCReturn(rc, rc);
/*
* If VT-x marks the segment as unusable, the rest of the attributes are undefined.
* See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
*/
if (pSelReg->Attr.u & HMVMX_SEL_UNUSABLE)
{
Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR);
pSelReg->Attr.u = HMVMX_SEL_UNUSABLE;
}
return rc;
}
/**
* Saves the guest segment registers from the current VMCS into the guest-CPU
* context.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*
* @remarks No-long-jump zone!!!
*/
static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
#ifdef VMX_USE_CACHED_VMCS_ACCESSES
#define VMXLOCAL_READ_SEG(Sel, CtxSel) \
hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
#else
#define VMXLOCAL_READ_SEG(Sel, CtxSel) \
hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
#endif
int rc = VINF_SUCCESS;
/* Guest segment registers. */
if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SEGMENT_REGS))
{
rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
rc |= VMXLOCAL_READ_SEG(CS, cs);
rc |= VMXLOCAL_READ_SEG(SS, ss);
rc |= VMXLOCAL_READ_SEG(DS, ds);
rc |= VMXLOCAL_READ_SEG(ES, es);
rc |= VMXLOCAL_READ_SEG(FS, fs);
rc |= VMXLOCAL_READ_SEG(GS, gs);
AssertRCReturn(rc, rc);
/* Restore segment attributes for real-on-v86 mode hack. */
if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
{
pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrCS.u;
pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrSS.u;
pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrDS.u;
pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrES.u;
pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrFS.u;
pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrGS.u;
}
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SEGMENT_REGS;
}
/* Guest LDTR. */
if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_LDTR))
{
rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
AssertRCReturn(rc, rc);
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LDTR;
}
/* Guest GDTR. */
RTGCUINTREG uGCVal = 0;
uint32_t u32Val = 0;
if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GDTR))
{
rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &uGCVal);
rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
pMixedCtx->gdtr.pGdt = uGCVal;
pMixedCtx->gdtr.cbGdt = u32Val;
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GDTR;
}
/* Guest IDTR. */
if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_IDTR))
{
rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &uGCVal);
rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
pMixedCtx->idtr.pIdt = uGCVal;
pMixedCtx->idtr.cbIdt = u32Val;
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_IDTR;
}
/* Guest TR. */
if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_TR))
{
rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
/* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR, don't save the fake one. */
if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
rc |= VMXLOCAL_READ_SEG(TR, tr);
AssertRCReturn(rc, rc);
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_TR;
}
return rc;
}
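
/*
 * Illustrative sketch (not part of the build): what VMXLOCAL_READ_SEG(CS, cs) expands
 * to in the non-cached configuration -- one call wiring up the four VMCS field indices
 * for a segment register.
 */
#if 0
rc = hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT,
                           VMX_VMCS_GUEST_CS_BASE, VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
#endif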
/**
* Saves the guest debug registers from the current VMCS into the guest-CPU
* context.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*
* @remarks No-long-jump zone!!!
*/
static int hmR0VmxSaveGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
int rc = VINF_SUCCESS;
if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_DEBUG))
{
RTGCUINTREG uVal;
rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_DR7, &uVal); AssertRCReturn(rc, rc);
pMixedCtx->dr[7] = uVal;
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_DEBUG;
}
return rc;
}
/**
* Saves the guest APIC state from the current VMCS into the guest-CPU context.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*
* @remarks No-long-jump zone!!!
*/
static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
/* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_APIC_STATE;
return VINF_SUCCESS;
}
/**
* Saves the entire guest state from the currently active VMCS into the
* guest-CPU context. This essentially VMREADs all guest-data.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*/
static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
Assert(pVCpu);
Assert(pMixedCtx);
if (pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL)
return VINF_SUCCESS;
VMMRZCallRing3Disable(pVCpu);
Assert(VMMR0IsLogFlushDisabled(pVCpu));
LogFunc(("\n"));
int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
rc = hmR0VmxSaveGuestFSBaseMsr(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestFSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
rc = hmR0VmxSaveGuestGSBaseMsr(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
AssertMsg(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL,
("Missed guest state bits while saving state; residue %RX32\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
VMMRZCallRing3Enable(pVCpu);
return rc;
}
/**
* Checks per-VM and per-VCPU force flag actions that require us to go back to
* ring-3 for one reason or another.
*
* @returns VBox status code (informational status codes included).
* @retval VINF_SUCCESS if we don't have any actions that require going back to
* ring-3.
* @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
* @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
* interrupts)
* @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
* all EMTs to be in ring-3.
* @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
* @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
* to the EM loop.
*
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*/
static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
Assert(VMMRZCallRing3IsEnabled(pVCpu));
int rc = VERR_INTERNAL_ERROR_5;
if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
|| VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
| VMCPU_FF_REQUEST | VMCPU_FF_HM_UPDATE_CR3 | VMCPU_FF_HM_UPDATE_PAE_PDPES))
{
/* We need the control registers now, make sure the guest-CPU context is updated. */
rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
/* Pending HM CR3 sync. */
if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
{
rc = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3);
}
if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
{
rc = PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
AssertRC(rc);
}
/* Pending PGM CR3 sync. */
if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
{
rc = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
if (rc != VINF_SUCCESS)
{
AssertRC(rc);
Log(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
return rc;
}
}
/* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
/* -XXX- what was that about single stepping? */
if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
|| VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
{
STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
Log(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
return rc;
}
/* Pending VM request packets, such as hardware interrupts. */
if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
|| VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
{
Log(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
return VINF_EM_PENDING_REQUEST;
}
/* Pending PGM pool flushes. */
if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
{
Log(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
return VINF_PGM_POOL_FLUSH_PENDING;
}
/* Pending DMA requests. */
if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
{
Log(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
return VINF_EM_RAW_TO_R3;
}
}
/* Paranoia. */
Assert(rc != VERR_EM_INTERPRETER);
return VINF_SUCCESS;
}
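
/*
 * Illustrative sketch (not part of the build): the run loop is expected to call this
 * before every VM-entry and bail out to ring-3 on anything other than VINF_SUCCESS.
 */
#if 0
rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
if (rc != VINF_SUCCESS)
    return rc;  /* E.g. VINF_EM_RAW_TO_R3, VINF_PGM_SYNC_CR3, VINF_EM_PENDING_REQUEST, ... */
#endif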
/**
* Converts any TRPM trap into a pending VMX event. This is typically used when
* entering from ring-3 (not longjmp returns).
*
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
*/
static void hmR0VmxUpdatePendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
{
if (!TRPMHasTrap(pVCpu))
{
Assert(!pVCpu->hm.s.Event.fPending);
return;
}
uint8_t uVector;
TRPMEVENT enmTrpmEvent;
RTGCUINT uErrCode;
RTGCUINTPTR GCPtrFaultAddress;
uint8_t cbInstr;
int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
AssertRC(rc);
/* See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" for the format of u32IntrInfo. */
uint32_t u32IntrInfo = uVector | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
if (enmTrpmEvent == TRPM_TRAP)
{
switch (uVector)
{
case X86_XCPT_BP:
case X86_XCPT_OF:
{
u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
break;
}
case X86_XCPT_PF:
case X86_XCPT_DF:
case X86_XCPT_TS:
case X86_XCPT_NP:
case X86_XCPT_SS:
case X86_XCPT_GP:
case X86_XCPT_AC:
u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
/* no break! */
default:
{
u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
break;
}
}
}
else if (enmTrpmEvent == TRPM_HARDWARE_INT)
{
if (uVector != X86_XCPT_NMI)
u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
else
u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
}
else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
else
AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
rc = TRPMResetTrap(pVCpu);
AssertRC(rc);
Log(("Converting TRPM trap: u32IntrInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
u32IntrInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, uErrCode, GCPtrFaultAddress);
}
/**
* Converts any pending VMX event into a TRPM trap. Typically used when leaving
* VT-x to execute any instruction.
*
* @param pVCpu Pointer to the VMCPU.
*/
static void hmR0VmxUpdateTRPM(PVMCPU pVCpu)
{
if (pVCpu->hm.s.Event.fPending)
{
uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntrInfo);
bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo);
uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
/* If a trap was already pending, we did something wrong! */
Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
TRPMEVENT enmTrapType;
switch (uVectorType)
{
case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
case VMX_IDT_VECTORING_INFO_TYPE_NMI:
enmTrapType = TRPM_HARDWARE_INT;
break;
case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
enmTrapType = TRPM_SOFTWARE_INT;
break;
case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
enmTrapType = TRPM_TRAP;
break;
default:
AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
enmTrapType = TRPM_32BIT_HACK;
break;
}
Log(("Converting pending HM event to TRPM trap uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
AssertRC(rc);
if (fErrorCodeValid)
TRPMSetErrorCode(pVCpu, uErrorCode);
if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
&& uVector == X86_XCPT_PF)
{
TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
}
else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
|| uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
|| uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
{
AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
|| (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
}
pVCpu->hm.s.Event.fPending = false;
}
}
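
/*
 * Illustrative sketch (not part of the build): the two converters are symmetric. On the
 * way into VT-x any TRPM trap becomes a pending HM event; on the way out to ring-3 a
 * still-pending HM event is handed back to TRPM.
 */
#if 0
hmR0VmxUpdatePendingEvent(pVCpu, pCtx);     /* Entering VT-x: TRPM trap -> pending HM event. */
/* ... guest execution, VM-exits ... */
hmR0VmxUpdateTRPM(pVCpu);                   /* Leaving for ring-3: pending HM event -> TRPM trap. */
#endif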
/**
* Does the necessary state syncing before doing a longjmp to ring-3.
*
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
* @param rcExit The reason for exiting to ring-3. Can be
* VINF_VMM_UNKNOWN_RING3_CALL.
*
* @remarks No-long-jmp zone!!!
*/
static void hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
{
Assert(!VMMRZCallRing3IsEnabled(pVCpu));
Assert(VMMR0IsLogFlushDisabled(pVCpu));
int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL);
AssertRC(rc);
/* Restore FPU state if necessary and resync on next R0 reentry. */
if (CPUMIsGuestFPUStateActive(pVCpu))
{
CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
Assert(!CPUMIsGuestFPUStateActive(pVCpu));
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
}
/* Restore debug registers if necessary and resync on next R0 reentry. */
if (CPUMIsGuestDebugStateActive(pVCpu))
{
CPUMR0SaveGuestDebugState(pVM, pVCpu, pMixedCtx, true /* save DR6 */);
Assert(!CPUMIsGuestDebugStateActive(pVCpu));
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
}
else if (CPUMIsHyperDebugStateActive(pVCpu))
{
CPUMR0LoadHostDebugState(pVM, pVCpu);
Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
}
STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
}
/**
* An action requires us to go back to ring-3. This function does the necessary
* steps before we can safely return to ring-3. Unlike a longjmp to ring-3,
* this is voluntary.
*
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
* @param rcExit The reason for exiting to ring-3. Can be
* VINF_VMM_UNKNOWN_RING3_CALL.
*/
static void hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
{
Assert(pVM);
Assert(pVCpu);
Assert(pMixedCtx);
Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_GUEST_STATE))
{
/* We want to see what the guest-state was before VM-entry, don't resync here, as we won't continue guest execution. */
return;
}
else if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
{
VMXGetActivateVMCS(&pVCpu->hm.s.vmx.lasterror.u64VMCSPhys);
pVCpu->hm.s.vmx.lasterror.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
pVCpu->hm.s.vmx.lasterror.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
pVCpu->hm.s.vmx.lasterror.idCurrentCpu = RTMpCpuId();
return;
}
/* Please, no longjumps here (a log flush must not jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
VMMRZCallRing3Disable(pVCpu);
Log(("hmR0VmxExitToRing3: rcExit=%d\n", rcExit));
/* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
hmR0VmxUpdateTRPM(pVCpu);
/* Sync. the guest state. */
hmR0VmxLongJmpToRing3(pVM, pVCpu, pMixedCtx, rcExit);
STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
| CPUM_CHANGED_LDTR
| CPUM_CHANGED_GDTR
| CPUM_CHANGED_IDTR
| CPUM_CHANGED_TR
| CPUM_CHANGED_HIDDEN_SEL_REGS);
/* On our way back from ring-3 the following needs to be done. */
/** @todo This can change with preemption hooks. */
if (rcExit == VINF_EM_RAW_INTERRUPT)
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
else
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
VMMRZCallRing3Enable(pVCpu);
}
/**
* VMMRZCallRing3 callback wrapper which saves the guest state before we
* longjump to ring-3 and possibly get preempted.
*
* @param pVCpu Pointer to the VMCPU.
* @param enmOperation The operation causing the ring-3 longjump.
* @param pvUser The user argument (pointer to the possibly
* out-of-date guest-CPU context).
*
* @remarks Must never be called with @a enmOperation ==
* VMMCALLRING3_VM_R0_ASSERTION.
*/
DECLCALLBACK(void) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
{
/* VMMRZCallRing3() already makes sure we never get called as a result of a longjmp due to an assertion. */
Assert(pVCpu);
Assert(pvUser);
Assert(VMMRZCallRing3IsEnabled(pVCpu));
Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
VMMRZCallRing3Disable(pVCpu);
Assert(VMMR0IsLogFlushDisabled(pVCpu));
Log(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3\n"));
hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);
VMMRZCallRing3Enable(pVCpu);
}
/**
* Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
* cause a VM-exit as soon as the guest is in a state to receive interrupts.
*
* @param pVCpu Pointer to the VMCPU.
*/
DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
{
if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT))
{
if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT))
{
pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
AssertRC(rc);
}
} /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
}
/**
* Injects any pending events into the guest if the guest is in a state to
* receive them.
*
* @returns VBox status code (informational status codes included).
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*/
static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
/* Get the current interruptibility-state of the guest and then figure out what can be injected. */
uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
bool fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
bool fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
Assert(!fBlockSti || (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS));
Assert( !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) /* We don't support block-by-NMI and SMI yet. */
&& !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
int rc = VINF_SUCCESS;
if (pVCpu->hm.s.Event.fPending) /* First, inject any pending HM events. */
{
uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
bool fInject = true;
if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
{
rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
if ( fBlockInt
|| fBlockSti
|| fBlockMovSS)
{
fInject = false;
}
}
else if ( uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
&& ( fBlockMovSS
|| fBlockSti))
{
/* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
fInject = false;
}
if (fInject)
{
Log(("Injecting pending event\n"));
rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.cbInstr,
pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, &uIntrState);
AssertRCReturn(rc, rc);
pVCpu->hm.s.Event.fPending = false;
}
else
hmR0VmxSetIntWindowExitVmcs(pVCpu);
} /** @todo SMI. SMIs take priority over NMIs. */
else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
{
/* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
if ( !fBlockMovSS
&& !fBlockSti)
{
Log(("Injecting NMI\n"));
RTGCUINTPTR uIntrInfo;
uIntrInfo = X86_XCPT_NMI | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
uIntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, uIntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */,
0 /* GCPtrFaultAddress */, &uIntrState);
AssertRCReturn(rc, rc);
VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
}
else
hmR0VmxSetIntWindowExitVmcs(pVCpu);
}
else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
{
/* Check if there are guest external interrupts (PIC/APIC) pending and inject them if the guest can receive them. */
rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
if ( !fBlockInt
&& !fBlockSti
&& !fBlockMovSS)
{
uint8_t u8Interrupt;
rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
if (RT_SUCCESS(rc))
{
Log(("Injecting interrupt u8Interrupt=%#x\n", u8Interrupt));
uint32_t u32IntrInfo = u8Interrupt | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */,
0 /* GCPtrFaultAddress */, &uIntrState);
}
else
{
/** @todo Does this actually happen? If not turn it into an assertion. */
Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
rc = VINF_SUCCESS;
}
}
else
hmR0VmxSetIntWindowExitVmcs(pVCpu);
}
/*
* Deliver a pending debug exception if the guest is single-stepping. The interruptibility-state could have been changed by
* hmR0VmxInjectEventVmcs() (e.g. real-on-v86 injecting software interrupts), re-evaluate it and set the BS bit.
*/
fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
int rc2 = VINF_SUCCESS;
if ( fBlockSti
|| fBlockMovSS)
{
if (!DBGFIsStepping(pVCpu))
{
Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);
if (pMixedCtx->eflags.Bits.u1TF) /* We don't have any IA32_DEBUGCTL MSR for guests. Treat as all bits 0. */
{
/*
* The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD, VMX_EXIT_MTF,
* VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI. See Intel spec. 27.3.4 "Saving Non-Register State".
*/
rc2 = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
}
}
else
{
/* We are single-stepping in the hypervisor debugger, clear interrupt inhibition as setting the BS bit would mean
delivering a #DB to the guest upon VM-entry when it shouldn't be. */
uIntrState = 0;
}
}
/*
* There's no need to clear the VM-entry interruption-information field here if we're not injecting anything.
* VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
*/
rc2 |= hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
AssertRC(rc2);
Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
return rc;
}
/**
* Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
*
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*/
DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
uint32_t u32IntrInfo = X86_XCPT_UD | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); /* #UD is a hardware exception, not an external interrupt. */
STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
}
/**
* Injects a double-fault (#DF) exception into the VM.
*
* @returns VBox status code (informational status code included).
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
* @param puIntrState Pointer to the current guest interruptibility-state. This
* interruptibility-state will be updated if necessary.
*/
DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t *puIntrState)
{
uint32_t u32IntrInfo = X86_XCPT_DF | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
puIntrState);
}
/**
* Sets a debug (#DB) exception as pending-for-injection into the VM.
*
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*/
DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
uint32_t u32IntrInfo = X86_XCPT_DB | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
}
/**
* Sets an overflow (#OF) exception as pending-for-injection into the VM.
*
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
* @param cbInstr The instruction length in bytes (determines the return RIP
* pushed on the guest stack).
*/
DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
{
uint32_t u32IntrInfo = X86_XCPT_OF | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
}
/**
* Injects a general-protection (#GP) fault into the VM.
*
* @returns VBox status code (informational status code included).
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
* @param fErrorCodeValid Whether the error code is valid or not.
* @param u32ErrorCode The error code associated with the #GP.
* @param puIntrState Pointer to the current guest interruptibility-state. This
* interruptibility-state will be updated if necessary.
*/
DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
uint32_t *puIntrState)
{
uint32_t u32IntrInfo = X86_XCPT_GP | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
if (fErrorCodeValid)
u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
puIntrState);
}
/**
* Sets a software interrupt (INTn) as pending-for-injection into the VM.
*
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
* @param uVector The software interrupt vector number.
* @param cbInstr The instruction length in bytes (determines the return RIP
* pushed on the guest stack).
*/
DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
{
uint32_t u32IntrInfo = uVector | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
if ( uVector == X86_XCPT_BP
|| uVector == X86_XCPT_OF)
{
u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
}
else
u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
}
/**
* Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
* stack.
*
* @returns VBox status code (informational status codes included).
* @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
* @param pVM Pointer to the VM.
* @param pMixedCtx Pointer to the guest-CPU context.
* @param uValue The value to push to the guest stack.
*/
DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
{
/*
* The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
* virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
* See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
*/
if (pMixedCtx->sp == 1)
return VINF_EM_RESET;
pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
AssertRCReturn(rc, rc);
return rc;
}
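
/*
 * Illustrative sketch (not part of the build): the segment-wraparound behaviour this
 * helper relies on. SP wraps modulo 64K, so pushing with SP=0 stores the word at
 * SS:0xFFFE; only SP=1, which would split the word across the wrap, is treated as a
 * triple-fault above.
 */
#if 0
pMixedCtx->sp = 0;                                      /* Stack "empty" at the top of the segment. */
hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, 0x1234);  /* SP wraps to 0xFFFE; word written at ss.u64Base + 0xFFFE. */
#endif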
/**
* Injects an event into the guest upon VM-entry by updating the relevant fields
* in the VM-entry area in the VMCS.
*
* @returns VBox status code (informational error codes included).
* @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
* @retval VINF_EM_RESET if event injection resulted in a triple-fault.
*
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may
* be out-of-sync. Make sure to update the required
* fields before using them.
* @param u64IntrInfo The VM-entry interruption-information field.
* @param cbInstr The VM-entry instruction length in bytes (for
* software interrupts, exceptions and privileged
* software exceptions).
* @param u32ErrCode The VM-entry exception error code.
* @param GCPtrFaultAddress The page-fault address for #PF exceptions.
* @param puIntrState Pointer to the current guest interruptibility-state.
* This interruptibility-state will be updated if
* necessary. This cannot be NULL.
*
* @remarks No-long-jump zone!!!
* @remarks Requires CR0!
*/
static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState)
{
/* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
AssertMsg(u64IntrInfo >> 32 == 0, ("%#RX64\n", u64IntrInfo));
Assert(puIntrState);
uint32_t u32IntrInfo = (uint32_t)u64IntrInfo;
const uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntrInfo);
const uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo);
/* Cannot inject an NMI when block-by-MOV SS is in effect. */
Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
|| !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
/* We require CR0 to check if the guest is in real-mode. */
int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
/*
* Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
* mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
* See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
* See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
*/
if (CPUMIsGuestInRealModeEx(pMixedCtx))
{
PVM pVM = pVCpu->CTX_SUFF(pVM);
if (!pVM->hm.s.vmx.fUnrestrictedGuest)
{
Assert(PDMVmmDevHeapIsEnabled(pVM));
Assert(pVM->hm.s.vmx.pRealModeTSS);
/* Save the required guest state bits from the VMCS. */
rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
/* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
const size_t cbIdtEntry = 4;
if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
{
/* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
if (uVector == X86_XCPT_DF)
return VINF_EM_RESET;
else if (uVector == X86_XCPT_GP)
{
/* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, puIntrState);
}
/* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
/* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, puIntrState);
}
/* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
uint16_t uGuestIp = pMixedCtx->ip;
if (VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
{
Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
/* #BP and #OF are both benign traps, we need to resume the next instruction. */
uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
}
else if (VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
/* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
uint16_t offIdtEntry = 0;
RTSEL selIdtEntry = 0;
RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
rc = PGMPhysSimpleReadGCPhys(pVM, &offIdtEntry, GCPhysIdtEntry, sizeof(offIdtEntry));
rc |= PGMPhysSimpleReadGCPhys(pVM, &selIdtEntry, GCPhysIdtEntry + 2, sizeof(selIdtEntry));
AssertRCReturn(rc, rc);
/* Construct the stack frame for the interrupt/exception handler. */
rc = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
AssertRCReturn(rc, rc);
/* Clear the required eflag bits and jump to the interrupt/exception handler. */
if (rc == VINF_SUCCESS)
{
pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
pMixedCtx->rip = offIdtEntry;
pMixedCtx->cs.Sel = selIdtEntry;
pMixedCtx->cs.u64Base = selIdtEntry << 4; /* Real-mode segment base = selector * 16 (unrelated to cbIdtEntry). */
if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
&& uVector == X86_XCPT_PF)
{
pMixedCtx->cr2 = GCPtrFaultAddress;
}
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS
| HM_CHANGED_GUEST_RIP
| HM_CHANGED_GUEST_RFLAGS
| HM_CHANGED_GUEST_RSP;
/* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
if (*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
{
Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
&& uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
Log(("Clearing inhibition due to STI.\n"));
*puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
}
Log(("Injecting real-mode: u32IntrInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntrInfo, u32ErrCode, cbInstr));
}
Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
return rc;
}
else
{
/*
* For unrestricted execution enabled CPUs running real-mode guests, we must not set the deliver-error-code bit.
* See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
*/
u32IntrInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
}
}
/* Validate. */
Assert(VMX_EXIT_INTERRUPTION_INFO_VALID(u32IntrInfo)); /* Bit 31 (Valid bit) must be set by caller. */
Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK(u32IntrInfo)); /* Bit 12 MBZ. */
Assert(!(u32IntrInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
/* Inject. */
rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntrInfo);
if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntrInfo))
rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
&& uVector == X86_XCPT_PF)
{
pMixedCtx->cr2 = GCPtrFaultAddress;
}
Log(("Injecting u32IntrInfo=%#x u32ErrCode=%#x cbInstr=%#x uCR2=%#RGv\n", u32IntrInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
AssertRCReturn(rc, rc);
return rc;
}
/**
* Enters the VT-x session.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pCpu Pointer to the CPU info struct.
*/
VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
{
AssertPtr(pVM);
AssertPtr(pVCpu);
Assert(pVM->hm.s.vmx.fSupported);
Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
NOREF(pCpu);
LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
/* Make sure we're in VMX root mode. */
RTCCUINTREG u32HostCR4 = ASMGetCR4();
if (!(u32HostCR4 & X86_CR4_VMXE))
{
LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
return VERR_VMX_X86_CR4_VMXE_CLEARED;
}
/* Load the active VMCS as the current one. */
int rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
if (RT_FAILURE(rc))
return rc;
/** @todo this will change with preemption hooks where we can VMRESUME as long
 * as we're not preempted. */
pVCpu->hm.s.fResumeVM = false;
return VINF_SUCCESS;
}
/**
* Leaves the VT-x session.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
*/
VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
AssertPtr(pVCpu);
Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
NOREF(pVM);
NOREF(pCtx);
/** @todo this will change with preemption hooks where we only VMCLEAR when
* we are actually going to be preempted, not all the time like we
* currently do. */
/*
* Sync the current VMCS (writes back internal data back into the VMCS region in memory)
* and mark the VMCS launch-state as "clear".
*/
int rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
return rc;
}
/**
* Saves the host state in the VMCS host-state.
* Sets up the VM-exit MSR-load area.
*
* The CPU state will be loaded from these fields on every successful VM-exit.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
*
* @remarks No-long-jump zone!!!
*/
VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
{
AssertPtr(pVM);
AssertPtr(pVCpu);
Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
/* Nothing to do if the host-state-changed flag isn't set. This will later be optimized when preemption hooks are in place. */
if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT))
return VINF_SUCCESS;
int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
return rc;
}
/**
* Loads the guest state into the VMCS guest-state area. The CPU state will be
* loaded from these fields on every successful VM-entry.
*
* Sets up the VM-entry MSR-load and VM-exit MSR-store areas.
* Sets up the VM-entry controls.
* Sets up the appropriate VMX non-root function to execute guest code based on
* the guest CPU mode.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
*
* @remarks No-long-jump zone!!!
*/
VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
AssertPtr(pVM);
AssertPtr(pVCpu);
AssertPtr(pMixedCtx);
Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
/* Determine real-on-v86 mode. */
pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
if ( !pVM->hm.s.vmx.fUnrestrictedGuest
&& CPUMIsGuestInRealModeEx(pMixedCtx))
{
pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
}
/** @todo If the order of loading is important, document it via comments here. */
int rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
rc = hmR0VmxLoadGuestControlRegs(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestControlRegs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
rc = hmR0VmxLoadGuestDebugRegs(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestDebugRegs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
AssertMsg(!pVCpu->hm.s.fContextUseFlags,
("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
return rc;
}
/**
* Does the preparations before executing guest code in VT-x.
*
* This may cause longjmps to ring-3 and may even result in rescheduling to the
* recompiler. We must be cautious what we do here regarding committing
* guest-state information into the VMCS on the assumption that we will definitely
* execute the guest in VT-x. If we fall back to the recompiler after updating the VMCS and
* clearing the common-state (TRPM/forceflags), we must undo those changes so
* that the recompiler can (and should) use them when it resumes guest
* execution. Otherwise such operations must be done when we can no longer
* exit to ring-3.
*
* @returns VBox status code (informational status codes included).
* @retval VINF_SUCCESS if we can proceed with running the guest.
* @retval VINF_EM_RESET if a triple-fault occurs while injecting a double-fault
* into the guest.
* @retval VINF_* scheduling changes, we have to go back to ring-3.
*
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
* @param pVmxTransient Pointer to the VMX transient structure.
*
* @remarks Called with preemption disabled.
*/
DECLINLINE(int) hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
Assert(VMMRZCallRing3IsEnabled(pVCpu));
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
PGMRZDynMapFlushAutoSet(pVCpu);
#endif
/* Check force flag actions that might require us to go back to ring-3. */
int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
if (rc != VINF_SUCCESS)
return rc;
/* Set up virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date; it's not part of the VMCS. */
if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
&& (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
{
Assert(pVM->hm.s.vmx.HCPhysApicAccess);
RTGCPHYS GCPhysApicBase;
GCPhysApicBase = pMixedCtx->msrApicBase;
GCPhysApicBase &= PAGE_BASE_GC_MASK;
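/* The low 12 bits of the APIC-base MSR hold flag/reserved bits (BSP, global enable); masking yields the
   4K-aligned physical base of the APIC page. */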
/* Unalias any existing mapping. */
rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
AssertRCReturn(rc, rc);
/* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
Log(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGv\n", GCPhysApicBase));
rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
AssertRCReturn(rc, rc);
pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
}
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
/* We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.) */
pVmxTransient->uEFlags = ASMIntDisableFlags();
if (RTThreadPreemptIsPending(NIL_RTTHREAD))
{
ASMSetFlags(pVmxTransient->uEFlags);
STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
/* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
return VINF_EM_RAW_INTERRUPT;
}
VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
#endif
/*
* This clears force-flags, TRPM traps & pending HM events. We cannot safely restore the state if we exit to ring-3
* (before running guest code) after calling this function (e.g. how do we reverse the effects of calling PDMGetInterrupt()?)
* This is why this is done after all possible exits-to-ring-3 paths in this code.
*/
/** @todo r=bird: You reverse the effect of calling PDMGetInterrupt by
* handing it over to TPRM like we do in REMR3StateBack using
* TRPMAssertTrap and the other setters. */
rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
return rc;
}
/**
* Prepares to run guest code in VT-x once we have committed to doing so; there
* is no backing out to ring-3 or anywhere else at this point.
*
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
* @param pVmxTransient Pointer to the VMX transient structure.
*
* @remarks Called with preemption disabled.
* @remarks No-long-jump zone!!!
*/
DECLINLINE(void) hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
Assert(!VMMRZCallRing3IsEnabled(pVCpu));
Assert(VMMR0IsLogFlushDisabled(pVCpu));
#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
/** @todo I don't see the point of this; VMMR0EntryFast() already disables interrupts for the entire period. */
pVmxTransient->uEFlags = ASMIntDisableFlags();
VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
#endif
/* Load the required guest state bits (for guest-state changes in the inner execution loop). */
Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
Log4(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
#ifdef HMVMX_SYNC_FULL_GUEST_STATE
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
#endif
int rc = VINF_SUCCESS;
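/* Fast path: when only the guest RIP has changed since the last VM-entry, reload just that field instead of
   the full guest state. */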
if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)
{
rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
}
else if (pVCpu->hm.s.fContextUseFlags)
{
rc = VMXR0LoadGuestState(pVM, pVCpu, pMixedCtx);
STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
}
AssertRC(rc);
AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
/* Cache the TPR-shadow for checking on every VM-exit if it might have changed. */
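/* Offset 0x80 into the virtual-APIC page holds the TPR (VTPR); we cache that byte here. */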
if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
|| HMR0GetCurrentCpu()->idCpu != pVCpu->hm.s.idLastCpu)
{
hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pMixedCtx);
pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
}
ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
hmR0VmxFlushTaggedTlb(pVCpu); /* Invalidate the appropriate guest entries from the TLB. */
Assert(HMR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
/*
* TPR patching (only active for 32-bit guests on 64-bit capable CPUs) when the CPU does not support the
* virtualize-APIC-accesses feature (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).
*/
if (pVM->hm.s.fTPRPatchingActive)
{
Assert(!CPUMIsGuestInLongMode(pVCpu));
/* Need guest's LSTAR MSR (which is part of the auto load/store MSRs in the VMCS), ensure we have the updated one. */
rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
AssertRC(rc);
/* The patch code uses LSTAR because a guest in 32-bit mode never uses it implicitly (SYSCALL is 64-bit only). */
pVmxTransient->u64LStarMsr = ASMRdMsr(MSR_K8_LSTAR);
ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR); /* pMixedCtx->msrLSTAR contains the guest's TPR,
see hmR0VmxLoadGuestApicState(). */
}
#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
/*
* Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
* RDTSCPs (that don't cause exits) read the guest MSR. See @bugref{3324}.
*/
if ( (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
&& !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
{
pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
uint64_t u64GuestTscAux = 0;
int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
AssertRC(rc2);
ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
}
#endif
STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
to start executing. */
}
/**
* Performs some essential restoration of state after running guest code in
* VT-x.
*
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
* @param pVmxTransient Pointer to the VMX transient structure.
* @param rcVMRun Return code of VMLAUNCH/VMRESUME.
*
* @remarks Called with interrupts disabled.
* @remarks No-long-jump zone!!! This function will however re-enable longjmps
* unconditionally when it is safe to do so.
*/
DECLINLINE(void) hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
{
Assert(!VMMRZCallRing3IsEnabled(pVCpu));
STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
pVCpu->hm.s.vmx.fUpdatedGuestState = 0; /* Exits/longjmps to ring-3 requires saving the guest state. */
pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
{
#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
/* Restore host's TSC_AUX. */
if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
#endif
/** @todo Find a way to fix hardcoding a guestimate. */
TMCpuTickSetLastSeen(pVCpu, ASMReadTSC()
+ pVCpu->hm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
}
TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
Assert(!(ASMGetFlags() & X86_EFL_IF));
VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
/* Restore the effects of TPR patching if any. */
if (pVM->hm.s.fTPRPatchingActive)
{
int rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
AssertRC(rc);
pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR); /* MSR_K8_LSTAR contains the guest TPR. */
ASMWrMsr(MSR_K8_LSTAR, pVmxTransient->u64LStarMsr);
}
ASMSetFlags(pVmxTransient->uEFlags); /* Enable interrupts. */
pVCpu->hm.s.fResumeVM = true; /* Use VMRESUME instead of VMLAUNCH in the next run. */
/* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
uint32_t uExitReason;
int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
rc |= hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
AssertRC(rc);
pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
pVmxTransient->fVMEntryFailed = !!VMX_ENTRY_INTERRUPTION_INFO_VALID(pVmxTransient->uEntryIntrInfo);
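/* Bits 15:0 of the exit-reason field hold the basic exit reason; bit 31 is set when the VM-entry itself failed. */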
VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pMixedCtx);
VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
/* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */
if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
{
Log(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
return;
}
if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
{
/* Update the guest interruptibility-state from the VMCS. */
hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
#if defined(HMVMX_SYNC_FULL_GUEST_STATE) || defined(HMVMX_SAVE_FULL_GUEST_STATE)
rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
AssertRC(rc);
#endif
/*
* If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
* we eventually get a VM-exit for any reason. This may be expensive as PDMApicSetTPR() can longjmp to ring-3; that is also why
* we do it outside of hmR0VmxSaveGuestState() which must never cause longjmps.
*/
if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
&& pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
{
rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
AssertRC(rc);
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
}
}
}
/**
* Runs the guest code using VT-x.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param pVCpu Pointer to the VMCPU.
* @param pCtx Pointer to the guest-CPU context.
*
* @remarks Called with preemption disabled.
*/
VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
Assert(VMMRZCallRing3IsEnabled(pVCpu));
Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
VMXTRANSIENT VmxTransient;
VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
int rc = VERR_INTERNAL_ERROR_5;
uint32_t cLoops = 0;
hmR0VmxUpdatePendingEvent(pVCpu, pCtx);
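/*
 * The inner run loop: prepare for the world switch, execute the guest, restore host state and handle the VM-exit.
 * We keep iterating until a VM-exit handler requests ring-3 or the max-resume-loops limit is hit.
 */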
for (;; cLoops++)
{
Assert(!HMR0SuspendPending());
AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
(unsigned)RTMpCpuId(), cLoops));
/* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
if (rc != VINF_SUCCESS)
break;
/*
* No longjmps to ring-3 from this point on!!!
* Asserts() will still longjmp to ring-3 (but won't return), which is intentional and better than a kernel panic.
* This also disables flushing of the R0-logger instance (if any).
*/
VMMRZCallRing3Disable(pVCpu);
VMMRZCallRing3RemoveNotification(pVCpu);
hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
/* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
/*
* Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
* This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
*/
hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
if (RT_UNLIKELY(rc != VINF_SUCCESS)) /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
{
STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
return rc;
}
/* Handle the VM-exit. */
STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
#ifdef HM_PROFILE_EXIT_DISPATCH
STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed);
#endif
#ifdef HMVMX_USE_FUNCTION_TABLE
rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
#else
rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
#endif
STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
if (rc != VINF_SUCCESS)
break;
else if (cLoops > pVM->hm.s.cMaxResumeLoops)
{
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
rc = VINF_EM_RAW_INTERRUPT;
break;
}
}
STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
if (rc == VERR_EM_INTERPRETER)
rc = VINF_EM_RAW_EMULATE_INSTR;
else if (rc == VINF_EM_RESET)
rc = VINF_EM_TRIPLE_FAULT;
hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rc);
return rc;
}
#ifndef HMVMX_USE_FUNCTION_TABLE
DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
{
int rc;
switch (rcReason)
{
case VMX_EXIT_EPT_MISCONFIG: rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_EPT_VIOLATION: rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_IO_INSTR: rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_CPUID: rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_RDTSC: rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_RDTSCP: rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_APIC_ACCESS: rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_XCPT_NMI: rc = hmR0VmxExitXcptNmi(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_MOV_CRX: rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_EXT_INT: rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_INT_WINDOW: rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_MWAIT: rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_MONITOR: rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_TASK_SWITCH: rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_PREEMPT_TIMER: rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_RDMSR: rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_WRMSR: rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_MOV_DRX: rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_TPR_BELOW_THRESHOLD: rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_HLT: rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_INVD: rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_INVLPG: rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_RSM: rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_MTF: rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_PAUSE: rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_XDTR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_TR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_WBINVD: rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_XSETBV: rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_RDRAND: rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_INVPCID: rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_GETSEC: rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_RDPMC: rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_NMI_WINDOW: rc = hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_INIT_SIGNAL: rc = hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_SIPI: rc = hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_IO_SMI: rc = hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_SMI: rc = hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_ERR_MSR_LOAD: rc = hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient); break;
case VMX_EXIT_VMCALL:
case VMX_EXIT_VMCLEAR:
case VMX_EXIT_VMLAUNCH:
case VMX_EXIT_VMPTRLD:
case VMX_EXIT_VMPTRST:
case VMX_EXIT_VMREAD:
case VMX_EXIT_VMRESUME:
case VMX_EXIT_VMWRITE:
case VMX_EXIT_VMXOFF:
case VMX_EXIT_VMXON:
case VMX_EXIT_INVEPT:
case VMX_EXIT_INVVPID:
case VMX_EXIT_VMFUNC:
rc = hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
break;
default:
rc = hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
break;
}
return rc;
}
#endif
/** Profiling macro. */
#ifdef HM_PROFILE_EXIT_DISPATCH
# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
#else
# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
#endif
#ifdef DEBUG
/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
# define VMX_ASSERT_PREEMPT_CPUID_VAR() \
RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
# define VMX_ASSERT_PREEMPT_CPUID() \
do \
{ \
RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
} while (0)
# define VMX_VALIDATE_EXIT_HANDLER_PARAMS() \
do { \
AssertPtr(pVCpu); \
AssertPtr(pMixedCtx); \
AssertPtr(pVmxTransient); \
Assert(pVmxTransient->fVMEntryFailed == false); \
Assert(ASMIntAreEnabled()); \
Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
VMX_ASSERT_PREEMPT_CPUID_VAR(); \
LogFunc(("vcpu[%u] vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n", \
(unsigned)pVCpu->idCpu)); \
Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
if (VMMR0IsLogFlushDisabled(pVCpu)) \
VMX_ASSERT_PREEMPT_CPUID(); \
HMVMX_STOP_EXIT_DISPATCH_PROF(); \
} while (0)
# define VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
do { \
LogFunc(("\n")); \
} while(0)
#else /* Release builds */
# define VMX_VALIDATE_EXIT_HANDLER_PARAMS() do { HMVMX_STOP_EXIT_DISPATCH_PROF(); } while(0)
# define VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while(0)
#endif
/**
* Advances the guest RIP after reading it from the VMCS.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pMixedCtx Pointer to the guest-CPU context. The data may be
* out-of-sync. Make sure to update the required fields
* before using them.
* @param pVmxTransient Pointer to the VMX transient structure.
*
* @remarks No-long-jump zone!!!
*/
DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
pMixedCtx->rip += pVmxTransient->cbInstr;
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
return rc;
}
/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
/**
* VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
*/
static DECLCALLBACK(int) hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
Assert(ASMIntAreEnabled());
return VINF_SUCCESS;
#else
return VINF_EM_RAW_INTERRUPT;
#endif
}
/**
* VM-exit handler for exceptions and NMIs (VMX_EXIT_XCPT_NMI).
*/
static DECLCALLBACK(int) hmR0VmxExitXcptNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
int rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
AssertRCReturn(rc, rc);
uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntrInfo);
Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT)
&& uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
{
STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
return VINF_EM_RAW_INTERRUPT;
}
/* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
{
STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
return VINF_SUCCESS;
}
else if (RT_UNLIKELY(rc == VINF_EM_RESET))
{
STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
return rc;
}
uint32_t uExitIntrInfo = pVmxTransient->uExitIntrInfo;
uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntrInfo);
switch (uIntrType)
{
case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception (#DB, #BP or #OF; see the Assert below). */
Assert(uVector == X86_XCPT_DB || uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
/* no break */
case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
{
switch (uVector)
{
case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
#endif
default:
{
rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
{
Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
AssertRCReturn(rc, rc);
hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntrInfo),
pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode,
0 /* GCPtrFaultAddress */);
AssertRCReturn(rc, rc);
}
else
{
AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
rc = VERR_VMX_UNEXPECTED_EXCEPTION;
}
break;
}
}
break;
}
case VMX_EXIT_INTERRUPTION_INFO_TYPE_DB_XCPT:
default:
{
rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE;
AssertMsgFailed(("Unexpected interruption code %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntrInfo)));
break;
}
}
STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
return rc;
}
/**
* VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
*/
static DECLCALLBACK(int) hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
/* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT);
pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
AssertRCReturn(rc, rc);
/* Deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and resume guest execution. */
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
return VINF_SUCCESS;
}
/**
* VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
*/
static DECLCALLBACK(int) hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
AssertMsgFailed(("Unexpected NMI-window exit.\n"));
return VERR_VMX_UNEXPECTED_EXIT_CODE;
}
/**
* VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
}
/**
* VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
}
/**
* VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
PVM pVM = pVCpu->CTX_SUFF(pVM);
int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
if (RT_LIKELY(rc == VINF_SUCCESS))
{
rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
Assert(pVmxTransient->cbInstr == 2);
}
else
{
AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
rc = VERR_EM_INTERPRETER;
}
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
return rc;
}
/**
* VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
if (pMixedCtx->cr4 & X86_CR4_SMXE)
return VINF_EM_RAW_EMULATE_INSTR;
AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
return VERR_VMX_UNEXPECTED_EXIT_CODE;
}
/**
* VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
AssertRCReturn(rc, rc);
PVM pVM = pVCpu->CTX_SUFF(pVM);
rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
if (RT_LIKELY(rc == VINF_SUCCESS))
{
rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
Assert(pVmxTransient->cbInstr == 2);
/* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING)
pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
}
else
{
AssertMsgFailed(("hmR0VmxExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc));
rc = VERR_EM_INTERPRETER;
}
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
return rc;
}
/**
* VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
AssertRCReturn(rc, rc);
PVM pVM = pVCpu->CTX_SUFF(pVM);
rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
if (RT_LIKELY(rc == VINF_SUCCESS))
{
rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
Assert(pVmxTransient->cbInstr == 3);
/* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING)
pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
}
else
{
AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
rc = VERR_EM_INTERPRETER;
}
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
return rc;
}
/**
* VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
AssertRCReturn(rc, rc);
PVM pVM = pVCpu->CTX_SUFF(pVM);
rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
if (RT_LIKELY(rc == VINF_SUCCESS))
{
rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
Assert(pVmxTransient->cbInstr == 2);
}
else
{
AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
rc = VERR_EM_INTERPRETER;
}
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
return rc;
}
/**
* VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
PVM pVM = pVCpu->CTX_SUFF(pVM);
Assert(!pVM->hm.s.fNestedPaging);
int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
rc = VBOXSTRICTRC_VAL(rc2);
if (RT_LIKELY(rc == VINF_SUCCESS))
rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
else
{
AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RGv failed with %Rrc\n",
pVmxTransient->uExitQualification, rc));
}
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
return rc;
}
/**
* VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
PVM pVM = pVCpu->CTX_SUFF(pVM);
rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
if (RT_LIKELY(rc == VINF_SUCCESS))
rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
else
{
AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
rc = VERR_EM_INTERPRETER;
}
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
return rc;
}
/**
* VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
PVM pVM = pVCpu->CTX_SUFF(pVM);
VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
rc = VBOXSTRICTRC_VAL(rc2);
if (RT_LIKELY( rc == VINF_SUCCESS
|| rc == VINF_EM_HALT))
{
int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
AssertRCReturn(rc3, rc3);
if ( rc == VINF_EM_HALT
&& EMShouldContinueAfterHalt(pVCpu, pMixedCtx))
{
rc = VINF_SUCCESS;
}
}
else
{
AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
rc = VERR_EM_INTERPRETER;
}
AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
return rc;
}
/**
* VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
/*
* Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
* get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
* executing VMCALL in VMX root operation. If we get here, something funny is going on.
* See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
*/
AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
return VERR_VMX_UNEXPECTED_EXIT_CODE;
}
/**
* VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
/*
* This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
* root operation. If we get here, something funny is going on.
* See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits"
*/
AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
return VERR_VMX_UNEXPECTED_EXIT_CODE;
}
/**
* VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
/* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
return VERR_VMX_UNEXPECTED_EXIT_CODE;
}
/**
* VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
/*
* SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
* don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
* See Intel spec. 25.3 "Other Causes of VM-exits".
*/
AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
return VERR_VMX_UNEXPECTED_EXIT_CODE;
}
/**
* VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
* VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
/*
* INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM. See Intel spec. "33.14.1 Default Treatment of
* SMI Delivery" and "29.3 VMX Instructions" for "VMXON". It is -NOT- blocked in VMX non-root operation so we can potentially
* still get these exits. See Intel spec. "23.8 Restrictions on VMX operation".
*/
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
return VINF_SUCCESS; /** @todo r=ramshankar: correct? */
}
/**
* VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
* VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
return VINF_EM_RESET;
}
/**
* VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT);
int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
pMixedCtx->rip++;
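/* HLT is a single-byte opcode (0xF4), so advancing RIP by one skips it. */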
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
rc = VINF_SUCCESS;
else
rc = VINF_EM_HALT;
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
return rc;
}
/**
* VM-exit handler for instructions that result in a #UD exception delivered to the guest.
*/
static DECLCALLBACK(int) hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
return VINF_SUCCESS;
}
/**
* VM-exit handler for expiry of the VMX preemption timer.
*/
static DECLCALLBACK(int) hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
/* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
/* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
PVM pVM = pVCpu->CTX_SUFF(pVM);
bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
}
/**
* VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
/* We expose XSETBV to the guest; fall back to the recompiler for emulation. */
/** @todo check if XSETBV is supported by the recompiler. */
return VERR_EM_INTERPRETER;
}
/**
* VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
/* The guest should not invalidate the host CPU's TLBs; fall back to the recompiler. */
/** @todo implement EMInterpretInvpcid() */
return VERR_EM_INTERPRETER;
}
/**
* VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
* Error VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
uint32_t uIntrState;
HMVMXHCUINTREG uHCReg;
uint64_t u64Val;
uint32_t u32Val;
int rc = hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
rc |= hmR0VmxReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
Log(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntrInfo));
Log(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
Log(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
Log(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
Log(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
Log(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
Log(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
Log(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
PVM pVM = pVCpu->CTX_SUFF(pVM);
HMDumpRegs(pVM, pVCpu, pMixedCtx);
return VERR_VMX_INVALID_GUEST_STATE;
}
/**
* VM-exit handler for VM-entry failure due to an MSR-load
* (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
return VERR_VMX_UNEXPECTED_EXIT_CODE;
}
/**
* VM-exit handler for VM-entry failure due to a machine-check event
* (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
return VERR_VMX_UNEXPECTED_EXIT_CODE;
}
/**
* VM-exit handler for all undefined reasons. Should never ever happen.. in
* theory.
*/
static DECLCALLBACK(int) hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
return VERR_VMX_UNDEFINED_EXIT_CODE;
}
/**
* VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
* (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
* Conditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
/* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
return VERR_EM_INTERPRETER;
AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
return VERR_VMX_UNEXPECTED_EXIT_CODE;
}
/**
* VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
/* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
return VERR_EM_INTERPRETER;
AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
return VERR_VMX_UNEXPECTED_EXIT_CODE;
}
/**
* VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
*/
static DECLCALLBACK(int) hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
/* EMInterpretRdmsr() requires CR0, EFLAGS and the SS segment register. */
int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
PVM pVM = pVCpu->CTX_SUFF(pVM);
rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
if (RT_LIKELY(rc == VINF_SUCCESS))
{
rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
Assert(pVmxTransient->cbInstr == 2);
}
return rc;
}
/**
* VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
*/
static DECLCALLBACK(int) hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
PVM pVM = pVCpu->CTX_SUFF(pVM);
int rc = VINF_SUCCESS;
/* If TPR patching is active, LSTAR holds the guest TPR, writes to it must be propagated to the APIC. */
if ( pVM->hm.s.fTPRPatchingActive
&& pMixedCtx->ecx == MSR_K8_LSTAR)
{
Assert(!CPUMIsGuestInLongModeEx(pMixedCtx)); /* Requires EFER but it's always up-to-date. */
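/* WRMSR writes edx:eax; the patched guest code keeps the TPR in the low byte (AL) of the LSTAR value,
   hence the 0xff mask below. */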
if ((pMixedCtx->eax & 0xff) != pVmxTransient->u8GuestTpr)
{
rc = PDMApicSetTPR(pVCpu, pMixedCtx->eax & 0xff);
AssertRC(rc);
}
rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
Assert(pVmxTransient->cbInstr == 2);
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
return VINF_SUCCESS;
}
/* EMInterpretWrmsr() requires CR0, EFLAGS and the SS segment register. */
rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
Log(("ecx=%#RX32\n", pMixedCtx->ecx));
rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
if (RT_LIKELY(rc == VINF_SUCCESS))
{
rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
/* If this is an X2APIC WRMSR access, update the APIC state as well. */
if ( pMixedCtx->ecx >= MSR_IA32_X2APIC_START
&& pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
{
Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_APIC_STATE);
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
}
else if (pMixedCtx->ecx == MSR_K6_EFER) /* EFER is the only MSR we auto-load but don't allow write-passthrough. */
{
rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS;
}
else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
/* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. */
if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)))
{
switch (pMixedCtx->ecx)
{
case MSR_IA32_SYSENTER_CS: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_CS_MSR; break;
case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
case MSR_K8_FS_BASE: /* no break */
case MSR_K8_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS; break;
/* MSR_K8_KERNEL_GS_BASE: Nothing to do as it's not part of the VMCS. Manually loaded each time on VM-entry. */
}
}
#ifdef VBOX_STRICT
else
{
/* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
switch (pMixedCtx->ecx)
{
case MSR_IA32_SYSENTER_CS:
case MSR_IA32_SYSENTER_EIP:
case MSR_IA32_SYSENTER_ESP:
case MSR_K8_FS_BASE:
case MSR_K8_GS_BASE:
{
AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
return VERR_VMX_UNEXPECTED_EXIT_CODE;
}
case MSR_K8_LSTAR:
case MSR_K6_STAR:
case MSR_K8_SF_MASK:
case MSR_K8_TSC_AUX:
case MSR_K8_KERNEL_GS_BASE:
{
AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
pMixedCtx->ecx));
return VERR_VMX_UNEXPECTED_EXIT_CODE;
}
}
}
#endif /* VBOX_STRICT */
}
return rc;
}
/**
* VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
/* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT. */
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
return VERR_EM_INTERPRETER;
AssertMsgFailed(("Unexpected PAUSE exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
return VERR_VMX_UNEXPECTED_EXIT_CODE;
}
/**
* VM-exit handler for when the TPR value is lowered below the specified
* threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW);
/*
* The TPR has already been updated, see hmR0VmxPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
* the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and
* resume guest execution.
*/
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
return VINF_SUCCESS;
}
/**
* VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
* VM-exit.
*
* @retval VINF_SUCCESS when guest execution can continue.
* @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
* @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
* @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
* recompiler.
*/
static DECLCALLBACK(int) hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
AssertRCReturn(rc, rc);
const RTGCUINTPTR uExitQualification = pVmxTransient->uExitQualification;
const uint32_t uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
PVM pVM = pVCpu->CTX_SUFF(pVM);
switch (uAccessType)
{
case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */
{
#if 0
/* EMInterpretCRxWrite() references a lot of guest state (EFER, RFLAGS, Segment Registers, etc.) Sync entire state */
rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
#else
rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
#endif
AssertRCReturn(rc, rc);
rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
{
case 0: /* CR0 */
Log(("CRX CR0 write rc=%d CR0=%#RGv\n", rc, pMixedCtx->cr0));
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
break;
case 2: /* CR2 */
/* Nothing to do here; CR2 is not part of the VMCS. */
break;
case 3: /* CR3 */
Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
Log(("CRX CR3 write rc=%d CR3=%#RGv\n", rc, pMixedCtx->cr3));
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
break;
case 4: /* CR4 */
Log(("CRX CR4 write rc=%d CR4=%#RGv\n", rc, pMixedCtx->cr4));
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
break;
case 8: /* CR8 */
Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
/* CR8 contains the APIC TPR. Was updated by EMInterpretCRxWrite(). */
/* We don't need to update HM_CHANGED_VMX_GUEST_APIC_STATE here as this -cannot- happen with TPR shadowing. */
break;
default:
AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
break;
}
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
break;
}
case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */
{
/* EMInterpretCRxRead() requires EFER MSR, CS. */
rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
Assert( !pVM->hm.s.fNestedPaging
|| !CPUMIsGuestPagingEnabledEx(pMixedCtx)
|| VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
/* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
|| !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
Log(("CRX CR%d Read access rc=%d\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), rc));
break;
}
case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
{
rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
rc = EMInterpretCLTS(pVM, pVCpu);
AssertRCReturn(rc, rc);
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
Log(("CRX CLTS write rc=%d\n", rc));
break;
}
case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
{
rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
if (RT_LIKELY(rc == VINF_SUCCESS))
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
Log(("CRX LMSW write rc=%d\n", rc));
break;
}
default:
{
AssertMsgFailed(("Invalid access-type in Mov CRx exit qualification %#x\n", uAccessType));
rc = VERR_VMX_UNEXPECTED_EXCEPTION;
}
}
/* Validate possible error codes. */
Assert(rc == VINF_SUCCESS || rc == VINF_PGM_CHANGE_MODE || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_SYNC_CR3
|| rc == VERR_VMX_UNEXPECTED_EXCEPTION);
if (RT_SUCCESS(rc))
{
int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
AssertRCReturn(rc2, rc2);
}
STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
return rc;
}

/**
* VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
* VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */
rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
/* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
AssertRCReturn(rc, rc);
Log(("CS:RIP=%04x:%#RGv\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
/* Refer Intel spec. Table 27-5 "Exit Qualifications for I/O Instructions" for the format. */
uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
uint32_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
bool fIOWrite = (VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
== VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
bool fIOString = (VMX_EXIT_QUALIFICATION_IO_STRING(pVmxTransient->uExitQualification) == 1);
Assert(uIOWidth == 0 || uIOWidth == 1 || uIOWidth == 3);
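/* The I/O width field encodes the access size as 0 (1 byte), 1 (2 bytes) or 3 (4 bytes); 2 is not a valid encoding. The lookup tables below are indexed by this raw value. */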
/* I/O operation lookup arrays. */
static const uint32_t s_aIOSize[4] = { 1, 2, 0, 4 }; /* Size of the I/O Accesses. */
static const uint32_t s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
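/* E.g. a 16-bit 'in ax, dx' has uIOWidth=1, giving cbSize=2 and uAndVal=0xffff, so only AX is read/updated below. */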
const uint32_t cbSize = s_aIOSize[uIOWidth];
const uint32_t cbInstr = pVmxTransient->cbInstr;
PVM pVM = pVCpu->CTX_SUFF(pVM);
if (fIOString)
{
/* INS/OUTS - I/O String instruction. */
PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
/** @todo For now we disassemble manually; later, optimize by getting the fields
* from the VMCS. */
/** @todo VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR contains the flat pointer
* operand of the instruction. VMX_VMCS32_RO_EXIT_INSTR_INFO contains
* segment prefix info. */
rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
if (RT_SUCCESS(rc))
{
if (fIOWrite)
{
VBOXSTRICTRC rc2 = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
(DISCPUMODE)pDis->uAddrMode, cbSize);
rc = VBOXSTRICTRC_VAL(rc2);
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
}
else
{
VBOXSTRICTRC rc2 = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
(DISCPUMODE)pDis->uAddrMode, cbSize);
rc = VBOXSTRICTRC_VAL(rc2);
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
}
}
else
{
AssertMsg(rc == VERR_EM_INTERPRETER, ("rc=%Rrc RIP %#RX64\n", rc, pMixedCtx->rip));
rc = VINF_EM_RAW_EMULATE_INSTR;
}
}
else
{
/* IN/OUT - I/O instruction. */
const uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
Assert(!VMX_EXIT_QUALIFICATION_IO_REP(pVmxTransient->uExitQualification));
if (fIOWrite)
{
VBOXSTRICTRC rc2 = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbSize);
rc = VBOXSTRICTRC_VAL(rc2);
if (rc == VINF_IOM_R3_IOPORT_WRITE)
HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbSize);
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
}
else
{
uint32_t u32Result = 0;
VBOXSTRICTRC rc2 = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbSize);
rc = VBOXSTRICTRC_VAL(rc2);
if (IOM_SUCCESS(rc))
{
/* Save result of I/O IN instr. in AL/AX/EAX. */
pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
}
else if (rc == VINF_IOM_R3_IOPORT_READ)
HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbSize);
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
}
}
if (IOM_SUCCESS(rc))
{
pMixedCtx->rip += cbInstr;
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
if (RT_LIKELY(rc == VINF_SUCCESS))
{
rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx); /* For DR7. */
AssertRCReturn(rc, rc);
/* If any I/O breakpoints are armed, check whether a debug trap needs to be generated. */
if (pMixedCtx->dr[7] & X86_DR7_ENABLED_MASK)
{
STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
for (unsigned i = 0; i < 4; i++)
{
uint32_t uBPLen = s_aIOSize[X86_DR7_GET_LEN(pMixedCtx->dr[7], i)];
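/* The DR7 LEN encoding (0=1, 1=2, 3=4 bytes) matches the I/O size table above; LEN=2 (8 bytes) is not valid for I/O breakpoints, hence the 0 entry. */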
if ( ( uIOPort >= pMixedCtx->dr[i]
&& uIOPort < pMixedCtx->dr[i] + uBPLen)
&& (pMixedCtx->dr[7] & (X86_DR7_L(i) | X86_DR7_G(i)))
&& (pMixedCtx->dr[7] & X86_DR7_RW(i, X86_DR7_RW_IO)) == X86_DR7_RW(i, X86_DR7_RW_IO))
{
Assert(CPUMIsGuestDebugStateActive(pVCpu));
uint64_t uDR6 = ASMGetDR6();
/* Clear all breakpoint status flags and set the one we just hit. */
uDR6 &= ~(X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3);
uDR6 |= (uint64_t)RT_BIT(i);
/*
* Note: AMD64 Architecture Programmer's Manual 13.1:
* Bits 15:13 of the DR6 register are never cleared by the processor and must
* be cleared by software after the contents have been read.
*/
ASMSetDR6(uDR6);
/* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
pMixedCtx->dr[7] &= ~X86_DR7_GD;
/* Paranoia. */
pMixedCtx->dr[7] &= 0xffffffff; /* Upper 32 bits reserved. */
pMixedCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* MBZ. */
pMixedCtx->dr[7] |= 0x400; /* MB1. */
/* Resync DR7 */
/** @todo probably cheaper to just reload DR7, nothing else needs changing. */
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
/* Set #DB to be injected into the VM and continue guest execution. */
hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
break;
}
}
}
}
}
#ifdef DEBUG
if (rc == VINF_IOM_R3_IOPORT_READ)
Assert(!fIOWrite);
else if (rc == VINF_IOM_R3_IOPORT_WRITE)
Assert(fIOWrite);
else
{
AssertMsg( RT_FAILURE(rc)
|| rc == VINF_SUCCESS
|| rc == VINF_EM_RAW_EMULATE_INSTR
|| rc == VINF_EM_RAW_GUEST_TRAP
|| rc == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", rc));
}
#endif
STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
return rc;
}

/**
* VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
* VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
/* Check if this task-switch occurred while delivering an event through the guest IDT. */
int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
AssertRCReturn(rc, rc);
if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
{
rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
AssertRCReturn(rc, rc);
if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
{
uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
/* Software interrupts and exceptions will be regenerated when the recompiler restarts the instruction. */
if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
&& uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
&& uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
{
uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
/* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */
Assert(!pVCpu->hm.s.Event.fPending);
pVCpu->hm.s.Event.fPending = true;
pVCpu->hm.s.Event.u64IntrInfo = pVmxTransient->uIdtVectoringInfo;
rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
AssertRCReturn(rc, rc);
if (fErrorCodeValid)
pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
else
pVCpu->hm.s.Event.u32ErrCode = 0;
if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
&& uVector == X86_XCPT_PF)
{
pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
}
Log(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
}
}
}
/** @todo Emulate task switch someday, currently just going back to ring-3 for
* emulation. */
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
return VERR_EM_INTERPRETER;
}

/**
* VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG);
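/* The monitor trap flag was armed for single-stepping the guest; it has fired, so disarm it and hand control back to the debugger. */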
pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
AssertRCReturn(rc, rc);
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
return VINF_EM_DBG_STOP;
}

/**
* VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
/* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
return VINF_SUCCESS;
else if (RT_UNLIKELY(rc == VINF_EM_RESET))
return rc;
#if 0
/** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
* just sync the whole thing. */
rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
#else
/* Aggressive state sync. for now. */
rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
#endif
rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
AssertRCReturn(rc, rc);
/* See Intel spec. Table 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
switch (uAccessType)
{
case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
{
if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
&& VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) == 0x80)
{
AssertMsgFailed(("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
}
RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
GCPhys &= PAGE_BASE_GC_MASK;
GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
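/* E.g. with the APIC base at the usual 0xfee00000, an access at offset 0x80 (the TPR) yields GCPhys 0xfee00080 for the MMIO handler below. */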
PVM pVM = pVCpu->CTX_SUFF(pVM);
Log(("ApicAccess uAccessType=%#x GCPhys=%RGp Off=%#x\n", uAccessType, GCPhys,
VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
(uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
CPUMCTX2CORE(pMixedCtx), GCPhys);
rc = VBOXSTRICTRC_VAL(rc2);
Log(("ApicAccess rc=%d\n", rc));
if ( rc == VINF_SUCCESS
|| rc == VERR_PAGE_TABLE_NOT_PRESENT
|| rc == VERR_PAGE_NOT_PRESENT)
{
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
| HM_CHANGED_VMX_GUEST_APIC_STATE;
rc = VINF_SUCCESS;
}
break;
}
default:
Log(("ApicAccess uAccessType=%#x\n", uAccessType));
rc = VINF_EM_RAW_EMULATE_INSTR;
break;
}
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
return rc;
}

/**
* VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
* VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
/* We should -not- get this VM-exit if the guest is debugging. */
if (CPUMIsGuestDebugStateActive(pVCpu))
{
AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
return VERR_VMX_UNEXPECTED_EXIT_CODE;
}
int rc = VERR_INTERNAL_ERROR_5;
if ( !DBGFIsStepping(pVCpu)
&& !CPUMIsHyperDebugStateActive(pVCpu))
{
/* Don't intercept MOV DRx. */
pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
AssertRCReturn(rc, rc);
/* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
PVM pVM = pVCpu->CTX_SUFF(pVM);
rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
AssertRC(rc);
Assert(CPUMIsGuestDebugStateActive(pVCpu));
#ifdef VBOX_WITH_STATISTICS
rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
AssertRCReturn(rc, rc);
if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
else
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
#endif
STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
return VINF_SUCCESS;
}
/*
* EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date, see
* hmR0VmxSaveGuestAutoLoadStoreMsrs(). Update only the segment registers from the CPU.
*/
rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
PVM pVM = pVCpu->CTX_SUFF(pVM);
if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
{
rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
if (RT_SUCCESS(rc))
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
}
else
{
rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
}
Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
if (RT_SUCCESS(rc))
{
int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
AssertRCReturn(rc2, rc2);
}
return rc;
}

/**
* VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
* Conditional VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
/* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
return VINF_SUCCESS;
else if (RT_UNLIKELY(rc == VINF_EM_RESET))
return rc;
RTGCPHYS GCPhys = 0;
rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
#if 0
rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
#else
/* Aggressive state sync. for now. */
rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
#endif
AssertRCReturn(rc, rc);
/*
* If we succeed, resume guest execution.
* If we fail to interpret the instruction because we couldn't get the guest physical address
* of the page containing the instruction via the guest's page tables (we invalidate the guest page
* in the host TLB in that case), resume execution anyway; the resulting guest page fault lets the
* guest handle this weird case itself. See @bugref{6043}.
*/
PVM pVM = pVCpu->CTX_SUFF(pVM);
VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
rc = VBOXSTRICTRC_VAL(rc2);
Log(("EPT misconfig at %#RGp RIP=%#RGv rc=%d\n", GCPhys, pMixedCtx->rip, rc));
if ( rc == VINF_SUCCESS
|| rc == VERR_PAGE_TABLE_NOT_PRESENT
|| rc == VERR_PAGE_NOT_PRESENT)
{
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
| HM_CHANGED_VMX_GUEST_APIC_STATE;
return VINF_SUCCESS;
}
return rc;
}

/**
* VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
* VM-exit.
*/
static DECLCALLBACK(int) hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_HANDLER_PARAMS();
Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
/* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
return VINF_SUCCESS;
else if (RT_UNLIKELY(rc == VINF_EM_RESET))
return rc;
RTGCPHYS GCPhys = 0;
rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
#if 0
rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
#else
/* Aggressive state sync. for now. */
rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
#endif
AssertRCReturn(rc, rc);
/* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RGv", pVmxTransient->uExitQualification));
RTGCUINT uErrorCode = 0;
if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
uErrorCode |= X86_TRAP_PF_ID;
if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
uErrorCode |= X86_TRAP_PF_RW;
if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
uErrorCode |= X86_TRAP_PF_P;
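/* E.g. a guest write to a non-present EPT mapping yields uErrorCode = X86_TRAP_PF_RW, i.e. the same error-code layout a regular #PF would produce. */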
TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
Log(("EPT violation %#x at %#RGp ErrorCode %#x CS:EIP=%04x:%#RX64\n", (uint32_t)pVmxTransient->uExitQualification, GCPhys,
uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
/* Handle the pagefault trap for the nested shadow table. */
PVM pVM = pVCpu->CTX_SUFF(pVM);
rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
TRPMResetTrap(pVCpu);
/* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
if ( rc == VINF_SUCCESS
|| rc == VERR_PAGE_TABLE_NOT_PRESENT
|| rc == VERR_PAGE_NOT_PRESENT)
{
/* Successfully synced our shadow page tables or emulated an MMIO instruction. */
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
| HM_CHANGED_VMX_GUEST_APIC_STATE;
return VINF_SUCCESS;
}
Log(("EPT return to ring-3 rc=%d\n", rc));
return rc;
}

/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */

/**
* VM-exit exception handler for #MF (Math Fault: floating point exception).
*/
static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
if (!(pMixedCtx->cr0 & X86_CR0_NE))
{
/* Old-style FPU error reporting needs some extra work. */
/** @todo don't fall back to the recompiler, but do it manually. */
return VERR_EM_INTERPRETER;
}
hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
return rc;
}

/**
* VM-exit exception handler for #BP (Breakpoint exception).
*/
static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
/** @todo Try to optimize this by not saving the entire guest state unless
* really needed. */
int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
PVM pVM = pVCpu->CTX_SUFF(pVM);
rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
if (rc == VINF_EM_RAW_GUEST_TRAP)
{
rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
AssertRCReturn(rc, rc);
hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
}
Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
return rc;
}

/**
* VM-exit exception handler for #DB (Debug exception).
*/
static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
/* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
uint64_t uDR6 = X86_DR6_INIT_VAL;
uDR6 |= (pVmxTransient->uExitQualification
& (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
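/* The exit qualification uses the same bit positions as DR6 for B0-B3, BD and BS, so the bits can be OR'ed into the DR6 template directly. */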
PVM pVM = pVCpu->CTX_SUFF(pVM);
rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6);
if (rc == VINF_EM_RAW_GUEST_TRAP)
{
/* DR6, DR7.GD and IA32_DEBUGCTL.LBR are not updated yet. See Intel spec. 27.1 "Architectural State before a VM-Exit". */
pMixedCtx->dr[6] = uDR6;
if (CPUMIsGuestDebugStateActive(pVCpu))
ASMSetDR6(pMixedCtx->dr[6]);
rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx);
/* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
pMixedCtx->dr[7] &= ~X86_DR7_GD;
/* Paranoia. */
pMixedCtx->dr[7] &= 0xffffffff; /* upper 32 bits reserved */
pMixedCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* must be zero */
pMixedCtx->dr[7] |= 0x400; /* must be one */
rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
AssertRCReturn(rc, rc);
int rc2 = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
rc2 |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
rc2 |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
AssertRCReturn(rc2, rc2);
hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
rc = VINF_SUCCESS;
}
return rc;
}

/**
* VM-exit exception handler for #NM (Device-not-available exception: floating
* point exception).
*/
static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
Assert(!CPUMIsGuestFPUStateActive(pVCpu));
#endif
/* We require CR0 and EFER. EFER is always up-to-date. */
int rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
/* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
PVM pVM = pVCpu->CTX_SUFF(pVM);
rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
if (rc == VINF_SUCCESS)
{
Assert(CPUMIsGuestFPUStateActive(pVCpu));
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
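/* Note: CR0 is flagged as changed because loading the guest FPU affects CR0.TS handling, which must be re-synced into the VMCS before the next VM-entry. */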
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
return VINF_SUCCESS;
}
/* Forward #NM to the guest. */
Assert(rc == VINF_EM_RAW_GUEST_TRAP);
rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
AssertRCReturn(rc, rc);
hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
return rc;
}

/**
* VM-exit exception handler for #GP (General-protection exception).
*
* @remarks Requires pVmxTransient->uExitIntrInfo to be up-to-date.
*/
static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
int rc = VERR_INTERNAL_ERROR_5;
if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
{
#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
/* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
Log(("#GP Gst: RIP %#RX64 ErrorCode=%#x CR0=%#RGv CPL=%u\n", pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode,
pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu)));
hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
return rc;
#else
/* We don't intercept #GP. */
AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
return VERR_VMX_UNEXPECTED_EXCEPTION;
#endif
}
Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
/* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
uint32_t cbOp = 0;
PVM pVM = pVCpu->CTX_SUFF(pVM);
rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
if (RT_SUCCESS(rc))
{
rc = VINF_SUCCESS;
Assert(cbOp == pDis->cbInstr);
Log(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
switch (pDis->pCurInstr->uOpcode)
{
case OP_CLI:
pMixedCtx->eflags.Bits.u1IF = 0;
pMixedCtx->rip += pDis->cbInstr;
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
break;
case OP_STI:
pMixedCtx->eflags.Bits.u1IF = 1;
pMixedCtx->rip += pDis->cbInstr;
EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
break;
case OP_HLT:
rc = VINF_EM_HALT;
pMixedCtx->rip += pDis->cbInstr;
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
break;
case OP_POPF:
{
Log(("POPF CS:RIP %04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
uint32_t cbParm = 0;
uint32_t uMask = 0;
if (pDis->fPrefix & DISPREFIX_OPSIZE)
{
cbParm = 4;
uMask = 0xffffffff;
}
else
{
cbParm = 2;
uMask = 0xffff;
}
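/* In 16-bit real-mode code the default POPF operand is 2 bytes; the 0x66 operand-size prefix switches it to a 4-byte pop, hence the two size/mask pairs above. */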
/* Get the stack pointer & pop the contents of the stack into EFlags. */
RTGCPTR GCPtrStack = 0;
X86EFLAGS uEflags;
rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
&GCPtrStack);
if (RT_SUCCESS(rc))
{
Assert(sizeof(uEflags.u32) >= cbParm);
uEflags.u32 = 0;
rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &uEflags.u32, cbParm);
}
if (RT_FAILURE(rc))
{
rc = VERR_EM_INTERPRETER;
break;
}
Log(("POPF %x -> %#RGv mask=%x RIP=%#RX64\n", uEflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
| (uEflags.u32 & X86_EFL_POPF_BITS & uMask);
/* The RF bit is always cleared by POPF; see Intel Instruction reference for POPF. */
pMixedCtx->eflags.Bits.u1RF = 0;
pMixedCtx->esp += cbParm;
pMixedCtx->esp &= uMask;
pMixedCtx->rip += pDis->cbInstr;
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
break;
}
case OP_PUSHF:
{
uint32_t cbParm = 0;
uint32_t uMask = 0;
if (pDis->fPrefix & DISPREFIX_OPSIZE)
{
cbParm = 4;
uMask = 0xffffffff;
}
else
{
cbParm = 2;
uMask = 0xffff;
}
/* Get the stack pointer & push the contents of eflags onto the stack. */
RTGCPTR GCPtrStack = 0;
rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
if (RT_FAILURE(rc))
{
rc = VERR_EM_INTERPRETER;
break;
}
X86EFLAGS uEflags;
uEflags = pMixedCtx->eflags;
/* The RF & VM bits are cleared in the image stored on the stack; see Intel instruction reference for PUSHF. */
uEflags.Bits.u1RF = 0;
uEflags.Bits.u1VM = 0;
rc = PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &uEflags.u, cbParm);
if (RT_FAILURE(rc))
{
rc = VERR_EM_INTERPRETER;
break;
}
Log(("PUSHF %x -> %#RGv\n", uEflags.u, GCPtrStack));
pMixedCtx->esp -= cbParm;
pMixedCtx->esp &= uMask;
pMixedCtx->rip += pDis->cbInstr;
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP;
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
break;
}
case OP_IRET:
{
/** @todo Handle 32-bit operand sizes and check stack limits. See Intel
* instruction reference. */
RTGCPTR GCPtrStack = 0;
uint32_t uMask = 0xffff;
uint16_t aIretFrame[3];
if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
{
rc = VERR_EM_INTERPRETER;
break;
}
rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
&GCPtrStack);
if (RT_SUCCESS(rc))
rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame));
if (RT_FAILURE(rc))
{
rc = VERR_EM_INTERPRETER;
break;
}
pMixedCtx->eip = 0;
pMixedCtx->ip = aIretFrame[0];
pMixedCtx->cs.Sel = aIretFrame[1];
pMixedCtx->cs.ValidSel = aIretFrame[1];
pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
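/* Real-mode segmentation: the base is the selector shifted left by 4 (i.e. selector * 16). */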
pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
| (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
pMixedCtx->sp += sizeof(aIretFrame);
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_SEGMENT_REGS | HM_CHANGED_GUEST_RSP
| HM_CHANGED_GUEST_RFLAGS;
Log(("IRET %#RGv to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
break;
}
case OP_INT:
{
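/* INT imm8: the vector to raise is the 8-bit immediate operand. */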
uint16_t uVector = pDis->Param1.uValue & 0xff;
hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
break;
}
case OP_INTO:
{
if (pMixedCtx->eflags.Bits.u1OF)
{
hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
}
break;
}
default:
{
VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
EMCODETYPE_SUPERVISOR);
rc = VBOXSTRICTRC_VAL(rc2);
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
Log(("#GP rc=%Rrc\n", rc));
break;
}
}
}
else
rc = VERR_EM_INTERPRETER;
AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
("#GP Unexpected rc=%Rrc\n", rc));
return rc;
}

/**
* VM-exit exception handler wrapper for generic exceptions. Simply re-injects
* the exception reported in the VMX transient structure back into the VM.
*
* @remarks Requires uExitIntrInfo, uExitIntrErrorCode, cbInstr fields in the
* VMX transient structure to be up-to-date.
*/
static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
/* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
hmR0VmxCheckExitDueToEventDelivery(). */
int rc = hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
AssertRCReturn(rc, rc);
Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
return VINF_SUCCESS;
}

/**
* VM-exit exception handler for #PF (Page-fault exception).
*/
static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
PVM pVM = pVCpu->CTX_SUFF(pVM);
int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
rc |= hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
AssertRCReturn(rc, rc);
#if defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) || defined(HMVMX_ALWAYS_TRAP_PF)
if (pVM->hm.s.fNestedPaging)
{
if (RT_LIKELY(!pVmxTransient->fVectoringPF))
{
pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
0 /* cbInstr */, pVmxTransient->uExitIntrErrorCode, pVmxTransient->uExitQualification);
}
else
{
/* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
pVCpu->hm.s.Event.fPending = false; /* A vectoring #PF. */
hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
Log(("Pending #DF due to vectoring #PF. NP\n"));
}
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
return rc;
}
#else
Assert(!pVM->hm.s.fNestedPaging);
#endif
#ifdef VBOX_HM_WITH_GUEST_PATCHING
rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
/* Shortcut for APIC TPR access, only for 32-bit guests. */
if ( pVM->hm.s.fTRPPatchingAllowed
&& pVM->hm.s.pGuestPatchMem
&& (pVmxTransient->uExitQualification & 0xfff) == 0x80 /* TPR offset */
&& !(pVmxTransient->uExitIntrErrorCode & X86_TRAP_PF_P) /* Page not present */
&& CPUMGetGuestCPL(pVCpu) == 0 /* Requires CR0, EFLAGS, segments. */
&& !CPUMIsGuestInLongModeEx(pMixedCtx) /* Requires EFER. */
&& pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
{
RTGCPHYS GCPhys;
RTGCPHYS GCPhysApicBase = (pMixedCtx->msrApicBase & PAGE_BASE_GC_MASK);
rc = PGMGstGetPage(pVCpu, (RTGCPTR)pVmxTransient->uExitQualification, NULL /* pfFlags */, &GCPhys);
if ( rc == VINF_SUCCESS
&& GCPhys == GCPhysApicBase)
{
rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
/* Only attempt to patch the instruction once. */
PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pMixedCtx->eip);
if (!pPatch)
return VINF_EM_HM_PATCH_TPR_INSTR;
}
}
#endif
rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
AssertRCReturn(rc, rc);
Log(("#PF: cr2=%#RGv cs:rip=%#04x:%#RGv uErrCode %#RX32 cr3=%#RGv\n", pVmxTransient->uExitQualification, pMixedCtx->cs.Sel,
pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode, pMixedCtx->cr3));
TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntrErrorCode);
rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntrErrorCode, CPUMCTX2CORE(pMixedCtx),
(RTGCPTR)pVmxTransient->uExitQualification);
Log(("#PF: rc=%Rrc\n", rc));
if (rc == VINF_SUCCESS)
{
/* Successfully synced shadow page tables or emulated an MMIO instruction. */
/** @todo this isn't quite right, what if guest does lgdt with some MMIO
* memory? We don't update the whole state here... */
pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
| HM_CHANGED_VMX_GUEST_APIC_STATE;
TRPMResetTrap(pVCpu);
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
return rc;
}
else if (rc == VINF_EM_RAW_GUEST_TRAP)
{
if (!pVmxTransient->fVectoringPF)
{
/* It's a guest page fault and needs to be reflected to the guest. */
uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
TRPMResetTrap(pVCpu);
pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
}
else
{
/* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
TRPMResetTrap(pVCpu);
pVCpu->hm.s.Event.fPending = false; /* Clear the pending #PF; we replace it with #DF. */
hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
Log(("#PF: Pending #DF due to vectoring #PF\n"));
}
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
return VINF_SUCCESS;
}
TRPMResetTrap(pVCpu);
STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
return rc;
}