/* HWSVMR0.cpp revision 7f62f04e2a04748a89d153bfbde59762a3ac33ce */

/** @file
 * HM SVM (AMD-V) - Host Context Ring-0.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */
/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static int hmR0SvmInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame);
static int hmR0SvmEmulateTprVMMCall(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
static void hmR0SvmSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite);
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Convert hidden selector attribute word between VMX and SVM formats. */
#define SVM_HIDSEGATTR_VMX2SVM(a)     (a & 0xFF) | ((a & 0xF000) >> 4)
#define SVM_HIDSEGATTR_SVM2VMX(a)     (a & 0xFF) | ((a & 0x0F00) << 4)
        Assert(pCtx->reg.fFlags & CPUMSELREG_FLAGS_VALID); \
        pvVMCB->guest.REG.u16Attr = SVM_HIDSEGATTR_VMX2SVM(pCtx->reg.Attr.u); \
    } while (0)

        pCtx->reg.Attr.u = SVM_HIDSEGATTR_SVM2VMX(pvVMCB->guest.REG.u16Attr); \
    } while (0)
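/*
 * Illustrative sketch, not part of the original file: the conversion macros above
 * assume the VMX hidden-attribute word keeps bits 0-7 and 12-15 (bits 8-11 unused),
 * while SVM packs the same bits contiguously into bits 0-11.  The hypothetical
 * helper below checks that a round trip through both macros is lossless for any
 * attribute word of that shape.
 */
static bool hmR0SvmAttrRoundTripSketch(uint16_t uVmxAttr)
{
    /* e.g. uVmxAttr = 0xc09b: G + D/B (bits 15/14), P, DPL=0, code type (bits 0-7). */
    uint16_t const uSvmAttr = SVM_HIDSEGATTR_VMX2SVM(uVmxAttr);   /* 0xc09b -> 0x0c9b */
    uint16_t const uBack    = SVM_HIDSEGATTR_SVM2VMX(uSvmAttr);   /* 0x0c9b -> 0xc09b */
    return uBack == (uVmxAttr & 0xf0ff);   /* bits 8-11 cannot be represented and are dropped */
}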
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/* IO operation lookup arrays. */
static uint32_t const g_aIOSize[8]  = {0, 1, 2, 0, 4, 0, 0, 0};
static uint32_t const g_aIOOpAnd[8] = {0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0};
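/*
 * Illustrative sketch, not part of the original file, of how the two lookup tables
 * above are used: on an SVM_EXIT_IOIO exit, EXITINFO1 carries one-hot operand-size
 * bits (assumed here to be bits 4..6 for 8/16/32-bit accesses); those three bits
 * index g_aIOSize for the access size in bytes and g_aIOOpAnd for the EAX mask.
 */
static void hmR0SvmDecodeIoSizeSketch(uint64_t u64ExitInfo1, uint32_t *pcbSize, uint32_t *pfAndMask)
{
    unsigned const idx = (unsigned)((u64ExitInfo1 >> 4) & 7);  /* assumption: SZ8/SZ16/SZ32 live in bits 4..6 */
    *pcbSize   = g_aIOSize[idx];      /* 1, 2 or 4 bytes; 0 for encodings that cannot occur */
    *pfAndMask = g_aIOOpAnd[idx];     /* 0xff, 0xffff or 0xffffffff */
}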
/**
 * Sets up and activates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            Pointer to the CPU info struct.
 * @param   pVM             Pointer to the VM (can be NULL after a resume!).
 * @param   pvCpuPage       Pointer to the global CPU page.
 * @param   HCPhysCpuPage   Physical address of the global CPU page.
 * @param   fEnabledByHost  Whether AMD-V was already enabled by the host (must be false).
 */
VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
    AssertReturn(!fEnabledByHost, VERR_INVALID_PARAMETER);
    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);

    /*
     * We must turn on AMD-V and set up the host state physical address, as those MSRs are per CPU/core.
     * If VBOX_HWVIRTEX_IGNORE_SVM_IN_USE is active, we blindly use AMD-V.
     */

    /* Turn on AMD-V in the EFER MSR. */
    /* Write the physical page address where the CPU will store the host state while executing the VM. */
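    /*
     * Minimal sketch of the two steps noted above, assuming the IPRT ASMRdMsr/ASMWrMsr
     * helpers and the MSR_K6_EFER / MSR_K6_EFER_SVME / MSR_K8_VM_HSAVE_PA definitions;
     * the real function also handles the "SVM already in use" cases mentioned above.
     */
    uint64_t const uHostEfer = ASMRdMsr(MSR_K6_EFER);
    ASMWrMsr(MSR_K6_EFER, uHostEfer | MSR_K6_EFER_SVME);    /* turn on AMD-V */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);            /* host state-save area for VMRUN */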
    /*
     * Theoretically, other hypervisors may have used ASIDs; ideally we should flush all non-zero ASIDs
     * when enabling SVM. AMD doesn't have an SVM instruction to flush all ASIDs (flushing is done
     * upon VMRUN). Therefore, just set the fFlushAsidBeforeUse flag which instructs hmR0SvmSetupTLB()
     * to flush the TLB before using a new ASID.
     *
     * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
     */
/**
 * Deactivates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            Pointer to the CPU info struct.
 * @param   pvCpuPage       Pointer to the global CPU page.
 * @param   HCPhysCpuPage   Physical address of the global CPU page.
 */
VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);

    /* Turn off AMD-V in the EFER MSR. */
    /* Invalidate host state physical address. */
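    /*
     * Minimal sketch of the two steps noted above (same assumptions as the enable
     * sketch in SVMR0EnableCpu).
     */
    uint64_t const uHostEfer = ASMRdMsr(MSR_K6_EFER);
    ASMWrMsr(MSR_K6_EFER, uHostEfer & ~(uint64_t)MSR_K6_EFER_SVME); /* turn off AMD-V */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);                                /* invalidate the host-save area */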
/**
 * Does Ring-0 per VM AMD-V init.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
    /* Allocate 12 KB for the IO bitmap (doesn't seem to be a way to convince SVM not to use it). */
    rc = RTR0MemObjAllocCont(&pVM->hm.s.svm.hMemObjIOBitmap, 3 << PAGE_SHIFT, false /* fExecutable */);
    pVM->hm.s.svm.pvIOBitmap = RTR0MemObjAddress(pVM->hm.s.svm.hMemObjIOBitmap);
    pVM->hm.s.svm.HCPhysIOBitmap = RTR0MemObjGetPagePhysAddr(pVM->hm.s.svm.hMemObjIOBitmap, 0);

    /* Set all bits to intercept all IO accesses. */
    ASMMemFill32(pVM->hm.s.svm.pvIOBitmap, 3 << PAGE_SHIFT, 0xffffffff);
    /*
     * Erratum 170 requires a forced TLB flush for each world switch.
     * See http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
     *
     * All BH-G1/2 and DH-G1/2 models include a fix:
     *   Athlon X2:  0x6b 1/2
     *   Athlon 64:  0x7f 1
     *   Sempron:    0x7f 1/2
     *   Turion 64:  0x68 2
     */
    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily, u32Dummy;
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
    u32BaseFamily = (u32Version >> 8) & 0xf;
    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    u32Model      = (u32Version >> 4) & 0xf;
    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    u32Stepping   = u32Version & 0xf;
    if (    u32Family == 0xf
        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
        Log(("SVMR0InitVM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
    /* Allocate VMCBs for all guest CPUs. */

    /* Allocate one page for the host context. */
    rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVMCBHost, 1 << PAGE_SHIFT, false /* fExecutable */);
    pVCpu->hm.s.svm.pvVMCBHost = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVMCBHost);
    pVCpu->hm.s.svm.HCPhysVMCBHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVMCBHost, 0);

    /* Allocate one page for the VM control block (VMCB). */
    rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVMCB, 1 << PAGE_SHIFT, false /* fExecutable */);
    pVCpu->hm.s.svm.pvVMCB = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVMCB);
    pVCpu->hm.s.svm.HCPhysVMCB = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVMCB, 0);

    /* Allocate 8 KB for the MSR bitmap (doesn't seem to be a way to convince SVM not to use it). */
    rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
    pVCpu->hm.s.svm.pvMsrBitmap = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMsrBitmap);
    pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0);

    /* Set all bits to intercept all MSR accesses. */
    ASMMemFill32(pVCpu->hm.s.svm.pvMsrBitmap, 2 << PAGE_SHIFT, 0xffffffff);
/**
 * Does Ring-0 per VM AMD-V termination.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
    if (pVCpu->hm.s.svm.hMemObjVMCBHost != NIL_RTR0MEMOBJ)
        RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVMCBHost, false);

    if (pVCpu->hm.s.svm.hMemObjVMCB != NIL_RTR0MEMOBJ)
        RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVMCB, false);

    if (pVCpu->hm.s.svm.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
        RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjMsrBitmap, false);

    if (pVM->hm.s.svm.hMemObjIOBitmap != NIL_RTR0MEMOBJ)
        RTR0MemObjFree(pVM->hm.s.svm.hMemObjIOBitmap, false);
/**
 * Sets up AMD-V for the specified VM.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
    SVM_VMCB *pvVMCB = (SVM_VMCB *)pVM->aCpus[i].hm.s.svm.pvVMCB;
    AssertMsgReturn(pvVMCB, ("Invalid pvVMCB\n"), VERR_SVM_INVALID_PVMCB);

    /*
     * Program the control fields. Most of them never have to be changed again.
     * CR0/4 reads must be intercepted, our shadow values are not necessarily the same as the guest's.
     * Note: CR0 & CR4 can be safely read when guest and shadow copies are identical.
     */
    pvVMCB->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);

    /* CR0/4 writes must be intercepted for obvious reasons. */
    pvVMCB->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4);

    /* Intercept all DRx reads and writes by default. Changed later on. */

    /* Intercept traps; only #NM is always intercepted. */
    pvVMCB->ctrl.u32InterceptException  = RT_BIT(X86_XCPT_NM);
    pvVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);
    pvVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_BP);
    /* Set up instruction and miscellaneous intercepts. */
    pvVMCB->ctrl.u32InterceptCtrl1 =  SVM_CTRL1_INTERCEPT_INTR
                                    | SVM_CTRL1_INTERCEPT_FERR_FREEZE;  /* Legacy FPU FERR handling. */
    pvVMCB->ctrl.u32InterceptCtrl2 =  SVM_CTRL2_INTERCEPT_VMRUN         /* required */
                                    | SVM_CTRL2_INTERCEPT_MWAIT_UNCOND; /* don't execute mwait or else we'll idle inside
                                                                           the guest (host thinks the cpu load is high) */

    Log(("pvVMCB->ctrl.u32InterceptException = %x\n", pvVMCB->ctrl.u32InterceptException));
    Log(("pvVMCB->ctrl.u32InterceptCtrl1     = %x\n", pvVMCB->ctrl.u32InterceptCtrl1));
    Log(("pvVMCB->ctrl.u32InterceptCtrl2     = %x\n", pvVMCB->ctrl.u32InterceptCtrl2));
    /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
    /* Ignore the priority in the TPR; just deliver it when we tell it to. */

    /* Set IO and MSR bitmap addresses. */
    pvVMCB->ctrl.u64IOPMPhysAddr  = pVM->hm.s.svm.HCPhysIOBitmap;
    pvVMCB->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMsrBitmap;

    /* No LBR virtualization. */

    /* The ASID must start at 1; the host uses 0. */

    /*
     * Setup the PAT MSR (nested paging only).
     * The default value should be 0x0007040600070406ULL, but we want to treat all guest memory as WB,
     * so choose type 6 for all PAT slots.
     */
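    /*
     * Minimal sketch of the PAT setup described above (the VMCB guest field name
     * u64GPAT is an assumption here): memory type 6 (write-back) replicated into
     * every 8-bit PAT slot instead of the architectural default.
     */
    pvVMCB->guest.u64GPAT = UINT64_C(0x0606060606060606);  /* WB in all eight PAT entries */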
    /* If nested paging is not in use, additional intercepts have to be set up. */

    /* CR3 reads/writes must be intercepted; our shadow values are different from the guest's. */

    /*
     * We must also intercept:
     * - INVLPG (must go through shadow paging)
     * - task switches (may change CR3/EFLAGS/LDT)
     */
    pvVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_INVLPG;

    /* Page faults must be intercepted to implement shadow paging. */
    pvVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);
    /*
     * The following MSRs are saved automatically by vmload/vmsave, so we allow the guest
     * to modify them directly.
     */
    hmR0SvmSetMSRPermission(pVCpu, MSR_K8_LSTAR,          true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_K8_CSTAR,          true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_K6_STAR,           true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_K8_SF_MASK,        true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_K8_FS_BASE,        true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_K8_GS_BASE,        true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_CS,  true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_ESP, true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_EIP, true, true);
/**
 * Sets the permission bits for the specified MSR.
 *
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   ulMSR   The MSR number.
 * @param   fRead   Whether reading is allowed.
 * @param   fWrite  Whether writing is allowed.
 */
static void hmR0SvmSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite)
    uint8_t *pvMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;

    /* Pentium-compatible MSRs */
    /* AMD Sixth Generation x86 Processor MSRs and SYSCALL */
    /* AMD Seventh and Eighth Generation Processor MSRs */
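/*
 * Illustrative sketch, not part of the original file, of the MSR permission bitmap
 * layout the function above relies on: the 8 KB MSRPM is split into 2 KB regions
 * covering MSRs 0x00000000-0x00001fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff,
 * with two bits per MSR (even bit = intercept read, odd bit = intercept write).
 */
static bool hmR0SvmMsrpmBitSketch(uint32_t uMsr, uint32_t *piBit)
{
    if (uMsr <= 0x00001fff)
        *piBit = uMsr * 2;                                /* Pentium-compatible MSRs */
    else if (uMsr >= 0xc0000000 && uMsr <= 0xc0001fff)
        *piBit = 0x800 * 8 + (uMsr - 0xc0000000) * 2;     /* AMD Sixth Generation x86 Processor MSRs and SYSCALL */
    else if (uMsr >= 0xc0010000 && uMsr <= 0xc0011fff)
        *piBit = 0x1000 * 8 + (uMsr - 0xc0010000) * 2;    /* AMD Seventh and Eighth Generation Processor MSRs */
    else
        return false;                                     /* MSR not covered by the bitmap */
    return true;
}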
/**
 * Posts a pending event (trap or external interrupt). An injected event should only
 * be written to the VMCB immediately before VMRUN, otherwise we might have stale events
 * injected across VM resets and suchlike. See @bugref{6220}.
 *
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pEvent  Pointer to the SVM event to post.
 */
DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPU pVCpu, SVM_EVENT *pEvent)
    Log(("SVM: Set pending event: intInfo=%016llx\n", pEvent->au64[0]));

    /* If there's an event pending already, we're in trouble... */

    /* Set pending event state. */
/**
 * Injects an event (trap or external interrupt).
 *
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pvVMCB  Pointer to the VMCB.
 * @param   pCtx    Pointer to the guest CPU context.
 * @param   pEvent  Pointer to the SVM event to inject.
 */
DECLINLINE(void) hmR0SvmInjectEvent(PVMCPU pVCpu, SVM_VMCB *pvVMCB, CPUMCTX *pCtx, SVM_EVENT *pEvent)
    STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);

    Log(("SVM: Inject int %d at %RGv error code=%02x CR2=%RGv intInfo=%08x\n", pEvent->n.u8Vector,
         (RTGCPTR)pCtx->rip, pEvent->n.u32ErrorCode, (RTGCPTR)pCtx->cr2, pEvent->au64[0]));
    Log(("SVM: Inject int %d at %RGv error code=%08x\n", pEvent->n.u8Vector, (RTGCPTR)pCtx->rip, pEvent->n.u32ErrorCode));
    Log(("INJ-EI: %x at %RGv\n", pEvent->n.u8Vector, (RTGCPTR)pCtx->rip));
    Assert(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));

    /* Set event injection state. */
    pvVMCB->ctrl.EventInject.au64[0] = pEvent->au64[0];
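/*
 * Minimal sketch, not part of the original file, of how an external interrupt is
 * packed into an SVM_EVENT before being handed to the functions above.  The
 * u8Vector/u32ErrorCode/au64[0] fields appear above; u3Type and u1Valid are
 * assumed field names.
 */
static void hmR0SvmMakeExtIntEventSketch(SVM_EVENT *pEvent, uint8_t u8Vector)
{
    pEvent->au64[0]    = 0;
    pEvent->n.u8Vector = u8Vector;
    pEvent->n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;   /* no error code for external interrupts */
    pEvent->n.u1Valid  = 1;
}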
/**
 * Checks for pending guest interrupts and injects them.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pvVMCB  Pointer to the VMCB.
 * @param   pCtx    Pointer to the guest CPU context.
 */
static int hmR0SvmCheckPendingInterrupt(PVM pVM, PVMCPU pVCpu, SVM_VMCB *pvVMCB, CPUMCTX *pCtx)
    /* Dispatch any pending interrupts (injected before, but a VM-exit occurred prematurely). */
    Log(("Reinjecting event %08x %08x at %RGv\n", pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.u32ErrCode,

    /* If an active trap is already pending, we must forward it first! */
    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI))
    /** @todo SMI interrupts. */

    /* When external interrupts are pending, we should exit the VM when IF is set. */
    if (VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
        || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
        Log(("Pending interrupt blocked at %RGv by VM_FF_INHIBIT_INTERRUPTS -> irq window exit\n",
    /** @todo Use virtual interrupt method to inject a pending IRQ; dispatched as
     *        soon as guest.IF is set. */
    pvVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
    pvVMCB->ctrl.IntCtrl.n.u8VIrqVector = 0; /* don't care */

    Log(("Dispatch interrupt: u8Interrupt=%x (%d) rc=%Rrc\n", u8Interrupt, u8Interrupt, rc));
    rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
    /* Can only happen in rare cases where a pending interrupt is cleared behind our back. */
    Assert(!VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
    /* Just continue. */
        && (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))

    /* If a new event is pending, dispatch it now. */
    rc = TRPMQueryTrapAll(pVCpu, &u8Vector, &enmType, &u32ErrorCode, 0);
    Assert(pCtx->eflags.Bits.u1IF == 1 || enmType == TRPM_TRAP);

    /* Clear the pending trap. */
    /* Valid error codes. */
    } /* if (interrupts can be dispatched) */
/**
 * Save the host state.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu)
    /* Nothing to do here. */
/**
 * Loads the guest state.
 *
 * NOTE: Don't do anything here that can cause a jump back to ring-3!!!
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pCtx    Pointer to the guest CPU context.
 */
VMMR0DECL(int) SVMR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    /* Setup AMD SVM. */
    AssertMsgReturn(pvVMCB, ("Invalid pvVMCB\n"), VERR_SVM_INVALID_PVMCB);
    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)

    /* Guest CPU context: LDTR. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)

    /* Guest CPU context: TR. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)

    /* Guest CPU context: GDTR. */
/* Also catch floating point exceptions as we need to report them to the guest in a different way. */
val |= X86_CR0_NE; /* always turn on the native mechanism to report FPU errors (old style uses interrupts) */
* Note: WP is not relevant in nested paging mode as we catch accesses on the (guest) physical level.
* Note: In nested paging mode, the guest is allowed to run with paging disabled; the guest-physical to host-physical
val |= X86_CR0_PG; /* Paging is always enabled; even when the guest is running in real mode or PE without paging. */
val |= X86_CR0_WP; /* Must set this as we rely on protecting various pages and supervisor writes must be caught. */
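/*
 * Minimal sketch, not part of the original file, restating the CR0 shadowing rules
 * above as a standalone helper: NE is always forced, while PG and WP are only
 * forced when shadow paging (i.e. no nested paging) is in use.
 */
static uint64_t hmR0SvmShadowCr0Sketch(uint64_t uGuestCr0, bool fNestedPaging)
{
    uint64_t uShadowCr0 = uGuestCr0 | X86_CR0_NE;   /* native FPU error reporting instead of FERR interrupts */
    if (!fNestedPaging)
        uShadowCr0 |= X86_CR0_PG | X86_CR0_WP;      /* paging always on; catch supervisor writes to r/o pages */
    return uShadowCr0;
}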
Assert(pvVMCB->guest.u64CR3 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
case PGMMODE_REAL:
AssertFailed();
#ifdef VBOX_ENABLE_64_BITS_GUESTS
AssertFailed();
AssertFailed();
#ifdef DEBUG
#if !defined(VBOX_ENABLE_64_BITS_GUESTS)
# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
/* Unconditionally update these as wrmsr might have changed them. (HM_CHANGED_GUEST_SEGMENT_REGS will not be set) */
#ifdef DEBUG
return VINF_SUCCESS;
* Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
* If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB,
bool fNewAsid = false;
fNewAsid = true;
if (fNewAsid)
bool fHitASIDLimit = false;
fHitASIDLimit = true;
if ( !fHitASIDLimit
("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
#ifdef VBOX_WITH_STATISTICS
int rc2;
bool fSyncTPR = false;
unsigned cResume = 0;
#ifdef VBOX_STRICT
goto end;
Log(("VM_FF_INHIBIT_INTERRUPTS at %RGv successor %RGv\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
* Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
* force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
* break the guest. Sounds very unlikely, but such timing sensitive problems are not as rare as you might think.
if ( VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
| VMCPU_FF_REQUEST))
rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
goto end;
#ifdef DEBUG
rc = RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
goto end;
/* Pending request packets might contain actions that need immediate attention, such as pending hardware interrupts. */
goto end;
goto end;
goto end;
* Exit to ring-3 preemption/work is pending.
goto end;
goto end;
* TPR caching using CR8 is only available in 64-bit mode or with 32-bit guests when X86_CPUID_AMD_FEATURE_ECX_CR8L is
* Note: we can't do this in LoadGuestState as PDMApicGetTPR can jump back to ring 3 (lock)! (no longer true)
bool fPending;
if (fPending)
if (fPending)
#ifdef LOG_ENABLED
LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hm.s.idLastCpu, pCpu->idCpu));
LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
#ifdef VBOX_STRICT
goto end;
#ifdef VBOX_STRICT
#ifdef VBOX_WITH_KERNEL_USING_XMM
pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVMCBHost, pVCpu->hm.s.svm.HCPhysVMCB, pCtx, pVM, pVCpu);
/* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */
* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
* IMPORTANT: WE CAN'T DO ANY LOGGING OR OPERATIONS THAT CAN DO A LONGJMP BACK TO RING-3 *BEFORE* WE'VE SYNCED BACK (MOST OF) THE GUEST STATE
* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#ifdef DEBUG
goto end;
Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVCpu->hm.s.Event.u64IntrInfo, (RTGCPTR)pCtx->rip, exitCode));
#ifdef LOG_ENABLED
#ifdef VBOX_WITH_STATISTICS
if (fSyncTPR)
VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, exitCode, pvVMCB->ctrl.u64ExitInfo1, pvVMCB->ctrl.u64ExitInfo2,
switch (exitCode)
case SVM_EXIT_EXCEPTION_0: case SVM_EXIT_EXCEPTION_1: case SVM_EXIT_EXCEPTION_2: case SVM_EXIT_EXCEPTION_3:
case SVM_EXIT_EXCEPTION_4: case SVM_EXIT_EXCEPTION_5: case SVM_EXIT_EXCEPTION_6: case SVM_EXIT_EXCEPTION_7:
case SVM_EXIT_EXCEPTION_8: case SVM_EXIT_EXCEPTION_9: case SVM_EXIT_EXCEPTION_A: case SVM_EXIT_EXCEPTION_B:
case SVM_EXIT_EXCEPTION_C: case SVM_EXIT_EXCEPTION_D: case SVM_EXIT_EXCEPTION_E: case SVM_EXIT_EXCEPTION_F:
case SVM_EXIT_EXCEPTION_10: case SVM_EXIT_EXCEPTION_11: case SVM_EXIT_EXCEPTION_12: case SVM_EXIT_EXCEPTION_13:
case SVM_EXIT_EXCEPTION_14: case SVM_EXIT_EXCEPTION_15: case SVM_EXIT_EXCEPTION_16: case SVM_EXIT_EXCEPTION_17:
case SVM_EXIT_EXCEPTION_18: case SVM_EXIT_EXCEPTION_19: case SVM_EXIT_EXCEPTION_1A: case SVM_EXIT_EXCEPTION_1B:
case SVM_EXIT_EXCEPTION_1C: case SVM_EXIT_EXCEPTION_1D: case SVM_EXIT_EXCEPTION_1E: case SVM_EXIT_EXCEPTION_1F:
switch (vector)
case X86_XCPT_DB:
goto ResumeExecution;
Log(("Debugger hardware BP at %04x:%RGv (rc=%Rrc)\n", pCtx->cs.Sel, pCtx->rip, VBOXSTRICTRC_VAL(rc)));
case X86_XCPT_NM:
/* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
goto ResumeExecution;
goto ResumeExecution;
#ifdef VBOX_ALWAYS_TRAP_PF
* A genuine pagefault. Forward the trap to the guest by injecting the exception and resuming execution.
Log(("Guest page fault at %04X:%RGv cr2=%RGv error code %x rsp=%RGv\n", pCtx->cs, (RTGCPTR)pCtx->rip,
goto ResumeExecution;
#ifdef VBOX_HM_WITH_GUEST_PATCHING
if (!pPatch)
Log2(("Shadow page fault at %RGv cr2=%RGv error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode));
goto ResumeExecution;
* A genuine pagefault. Forward the trap to the guest by injecting the exception and resuming execution.
goto ResumeExecution;
#ifdef VBOX_STRICT
goto ResumeExecution;
#ifdef VBOX_STRICT
switch (vector)
case X86_XCPT_GP:
case X86_XCPT_BP:
case X86_XCPT_DE:
case X86_XCPT_UD:
case X86_XCPT_SS:
case X86_XCPT_NP:
goto ResumeExecution;
case SVM_EXIT_NPF:
/* EXITINFO1 contains fault errorcode; EXITINFO2 contains the guest physical address causing the fault. */
LogFlow(("Nested page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, GCPhysFault, errCode));
#ifdef VBOX_HM_WITH_GUEST_PATCHING
|| (errCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD) /* mmio optimization */)
if (!pPatch)
rc = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmShwPagingMode, CPUMCTX2CORE(pCtx), GCPhysFault, errCode);
* of the page containing the instruction via the guest's page tables (we would invalidate the guest page
* in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
Log2(("PGMR0Trap0eHandlerNPMisconfig(,,,%RGp) at %RGv -> resume\n", GCPhysFault, (RTGCPTR)pCtx->rip));
goto ResumeExecution;
Log2(("PGMR0Trap0eHandlerNPMisconfig(,,,%RGp) at %RGv -> resume\n", GCPhysFault, (RTGCPTR)pCtx->rip));
rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmShwPagingMode, errCode, CPUMCTX2CORE(pCtx), GCPhysFault);
Log2(("PGMR0Trap0eHandlerNestedPaging %RGv returned %Rrc\n", (RTGCPTR)pCtx->rip, VBOXSTRICTRC_VAL(rc)));
Log2(("Shadow page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, GCPhysFault, errCode));
goto ResumeExecution;
#ifdef VBOX_STRICT
case SVM_EXIT_VINTR:
goto ResumeExecution;
case SVM_EXIT_FERR_FREEZE:
case SVM_EXIT_INTR:
case SVM_EXIT_NMI:
case SVM_EXIT_SMI:
case SVM_EXIT_INIT:
case SVM_EXIT_WBINVD:
goto ResumeExecution;
goto ResumeExecution;
goto ResumeExecution;
goto ResumeExecution;
goto ResumeExecution;
case SVM_EXIT_WRITE_CR8: case SVM_EXIT_WRITE_CR9: case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:
case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:
AssertFailed();
goto ResumeExecution;
goto ResumeExecution;
case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9: case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:
case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
goto ResumeExecution;
goto ResumeExecution;
goto ResumeExecution;
goto ResumeExecution;
/* Note: We'll get a #GP if the IO instruction isn't allowed (IOPL or TSS bitmap); no need to double check. */
rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo r=ramshankar: would this really fall back to the recompiler and work? */
Log2(("IOMIOPortWrite %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal,
uIOSize));
Log2(("IOMIOPortRead %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, u32Val & uAndVal,
uIOSize));
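/*
 * Minimal sketch, not part of the original file, of how the value read above is
 * merged back: uAndVal (from g_aIOOpAnd) selects only the low bytes actually
 * accessed, so the untouched high bytes of EAX are preserved.
 */
pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);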
goto ResumeExecution;
goto ResumeExecution;
Log2(("EM status from IO at %RGv %x size %d: %Rrc\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize,
#ifdef VBOX_STRICT
case SVM_EXIT_HLT:
goto ResumeExecution;
case SVM_EXIT_MWAIT_UNCOND:
goto ResumeExecution;
AssertMsg(rc == VERR_EM_INTERPRETER || rc == VINF_EM_HALT, ("EMU: mwait failed with %Rrc\n", VBOXSTRICTRC_VAL(rc)));
case SVM_EXIT_MONITOR:
goto ResumeExecution;
case SVM_EXIT_VMMCALL:
case SVM_EXIT_RSM:
case SVM_EXIT_INVLPGA:
case SVM_EXIT_VMRUN:
case SVM_EXIT_VMLOAD:
case SVM_EXIT_VMSAVE:
case SVM_EXIT_STGI:
case SVM_EXIT_CLGI:
case SVM_EXIT_SKINIT:
goto ResumeExecution;
case SVM_EXIT_MSR:
goto ResumeExecution;
STAM_COUNTER_INC((pvVMCB->ctrl.u64ExitInfo1 == 0) ? &pVCpu->hm.s.StatExitRdmsr : &pVCpu->hm.s.StatExitWrmsr);
goto ResumeExecution;
AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: %s failed with %Rrc\n", (pvVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr",
case SVM_EVENT_EXTERNAL_IRQ:
case SVM_EVENT_NMI:
case SVM_EXIT_PAUSE:
case SVM_EXIT_MWAIT_ARMED:
case SVM_EXIT_SHUTDOWN:
case SVM_EXIT_IDTR_READ:
case SVM_EXIT_GDTR_READ:
case SVM_EXIT_LDTR_READ:
case SVM_EXIT_TR_READ:
case SVM_EXIT_IDTR_WRITE:
case SVM_EXIT_GDTR_WRITE:
case SVM_EXIT_LDTR_WRITE:
case SVM_EXIT_TR_WRITE:
case SVM_EXIT_CR0_SEL_WRITE:
end:
* If we executed vmrun and an external IRQ was pending, then we don't have to do a full sync the next time.
int rc;
bool fPending;
if (!pPatch)
case HMTPRINSTR_READ:
case HMTPRINSTR_WRITE_REG:
case HMTPRINSTR_WRITE_IMM:
return VINF_SUCCESS;
LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.uCurrentAsid));
return VINF_SUCCESS;
#ifdef DEBUG
return VINF_SUCCESS;
return VERR_EM_INTERPRETER;
case DISQPV_TYPE_IMMEDIATE:
case DISQPV_TYPE_ADDRESS:
return VERR_EM_INTERPRETER;
return VERR_EM_INTERPRETER;
return VINF_SUCCESS;
return rc;
return rc;
return VERR_EM_INTERPRETER;
if (!fFlushPending)
/* If we get a flush in 64-bit guest mode, then force a full TLB flush. Invlpga takes only 32-bit addresses. */
return VINF_SUCCESS;
/* invlpga only invalidates TLB entries for guest virtual addresses; we have no choice but to force a TLB flush here. */
return VINF_SUCCESS;
#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS HCPhysVMCBHost, RTHCPHYS HCPhysVMCB, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)
VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,
int rc;
rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
return rc;