/* HWSVMR0.cpp revision fb9d92987061bb5b851897b2e6730125e6c77ced */
/** @file
 * HM SVM (AMD-V) - Host Context Ring-0.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */
/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static int hmR0SvmInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame);
static int hmR0SvmEmulateTprVMMCall(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
static void hmR0SvmSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite);
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Convert hidden selector attribute word between VMX and SVM formats. */
#define SVM_HIDSEGATTR_VMX2SVM(a)     (((a) & 0xFF) | (((a) & 0xF000) >> 4))
#define SVM_HIDSEGATTR_SVM2VMX(a)     (((a) & 0xFF) | (((a) & 0x0F00) << 4))
        Assert(pCtx->reg.fFlags & CPUMSELREG_FLAGS_VALID); \
        pvVMCB->guest.REG.u16Attr = SVM_HIDSEGATTR_VMX2SVM(pCtx->reg.Attr.u); \
    } while (0)

        pCtx->reg.Attr.u = SVM_HIDSEGATTR_SVM2VMX(pvVMCB->guest.REG.u16Attr); \
    } while (0)
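/* The two fragments above are the tails of the SVM_WRITE_SELREG / SVM_READ_SELREG
 * macro pair built on the attribute converters. A minimal sketch of the write side,
 * assuming the VBox CPUMSELREG layout (Sel/u32Limit/u64Base/Attr); an illustration,
 * not the verbatim original:
 */
#if 0 /* sketch */
#define SVM_WRITE_SELREG(REG, reg) \
    do \
    { \
        Assert(pCtx->reg.fFlags & CPUMSELREG_FLAGS_VALID); \
        pvVMCB->guest.REG.u16Sel   = pCtx->reg.Sel; \
        pvVMCB->guest.REG.u32Limit = pCtx->reg.u32Limit; \
        pvVMCB->guest.REG.u64Base  = pCtx->reg.u64Base; \
        pvVMCB->guest.REG.u16Attr  = SVM_HIDSEGATTR_VMX2SVM(pCtx->reg.Attr.u); \
    } while (0)
#endif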
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/* IO operation lookup arrays. */
static uint32_t const g_aIOSize[8]  = {0, 1, 2, 0, 4, 0, 0, 0};
static uint32_t const g_aIOOpAnd[8] = {0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0};
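/* How these tables are used: on an SVM_EXIT_IOIO, bits 6:4 of the exit information
 * hold a one-hot operand-size field (1 = 8-bit, 2 = 16-bit, 4 = 32-bit), so indexing
 * with those three bits yields the access width and the AND mask to apply to RAX.
 * A sketch (local names are illustrative; treat the exact decode as an assumption):
 */
#if 0 /* sketch */
    uint64_t IoExitInfo = pvVMCB->ctrl.u64ExitInfo1;        /* SVM_EXIT_IOIO exit information. */
    unsigned uIdx       = (unsigned)(IoExitInfo >> 4) & 7;  /* One-hot size bits. */
    uint32_t cbIO       = g_aIOSize[uIdx];                  /* Access width in bytes. */
    uint32_t uAndVal    = g_aIOOpAnd[uIdx];                 /* Mask for the RAX operand. */
#endif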
/**
 * Sets up and activates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            Pointer to the CPU info struct.
 * @param   pVM             Pointer to the VM (can be NULL after a resume!).
 * @param   pvCpuPage       Pointer to the global CPU page.
 * @param   HCPhysCpuPage   Physical address of the global CPU page.
 * @param   fEnabledByHost  Whether AMD-V was already enabled by the host (must be false; asserted below).
 */
VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
    AssertReturn(!fEnabledByHost, VERR_INVALID_PARAMETER);
    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
    /*
     * We must turn on AMD-V and set up the host-state physical address, as those MSRs are per CPU/core.
     * If VBOX_HWVIRTEX_IGNORE_SVM_IN_USE is active, we blindly use AMD-V.
     */
    /* Turn on AMD-V in the EFER MSR. */
    /* Write the physical page address where the CPU will store the host state while executing the VM. */
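    /* A minimal sketch of the two steps just described (MSR names as defined by VBox
     * per the AMD manual; the surrounding status handling is elided in this fragment):
     */
    uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
    ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME);    /* EFER.SVME = 1 enables AMD-V. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);        /* Host save area used by VMRUN/#VMEXIT. */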
    /*
     * Theoretically, other hypervisors may have used ASIDs; ideally we should flush all non-zero ASIDs
     * when enabling SVM. AMD doesn't have an SVM instruction to flush all ASIDs (flushing is done
     * upon VMRUN). Therefore, just set the fFlushAsidBeforeUse flag, which instructs hmR0SvmSetupTLB()
     * to flush the TLB before using a new ASID.
     *
     * Ensure each VCPU scheduled on this CPU gets a new ASID on resume. See @bugref{6255}.
     */
/**
 * Deactivates AMD-V on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            Pointer to the CPU info struct.
 * @param   pvCpuPage       Pointer to the global CPU page.
 * @param   HCPhysCpuPage   Physical address of the global CPU page.
 */
VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);

    /* Turn off AMD-V in the EFER MSR. */
    /* Invalidate host state physical address. */
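    /* Sketch of the teardown mirroring SVMR0EnableCpu (same hedges apply):
     */
    uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
    ASMWrMsr(MSR_K6_EFER, fEfer & ~MSR_K6_EFER_SVME);   /* EFER.SVME = 0 disables AMD-V. */
    ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);                    /* No more host save area. */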
/**
 * Does Ring-0 per VM AMD-V init.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
    /* Allocate 12 KB for the IO bitmap (there doesn't seem to be a way to convince SVM not to use it). */
    rc = RTR0MemObjAllocCont(&pVM->hm.s.svm.hMemObjIOBitmap, 3 << PAGE_SHIFT, false /* fExecutable */);
    pVM->hm.s.svm.pvIOBitmap = RTR0MemObjAddress(pVM->hm.s.svm.hMemObjIOBitmap);
    pVM->hm.s.svm.HCPhysIOBitmap = RTR0MemObjGetPagePhysAddr(pVM->hm.s.svm.hMemObjIOBitmap, 0);

    /* Set all bits to intercept all IO accesses. */
    ASMMemFill32(pVM->hm.s.svm.pvIOBitmap, 3 << PAGE_SHIFT, 0xffffffff);
    /*
     * Erratum 170, which requires a forced TLB flush for each world switch:
     * See http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
     *
     * All BH-G1/2 and DH-G1/2 models include a fix:
     *   Athlon X2:  0x6b  1/2
     *   Athlon 64:  0x7f  1
     *   Sempron:    0x7f  1/2
     *   Turion 64:  0x68  2
     */
    uint32_t u32Dummy;
    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
    u32BaseFamily = (u32Version >> 8) & 0xf;    /* Standard CPUID leaf 1 decode; extended fields apply only for family 0xF. */
    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    u32Model      = (u32Version >> 4) & 0xf;
    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    u32Stepping   = u32Version & 0xf;
    if (    u32Family == 0xf
        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
    {
        Log(("SVMR0InitVM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
    }
    /* Allocate VMCBs for all guest CPUs. */
        /* Allocate one page for the host context. */
        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVMCBHost, 1 << PAGE_SHIFT, false /* fExecutable */);
        pVCpu->hm.s.svm.pvVMCBHost = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVMCBHost);
        pVCpu->hm.s.svm.HCPhysVMCBHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVMCBHost, 0);

        /* Allocate one page for the VM control block (VMCB). */
        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVMCB, 1 << PAGE_SHIFT, false /* fExecutable */);
        pVCpu->hm.s.svm.pvVMCB = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVMCB);
        pVCpu->hm.s.svm.HCPhysVMCB = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVMCB, 0);
        /* Allocate 8 KB for the MSR bitmap (there doesn't seem to be a way to convince SVM not to use it). */
        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
        pVCpu->hm.s.svm.pvMsrBitmap = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMsrBitmap);
        pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0);

        /* Set all bits to intercept all MSR accesses. */
        ASMMemFill32(pVCpu->hm.s.svm.pvMsrBitmap, 2 << PAGE_SHIFT, 0xffffffff);
/**
 * Does Ring-0 per VM AMD-V termination.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
        if (pVCpu->hm.s.svm.hMemObjVMCBHost != NIL_RTR0MEMOBJ)
            RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVMCBHost, false);
        if (pVCpu->hm.s.svm.hMemObjVMCB != NIL_RTR0MEMOBJ)
            RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVMCB, false);
        if (pVCpu->hm.s.svm.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
            RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjMsrBitmap, false);
    if (pVM->hm.s.svm.hMemObjIOBitmap != NIL_RTR0MEMOBJ)
        RTR0MemObjFree(pVM->hm.s.svm.hMemObjIOBitmap, false);
/**
 * Sets up AMD-V for the specified VM.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
    SVM_VMCB *pvVMCB = (SVM_VMCB *)pVM->aCpus[i].hm.s.svm.pvVMCB;
    AssertMsgReturn(pvVMCB, ("Invalid pvVMCB\n"), VERR_HMSVM_INVALID_PVMCB);

    /*
     * Program the control fields. Most of them never have to be changed again.
     * CR0/4 reads must be intercepted; our shadow values are not necessarily the same as the guest's.
     * Note: CR0 & CR4 can be safely read when guest and shadow copies are identical.
     */
    pvVMCB->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);

    /* CR0/4 writes must be intercepted for obvious reasons. */
    pvVMCB->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4);

    /* Intercept all DRx reads and writes by default. Changed later on. */

    /* Intercept traps; only #NM is always intercepted. */
    pvVMCB->ctrl.u32InterceptException  = RT_BIT(X86_XCPT_NM);
    pvVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);
    pvVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_BP);
    /* Set up instruction and miscellaneous intercepts. */
    pvVMCB->ctrl.u32InterceptCtrl1 = SVM_CTRL1_INTERCEPT_INTR
                                   | SVM_CTRL1_INTERCEPT_FERR_FREEZE;  /* Legacy FPU FERR handling. */
    pvVMCB->ctrl.u32InterceptCtrl2 = SVM_CTRL2_INTERCEPT_VMRUN         /* required */
                                   | SVM_CTRL2_INTERCEPT_MWAIT_UNCOND; /* Don't execute mwait or else we'll idle inside
                                                                          the guest (host thinks the CPU load is high). */

    Log(("pvVMCB->ctrl.u32InterceptException = %x\n", pvVMCB->ctrl.u32InterceptException));
    Log(("pvVMCB->ctrl.u32InterceptCtrl1     = %x\n", pvVMCB->ctrl.u32InterceptCtrl1));
    Log(("pvVMCB->ctrl.u32InterceptCtrl2     = %x\n", pvVMCB->ctrl.u32InterceptCtrl2));
    /* Virtualize masking of INTR interrupts. (Reads/writes from/to CR8 go to the V_TPR register.) */
    /* Ignore the priority in the TPR; just deliver it when we tell it to. */

    /* Set IO and MSR bitmap addresses. */
    pvVMCB->ctrl.u64IOPMPhysAddr  = pVM->hm.s.svm.HCPhysIOBitmap;
    pvVMCB->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMsrBitmap;

    /* No LBR virtualization. */

    /* The ASID must start at 1; the host uses 0. */
    /*
     * Setup the PAT MSR (nested paging only).
     * The default value should be 0x0007040600070406ULL, but we want to treat all guest memory as WB,
     * so choose type 6 (WB) for all PAT slots.
     */
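    /* A sketch of the value described above: PAT type 6 (write-back) replicated into all
     * eight 8-bit PAT slots. The u64GPAT field name follows the VBox SVM_VMCB guest-state
     * layout and should be treated as an assumption in this fragment:
     */
    pvVMCB->guest.u64GPAT = 0x0006060606060606ULL;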
    /* If nested paging is not in use, additional intercepts have to be set up. */
    /* CR3 reads/writes must be intercepted; our shadow values differ from the guest's. */

    /*
     * We must also intercept:
     * - INVLPG (must go through shadow paging)
     * - task switches (may change CR3/EFLAGS/LDT)
     */
    pvVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_INVLPG
                                    | SVM_CTRL1_INTERCEPT_TASK_SWITCH;

    /* Page faults must be intercepted to implement shadow paging. */
    pvVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);
    /*
     * The following MSRs are saved automatically by vmload/vmsave, so we allow the guest
     * to modify them directly.
     */
    hmR0SvmSetMSRPermission(pVCpu, MSR_K8_LSTAR, true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_K8_CSTAR, true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_K6_STAR, true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_K8_SF_MASK, true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_K8_FS_BASE, true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_K8_GS_BASE, true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_CS, true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_ESP, true, true);
    hmR0SvmSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_EIP, true, true);
/**
 * Sets the permission bits for the specified MSR.
 *
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   ulMSR   MSR value.
 * @param   fRead   Whether reading is allowed.
 * @param   fWrite  Whether writing is allowed.
 */
static void hmR0SvmSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite)
    uint8_t *pvMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;

    /* Pentium-compatible MSRs */
    /* AMD Sixth Generation x86 Processor MSRs and SYSCALL */
    /* AMD Seventh and Eighth Generation Processor MSRs */
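    /* A sketch of the bit-offset computation the three range comments above refer to,
     * assuming the standard SVM MSR-permission-map layout: 2 bits per MSR (read intercept,
     * then write intercept) and one 2 KB block per MSR range; range bounds per the AMD
     * manual. The exact original code may differ in details.
     */
#if 0 /* sketch */
    unsigned ulBit;
    if (ulMSR <= 0x00001FFF)
        ulBit = ulMSR * 2;                                  /* Block at offset 0x000. */
    else if (ulMSR >= 0xC0000000 && ulMSR <= 0xC0001FFF)
    {
        ulBit = (ulMSR - 0xC0000000) * 2;                   /* Block at offset 0x800. */
        pvMsrBitmap += 0x800;
    }
    else if (ulMSR >= 0xC0010000 && ulMSR <= 0xC0011FFF)
    {
        ulBit = (ulMSR - 0xC0010000) * 2;                   /* Block at offset 0x1000. */
        pvMsrBitmap += 0x1000;
    }
    else
    {
        AssertFailed();
        return;
    }

    /* A clear bit means "don't intercept", i.e. the access is allowed. */
    if (fRead)
        ASMBitClear(pvMsrBitmap, ulBit);
    else
        ASMBitSet(pvMsrBitmap, ulBit);
    if (fWrite)
        ASMBitClear(pvMsrBitmap, ulBit + 1);
    else
        ASMBitSet(pvMsrBitmap, ulBit + 1);
#endif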
/**
 * Injects an event (trap or external interrupt).
 *
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pvVMCB  Pointer to the VMCB.
 * @param   pCtx    Pointer to the guest CPU context.
 * @param   pEvent  Pointer to the SVM event to inject.
 */
DECLINLINE(void) hmR0SvmInjectEvent(PVMCPU pVCpu, SVM_VMCB *pvVMCB, CPUMCTX *pCtx, SVM_EVENT *pEvent)
    STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
    Log(("SVM: Inject int %d at %RGv error code=%02x CR2=%RGv intInfo=%08x\n", pEvent->n.u8Vector,
         (RTGCPTR)pCtx->rip, pEvent->n.u32ErrorCode, (RTGCPTR)pCtx->cr2, pEvent->au64[0]));
    Log(("SVM: Inject int %d at %RGv error code=%08x\n", pEvent->n.u8Vector, (RTGCPTR)pCtx->rip, pEvent->n.u32ErrorCode));
    Log(("INJ-EI: %x at %RGv\n", pEvent->n.u8Vector, (RTGCPTR)pCtx->rip));
    Assert(!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));

    /* Set event injection state. */
    pvVMCB->ctrl.EventInject.au64[0] = pEvent->au64[0];
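/* Usage sketch: how a caller would typically fill an SVM_EVENT before calling
 * hmR0SvmInjectEvent. The field names match the EventInject fields logged later
 * in this file; the #PF vector and the uPfErrorCode local are illustrative only.
 */
#if 0 /* sketch */
    SVM_EVENT Event;
    Event.au64[0]            = 0;
    Event.n.u8Vector         = X86_XCPT_PF;
    Event.n.u3Type           = SVM_EVENT_EXCEPTION;
    Event.n.u1ErrorCodeValid = 1;
    Event.n.u32ErrorCode     = uPfErrorCode;    /* hypothetical local */
    Event.n.u1Valid          = 1;
    hmR0SvmInjectEvent(pVCpu, pvVMCB, pCtx, &Event);
#endif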
/**
 * Checks for pending guest interrupts and injects them.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pvVMCB  Pointer to the VMCB.
 * @param   pCtx    Pointer to the guest CPU context.
 */
static int hmR0SvmCheckPendingInterrupt(PVM pVM, PVMCPU pVCpu, SVM_VMCB *pvVMCB, CPUMCTX *pCtx)
    /*
     * Dispatch any pending interrupts (injected before, but a VM-exit occurred prematurely).
     */
    Log(("Reinjecting event %08x %08x at %RGv\n", pVCpu->hm.s.Event.intInfo, pVCpu->hm.s.Event.errCode,

    /*
     * If an active trap is already pending, we must forward it first!
     */
    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI))
    /** @todo SMI interrupts. */
    /*
     * When external interrupts are pending, we should exit the VM when IF is set.
     */
    if (VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
        || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
        if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
            Log(("Pending interrupt blocked at %RGv by VM_FF_INHIBIT_INTERRUPTS -> irq window exit\n",
            /** @todo Use the virtual interrupt method to inject a pending IRQ; dispatched as
             *        soon as guest.IF is set. */
            pvVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
            pvVMCB->ctrl.IntCtrl.n.u8VIrqVector = 0;    /* don't care */
        Log(("Dispatch interrupt: u8Interrupt=%x (%d) rc=%Rrc\n", u8Interrupt, u8Interrupt, rc));
        rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
        /* Can only happen in rare cases where a pending interrupt is cleared behind our back. */
        Assert(!VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
        /* Just continue. */
        && !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    /* If a new event is pending, dispatch it now. */
    rc = TRPMQueryTrapAll(pVCpu, &u8Vector, &enmType, &u32ErrorCode, 0);
    Assert(pCtx->eflags.Bits.u1IF == 1 || enmType == TRPM_TRAP);

    /* Clear the pending trap. */
    /* Valid error codes. */
    } /* if (interrupts can be dispatched) */
/**
 * Saves the host state.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu)
    /* Nothing to do here. */
/**
 * Loads the guest state.
 *
 * NOTE: Don't do anything here that can cause a jump back to ring-3!!!
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pCtx    Pointer to the guest CPU context.
 */
VMMR0DECL(int) SVMR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    /* Setup AMD SVM. */
    AssertMsgReturn(pvVMCB, ("Invalid pvVMCB\n"), VERR_HMSVM_INVALID_PVMCB);

    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)

    /* Guest CPU context: LDTR. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)

    /* Guest CPU context: TR. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)

    /* Guest CPU context: GDTR. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)

    /* Guest CPU context: IDTR. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
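    /* Sketch of what the segment-register branch above does with the SVM_WRITE_SELREG
     * macro from the top of the file (this usage pattern is the macro's purpose):
     */
#if 0 /* sketch */
        SVM_WRITE_SELREG(CS, cs);
        SVM_WRITE_SELREG(SS, ss);
        SVM_WRITE_SELREG(DS, ds);
        SVM_WRITE_SELREG(ES, es);
        SVM_WRITE_SELREG(FS, fs);
        SVM_WRITE_SELREG(GS, gs);
#endif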
    /*
     * Sysenter MSRs (unconditional).
     */

    /* Control registers. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
        /* Always use #NM exceptions to load the FPU/XMM state on demand. */
        val |= X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
        /** @todo check if we support the old style mess correctly. */
        /* Also catch floating point exceptions, as we need to report them to the guest in a different way. */
        pvVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_MF);
        val |= X86_CR0_NE;  /* Always turn on the native mechanism to report FPU errors (old style uses interrupts). */
        /* Always enable caching. */

        /*
         * Note: WP is not relevant in nested paging mode, as we catch accesses at the (guest-)physical level.
         * Note: In nested paging mode, the guest is allowed to run with paging disabled; the guest-physical to
         *       host-physical translation will remain active.
         */
        val |= X86_CR0_PG;  /* Paging is always enabled, even when the guest runs in real mode or protected mode without paging. */
        val |= X86_CR0_WP;  /* Must be set, as we rely on protecting various pages and supervisor writes must be caught. */

    /* CR2 as well. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
        /* Save our shadow CR3 register. */
        pvVMCB->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode);
        Assert(pvVMCB->guest.u64CR3 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
        case PGMMODE_PROTECTED:     /* Protected mode, no paging. */
        case PGMMODE_PAE_NX:        /* PAE paging with NX enabled. */
            /* Must use PAE paging, as we could use physical memory > 4 GB. */
        case PGMMODE_AMD64:         /* 64-bit AMD paging (long mode). */
        case PGMMODE_AMD64_NX:      /* 64-bit AMD paging (long mode) with NX enabled. */
        default:                    /* shut up gcc */
    /* Debug registers. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
        pCtx->dr[6] |= X86_DR6_INIT_VAL;                                     /* Set all reserved bits to 1. */
        pCtx->dr[7] &= 0xffffffff;                                           /* Upper 32 bits are reserved. */
        pCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* Must be zero. */

    /* Sync the hypervisor debug state now if any breakpoint is armed. */
    if (    CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK | X86_DR7_GD)
        /* Save the host and load the hypervisor debug state. */
        int rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pCtx, false /* exclude DR6 */);
        /* DRx move intercepts remain enabled. */
        /* Override dr6 & dr7 with the hypervisor values. */

    /* Sync the guest debug state now if any breakpoint is armed. */
    if (    (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
        /* Disable DRx move intercepts. */
        /* Save the host and load the guest debug state. */
        int rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, false /* exclude DR6 */);
    /* EIP, ESP and EFLAGS. */

    /* Set CPL. */

    /* RAX/EAX too, as VMRUN uses RAX as an implicit parameter. */

    /* vmrun will fail without MSR_K6_EFER_SVME. */
    pvVMCB->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;

    /* 64-bit guest mode? */
#elif HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    /* Unconditionally update these, as wrmsr might have changed them (HM_CHANGED_GUEST_SEGMENT_REGS will not be set). */

    /* Filter out the MSR_K6_LME bit, or else AMD-V expects amd64 shadow paging. */
    /* TSC offset. */
    if (TMCpuTickCanUseRealTSC(pVCpu, &pvVMCB->ctrl.u64TSCOffset))
        if (u64CurTSC + pvVMCB->ctrl.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
            pvVMCB->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
            pvVMCB->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP;
            /* Fall back to rdtsc emulation, as we would otherwise pass decreasing TSC values to the guest. */
            LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC,
                     pvVMCB->ctrl.u64TSCOffset, u64CurTSC + pvVMCB->ctrl.u64TSCOffset, TMCpuTickGetLastSeen(pVCpu),
                     TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pvVMCB->ctrl.u64TSCOffset, TMCpuTickGet(pVCpu)));
            pvVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
            pvVMCB->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCInterceptOverFlow);
        pvVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
        pvVMCB->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
    /* Sync the various MSRs for 64-bit mode. */
    pvVMCB->guest.u64STAR         = pCtx->msrSTAR;          /* Legacy syscall eip, cs & ss. */
    pvVMCB->guest.u64LSTAR        = pCtx->msrLSTAR;         /* 64-bit mode syscall rip. */
    pvVMCB->guest.u64CSTAR        = pCtx->msrCSTAR;         /* Compatibility mode syscall rip. */
    pvVMCB->guest.u64SFMASK       = pCtx->msrSFMASK;        /* Syscall flag mask. */
    pvVMCB->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;  /* SWAPGS exchange value. */
    /* Intercept X86_XCPT_DB if stepping is enabled. */
        pvVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_DB);
        pvVMCB->ctrl.u32InterceptException &= ~RT_BIT(X86_XCPT_DB);

    /* Done. */
    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_ALL_GUEST;
/**
 * Sets up the TLB for ASIDs.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
    SVM_VMCB *pvVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pvVMCB;
    /*
     * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
     * This can happen both for start & resume due to long jumps back to ring-3.
     * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB,
     * so we cannot reuse the ASIDs without flushing.
     */
    bool fNewAsid = false;

    /*
     * Set TLB flush state as checked until we return from the world switch.
     */
    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
    /*
     * Check for TLB shootdown flushes.
     */
    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))

    pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;

    /*
     * This is AMD erratum 170; we need to flush the entire TLB for each world switch. Sad.
     */
    pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
    pCpu->uCurrentAsid = 1;     /* Start at 1; the host uses 0. */
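    /* For orientation, a sketch of the usual ASID rollover logic this assignment belongs
     * to (the pattern VBox uses on both VT-x and AMD-V; treat the details as assumptions):
     */
#if 0 /* sketch */
    if (++pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
    {
        pCpu->uCurrentAsid = 1;     /* Wrap around; ASID 0 is reserved for the host. */
        pCpu->cTlbFlushes++;        /* Force every VCPU on this CPU to flush before reuse. */
    }
    pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
#endif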
    if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
        pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
    else
        pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;

    if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
        pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
    else
        pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;

    if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
        pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
    else
        pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
    /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
     *        not be executed. See hmQueueInvlPage() where it is commented
     *        out. Support individual entry flushing someday. */
    if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
        /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
        for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
            SVMR0InvlpgA(pVCpu->hm.s.TlbShootdown.aPages[i], pvVMCB->ctrl.TLBCtrl.n.u32ASID);
    /* Update VMCB with the ASID. */
    pvVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid;

    AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
    AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
              ("cpu%d uCurrentAsid = %x\n", pCpu->idCpu, pCpu->uCurrentAsid));
    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
              ("cpu%d VM uCurrentAsid = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));

    if (pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
        STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch);
    else if (   pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
             || pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBWorldSwitch);
/**
 * Runs guest code in an AMD-V VM.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pCtx    Pointer to the guest CPU context.
 */
VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);

    bool     fSyncTPR  = false;
    unsigned cResume   = 0;
    uint8_t  u8LastTPR = 0; /* Initialized for potentially stupid compilers. */
    AssertMsgReturn(pvVMCB, ("Invalid pvVMCB\n"), VERR_HMSVM_INVALID_PVMCB);

    /*
     * We can jump to this point to resume execution after determining that a VM-exit is innocent.
     */
    if (!STAM_PROFILE_ADV_IS_RUNNING(&pVCpu->hm.s.StatEntry))
        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit2, &pVCpu->hm.s.StatEntry, x);
    /*
     * Safety precaution: looping for too long here can have a very bad effect on the host.
     */
    if (RT_UNLIKELY(++cResume > pVM->hm.s.cMaxResumeLoops))

    /*
     * Check for IRQ inhibition due to instruction fusing (sti, mov ss).
     */
    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
        Log(("VM_FF_INHIBIT_INTERRUPTS at %RGv successor %RGv\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
        /*
         * Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here.
         * Before we are able to execute this instruction in raw mode (iret to guest code), an external
         * interrupt might force a world switch again, possibly allowing a guest interrupt to be dispatched
         * in the process, which could break the guest. Sounds very unlikely, but such timing sensitive
         * problems are not as rare as you might think.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
        /* Irq inhibition is no longer active; clear the corresponding SVM state. */

    /* Irq inhibition is no longer active; clear the corresponding SVM state. */
    /*
     * Check for pending actions that force us to go back to ring-3.
     */
    if (    VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)

    /* Check if a sync operation is pending. */
    if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
        rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
        Log(("Pending pool sync is forcing us back to ring 3; rc=%d\n", VBOXSTRICTRC_VAL(rc)));

    /* Intercept X86_XCPT_DB if stepping is enabled. */
        || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
        rc = RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;

    /* Pending request packets might contain actions that need immediate attention, such as pending hardware interrupts. */

    /* Check if a pgm pool flush is in progress. */
    if (VM_FF_ISPENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))

    /* Check if DMA work is pending (2nd+ run). */
    if (VM_FF_ISPENDING(pVM, VM_FF_PDM_DMA) && cResume > 1)
    /*
     * Exit to ring-3; preemption/work is pending.
     *
     * Interrupts are disabled before the call to make sure we don't miss any interrupt
     * that would flag preemption (IPI, timer tick, ++). (Would've been nice to do this
     * further down, but hmR0SvmCheckPendingInterrupt makes that impossible.)
     *
     * Note! Interrupts must be disabled *before* we check for TLB flushes; TLB
     *       shootdowns rely on this.
     */
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptPending);

    /*
     * When external interrupts are pending, we should exit the VM when IF is set.
     * Note: *after* the VM_FF_INHIBIT_INTERRUPTS check!
     */
    rc = hmR0SvmCheckPendingInterrupt(pVM, pVCpu, pvVMCB, pCtx);
    /*
     * TPR caching using CR8 is only available in 64-bit mode or with 32-bit guests when
     * X86_CPUID_AMD_FEATURE_ECX_CR8L is supported.
     * Note: we can't do this in LoadGuestState, as PDMApicGetTPR can jump back to ring-3 (lock)! (no longer true)
     */
    /** @todo query and update the TPR only when it could have been changed (mmio access). */

    /* TPR caching in CR8. */

    /* Our patch code uses LSTAR for TPR caching. */
        /* A TPR change could activate a pending interrupt, so catch lstar writes. */
        hmR0SvmSetMSRPermission(pVCpu, MSR_K8_LSTAR, true, false);
        /*
         * No interrupts are pending, so we don't need to be explicitly notified.
         * There are enough world switches for detecting pending interrupts.
         */
        hmR0SvmSetMSRPermission(pVCpu, MSR_K8_LSTAR, true, true);

        /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
        /* A TPR change could activate a pending interrupt, so catch cr8 writes. */
        /*
         * No interrupts are pending, so we don't need to be explicitly notified.
         * There are enough world switches for detecting pending interrupts.
         */
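        /* Sketch of the CR8 mapping just described (assumes u8LastTPR holds the 8-bit
         * APIC TPR value queried via PDMApicGetTPR earlier in this block):
         */
        pvVMCB->ctrl.IntCtrl.n.u8VTPR = u8LastTPR >> 4;     /* V_TPR takes TPR bits 7-4 in its low nibble. */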
    /* All done! Let's start VM execution. */

    /* Enable nested paging if necessary (disabled each time after #VMEXIT). */
    pvVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;

        LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hm.s.idLastCpu, pCpu->idCpu));
    else if (pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
        LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
    else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
    /*
     * NOTE: DO NOT DO ANYTHING AFTER THIS POINT THAT MIGHT JUMP BACK TO RING-3!
     *       (until the actual world switch)
     */

    /*
     * Load the guest state; *must* be here as it sets up the shadow CR0 for lazy FPU syncing!
     */

    /*
     * Disable interrupts to make sure a poke will interrupt execution.
     * This must be done *before* we check for TLB flushes; TLB shootdowns rely on this.
     */
    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);

    /* Setup TLB control and ASID in the VMCB. */

    /* In case we execute a goto ResumeExecution later on. */
    pVCpu->hm.s.fForceTLBFlush = pVM->hm.s.svm.fAlwaysFlushTLB;

    Assert(pvVMCB->ctrl.u64IOPMPhysAddr  == pVM->hm.s.svm.HCPhysIOBitmap);
    Assert(pvVMCB->ctrl.u64MSRPMPhysAddr == pVCpu->hm.s.svm.HCPhysMsrBitmap);
    /*
     * Save the current host TSC_AUX and write the guest TSC_AUX to the host, so that
     * RDTSCP (which doesn't cause an exit here) reads the guest MSR. See @bugref{3324}.
     */
    u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
    if (    (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
        && !(pvVMCB->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP))
        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
        rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);

    hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVMCBHost, pVCpu->hm.s.svm.HCPhysVMCB, pCtx, pVM, pVCpu,
    pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVMCBHost, pVCpu->hm.s.svm.HCPhysVMCB, pCtx, pVM, pVCpu);
    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);

    /* Possibly the last TSC value seen by the guest (too high) (only applies when we're in TSC offset mode). */
    if (!(pvVMCB->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))

    /* Restore the host's TSC_AUX. */
    if (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
        ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);

                          pvVMCB->ctrl.u64TSCOffset - 0x400 /* guesstimate of world switch overhead in clock ticks */);
    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
    /*
     * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     * IMPORTANT: WE CAN'T DO ANY LOGGING OR OPERATIONS THAT CAN DO A LONGJMP BACK TO RING-3
     *            *BEFORE* WE'VE SYNCED BACK (MOST OF) THE GUEST STATE!
     * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     */

    /* Reason for the VM exit. */
    if (RT_UNLIKELY(exitCode == (uint64_t)SVM_EXIT_INVALID))    /* Invalid guest state. */
        Log(("ctrl.u16InterceptRdCRx            %x\n",    pvVMCB->ctrl.u16InterceptRdCRx));
        Log(("ctrl.u16InterceptWrCRx            %x\n",    pvVMCB->ctrl.u16InterceptWrCRx));
        Log(("ctrl.u16InterceptRdDRx            %x\n",    pvVMCB->ctrl.u16InterceptRdDRx));
        Log(("ctrl.u16InterceptWrDRx            %x\n",    pvVMCB->ctrl.u16InterceptWrDRx));
        Log(("ctrl.u32InterceptException        %x\n",    pvVMCB->ctrl.u32InterceptException));
        Log(("ctrl.u32InterceptCtrl1            %x\n",    pvVMCB->ctrl.u32InterceptCtrl1));
        Log(("ctrl.u32InterceptCtrl2            %x\n",    pvVMCB->ctrl.u32InterceptCtrl2));
        Log(("ctrl.u64IOPMPhysAddr              %RX64\n", pvVMCB->ctrl.u64IOPMPhysAddr));
        Log(("ctrl.u64MSRPMPhysAddr             %RX64\n", pvVMCB->ctrl.u64MSRPMPhysAddr));
        Log(("ctrl.u64TSCOffset                 %RX64\n", pvVMCB->ctrl.u64TSCOffset));
        Log(("ctrl.TLBCtrl.u32ASID              %x\n",    pvVMCB->ctrl.TLBCtrl.n.u32ASID));
        Log(("ctrl.TLBCtrl.u8TLBFlush           %x\n",    pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush));
        Log(("ctrl.TLBCtrl.u24Reserved          %x\n",    pvVMCB->ctrl.TLBCtrl.n.u24Reserved));
        Log(("ctrl.IntCtrl.u8VTPR               %x\n",    pvVMCB->ctrl.IntCtrl.n.u8VTPR));
        Log(("ctrl.IntCtrl.u1VIrqValid          %x\n",    pvVMCB->ctrl.IntCtrl.n.u1VIrqValid));
        Log(("ctrl.IntCtrl.u7Reserved           %x\n",    pvVMCB->ctrl.IntCtrl.n.u7Reserved));
        Log(("ctrl.IntCtrl.u4VIrqPriority       %x\n",    pvVMCB->ctrl.IntCtrl.n.u4VIrqPriority));
        Log(("ctrl.IntCtrl.u1IgnoreTPR          %x\n",    pvVMCB->ctrl.IntCtrl.n.u1IgnoreTPR));
        Log(("ctrl.IntCtrl.u3Reserved           %x\n",    pvVMCB->ctrl.IntCtrl.n.u3Reserved));
        Log(("ctrl.IntCtrl.u1VIrqMasking        %x\n",    pvVMCB->ctrl.IntCtrl.n.u1VIrqMasking));
        Log(("ctrl.IntCtrl.u7Reserved2          %x\n",    pvVMCB->ctrl.IntCtrl.n.u7Reserved2));
        Log(("ctrl.IntCtrl.u8VIrqVector         %x\n",    pvVMCB->ctrl.IntCtrl.n.u8VIrqVector));
        Log(("ctrl.IntCtrl.u24Reserved          %x\n",    pvVMCB->ctrl.IntCtrl.n.u24Reserved));
        Log(("ctrl.u64IntShadow                 %RX64\n", pvVMCB->ctrl.u64IntShadow));
        Log(("ctrl.u64ExitCode                  %RX64\n", pvVMCB->ctrl.u64ExitCode));
        Log(("ctrl.u64ExitInfo1                 %RX64\n", pvVMCB->ctrl.u64ExitInfo1));
        Log(("ctrl.u64ExitInfo2                 %RX64\n", pvVMCB->ctrl.u64ExitInfo2));
        Log(("ctrl.ExitIntInfo.u8Vector         %x\n",    pvVMCB->ctrl.ExitIntInfo.n.u8Vector));
        Log(("ctrl.ExitIntInfo.u3Type           %x\n",    pvVMCB->ctrl.ExitIntInfo.n.u3Type));
        Log(("ctrl.ExitIntInfo.u1ErrorCodeValid %x\n",    pvVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid));
        Log(("ctrl.ExitIntInfo.u19Reserved      %x\n",    pvVMCB->ctrl.ExitIntInfo.n.u19Reserved));
        Log(("ctrl.ExitIntInfo.u1Valid          %x\n",    pvVMCB->ctrl.ExitIntInfo.n.u1Valid));
        Log(("ctrl.ExitIntInfo.u32ErrorCode     %x\n",    pvVMCB->ctrl.ExitIntInfo.n.u32ErrorCode));
        Log(("ctrl.NestedPaging                 %RX64\n", pvVMCB->ctrl.NestedPaging.au64));
        Log(("ctrl.EventInject.u8Vector         %x\n",    pvVMCB->ctrl.EventInject.n.u8Vector));
        Log(("ctrl.EventInject.u3Type           %x\n",    pvVMCB->ctrl.EventInject.n.u3Type));
        Log(("ctrl.EventInject.u1ErrorCodeValid %x\n",    pvVMCB->ctrl.EventInject.n.u1ErrorCodeValid));
        Log(("ctrl.EventInject.u19Reserved      %x\n",    pvVMCB->ctrl.EventInject.n.u19Reserved));
        Log(("ctrl.EventInject.u1Valid          %x\n",    pvVMCB->ctrl.EventInject.n.u1Valid));
        Log(("ctrl.EventInject.u32ErrorCode     %x\n",    pvVMCB->ctrl.EventInject.n.u32ErrorCode));
        Log(("ctrl.u64NestedPagingCR3           %RX64\n", pvVMCB->ctrl.u64NestedPagingCR3));
        Log(("ctrl.u64LBRVirt                   %RX64\n", pvVMCB->ctrl.u64LBRVirt));
        Log(("guest.CS.u16Sel                   %04X\n",  pvVMCB->guest.CS.u16Sel));
        Log(("guest.CS.u16Attr                  %04X\n",  pvVMCB->guest.CS.u16Attr));
        Log(("guest.CS.u32Limit                 %X\n",    pvVMCB->guest.CS.u32Limit));
        Log(("guest.CS.u64Base                  %RX64\n", pvVMCB->guest.CS.u64Base));
        Log(("guest.DS.u16Sel                   %04X\n",  pvVMCB->guest.DS.u16Sel));
        Log(("guest.DS.u16Attr                  %04X\n",  pvVMCB->guest.DS.u16Attr));
        Log(("guest.DS.u32Limit                 %X\n",    pvVMCB->guest.DS.u32Limit));
        Log(("guest.DS.u64Base                  %RX64\n", pvVMCB->guest.DS.u64Base));
        Log(("guest.ES.u16Sel                   %04X\n",  pvVMCB->guest.ES.u16Sel));
        Log(("guest.ES.u16Attr                  %04X\n",  pvVMCB->guest.ES.u16Attr));
        Log(("guest.ES.u32Limit                 %X\n",    pvVMCB->guest.ES.u32Limit));
        Log(("guest.ES.u64Base                  %RX64\n", pvVMCB->guest.ES.u64Base));
        Log(("guest.FS.u16Sel                   %04X\n",  pvVMCB->guest.FS.u16Sel));
        Log(("guest.FS.u16Attr                  %04X\n",  pvVMCB->guest.FS.u16Attr));
        Log(("guest.FS.u32Limit                 %X\n",    pvVMCB->guest.FS.u32Limit));
        Log(("guest.FS.u64Base                  %RX64\n", pvVMCB->guest.FS.u64Base));
        Log(("guest.GS.u16Sel                   %04X\n",  pvVMCB->guest.GS.u16Sel));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.GS.u16Attr %04X\n", pvVMCB->guest.GS.u16Attr));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.GS.u32Limit %X\n", pvVMCB->guest.GS.u32Limit));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.GS.u64Base %RX64\n", pvVMCB->guest.GS.u64Base));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.GDTR.u32Limit %X\n", pvVMCB->guest.GDTR.u32Limit));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.GDTR.u64Base %RX64\n", pvVMCB->guest.GDTR.u64Base));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.LDTR.u16Sel %04X\n", pvVMCB->guest.LDTR.u16Sel));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.LDTR.u16Attr %04X\n", pvVMCB->guest.LDTR.u16Attr));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.LDTR.u32Limit %X\n", pvVMCB->guest.LDTR.u32Limit));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.LDTR.u64Base %RX64\n", pvVMCB->guest.LDTR.u64Base));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.IDTR.u32Limit %X\n", pvVMCB->guest.IDTR.u32Limit));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.IDTR.u64Base %RX64\n", pvVMCB->guest.IDTR.u64Base));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.TR.u16Sel %04X\n", pvVMCB->guest.TR.u16Sel));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.TR.u16Attr %04X\n", pvVMCB->guest.TR.u16Attr));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.TR.u32Limit %X\n", pvVMCB->guest.TR.u32Limit));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.TR.u64Base %RX64\n", pvVMCB->guest.TR.u64Base));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64CR0 %RX64\n", pvVMCB->guest.u64CR0));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64CR2 %RX64\n", pvVMCB->guest.u64CR2));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64CR3 %RX64\n", pvVMCB->guest.u64CR3));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64CR4 %RX64\n", pvVMCB->guest.u64CR4));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64DR6 %RX64\n", pvVMCB->guest.u64DR6));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64DR7 %RX64\n", pvVMCB->guest.u64DR7));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64RIP %RX64\n", pvVMCB->guest.u64RIP));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64RSP %RX64\n", pvVMCB->guest.u64RSP));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64RAX %RX64\n", pvVMCB->guest.u64RAX));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64RFlags %RX64\n", pvVMCB->guest.u64RFlags));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64SysEnterCS %RX64\n", pvVMCB->guest.u64SysEnterCS));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64SysEnterEIP %RX64\n", pvVMCB->guest.u64SysEnterEIP));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64SysEnterESP %RX64\n", pvVMCB->guest.u64SysEnterESP));
1ab072338283b0c700d1ef4958511296a4d0ca41vboxsync Log(("guest.u64EFER %RX64\n", pvVMCB->guest.u64EFER));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64STAR %RX64\n", pvVMCB->guest.u64STAR));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64LSTAR %RX64\n", pvVMCB->guest.u64LSTAR));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64CSTAR %RX64\n", pvVMCB->guest.u64CSTAR));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64SFMASK %RX64\n", pvVMCB->guest.u64SFMASK));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64KernelGSBase %RX64\n", pvVMCB->guest.u64KernelGSBase));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64GPAT %RX64\n", pvVMCB->guest.u64GPAT));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64DBGCTL %RX64\n", pvVMCB->guest.u64DBGCTL));
ae0f2178b9a5aded928e0245cb830ba1d3d04c57vboxsync Log(("guest.u64BR_FROM %RX64\n", pvVMCB->guest.u64BR_FROM));
        goto end;
    }
Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVCpu->hm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode));
#ifdef LOG_ENABLED
#ifdef VBOX_WITH_STATISTICS
if (fSyncTPR)
VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, exitCode, pvVMCB->ctrl.u64ExitInfo1, pvVMCB->ctrl.u64ExitInfo2,
switch (exitCode)
case SVM_EXIT_EXCEPTION_0: case SVM_EXIT_EXCEPTION_1: case SVM_EXIT_EXCEPTION_2: case SVM_EXIT_EXCEPTION_3:
case SVM_EXIT_EXCEPTION_4: case SVM_EXIT_EXCEPTION_5: case SVM_EXIT_EXCEPTION_6: case SVM_EXIT_EXCEPTION_7:
case SVM_EXIT_EXCEPTION_8: case SVM_EXIT_EXCEPTION_9: case SVM_EXIT_EXCEPTION_A: case SVM_EXIT_EXCEPTION_B:
case SVM_EXIT_EXCEPTION_C: case SVM_EXIT_EXCEPTION_D: case SVM_EXIT_EXCEPTION_E: case SVM_EXIT_EXCEPTION_F:
case SVM_EXIT_EXCEPTION_10: case SVM_EXIT_EXCEPTION_11: case SVM_EXIT_EXCEPTION_12: case SVM_EXIT_EXCEPTION_13:
case SVM_EXIT_EXCEPTION_14: case SVM_EXIT_EXCEPTION_15: case SVM_EXIT_EXCEPTION_16: case SVM_EXIT_EXCEPTION_17:
case SVM_EXIT_EXCEPTION_18: case SVM_EXIT_EXCEPTION_19: case SVM_EXIT_EXCEPTION_1A: case SVM_EXIT_EXCEPTION_1B:
case SVM_EXIT_EXCEPTION_1C: case SVM_EXIT_EXCEPTION_1D: case SVM_EXIT_EXCEPTION_1E: case SVM_EXIT_EXCEPTION_1F:
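            /* The exception vector can be recovered from the exit code itself; a minimal sketch,
               assuming the SVM_EXIT_EXCEPTION_n codes are contiguous (as the AMD-V spec lays them out): */
            uint32_t vector = (uint32_t)(exitCode - SVM_EXIT_EXCEPTION_0);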
switch (vector)
case X86_XCPT_DB:
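                /* A sketch of giving the ring-0 debugger first shot at the trap; DBGFRZTrap01Handler
                   and its exact signature are assumptions here. VINF_EM_DBG_STEPPED means the host
                   owned the breakpoint and we can simply resume: */
                rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), pCtx->dr[6]);
                if (rc == VINF_EM_DBG_STEPPED)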
goto ResumeExecution;
Log(("Debugger hardware BP at %04x:%RGv (rc=%Rrc)\n", pCtx->cs.Sel, pCtx->rip, VBOXSTRICTRC_VAL(rc)));
case X86_XCPT_NM:
/* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
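                /* A minimal sketch of that on-demand sync (CPUMR0LoadGuestFPU is an assumption;
                   the intercept field matches the VMCB dump above): restore the guest FPU/XMM
                   state now and stop intercepting #NM from here on. */
                CPUMR0LoadGuestFPU(pVM, pVCpu, pCtx);                        /* load the guest FPU/XMM state */
                pvVMCB->ctrl.u32InterceptException &= ~RT_BIT(X86_XCPT_NM);  /* no longer trap device-not-available */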
goto ResumeExecution;
goto ResumeExecution;
#ifdef VBOX_ALWAYS_TRAP_PF
                /* A genuine page fault. Forward the trap to the guest by injecting the exception and resuming execution. */
Log(("Guest page fault at %04X:%RGv cr2=%RGv error code %x rsp=%RGv\n", pCtx->cs, (RTGCPTR)pCtx->rip,
goto ResumeExecution;
#ifdef VBOX_HM_WITH_GUEST_PATCHING
if (!pPatch)
Log2(("Shadow page fault at %RGv cr2=%RGv error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode));
goto ResumeExecution;
            /* A genuine page fault. Forward the trap to the guest by injecting the exception and resuming execution. */
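            /* A sketch of the injection; SVM_EVENT, SVM_EVENT_EXCEPTION and hmR0SvmInjectEvent are
               assumptions here, while the EventInject bit-field names match the VMCB dump above: */
            pCtx->cr2 = uFaultAddress;                  /* the guest expects the faulting address in CR2 */
            SVM_EVENT Event;
            Event.au64[0]            = 0;
            Event.n.u8Vector         = X86_XCPT_PF;
            Event.n.u3Type           = SVM_EVENT_EXCEPTION;
            Event.n.u1Valid          = 1;
            Event.n.u1ErrorCodeValid = 1;
            Event.n.u32ErrorCode     = errCode;
            hmR0SvmInjectEvent(pVCpu, pvVMCB, pCtx, &Event);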
goto ResumeExecution;
#ifdef VBOX_STRICT
goto ResumeExecution;
#ifdef VBOX_STRICT
switch (vector)
case X86_XCPT_GP:
case X86_XCPT_BP:
case X86_XCPT_DE:
case X86_XCPT_UD:
case X86_XCPT_SS:
case X86_XCPT_NP:
goto ResumeExecution;
case SVM_EXIT_NPF:
            /* EXITINFO1 contains the fault error code; EXITINFO2 contains the guest physical address causing the fault. */
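            /* A minimal decode sketch using the exit-info fields shown in the VMCB dump above: */
            uint32_t errCode     = (uint32_t)pvVMCB->ctrl.u64ExitInfo1;     /* fault error code */
            RTGCPHYS GCPhysFault = pvVMCB->ctrl.u64ExitInfo2;               /* faulting guest-physical address */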
LogFlow(("Nested page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, GCPhysFault, errCode));
#ifdef VBOX_HM_WITH_GUEST_PATCHING
|| (errCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD) /* mmio optimization */)
if (!pPatch)
rc = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmShwPagingMode, CPUMCTX2CORE(pCtx), GCPhysFault, errCode);
                /* If we can't get the guest physical address of the page containing the instruction via the
                 * guest's page tables (we would invalidate the guest page in the host TLB), resume execution;
                 * the resulting guest page fault lets the guest handle the access itself. */
Log2(("PGMR0Trap0eHandlerNPMisconfig(,,,%RGp) at %RGv -> resume\n", GCPhysFault, (RTGCPTR)pCtx->rip));
goto ResumeExecution;
Log2(("PGMR0Trap0eHandlerNPMisconfig(,,,%RGp) at %RGv -> resume\n", GCPhysFault, (RTGCPTR)pCtx->rip));
rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmShwPagingMode, errCode, CPUMCTX2CORE(pCtx), GCPhysFault);
Log2(("PGMR0Trap0eHandlerNestedPaging %RGv returned %Rrc\n", (RTGCPTR)pCtx->rip, VBOXSTRICTRC_VAL(rc)));
Log2(("Shadow page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, GCPhysFault, errCode));
goto ResumeExecution;
#ifdef VBOX_STRICT
case SVM_EXIT_VINTR:
goto ResumeExecution;
case SVM_EXIT_FERR_FREEZE:
case SVM_EXIT_INTR:
case SVM_EXIT_NMI:
case SVM_EXIT_SMI:
case SVM_EXIT_INIT:
case SVM_EXIT_WBINVD:
goto ResumeExecution;
goto ResumeExecution;
goto ResumeExecution;
goto ResumeExecution;
goto ResumeExecution;
case SVM_EXIT_WRITE_CR8: case SVM_EXIT_WRITE_CR9: case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:
case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:
AssertFailed();
goto ResumeExecution;
goto ResumeExecution;
case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9: case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:
case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
goto ResumeExecution;
goto ResumeExecution;
goto ResumeExecution;
goto ResumeExecution;
            /* Note: We'll get a #GP if the I/O instruction isn't allowed (IOPL or TSS bitmap); no need to double-check. */
rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo r=ramshankar: would this really fall back to the recompiler and work? */
Log2(("IOMIOPortWrite %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal,
uIOSize));
Log2(("IOMIOPortRead %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, u32Val & uAndVal,
uIOSize));
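                    /* A sketch of folding an IN result back into RAX with the same width mask used in the
                       logging above; u32Val and uAndVal are taken from that context: */
                    pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);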
goto ResumeExecution;
goto ResumeExecution;
Log2(("EM status from IO at %RGv %x size %d: %Rrc\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize,
#ifdef VBOX_STRICT
case SVM_EXIT_HLT:
goto ResumeExecution;
case SVM_EXIT_MWAIT_UNCOND:
goto ResumeExecution;
AssertMsg(rc == VERR_EM_INTERPRETER || rc == VINF_EM_HALT, ("EMU: mwait failed with %Rrc\n", VBOXSTRICTRC_VAL(rc)));
case SVM_EXIT_MONITOR:
goto ResumeExecution;
case SVM_EXIT_VMMCALL:
case SVM_EXIT_RSM:
case SVM_EXIT_INVLPGA:
case SVM_EXIT_VMRUN:
case SVM_EXIT_VMLOAD:
case SVM_EXIT_VMSAVE:
case SVM_EXIT_STGI:
case SVM_EXIT_CLGI:
case SVM_EXIT_SKINIT:
goto ResumeExecution;
case SVM_EXIT_MSR:
goto ResumeExecution;
STAM_COUNTER_INC((pvVMCB->ctrl.u64ExitInfo1 == 0) ? &pVCpu->hm.s.StatExitRdmsr : &pVCpu->hm.s.StatExitWrmsr);
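            /* The dispatch itself, sketched; EMInterpretRdmsr/EMInterpretWrmsr and their exact
               signatures are assumptions, and EXITINFO1 being 0 for RDMSR and 1 for WRMSR follows
               the AMD-V spec. On success we advance past the 2-byte instruction and resume: */
            rc = (pvVMCB->ctrl.u64ExitInfo1 == 0)
               ? EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx))
               : EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));
            if (rc == VINF_SUCCESS)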
goto ResumeExecution;
            AssertMsg(rc == VERR_EM_INTERPRETER,
                      ("EMU: %s failed with %Rrc\n", (pvVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr",
                       VBOXSTRICTRC_VAL(rc)));
case SVM_EVENT_EXTERNAL_IRQ:
case SVM_EVENT_NMI:
case SVM_EXIT_PAUSE:
case SVM_EXIT_MWAIT_ARMED:
case SVM_EXIT_SHUTDOWN:
case SVM_EXIT_IDTR_READ:
case SVM_EXIT_GDTR_READ:
case SVM_EXIT_LDTR_READ:
case SVM_EXIT_TR_READ:
case SVM_EXIT_IDTR_WRITE:
case SVM_EXIT_GDTR_WRITE:
case SVM_EXIT_LDTR_WRITE:
case SVM_EXIT_TR_WRITE:
case SVM_EXIT_CR0_SEL_WRITE:
end:
    /* If we executed VMRUN and an external IRQ was pending, then we don't have to do a full sync the next time. */
int rc;
bool fPending;
if (!pPatch)
case HMTPRINSTR_READ:
case HMTPRINSTR_WRITE_REG:
case HMTPRINSTR_WRITE_IMM:
return VINF_SUCCESS;
LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.uCurrentAsid));
return VINF_SUCCESS;
#ifdef DEBUG
return VINF_SUCCESS;
return VERR_EM_INTERPRETER;
case DISQPV_TYPE_IMMEDIATE:
case DISQPV_TYPE_ADDRESS:
return VERR_EM_INTERPRETER;
return VERR_EM_INTERPRETER;
return VINF_SUCCESS;
return rc;
return rc;
return VERR_EM_INTERPRETER;
if (!fFlushPending)
        /* If we get a flush in 64-bit guest mode, then force a full TLB flush. invlpga takes only 32-bit addresses. */
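        /* A sketch of that fallback; CPUMIsGuestInLongMode and the VMCPU_FF_TLB_FLUSH force-flag are
           the assumed building blocks. Addresses above 4GB can't be encoded, so schedule a full flush: */
        if (CPUMIsGuestInLongMode(pVCpu))
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);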
return VINF_SUCCESS;
/* invlpga only invalidates TLB entries for guest virtual addresses; we have no choice but to force a TLB flush here. */
return VINF_SUCCESS;
#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS HCPhysVMCBHost, RTHCPHYS HCPhysVMCB, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)
VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,
int rc;
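    /* The offset handed to the 32->64 switcher below is the distance from the VM's shared CPUM
       data to this VCPU's CPUM data, so the 64-bit side can locate the right per-CPU context. */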
rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
return rc;