Lines Matching refs:pVCpu

178 # define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
179 # define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
186 #define HMVMX_ASSERT_PREEMPT_SAFE() Assert( VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
191 #define HMVMX_ASSERT_CPU_SAFE() AssertMsg( VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
192 || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
194 pVCpu->hm.s.idEnteredCpu, RTMpCpuId())); \
199 pVCpu->hm.s.u32HMError = pVmxTransient->uExitReason; \
330 * @param pVCpu Pointer to the VMCPU.
337 typedef int FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
339 typedef DECLCALLBACK(int) FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
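
The two typedef lines above (337/339) are alternate declarations of the same VM-exit handler signature (the DECLCALLBACK variant applies the calling-convention decoration). Handlers of this type are typically dispatched through a table indexed by the VMCS exit reason. A minimal self-contained sketch of that pattern follows; the stand-in types, table size, and handler bodies are illustrative, not the real VBox definitions (exit reason 10 is CPUID per the Intel SDM):

    #include <stdint.h>
    #include <stddef.h>

    /* Stand-in opaque types; the real definitions live in the VBox headers. */
    typedef struct VMCPU        *PVMCPU;
    typedef struct CPUMCTX      *PCPUMCTX;
    typedef struct VMXTRANSIENT { uint32_t uExitReason; } VMXTRANSIENT, *PVMXTRANSIENT;

    typedef int FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
    typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;

    static int exampleExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    {
        (void)pVCpu; (void)pMixedCtx; (void)pVmxTransient;
        return 0;  /* VINF_SUCCESS */
    }

    static int exampleExitGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    {
        (void)pVCpu; (void)pMixedCtx; (void)pVmxTransient;
        return -1; /* unexpected/unhandled exit reason */
    }

    #define EXAMPLE_EXIT_CPUID 10   /* exit reason 10 is CPUID per the SDM */
    #define EXAMPLE_EXIT_MAX   64   /* illustrative table size */

    static PFNVMXEXITHANDLER const g_apfnExampleHandlers[EXAMPLE_EXIT_MAX] =
    {
        [EXAMPLE_EXIT_CPUID] = exampleExitCpuid,
    };

    /* Dispatch one exit: look up the handler for the reason, fall back to generic. */
    static int exampleHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    {
        uint32_t const uReason = pVmxTransient->uExitReason;
        PFNVMXEXITHANDLER pfn = uReason < EXAMPLE_EXIT_MAX ? g_apfnExampleHandlers[uReason] : NULL;
        return pfn ? pfn(pVCpu, pMixedCtx, pVmxTransient)
                   : exampleExitGeneric(pVCpu, pMixedCtx, pVmxTransient);
    }
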
348 static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush);
349 static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
350 static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
354 static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
357 DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
362 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitStep(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
415 static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
416 static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
417 static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
418 static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
419 static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
420 static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
422 static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
424 static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
547 * @param pVCpu Pointer to the VMCPU (can be NULL if @a rc is not
552 static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
558 AssertPtrReturnVoid(pVCpu);
559 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
659 * @param pVCpu Pointer to the VMCPU.
698 * @param pVCpu Pointer to the VMCPU (required for the VMCS cache
702 DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
706 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification); NOREF(pVCpu);
888 PVMCPU pVCpu = &pVM->aCpus[i];
889 AssertPtr(pVCpu);
891 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
892 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
895 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
897 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
898 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
925 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
926 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
927 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
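
The macro at 925-927 nils all three pieces of per-page state (ring-0 memory object, virtual mapping, physical address) that hmR0VmxPageAllocZ/hmR0VmxPageFree keep in sync. A sketch of such an alloc/free pair using the IPRT ring-0 memory-object API; the signatures are as I recall them from IPRT of this era, so treat the block as an approximation rather than verbatim VBox code:

    #include <iprt/memobj.h>
    #include <iprt/param.h>    /* PAGE_SIZE */
    #include <iprt/asm.h>      /* ASMMemZeroPage */
    #include <iprt/errcore.h>

    /* Allocate one zeroed, page-aligned, physically contiguous page and
       record the handle, the ring-0 mapping and the physical address. */
    static int examplePageAllocZ(PRTR0MEMOBJ phMemObj, PRTR0PTR ppvPage, PRTHCPHYS pHCPhys)
    {
        int rc = RTR0MemObjAllocCont(phMemObj, PAGE_SIZE, false /* fExecutable */);
        if (RT_FAILURE(rc))
            return rc;
        *ppvPage = RTR0MemObjAddress(*phMemObj);
        *pHCPhys = RTR0MemObjGetPagePhysAddr(*phMemObj, 0 /* iPage */);
        ASMMemZeroPage(*ppvPage);
        return VINF_SUCCESS;
    }

    /* Free the page and reset all three fields so stale values cannot be reused. */
    static void examplePageFree(PRTR0MEMOBJ phMemObj, PRTR0PTR ppvPage, PRTHCPHYS pHCPhys)
    {
        if (*phMemObj != NIL_RTR0MEMOBJ)
            RTR0MemObjFree(*phMemObj, true /* fFreeMappings */);
        *phMemObj = NIL_RTR0MEMOBJ;
        *ppvPage  = 0;
        *pHCPhys  = 0;
    }
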
937 PVMCPU pVCpu = &pVM->aCpus[i];
978 PVMCPU pVCpu = &pVM->aCpus[i];
979 AssertPtr(pVCpu);
982 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
989 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
990 &pVCpu->hm.s.vmx.HCPhysVirtApic);
1004 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
1005 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1008 ASMMemFill32(pVCpu->hm.s.vmx.pvMsrBitmap, PAGE_SIZE, UINT32_C(0xffffffff));
1012 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
1017 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
1094 hmR0VmxFlushEpt(NULL /* pVCpu */, VMXFLUSHEPT_ALL_CONTEXTS);
1132 * @param pVCpu Pointer to the VMCPU.
1137 static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
1140 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1182 * @param pVCpu Pointer to the VMCPU.
1187 static int hmR0VmxGetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, PVMXMSREXITREAD penmRead, PVMXMSREXITWRITE penmWrite)
1192 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
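
hmR0VmxSetMsrPermission/hmR0VmxGetMsrPermission (1137/1187) operate on the 4 KB MSR bitmap page, which per the Intel SDM holds four 1 KB quadrants: read bits for MSRs 0x00000000-0x00001FFF at offset 0x000, read bits for 0xC0000000-0xC0001FFF at 0x400, and the matching write quadrants at 0x800 and 0xC00. A set bit forces a VM-exit on that access; a clear bit passes it through. A self-contained sketch of the bit arithmetic (the helper names are mine):

    #include <stdint.h>
    #include <stdbool.h>

    /* Locate the bit for uMsr inside the 4K bitmap. offQuadrant is 0x000 for
       the read half, 0x800 for the write half; the high MSR range adds 0x400.
       Returns false for MSRs outside the two covered ranges (those always exit). */
    static bool msrBitmapPos(uint32_t uMsr, uint32_t offQuadrant, uint32_t *poffByte, uint8_t *piBit)
    {
        uint32_t iBit;
        if (uMsr <= 0x00001fffu)
            iBit = uMsr;
        else if (uMsr - 0xc0000000u <= 0x1fffu)
        {
            iBit = uMsr - 0xc0000000u;
            offQuadrant += 0x400;
        }
        else
            return false;
        *poffByte = offQuadrant + (iBit >> 3);
        *piBit    = (uint8_t)(iBit & 7);
        return true;
    }

    /* Set (intercept) or clear (pass through) the read and write bits for uMsr. */
    static void msrSetPermission(uint8_t *pbBitmap /* the 4K page */, uint32_t uMsr,
                                 bool fInterceptRead, bool fInterceptWrite)
    {
        uint32_t offByte;
        uint8_t  iBit;
        if (msrBitmapPos(uMsr, 0x000, &offByte, &iBit))  /* read quadrant  */
        {
            if (fInterceptRead)  pbBitmap[offByte] |=  (uint8_t)(1u << iBit);
            else                 pbBitmap[offByte] &= (uint8_t)~(1u << iBit);
        }
        if (msrBitmapPos(uMsr, 0x800, &offByte, &iBit))  /* write quadrant */
        {
            if (fInterceptWrite) pbBitmap[offByte] |=  (uint8_t)(1u << iBit);
            else                 pbBitmap[offByte] &= (uint8_t)~(1u << iBit);
        }
    }
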
1226 * @param pVCpu Pointer to the VMCPU.
1229 DECLINLINE(int) hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)
1232 uint32_t const cMaxSupportedMsrs = MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc);
1236 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
1248 pVCpu->hm.s.vmx.cMsrs = cMsrs;
1261 * @param pVCpu Pointer to the VMCPU.
1267 static bool hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr)
1269 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1270 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1283 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1287 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1288 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1298 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1311 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1326 * @param pVCpu Pointer to the VMCPU.
1329 static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr)
1331 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1332 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1346 PVMXAUTOMSR pLastGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1351 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1352 PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1363 if (cMsrs != pVCpu->hm.s.vmx.cMsrs)
1365 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1369 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1370 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
1372 Log4(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));
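
hmR0VmxAddAutoLoadStoreMsr (1267) appends to, or updates in place, the guest auto-load/store area and mirrors the slot in the host area; hmR0VmxRemoveAutoLoadStoreMsr (1329) deletes by copying the last entry over the victim (see the pLastGuestMsr/pLastHostMsr lines at 1346-1352) and shrinking cMsrs, keeping the arrays dense without shifting. A sketch of that swap-with-last scheme; VMXAUTOMSR mirrors the hardware's 16-byte entry layout, and the helper names are mine:

    #include <stdint.h>

    typedef struct VMXAUTOMSR
    {
        uint32_t u32Msr;
        uint32_t u32Reserved;
        uint64_t u64Value;
    } VMXAUTOMSR;

    /* Add uMsr, or update its value if it is already present. Returns the new count. */
    static uint32_t autoMsrAdd(VMXAUTOMSR *paMsrs, uint32_t cMsrs, uint32_t cMax,
                               uint32_t uMsr, uint64_t u64Value)
    {
        for (uint32_t i = 0; i < cMsrs; i++)
            if (paMsrs[i].u32Msr == uMsr)
            {
                paMsrs[i].u64Value = u64Value;   /* already present: update in place */
                return cMsrs;
            }
        if (cMsrs < cMax)                        /* the real code also bumps the VMCS count */
        {
            paMsrs[cMsrs].u32Msr   = uMsr;
            paMsrs[cMsrs].u64Value = u64Value;
            cMsrs++;
        }
        return cMsrs;
    }

    /* Remove uMsr by overwriting it with the last entry; order is not preserved. */
    static uint32_t autoMsrRemove(VMXAUTOMSR *paMsrs, uint32_t cMsrs, uint32_t uMsr)
    {
        for (uint32_t i = 0; i < cMsrs; i++)
            if (paMsrs[i].u32Msr == uMsr)
            {
                paMsrs[i] = paMsrs[cMsrs - 1];
                return cMsrs - 1;
            }
        return cMsrs;
    }
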
1385 * @param pVCpu Pointer to the VMCPU.
1388 static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1390 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1391 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1405 * @param pVCpu Pointer to the VMCPU.
1409 static void hmR0VmxUpdateAutoLoadStoreHostMsrs(PVMCPU pVCpu)
1412 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1413 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1414 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1425 pHostMsr->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostEfer;
1430 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
1439 * @param pVCpu Pointer to the VMCPU.
1443 static void hmR0VmxLazySaveHostMsrs(PVMCPU pVCpu)
1450 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
1452 pVCpu->hm.s.vmx.u64HostLStarMsr = ASMRdMsr(MSR_K8_LSTAR);
1453 pVCpu->hm.s.vmx.u64HostStarMsr = ASMRdMsr(MSR_K6_STAR);
1454 pVCpu->hm.s.vmx.u64HostSFMaskMsr = ASMRdMsr(MSR_K8_SF_MASK);
1455 pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1456 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
1466 * @param pVCpu Pointer to the VMCPU.
1469 static bool hmR0VmxIsLazyGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1471 NOREF(pVCpu);
1487 * @param pVCpu Pointer to the VMCPU.
1494 static void hmR0VmxLazySaveGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1497 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1499 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1501 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1518 * @param pVCpu Pointer to the VMCPU.
1525 static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1528 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1532 if (pMixedCtx->msr##a_GuestMsr != pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr) \
1535 Assert(ASMRdMsr(uMsr) == pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr); \
1538 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1539 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
1545 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
1563 * @param pVCpu Pointer to the VMCPU.
1569 static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu)
1572 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1574 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1576 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1577 ASMWrMsr(MSR_K8_LSTAR, pVCpu->hm.s.vmx.u64HostLStarMsr);
1578 ASMWrMsr(MSR_K6_STAR, pVCpu->hm.s.vmx.u64HostStarMsr);
1579 ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hm.s.vmx.u64HostSFMaskMsr);
1580 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
1582 pVCpu->hm.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
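
Taken together, lines 1443-1582 implement a small per-VCPU state machine over two flags: VMX_LAZY_MSRS_SAVED_HOST (the host's LSTAR/STAR/SF_MASK/KERNEL_GS_BASE values have been read and stashed) and VMX_LAZY_MSRS_LOADED_GUEST (the guest values are currently in the CPU). Guest values may only be loaded once the host values are saved, and the restore path clears both flags at once (1582). A sketch of that discipline; the MSR read/write helpers are stand-ins for ASMRdMsr/ASMWrMsr:

    #include <stdint.h>
    #include <assert.h>

    #define LAZY_SAVED_HOST   0x1u
    #define LAZY_LOADED_GUEST 0x2u

    #define MSR_K8_LSTAR          0xc0000082u
    #define MSR_K6_STAR           0xc0000081u
    #define MSR_K8_SF_MASK        0xc0000084u
    #define MSR_K8_KERNEL_GS_BASE 0xc0000102u

    typedef struct LAZYMSRS
    {
        uint32_t fFlags;
        uint64_t u64HostLStar, u64HostStar, u64HostSFMask, u64HostKernelGSBase;
    } LAZYMSRS;

    /* Stand-ins for the IPRT ASMRdMsr/ASMWrMsr intrinsics. */
    extern uint64_t exampleRdMsr(uint32_t uMsr);
    extern void     exampleWrMsr(uint32_t uMsr, uint64_t u64Value);

    static void lazySaveHost(LAZYMSRS *p)
    {
        if (!(p->fFlags & LAZY_LOADED_GUEST))   /* never stash guest values as "host" */
        {
            p->u64HostLStar        = exampleRdMsr(MSR_K8_LSTAR);
            p->u64HostStar         = exampleRdMsr(MSR_K6_STAR);
            p->u64HostSFMask       = exampleRdMsr(MSR_K8_SF_MASK);
            p->u64HostKernelGSBase = exampleRdMsr(MSR_K8_KERNEL_GS_BASE);
            p->fFlags |= LAZY_SAVED_HOST;
        }
    }

    static void lazyRestoreHost(LAZYMSRS *p)
    {
        if (p->fFlags & LAZY_LOADED_GUEST)      /* only write back if guest values are live */
        {
            assert(p->fFlags & LAZY_SAVED_HOST);
            exampleWrMsr(MSR_K8_LSTAR,          p->u64HostLStar);
            exampleWrMsr(MSR_K6_STAR,           p->u64HostStar);
            exampleWrMsr(MSR_K8_SF_MASK,        p->u64HostSFMask);
            exampleWrMsr(MSR_K8_KERNEL_GS_BASE, p->u64HostKernelGSBase);
        }
        p->fFlags &= ~(LAZY_LOADED_GUEST | LAZY_SAVED_HOST);
    }
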
1592 * @param pVCpu Pointer to the VMCPU.
1594 static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu)
1599 AssertMsgReturn(pVCpu->hm.s.vmx.u32EntryCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32EntryCtls, u32Val),
1604 AssertMsgReturn(pVCpu->hm.s.vmx.u32ExitCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ExitCtls, u32Val),
1609 AssertMsgReturn(pVCpu->hm.s.vmx.u32PinCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32PinCtls, u32Val),
1614 AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls, u32Val),
1617 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1621 AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls2 == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls2, u32Val),
1634 * @param pVCpu Pointer to the VMCPU.
1636 static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu)
1640 if (pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
1656 * @param pVCpu Pointer to the VMCPU.
1658 static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu)
1665 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1668 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1671 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1673 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1674 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1686 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1690 rc = hmR0VmxGetMsrPermission(pVCpu, pGuestMsr->u32Msr, &enmRead, &enmWrite);
1714 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1723 static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush)
1730 Assert(pVCpu);
1731 au64Descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1736 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0,
1739 && pVCpu)
1741 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1751 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1759 static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr)
1773 AssertPtr(pVCpu);
1774 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1775 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1776 au64Descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1782 ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1784 && pVCpu)
1786 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
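
Both flush helpers (1723/1759) build a 16-byte descriptor in au64Descriptor[] and pass it to an assembly wrapper around INVEPT or INVVPID. Per the Intel SDM, the INVEPT descriptor carries the EPT pointer in its first qword (second qword reserved), while the INVVPID descriptor carries the VPID in bits 0-15 of the first qword and a linear address in the second, used only by the individual-address type. A sketch of those layouts; the extern wrappers stand in for VBox's assembly stubs:

    #include <stdint.h>

    /* Assembly wrappers around INVEPT/INVVPID; declared, not defined, here. */
    extern int VMXR0InvEPT(uint32_t enmFlush, uint64_t *pDescriptor);
    extern int VMXR0InvVPID(uint32_t enmFlush, uint64_t *pDescriptor);

    /* Flush-type encodings per the Intel SDM. */
    #define INVVPID_INDIV_ADDR      0
    #define INVVPID_SINGLE_CONTEXT  1
    #define INVVPID_ALL_CONTEXTS    2
    #define INVEPT_SINGLE_CONTEXT   1
    #define INVEPT_ALL_CONTEXTS     2

    static int exampleFlushEpt(uint64_t HCPhysEPTP, uint32_t enmFlush)
    {
        /* Second qword is reserved and must be zero. */
        uint64_t au64Descriptor[2] = { HCPhysEPTP, 0 };
        if (enmFlush == INVEPT_ALL_CONTEXTS)
            au64Descriptor[0] = 0;              /* EPTP is ignored for all-contexts */
        return VMXR0InvEPT(enmFlush, &au64Descriptor[0]);
    }

    static int exampleFlushVpid(uint16_t uVpid, uint32_t enmFlush, uint64_t GCPtr)
    {
        /* Bits 16..63 of the first qword are reserved (zero); GCPtr matters only
           for the individual-address flush type. */
        uint64_t au64Descriptor[2] = { uVpid, GCPtr };
        return VMXR0InvVPID(enmFlush, &au64Descriptor[0]);
    }
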
1797 * @param pVCpu Pointer to the VMCPU.
1800 VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
1803 AssertPtr(pVCpu);
1804 LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
1806 bool fFlushPending = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
1820 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, GCVirt);
1821 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1824 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1827 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1840 * @param pVCpu Pointer to the VMCPU.
1843 VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1853 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1854 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
1864 * @param pVCpu Pointer to the VMCPU.
1869 static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1871 AssertPtr(pVCpu);
1875 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1879 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1880 pVCpu->hm.s.TlbShootdown.cPages = 0;
1884 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1885 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1886 pVCpu->hm.s.fForceTLBFlush = false;
1895 * @param pVCpu Pointer to the VMCPU.
1903 static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1910 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
1919 AssertPtr(pVCpu);
1931 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1932 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1942 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1943 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1944 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1950 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1951 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1953 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); /* Already flushed-by-EPT, skip doing it again below. */
1957 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1966 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1967 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1975 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1977 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1985 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1986 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1989 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1992 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1993 pVCpu->hm.s.TlbShootdown.cPages = 0;
1997 pVCpu->hm.s.fForceTLBFlush = false;
2001 Assert(pVCpu->hm.s.idLastCpu == pCpu->idCpu);
2002 Assert(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes);
2003 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2004 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2006 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
2007 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2008 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2009 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2012 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
2024 * @param pVCpu Pointer to the VMCPU.
2029 static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2032 AssertPtr(pVCpu);
2042 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2043 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2045 pVCpu->hm.s.fForceTLBFlush = true;
2046 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2050 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2052 pVCpu->hm.s.fForceTLBFlush = true;
2053 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2056 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2057 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2059 if (pVCpu->hm.s.fForceTLBFlush)
2061 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2062 pVCpu->hm.s.fForceTLBFlush = false;
2070 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2073 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
2074 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2077 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
2079 pVCpu->hm.s.TlbShootdown.cPages = 0;
2080 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2091 * @param pVCpu Pointer to the VMCPU.
2096 static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2099 AssertPtr(pVCpu);
2110 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2111 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2113 pVCpu->hm.s.fForceTLBFlush = true;
2114 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2118 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2125 pVCpu->hm.s.fForceTLBFlush = true;
2126 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2129 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2130 if (pVCpu->hm.s.fForceTLBFlush)
2140 pVCpu->hm.s.fForceTLBFlush = false;
2141 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2142 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
2146 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
2149 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
2165 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
2167 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
2170 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2175 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
2176 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
2179 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
2181 pVCpu->hm.s.TlbShootdown.cPages = 0;
2182 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2185 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
2189 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2190 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2192 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
2193 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2194 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2195 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2197 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
2205 * @param pVCpu Pointer to the VMCPU.
2208 DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2211 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2213 PVM pVM = pVCpu->CTX_SUFF(pVM);
2216 case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu, pCpu); break;
2217 case HMVMX_FLUSH_TAGGED_TLB_EPT: hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu, pCpu); break;
2218 case HMVMX_FLUSH_TAGGED_TLB_VPID: hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu, pCpu); break;
2219 case HMVMX_FLUSH_TAGGED_TLB_NONE: hmR0VmxFlushTaggedTlbNone(pVM, pVCpu, pCpu); break;
2226 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN));
2328 * @param pVCpu Pointer to the VMCPU.
2330 static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
2333 AssertPtr(pVCpu);
2355 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
2362 pVCpu->hm.s.vmx.u32PinCtls = val;
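
hmR0VmxSetupPinCtls, and the proc/entry/exit-control setups that follow, all use the same adjust-and-verify pattern: start from the allowed-0 settings of the corresponding IA32_VMX_*_CTLS capability MSR (low dword, bits that must be 1), OR in the desired features, mask with the allowed-1 settings (high dword, bits that may be 1), and record a VMX_UFC_* code in u32HMError (as at 2355) when a required feature got zapped. A generic sketch of that step:

    #include <stdint.h>
    #include <stdbool.h>

    /* Adjust desired VMX control bits against a capability MSR value.
       Returns false if a bit in fRequired cannot be set on this CPU. */
    static bool vmxAdjustCtls(uint64_t u64CapMsr, uint32_t fDesired, uint32_t fRequired,
                              uint32_t *pfResult)
    {
        uint32_t const fAllowed0 = (uint32_t)u64CapMsr;         /* must-be-one bits */
        uint32_t const fAllowed1 = (uint32_t)(u64CapMsr >> 32); /* may-be-one bits  */

        uint32_t fVal = fAllowed0 | fDesired;  /* mandatory bits first, then ours   */
        fVal &= fAllowed1;                     /* zap anything the CPU cannot do    */

        *pfResult = fVal;
        return (fVal & fRequired) == fRequired;
    }
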
2374 static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
2377 AssertPtr(pVCpu);
2396 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
2412 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2413 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
2415 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
2441 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2442 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
2443 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2450 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2451 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2452 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2453 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2454 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2462 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2463 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2464 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2465 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2478 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
2485 pVCpu->hm.s.vmx.u32ProcCtls = val;
2490 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
2536 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
2543 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
2549 pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
2563 * @param pVCpu Pointer to the VMCPU.
2565 static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
2569 AssertPtr(pVCpu);
2599 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
2600 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
2601 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2603 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2606 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
2607 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
2608 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
2633 * @param pVCpu Pointer to the VMCPU.
2635 static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
2638 AssertPtr(pVCpu);
2640 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
2642 uint32_t u32XcptBitmap = pVCpu->hm.s.fGIMTrapXcptUD ? RT_BIT(X86_XCPT_UD) : 0;
2648 pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
2660 * @param pVCpu Pointer to the VMCPU.
2662 static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
2665 HMVMXCPU_GST_RESET_TO(pVCpu, HMVMX_UPDATED_GUEST_ALL);
2775 PVMCPU pVCpu = &pVM->aCpus[i];
2776 AssertPtr(pVCpu);
2777 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
2780 Log4(("VMXR0SetupVM: pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
2783 Assert(!pVCpu->hm.s.idxExitHistoryFree);
2784 HMCPU_EXIT_HISTORY_RESET(pVCpu);
2787 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
2790 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2792 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2795 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2797 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2799 rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
2801 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2803 rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
2805 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2807 rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
2809 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2811 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
2813 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2815 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
2817 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2820 rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
2822 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2826 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2828 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2830 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
2832 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
2845 * @param pVCpu Pointer to the VMCPU.
2847 DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
2849 NOREF(pVM); NOREF(pVCpu);
2893 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##seg; \
2894 pVCpu->hm.s.vmx.RestoreHost.uHostSel##seg = (selValue); \
2907 * @param pVCpu Pointer to the VMCPU.
2909 DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
2918 AssertMsgReturn(!(pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED),
2938 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
2992 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE))
3040 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
3042 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
3059 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
3061 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
3102 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
3105 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_READ_ONLY;
3106 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
3109 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
3131 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
3132 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
3133 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
3134 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
3149 * @param pVCpu Pointer to the VMCPU.
3153 DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
3157 AssertPtr(pVCpu);
3158 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
3163 hmR0VmxLazySaveHostMsrs(pVCpu);
3223 * @param pVCpu Pointer to the VMCPU.
3231 static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3239 if (CPUMIsGuestInLongMode(pVCpu))
3243 PVM pVM = pVCpu->CTX_SUFF(pVM);
3251 if ( CPUMIsGuestInLongMode(pVCpu)
3283 * @param pVCpu Pointer to the VMCPU.
3291 DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3294 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS))
3296 PVM pVM = pVCpu->CTX_SUFF(pVM);
3307 Log4(("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n", pVCpu->idCpu));
3314 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3317 Log4(("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n", pVCpu->idCpu));
3333 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
3340 pVCpu->hm.s.vmx.u32EntryCtls = val;
3341 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS);
3352 * @param pVCpu Pointer to the VMCPU.
3359 DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3364 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS))
3366 PVM pVM = pVCpu->CTX_SUFF(pVM);
3381 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
3390 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
3398 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3402 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR, VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n", pVCpu->idCpu));
3420 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
3427 pVCpu->hm.s.vmx.u32ExitCtls = val;
3428 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_EXIT_CTLS);
3439 * @param pVCpu Pointer to the VMCPU.
3444 DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3449 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
3452 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
3454 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
3459 rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
3468 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
3486 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
3496 * @param pVCpu Pointer to the VMCPU.
3503 DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3509 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3512 AssertMsg(HMVMXCPU_GST_IS_SET(pVCpu, HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS),
3513 ("%#x\n", HMVMXCPU_GST_VALUE(pVCpu)));
3514 if (pMixedCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
3531 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)
3532 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
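
hmR0VmxGetGuestIntrState (3503) folds two VCPU force-flags into the VMCS guest interruptibility-state field: interrupt inhibition maps to blocking-by-STI/MOV-SS when RIP still equals the inhibiting instruction's PC (the real code instead clears the force-flag once RIP has moved on), and pending NMI blocking maps to blocking-by-NMI when virtual NMIs are in use (3531-3532). A sketch using the SDM bit assignments for the field (bit 0 STI, bit 1 MOV SS, bit 2 SMI, bit 3 NMI):

    #include <stdint.h>
    #include <stdbool.h>

    #define VMX_INTR_STATE_BLOCK_STI    (1u << 0)
    #define VMX_INTR_STATE_BLOCK_MOVSS  (1u << 1)
    #define VMX_INTR_STATE_BLOCK_SMI    (1u << 2)
    #define VMX_INTR_STATE_BLOCK_NMI    (1u << 3)

    static uint32_t exampleGetGuestIntrState(bool fInhibitInterrupts, uint64_t uRip,
                                             uint64_t uInhibitPC, bool fBlockNmis,
                                             bool fVirtualNmis)
    {
        uint32_t uIntrState = 0;
        /* Inhibition only applies to the instruction right after STI/MOV SS. */
        if (fInhibitInterrupts && uRip == uInhibitPC)
            uIntrState |= VMX_INTR_STATE_BLOCK_STI;   /* or BLOCK_MOVSS, per source */
        /* NMI blocking is only expressible here when virtual NMIs are enabled. */
        if (fBlockNmis && fVirtualNmis)
            uIntrState |= VMX_INTR_STATE_BLOCK_NMI;
        return uIntrState;
    }
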
3546 * @param pVCpu Pointer to the VMCPU.
3549 static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
3551 NOREF(pVCpu);
3564 * @param pVCpu Pointer to the VMCPU.
3569 static int hmR0VmxLoadGuestXcptIntercepts(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3573 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
3576 if (pVCpu->hm.s.fGIMTrapXcptUD)
3577 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_UD);
3581 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_UD);
3585 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3588 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
3589 Log4(("Load[%RU32]: VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu,
3590 pVCpu->hm.s.vmx.u32XcptBitmap, HMCPU_CF_VALUE(pVCpu)));
3600 * @param pVCpu Pointer to the VMCPU.
3607 static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3610 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
3615 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
3616 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu, pMixedCtx->rip,
3617 HMCPU_CF_VALUE(pVCpu)));
3627 * @param pVCpu Pointer to the VMCPU.
3634 static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3637 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP))
3642 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RSP);
3643 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RSP=%#RX64\n", pVCpu->idCpu, pMixedCtx->rsp));
3653 * @param pVCpu Pointer to the VMCPU.
3660 static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3663 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
3683 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3685 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3686 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3687 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = Eflags.u32; /* Save the original eflags of the real-mode guest. */
3695 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RFLAGS);
3696 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", pVCpu->idCpu, Eflags.u32));
3706 * @param pVCpu Pointer to the VMCPU.
3713 DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3715 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
3717 rc = hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
3719 rc = hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
3731 * @param pVCpu Pointer to the VMCPU.
3738 static int hmR0VmxLoadSharedCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3745 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
3749 PVM pVM = pVCpu->CTX_SUFF(pVM);
3754 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR0));
3763 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3769 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3775 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3777 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3790 if (CPUMIsGuestFPUStateActive(pVCpu))
3809 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3813 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
3820 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
3822 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
3825 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
3827 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
3830 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
3832 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
3836 pVCpu->hm.s.vmx.u32XcptBitmap |= 0
3850 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
3853 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
3870 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", pVCpu->idCpu, u32GuestCR0, uSetCR0,
3905 pVCpu->hm.s.vmx.u32CR0Mask = u32CR0Mask;
3908 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_MASK=%#RX32\n", pVCpu->idCpu, u32CR0Mask));
3910 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
3922 * @param pVCpu Pointer to the VMCPU.
3929 static int hmR0VmxLoadGuestCR3AndCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3932 PVM pVM = pVCpu->CTX_SUFF(pVM);
3942 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
3947 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
3950 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
3951 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
3952 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
3955 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
3959 AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
3960 && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x3f) == 0, /* Bits 6:11 MBZ. */
3961 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3963 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
3965 Log4(("Load[%RU32]: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->idCpu, pVCpu->hm.s.vmx.HCPhysEPTP));
3973 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); AssertRCReturn(rc, rc);
3974 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
3975 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
3976 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
3977 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
4002 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RGv (GstN)\n", pVCpu->idCpu, GCPhysGuestCR3));
4008 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
4010 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", pVCpu->idCpu, HCPhysGuestCR3));
4015 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
4022 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
4030 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR4));
4038 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4063 switch (pVCpu->hm.s.enmShadowMode)
4098 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", pVCpu->idCpu, u32GuestCR4, uSetCR4, uZapCR4));
4110 pVCpu->hm.s.vmx.u32CR4Mask = u32CR4Mask;
4115 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
4117 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
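
Within hmR0VmxLoadGuestCR3AndCR4, lines 3947-3965 compose the EPT pointer from PGM's hyper CR3: the low 12 bits encode the memory type (write-back = 6) and the page-walk length minus one in bits 3:5, which is exactly what the asserts at 3959-3960 verify. A sketch of that composition:

    #include <stdint.h>
    #include <assert.h>

    #define EPT_MEMTYPE_WB          6u          /* write-back memory type          */
    #define EPT_PAGE_WALK_LENGTH_4  (3u << 3)   /* (4 levels - 1) in bits 3:5      */

    static uint64_t exampleMakeEptp(uint64_t HCPhysPml4)
    {
        assert((HCPhysPml4 & 0xfffu) == 0);                  /* page aligned       */
        uint64_t uEptp = HCPhysPml4 | EPT_MEMTYPE_WB | EPT_PAGE_WALK_LENGTH_4;
        assert(((uEptp >> 3) & 0x07u) == 3);                 /* walk length - 1    */
        assert(((uEptp >> 6) & 0x3fu) == 0);                 /* bits 6:11 MBZ      */
        return uEptp;
    }
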
4130 * @param pVCpu Pointer to the VMCPU.
4137 static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4139 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
4144 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
4153 PVM pVM = pVCpu->CTX_SUFF(pVM);
4156 if ( pVCpu->hm.s.fSingleInstruction
4157 || DBGFIsStepping(pVCpu))
4162 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
4163 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
4170 pVCpu->hm.s.fClearTrapFlag = true;
4171 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
4177 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
4188 && !CPUMIsHyperDebugStateActivePending(pVCpu))
4190 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4191 Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
4192 Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
4196 if (!CPUMIsHyperDebugStateActive(pVCpu))
4198 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4199 Assert(CPUMIsHyperDebugStateActive(pVCpu));
4200 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
4204 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)CPUMGetHyperDR7(pVCpu));
4207 pVCpu->hm.s.fUsingHyperDR7 = true;
4221 && !CPUMIsGuestDebugStateActivePending(pVCpu))
4223 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4224 Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
4225 Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
4226 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4230 if (!CPUMIsGuestDebugStateActive(pVCpu))
4232 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4233 Assert(CPUMIsGuestDebugStateActive(pVCpu));
4234 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
4235 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4245 else if ( !CPUMIsGuestDebugStateActivePending(pVCpu)
4246 && !CPUMIsGuestDebugStateActive(pVCpu))
4248 else if (!CPUMIsGuestDebugStateActive(pVCpu))
4259 pVCpu->hm.s.fUsingHyperDR7 = false;
4266 || pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4268 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
4269 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
4274 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
4275 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
4283 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4285 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4286 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
4289 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
4300 static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4421 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4474 * @param pVCpu Pointer to the VMCPU.
4483 static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
4494 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4498 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
4499 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
4537 static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4540 PVM pVM = pVCpu->CTX_SUFF(pVM);
4545 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
4548 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4550 pVCpu->hm.s.vmx.RealMode.AttrCS.u = pMixedCtx->cs.Attr.u;
4551 pVCpu->hm.s.vmx.RealMode.AttrSS.u = pMixedCtx->ss.Attr.u;
4552 pVCpu->hm.s.vmx.RealMode.AttrDS.u = pMixedCtx->ds.Attr.u;
4553 pVCpu->hm.s.vmx.RealMode.AttrES.u = pMixedCtx->es.Attr.u;
4554 pVCpu->hm.s.vmx.RealMode.AttrFS.u = pMixedCtx->fs.Attr.u;
4555 pVCpu->hm.s.vmx.RealMode.AttrGS.u = pMixedCtx->gs.Attr.u;
4563 if ( pVCpu->hm.s.vmx.fWasInRealMode
4564 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
4569 Log4(("Load[%RU32]: Switch to protected mode detected!\n", pVCpu->idCpu));
4570 pVCpu->hm.s.vmx.fWasInRealMode = false;
4574 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
4577 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
4580 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
4583 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
4586 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
4589 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
4595 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
4598 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
4599 Log4(("Load[%RU32]: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pVCpu->idCpu, pMixedCtx->cs.Sel,
4606 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
4618 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4665 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
4666 Log4(("Load[%RU32]: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", pVCpu->idCpu, u64Base));
4672 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
4680 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
4681 Log4(("Load[%RU32]: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->gdtr.pGdt));
4687 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
4716 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
4717 Log4(("Load[%RU32]: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->ldtr.u64Base));
4723 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
4731 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
4732 Log4(("Load[%RU32]: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->idtr.pIdt));
4751 * @param pVCpu Pointer to the VMCPU.
4758 static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4760 AssertPtr(pVCpu);
4761 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
4766 PVM pVM = pVCpu->CTX_SUFF(pVM);
4767 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
4773 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false /* fUpdateHostMsr */);
4774 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false /* fUpdateHostMsr */);
4775 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pMixedCtx->msrSFMASK, false /* fUpdateHostMsr */);
4776 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false /* fUpdateHostMsr */);
4778 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
4779 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
4781 Log4(("Load[%RU32]: MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", pVCpu->idCpu, i, pMsr->u32Msr,
4787 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
4795 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR))
4798 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
4801 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR))
4804 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
4807 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR))
4810 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
4813 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
4815 if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
4825 Log4(("Load[%RU32]: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pVCpu->idCpu, pMixedCtx->msrEFER));
4829 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */);
4831 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
4832 Log4(("Load[%RU32]: MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", pVCpu->idCpu, MSR_K6_EFER,
4833 pMixedCtx->msrEFER, pVCpu->hm.s.vmx.cMsrs));
4837 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
4838 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
4849 * @param pVCpu Pointer to the VMCPU.
4856 static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx)
4861 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE))
4866 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE);
4876 * @param pVCpu Pointer to the VMCPU.
4883 static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4890 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
4893 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
4895 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4898 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_VMX_EXIT_CTLS
4900 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
4902 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
4906 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
4913 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32)
4915 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4918 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_VMX_EXIT_CTLS
4920 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
4922 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4925 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4928 Assert(pVCpu->hm.s.vmx.pfnStartVM);
4938 * @param pVCpu Pointer to the VMCPU.
4943 DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4950 bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
4953 return HMR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
4955 return pVCpu->hm.s.vmx.pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
4964 * @param pVCpu Pointer to the VMCPU.
4970 static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
4973 Assert(pVCpu);
4987 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
4988 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
4989 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
4992 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
4997 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
5000 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
5001 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
5002 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
5005 Log4(("Entered host CPU %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));
5006 Log4(("Current host CPU %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));
5220 * @param pVCpu Pointer to the VMCPU.
5226 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp,
5236 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
5237 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
5240 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
5241 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
5243 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
5244 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
5252 CPUMR0SetLApic(pVCpu, idHostCpu);
5259 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5266 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
5267 CPUMSetHyperEIP(pVCpu, enmOp);
5269 CPUMPushHyper(pVCpu, paParam[i]);
5271 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
5274 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
5275 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
5290 rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5307 * @param pVCpu Pointer to the VMCPU.
5309 DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
5337 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
5338 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
5339 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
5343 aParam[8] = VM_RC_ADDR(pVM, pVCpu);
5350 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);
5360 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5361 pVCpu->hm.s.vmx.HCPhysVmcs));
5366 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
5367 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
5386 * @param pVCpu Pointer to the VMCPU.
5388 static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
5399 AssertPtr(pVCpu);
5400 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5478 * @param pVCpu Pointer to the VMCPU.
5482 VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5553 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
5554 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
5561 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
5575 * @param pVCpu Pointer to the VMCPU.
5579 VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5581 AssertPtr(pVCpu);
5582 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5608 * @param pVCpu Pointer to the VMCPU.
5613 VMMR0DECL(void) VMXWriteCachedVmcsLoad(PVMCPU pVCpu, PVMCSCACHE pCache)
5628 * @param pVCpu Pointer to the VMCPU.
5633 VMMR0DECL(void) VMXReadCachedVmcsStore(PVMCPU pVCpu, PVMCSCACHE pCache)
5654 * @param pVCpu Pointer to the VMCPU.
5658 static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVM pVM, PVMCPU pVCpu)
5665 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset,
5669 uint64_t u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
5678 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc);
5686 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
5692 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
5694 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5695 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5696 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
5701 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5702 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5703 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
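
hmR0VmxUpdateTscOffsettingAndPreemptTimer (5658) asks TM whether the real TSC can be exposed; if so it writes the TSC-offset field (the guest then reads host TSC + offset with no exit) and clears RDTSC exiting in the proc controls, otherwise it sets RDTSC exiting so reads trap and get emulated (5692-5703). A sketch of that decision; the two VMCS writes are reduced to stand-in setters, and RDTSC exiting is bit 12 of the primary processor-based controls per the SDM:

    #include <stdint.h>
    #include <stdbool.h>

    #define PROC_CTLS_RDTSC_EXIT (1u << 12)   /* "RDTSC exiting", SDM bit 12 */

    /* Stand-ins for VMXWriteVmcs64/VMXWriteVmcs32 on the relevant fields. */
    extern void exampleWriteTscOffset(uint64_t u64Offset);
    extern void exampleWriteProcCtls(uint32_t u32ProcCtls);

    static void exampleUpdateTscOffsetting(bool fCanUseRealTsc, uint64_t u64TscOffset,
                                           uint32_t *pu32ProcCtls)
    {
        if (fCanUseRealTsc)
        {
            /* Guest reads host TSC + offset directly; no exit is taken. */
            exampleWriteTscOffset(u64TscOffset);
            *pu32ProcCtls &= ~PROC_CTLS_RDTSC_EXIT;
        }
        else
        {
            /* Intercept RDTSC/RDTSCP and emulate the returned value instead. */
            *pu32ProcCtls |= PROC_CTLS_RDTSC_EXIT;
        }
        exampleWriteProcCtls(*pu32ProcCtls);
    }
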
5738 * @param pVCpu Pointer to the VMCPU.
5751 DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
5754 Assert(!pVCpu->hm.s.Event.fPending);
5755 pVCpu->hm.s.Event.fPending = true;
5756 pVCpu->hm.s.Event.u64IntInfo = u32IntInfo;
5757 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
5758 pVCpu->hm.s.Event.cbInstr = cbInstr;
5759 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
5761 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
5768 * @param pVCpu Pointer to the VMCPU.
5773 DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5779 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5793 * @param pVCpu Pointer to the VMCPU.
5801 static int hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
5834 Log4(("IDT: vcpu[%RU32] Contributory #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5841 Log4(("IDT: vcpu[%RU32] Vectoring Double #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5843 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
5865 Log4(("IDT: vcpu[%RU32] Vectoring #PF due to Ext-Int/NMI. uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5890 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
5891 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
5893 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
5913 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
5916 Log4(("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu,
5917 pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.u32ErrCode));
5924 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
5926 Log4(("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,
5927 pVCpu->hm.s.Event.u64IntInfo, uIdtVector, uExitVector));
5935 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector,
5948 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
5955 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
5958 pVCpu->idCpu, VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason));
5959 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
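
hmR0VmxCheckExitDueToEventDelivery (5801) decides what to do when a second exception is raised while an earlier event was still being delivered (valid IDT-vectoring info): contributory-on-contributory or #PF-related combinations escalate to #DF (5834-5841), a fault during #DF delivery escalates to a triple fault (5935), and anything else re-injects the original event (5913-5917). A sketch of that classification using the architectural benign/contributory/page-fault exception classes; the enum and function names are mine:

    #include <stdint.h>

    enum { X86_XCPT_DE = 0, X86_XCPT_DB = 1, X86_XCPT_DF = 8, X86_XCPT_TS = 10,
           X86_XCPT_NP = 11, X86_XCPT_SS = 12, X86_XCPT_GP = 13, X86_XCPT_PF = 14 };

    typedef enum { REFLECT_EVENT, RAISE_DF, RAISE_TRIPLE_FAULT } REFLECTACTION;

    static int isContributory(uint8_t uVector)
    {
        return uVector == X86_XCPT_DE || uVector == X86_XCPT_TS || uVector == X86_XCPT_NP
            || uVector == X86_XCPT_SS || uVector == X86_XCPT_GP;
    }

    static REFLECTACTION classifyDoubleEvent(uint8_t uIdtVector, uint8_t uExitVector)
    {
        if (uIdtVector == X86_XCPT_DF)
            return RAISE_TRIPLE_FAULT;                /* fault during #DF delivery   */
        if (   (isContributory(uIdtVector) && isContributory(uExitVector))
            || (uIdtVector == X86_XCPT_PF  && uExitVector == X86_XCPT_PF)
            || (uIdtVector == X86_XCPT_PF  && isContributory(uExitVector)))
            return RAISE_DF;
        return REFLECT_EVENT;                         /* re-inject the original event */
    }
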
5972 * @param pVCpu Pointer to the VMCPU.
5979 static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5987 VMMRZCallRing3Disable(pVCpu);
5990 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0))
6000 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR0Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR0Mask);
6001 CPUMSetGuestCR0(pVCpu, uVal);
6002 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0);
6006 VMMRZCallRing3Enable(pVCpu);
6015 * @param pVCpu Pointer to the VMCPU.
6022 static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6027 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4))
6036 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR4Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR4Mask);
6037 CPUMSetGuestCR4(pVCpu, uVal);
6038 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4);
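
hmR0VmxSaveGuestCR0/CR4 reconstruct the guest-visible control register from two VMCS fields: bits owned by the host (set in the CR0/CR4 guest/host mask) are read from the shadow, the rest from the raw register, i.e. uVal = (uShadow & mask) | (uVal & ~mask) as at 6000 and 6036. A one-function sketch of that merge:

    #include <stdint.h>

    /* Merge the guest-visible view of CR0/CR4 from the raw VMCS value, the
       read shadow, and the guest/host ownership mask. */
    static uint32_t mergeShadowedCr(uint32_t uRawCr, uint32_t uShadow, uint32_t uMask)
    {
        /* Host-owned bits (mask=1) come from the shadow; guest-owned from the CR. */
        return (uShadow & uMask) | (uRawCr & ~uMask);
    }
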
6048 * @param pVCpu Pointer to the VMCPU.
6055 static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6058 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP))
6065 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP);
6075 * @param pVCpu Pointer to the VMCPU.
6082 static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6085 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP))
6092 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP);
6102 * @param pVCpu Pointer to the VMCPU.
6109 static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6111 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS))
6118 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) /* Undo our real-on-v86-mode changes to eflags if necessary. */
6120 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
6124 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
6127 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS);
6137 DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6139 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6140 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
6141 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6150 * @param pVCpu Pointer to the VMCPU.
6157 static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6159 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE))
6167 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6168 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6170 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6171 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6178 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6180 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). */
6183 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
6184 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
6186 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6187 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6191 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6192 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6194 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6195 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6198 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE);
6207 * @param pVCpu Pointer to the VMCPU.
6214 static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6218 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_ACTIVITY_STATE);
6228 * @param pVCpu Pointer to the VMCPU.
6235 static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6238 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
6243 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR);
6247 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
6251 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR);
6253 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
6257 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR);
6268 * @param pVCpu Pointer to the VMCPU.
6275 static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6278 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
6281 VMMRZCallRing3Disable(pVCpu);
6285 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
6287 hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
6288 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6292 VMMRZCallRing3Enable(pVCpu);
6295 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6298 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6310 * @param pVCpu Pointer to the VMCPU.
6317 static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6319 if (HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS))
6322 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
6323 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
6329 case MSR_K8_TSC_AUX: CPUMR0SetGuestTscAux(pVCpu, pMsr->u64Value); break;
6340 pVCpu->hm.s.u32HMError = pMsr->u32Msr;
6346 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS);
6356 * @param pVCpu Pointer to the VMCPU.
6363 static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6366 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6370 rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
6375 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3))
6377 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
6378 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4));
6380 PVM pVM = pVCpu->CTX_SUFF(pVM);
6389 CPUMSetGuestCR3(pVCpu, u64Val);
6390 if (VMMRZCallRing3IsEnabled(pVCpu))
6392 PGMUpdateCR3(pVCpu, u64Val);
6393 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6398 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
6405 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
6406 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
6407 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
6408 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
6410 if (VMMRZCallRing3IsEnabled(pVCpu))
6412 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6413 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6418 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
6423 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3);
6438 if (VMMRZCallRing3IsEnabled(pVCpu))
6440 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6441 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
6443 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6444 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6446 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6447 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
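Lines 6363-6447 are hmR0VmxSaveGuestControlRegs(); the interesting part is the defer-or-apply pattern for CR3 and the PAE PDPEs. A minimal sketch restating it (illustrative only):

/* Sketch: apply a CR3 change now if longjmps are possible, else defer via force-flag. */
static void sketchUpdateCr3(PVMCPU pVCpu, uint64_t u64GuestCr3)
{
    CPUMSetGuestCR3(pVCpu, u64GuestCr3);
    if (VMMRZCallRing3IsEnabled(pVCpu))
    {
        PGMUpdateCR3(pVCpu, u64GuestCr3);            /* Safe to touch PGM right away. */
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
    }
    else
        VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3); /* Handled at the next safe point. */
}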
6459 * @param pVCpu Pointer to the VMCPU.
6471 DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
6474 NOREF(pVCpu);
6535 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
6539 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
6549 * @param pVCpu Pointer to the VMCPU.
6556 static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6559 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS))
6561 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); AssertRCReturn(rc, rc);
6570 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6572 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
6573 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
6574 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
6575 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
6576 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
6577 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
6579 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS);
6591 * @param pVCpu Pointer to the VMCPU.
6598 static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6603 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR))
6607 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR);
6613 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR))
6619 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR);
6623 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR))
6629 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR);
6633 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR))
6635 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6639 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6644 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR);
6657 * @param pVCpu Pointer to the VMCPU.
6664 static int hmR0VmxSaveGuestDR7(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6666 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG))
6668 if (!pVCpu->hm.s.fUsingHyperDR7)
6676 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG);
6686 * @param pVCpu Pointer to the VMCPU.
6693 static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6698 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_APIC_STATE);
6710 * @param pVCpu Pointer to the VMCPU.
6715 static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6717 Assert(pVCpu);
6720 if (HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL)
6725 if (VMMRZCallRing3IsEnabled(pVCpu))
6726 VMMR0LogFlushDisable(pVCpu);
6728 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6729 Log4Func(("vcpu[%RU32]\n", pVCpu->idCpu));
6731 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
6732 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6734 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6735 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6737 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6738 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6740 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
6741 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6743 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
6744 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDR7 failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6746 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
6747 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6749 rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
6750 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestLazyMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6752 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
6753 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6755 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
6756 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6758 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
6759 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6761 AssertMsg(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL,
6762 ("Missed guest state bits while saving state; residue %RX32\n", HMVMXCPU_GST_VALUE(pVCpu)));
6764 if (VMMRZCallRing3IsEnabled(pVCpu))
6765 VMMR0LogFlushEnable(pVCpu);
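hmR0VmxSaveGuestState() (6715-6765) simply chains the per-group savers, and each saver is guarded by the same lazy-update idiom. A sketch of one such saver, using the SYSENTER CS MSR as the example field (illustrative; VMX_VMCS32_GUEST_SYSENTER_CS is taken from hm_vmx.h rather than from the matches above):

/* Sketch: the read-once guard every hmR0VmxSaveGuestXxx() helper follows. */
static int sketchSaveSysenterCs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
    {
        uint32_t u32Val;
        int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val);
        AssertRCReturn(rc, rc);
        pMixedCtx->SysEnter.cs = u32Val;
        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR);
    }
    return VINF_SUCCESS;    /* Already up to date: nothing to read. */
}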
6775 * @param pVCpu Pointer to the cross context CPU data for the calling
6782 static int hmR0VmxSaveGuestRegsForIemExec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fMemory, bool fNeedRsp)
6801 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6802 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6804 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
6805 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6807 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6809 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6821 * @param pVCpu Pointer to the VMCPU of the calling EMT.
6824 * @thread EMT(pVCpu)
6826 VMMR0_INT_DECL(int) HMR0EnsureCompleteBasicContext(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6830 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
6831 return hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6853 * @param pVCpu Pointer to the VMCPU.
6858 static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6860 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6862 if ( VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
6864 || VMCPU_FF_IS_PENDING(pVCpu, !pVCpu->hm.s.fSingleInstruction
6868 int rc3 = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6872 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6874 int rc2 = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
6877 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6881 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6883 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6884 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6888 if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
6890 int rc2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
6891 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
6902 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
6904 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
6912 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
6941 * @param pVCpu Pointer to the VMCPU.
6943 static void hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
6945 Assert(TRPMHasTrap(pVCpu));
6946 Assert(!pVCpu->hm.s.Event.fPending);
6954 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
6993 rc = TRPMResetTrap(pVCpu);
6998 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
6999 STAM_COUNTER_DEC(&pVCpu->hm.s.StatInjectPendingReflect);
7009 static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
7011 Assert(pVCpu->hm.s.Event.fPending);
7013 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
7014 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo);
7015 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntInfo);
7016 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
7019 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
7047 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
7051 TRPMSetErrorCode(pVCpu, uErrorCode);
7056 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
7065 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
7067 pVCpu->hm.s.Event.fPending = false;
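The pair at 6943-7067 converts events between TRPM and HM's pending-event format; on the HM side the event is a packed VT-x interruption-info word. A reduced sketch of the HM-to-TRPM direction (illustrative; the full mapping from VMX_IDT_VECTORING_INFO_TYPE() to a TRPMEVENT is elided):

/* Sketch: hand a pending HM event back to TRPM before going to ring-3. */
static void sketchPendingEventToTrpm(PVMCPU pVCpu)
{
    Assert(pVCpu->hm.s.Event.fPending);
    uint32_t const uVector       = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo);
    bool const     fErrCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntInfo);

    int rc = TRPMAssertTrap(pVCpu, uVector, TRPM_TRAP); /* Real code picks the TRPMEVENT by type. */
    AssertRC(rc);
    if (fErrCodeValid)
        TRPMSetErrorCode(pVCpu, pVCpu->hm.s.Event.u32ErrCode);
    pVCpu->hm.s.Event.fPending = false;                 /* HM no longer owns the event. */
}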
7077 * @param pVCpu Pointer to the VMCPU.
7085 static int hmR0VmxLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fSaveGuestState)
7088 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7100 && HMVMXCPU_GST_VALUE(pVCpu) != HMVMX_UPDATED_GUEST_ALL)
7102 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
7104 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
7108 if (CPUMIsGuestFPUStateActive(pVCpu))
7113 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7116 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
7117 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
7118 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
7123 if (CPUMIsHyperDebugStateActive(pVCpu))
7124 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
7126 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
7127 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
7128 Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
7129 Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));
7133 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
7134 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
7136 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
7137 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
7139 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
7145 && pVCpu->hm.s.vmx.fLazyMsrs)
7150 int rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
7153 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS));
7154 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7155 Assert(!pVCpu->hm.s.vmx.fLazyMsrs);
7160 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7162 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
7163 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
7164 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
7165 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
7166 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
7167 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
7168 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
7169 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7171 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7178 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7180 int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7183 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7186 Assert(!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
7198 * @param pVCpu Pointer to the VMCPU.
7205 DECLINLINE(int) hmR0VmxLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7209 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7214 if (!pVCpu->hm.s.fLeaveDone)
7216 int rc2 = hmR0VmxLeave(pVM, pVCpu, pMixedCtx, true /* fSaveGuestState */);
7218 pVCpu->hm.s.fLeaveDone = true;
7220 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
7230 VMMR0ThreadCtxHooksDeregister(pVCpu);
7233 int rc = HMR0LeaveCpu(pVCpu);
7245 * @param pVCpu Pointer to the VMCPU.
7252 DECLINLINE(int) hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7254 return hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
7268 * @param pVCpu Pointer to the VMCPU.
7275 static int hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
7278 Assert(pVCpu);
7284 VMXGetActivatedVmcs(&pVCpu->hm.s.vmx.LastError.u64VMCSPhys);
7285 pVCpu->hm.s.vmx.LastError.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
7286 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
7291 VMMRZCallRing3Disable(pVCpu);
7292 Log4(("hmR0VmxExitToRing3: pVCpu=%p idCpu=%RU32 rcExit=%d\n", pVCpu, pVCpu->idCpu, rcExit));
7295 if (pVCpu->hm.s.Event.fPending)
7297 hmR0VmxPendingEventToTrpmTrap(pVCpu);
7298 Assert(!pVCpu->hm.s.Event.fPending);
7303 Assert(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu));
7304 Assert(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu));
7307 int rc = hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
7309 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7313 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
7314 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
7320 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
7324 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
7327 Assert(!pVCpu->hm.s.fClearTrapFlag);
7331 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
7333 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
7336 VMMRZCallRing3RemoveNotification(pVCpu);
7337 VMMRZCallRing3Enable(pVCpu);
7348 * @param pVCpu Pointer to the VMCPU.
7354 DECLCALLBACK(int) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
7364 VMMRZCallRing3RemoveNotification(pVCpu);
7365 VMMRZCallRing3Disable(pVCpu);
7368 PVM pVM = pVCpu->CTX_SUFF(pVM);
7369 if (CPUMIsGuestFPUStateActive(pVCpu))
7370 CPUMR0SaveGuestFPU(pVM, pVCpu, (PCPUMCTX)pvUser);
7372 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
7376 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
7377 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
7378 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
7379 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
7383 && pVCpu->hm.s.vmx.fLazyMsrs)
7384 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7387 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7388 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7389 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7391 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7392 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7395 VMMR0ThreadCtxHooksDeregister(pVCpu);
7396 HMR0LeaveCpu(pVCpu);
7401 Assert(pVCpu);
7403 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7406 VMMRZCallRing3Disable(pVCpu);
7407 Assert(VMMR0IsLogFlushDisabled(pVCpu));
7409 Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32 enmOperation=%d\n", pVCpu, pVCpu->idCpu,
7412 int rc = hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
7415 VMMRZCallRing3Enable(pVCpu);
7424 * @param pVCpu Pointer to the VMCPU.
7426 DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
7428 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7430 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7432 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7433 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7444 * @param pVCpu Pointer to the VMCPU.
7446 DECLINLINE(void) hmR0VmxClearIntWindowExitVmcs(PVMCPU pVCpu)
7448 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7449 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7450 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7460 * @param pVCpu Pointer to the VMCPU.
7462 DECLINLINE(void) hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu)
7464 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7466 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7468 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7469 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7480 * @param pVCpu Pointer to the VMCPU.
7482 DECLINLINE(void) hmR0VmxClearNmiWindowExitVmcs(PVMCPU pVCpu)
7484 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT);
7485 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7486 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
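The four helpers at 7426-7486 share one toggle pattern: consult the cached capability MSR, flip the bit in the cached u32ProcCtls, and write the cache back. Generalized sketch (illustrative):

/* Sketch: set a primary processor-based execution control if the CPU allows it. */
static void sketchSetProcCtlsBit(PVMCPU pVCpu, uint32_t fBit)
{
    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & fBit) /* allowed-1? */
    {
        if (!(pVCpu->hm.s.vmx.u32ProcCtls & fBit))
        {
            pVCpu->hm.s.vmx.u32ProcCtls |= fBit;      /* Keep the cache authoritative... */
            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
            AssertRC(rc);                             /* ...and mirror it into the VMCS. */
        }
    }
}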
7496 * @param pVCpu Pointer to the VMCPU.
7501 static void hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7503 Assert(!pVCpu->hm.s.Event.fPending);
7506 uint32_t const uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
7511 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7514 Assert(!TRPMHasTrap(pVCpu));
7521 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
7528 Log4(("Pending NMI vcpu[%RU32]\n", pVCpu->idCpu));
7532 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7533 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
7536 hmR0VmxSetNmiWindowExitVmcs(pVCpu);
7542 else if ( VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
7543 && !pVCpu->hm.s.fSingleInstruction)
7545 int rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7553 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
7556 Log4(("Pending interrupt vcpu[%RU32] u8Interrupt=%#x \n", pVCpu->idCpu, u8Interrupt));
7560 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7565 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
7566 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
7570 hmR0VmxSetIntWindowExitVmcs(pVCpu);
7579 * @param pVCpu Pointer to the VMCPU.
7584 DECLINLINE(void) hmR0VmxSetPendingDebugXcpt(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7586 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7600 * @param pVCpu Pointer to the VMCPU.
7608 static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
7611 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7614 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
7618 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7621 Assert(!TRPMHasTrap(pVCpu));
7624 if (pVCpu->hm.s.Event.fPending)
7631 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
7632 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
7635 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7636 hmR0VmxClearIntWindowExitVmcs(pVCpu);
7655 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#x\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
7657 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
7658 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping, &uIntrState);
7668 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
7670 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
7678 if ( !pVCpu->hm.s.fSingleInstruction
7679 && !DBGFIsStepping(pVCpu))
7686 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7688 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
7696 Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG));
7705 int rc2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
7717 * @param pVCpu Pointer to the VMCPU.
7722 DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7726 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7734 * @param pVCpu Pointer to the VMCPU.
7746 DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping, uint32_t *puIntrState)
7751 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
7759 * @param pVCpu Pointer to the VMCPU.
7764 DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7769 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7776 * @param pVCpu Pointer to the VMCPU.
7783 DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
7788 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7796 * @param pVCpu Pointer to the VMCPU.
7812 DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
7819 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
7828 * @param pVCpu Pointer to the VMCPU.
7834 DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t u32ErrorCode)
7840 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */);
7847 * @param pVCpu Pointer to the VMCPU.
7855 DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
7864 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7902 * @param pVCpu Pointer to the VMCPU.
7924 static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
7961 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
7964 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7975 PVM pVM = pVCpu->CTX_SUFF(pVM);
7982 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7983 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
7984 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
7986 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP));
7998 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, puIntrState);
8002 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */,
8043 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS
8061 pVCpu->hm.s.Event.fPending = false;
8093 Log4(("Injecting vcpu[%RU32] u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", pVCpu->idCpu,
8106 * @param pVCpu Pointer to the VMCPU.
8112 static void hmR0VmxClearEventVmcs(PVMCPU pVCpu)
8115 Log4Func(("vcpu[%d]\n", pVCpu->idCpu));
8117 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
8119 hmR0VmxClearIntWindowExitVmcs(pVCpu);
8120 Assert(!pVCpu->hm.s.Event.fPending);
8123 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)
8125 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
8126 Assert(!pVCpu->hm.s.Event.fPending);
8129 if (!pVCpu->hm.s.Event.fPending)
8155 * @param pVCpu Pointer to the VMCPU.
8158 VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
8161 AssertPtr(pVCpu);
8166 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8167 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
8182 Assert(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR);
8183 int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8187 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8188 pVCpu->hm.s.fLeaveDone = false;
8199 * @param pVCpu Pointer to the VMCPU.
8201 * @thread EMT(pVCpu)
8203 VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
8212 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
8213 VMCPU_ASSERT_EMT(pVCpu);
8215 PVM pVM = pVCpu->CTX_SUFF(pVM);
8216 PCPUMCTX pMixedCtx = CPUMQueryGuestCtxPtr(pVCpu);
8219 VMMRZCallRing3Disable(pVCpu);
8225 if (!pVCpu->hm.s.fLeaveDone)
8229 hmR0VmxLeave(pVM, pVCpu, pMixedCtx, false /* fSaveGuestState */);
8230 pVCpu->hm.s.fLeaveDone = true;
8234 int rc = HMR0LeaveCpu(pVCpu);
8238 VMMRZCallRing3Enable(pVCpu);
8239 STAM_COUNTER_INC(&pVCpu->hm.s.StatPreemptPreempting);
8246 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
8247 VMCPU_ASSERT_EMT(pVCpu);
8250 VMMRZCallRing3Disable(pVCpu);
8255 int rc = HMR0EnterCpu(pVCpu);
8257 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
8260 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
8262 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8264 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8267 pVCpu->hm.s.fLeaveDone = false;
8270 VMMRZCallRing3Enable(pVCpu);
8288 * @param pVCpu Pointer to the VMCPU.
8292 static int hmR0VmxSaveHostState(PVM pVM, PVMCPU pVCpu)
8296 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
8299 int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
8300 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8302 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
8303 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8305 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
8306 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8308 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
8318 * @param pVCpu Pointer to the VMCPU.
8322 VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
8325 AssertPtr(pVCpu);
8327 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8332 return hmR0VmxSaveHostState(pVM, pVCpu);
8349 * @param pVCpu Pointer to the VMCPU.
8356 static int hmR0VmxLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
8359 AssertPtr(pVCpu);
8363 VMMRZCallRing3Disable(pVCpu);
8364 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8366 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8368 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
8371 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
8375 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
8383 int rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
8384 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8387 rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
8388 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8391 rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
8392 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8394 rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
8395 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8397 rc = hmR0VmxLoadGuestCR3AndCR4(pVCpu, pMixedCtx);
8398 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestCR3AndCR4: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8401 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
8402 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8406 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
8407 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8409 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
8410 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8412 rc = hmR0VmxLoadGuestXcptIntercepts(pVCpu, pMixedCtx);
8413 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestXcptIntercepts! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8419 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
8420 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8423 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
8425 VMMRZCallRing3Enable(pVCpu);
8427 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
8436 * @param pVCpu Pointer to the VMCPU.
8441 static void hmR0VmxLoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8446 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8448 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
8450 int rc = hmR0VmxLoadSharedCR0(pVCpu, pCtx);
8454 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
8456 int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx);
8460 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
8462 rc = hmR0VmxLoadGuestRflags(pVCpu, pCtx);
8467 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
8471 hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
8473 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
8477 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
8479 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
8481 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
8484 AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
8485 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8493 * @param pVCpu Pointer to the VMCPU.
8498 DECLINLINE(void) hmR0VmxLoadGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
8502 Log5(("LoadFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8504 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
8507 if (HMCPU_CF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP))
8509 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
8511 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
8513 else if (HMCPU_CF_VALUE(pVCpu))
8515 int rc = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx);
8517 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
8521 AssertMsg( !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
8522 || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
8523 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8550 * @param pVCpu Pointer to the VMCPU.
8560 static int hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
8562 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8565 PGMRZDynMapFlushAutoSet(pVCpu);
8569 int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
8575 if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
8576 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
8589 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
8592 pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
8596 if (TRPMHasTrap(pVCpu))
8597 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
8598 else if (!pVCpu->hm.s.Event.fPending)
8599 hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);
8605 rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fStepping);
8619 hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
8626 VMMRZCallRing3Disable(pVCpu);
8640 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
8642 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
8644 hmR0VmxClearEventVmcs(pVCpu);
8646 VMMRZCallRing3Enable(pVCpu);
8647 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
8653 hmR0VmxClearEventVmcs(pVCpu);
8655 VMMRZCallRing3Enable(pVCpu);
8656 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
8661 pVCpu->hm.s.Event.fPending = false;
8673 * @param pVCpu Pointer to the VMCPU.
8682 static void hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8684 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8685 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8688 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8689 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
8692 if (!CPUMIsGuestFPUStateActive(pVCpu))
8693 CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8694 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8697 if ( pVCpu->hm.s.fPreloadGuestFpu
8698 && !CPUMIsGuestFPUStateActive(pVCpu))
8700 CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8701 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
8702 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8708 if ( !pVCpu->hm.s.vmx.fUpdatedHostMsrs
8709 && pVCpu->hm.s.vmx.cMsrs > 0)
8711 hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu);
8720 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
8723 int rc = hmR0VmxSaveHostState(pVM, pVCpu);
8725 STAM_COUNTER_INC(&pVCpu->hm.s.StatPreemptSaveHostState);
8727 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT));
8732 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
8733 hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
8734 AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8740 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
8741 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
8746 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
8747 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
8749 pVmxTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
8754 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8755 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
8760 || idCurrentCpu != pVCpu->hm.s.idLastCpu)
8762 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVM, pVCpu);
8766 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
8767 hmR0VmxFlushTaggedTlb(pVCpu, pCpu); /* Invalidate the appropriate guest entries from the TLB. */
8768 Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
8769 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Update the error reporting info. with the current host CPU. */
8771 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
8773 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
8779 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
8781 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8783 int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
8785 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS));
8786 bool fMsrUpdated = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMR0GetGuestTscAux(pVCpu),
8788 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8790 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
8794 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX);
8795 Assert(!pVCpu->hm.s.vmx.cMsrs || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8800 hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
8801 hmR0VmxCheckHostEferMsr(pVCpu);
8802 AssertRC(hmR0VmxCheckVmcsCtls(pVCpu));
8805 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
8817 * @param pVCpu Pointer to the VMCPU.
8829 static void hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
8833 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8835 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
8836 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
8837 HMVMXCPU_GST_RESET_TO(pVCpu, 0); /* Exits/longjmps to ring-3 requires saving the guest state. */
8842 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8843 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset);
8845 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
8846 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
8848 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8851 if (CPUMIsGuestFPUStateActive(pVCpu))
8853 hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8854 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
8855 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8860 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Host state messed up by VT-x, we must restore. */
8862 pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
8864 hmR0VmxCheckHostEferMsr(pVCpu); /* Verify that VMRUN/VMLAUNCH didn't modify host EFER. */
8867 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
8878 HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmxTransient->uExitReason);
8883 Log4(("VM-entry failure: pVCpu=%p idCpu=%RU32 rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", pVCpu, pVCpu->idCpu, rcVMRun,
8894 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
8897 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8900 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8910 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8911 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
8913 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
8915 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
8926 * @param pVCpu Pointer to the VMCPU.
8931 static int hmR0VmxRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8945 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
8946 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, false /* fStepping */);
8950 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
8951 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
8956 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
8961 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
8962 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
8968 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
8969 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
8970 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
8973 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
8976 hmR0VmxReadExitQualificationVmcs(pVCpu, &VmxTransient);
8977 hmR0VmxSaveGuestState(pVCpu, pCtx);
8978 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pCtx, VmxTransient.uExitReason, VmxTransient.uExitQualification);
8983 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
8985 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
8987 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
8992 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
8998 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
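hmR0VmxRunGuestCodeNormal() (8931-8998) is the inner execution loop. Stripped of statistics and DTrace probes, its shape is as follows (illustrative skeleton; the resume-loop cap and the world-switch error reporting visible above are elided):

/* Sketch: prepare (longjmp-safe), commit (preemption disabled), run, sync, dispatch. */
static int sketchRunLoop(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    VMXTRANSIENT VmxTransient;
    int rc = VERR_INTERNAL_ERROR_5;
    for (;;)
    {
        rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, false /* fStepping */);
        if (rc != VINF_SUCCESS)
            break;                                    /* Force-flags etc. want ring-3. */

        hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
        rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);       /* VMLAUNCH/VMRESUME world switch. */
        hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);

        if (RT_UNLIKELY(rc != VINF_SUCCESS))
            break;                                    /* The world switch itself failed. */

        rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
        if (rc != VINF_SUCCESS)
            break;                                    /* Exit handler wants ring-3. */
    }
    return rc;
}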
9008 * @param pVCpu Pointer to the VMCPU.
9013 static int hmR0VmxRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
9029 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
9030 rcStrict = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, true /* fStepping */);
9034 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
9035 rcStrict = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
9040 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, VBOXSTRICTRC_TODO(rcStrict));
9045 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
9046 hmR0VmxReportWorldSwitchError(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict), pCtx, &VmxTransient);
9052 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
9053 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
9054 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
9057 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
9060 hmR0VmxReadExitQualificationVmcs(pVCpu, &VmxTransient);
9061 hmR0VmxSaveGuestState(pVCpu, pCtx);
9062 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pCtx, VmxTransient.uExitReason, VmxTransient.uExitQualification);
9066 rcStrict = hmR0VmxHandleExitStep(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, uCsStart, uRipStart);
9067 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
9072 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
9081 int rc2 = hmR0VmxSaveGuestRip(pVCpu, pCtx);
9082 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pCtx);
9090 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
9096 if (pVCpu->hm.s.fClearTrapFlag)
9098 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pCtx);
9100 pVCpu->hm.s.fClearTrapFlag = false;
9107 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
9117 * @param pVCpu Pointer to the VMCPU.
9120 VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
9122 Assert(VMMRZCallRing3IsEnabled(pVCpu));
9123 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
9126 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pCtx);
9129 if (!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu))
9130 rc = hmR0VmxRunGuestCodeNormal(pVM, pVCpu, pCtx);
9132 rc = hmR0VmxRunGuestCodeStep(pVM, pVCpu, pCtx);
9139 int rc2 = hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rc);
9142 pVCpu->hm.s.u32HMError = rc;
9145 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
9151 DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
9154 # define SVVMCS() do { int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); AssertRC(rc2); } while (0)
9155 # define LDVMCS() do { HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); } while (0)
9160 case VMX_EXIT_EPT_MISCONFIG: /* SVVMCS(); */ rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9161 case VMX_EXIT_EPT_VIOLATION: /* SVVMCS(); */ rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9162 case VMX_EXIT_IO_INSTR: /* SVVMCS(); */ rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9163 case VMX_EXIT_CPUID: /* SVVMCS(); */ rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9164 case VMX_EXIT_RDTSC: /* SVVMCS(); */ rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9165 case VMX_EXIT_RDTSCP: /* SVVMCS(); */ rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9166 case VMX_EXIT_APIC_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9167 case VMX_EXIT_XCPT_OR_NMI: /* SVVMCS(); */ rc = hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9168 case VMX_EXIT_MOV_CRX: /* SVVMCS(); */ rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9169 case VMX_EXIT_EXT_INT: /* SVVMCS(); */ rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9170 case VMX_EXIT_INT_WINDOW: /* SVVMCS(); */ rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9171 case VMX_EXIT_MWAIT: /* SVVMCS(); */ rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9172 case VMX_EXIT_MONITOR: /* SVVMCS(); */ rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9173 case VMX_EXIT_TASK_SWITCH: /* SVVMCS(); */ rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9174 case VMX_EXIT_PREEMPT_TIMER: /* SVVMCS(); */ rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9175 case VMX_EXIT_RDMSR: /* SVVMCS(); */ rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9176 case VMX_EXIT_WRMSR: /* SVVMCS(); */ rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9177 case VMX_EXIT_MOV_DRX: /* SVVMCS(); */ rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9178 case VMX_EXIT_TPR_BELOW_THRESHOLD: /* SVVMCS(); */ rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9179 case VMX_EXIT_HLT: /* SVVMCS(); */ rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9180 case VMX_EXIT_INVD: /* SVVMCS(); */ rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9181 case VMX_EXIT_INVLPG: /* SVVMCS(); */ rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9182 case VMX_EXIT_RSM: /* SVVMCS(); */ rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9183 case VMX_EXIT_MTF: /* SVVMCS(); */ rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9184 case VMX_EXIT_PAUSE: /* SVVMCS(); */ rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9185 case VMX_EXIT_XDTR_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9186 case VMX_EXIT_TR_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9187 case VMX_EXIT_WBINVD: /* SVVMCS(); */ rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9188 case VMX_EXIT_XSETBV: /* SVVMCS(); */ rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9189 case VMX_EXIT_RDRAND: /* SVVMCS(); */ rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9190 case VMX_EXIT_INVPCID: /* SVVMCS(); */ rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9191 case VMX_EXIT_GETSEC: /* SVVMCS(); */ rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9192 case VMX_EXIT_RDPMC: /* SVVMCS(); */ rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9193 case VMX_EXIT_VMCALL: /* SVVMCS(); */ rc = hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9195 case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break;
9196 case VMX_EXIT_NMI_WINDOW: rc = hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient); break;
9197 case VMX_EXIT_INIT_SIGNAL: rc = hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient); break;
9198 case VMX_EXIT_SIPI: rc = hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient); break;
9199 case VMX_EXIT_IO_SMI: rc = hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient); break;
9200 case VMX_EXIT_SMI: rc = hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient); break;
9201 case VMX_EXIT_ERR_MSR_LOAD: rc = hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient); break;
9202 case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient); break;
9203 case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient); break;
9219 rc = hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
9225 rc = hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
9241 * @param pVCpu The virtual CPU of the calling EMT.
9248 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitStep(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
9260 return hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient);
9308 int rc2 = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
9309 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9322 return g_apfnVMExitHandlers[uExitReason](pVCpu, pMixedCtx, pVmxTransient);
9324 return hmR0VmxHandleExit(pVCpu, pMixedCtx, pVmxTransient, uExitReason);
9343 AssertPtr(pVCpu); \
9350 Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", pVCpu->idCpu)); \
9352 if (VMMR0IsLogFlushDisabled(pVCpu)) \
9365 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient); \
9375 * @param pVCpu Pointer to the VMCPU.
9383 DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9386 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
9387 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
9391 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
9399 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
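hmR0VmxAdvanceGuestRip() (9383-9399) is the common epilogue for trivially-emulated exits. Sketch (illustrative; cbInstr is assumed to have been fetched from the VM-exit instruction-length field beforehand):

/* Sketch: step RIP past the exiting instruction and honour a pending TF. */
static int sketchAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
    int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    rc    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    AssertRCReturn(rc, rc);

    pMixedCtx->rip += pVmxTransient->cbInstr;
    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);

    if (pMixedCtx->eflags.Bits.u1TF)                  /* Single-stepping: owe the guest a #DB. */
        hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
    return rc;
}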
9414 * @param pVCpu Pointer to the VMCPU.
9420 static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
9475 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
9485 Assert(u32Val == pVCpu->hm.s.vmx.u32EntryCtls);
9487 bool const fLongModeGuest = RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
9564 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
9583 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR)
9594 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR)
9618 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
9625 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
9768 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
9903 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
9935 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
9938 if ( (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
10021 pVCpu->hm.s.u32HMError = uError;
10039 HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10042 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
10044 if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
10053 HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10056 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
10062 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)
10076 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
10077 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
10082 rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
10087 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
10105 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
10106 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
10107 case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
10108 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
10109 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
10110 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
10112 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
10113 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10114 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
10115 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10116 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
10117 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10118 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
10119 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10120 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
10121 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10122 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS);
10123 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10127 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10130 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
10131 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
10133 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
10134 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
10140 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
10148 pVCpu->hm.s.u32HMError = uVector;
10159 pVCpu->hm.s.u32HMError = uExitIntInfo;
10165 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
10173 HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10178 hmR0VmxClearIntWindowExitVmcs(pVCpu);
10181 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
10189 HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10192 if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)))
10198 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
10210 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
10212 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
10216 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
10226 HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10229 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
10230 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10237 HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10240 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
10241 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10248 HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10251 PVM pVM = pVCpu->CTX_SUFF(pVM);
10252 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10255 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10263 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
10271 HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10274 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
10288 HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10291 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10294 PVM pVM = pVCpu->CTX_SUFF(pVM);
10295 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10298 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10301 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
10306 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
10314 HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10317 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10318 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
10321 PVM pVM = pVCpu->CTX_SUFF(pVM);
10322 rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
10325 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10328 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
10336 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);
10344 HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10347 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10348 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
10351 PVM pVM = pVCpu->CTX_SUFF(pVM);
10352 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10355 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10363 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
10371 HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10374 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);
10376 if (pVCpu->hm.s.fHypercallsEnabled)
10379 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10383 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
10384 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* For long-mode checks in gimKvmHypercall(). */
10388 rc = GIMHypercall(pVCpu, pMixedCtx);
10393 hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10398 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
10406 HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10409 PVM pVM = pVCpu->CTX_SUFF(pVM);
10412 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10413 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
10416 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
10419 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10425 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
10433 HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10436 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10437 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10438 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10441 PVM pVM = pVCpu->CTX_SUFF(pVM);
10442 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10444 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10450 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
10458 HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10461 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10462 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10463 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10466 PVM pVM = pVCpu->CTX_SUFF(pVM);
10467 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10472 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10476 && EMMonitorWaitShouldContinue(pVCpu, pMixedCtx))
10488 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
10496 HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10505 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10513 HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10522 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10530 HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10534 AssertMsgFailed(("Unexpected I/O SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10542 HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10550 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
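RSM, SMI, I/O SMI and SIPI (10496-10550) can only occur in configurations this code never programs (dual-monitor SMM treatment, the wait-for-SIPI activity state), so each handler is the same assert-and-fail stub. The shape, sketched with an assumed error-return macro:

    HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    {
        /* Wait-for-SIPI is never entered by this code, so this exit is impossible. */
        AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
        /* Assumed: records pVmxTransient->uExitReason in u32HMError, returns VERR_VMX_UNEXPECTED_EXIT. */
        HMVMX_RETURN_UNEXPECTED_EXIT();
    }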
10559 HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10577 HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10587 HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10590 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
10591 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
10592 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10596 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
10597 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
10602 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
10604 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
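HLT (10587-10604) needs only RIP and RFLAGS: step past the one-byte opcode, then let EM decide (using EFLAGS.IF) whether a halt is really due or execution can continue, for instance because an interrupt is pending. Sketch, with the counter placement approximated:

    Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
    int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    rc    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);   /* for the IF check in EM */
    AssertRCReturn(rc, rc);

    pMixedCtx->rip++;                                    /* HLT is a 1-byte opcode */
    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
    if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx))     /* requires eflags */
    {
        rc = VINF_SUCCESS;
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
    }
    else
    {
        rc = VINF_EM_HALT;                               /* go halt in ring-3 */
        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
    }
    return rc;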
10613 HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10616 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
10624 HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10632 PVM pVM = pVCpu->CTX_SUFF(pVM);
10633 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
10634 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
10642 HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10647 rc |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/);
10648 rc |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
10651 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbInstr);
10652 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);
10654 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
10663 HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10677 HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10679 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10682 rc = hmR0VmxCheckVmcsCtls(pVCpu);
10685 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
10722 HMDumpRegs(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
10731 HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10734 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
10743 HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10746 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
10755 HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10757 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
10758 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient);
10768 HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10773 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
10774 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
10776 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10784 HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10789 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
10790 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
10792 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10800 HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10805 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10806 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10807 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10808 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10810 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
10811 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
10817 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
10819 if ( hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)
10826 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests
10827 && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10836 PVM pVM = pVCpu->CTX_SUFF(pVM);
10837 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10840 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
10843 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
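With MSR bitmaps enabled (10817-10827) an RDMSR exit is only legitimate for MSRs this code intercepts on purpose: auto-load/store MSRs and, for 64-bit guests, the lazily swapped MSRs. A sketch of that sanity check; the failure branch is an assumption consistent with the other unexpected-exit stubs:

    if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    {
        if (   hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)
            || (   pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests
                && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx)))
        { /* deliberately intercepted, fall through to EMInterpretRdmsr */ }
        else
            AssertMsgFailed(("Unexpected RDMSR exit, ecx=%#RX32\n", pMixedCtx->ecx));
    }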
10853 HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10856 PVM pVM = pVCpu->CTX_SUFF(pVM);
10860 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10861 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10862 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10863 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10865 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
10866 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
10871 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10873 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
10877 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10886 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10897 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS);
10901 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10905 case MSR_IA32_SYSENTER_CS: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
10906 case MSR_IA32_SYSENTER_EIP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
10907 case MSR_IA32_SYSENTER_ESP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
10909 case MSR_K8_GS_BASE: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); break;
10913 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
10914 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
10916 else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10917 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
10942 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
10954 if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
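After a successfully interpreted WRMSR the handler must dirty every piece of VMCS state the write may have invalidated, or the stale value would be reloaded at the next VM-entry. The dispatch quoted in 10905-10917, condensed into one fragment:

    switch (pMixedCtx->ecx)
    {
        case MSR_IA32_SYSENTER_CS:  HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);  break;
        case MSR_IA32_SYSENTER_EIP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
        case MSR_IA32_SYSENTER_ESP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
        case MSR_K8_GS_BASE:        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);     break;
        default:
            if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
                HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
            else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
            break;
    }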
10973 HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10978 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
10979 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT)
10981 AssertMsgFailed(("Unexpected PAUSE exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10990 HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10993 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
11000 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
11001 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
11016 HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11019 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
11020 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11026 PVM pVM = pVCpu->CTX_SUFF(pVM);
11028 rc = hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, true /*fNeedRsp*/);
11033 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11036 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr,
11044 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
11052 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
11056 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
11058 VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
11061 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
11063 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
11070 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
11076 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11085 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
11087 rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr,
11091 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
11100 rcStrict = IEMExecDecodedClts(pVCpu, pVmxTransient->cbInstr);
11102 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
11103 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
11111 rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr,
11114 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
11124 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);
11125 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
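MOV CRx (11016-11125) decodes the access type from the exit qualification and fans out to the decoded-instruction IEM entry points; each arm dirties the matching HM_CHANGED bit. Condensed sketch; the access-type constants and the GENREG/LMSW extractors are assumed from the VMX headers, only the CRX_REGISTER extractor appears in the listing:

    uint64_t uExitQualification = pVmxTransient->uExitQualification;
    switch (VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification))
    {
        case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE:   /* MOV to CRx */
            rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr,
                                                 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
                                                 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
            break;
        case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ:    /* MOV from CRx */
            rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr,
                                                VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
                                                VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
            break;
        case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS:
            rcStrict = IEMExecDecodedClts(pVCpu, pVmxTransient->cbInstr);
            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
            break;
        case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW:
            rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr,
                                          VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
            break;
    }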
11134 HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11137 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
11139 int rc2 = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11141 rc2 |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
11142 rc2 |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* EFLAGS checks in EMInterpretDisasCurrent(). */
11143 rc2 |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
11144 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
11165 PVM pVM = pVCpu->CTX_SUFF(pVM);
11181 rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11189 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
11200 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr);
11206 rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11208 rcStrict = IEMExecOne(pVCpu);
11211 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
11214 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
11215 rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */);
11220 rcStrict = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
11222 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
11226 rcStrict = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
11228 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
11248 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
11250 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
11251 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
11256 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
11263 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
11264 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
11273 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
11283 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
11286 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11293 rc2 = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11304 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
11307 VMMRZCallRing3Disable(pVCpu);
11310 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
11312 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
11319 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
11321 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
11329 VMMRZCallRing3Enable(pVCpu);
11352 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
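For non-string IN/OUT (11248-11264) the handler skips the interpreter and calls IOM directly; if IOM must complete the access in ring-3, the port, mask and width are stashed so the access can be replayed there. Sketch, with the decoded locals (fIOWrite, uIOPort, uAndVal, cbValue, cbInstr) assumed to come from the exit qualification:

    if (fIOWrite)   /* OUT */
    {
        rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
        if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
            HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr,
                                       uIOPort, uAndVal, cbValue);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
    }
    else            /* IN */
    {
        uint32_t u32Result = 0;
        rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
        if (IOM_SUCCESS(rcStrict))
            pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
        else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
            HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr,
                                      uIOPort, uAndVal, cbValue);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
    }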
11361 HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11366 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11380 Assert(!pVCpu->hm.s.Event.fPending);
11381 pVCpu->hm.s.Event.fPending = true;
11382 pVCpu->hm.s.Event.u64IntInfo = pVmxTransient->uIdtVectoringInfo;
11386 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
11388 pVCpu->hm.s.Event.u32ErrCode = 0;
11392 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
11396 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
11403 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
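A task-switch exit caused by a task gate during event delivery (11380-11392) must re-queue the original event so it is delivered after the switch, preserving a valid error code and, for #PF, the fault address from CR2. Sketch, with the two predicate locals assumed to be decoded from the IDT-vectoring info:

    Assert(!pVCpu->hm.s.Event.fPending);
    pVCpu->hm.s.Event.fPending   = true;
    pVCpu->hm.s.Event.u64IntInfo = pVmxTransient->uIdtVectoringInfo;
    if (fErrorCodeValid)         /* assumed: error-code-valid bit of the vectoring info */
        pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
    else
        pVCpu->hm.s.Event.u32ErrCode = 0;
    if (fIsXcptPF)               /* assumed: vectoring vector == X86_XCPT_PF */
        pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);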
11411 HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11414 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
11415 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
11416 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
11418 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
11426 HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11431 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11442 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11445 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11446 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11447 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11449 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11459 AssertMsg( !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
11466 PVM pVM = pVCpu->CTX_SUFF(pVM);
11470 VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
11479 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11494 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
11496 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchApicAccessToR3);
11505 HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11512 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11517 if ( !DBGFIsStepping(pVCpu)
11518 && !pVCpu->hm.s.fSingleInstruction
11522 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
11523 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
11526 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
11529 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
11530 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
11535 VMMRZCallRing3Disable(pVCpu);
11539 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
11540 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
11543 VMMRZCallRing3Enable(pVCpu);
11546 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11549 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
11551 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
11553 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
11561 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11562 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11563 rc |= hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11567 PVM pVM = pVCpu->CTX_SUFF(pVM);
11570 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
11574 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
11575 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
11579 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
11582 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
11588 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
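The interesting MOV DRx path (11517-11553) is the lazy one: when nobody is stepping the guest, stop intercepting DRx moves (and #DB outside real-on-v86 mode), swap in the guest debug state with ring-3 longjmps blocked, and resume without interpreting anything. Sketch of that branch:

    if (   !DBGFIsStepping(pVCpu)
        && !pVCpu->hm.s.fSingleInstruction)
    {
        /* Stop exiting on DRx moves... */
        pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
        AssertRCReturn(rc, rc);

        /* ...and on #DB, unless real-on-v86 mode still needs the intercept. */
        if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
        {
            pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
        }

        /* Load the guest debug registers; no longjmps to ring-3 meanwhile. */
        VMMRZCallRing3Disable(pVCpu);
        CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
        Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
        VMMRZCallRing3Enable(pVCpu);

        STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
        return VINF_SUCCESS;
    }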
11599 HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11602 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
11605 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11617 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
11620 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11621 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11622 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11633 PVM pVM = pVCpu->CTX_SUFF(pVM);
11634 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
11642 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11656 HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11659 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
11662 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11672 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11674 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
11677 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11678 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11679 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11694 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
11700 PVM pVM = pVCpu->CTX_SUFF(pVM);
11701 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
11702 TRPMResetTrap(pVCpu);
11710 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
11711 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
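The EPT-violation path (11656-11711) synthesizes an x86-style #PF error code from the exit qualification before handing the fault to PGM; VINF_SUCCESS means the mapping was fixed up and the faulting instruction is simply re-executed. Sketch; the qualification bit names are assumed from the VMX headers, and GCPhys is the guest-physical address read from the VMCS:

    uint32_t uErrorCode = 0;
    if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
        uErrorCode |= X86_TRAP_PF_ID;
    if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
        uErrorCode |= X86_TRAP_PF_RW;
    if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
        uErrorCode |= X86_TRAP_PF_P;

    TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
    rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode,
                                        CPUMCTX2CORE(pMixedCtx), GCPhys);
    TRPMResetTrap(pVCpu);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);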
11734 static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11737 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
11739 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11745 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
11750 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11755 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11764 static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11767 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
11771 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11774 PVM pVM = pVCpu->CTX_SUFF(pVM);
11775 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11783 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11795 static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11798 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
11805 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11813 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
11821 VMMRZCallRing3Disable(pVCpu);
11826 if (CPUMIsGuestDebugStateActive(pVCpu))
11830 VMMRZCallRing3Enable(pVCpu);
11832 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11858 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11869 CPUMSetHyperDR6(pVCpu, uDR6);
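#DB (11795-11869) first offers the event to DBGF; only a genuine guest trap folds the exit qualification's DR6 bits into the guest context, and because the guest's debug registers may be live on the CPU at that point, the write-through happens with ring-3 calls disabled. Sketch, where uDR6 is the local derived from the exit qualification and the DR6 mask name is assumed:

    rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6,
                             pVCpu->hm.s.fSingleInstruction);
    if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        /* The guest owns this #DB: merge the B0-B3/BD/BS bits into its DR6. */
        pMixedCtx->dr[6] &= ~X86_DR6_B_MASK;   /* assumed mask */
        pMixedCtx->dr[6] |= uDR6;

        VMMRZCallRing3Disable(pVCpu);
        if (CPUMIsGuestDebugStateActive(pVCpu))
            ASMSetDR6(pMixedCtx->dr[6]);       /* keep the live register coherent */
        VMMRZCallRing3Enable(pVCpu);
    }
    else
        CPUMSetHyperDR6(pVCpu, uDR6);          /* the debugger owns the event */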
11879 static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11884 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11888 VMMRZCallRing3Disable(pVCpu);
11895 Assert(CPUMIsGuestFPUStateActive(pVCpu) || HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
11902 rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
11903 Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
11907 VMMRZCallRing3Enable(pVCpu);
11912 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
11913 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
11914 pVCpu->hm.s.fPreloadGuestFpu = true;
11922 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11924 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
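#NM (11879-11924) implements lazy FPU loading: CPUMR0Trap07Handler either swaps the guest FPU state onto the CPU (our fault, just resume) or reports a real guest trap to reflect unchanged. Sketch; the hmR0VmxSetPendingEvent parameter order is assumed from its other uses in the file:

    rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
    if (rc == VINF_SUCCESS)
    {
        /* Guest FPU state is now on the CPU; CR0.TS/MP handling changed. */
        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
        pVCpu->hm.s.fPreloadGuestFpu = true;
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
    }
    else
    {
        /* A genuine guest #NM: re-inject the original event. */
        hmR0VmxSetPendingEvent(pVCpu,
                               VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
                               pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
                               0 /* GCPtrFaultAddress */);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
    }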
11936 static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11939 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
11942 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
11949 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11952 pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));
11953 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11965 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
11968 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11971 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
11973 PVM pVM = pVCpu->CTX_SUFF(pVM);
11974 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
11987 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11988 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11989 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
12001 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
12002 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
12004 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
12005 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
12006 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
12015 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
12016 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
12040 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
12059 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12064 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
12066 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
12087 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
12110 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12113 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
12114 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
12131 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
12148 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12154 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
12156 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
12163 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
12165 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
12173 hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
12175 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
12180 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
12188 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
12191 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
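In real-on-v86 mode without unrestricted guest execution, #GP (11936-12191) doubles as the emulation entry point for instructions the v86 monitor traps (CLI, STI, HLT, POPF, PUSHF, IRET, INT n, INTO); the STI arm must additionally raise the interrupt-inhibition force-flag for one instruction. A sketch of that arm, assuming pDis was filled in by EMInterpretDisasCurrent and that the case structure mirrors the quoted counters:

    case OP_STI:
    {
        pMixedCtx->eflags.Bits.u1IF = 1;
        pMixedCtx->rip += pDis->cbInstr;
        /* Hardware inhibits interrupts until the instruction after STI completes. */
        EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
        Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
        hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);  /* honour pending single-step */
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
        break;
    }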
12216 static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12228 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
12233 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12243 static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12246 PVM pVM = pVCpu->CTX_SUFF(pVM);
12247 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12255 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
12259 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12265 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
12268 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
12280 Assert(pVCpu->hm.s.Event.fPending);
12284 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
12290 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
12291 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pMixedCtx),
12300 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12304 TRPMResetTrap(pVCpu);
12305 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
12314 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
12315 TRPMResetTrap(pVCpu);
12316 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
12318 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12324 TRPMResetTrap(pVCpu);
12325 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
12326 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
12330 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
12334 TRPMResetTrap(pVCpu);
12335 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
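The #PF tail (12284-12335) funnels through PGMTrap0eHandler: VINF_SUCCESS is a shadow fault the guest never sees, while VINF_EM_RAW_GUEST_TRAP re-queues the fault for the guest with the error code PGM computed. Sketch, with the glue between the quoted lines assumed:

    TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification,
                     (RTGCUINT)pVmxTransient->uExitIntErrorCode);
    rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pMixedCtx),
                          (RTGCPTR)pVmxTransient->uExitQualification);
    if (rc == VINF_SUCCESS)
    {
        /* Shadow #PF: the paging structures were fixed up, just re-execute. */
        TRPMResetTrap(pVCpu);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
    }
    else if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        /* Real guest #PF: forward it with PGM's (possibly refined) error code. */
        uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
        TRPMResetTrap(pVCpu);
        pVCpu->hm.s.Event.fPending = false;  /* it may have been contributory */
        hmR0VmxSetPendingEvent(pVCpu,
                               VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
                               pVmxTransient->cbInstr, uGstErrorCode,
                               pVmxTransient->uExitQualification);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
        rc = VINF_SUCCESS;
    }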