Lines Matching defs:pVM
308 static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM);
309 static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
310 static int hmR3InitCPU(PVM pVM);
311 static int hmR3InitFinalizeR0(PVM pVM);
312 static int hmR3InitFinalizeR0Intel(PVM pVM);
313 static int hmR3InitFinalizeR0Amd(PVM pVM);
314 static int hmR3TermCPU(PVM pVM);
330 * @param pVM Pointer to the VM.
335 VMMR3_INT_DECL(int) HMR3Init(PVM pVM)
343 AssertCompile(sizeof(pVM->hm.s) <= sizeof(pVM->hm.padding));
348 int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HM_SAVED_STATE_VERSION, sizeof(HM),
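The matcher only captured the first line of this multi-line call; the registration plausibly continues by wiring up the hmR3Save/hmR3Load callbacks declared at lines 308-309, along these lines (the NULL slots for the live-migration and prep/done callbacks are an assumption, not the verbatim source):

    rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HM_SAVED_STATE_VERSION, sizeof(HM),
                               NULL /* live prep */, NULL /* live exec */, NULL /* live vote */,
                               NULL /* save prep */, hmR3Save, NULL /* save done */,
                               NULL /* load prep */, hmR3Load, NULL /* load done */);
    AssertRCReturn(rc, rc);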
358 PCFGMNODE pCfgHM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HM/");
367 AssertLogRelMsgReturn(!fHMForced || pVM->fHMEnabled, ("Configuration error: HM forced but not enabled!\n"),
370 if (pVM->fHMEnabled)
373 AssertLogRelMsgReturn(pVM->cCpus == 1 || pVM->fHMEnabled, ("Configuration error: SMP requires HM to be enabled!\n"),
375 if (pVM->cCpus > 1)
378 AssertRelease(pVM->fHMEnabled);
384 rc = CFGMR3QueryBoolDef(pCfgHM, "EnableNestedPaging", &pVM->hm.s.fAllowNestedPaging, false);
389 rc = CFGMR3QueryBoolDef(pCfgHM, "EnableUX", &pVM->hm.s.vmx.fAllowUnrestricted, true);
395 rc = CFGMR3QueryBoolDef(pCfgHM, "EnableLargePages", &pVM->hm.s.fLargePages, false);
400 rc = CFGMR3QueryBoolDef(pCfgHM, "EnableVPID", &pVM->hm.s.vmx.fAllowVpid, false);
405 rc = CFGMR3QueryBoolDef(pCfgHM, "TPRPatchingEnabled", &pVM->hm.s.fTprPatchingAllowed, false);
413 rc = CFGMR3QueryBoolDef(pCfgHM, "64bitEnabled", &pVM->hm.s.fAllow64BitGuests, HC_ARCH_BITS == 64);
416 pVM->hm.s.fAllow64BitGuests = false;
427 rc = CFGMR3QueryBoolDef(pCfgHM, "Exclusive", &pVM->hm.s.fGlobalInit,
440 rc = CFGMR3QueryU32Def(pCfgHM, "MaxResumeLoops", &pVM->hm.s.cMaxResumeLoops, 0 /* set by R0 later */);
446 rc = CFGMR3QueryBoolDef(pCfgHM, "UseVmxPreemptTimer", &pVM->hm.s.vmx.fUsePreemptTimer, true);
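The block above is one repeated pattern: each CFGMR3Query*Def call reads an optional key from the "HM" configuration node and falls back to the supplied default when the key is absent. A minimal sketch of that pattern, with hypothetical error handling (the source's checks between the matched lines are not visible here):

    /* Read an optional boolean key; "false" is used when it is not configured. */
    bool fAllowNestedPaging = false;
    int rc = CFGMR3QueryBoolDef(pCfgHM, "EnableNestedPaging", &fAllowNestedPaging, false);
    AssertRCReturn(rc, rc);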
454 if (pVM->fHMEnabled)
463 pVM->hm.s.svm.fSupported = true;
473 pVM->hm.s.vmx.fSupported = true;
483 return VMSetError(pVM, rc, RT_SRC_POS, "The host kernel does not support VT-x.%s\n", pszMinReq);
487 pVM->fHMEnabled = false;
497 pVM->fHMNeedRawModeCtx = HC_ARCH_BITS == 32
498 && pVM->fHMEnabled
499 && pVM->hm.s.fAllow64BitGuests;
505 Assert(!pVM->hm.s.fNestedPaging);
506 if (pVM->hm.s.fAllowNestedPaging)
509 pVM->hm.s.fNestedPaging = true;
511 pVM->hm.s.fAllowNestedPaging = false;
516 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);
517 if (pVM->hm.s.vmx.fAllowUnrestricted)
520 && pVM->hm.s.fNestedPaging)
521 pVM->hm.s.vmx.fUnrestrictedGuest = true;
523 pVM->hm.s.vmx.fAllowUnrestricted = false;
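Reading the fragments at lines 516-523 together, a hedged reconstruction of the full gating (the capability test against VmxProcCtls2 is inferred from line 1205 further down, not visible in this span) is:

    Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);
    if (pVM->hm.s.vmx.fAllowUnrestricted)
    {
        /* Unrestricted guest execution also requires nested paging (EPT). */
        if (   (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST)
            && pVM->hm.s.fNestedPaging)
            pVM->hm.s.vmx.fUnrestrictedGuest = true;
        else
            pVM->hm.s.vmx.fAllowUnrestricted = false;  /* quietly drop the request */
    }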
565 return VM_SET_ERROR(pVM, rc, pszMsg);
567 return VMSetError(pVM, rc, RT_SRC_POS, "SUPR3QueryVTCaps failed with %Rrc", rc);
571 pVM->fHMEnabled = false;
576 pVM->fHMEnabledFixed = true;
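Lines 454-487 are the host-capability probe. A plausible shape of the surrounding control flow, using the real SUPR3QueryVTCaps API and SUPVTCAPS_* flags (the branch structure is an assumption pieced together from the matched fragments):

    uint32_t fCaps = 0;
    int rc = SUPR3QueryVTCaps(&fCaps);
    if (RT_SUCCESS(rc))
    {
        if (fCaps & SUPVTCAPS_AMD_V)
            pVM->hm.s.svm.fSupported = true;            /* line 463 */
        else if (fCaps & SUPVTCAPS_VT_X)
            pVM->hm.s.vmx.fSupported = true;            /* line 473 */
    }
    else if (fHMForced)
        return VMSetError(pVM, rc, RT_SRC_POS, "SUPR3QueryVTCaps failed with %Rrc", rc);
    else
        pVM->fHMEnabled = false;                        /* fall back to raw-mode, line 487 */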
585 * @param pVM Pointer to the VM.
587 static int hmR3InitCPU(PVM pVM)
591 if (!HMIsEnabled(pVM))
594 for (VMCPUID i = 0; i < pVM->cCpus; i++)
596 PVMCPU pVCpu = &pVM->aCpus[i];
601 STAM_REG(pVM, &pVM->hm.s.StatTprPatchSuccess, STAMTYPE_COUNTER, "/HM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");
602 STAM_REG(pVM, &pVM->hm.s.StatTprPatchFailure, STAMTYPE_COUNTER, "/HM/TPR/Patch/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");
603 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceSuccess, STAMTYPE_COUNTER, "/HM/TPR/Replace/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully replaced.");
604 STAM_REG(pVM, &pVM->hm.s.StatTprReplaceFailure, STAMTYPE_COUNTER, "/HM/TPR/Replace/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful replace attempts.");
610 for (VMCPUID i = 0; i < pVM->cCpus; i++)
612 PVMCPU pVCpu = &pVM->aCpus[i];
616 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
620 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatSpinPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
624 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatSpinPokeFailed, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
628 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatEntry, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
632 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
636 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
641 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitIO, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
645 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitMovCRx, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
649 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitXcptNmi, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
654 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatLoadGuestState, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
658 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatInGC, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL,
664 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatWorldSwitch3264, STAMTYPE_PROFILE, STAMVISIBILITY_USED,
671 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitDispatch, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_USED,
679 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, desc, b, i); \
797 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitCRxWrite[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
801 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitCRxRead[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
811 rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT * sizeof(*pVCpu->hm.s.paStatExitReason), 0 /* uAlignment */, MM_TAG_HM,
821 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
826 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitReasonNpf, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
830 pVCpu->hm.s.paStatExitReasonR0 = MMHyperR3ToR0(pVM, pVCpu->hm.s.paStatExitReason);
832 Assert(pVCpu->hm.s.paStatExitReasonR0 != NIL_RTR0PTR || !HMIsEnabled(pVM));
837 rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, 8, MM_TAG_HM, (void **)&pVCpu->hm.s.paStatInjectedIrqs);
839 pVCpu->hm.s.paStatInjectedIrqsR0 = MMHyperR3ToR0(pVM, pVCpu->hm.s.paStatInjectedIrqs);
841 Assert(pVCpu->hm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR || !HMIsEnabled(pVM));
847 STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
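All the registrations above share one idiom: STAMR3RegisterF formats a per-VCPU sample name so the statistics tree groups each counter under its virtual CPU. A completed example of one such call (the description string and "/PROF/CPU%d/..." name template are illustrative, not copied from the elided source lines):

    rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitIO, STAMTYPE_PROFILE, STAMVISIBILITY_USED,
                         STAMUNIT_TICKS_PER_CALL, "Profiling of I/O exit handling.",
                         "/PROF/CPU%d/HM/ExitIO", i);
    AssertRC(rc);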
859 for (VMCPUID i = 0; i < pVM->cCpus; i++)
861 PVMCPU pVCpu = &pVM->aCpus[i];
877 * @param pVM Pointer to the VM.
880 VMMR3_INT_DECL(int) HMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
885 return hmR3InitCPU(pVM);
887 return hmR3InitFinalizeR0(pVM);
897 * @param pVM Pointer to the VM.
899 static void hmR3DisableRawMode(PVM pVM)
902 for (VMCPUID i = 0; i < pVM->cCpus; i++)
904 PVMCPU pVCpu = &pVM->aCpus[i];
906 PGMR3ChangeMode(pVM, pVCpu, PGMMODE_REAL);
915 * @param pVM Pointer to the VM.
917 static int hmR3InitFinalizeR0(PVM pVM)
921 if (!HMIsEnabled(pVM))
928 if ( !pVM->hm.s.vmx.fSupported
929 && !pVM->hm.s.svm.fSupported
930 && pVM->hm.s.lLastError == VERR_SVM_IN_USE /* implies functional AMD-V */
934 pVM->hm.s.svm.fSupported = true;
935 pVM->hm.s.svm.fIgnoreInUseError = true;
936 pVM->hm.s.lLastError = VINF_SUCCESS;
942 if ( !pVM->hm.s.vmx.fSupported
943 && !pVM->hm.s.svm.fSupported)
945 LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.lLastError));
946 LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.Msrs.u64FeatureCtrl));
947 switch (pVM->hm.s.lLastError)
950 return VM_SET_ERROR(pVM, VERR_VMX_IN_VMX_ROOT_MODE, "VT-x is being used by another hypervisor.");
952 return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is not available.");
954 return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is disabled in the BIOS.");
956 return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "VT-x is disabled in the BIOS for all CPU modes.");
958 return VM_SET_ERROR(pVM, VERR_VMX_NO_VMX, "Failed to enable and lock VT-x features.");
961 return VM_SET_ERROR(pVM, VERR_SVM_IN_USE, "AMD-V is being used by another hypervisor.");
963 return VM_SET_ERROR(pVM, VERR_SVM_NO_SVM, "AMD-V is not available.");
965 return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS.");
967 return VMSetError(pVM, pVM->hm.s.lLastError, RT_SRC_POS, "HM ring-0 init failed: %Rrc", pVM->hm.s.lLastError);
973 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HM_ENABLE, 0, NULL);
984 pVM->hm.s.fHasIoApic = PDMHasIoApic(pVM);
985 if (!pVM->hm.s.fHasIoApic)
987 Assert(!pVM->hm.s.fTprPatchingAllowed); /* paranoia */
988 pVM->hm.s.fTprPatchingAllowed = false;
998 AssertLogRelReturn(!pVM->hm.s.fInitialized, VERR_HM_IPE_5);
1000 if (pVM->hm.s.vmx.fSupported)
1001 rc = hmR3InitFinalizeR0Intel(pVM);
1003 rc = hmR3InitFinalizeR0Amd(pVM);
1004 LogRel(("HM: VT-x/AMD-V init method: %s\n", (pVM->hm.s.fGlobalInit) ? "GLOBAL" : "LOCAL"));
1006 pVM->hm.s.fInitialized = true;
1016 * @param pVM The cross context VM structure.
1018 static int hmR3InitFinalizeR0Intel(PVM pVM)
1022 Log(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
1023 AssertLogRelReturn(pVM->hm.s.vmx.Msrs.u64FeatureCtrl != 0, VERR_HM_IPE_4);
1030 LogRel(("HM: Host CR4 = %#RX64\n", pVM->hm.s.vmx.u64HostCr4));
1031 LogRel(("HM: Host EFER = %#RX64\n", pVM->hm.s.vmx.u64HostEfer));
1032 LogRel(("HM: MSR_IA32_FEATURE_CONTROL = %#RX64\n", pVM->hm.s.vmx.Msrs.u64FeatureCtrl));
1033 LogRel(("HM: MSR_IA32_VMX_BASIC_INFO = %#RX64\n", pVM->hm.s.vmx.Msrs.u64BasicInfo));
1034 LogRel(("HM: VMCS id = %#x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo)));
1035 LogRel(("HM: VMCS size = %u bytes\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.Msrs.u64BasicInfo)));
1036 LogRel(("HM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hm.s.vmx.Msrs.u64BasicInfo) ? "< 4 GB" : "None"));
1037 LogRel(("HM: VMCS memory type = %#x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hm.s.vmx.Msrs.u64BasicInfo)));
1038 LogRel(("HM: Dual-monitor treatment support = %RTbool\n", RT_BOOL(MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hm.s.vmx.Msrs.u64BasicInfo))));
1039 LogRel(("HM: OUTS & INS instruction-info = %RTbool\n", RT_BOOL(MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo))));
1040 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoops));
1042 LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxPinCtls.u));
1043 val = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1;
1044 zap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0;
1050 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls.u));
1051 val = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;
1052 zap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0;
1074 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1076 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.u));
1077 val = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;
1078 zap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0;
1097 LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxEntry.u));
1098 val = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1;
1099 zap = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0;
1108 LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS = %#RX64\n", pVM->hm.s.vmx.Msrs.VmxExit.u));
1109 val = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1;
1110 zap = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0;
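Each VMX control MSR is reported above as a val/zap pair: val holds the allowed-1 half (bits that may be set) and zap the disallowed-0 half (bits that may not be cleared, i.e. are forced on). The elided lines between the loads presumably test individual feature bits; a minimal sketch of that idiom, with a hypothetical macro name:

    #define HM_LOG_VMX_FEATURE(val, zap, fFeat) \
        do { \
            if ((val) & (fFeat)) \
                LogRel(("HM:   " #fFeat "%s\n", ((zap) & (fFeat)) ? " (forced on)" : "")); \
            else \
                LogRel(("HM:   " #fFeat " (not available)\n")); \
        } while (0)

    HM_LOG_VMX_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL);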
1121 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps)
1123 val = pVM->hm.s.vmx.Msrs.u64EptVpidCaps;
1152 val = pVM->hm.s.vmx.Msrs.u64Misc;
1154 if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(val) == pVM->hm.s.vmx.cPreemptTimerShift)
1159 MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(val), pVM->hm.s.vmx.cPreemptTimerShift));
1172 AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.Msrs.u64Misc) >= 512);
1174 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0 = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr0Fixed0));
1175 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1 = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr0Fixed1));
1176 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0 = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr4Fixed0));
1177 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1 = %#RX64\n", pVM->hm.s.vmx.Msrs.u64Cr4Fixed1));
1179 val = pVM->hm.s.vmx.Msrs.u64VmcsEnum;
1183 val = pVM->hm.s.vmx.Msrs.u64Vmfunc;
1190 LogRel(("HM: APIC-access page physaddr = %#RHp\n", pVM->hm.s.vmx.HCPhysApicAccess));
1192 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1194 LogRel(("HM: VCPU%3d: MSR bitmap physaddr = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap));
1195 LogRel(("HM: VCPU%3d: VMCS physaddr = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysVmcs));
1201 AssertLogRelReturn( !pVM->hm.s.fNestedPaging
1202 || (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT),
1204 AssertLogRelReturn( !pVM->hm.s.vmx.fUnrestrictedGuest
1205 || ( (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST)
1206 && pVM->hm.s.fNestedPaging),
1212 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
1213 pVM->hm.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid;
1220 if ( !(pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1221 && CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
1223 CPUMClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP);
1227 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
1230 rc = PDMR3VmmDevHeapAlloc(pVM, HM_VTX_TOTAL_DEVHEAP_MEM, (RTR3PTR *)&pVM->hm.s.vmx.pRealModeTSS);
1236 ASMMemZero32(pVM->hm.s.vmx.pRealModeTSS, sizeof(*pVM->hm.s.vmx.pRealModeTSS));
1237 pVM->hm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hm.s.vmx.pRealModeTSS);
1242 memset(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap, 0, sizeof(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap));
1247 memset(pVM->hm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE * 2);
1248 *((unsigned char *)pVM->hm.s.vmx.pRealModeTSS + HM_VTX_TSS_SIZE - 2) = 0xff;
1255 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);
1258 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u = _4M * i;
1259 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US
1265 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
1269 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
1276 pVM->hm.s.vmx.pRealModeTSS = NULL;
1277 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = NULL;
1278 return VMSetError(pVM, rc, RT_SRC_POS,
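Lines 1255-1259 build the fake page directory used when the guest runs real or protected mode without paging under EPT: the whole 4 GB address space is identity-mapped with four-megabyte pages. A hedged reconstruction of the loop (the X86_PG_ENTRIES bound is an assumption; the flag set matches the fragment at line 1259):

    PX86PD pPD = pVM->hm.s.vmx.pNonPagingModeEPTPageTable;
    for (unsigned i = 0; i < X86_PG_ENTRIES; i++)   /* 1024 entries x 4 MB = 4 GB */
    {
        pPD->a[i].u  = _4M * i;                     /* identity-map the physical address */
        pPD->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US
                     | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_G;
    }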
1283 LogRel((pVM->hm.s.fAllow64BitGuests
1290 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /* idCpu */, VMMR0_DO_HM_SETUP_VM, 0 /* u64Arg */, NULL /* pReqHdr */);
1295 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1297 PVMCPU pVCpu = &pVM->aCpus[i];
1301 return VMSetError(pVM, rc, RT_SRC_POS, "VT-x setup failed: %Rrc", rc);
1304 LogRel(("HM: Supports VMCS EFER fields = %RTbool\n", pVM->hm.s.vmx.fSupportsVmcsEfer));
1306 pVM->hm.s.vmx.fEnabled = true;
1308 hmR3DisableRawMode(pVM); /** @todo make this go away! */
1313 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1314 if (pVM->hm.s.fAllow64BitGuests)
1316 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1317 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1318 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* 64 bits only on Intel CPUs */
1319 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1320 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1325 else if (CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
1327 if (pVM->hm.s.vmx.u64HostEfer & MSR_K6_EFER_NXE)
1328 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1336 if (pVM->hm.s.fNestedPaging)
1339 if (pVM->hm.s.vmx.enmFlushEpt == VMXFLUSHEPT_SINGLE_CONTEXT)
1341 else if (pVM->hm.s.vmx.enmFlushEpt == VMXFLUSHEPT_ALL_CONTEXTS)
1343 else if (pVM->hm.s.vmx.enmFlushEpt == VMXFLUSHEPT_NOT_SUPPORTED)
1346 LogRel(("HM: EPT flush type = %d\n", pVM->hm.s.vmx.enmFlushEpt));
1348 if (pVM->hm.s.vmx.fUnrestrictedGuest)
1352 if (pVM->hm.s.fLargePages)
1355 PGMSetLargePageUsage(pVM, true);
1361 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);
1363 if (pVM->hm.s.vmx.fVpid)
1366 if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_INDIV_ADDR)
1368 else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_SINGLE_CONTEXT)
1370 else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_ALL_CONTEXTS)
1372 else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1375 LogRel(("HM: VPID flush type = %d\n", pVM->hm.s.vmx.enmFlushVpid));
1377 else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_NOT_SUPPORTED)
1380 if (pVM->hm.s.vmx.fUsePreemptTimer)
1381 LogRel(("HM: VMX-preemption timer enabled (cPreemptTimerShift=%u)\n", pVM->hm.s.vmx.cPreemptTimerShift));
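The shift logged here comes from MSR_IA32_VMX_MISC bits 4:0: the VMX-preemption timer counts down at the TSC rate divided by 2^cPreemptTimerShift. For example, converting a TSC deadline into timer ticks (the variable names are illustrative):

    /* With cPreemptTimerShift == 5, one timer tick elapses per 32 TSC ticks. */
    uint32_t cTimerTicks = (uint32_t)(cTscTicksToDeadline >> pVM->hm.s.vmx.cPreemptTimerShift);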
1393 * @param pVM The cross context VM structure.
1395 static int hmR3InitFinalizeR0Amd(PVM pVM)
1397 Log(("pVM->hm.s.svm.fSupported = %d\n", pVM->hm.s.svm.fSupported));
1406 LogRel(("HM: Max resume loops = %u\n", pVM->hm.s.cMaxResumeLoops));
1407 LogRel(("HM: CPUID 0x80000001.u32AMDFeatureECX = %#RX32\n", pVM->hm.s.cpuid.u32AMDFeatureECX));
1408 LogRel(("HM: CPUID 0x80000001.u32AMDFeatureEDX = %#RX32\n", pVM->hm.s.cpuid.u32AMDFeatureEDX));
1409 LogRel(("HM: AMD HWCR MSR = %#RX64\n", pVM->hm.s.svm.u64MsrHwcr));
1410 LogRel(("HM: AMD-V revision = %#x\n", pVM->hm.s.svm.u32Rev));
1411 LogRel(("HM: AMD-V max ASID = %RU32\n", pVM->hm.s.uMaxAsid));
1412 LogRel(("HM: AMD-V features = %#x\n", pVM->hm.s.svm.u32Features));
1434 uint32_t fSvmFeatures = pVM->hm.s.svm.u32Features;
1449 AssertLogRelReturn( !pVM->hm.s.fNestedPaging
1450 || (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
1456 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HM_SETUP_VM, 0, NULL);
1461 return VMSetError(pVM, rc, RT_SRC_POS, "AMD-V setup failed: %Rrc", rc);
1465 pVM->hm.s.svm.fEnabled = true;
1467 if (pVM->hm.s.fNestedPaging)
1475 if (pVM->hm.s.fLargePages)
1477 PGMSetLargePageUsage(pVM, true);
1483 hmR3DisableRawMode(pVM);
1488 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
1489 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
1490 if (pVM->hm.s.fAllow64BitGuests)
1492 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
1493 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
1494 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1495 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
1498 else if (CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
1499 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
1501 LogRel(("HM: TPR patching %s\n", (pVM->hm.s.fTprPatchingAllowed) ? "enabled" : "disabled"));
1503 LogRel((pVM->hm.s.fAllow64BitGuests
1516 * @param pVM Pointer to the VM.
1518 VMMR3_INT_DECL(void) HMR3Relocate(PVM pVM)
1520 Log(("HMR3Relocate to %RGv\n", MMHyperGetArea(pVM, 0)));
1523 if (VMR3GetState(pVM) == VMSTATE_LOADING)
1525 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1527 PVMCPU pVCpu = &pVM->aCpus[i];
1532 if (HMIsEnabled(pVM))
1534 switch (PGMGetHostMode(pVM))
1537 pVM->hm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64);
1542 pVM->hm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_PAE_TO_AMD64);
1561 * @param pVM Pointer to the VM.
1566 VMMR3_INT_DECL(void) HMR3PagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
1604 * @param pVM Pointer to the VM.
1606 VMMR3_INT_DECL(int) HMR3Term(PVM pVM)
1608 if (pVM->hm.s.vmx.pRealModeTSS)
1610 PDMR3VmmDevHeapFree(pVM, pVM->hm.s.vmx.pRealModeTSS);
1611 pVM->hm.s.vmx.pRealModeTSS = NULL;
1613 hmR3TermCPU(pVM);
1622 * @param pVM Pointer to the VM.
1624 static int hmR3TermCPU(PVM pVM)
1626 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1628 PVMCPU pVCpu = &pVM->aCpus[i]; NOREF(pVCpu);
1633 MMHyperFree(pVM, pVCpu->hm.s.paStatExitReason);
1639 MMHyperFree(pVM, pVCpu->hm.s.paStatInjectedIrqs);
1694 * @param pVM Pointer to the VM.
1696 VMMR3_INT_DECL(void) HMR3Reset(PVM pVM)
1700 if (HMIsEnabled(pVM))
1701 hmR3DisableRawMode(pVM);
1703 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1705 PVMCPU pVCpu = &pVM->aCpus[i];
1711 pVM->hm.s.pGuestPatchMem = 0;
1712 pVM->hm.s.pFreeGuestPatchMem = 0;
1713 pVM->hm.s.cbGuestPatchMem = 0;
1714 pVM->hm.s.cPatches = 0;
1715 pVM->hm.s.PatchTree = 0;
1716 pVM->hm.s.fTPRPatchingActive = false;
1717 ASMMemZero32(pVM->hm.s.aPatches, sizeof(pVM->hm.s.aPatches));
1725 * @param pVM Pointer to the VM.
1729 DECLCALLBACK(VBOXSTRICTRC) hmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
1738 for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
1741 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
1748 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
1772 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, CPUMGetGuestCS(pVCpu), pInstrGC, DBGF_DISAS_FLAGS_DEFAULT_MODE,
1778 pVM->hm.s.cPatches = 0;
1779 pVM->hm.s.PatchTree = 0;
1780 pVM->hm.s.pFreeGuestPatchMem = pVM->hm.s.pGuestPatchMem;
1781 pVM->hm.s.fTPRPatchingActive = false;
1790 * @param pVM Pointer to the VM.
1795 static int hmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
1797 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches, (void *)(uintptr_t)idCpu);
1800 pVM->hm.s.pGuestPatchMem = pPatchMem;
1801 pVM->hm.s.pFreeGuestPatchMem = pPatchMem;
1802 pVM->hm.s.cbGuestPatchMem = cbPatchMem;
1811 * @param pVM Pointer to the VM.
1815 VMMR3_INT_DECL(int) HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1817 VM_ASSERT_EMT(pVM);
1819 if (pVM->cCpus > 1)
1822 int rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE,
1823 (PFNRT)hmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
1827 return hmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
1835 * @param pVM Pointer to the VM.
1839 VMMR3_INT_DECL(int) HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
1843 Assert(pVM->hm.s.pGuestPatchMem == pPatchMem);
1844 Assert(pVM->hm.s.cbGuestPatchMem == cbPatchMem);
1847 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches,
1848 (void *)(uintptr_t)VMMGetCpuId(pVM));
1851 pVM->hm.s.pGuestPatchMem = 0;
1852 pVM->hm.s.pFreeGuestPatchMem = 0;
1853 pVM->hm.s.cbGuestPatchMem = 0;
1854 pVM->hm.s.fTPRPatchingActive = false;
1863 * @param pVM Pointer to the VM.
1868 DECLCALLBACK(VBOXSTRICTRC) hmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
1884 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
1890 uint32_t const idx = pVM->hm.s.cPatches;
1891 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
1896 pPatch = &pVM->hm.s.aPatches[idx];
1906 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
1958 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
1968 && cbOpMmio + cbOp < sizeof(pVM->hm.s.aPatches[idx].aOpcode))
2011 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2014 pVM->hm.s.cPatches++;
2015 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceSuccess);
2025 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2027 pVM->hm.s.cPatches++;
2028 STAM_COUNTER_INC(&pVM->hm.s.StatTprReplaceFailure);
2037 * @param pVM Pointer to the VM.
2042 DECLCALLBACK(VBOXSTRICTRC) hmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
2058 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
2064 uint32_t const idx = pVM->hm.s.cPatches;
2065 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
2070 pPatch = &pVM->hm.s.aPatches[idx];
2080 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
2199 *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem + off + 4);
2202 if (pVM->hm.s.pFreeGuestPatchMem + off <= pVM->hm.s.pGuestPatchMem + pVM->hm.s.cbGuestPatchMem)
2205 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hm.s.pFreeGuestPatchMem, aPatch, off);
2210 for (RTGCPTR GCPtrInstr = pVM->hm.s.pFreeGuestPatchMem;
2211 GCPtrInstr < pVM->hm.s.pFreeGuestPatchMem + off;
2215 rc = DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pCtx->cs.Sel, GCPtrInstr, DBGF_DISAS_FLAGS_DEFAULT_MODE,
2225 *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);
2233 pVM->hm.s.pFreeGuestPatchMem += off;
2237 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2240 pVM->hm.s.cPatches++;
2241 pVM->hm.s.fTPRPatchingActive = true;
2242 STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchSuccess);
2255 pPatch = &pVM->hm.s.aPatches[idx];
2258 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
2260 pVM->hm.s.cPatches++;
2261 STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchFailure);
2270 * @param pVM Pointer to the VM.
2274 VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2277 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE,
2278 pVM->hm.s.pGuestPatchMem ? hmR3PatchTprInstr : hmR3ReplaceTprInstr,
2438 * @param pVM Pointer to the VM.
2441 VMMR3_INT_DECL(int) HMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx)
2443 PVMCPU pVCpu = VMMGetCpu(pVM);
2445 Assert(HMIsEnabled(pVM));
2465 * @param pVM Pointer to the VM.
2468 VMMR3DECL(bool) HMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)
2470 PVMCPU pVCpu = VMMGetCpu(pVM);
2472 Assert(HMIsEnabled(pVM));
2484 if (pVM->hm.s.svm.fEnabled)
2493 Assert( (pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS)
2494 || (!pVM->hm.s.vmx.fUnrestrictedGuest && pVM->hm.s.vmx.pRealModeTSS));
2496 bool fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVmmDevHeapIsEnabled(pVM);
2497 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
2578 && !pVM->hm.s.vmx.fUnrestrictedGuest)
2580 if ( !pVM->hm.s.fNestedPaging /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
2606 if (pVM->hm.s.vmx.fEnabled)
2611 mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr0Fixed0;
2629 mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
2634 mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr4Fixed0;
2640 mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
2656 * @param pVM Pointer to the VM.
2659 VMMR3_INT_DECL(bool) HMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx)
2665 if ( pVM->hm.s.vmx.fEnabled
2666 && !pVM->hm.s.vmx.fUnrestrictedGuest
2668 && !PDMVmmDevHeapIsEnabled(pVM))
2722 PVM pVM = pUVM->pVM;
2723 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2724 return pVM->fHMEnabled; /* Don't use the macro as the GUI may query us very early. */
2738 PVM pVM = pUVM->pVM;
2739 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2740 return pVM->hm.s.vmx.fEnabled
2741 && pVM->hm.s.vmx.fSupported
2742 && pVM->fHMEnabled;
2756 PVM pVM = pUVM->pVM;
2757 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2758 return pVM->hm.s.svm.fEnabled
2759 && pVM->hm.s.svm.fSupported
2760 && pVM->fHMEnabled;
2773 PVM pVM = pUVM->pVM;
2774 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2775 return pVM->hm.s.fNestedPaging;
2788 PVM pVM = pUVM->pVM;
2789 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2790 return pVM->hm.s.vmx.fVpid;
2804 PVM pVM = pUVM->pVM;
2805 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
2806 return pVM->hm.s.vmx.fUnrestrictedGuest;
2814 * @param pVM Pointer to the VM.
2826 * @param pVM Pointer to the VM.
2828 VMMR3_INT_DECL(bool) HMR3IsVmxPreemptionTimerUsed(PVM pVM)
2830 return HMIsEnabled(pVM)
2831 && pVM->hm.s.vmx.fEnabled
2832 && pVM->hm.s.vmx.fUsePreemptTimer;
2846 * @param pVM Pointer to the VM.
2850 VMMR3_INT_DECL(VBOXSTRICTRC) HMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2868 rcStrict = IOMIOPortRead(pVM, pVCpu, pVCpu->hm.s.PendingIO.s.Port.uPort,
2881 rcStrict = IOMIOPortWrite(pVM, pVCpu, pVCpu->hm.s.PendingIO.s.Port.uPort,
2901 || DBGFBpIsHwIoArmed(pVM))
2903 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, pVCpu->hm.s.PendingIO.s.Port.uPort,
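Lines 2850-2903 replay an I/O port instruction that was interrupted by a world switch. A minimal sketch of the replay dispatch, assuming HMPENDINGIO_PORT_READ/WRITE cases on the pending-I/O record (the exact field handling around the matched IOMIOPortRead/Write calls is not visible here):

    switch (pVCpu->hm.s.PendingIO.enmType)
    {
        case HMPENDINGIO_PORT_READ:
        {
            uint32_t u32Val = 0;
            rcStrict = IOMIOPortRead(pVM, pVCpu, pVCpu->hm.s.PendingIO.s.Port.uPort,
                                     &u32Val, pVCpu->hm.s.PendingIO.s.Port.cbSize);
            /* ...merge u32Val into the guest's EAX on success... */
            break;
        }
        case HMPENDINGIO_PORT_WRITE:
            rcStrict = IOMIOPortWrite(pVM, pVCpu, pVCpu->hm.s.PendingIO.s.Port.uPort,
                                      pCtx->eax /* value; masked to cbSize in reality */,
                                      pVCpu->hm.s.PendingIO.s.Port.cbSize);
            break;
        default:
            rcStrict = VERR_NOT_IMPLEMENTED;  /* illustrative */
            break;
    }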
2920 * @param pVM Pointer to the VM.
2923 VMMR3_INT_DECL(void) HMR3CheckError(PVM pVM, int iStatusCode)
2925 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2927 PVMCPU pVCpu = &pVM->aCpus[i];
2949 if ( pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMLAUCH_NON_CLEAR_VMCS
2950 || pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMRESUME_NON_LAUNCHED_VMCS)
2955 else if (pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
3001 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed %#RX32\n", pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1));
3002 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %#RX32\n", pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0));
3011 * @param pVM Pointer to the VM.
3014 static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM)
3020 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3025 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.Event.fPending);
3027 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.Event.u32ErrCode);
3029 rc = SSMR3PutU64(pSSM, pVM->aCpus[i].hm.s.Event.u64IntInfo);
3033 /** @todo We only need to save pVM->aCpus[i].hm.s.vmx.fWasInRealMode and
3045 rc = SSMR3PutGCPtr(pSSM, pVM->hm.s.pGuestPatchMem);
3047 rc = SSMR3PutGCPtr(pSSM, pVM->hm.s.pFreeGuestPatchMem);
3049 rc = SSMR3PutU32(pSSM, pVM->hm.s.cbGuestPatchMem);
3053 rc = SSMR3PutU32(pSSM, pVM->hm.s.cPatches);
3056 for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
3058 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
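The save loop above walks the patch array; the per-record serialization after line 3058 is not in the match, but it plausibly mirrors the SSMR3Put*/SSMR3Get* pairing visible elsewhere in this function and in hmR3Load below. A hedged sketch (the field choice is illustrative):

    /* Serialize one TPR patch record; hmR3Load must read the same fields
       back in the same order. */
    rc = SSMR3PutU32(pSSM, pPatch->Core.Key);
    AssertRCReturn(rc, rc);
    rc = SSMR3PutMem(pSSM, pPatch->aOpcode, sizeof(pPatch->aOpcode));
    AssertRCReturn(rc, rc);
    rc = SSMR3PutU32(pSSM, pPatch->cbOp);
    AssertRCReturn(rc, rc);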
3100 * @param pVM Pointer to the VM.
3105 static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
3122 for (VMCPUID i = 0; i < pVM->cCpus; i++)
3124 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.fPending);
3126 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.u32ErrCode);
3128 rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hm.s.Event.u64IntInfo);
3146 rc = SSMR3GetGCPtr(pSSM, &pVM->hm.s.pGuestPatchMem);
3148 rc = SSMR3GetGCPtr(pSSM, &pVM->hm.s.pFreeGuestPatchMem);
3150 rc = SSMR3GetU32(pSSM, &pVM->hm.s.cbGuestPatchMem);
3154 rc = SSMR3GetU32(pSSM, &pVM->hm.s.cPatches);
3157 for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
3159 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
3180 pVM->hm.s.fTPRPatchingActive = true;
3182 Assert(pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT || pVM->hm.s.fTPRPatchingActive == false);
3205 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);