Lines Matching refs:guest

83         pVmcb->guest.REG.u16Sel     = pCtx->reg.Sel; \
84         pVmcb->guest.REG.u32Limit   = pCtx->reg.u32Limit; \
85         pVmcb->guest.REG.u64Base    = pCtx->reg.u64Base; \
86         pVmcb->guest.REG.u16Attr    = HMSVM_CPU_2_VMCB_SEG_ATTR(pCtx->reg.Attr.u); \
92         pMixedCtx->reg.Sel      = pVmcb->guest.REG.u16Sel; \
93         pMixedCtx->reg.ValidSel = pVmcb->guest.REG.u16Sel; \
95         pMixedCtx->reg.u32Limit = pVmcb->guest.REG.u32Limit; \
96         pMixedCtx->reg.u64Base  = pVmcb->guest.REG.u64Base; \
97         pMixedCtx->reg.Attr.u   = HMSVM_VMCB_2_CPU_SEG_ATTR(pVmcb->guest.REG.u16Attr); \
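The fragments at 83-86 and 92-97 are the bodies of a matched pair of copy macros: one loads a segment register from the guest-CPU context into the VMCB guest-state area, the other writes it back after #VMEXIT. A minimal sketch of the load direction, assuming a macro name such as HMSVM_LOAD_SEG_REG (the name itself lies outside the matched lines):

    /* Hypothetical reconstruction; only the body lines appear above. */
    #define HMSVM_LOAD_SEG_REG(REG, reg) \
        do \
        { \
            pVmcb->guest.REG.u16Sel   = pCtx->reg.Sel; \
            pVmcb->guest.REG.u32Limit = pCtx->reg.u32Limit; \
            pVmcb->guest.REG.u64Base  = pCtx->reg.u64Base; \
            pVmcb->guest.REG.u16Attr  = HMSVM_CPU_2_VMCB_SEG_ATTR(pCtx->reg.Attr.u); \
        } while (0)

    /* Invoked once per selector, e.g.: */
    HMSVM_LOAD_SEG_REG(CS, cs);
    HMSVM_LOAD_SEG_REG(SS, ss);

The conversion macros are needed because the VMCB packs the segment attribute bits contiguously, while the context's Attr.u format keeps them split around the limit bits.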
103 * event in the guest. */
205 /** The guest's TPR value used for TPR shadowing. */
210 /** Whether the guest FPU state was active at the time of #VMEXIT. */
212 /** Whether the guest debug state was active at the time of #VMEXIT. */
232 * MSRPM (MSR permission bitmap) read permissions (for guest RDMSR).
243 * MSRPM (MSR permission bitmap) write permissions (for guest WRMSR).
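Lines 232 and 243 document the MSR permission bitmap: AMD-V dedicates two bits per MSR (read intercept, then write intercept) across three architecturally defined MSR ranges. A sketch of the offset arithmetic, with the helper name and buffer layout assumed for illustration (RT_BIT() is IPRT):

    static void svmSetMsrPermission(uint8_t *pbMsrpm, uint32_t idMsr,
                                    bool fInterceptRd, bool fInterceptWr)
    {
        uint32_t offByte;
        if (idMsr <= UINT32_C(0x00001fff))
            offByte = 0x0000 + (idMsr >> 2);                            /* range 0 */
        else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
            offByte = 0x0800 + ((idMsr - UINT32_C(0xc0000000)) >> 2);   /* range 1 */
        else if (idMsr - UINT32_C(0xc0010000) <= UINT32_C(0x1fff))
            offByte = 0x1000 + ((idMsr - UINT32_C(0xc0010000)) >> 2);   /* range 2 */
        else
            return; /* MSRs outside the map are always intercepted. */

        uint8_t const iBit = (idMsr & 3) << 1;      /* 2 bits per MSR: rd, wr */
        if (fInterceptRd) pbMsrpm[offByte] |=  RT_BIT(iBit);
        else              pbMsrpm[offByte] &= ~RT_BIT(iBit);
        if (fInterceptWr) pbMsrpm[offByte] |=  RT_BIT(iBit + 1);
        else              pbMsrpm[offByte] &= ~RT_BIT(iBit + 1);
    }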
258 * @param pMixedCtx Pointer to the guest-CPU context.
553 * Allocate one page for the guest-state VMCB.
731 /* CR0, CR4 reads must be intercepted; our shadow values are not necessarily the same as the guest's. */
758 /* The host ASID is MBZ (must be zero); for the guest we start with ASID 1. */
763 * The default value should be 0x0007040600070406ULL, but we want to treat all guest memory as WB,
766 pVmcb->guest.u64GPAT = UINT64_C(0x0006060606060606);
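On the G_PAT note at 763-766: the 64-bit PAT register packs eight one-byte memory-type entries, PA0 in the low byte (00h = UC, 04h = WT, 06h = WB, 07h = UC-). Decoding the architectural default mentioned in the comment shows why the override forces write-back:

    /* The default 0x0007040600070406 decodes (low byte first) as:
     *   PA0=06h WB, PA1=04h WT, PA2=07h UC-, PA3=00h UC, PA4..PA7 repeat.
     * Filling the low seven entries with 06h instead treats guest memory as WB: */
    uint64_t u64GPat = 0;
    for (unsigned iPat = 0; iPat < 7; iPat++)
        u64GPat |= UINT64_C(0x06) << (iPat * 8);    /* == 0x0006060606060606 */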
774 /* CR3 reads/writes must be intercepted; our shadow values differ from the guest values. */
796 * Don't intercept guest read/write accesses to these MSRs.
815 * Invalidates a guest page by guest virtual address.
838 /* If we get a flush in 64-bit guest mode, then force a full TLB flush. INVLPGA takes only 32-bit addresses. */
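The comment at 838 explains the fallback: INVLPGA as used here only takes a 32-bit linear address, so a page-granular flush cannot express a 64-bit guest address. A sketch of the resulting logic, with the INVLPGA wrapper and force-flag names assumed:

    if (CPUMIsGuestInLongMode(pVCpu))
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);    /* full TLB flush on next VMRUN */
    else
        SVMR0InvlpgA(GCVirt, pVmcb->ctrl.TLBCtrl.n.u32ASID);    /* assumed helper */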
1011 /** @name 64-bit guest on 32-bit host OS helper functions.
1025 * @param pCtx Pointer to the guest-CPU context.
1051 * @param pCtx Pointer to the guest-CPU context.
1126 * Loads the guest CR0 control register into the guest-state area in the VMCB.
1127 * Although the guest CR0 is a separate field in the VMCB, we have to consider
1128 * the FPU state itself, which is shared between the host and the guest.
1133 * @param pCtx Pointer to the guest-CPU context.
1167 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
1194 pVmcb->guest.u64CR0 = u64GuestCR0;
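Lines 1126-1194 concern the CR0/FPU interplay: because the FPU is lazily switched, the CR0 value placed in the VMCB is a shadow that may set TS/MP even when the guest's own CR0 has them clear, so the first FPU touch raises #NM in the host. A hedged sketch of the adjustment (the query helper is an assumption):

    uint64_t u64GuestCR0 = pCtx->cr0;
    if (!CPUMIsGuestFPUStateActive(pVCpu))      /* guest FPU not loaded on this CPU? */
    {
        u64GuestCR0 |= X86_CR0_TS | X86_CR0_MP; /* force #NM so we can lazy-load it  */
        /* ...#NM must also be intercepted so the host sees it first. */
    }
    pVmcb->guest.u64CR0 = u64GuestCR0;

This is also why CR0 reads are intercepted (line 731): the guest must see its own CR0, not the shadow.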
1202 * Loads the guest control registers (CR2, CR3, CR4) into the VMCB.
1207 * @param pCtx Pointer to the guest-CPU context.
1220 pVmcb->guest.u64CR2 = pCtx->cr2;
1243 pVmcb->guest.u64CR3 = pCtx->cr3;
1246 pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);
1293 pVmcb->guest.u64CR4 = u64GuestCR4;
1296 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
1307 * Loads the guest segment registers into the VMCB.
1312 * @param pCtx Pointer to the guest-CPU context.
1328 pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl;
1350 pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
1351 pVmcb->guest.GDTR.u64Base = pCtx->gdtr.pGdt;
1359 pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
1360 pVmcb->guest.IDTR.u64Base = pCtx->idtr.pIdt;
1368 * Loads the guest MSRs into the VMCB.
1372 * @param pCtx Pointer to the guest-CPU context.
1379 pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs;
1380 pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
1381 pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
1385 * AMD-V requires guest EFER.SVME to be set. Weird.
1390 pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
1398 pVmcb->guest.FS.u64Base = pCtx->fs.u64Base;
1399 pVmcb->guest.GS.u64Base = pCtx->gs.u64Base;
1403 /* If the guest isn't in 64-bit mode, clear the MSR_K6_EFER_LME bit from the guest EFER, otherwise AMD-V expects amd64 shadow paging. */
1406 pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME;
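The EFER handling at 1385-1406 combines two fixups: VMRUN refuses a VMCB whose guest EFER.SVME is clear, and a guest that is not in long mode must not present EFER.LME, or AMD-V would expect amd64 shadow paging. Consolidated, the logic reads roughly as follows (the long-mode query helper is an assumption):

    pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;    /* mandatory for VMRUN   */
    if (!CPUMIsGuestInLongModeEx(pCtx))
        pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME;               /* no amd64 shadow paging */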
1414 pVmcb->guest.u64STAR = pCtx->msrSTAR;
1415 pVmcb->guest.u64LSTAR = pCtx->msrLSTAR;
1416 pVmcb->guest.u64CSTAR = pCtx->msrCSTAR;
1417 pVmcb->guest.u64SFMASK = pCtx->msrSFMASK;
1418 pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
1423 * Loads the guest state into the VMCB and programs the necessary intercepts
1428 * @param pCtx Pointer to the guest-CPU context.
1445 * trap flag in the guest EFLAGS since AMD-V doesn't have a trap flag on
1452 pVmcb->guest.u64RFlags |= X86_EFL_TF;
1454 fInterceptMovDRx = true; /* Need clean DR6, no guest mess. */
1461 * Use the combined guest and host DRx values found in the hypervisor
1465 * Note! DBGF expects a clean DR6 state before executing guest code.
1485 if ( pVmcb->guest.u64DR6 != X86_DR6_INIT_VAL
1486 || pVmcb->guest.u64DR7 != CPUMGetHyperDR7(pVCpu))
1488 pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
1489 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
1494 /** @todo If we cared, we could optimize to allow the guest to read registers
1503 * Update DR6, DR7 with the guest values if necessary.
1505 if ( pVmcb->guest.u64DR7 != pCtx->dr[7]
1506 || pVmcb->guest.u64DR6 != pCtx->dr[6])
1508 pVmcb->guest.u64DR7 = pCtx->dr[7];
1509 pVmcb->guest.u64DR6 = pCtx->dr[6];
1515 * If the guest has enabled debug registers, we need to load them prior to
1516 * executing guest code so they'll trigger at the right time.
1538 Log5(("hmR0SvmLoadSharedDebugState: Loaded guest DRx\n"));
1589 * Loads the guest APIC state (currently just the TPR).
1594 * @param pCtx Pointer to the guest-CPU context.
1611 /* 32-bit guests use the LSTAR MSR for patching guest code which touches the TPR. */
1630 /* If there are interrupts pending, intercept CR8 writes so we can evaluate ASAP whether we can deliver the interrupt to the guest. */
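TPR shadowing (lines 205, 1589-1630) mirrors the APIC TPR into the VMCB's V_TPR field so most guest TPR accesses stay exit-free; the CR8 write intercept is only armed while an interrupt is pending, to re-evaluate deliverability promptly. A sketch, with the field accessors assumed:

    /* Illustrative only; u8Tpr/fPendingIntr come from the APIC emulation. */
    pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4) & 0xf;  /* V_TPR is TPR bits 7:4 */
    if (fPendingIntr)
        pVmcb->ctrl.u16InterceptWrCRx |=  RT_BIT(8);    /* intercept CR8 writes  */
    else
        pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);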
1648 * Loads the exception interrupts required for guest execution in the VMCB.
1653 * @param pCtx Pointer to the guest-CPU context.
1672 * Sets up the appropriate function to run guest code.
1676 * @param pCtx Pointer to the guest-CPU context.
1689 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
1821 * Loads the guest state into the VMCB.
1824 * Also sets up the appropriate VMRUN function to execute guest code based on
1825 * the guest CPU mode.
1830 * @param pCtx Pointer to the guest-CPU context.
1847 pVmcb->guest.u64RIP = pCtx->rip;
1848 pVmcb->guest.u64RSP = pCtx->rsp;
1849 pVmcb->guest.u64RFlags = pCtx->eflags.u32;
1850 pVmcb->guest.u64RAX = pCtx->rax;
1874 /* All the guest state bits should be loaded except maybe the host context and/or shared host/guest bits. */
1886 * Loads the state shared between the host and guest into the
1891 * @param pCtx Pointer to the guest-CPU context.
1915 * Saves the entire guest state from the VMCB into the
1916 * guest-CPU context. Currently there is no residual state left in the CPU that
1921 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1931 pMixedCtx->rip = pVmcb->guest.u64RIP;
1932 pMixedCtx->rsp = pVmcb->guest.u64RSP;
1933 pMixedCtx->eflags.u32 = pVmcb->guest.u64RFlags;
1934 pMixedCtx->rax = pVmcb->guest.u64RAX;
1947 pMixedCtx->cr2 = pVmcb->guest.u64CR2;
1952 pMixedCtx->msrSTAR = pVmcb->guest.u64STAR; /* legacy syscall eip, cs & ss */
1953 pMixedCtx->msrLSTAR = pVmcb->guest.u64LSTAR; /* 64-bit mode syscall rip */
1954 pMixedCtx->msrCSTAR = pVmcb->guest.u64CSTAR; /* compatibility mode syscall rip */
1955 pMixedCtx->msrSFMASK = pVmcb->guest.u64SFMASK; /* syscall flag mask */
1956 pMixedCtx->msrKERNELGSBASE = pVmcb->guest.u64KernelGSBase; /* swapgs exchange value */
1957 pMixedCtx->SysEnter.cs = pVmcb->guest.u64SysEnterCS;
1958 pMixedCtx->SysEnter.eip = pVmcb->guest.u64SysEnterEIP;
1959 pMixedCtx->SysEnter.esp = pVmcb->guest.u64SysEnterESP;
2006 * and thus it's possible that, when the CPL changes during guest execution, the SS DPL
2010 Assert(!(pVmcb->guest.u8CPL & ~0x3));
2011 pMixedCtx->ss.Attr.n.u2Dpl = pVmcb->guest.u8CPL & 0x3;
2026 pMixedCtx->gdtr.cbGdt = pVmcb->guest.GDTR.u32Limit;
2027 pMixedCtx->gdtr.pGdt = pVmcb->guest.GDTR.u64Base;
2029 pMixedCtx->idtr.cbIdt = pVmcb->guest.IDTR.u32Limit;
2030 pMixedCtx->idtr.pIdt = pVmcb->guest.IDTR.u64Base;
2037 pMixedCtx->dr[6] = pVmcb->guest.u64DR6;
2038 pMixedCtx->dr[7] = pVmcb->guest.u64DR7;
2042 Assert(pVmcb->guest.u64DR7 == CPUMGetHyperDR7(pVCpu));
2043 CPUMSetHyperDR6(pVCpu, pVmcb->guest.u64DR6);
2048 * This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmps to ring-3.
2051 && pMixedCtx->cr3 != pVmcb->guest.u64CR3)
2053 CPUMSetGuestCR3(pVCpu, pVmcb->guest.u64CR3);
2054 PGMUpdateCR3(pVCpu, pVmcb->guest.u64CR3);
2065 * @param pMixedCtx Pointer to the guest-CPU context.
2121 * @param pCtx Pointer to the guest-CPU context.
2159 * @param pCtx Pointer to the guest-CPU context.
2170 * VMMRZCallRing3() callback wrapper which saves the guest state (or restores
2177 * out-of-date guest-CPU context).
2235 * @param pCtx Pointer to the guest-CPU context.
2279 /* On our way back from ring-3, reload the guest state if it may have been changed. */
2331 * Sets an event as a pending event to be injected into the guest.
2338 * @remarks Statistics counter assumes this is a guest event being reflected to
2339 * the guest, i.e. 'StatInjectPendingReflect' is always incremented.
2358 * Injects an event into the guest upon VMRUN by updating the relevant field
2362 * @param pVmcb Pointer to the guest VM control block.
2363 * @param pCtx Pointer to the guest-CPU context.
2513 * Gets the guest's interrupt-shadow.
2515 * @returns The guest's interrupt-shadow.
2517 * @param pCtx Pointer to the guest-CPU context.
2534 * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
2548 * instructs AMD-V to cause a #VMEXIT as soon as the guest is in a state to
2569 * #VMEXIT as soon as a guest starts executing an IRET. This is used to unblock
2604 * Evaluates the event to be delivered to the guest and sets it as the pending
2608 * @param pCtx Pointer to the guest-CPU context.
2645 * Check if the guest can receive external interrupts (PIC/APIC). Once we do PDMGetInterrupt() we -must- deliver
2646 * the interrupt ASAP. We must not execute any guest code until we inject the interrupt, which is why it is
2678 * Injects any pending events into the guest if the guest is in a state to
2682 * @param pCtx Pointer to the guest-CPU context.
2720 /* Update the guest interrupt shadow in the VMCB. */
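The interrupt shadow (lines 2513-2720) holds only while RIP is still at the instruction that established it (a MOV SS or STI); if execution has moved on, the inhibit flag is stale and can be cleared. A sketch of the computation, with the VMCB field name assumed:

    uint64_t fIntShadow = 0;
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    {
        if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
            fIntShadow = 1;                                     /* still in the shadow */
        else
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* stale, drop it      */
    }
    pVmcb->ctrl.u64IntShadow = fIntShadow;      /* assumed VMCB field name */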
2733 * guest-state).
2734 * @param pCtx Pointer to the guest-CPU context.
2794 Log4(("guest.CS.u16Sel %RTsel\n", pVmcb->guest.CS.u16Sel));
2795 Log4(("guest.CS.u16Attr %#x\n", pVmcb->guest.CS.u16Attr));
2796 Log4(("guest.CS.u32Limit %#RX32\n", pVmcb->guest.CS.u32Limit));
2797 Log4(("guest.CS.u64Base %#RX64\n", pVmcb->guest.CS.u64Base));
2798 Log4(("guest.DS.u16Sel %#RTsel\n", pVmcb->guest.DS.u16Sel));
2799 Log4(("guest.DS.u16Attr %#x\n", pVmcb->guest.DS.u16Attr));
2800 Log4(("guest.DS.u32Limit %#RX32\n", pVmcb->guest.DS.u32Limit));
2801 Log4(("guest.DS.u64Base %#RX64\n", pVmcb->guest.DS.u64Base));
2802 Log4(("guest.ES.u16Sel %RTsel\n", pVmcb->guest.ES.u16Sel));
2803 Log4(("guest.ES.u16Attr %#x\n", pVmcb->guest.ES.u16Attr));
2804 Log4(("guest.ES.u32Limit %#RX32\n", pVmcb->guest.ES.u32Limit));
2805 Log4(("guest.ES.u64Base %#RX64\n", pVmcb->guest.ES.u64Base));
2806 Log4(("guest.FS.u16Sel %RTsel\n", pVmcb->guest.FS.u16Sel));
2807 Log4(("guest.FS.u16Attr %#x\n", pVmcb->guest.FS.u16Attr));
2808 Log4(("guest.FS.u32Limit %#RX32\n", pVmcb->guest.FS.u32Limit));
2809 Log4(("guest.FS.u64Base %#RX64\n", pVmcb->guest.FS.u64Base));
2810 Log4(("guest.GS.u16Sel %RTsel\n", pVmcb->guest.GS.u16Sel));
2811 Log4(("guest.GS.u16Attr %#x\n", pVmcb->guest.GS.u16Attr));
2812 Log4(("guest.GS.u32Limit %#RX32\n", pVmcb->guest.GS.u32Limit));
2813 Log4(("guest.GS.u64Base %#RX64\n", pVmcb->guest.GS.u64Base));
2815 Log4(("guest.GDTR.u32Limit %#RX32\n", pVmcb->guest.GDTR.u32Limit));
2816 Log4(("guest.GDTR.u64Base %#RX64\n", pVmcb->guest.GDTR.u64Base));
2818 Log4(("guest.LDTR.u16Sel %RTsel\n", pVmcb->guest.LDTR.u16Sel));
2819 Log4(("guest.LDTR.u16Attr %#x\n", pVmcb->guest.LDTR.u16Attr));
2820 Log4(("guest.LDTR.u32Limit %#RX32\n", pVmcb->guest.LDTR.u32Limit));
2821 Log4(("guest.LDTR.u64Base %#RX64\n", pVmcb->guest.LDTR.u64Base));
2823 Log4(("guest.IDTR.u32Limit %#RX32\n", pVmcb->guest.IDTR.u32Limit));
2824 Log4(("guest.IDTR.u64Base %#RX64\n", pVmcb->guest.IDTR.u64Base));
2826 Log4(("guest.TR.u16Sel %RTsel\n", pVmcb->guest.TR.u16Sel));
2827 Log4(("guest.TR.u16Attr %#x\n", pVmcb->guest.TR.u16Attr));
2828 Log4(("guest.TR.u32Limit %#RX32\n", pVmcb->guest.TR.u32Limit));
2829 Log4(("guest.TR.u64Base %#RX64\n", pVmcb->guest.TR.u64Base));
2831 Log4(("guest.u8CPL %#x\n", pVmcb->guest.u8CPL));
2832 Log4(("guest.u64CR0 %#RX64\n", pVmcb->guest.u64CR0));
2833 Log4(("guest.u64CR2 %#RX64\n", pVmcb->guest.u64CR2));
2834 Log4(("guest.u64CR3 %#RX64\n", pVmcb->guest.u64CR3));
2835 Log4(("guest.u64CR4 %#RX64\n", pVmcb->guest.u64CR4));
2836 Log4(("guest.u64DR6 %#RX64\n", pVmcb->guest.u64DR6));
2837 Log4(("guest.u64DR7 %#RX64\n", pVmcb->guest.u64DR7));
2839 Log4(("guest.u64RIP %#RX64\n", pVmcb->guest.u64RIP));
2840 Log4(("guest.u64RSP %#RX64\n", pVmcb->guest.u64RSP));
2841 Log4(("guest.u64RAX %#RX64\n", pVmcb->guest.u64RAX));
2842 Log4(("guest.u64RFlags %#RX64\n", pVmcb->guest.u64RFlags));
2844 Log4(("guest.u64SysEnterCS %#RX64\n", pVmcb->guest.u64SysEnterCS));
2845 Log4(("guest.u64SysEnterEIP %#RX64\n", pVmcb->guest.u64SysEnterEIP));
2846 Log4(("guest.u64SysEnterESP %#RX64\n", pVmcb->guest.u64SysEnterESP));
2848 Log4(("guest.u64EFER %#RX64\n", pVmcb->guest.u64EFER));
2849 Log4(("guest.u64STAR %#RX64\n", pVmcb->guest.u64STAR));
2850 Log4(("guest.u64LSTAR %#RX64\n", pVmcb->guest.u64LSTAR));
2851 Log4(("guest.u64CSTAR %#RX64\n", pVmcb->guest.u64CSTAR));
2852 Log4(("guest.u64SFMASK %#RX64\n", pVmcb->guest.u64SFMASK));
2853 Log4(("guest.u64KernelGSBase %#RX64\n", pVmcb->guest.u64KernelGSBase));
2854 Log4(("guest.u64GPAT %#RX64\n", pVmcb->guest.u64GPAT));
2855 Log4(("guest.u64DBGCTL %#RX64\n", pVmcb->guest.u64DBGCTL));
2856 Log4(("guest.u64BR_FROM %#RX64\n", pVmcb->guest.u64BR_FROM));
2857 Log4(("guest.u64BR_TO %#RX64\n", pVmcb->guest.u64BR_TO));
2858 Log4(("guest.u64LASTEXCPFROM %#RX64\n", pVmcb->guest.u64LASTEXCPFROM));
2859 Log4(("guest.u64LASTEXCPTO %#RX64\n", pVmcb->guest.u64LASTEXCPTO));
2887 * @param pCtx Pointer to the guest-CPU context.
2952 * Does the preparations before executing guest code in AMD-V.
2956 * guest-state information into the VMCB, assuming we will definitely execute the
2957 * guest in AMD-V. If we fall back to the recompiler after updating the VMCB and
2959 * that the recompiler can (and should) use them when it resumes guest
2964 * @retval VINF_SUCCESS if we can proceed with running the guest.
2969 * @param pCtx Pointer to the guest-CPU context.
2990 /* Load the guest bits that are not shared with the host in any way since we can longjmp or get preempted. */
2996 * If we're not intercepting TPR changes in the guest, save the guest TPR before the world-switch
2997 * so we can update it on the way back if the guest changed the TPR.
3025 * executing guest code.
3046 * guest code for certain (no exits to ring-3). Otherwise, we could re-read the flag on re-entry into
3070 * Prepares to run guest code in AMD-V and we've committed to doing so. This
3076 * @param pCtx Pointer to the guest-CPU context.
3089 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
3100 /* Load the state shared between host and guest (FPU, debug). */
3120 /* Store status of the shared guest-host state at the time of VMRUN. */
3146 * Save the current host TSC_AUX and write the guest TSC_AUX to the host, so that
3147 * RDTSCPs (that don't cause exits) read the guest MSR. See @bugref{3324}.
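The TSC_AUX swap described at 3146-3147 brackets VMRUN so that a guest RDTSCP which does not exit reads the guest's value, with the host value restored afterwards. Roughly (the guest-value getter is an assumption):

    uint64_t const uHostTscAux  = ASMRdMsr(MSR_K8_TSC_AUX);
    uint64_t const uGuestTscAux = CPUMR0GetGuestTscAux(pVCpu);  /* assumed getter */
    if (uGuestTscAux != uHostTscAux)
        ASMWrMsr(MSR_K8_TSC_AUX, uGuestTscAux);
    /* ... VMRUN / guest execution ... */
    if (uGuestTscAux != uHostTscAux)
        ASMWrMsr(MSR_K8_TSC_AUX, uHostTscAux);                  /* restore host value */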
3174 * Wrapper for running the guest code in AMD-V.
3179 * @param pCtx Pointer to the guest-CPU context.
3200 * Performs some essential restoration of state after running guest code in
3205 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3237 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
3256 hmR0SvmSaveGuestState(pVCpu, pMixedCtx); /* Save the guest state from the VMCB to the guest-CPU context. */
3282 * Runs the guest code using AMD-V.
3300 /* Preparatory work for running guest code; this may force us to return
3316 and guest into the guest-CPU state. Re-enables interrupts! */
3320 || SvmTransient.u64ExitCode == (uint64_t)SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
3351 * Runs the guest code using AMD-V in single step mode.
3356 * @param pCtx Pointer to the guest-CPU context.
3374 /* Preparatory work for running guest code; this may force us to return
3393 * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
3398 || SvmTransient.u64ExitCode == (uint64_t)SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. */
3450 * Runs the guest code using AMD-V.
3455 * @param pCtx Pointer to the guest-CPU context.
3486 * @param pCtx Pointer to the guest-CPU context.
3752 * @param pCtx The guest CPU context.
3792 * @param pCtx The guest CPU context.
3854 * @param pCtx Pointer to the guest-CPU context.
3858 * @remarks This updates the guest CR2 with @a uFaultAddress!
3870 /* Update CR2 of the guest. */
3947 * @param pCtx Pointer to the guest-CPU context.
4043 * Handle a condition that occurred while delivering an event through the guest
4049 * continue execution of the guest which will deliver the #DF.
4053 * @param pCtx Pointer to the guest-CPU context.
4064 * that was being delivered to the guest which caused a #VMEXIT that was intercepted (Exit vector). */
4071 SVMREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
4072 SVMREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
4120 * exception to the guest after handling the #VMEXIT.
4154 /* If uExitVector is #PF, CR2 value will be updated from the VMCB if it's a guest #PF. See hmR0SvmExitXcptPF(). */
4185 * Advances the guest RIP if the NRIP_SAVE feature is supported by the
4189 * @param pCtx Pointer to the guest-CPU context.
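With the NRIP_SAVE decode assist, the VMCB control area carries the next-instruction pointer at #VMEXIT, so no software instruction-length decoding is needed; without it, a caller-supplied length is added. A sketch (the feature-bit name is assumed):

    if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
        pCtx->rip = pVmcb->ctrl.u64NextRIP; /* hardware-provided next RIP */
    else
        pCtx->rip += cbInstr;               /* fall back to known length  */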
4234 * Going back to executing guest code here unconditionally causes random scheduling problems (observed on an
4533 * the guest.
4601 * We've already saved the APIC-related guest-state (TPR) in hmR0SvmPostRunGuest(). When full APIC register
4653 /* We should -not- get this #VMEXIT if the guest's debug registers were active. */
4667 Log5(("hmR0SvmExitReadDRx: Lazy loading guest debug registers\n"));
4679 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
4858 pVmcb->guest.u64DR6 = pCtx->dr[6];
4859 pVmcb->guest.u64DR7 = pCtx->dr[7];
4951 * MMIO optimization using the reserved (RSVD) bit in the guest page tables for MMIO pages.
4962 * If we succeed, resume guest execution.
4963 * If we fail in interpreting the instruction because we couldn't get the guest physical address
4964 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
4965 * in the host TLB), resume execution, which would cause a guest page fault to let the guest handle this
5012 /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive interrupts/NMIs; it is now ready. */
5016 /* Deliver the pending interrupt/NMI via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
5033 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
5076 /* If the hypercall changes anything other than guest general-purpose registers,
5077 we would need to reload the changed guest bits here before VM-reentry. */
5099 /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs; it is now ready. */
5103 /* Deliver the pending NMI via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
5130 /* A genuine guest #PF, reflect it to the guest. */
5137 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
5205 /* It's a guest page fault and needs to be reflected to the guest. */
5212 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
5243 /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */
5270 /* Forward #NM to the guest. */
5350 int rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), pVmcb->guest.u64DR6, pVCpu->hm.s.fSingleInstruction);
5353 Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> guest trap\n", pVmcb->guest.u64DR6));
5355 CPUMSetGuestDR6(pVCpu, CPUMGetGuestDR6(pVCpu) | pVmcb->guest.u64DR6);
5357 /* Reflect the exception back to the guest. */
5367 Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> %Rrc\n", pVmcb->guest.u64DR6, rc));
5368 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;