CPUMR0.cpp revision e153d6933f9a73d7071afc4b501b4c1489243818
 * CPUM - Host Context Ring 0.
 *
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * Local APIC mappings.
 */
    /** Indicates that the entry is in use and has valid data. */
    /** Whether it's operating in X2APIC mode (EXTD). */
    /** The APIC version number. */
    /** The physical address of the APIC registers. */
    /** The memory object used to enter the physical address. */
    /** The mapping object for hMemObj. */
    /** The mapping address of the APIC registers.
     * @remarks Different CPUs may use the same physical address to map their
     *          APICs, so this pointer is only valid when on the CPU owning the
     *          APIC. */
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/**
 * CPUID bits to unify among all cores.
 */
static struct
    uint32_t ecx; /**< which bits in ecx to unify between CPUs. */
    uint32_t edx; /**< which bits in edx to unify between CPUs. */

    0x00000001,
    X86_CPUID_FEATURE_ECX_CX16 | X86_CPUID_FEATURE_ECX_MONITOR,
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static int  cpumR0MapLocalApics(void);
static void cpumR0UnmapLocalApics(void);
/**
 * Does the Ring-0 CPU initialization once during module load.
 * XXX Host-CPU hot-plugging?
 */

/**
 * Terminate the module.
 */
/**
 * Check the CPUID features of this particular CPU and disable relevant features
 * for the guest which do not exist on this CPU. We have seen systems where the
 * X86_CPUID_FEATURE_ECX_MONITOR feature flag is only set on some host CPUs, see
 * @bugref{5436}.
 *
 * @note This function might be called simultaneously on more than one CPU!
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     Pointer to the VM structure.
 * @param   pvUser2     Ignored.
 */
static DECLCALLBACK(void) cpumR0CheckCpuid(RTCPUID idCpu, void *pvUser1, void *pvUser2)
    for (uint32_t i = 0; i < RT_ELEMENTS(g_aCpuidUnifyBits); i++)
        /* Note! Cannot use cpumCpuIdGetLeaf from here because we're not
                 necessarily in the VM process context. So, we use the
                 legacy arrays as temporary storage. */
        if (uLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
        else if (uLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
            pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdExt[uLeaf - UINT32_C(0x80000000)];
        else if (uLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
            pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdCentaur[uLeaf - UINT32_C(0xc0000000)];

        ASMCpuIdExSlow(uLeaf, 0, 0, 0, &eax, &ebx, &ecx, &edx);
        ASMAtomicAndU32(&pLegacyLeaf->ecx, ecx | ~g_aCpuidUnifyBits[i].ecx);
        ASMAtomicAndU32(&pLegacyLeaf->edx, edx | ~g_aCpuidUnifyBits[i].edx);
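
/*
 * A minimal sketch of the unification technique used above: each CPU
 * atomically ANDs the shared leaf with its own feature bits OR'ed with the
 * inverted mask, so a masked feature bit survives only if every CPU reports
 * it. Standard C++ atomics stand in for ASMAtomicAndU32, and RawCpuId is a
 * hypothetical stand-in for ASMCpuIdExSlow.
 */
#include <atomic>
#include <cstdint>

struct UnifyMask  { uint32_t uLeaf, ecx, edx; };
struct SharedLeaf { std::atomic<uint32_t> ecx, edx; };

void RawCpuId(uint32_t uLeaf, uint32_t *pEcx, uint32_t *pEdx); /* hypothetical helper */

void UnifyOnThisCpu(const UnifyMask &Mask, SharedLeaf &Leaf)
{
    uint32_t ecx, edx;
    RawCpuId(Mask.uLeaf, &ecx, &edx);
    /* Bits outside the mask stay untouched (they're OR'ed into the AND
       operand); masked bits are cleared unless this CPU has them too. */
    Leaf.ecx.fetch_and(ecx | ~Mask.ecx);
    Leaf.edx.fetch_and(edx | ~Mask.edx);
}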

/**
 * Does Ring-0 CPUM initialization.
 *
 * This is mainly to check that the Host CPU mode is compatible
 * with VBox.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */

    /*
     * Check CR0 & CR4 flags.
     */
    if ((u32CR0 & (X86_CR0_PE | X86_CR0_PG)) != (X86_CR0_PE | X86_CR0_PG)) /* a bit paranoid perhaps... */
        Log(("CPUMR0Init: PE or PG not set. cr0=%#x\n", u32CR0));
78a072e1b56619e3230735ae073668311232ec94vboxsync * Check for sysenter and syscall usage.
cc260ed3418d1fd2771d0395f818f76808b60238vboxsync * Intel docs claim you should test both the flag and family, model &
cc260ed3418d1fd2771d0395f818f76808b60238vboxsync * stepping because some Pentium Pro CPUs have the SEP cpuid flag set,
cc260ed3418d1fd2771d0395f818f76808b60238vboxsync * but don't support it. AMD CPUs may support this feature in legacy
cc260ed3418d1fd2771d0395f818f76808b60238vboxsync * mode, they've banned it from long mode. Since we switch to 32-bit
cc260ed3418d1fd2771d0395f818f76808b60238vboxsync * mode when entering raw-mode context the feature would become
cc260ed3418d1fd2771d0395f818f76808b60238vboxsync * accessible again on AMD CPUs, so we have to check regardless of
84ac4fe6468b328aadfe9994176e00504338a47avboxsync * host bitness.
cc260ed3418d1fd2771d0395f818f76808b60238vboxsync ASMCpuId(1, &u32CpuVersion, &u32Dummy, &u32Dummy, &fFeatures);
cc260ed3418d1fd2771d0395f818f76808b60238vboxsync uint32_t const u32Model = (u32CpuVersion >> 4) & 0xF;
cc260ed3418d1fd2771d0395f818f76808b60238vboxsync * Read the MSR and see if it's in use or not.
78a072e1b56619e3230735ae073668311232ec94vboxsync Log(("CPUMR0Init: host uses sysenter cs=%08x%08x\n", ASMRdMsr_High(MSR_IA32_SYSENTER_CS), u32));
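
/*
 * Minimal sketch of the SEP sanity check described above: decode family,
 * model and stepping from CPUID leaf 1 EAX and treat early P6 parts as not
 * having a usable SYSENTER despite the flag. The exact thresholds (family 6,
 * model < 3, stepping < 3) are an assumption based on the Intel documentation
 * quirk mentioned above, not something taken from this file.
 */
#include <cstdint>

static bool IsSysenterUsable(uint32_t u32CpuVersion, uint32_t fFeatures, bool fIntel)
{
    if (!(fFeatures & (UINT32_C(1) << 11)))     /* CPUID.1:EDX[11] = SEP */
        return false;

    uint32_t const u32Family   = (u32CpuVersion >> 8) & 0xF;
    uint32_t const u32Model    = (u32CpuVersion >> 4) & 0xF;
    uint32_t const u32Stepping =  u32CpuVersion       & 0xF;

    /* Early Pentium Pro parts set SEP without actually implementing it. */
    if (fIntel && u32Family == 6 && u32Model < 3 && u32Stepping < 3)
        return false;
    return true;
}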
        /*
         * This feature is indicated by the SYSCALL/SYSRET bit returned in EDX
         * by CPUID function 0x80000001. Intel CPUs only support this feature
         * in long mode. Since we're not running 64-bit guests in raw-mode
         * there are no issues with 32-bit Intel hosts.
         */
        ASMCpuId(0x80000000, &cExt, &u32Dummy, &u32Dummy, &u32Dummy);
        uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001);
        if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
            if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
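
/*
 * Minimal sketch of the SYSCALL detection path, using the GCC/Clang
 * <cpuid.h> builtin instead of the IPRT ASMCpuId helpers; the nested
 * long-mode check mirrors the 32-bit host case above.
 */
#include <cpuid.h>
#include <cstdint>

static bool HostHasSyscall(bool fHost64Bit)
{
    unsigned eax, ebx, ecx, edx;
    if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
        return false;                            /* no extended leaves */
    if (!(edx & (1u << 11)))                     /* CPUID.80000001h:EDX[11] = SYSCALL */
        return false;
    /* On a 32-bit host only count it when long mode exists too, mirroring
       the nested X86_CPUID_EXT_FEATURE_EDX_LONG_MODE check above. */
    return fHost64Bit || (edx & (1u << 29));     /* EDX[29] = LM */
}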

    /*
     * Unify/cross check some CPUID feature bits on all available CPU cores
     * and threads. We've seen CPUs where the monitor support differed.
     *
     * Because the hyper heap isn't always mapped into ring-0, we cannot
     * access it from a RTMpOnAll callback. We use the legacy CPUID arrays
     * as temp ring-0 accessible memory instead, ASSUMING that they're all
     * up to date when we get here.
     */
    for (uint32_t i = 0; i < RT_ELEMENTS(g_aCpuidUnifyBits); i++)
        PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, uLeaf, 0);
        if (uLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
        else if (uLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
            pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdExt[uLeaf - UINT32_C(0x80000000)];
        else if (uLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
            pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdCentaur[uLeaf - UINT32_C(0xc0000000)];

    /*
     * Check if debug registers are armed.
     * This ASSUMES that DR7.GD is not set, or that it's handled transparently!
     */
            pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
        Log(("CPUMR0Init: host uses debug registers (dr7=%x)\n", u32DR7));
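
/*
 * Minimal sketch of the "are the host debug registers armed" test: any of
 * the L0/G0..L3/G3 enable bits in the low byte of DR7 means a hardware
 * breakpoint is active and the host state must be preserved around guest
 * runs.
 */
#include <cstdint>

static bool IsDr7Armed(uint64_t uDr7)
{
    /* Bits 0..7 are L0,G0 .. L3,G3; DR7.GD (bit 13) is assumed clear, as the
       comment above notes. */
    return (uDr7 & UINT64_C(0xFF)) != 0;
}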

/**
 * Trap handler for device-not-available fault (#NM).
 * Device not available, FP or (F)WAIT instruction.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS            if the guest FPU state is loaded.
 * @retval  VINF_EM_RAW_GUEST_TRAP  if it is a guest trap.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pCtx    Pointer to the guest-CPU context.
 */
VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    /* If the FPU state has already been loaded, then it's a guest trap. */
        Assert(   ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS))
               || ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS | X86_CR0_EM)));
    /*
     * There are two basic actions:
     *   1. Save host fpu and restore guest fpu.
     *   2. Generate guest trap.
     *
     * When entering the hypervisor we'll always enable MP (for proper wait
     * trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
     * is taken from the guest OS in order to get proper SSE handling.
     *
     * Actions taken depending on the guest CR0 flags:
     *
     * TS | EM | MP | FPUInstr | WAIT :: VMM Action
     * ------------------------------------------------------------------------
     *  0 |  0 |  0 | Exec     | Exec :: Clear TS & MP, Save HC, Load GC.
     *  0 |  0 |  1 | Exec     | Exec :: Clear TS, Save HC, Load GC.
     *  0 |  1 |  0 | #NM      | Exec :: Clear TS & MP, Save HC, Load GC.
     *  0 |  1 |  1 | #NM      | Exec :: Clear TS, Save HC, Load GC.
     *  1 |  0 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
     *  1 |  0 |  1 | #NM      | #NM  :: Go to guest taking trap there.
     *  1 |  1 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
     *  1 |  1 |  1 | #NM      | #NM  :: Go to guest taking trap there.
     */
    switch (pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
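
/*
 * Condensed form of the table above, as a sketch: the trap is forwarded to
 * the guest only when both CR0.TS and CR0.MP are set; every other row swaps
 * in the guest FPU state. The constants below are the architectural CR0 bit
 * values, not definitions taken from this file.
 */
#include <cstdint>

static const uint32_t kCr0Mp = UINT32_C(0x00000002);
static const uint32_t kCr0Em = UINT32_C(0x00000004);
static const uint32_t kCr0Ts = UINT32_C(0x00000008);

enum NmAction { kLoadGuestFpu, kForwardTrapToGuest };

static NmAction DecideOnNm(uint32_t cr0)
{
    switch (cr0 & (kCr0Mp | kCr0Em | kCr0Ts))
    {
        case kCr0Mp | kCr0Ts:
        case kCr0Mp | kCr0Ts | kCr0Em:
            return kForwardTrapToGuest;     /* TS=1, MP=1 rows: guest takes #NM */
        default:
            return kLoadGuestFpu;           /* save host state, load guest state */
    }
}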

/**
 * Saves the host-FPU/XMM state and loads the guest-FPU state into the CPU.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pCtx    Pointer to the guest-CPU context.
 */
VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
        /* Save the host state and record the fact (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM). */
        /* Restore the state on entry as we need to be in 64-bit mode to access the full state. */
        Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE));
        /** @todo Move the FFXSR handling down into
         *        cpumR0SaveHostRestoreGuestFPUState to optimize the
         *        VBOX_WITH_KERNEL_USING_XMM handling. */
        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
        bool fRestoreEfer = false;
        if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
            /** @todo r=ramshankar: Can't we use a cached value here
             *        instead of reading the MSR? Host EFER doesn't usually
             *        change. */
                ASMWrMsr(MSR_K6_EFER, uHostEfer & ~MSR_K6_EFER_FFXSR);
                pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
        /* Do the job and record that we've switched FPU state. */
        cpumR0SaveHostRestoreGuestFPUState(&pVCpu->cpum.s);
        /* Restore EFER. */
    Assert((pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)) == (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM));
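
/*
 * Sketch of the EFER.FFXSR dance around the FPU switch above. ReadMsr,
 * WriteMsr and DoFpuSwitch are hypothetical ring-0 stand-ins; the MSR number
 * (0xC0000080) and the FFXSR bit (bit 14) are the architectural values.
 */
#include <cstdint>

uint64_t ReadMsr(uint32_t idMsr);               /* hypothetical ring-0 helper */
void     WriteMsr(uint32_t idMsr, uint64_t u);  /* hypothetical ring-0 helper */
void     DoFpuSwitch(void);                     /* save host / load guest FPU state */

static const uint32_t kMsrEfer   = UINT32_C(0xC0000080);
static const uint64_t kEferFfxsr = UINT64_C(1) << 14;

static void SwitchFpuWithFfxsrWorkaround(bool fCpuHasFfxsr)
{
    uint64_t uHostEfer    = 0;
    bool     fRestoreEfer = false;
    if (fCpuHasFfxsr)
    {
        uHostEfer = ReadMsr(kMsrEfer);
        if (uHostEfer & kEferFfxsr)
        {
            /* With FFXSR active, fxsave/fxrstor at CPL 0 skip the XMM
               registers, so turn it off for a full save/restore. */
            WriteMsr(kMsrEfer, uHostEfer & ~kEferFfxsr);
            fRestoreEfer = true;
        }
    }
    DoFpuSwitch();
    if (fRestoreEfer)
        WriteMsr(kMsrEfer, uHostEfer);
}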

/**
 * Save guest FPU/XMM state.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pCtx    Pointer to the guest CPU context.
 */
VMMR0_INT_DECL(int) CPUMR0SaveGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    AssertReturn((pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU), VINF_SUCCESS);
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE))
    /* else nothing to do; we didn't perform a world switch */
        /*
         * The assembly wrapper has already saved the guest XMM registers, so
         * we have to preserve that copy before saving the entire FPU state
         * and put it back afterwards.
         */
        /** @todo This could be skipped if MSR_K6_EFER_FFXSR is set, but
         *        I'm not able to test such an optimization tonight.
         *        We could just do all this in assembly. */
        memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.fpu.aXMM[0], sizeof(aGuestXmmRegs));
        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
        bool fRestoreEfer = false;
        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
                ASMWrMsr(MSR_K6_EFER, uHostEfer & ~MSR_K6_EFER_FFXSR);
        cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);
        /* Restore EFER MSR */
            ASMWrMsr(MSR_K6_EFER, uHostEfer | MSR_K6_EFER_FFXSR);
        memcpy(&pVCpu->cpum.s.Guest.fpu.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));
    pVCpu->cpum.s.fUseFlags &= ~(CPUM_USED_FPU | CPUM_SYNC_FPU_STATE | CPUM_USED_MANUAL_XMM_RESTORE);
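
/*
 * Sketch of the XMM preservation pattern above for kernels that use XMM
 * themselves: the guest XMM values already written into the context by the
 * assembly wrapper are stashed, the full FPU save overwrites that area, and
 * the stashed values are copied back. GuestFpuArea and
 * SaveGuestRestoreHostFpu are hypothetical stand-ins.
 */
#include <cstdint>
#include <cstring>

struct GuestFpuArea
{
    alignas(16) uint8_t aXMM[16][16];   /* XMM0..XMM15, plus the rest of the FXSAVE image */
};

void SaveGuestRestoreHostFpu(GuestFpuArea *pArea);  /* hypothetical: fxsave guest, fxrstor host */

static void SaveGuestFpuPreservingXmm(GuestFpuArea *pArea)
{
    uint8_t aGuestXmmRegs[16][16];
    std::memcpy(aGuestXmmRegs, pArea->aXMM, sizeof(aGuestXmmRegs));  /* stash wrapper-saved XMM */
    SaveGuestRestoreHostFpu(pArea);                                  /* clobbers pArea->aXMM */
    std::memcpy(pArea->aXMM, aGuestXmmRegs, sizeof(aGuestXmmRegs));  /* put the guest values back */
}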

/**
 * Saves the host debug state, setting CPUM_USED_DEBUG_REGS_HOST and loading
 * DR7 with safe values.
 *
 * @returns VBox status code.
 * @param   pVCpu   Pointer to the VMCPU.
 */

    /*
     * Save the host state.
     */
    AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
    /** @todo dr7 might already have been changed to 0x400; don't care right now as it's harmless. */
    /* Preemption paranoia. */
    ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HOST);

    /*
     * Make sure DR7 is harmless or else we could trigger breakpoints when
     * loading guest or hypervisor DRx values later.
     */
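
/*
 * Sketch of the host debug-register save-and-disarm step above. GetDr0..GetDr7
 * and SetDr7 are hypothetical ring-0 accessors (IPRT's ASMGetDRx/ASMSetDR7
 * fill this role in the real code); 0x400 is the architectural DR7 reset
 * value, i.e. all breakpoints disarmed.
 */
#include <cstdint>

uint64_t GetDr0(void); uint64_t GetDr1(void); uint64_t GetDr2(void); uint64_t GetDr3(void);
uint64_t GetDr6(void); uint64_t GetDr7(void);
void     SetDr7(uint64_t uDr7);

struct HostDebugState { uint64_t dr0, dr1, dr2, dr3, dr6, dr7; };

static void SaveHostDebugStateAndDisarm(HostDebugState *pHost)
{
    pHost->dr0 = GetDr0();
    pHost->dr1 = GetDr1();
    pHost->dr2 = GetDr2();
    pHost->dr3 = GetDr3();
    pHost->dr6 = GetDr6();
    pHost->dr7 = GetDr7();
    /* Load a harmless DR7 so stale host breakpoints cannot fire while guest
       or hypervisor DRx values are being loaded. */
    SetDr7(UINT64_C(0x400));
}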

/**
 * Saves the guest DRx state residing in host registers and restores the host
 * register values.
 *
 * The guest DRx state is only saved if CPUMR0LoadGuestDebugState was called,
 * since it's assumed that we're shadowing the guest DRx register values
 * accurately when using the combined hypervisor debug register values
 * (CPUMR0LoadHyperDebugState).
 *
 * @returns true if either guest or hypervisor debug registers were loaded.
 * @param   pVCpu   The cross context CPU structure for the calling EMT.
 * @param   fDr6    Whether to include DR6 or not.
 * @thread  EMT(pVCpu)
 */
VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDr6)
    bool const fDrXLoaded = RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER));

    /*
     * Do we need to save the guest DRx registers loaded into host registers?
     * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
     */
    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
    ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~(  CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER
                                                | CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER));

    /*
     * Restore the host's debug state. DR0-3, DR6 and only then DR7!
     */
    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST)
        /* A bit of paranoia first... */
        AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
        /** @todo consider only updating if they differ, esp. DR6. Need to figure out how
         *        expensive DRx reads are over DRx writes. */
        ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~CPUM_USED_DEBUG_REGS_HOST);

/**
 * Saves the guest DRx state if it resides in host registers.
 *
 * This does NOT clear any use flags, so the host registers remain loaded with
 * the guest DRx state upon return. The purpose is only to make sure the values
 * in the CPU context structure are up to date.
 *
 * @returns true if the host registers contain guest values, false if not.
 * @param   pVCpu   The cross context CPU structure for the calling EMT.
 * @param   fDr6    Whether to include DR6 or not.
 * @thread  EMT(pVCpu)
 */
VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPU pVCpu, bool fDr6)
    /*
     * Do we need to save the guest DRx registers loaded into host registers?
     * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
     */
    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
        return true;
    return false;

/**
 * Lazily sync in the debug state.
 *
 * @param   pVCpu   The cross context CPU structure for the calling EMT.
 * @param   fDr6    Whether to include DR6 or not.
 * @thread  EMT(pVCpu)
 */
VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDr6)
    /*
     * Save the host state and disarm all host BPs.
     */

    /*
     * Activate the guest state DR0-3.
     * DR7 and DR6 (if fDr6 is true) are left to the caller.
     */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_GUEST); /* Postpone it to the world switch. */
    ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_GUEST);

/**
 * Lazily sync in the hypervisor debug state.
 *
 * @param   pVCpu   The cross context CPU structure for the calling EMT.
 * @param   fDr6    Whether to include DR6 or not.
 * @thread  EMT(pVCpu)
 */
VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDr6)
    /*
     * Save the host state and disarm all host BPs.
     */

    /*
     * Make sure the hypervisor values are up to date.
     */
    CPUMRecalcHyperDRx(pVCpu, UINT8_MAX /* no loading, please */, true);

    /*
     * Activate the guest state DR0-3.
     * DR7 and DR6 (if fDr6 is true) are left to the caller.
     */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_HYPER); /* Postpone it. */
    ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HYPER);

/**
 * Per-CPU callback that probes the CPU for APIC support.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     Ignored.
 * @param   pvUser2     Ignored.
 */
static DECLCALLBACK(void) cpumR0MapLocalApicCpuProber(RTCPUID idCpu, void *pvUser1, void *pvUser2)
    AssertReturnVoid(iCpu >= 0 && (unsigned)iCpu < RT_ELEMENTS(g_aLApics));

    /*
     * Check for APIC support.
     */

    /*
     * Safe to access the MSR. Read it and calc the BASE (a little complicated).
     */
    uint64_t u64ApicBase = ASMRdMsr(MSR_IA32_APICBASE);

    /* See Intel Manual: Local APIC Status and Location: MAXPHYADDR default is bit 36. */
    ASMCpuId(0x80000000, &uMaxExtLeaf, &u32EBX, &u32ECX, &u32EDX);
        ASMCpuId(0x80000008, &u32PhysBits, &u32EBX, &u32ECX, &u32EDX);
        u64Mask = ((UINT64_C(1) << u32PhysBits) - 1) & UINT64_C(0xfffffffffffff000);

    AssertCompile(sizeof(g_aLApics[iCpu].PhysBase) == sizeof(u64ApicBase));
    g_aLApics[iCpu].fEnabled = RT_BOOL(u64ApicBase & MSR_IA32_APICBASE_EN);
    g_aLApics[iCpu].fX2Apic  =    (u64ApicBase & (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN))
                               == (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN);
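
/*
 * Sketch of the APIC base decoding done by the prober: mask off the low 12
 * flag/reserved bits and everything above MAXPHYADDR, then derive the mode
 * from EN (bit 11) and EXTD (bit 10). The bit positions follow the
 * architectural IA32_APIC_BASE layout; cPhysBits is 36 when CPUID 0x80000008
 * is absent, matching the comment above.
 */
#include <cstdint>

static const uint64_t kApicBaseEn   = UINT64_C(1) << 11;
static const uint64_t kApicBaseExtd = UINT64_C(1) << 10;

struct ApicProbe { bool fEnabled; bool fX2Apic; uint64_t PhysBase; };

static ApicProbe DecodeApicBase(uint64_t u64ApicBase, uint32_t cPhysBits /* 36..52 in practice */)
{
    ApicProbe Probe;
    uint64_t const u64Mask = ((UINT64_C(1) << cPhysBits) - 1) & UINT64_C(0xfffffffffffff000);
    Probe.PhysBase = u64ApicBase & u64Mask;
    Probe.fEnabled = (u64ApicBase & kApicBaseEn) != 0;
    Probe.fX2Apic  =    (u64ApicBase & (kApicBaseEn | kApicBaseExtd))
                     == (kApicBaseEn | kApicBaseExtd);
    return Probe;
}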

/**
 * Per-CPU callback that verifies our APIC expectations.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     Ignored.
 * @param   pvUser2     Ignored.
 */
static DECLCALLBACK(void) cpumR0MapLocalApicCpuChecker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
    AssertReturnVoid(iCpu >= 0 && (unsigned)iCpu < RT_ELEMENTS(g_aLApics));

    /*
     * 0x0X       82489 external APIC
     * 0x1X       Local APIC
     * 0x2X..0xFF reserved
     */
    uApicVersion = ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_VERSION);
    if ((APIC_REG_VERSION_GET_VER(uApicVersion) & 0xF0) == 0x10)
#if 0 /* enable if you need it. */
        SUPR0Printf("CPUM: X2APIC %02u - ver %#010x, lint0=%#07x lint1=%#07x pc=%#07x thmr=%#07x cmci=%#07x\n",
                    ApicX2RegRead32(APIC_REG_LVT_LINT0), ApicX2RegRead32(APIC_REG_LVT_LINT1),
                    ApicX2RegRead32(APIC_REG_LVT_PC), ApicX2RegRead32(APIC_REG_LVT_THMR),
        SUPR0Printf("CPUM: APIC %02u at %RGp (mapped at %p) - ver %#010x, lint0=%#07x lint1=%#07x pc=%#07x thmr=%#07x cmci=%#07x\n",
                    iCpu, g_aLApics[iCpu].PhysBase, g_aLApics[iCpu].pv, uApicVersion,
                    ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_LINT0), ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_LINT1),
                    ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_PC), ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_THMR),
                    ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_CMCI));
        uint32_t uExtFeatures = ApicRegRead(g_aLApics[iCpu].pv, 0x400);
            SUPR0Printf("CPUM: APIC %02u: ExtSpace available. extfeat=%08x eilvt[0..3]=%08x %08x %08x %08x\n",
                        cEiLvt >= 1 ? ApicRegRead(g_aLApics[iCpu].pv, 0x500) : 0,
                        cEiLvt >= 2 ? ApicRegRead(g_aLApics[iCpu].pv, 0x510) : 0,
                        cEiLvt >= 3 ? ApicRegRead(g_aLApics[iCpu].pv, 0x520) : 0,
                        cEiLvt >= 4 ? ApicRegRead(g_aLApics[iCpu].pv, 0x530) : 0);
        SUPR0Printf("VBox/CPUM: Unsupported APIC version %#x (iCpu=%d)\n", uApicVersion, iCpu);

/**
 * Map the MMIO page of each local APIC in the system.
 */
static int cpumR0MapLocalApics(void)
    /*
     * Check that we'll always stay within the array bounds.
     */
        LogRel(("CPUM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_aLApics)));

    /*
     * Create mappings for all online CPUs we think have legacy APICs.
     */
    int rc = RTMpOnAll(cpumR0MapLocalApicCpuProber, NULL, NULL);

    for (unsigned iCpu = 0; RT_SUCCESS(rc) && iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
        if (g_aLApics[iCpu].fEnabled && !g_aLApics[iCpu].fX2Apic)
            rc = RTR0MemObjEnterPhys(&g_aLApics[iCpu].hMemObj, g_aLApics[iCpu].PhysBase,
            rc = RTR0MemObjMapKernel(&g_aLApics[iCpu].hMapObj, g_aLApics[iCpu].hMemObj, (void *)-1,
                g_aLApics[iCpu].pv = RTR0MemObjAddress(g_aLApics[iCpu].hMapObj);
                RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);

    /*
     * Check the APICs.
     */
    rc = RTMpOnAll(cpumR0MapLocalApicCpuChecker, NULL, NULL);

    /*
     * Log the result (pretty useless, requires enabling CPUM in VBoxDrv
     * and !VBOX_WITH_R0_LOGGING).
     */
    for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
    Log(("CPUM: %u APICs, %u X2APICs\n", cEnabled, cX2Apics));
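
/*
 * Sketch of the per-CPU mapping step used in the loop above, assuming the
 * IPRT signatures RTR0MemObjEnterPhys(pMemObj, Phys, cb, uCachePolicy) and
 * RTR0MemObjMapKernel(pMemObj, hMemObjToMap, pvFixed, uAlignment, fProt);
 * treat the exact arguments as an assumption rather than the authoritative
 * calls, since they are truncated in the fragment above.
 */
#include <iprt/memobj.h>
#include <iprt/mem.h>
#include <iprt/param.h>
#include <iprt/err.h>

static int MapOneLocalApic(RTHCPHYS PhysBase, PRTR0MEMOBJ phMemObj, PRTR0MEMOBJ phMapObj, void **ppv)
{
    int rc = RTR0MemObjEnterPhys(phMemObj, PhysBase, PAGE_SIZE, RTMEM_CACHE_POLICY_MMIO);
    if (RT_SUCCESS(rc))
    {
        rc = RTR0MemObjMapKernel(phMapObj, *phMemObj, (void *)-1 /* any kernel address */,
                                 PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        if (RT_SUCCESS(rc))
        {
            *ppv = RTR0MemObjAddress(*phMapObj);
            return rc;
        }
        RTR0MemObjFree(*phMemObj, true /* fFreeMappings */);
        *phMemObj = NIL_RTR0MEMOBJ;
    }
    return rc;
}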

/**
 * Unmap the Local APIC of all host CPUs.
 */
static void cpumR0UnmapLocalApics(void)
    for (unsigned iCpu = RT_ELEMENTS(g_aLApics); iCpu-- > 0;)
        RTR0MemObjFree(g_aLApics[iCpu].hMapObj, true /* fFreeMappings */);
        RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);

/**
 * Updates CPUMCPU::pvApicBase and CPUMCPU::fX2Apic prior to world switch.
 *
 * Writes the Local APIC mapping address of the current host CPU to CPUMCPU so
 * the world switchers can access the APIC registers for the purpose of
 * disabling and re-enabling the NMIs. Must be called with disabled preemption
 * or disabled interrupts!
 *
 * @param   pVCpu       Pointer to the cross context CPU structure of the
 *                      calling EMT.
 * @param   iHostCpuSet The CPU set index of the current host CPU.
 */
VMMR0_INT_DECL(void) CPUMR0SetLApic(PVMCPU pVCpu, uint32_t iHostCpuSet)
    pVCpu->cpum.s.pvApicBase = g_aLApics[iHostCpuSet].pv;
    pVCpu->cpum.s.fX2Apic    = g_aLApics[iHostCpuSet].fX2Apic;
//  Log6(("CPUMR0SetLApic: pvApicBase=%p fX2Apic=%d\n", g_aLApics[idxCpu].pv, g_aLApics[idxCpu].fX2Apic));
#endif /* VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI */