/* CPUMR0.cpp revision b6d29e8ff79b0a0b18da5b0768fb44dd68e35893 */
/* $Id$ */
/** @file
* CPUM - Host Context Ring 0.
*/
/*
* Copyright (C) 2006-2011 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include "CPUMInternal.h"
#include <iprt/asm-amd64-x86.h>
#endif
/*******************************************************************************
* Structures and Typedefs *
*******************************************************************************/
/**
 * Local APIC mappings.
 *
 * One entry per host CPU; see g_aLApics usage in cpumR0MapLocalApics().
 *
 * NOTE(review): this copy of the file is truncated - several field
 * declarations documented below are missing, and the closing
 * '} CPUMHOSTLAPIC;' has been replaced by an unmatched '#endif'.
 * Restore from upstream before compiling.
 */
typedef struct CPUMHOSTLAPIC
{
/** Indicates that the entry is in use and have valid data. */
bool fEnabled;
/** Has APIC_REG_LVT_THMR. Not used. */
/* NOTE(review): declaration for the field documented above is elided. */
/** The physical address of the APIC registers. */
/* NOTE(review): declaration elided. */
/** The memory object entering the physical address. */
/* NOTE(review): declaration elided (presumably hMemObj). */
/** The mapping object for hMemObj. */
/* NOTE(review): declaration elided (presumably hMapObj). */
/** The mapping address APIC registers.
* @remarks Different CPUs may use the same physical address to map their
* APICs, so this pointer is only valid when on the CPU owning the
* APIC. */
void *pv;
#endif
/*******************************************************************************
* Global Variables *
*******************************************************************************/
#endif
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
static int cpumR0MapLocalApics(void);
static void cpumR0UnmapLocalApics(void);
#endif
/**
 * Does the Ring-0 CPU initialization once during module load.
 * XXX Host-CPU hot-plugging?
 *
 * @returns VBox status code: VINF_SUCCESS, or the status returned by
 *          cpumR0MapLocalApics().
 */
VMMR0DECL(int) CPUMR0ModuleInit(void)
{
int rc = VINF_SUCCESS;
/* Map the local APIC MMIO page of each host CPU. */
rc = cpumR0MapLocalApics();
/* NOTE(review): unmatched '#endif' - the opening '#ifdef' (presumably
 * VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI, cf. the '#endif' comment at the end of
 * this file) has been elided from this copy. */
#endif
return rc;
}
/**
 * Terminate the module.
 *
 * @returns VINF_SUCCESS (always).
 */
VMMR0DECL(int) CPUMR0ModuleTerm(void)
{
/* NOTE(review): unmatched '#endif' - the guarded statements (presumably the
 * cpumR0UnmapLocalApics() call declared above) have been elided. */
#endif
return VINF_SUCCESS;
}
/**
 * Check the CPUID features of this particular CPU and disable relevant features
 * for the guest which do not exist on this CPU. We have seen systems where the
 * X86_CPUID_FEATURE_ECX_MONITOR feature flag is only set on some host CPUs, see
 * @bugref{5436}.
 *
 * @param idCpu The identifier for the CPU the function is called on.
 * @param pvUser1 Pointer to the VM structure.
 * @param pvUser2 Ignored.
 */
/* NOTE(review): the function signature has been elided from this copy (the
 * parameter list above suggests an RTMpOnAll-style worker callback), and the
 * body below is fragmentary: the members of the unify table, the loop over
 * CPUID leaves, and the branch bodies are all missing. Restore from upstream. */
{
struct
{
/* NOTE(review): member declarations elided (presumably a leaf number and the
 * ECX/EDX feature masks to unify, judging by the initializer below). */
} aCpuidUnify[]
=
{
{ 0x00000001, X86_CPUID_FEATURE_ECX_CX16
};
{
if (uLeave < 0x80000000)
else if (uLeave < 0xc0000000)
else
/* unify important bits */
}
}
/**
 * Does Ring-0 CPUM initialization.
 *
 * This is mainly to check that the Host CPU mode is compatible
 * with VBox.
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 */
/* NOTE(review): the function signature (presumably CPUMR0Init, per the brief
 * above) and many statements have been elided from this copy - e.g. the reads
 * that populate u32CR0/u32DR7, the CPUID probing that fills fFeatures, u32Model,
 * u32Stepping and cExt, and the MSR reads for u32/fEfer. The condition at the
 * SEP check below also has an unbalanced ')'. Restore from upstream. */
{
/*
 * Check CR0 & CR4 flags.
 */
if ((u32CR0 & (X86_CR0_PE | X86_CR0_PG)) != (X86_CR0_PE | X86_CR0_PG)) /* a bit paranoid perhaps.. */
{
return VERR_UNSUPPORTED_CPU_MODE;
}
/*
 * Check for sysenter and syscall usage.
 */
if (ASMHasCpuId())
{
/*
 * SYSENTER/SYSEXIT (NOTE(review): heading line was blank in this copy).
 *
 * Intel docs claim you should test both the flag and family, model &
 * stepping because some Pentium Pro CPUs have the SEP cpuid flag set,
 * but don't support it. AMD CPUs may support this feature in legacy
 * mode, they've banned it from long mode. Since we switch to 32-bit
 * mode when entering raw-mode context the feature would become
 * accessible again on AMD CPUs, so we have to check regardless of
 * host bitness.
 */
if ( (fFeatures & X86_CPUID_FEATURE_EDX_SEP)
|| u32Model >= 3
|| u32Stepping >= 3
|| !ASMIsIntelCpu())
)
{
/*
 * Read the MSR and see if it's in use or not.
 */
if (u32)
{
}
}
/*
 * SYSCALL/SYSRET (NOTE(review): heading line was blank in this copy).
 *
 * This feature is indicated by the SEP bit returned in EDX by CPUID
 * function 0x80000001. Intel CPUs only supports this feature in
 * long mode. Since we're not running 64-bit guests in raw-mode there
 * are no issues with 32-bit intel hosts.
 */
if ( cExt >= 0x80000001
&& cExt <= 0x8000ffff)
{
{
#ifdef RT_ARCH_X86
# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
# else
if (!ASMIsIntelCpu())
# endif
#endif
{
if (fEfer & MSR_K6_EFER_SCE)
{
Log(("CPUMR0Init: host uses syscall\n"));
}
}
}
}
}
/*
 * Check if debug registers are armed.
 * This ASSUMES that DR7.GD is not set, or that it's handled transparently!
 */
if (u32DR7 & X86_DR7_ENABLED_MASK)
{
}
return VINF_SUCCESS;
}
/**
 * Lazily loads the guest FPU/XMM state on the first FPU access (#NM trap),
 * saving the host state first, or decides the trap must be forwarded to the
 * guest. (NOTE(review): the brief and signature were elided from this copy;
 * presumably this is CPUMR0LoadGuestFPU - confirm against upstream.)
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param pCtx Pointer to the guest CPU context.
 */
/* NOTE(review): the body below is fragmentary - the fUseFlags test guarding the
 * guest-trap early return, the switch expression on pCtx->cr0 bits, the EFER
 * read/write sequences and the actual save/restore calls are all elided. */
{
/* If the FPU state has already been loaded, then it's a guest trap. */
{
Assert( ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
return VINF_EM_RAW_GUEST_TRAP;
}
/*
 * There are two basic actions:
 * 1. Save host fpu and restore guest fpu.
 * 2. Generate guest trap.
 *
 * When entering the hypervisor we'll always enable MP (for proper wait
 * is taken from the guest OS in order to get proper SSE handling.
 *
 *
 * Actions taken depending on the guest CR0 flags:
 *
 * 3 2 1
 * TS | EM | MP | FPUInstr | WAIT :: VMM Action
 * ------------------------------------------------------------------------
 * 0 | 0 | 0 | Exec | Exec :: Clear TS & MP, Save HC, Load GC.
 * 0 | 0 | 1 | Exec | Exec :: Clear TS, Save HC, Load GC.
 * 0 | 1 | 0 | #NM | Exec :: Clear TS & MP, Save HC, Load GC.
 * 0 | 1 | 1 | #NM | Exec :: Clear TS, Save HC, Load GC.
 * 1 | 0 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
 * 1 | 0 | 1 | #NM | #NM :: Go to guest taking trap there.
 * 1 | 1 | 0 | #NM | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
 * 1 | 1 | 1 | #NM | #NM :: Go to guest taking trap there.
 */
/* NOTE(review): the 'switch (...)' line for the dispatch below is elided. */
{
case X86_CR0_MP | X86_CR0_TS:
return VINF_EM_RAW_GUEST_TRAP;
default:
break;
}
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
if (CPUMIsGuestInLongModeEx(pCtx))
{
/* Save the host state and record the fact (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM). */
/* Restore the state on entry as we need to be in 64 bits mode to access the full state. */
}
else
#endif
{
# if defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_KERNEL_USING_XMM) /** @todo remove the #else here and move cpumHandleLazyFPUAsm back to VMMGC after branching out 3.0!!. */
/** @todo Move the FFXR handling down into
 * cpumR0SaveHostRestoreguestFPUState to optimize the
 * VBOX_WITH_KERNEL_USING_XMM handling. */
/* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
{
if (SavedEFER & MSR_K6_EFER_FFXSR)
{
}
}
/* Do the job and record that we've switched FPU state. */
/* Restore EFER. */
# else
uint64_t oldMsrEFERHost = 0;
/* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
{
/** @todo Do we really need to read this every time?? The host could change this on the fly though.
 * bird: what about starting by skipping the ASMWrMsr below if we didn't
 * change anything? Ditto for the stuff in CPUMR0SaveGuestFPU. */
if (oldMsrEFERHost & MSR_K6_EFER_FFXSR)
{
}
}
/* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
/* Restore EFER MSR */
/* CPUMHandleLazyFPU could have changed CR0; restore it. */
# endif
#else /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
/*
 * Save the FPU control word and MXCSR, so we can restore the state properly afterwards.
 */
/*
 * The MSR_K6_EFER_FFXSR feature is AMD only so far, but check the cpuid just in case Intel adds it in the future.
 *
 * MSR_K6_EFER_FFXSR changes the behaviour of fxsave and fxrstore: the XMM state isn't saved/restored
 */
{
/** @todo Do we really need to read this every time?? The host could change this on the fly though. */
if (msrEFERHost & MSR_K6_EFER_FFXSR)
{
/* fxrstor doesn't restore the XMM state! */
}
}
#endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
}
Assert((pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)) == (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM));
return VINF_SUCCESS;
}
/**
 * Saves the guest FPU/XMM state and restores the host state.
 * (NOTE(review): brief and signature elided from this copy; presumably
 * CPUMR0SaveGuestFPU - confirm against upstream.)
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param pCtx Pointer to the guest CPU context.
 */
/* NOTE(review): the body is fragmentary - the fUseFlags checks, the actual
 * save calls, the XMM register stash/unstash and EFER handling are elided. */
{
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
if (CPUMIsGuestInLongModeEx(pCtx))
{
{
}
/* else nothing to do; we didn't perform a world switch */
}
else
#endif
{
# ifdef VBOX_WITH_KERNEL_USING_XMM
/*
 * We've already saved the XMM registers in the assembly wrapper, so
 * we have to save them before saving the entire FPU state and put them
 * back afterwards.
 */
/** @todo This could be skipped if MSR_K6_EFER_FFXSR is set, but
 * I'm not able to test such an optimization tonight.
 * We could just all this in assembly. */
# endif
/* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
uint64_t oldMsrEFERHost = 0;
{
}
/* Restore EFER MSR */
# ifdef VBOX_WITH_KERNEL_USING_XMM
# endif
#else /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
# ifdef VBOX_WITH_KERNEL_USING_XMM
# error "Fix all the NM_TRAPS_IN_KERNEL_MODE code path. I'm not going to fix unused code now."
# endif
{
/* fxsave doesn't save the XMM state! */
}
/*
 * Restore the original FPU control word and MXCSR.
 */
#endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
}
return VINF_SUCCESS;
}
/**
 * Save guest debug state
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param pCtx Pointer to the guest CPU context.
 * @param fDR6 Whether to include DR6 or not.
 */
/* NOTE(review): signature elided (presumably CPUMR0SaveGuestDebugState); the
 * DR0-3/DR6 read statements are elided, and the '#else'/'#endif' pair below
 * has lost its opening '#if'. Restore from upstream. */
{
/* Save the guest's debug state. The caller is responsible for DR7. */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
if (CPUMIsGuestInLongModeEx(pCtx))
{
{
if (!fDR6) /* dr6 was already up-to-date */
}
}
else
#endif
{
#else
#endif
if (fDR6)
}
/*
 * Restore the host's debug state. DR0-3, DR6 and only then DR7!
 * DR7 contains 0x400 right now.
 */
return VINF_SUCCESS;
}
/**
 * Lazily sync in the debug state
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param pCtx Pointer to the guest CPU context.
 * @param fDR6 Whether to include DR6 or not.
 */
/* NOTE(review): signature elided (presumably CPUMR0LoadGuestDebugState); the
 * host-save and DR0-3 load statements are elided, and the '#else'/'#endif'
 * pair below has lost its opening '#if'. Restore from upstream. */
{
/* Save the host state. */
/* Activate the guest state DR0-3; DR7 is left to the caller. */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
if (CPUMIsGuestInLongModeEx(pCtx))
{
/* Restore the state on entry as we need to be in 64 bits mode to access the full state. */
}
else
#endif
{
#else
#endif
if (fDR6)
}
return VINF_SUCCESS;
}
/**
 * Save the host debug state
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 */
/* NOTE(review): signature elided; the DR register reads and the '#if' matching
 * the orphan '#else'/'#endif' below are elided. Restore from upstream. */
{
/* Save the host state. */
/* The AssertCompile documents that dr0..dr3 are laid out contiguously so they
 * can be copied as a block elsewhere. */
AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
#else
#endif
/** @todo dr7 might already have been changed to 0x400; don't care right now as it's harmless. */
/* Make sure DR7 is harmless or else we could trigger breakpoints when restoring dr0-3 (!) */
return VINF_SUCCESS;
}
/**
 * Load the host debug state
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 */
/* NOTE(review): signature elided; the DR register writes and the '#if'
 * matching the orphan '#else'/'#endif' below are elided. */
{
/*
 * Restore the host's debug state. DR0-3, DR6 and only then DR7!
 * DR7 contains 0x400 right now.
 */
/* The AssertCompile documents that dr0..dr3 are laid out contiguously so they
 * can be copied as a block elsewhere. */
AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
#else
#endif
return VINF_SUCCESS;
}
/**
 * Lazily sync in the hypervisor debug state
 *
 * @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param pCtx Pointer to the guest CPU context.
 * @param fDR6 Whether to include DR6 or not.
 */
/* NOTE(review): signature elided (presumably CPUMR0LoadHyperDebugState). Both
 * visible paths currently AssertFailed() and return VERR_NOT_IMPLEMENTED; the
 * '#else'/'#endif' below has lost its opening '#if' and the trailing
 * 'if (fDR6)'/'return VINF_SUCCESS' appear unreachable as shown. */
{
/* Save the host state. */
/* Activate the guest state DR0-3; DR7 is left to the caller. */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
if (CPUMIsGuestInLongModeEx(pCtx))
{
AssertFailed();
return VERR_NOT_IMPLEMENTED;
}
else
#endif
{
AssertFailed();
return VERR_NOT_IMPLEMENTED;
#else
#endif
if (fDR6)
}
return VINF_SUCCESS;
}
/**
 * Worker for cpumR0MapLocalApics. Check each CPU for a present Local APIC.
 * Play safe and treat each CPU separate.
 *
 * @param idCpu The identifier for the CPU the function is called on.
 * @param pvUser1 Ignored.
 * @param pvUser2 Ignored.
 */
/* NOTE(review): signature elided (an RTMpOnAll-style worker, per the params
 * above); the CPUID calls populating u32EBX/u32EDX/u32MaxIdx/u32PhysBits and
 * the MSR read of the APIC base are elided. */
{
/* Only probe on vendors known to have a local APIC: Intel, AMD, VIA. */
if ( ( ( u32EBX == X86_CPUID_VENDOR_INTEL_EBX
&& u32EDX == X86_CPUID_VENDOR_INTEL_EDX)
|| ( u32EBX == X86_CPUID_VENDOR_AMD_EBX
&& u32EDX == X86_CPUID_VENDOR_AMD_EDX)
|| ( u32EBX == X86_CPUID_VENDOR_VIA_EBX
&& u32EDX == X86_CPUID_VENDOR_VIA_EDX))
&& u32MaxIdx >= 1)
{
/* Require both an APIC and MSR support (needed to read IA32_APIC_BASE). */
if ( (u32EDX & X86_CPUID_FEATURE_EDX_APIC)
&& (u32EDX & X86_CPUID_FEATURE_EDX_MSR))
{
/* see Intel Manual: Local APIC Status and Location: MAXPHYADDR default is bit 36 */
{
u32PhysBits &= 0xff;
}
}
}
}
/**
 * Map the MMIO page of each local APIC in the system.
 *
 * @returns VBox status code: VERR_TOO_MANY_CPUS if the host has more CPUs than
 *          g_aLApics can hold, otherwise the mapping status or VINF_SUCCESS.
 */
/* NOTE(review): the body is fragmentary - the RTMpOnAll probe call, the loop
 * header over g_aLApics, the RTR0MemObj allocation/mapping calls that set 'rc',
 * and the APIC version checks are all elided from this copy. */
static int cpumR0MapLocalApics(void)
{
/*
 * Check that we'll always stay within the array bounds.
 */
{
LogRel(("CPUM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_aLApics)));
return VERR_TOO_MANY_CPUS;
}
/*
 * Create mappings for all online CPUs we think have APICs.
 */
/** @todo r=bird: This code is not adequately handling CPUs that are
 * offline or unplugged at init time and later bought into action. */
{
{
if (RT_SUCCESS(rc))
{
if (RT_SUCCESS(rc))
{
/*
 * 0x0X 82489 external APIC
 * 0x1X Local APIC
 * 0x2X..0xFF reserved
 */
/** @todo r=bird: The local APIC is usually at the same address for all CPUs,
 * and therefore inaccessible by the other CPUs. */
{
Log(("CPUM: APIC %02u at %RGp (mapped at %p) - ver %#x, lint0=%#x lint1=%#x pc=%#x thmr=%#x\n",
));
continue;
}
}
}
}
}
/* On failure, report the mapping error (cleanup statements elided in this copy). */
if (RT_FAILURE(rc))
{
return rc;
}
return VINF_SUCCESS;
}
/**
 * Unmap the Local APIC of all host CPUs.
 */
/* NOTE(review): the loop header over g_aLApics and the unmap/free statements
 * have been elided from this copy - only the brace skeleton remains. */
static void cpumR0UnmapLocalApics(void)
{
{
{
}
}
}
/**
 * Write the Local APIC mapping address of the current host CPU to CPUM to be
 * able to access the APIC registers in the raw mode switcher for disabling/
 * re-enabling the NMI. Must be called with disabled preemption or disabled
 * interrupts!
 *
 * @param pVM Pointer to the VM.
 * @param idHostCpu The ID of the current host CPU.
 */
/* NOTE(review): signature elided (presumably CPUMR0SetLApic, per the brief
 * above) and the body statement writing the mapping address is missing. */
{
}
#endif /* VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI */