/* $Id$ */
/** @file
 * CPUM - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */
/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/vmm/cpum.h>
#include "CPUMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/vmm/hm.h>
#include <iprt/assert.h>
#include <iprt/asm-amd64-x86.h>
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
# include <iprt/memobj.h>
# include <VBox/apic.h>
#endif
/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
/**
 * Local APIC mappings.
 */
typedef struct CPUMHOSTLAPIC
{
    /** Indicates that the entry is in use and has valid data. */
    bool        fEnabled;
    /** Whether it's operating in X2APIC mode (EXTD). */
    bool        fX2Apic;
    /** The APIC version number. */
    uint32_t    uVersion;
    /** Has APIC_REG_LVT_THMR.  Not used. */
    uint32_t    fHasThermal;
    /** The physical address of the APIC registers. */
    RTHCPHYS    PhysBase;
    /** The memory object backing the physical address. */
    RTR0MEMOBJ  hMemObj;
    /** The mapping object for hMemObj. */
    RTR0MEMOBJ  hMapObj;
    /** The mapping address of the APIC registers.
     * @remarks Different CPUs may use the same physical address to map their
     *          APICs, so this pointer is only valid when on the CPU owning the
     *          APIC. */
    void       *pv;
} CPUMHOSTLAPIC;
#endif
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
/** Local APIC mappings, indexed by the CPU set index (RTMpCpuIdToSetIndex). */
static CPUMHOSTLAPIC g_aLApics[RTCPUSET_MAX_CPUS];
#endif
/**
 * CPUID bits to unify among all cores.
 */
static struct
{
    /** The CPUID leaf to check. */
    uint32_t uLeaf;
    /** The ECX feature bits to unify among all cores. */
    uint32_t uEcx;
    /** The EDX feature bits to unify among all cores. */
    uint32_t uEdx;
}
const g_aCpuidUnifyBits[] =
{
    {
        0x00000001,
        X86_CPUID_FEATURE_ECX_CX16 | X86_CPUID_FEATURE_ECX_MONITOR,
        X86_CPUID_FEATURE_EDX_CX8
    }
};
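
/* How the unification works, as a minimal sketch (illustrative values only,
   not taken from any particular host): each CPU ANDs the shared leaf with the
   feature bits it actually reports, so a bit survives only if every core has
   it.  E.g. for a core that lacks MONITOR:

       ecxThisCpu = 0;                          // no MONITOR on this core
       leaf.ecx  &= ecxThisCpu | ~uEcx;         // clears MONITOR in the leaf

   A core that does have MONITOR leaves the bit untouched, so the surviving
   value is the intersection across all cores (see cpumR0CheckCpuid below). */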
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
static int  cpumR0MapLocalApics(void);
static void cpumR0UnmapLocalApics(void);
#endif
/**
 * Does the Ring-0 CPU initialization once during module load.
 * XXX Host-CPU hot-plugging?
 */
VMMR0_INT_DECL(int) CPUMR0ModuleInit(void)
{
    int rc = VINF_SUCCESS;
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    rc = cpumR0MapLocalApics();
#endif
    return rc;
}
/**
 * Terminate the module.
 */
VMMR0_INT_DECL(int) CPUMR0ModuleTerm(void)
{
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    cpumR0UnmapLocalApics();
#endif
    return VINF_SUCCESS;
}
/**
 * Check the CPUID features of this particular CPU and disable relevant features
 * for the guest which do not exist on this CPU.  We have seen systems where the
 * X86_CPUID_FEATURE_ECX_MONITOR feature flag is only set on some host CPUs, see
 * @bugref{5436}.
 *
 * @note This function might be called simultaneously on more than one CPU!
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     Pointer to the VM structure.
 * @param   pvUser2     Ignored.
 */
static DECLCALLBACK(void) cpumR0CheckCpuid(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PVM pVM = (PVM)pvUser1;
    NOREF(idCpu); NOREF(pvUser2);

    for (uint32_t i = 0; i < RT_ELEMENTS(g_aCpuidUnifyBits); i++)
    {
        /* Note! Cannot use cpumCpuIdGetLeaf from here because we're not
           necessarily in the VM process context.  So, we use the
           legacy arrays as temporary storage. */
        uint32_t const uLeaf = g_aCpuidUnifyBits[i].uLeaf;
        PCPUMCPUID     pLegacyLeaf;
        if (uLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
            pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdStd[uLeaf];
        else if (uLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
            pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdExt[uLeaf - UINT32_C(0x80000000)];
        else
            continue;

        uint32_t eax, ebx, ecx, edx;
        ASMCpuIdExSlow(uLeaf, 0, 0, 0, &eax, &ebx, &ecx, &edx);

        ASMAtomicAndU32((uint32_t volatile *)&pLegacyLeaf->ecx, ecx | ~g_aCpuidUnifyBits[i].uEcx);
        ASMAtomicAndU32((uint32_t volatile *)&pLegacyLeaf->edx, edx | ~g_aCpuidUnifyBits[i].uEdx);
    }
}
/**
 * Does Ring-0 CPUM initialization.
 *
 * This is mainly to check that the Host CPU mode is compatible
 * with VBox.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
VMMR0_INT_DECL(int) CPUMR0Init(PVM pVM)
{
    /*
     * Check CR0 & CR4 flags.
     */
    uint32_t u32CR0 = ASMGetCR0();
    if ((u32CR0 & (X86_CR0_PE | X86_CR0_PG)) != (X86_CR0_PE | X86_CR0_PG)) /* a bit paranoid perhaps.. */
    {
        Log(("CPUMR0Init: PE or PG not set. cr0=%#x\n", u32CR0));
        return VERR_UNSUPPORTED_CPU_MODE;
    }
    /*
     * Check for sysenter and syscall usage.
     */
    if (ASMHasCpuId())
    {
        /*
         * SYSENTER/SYSEXIT
         *
         * Intel docs claim you should test both the flag and family, model &
         * stepping because some Pentium Pro CPUs have the SEP cpuid flag set,
         * but don't support it.  AMD CPUs may support this feature in legacy
         * mode, they've banned it from long mode.  Since we switch to 32-bit
         * mode when entering raw-mode context the feature would become
         * accessible again on AMD CPUs, so we have to check regardless of
         * host bitness.
         */
        uint32_t u32CpuVersion;
        uint32_t u32Dummy;
        uint32_t fFeatures;
        ASMCpuId(1, &u32CpuVersion, &u32Dummy, &u32Dummy, &fFeatures);
        uint32_t const u32Family   = u32CpuVersion >> 8;
        uint32_t const u32Model    = (u32CpuVersion >> 4) & 0xF;
        uint32_t const u32Stepping = u32CpuVersion & 0xF;
        if (    (fFeatures & X86_CPUID_FEATURE_EDX_SEP)
            &&  (   u32Family   != 6    /* (> pentium pro) */
                 || u32Model    >= 3
                 || u32Stepping >= 3
                 || !ASMIsIntelCpu())
           )
        {
            /*
             * Read the MSR and see if it's in use or not.
             */
            uint32_t u32 = ASMRdMsr_Low(MSR_IA32_SYSENTER_CS);
            if (u32)
            {
                pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSENTER;
                Log(("CPUMR0Init: host uses sysenter cs=%08x%08x\n", ASMRdMsr_High(MSR_IA32_SYSENTER_CS), u32));
            }
        }
        /*
         * SYSCALL/SYSRET
         *
         * This feature is indicated by the SEP bit returned in EDX by CPUID
         * function 0x80000001.  Intel CPUs only support this feature in
         * long mode.  Since we're not running 64-bit guests in raw-mode there
         * are no issues with 32-bit intel hosts.
         */
        uint32_t cExt = 0;
        ASMCpuId(0x80000000, &cExt, &u32Dummy, &u32Dummy, &u32Dummy);
        if (ASMIsValidExtRange(cExt))
        {
            uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001);
            if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
            {
#ifdef RT_ARCH_X86
# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
                if (fExtFeaturesEDX & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
# else
                if (!ASMIsIntelCpu())
# endif
#endif
                {
                    uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
                    if (fEfer & MSR_K6_EFER_SCE)
                    {
                        pVM->cpum.s.fHostUseFlags |= CPUM_USE_SYSCALL;
                        Log(("CPUMR0Init: host uses syscall\n"));
                    }
                }
            }
        }
        /*
         * Unify/cross check some CPUID feature bits on all available CPU cores
         * and threads.  We've seen CPUs where the monitor support differed.
         *
         * Because the hyper heap isn't always mapped into ring-0, we cannot
         * access it from a RTMpOnAll callback.  We use the legacy CPUID arrays
         * as temp ring-0 accessible memory instead, ASSUMING that they're all
         * up to date when we get here.
         */
        RTMpOnAll(cpumR0CheckCpuid, pVM, NULL);

        for (uint32_t i = 0; i < RT_ELEMENTS(g_aCpuidUnifyBits); i++)
        {
            bool            fIgnored;
            uint32_t const  uLeaf = g_aCpuidUnifyBits[i].uLeaf;
            PCPUMCPUIDLEAF  pLeaf = cpumCpuIdGetLeafEx(pVM, uLeaf, 0, &fIgnored);
            if (pLeaf)
            {
                PCPUMCPUID pLegacyLeaf;
                if (uLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
                    pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdStd[uLeaf];
                else if (uLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
                    pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdExt[uLeaf - UINT32_C(0x80000000)];
                else
                    continue;

                pLeaf->uEcx = pLegacyLeaf->ecx;
                pLeaf->uEdx = pLegacyLeaf->edx;
            }
        }
    }
    /*
     * Check if debug registers are armed.
     * This ASSUMES that DR7.GD is not set, or that it's handled transparently!
     */
    uint32_t u32DR7 = ASMGetDR7();
    if (u32DR7 & X86_DR7_ENABLED_MASK)
    {
        for (VMCPUID i = 0; i < pVM->cCpus; i++)
            pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
        Log(("CPUMR0Init: host uses debug registers (dr7=%x)\n", u32DR7));
    }

    return VINF_SUCCESS;
}
/**
 * Trap handler for device-not-available fault (\#NM).
 * Device not available, FP or (F)WAIT instruction.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS            if the guest FPU state is loaded.
 * @retval  VINF_EM_RAW_GUEST_TRAP  if it is a guest trap.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCtx        Pointer to the guest-CPU context.
 */
VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /* If the FPU state has already been loaded, then it's a guest trap. */
    if (CPUMIsGuestFPUStateActive(pVCpu))
    {
        Assert(    ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS))
               ||  ((pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS | X86_CR0_EM)));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * There are two basic actions:
     *   1. Save host fpu and restore guest fpu.
     *   2. Generate guest trap.
     *
     * When entering the hypervisor we'll always enable MP (for proper wait
     * trapping) and TS (for intercepting all fpu/mmx/sse stuff).  The EM flag
     * is taken from the guest OS in order to get proper SSE handling.
     *
     *
     * Actions taken depending on the guest CR0 flags:
     *
     *   3    2    1
     *  TS | EM | MP | FPUInstr | WAIT :: VMM Action
     * ------------------------------------------------------------------------
     *   0 |  0 |  0 | Exec     | Exec :: Clear TS & MP, Save HC, Load GC.
     *   0 |  0 |  1 | Exec     | Exec :: Clear TS, Save HC, Load GC.
     *   0 |  1 |  0 | #NM      | Exec :: Clear TS & MP, Save HC, Load GC.
     *   0 |  1 |  1 | #NM      | Exec :: Clear TS, Save HC, Load GC.
     *   1 |  0 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
     *   1 |  0 |  1 | #NM      | #NM  :: Go to guest taking trap there.
     *   1 |  1 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
     *   1 |  1 |  1 | #NM      | #NM  :: Go to guest taking trap there.
     */
    switch (pCtx->cr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS))
    {
        case X86_CR0_MP | X86_CR0_TS:
        case X86_CR0_MP | X86_CR0_TS | X86_CR0_EM:
            return VINF_EM_RAW_GUEST_TRAP;
        default:
            break;
    }

    return CPUMR0LoadGuestFPU(pVM, pVCpu, pCtx);
}
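
/* Reading one row of the table above as a worked example: guest CR0 with
   TS=1, EM=0, MP=1 (the "1 | 0 | 1" row) means both FPU instructions and
   (F)WAIT must trap inside the guest, so the switch above matches
   case X86_CR0_MP | X86_CR0_TS and reflects the #NM back to the guest
   rather than loading the guest FPU state. */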
/**
 * Saves the host-FPU/XMM state and loads the guest-FPU state into the CPU.
 *
 * @returns VBox status code.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCtx        Pointer to the guest-CPU context.
 */
VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(pCtx))
    {
        /* Save the host state and record the fact (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM). */
        cpumR0SaveHostFPUState(&pVCpu->cpum.s);

        /* Restore the state on entry as we need to be in 64-bit mode to access the full state. */
        pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_FPU_STATE;
    }
    else
#endif
    {
        NOREF(pCtx);
        /** @todo Move the FFXR handling down into
         *        cpumR0SaveHostRestoreGuestFPUState to optimize the
         *        VBOX_WITH_KERNEL_USING_XMM handling. */
        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
        uint64_t uHostEfer    = 0;
        bool     fRestoreEfer = false;
        if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
        {
            /** @todo r=ramshankar: Can't we use a cached value here
             *        instead of reading the MSR? host EFER doesn't usually
             *        change. */
            uHostEfer = ASMRdMsr(MSR_K6_EFER);
            if (uHostEfer & MSR_K6_EFER_FFXSR)
            {
                ASMWrMsr(MSR_K6_EFER, uHostEfer & ~MSR_K6_EFER_FFXSR);
                fRestoreEfer = true;
            }
        }

        /* Do the job and record that we've switched FPU state. */
        cpumR0SaveHostRestoreGuestFPUState(&pVCpu->cpum.s);

        /* Restore EFER. */
        if (fRestoreEfer)
            ASMWrMsr(MSR_K6_EFER, uHostEfer);
    }

    Assert((pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)) == (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM));
    return VINF_SUCCESS;
}
/**
 * Saves the guest FPU/XMM state and restores the host state.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCtx        Pointer to the guest CPU context.
 */
VMMR0_INT_DECL(int) CPUMR0SaveGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    Assert(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU);
    NOREF(pVM); NOREF(pCtx);

#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(pCtx))
    {
        if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE))
        {
            HMR0SaveFPUState(pVM, pVCpu, pCtx);
            cpumR0RestoreHostFPUState(&pVCpu->cpum.s);
        }
        /* else nothing to do; we didn't perform a world switch */
    }
    else
#endif
    {
#ifdef VBOX_WITH_KERNEL_USING_XMM
        /*
         * We've already saved the XMM registers in the assembly wrapper, so
         * we have to save them before saving the entire FPU state and put them
         * back afterwards.
         */
        /** @todo This could be skipped if MSR_K6_EFER_FFXSR is set, but
         *        I'm not able to test such an optimization tonight.
         *        We could just do all this in assembly. */
        uint128_t aGuestXmmRegs[16];
        memcpy(&aGuestXmmRegs[0], &pVCpu->cpum.s.Guest.fpu.aXMM[0], sizeof(aGuestXmmRegs));
#endif

        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
        uint64_t uHostEfer    = 0;
        bool     fRestoreEfer = false;
        if (pVM->cpum.s.CPUFeaturesExt.edx & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
        {
            uHostEfer = ASMRdMsr(MSR_K6_EFER);
            if (uHostEfer & MSR_K6_EFER_FFXSR)
            {
                ASMWrMsr(MSR_K6_EFER, uHostEfer & ~MSR_K6_EFER_FFXSR);
                fRestoreEfer = true;
            }
        }

        cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);

        /* Restore EFER MSR */
        if (fRestoreEfer)
            ASMWrMsr(MSR_K6_EFER, uHostEfer);

#ifdef VBOX_WITH_KERNEL_USING_XMM
        memcpy(&pVCpu->cpum.s.Guest.fpu.aXMM[0], &aGuestXmmRegs[0], sizeof(aGuestXmmRegs));
#endif
    }

    pVCpu->cpum.s.fUseFlags &= ~(CPUM_USED_FPU | CPUM_SYNC_FPU_STATE);
    return VINF_SUCCESS;
}
/**
 * Saves the host debug state, setting CPUM_USED_HOST_DEBUG_STATE and loading
 * DR7 with safe values.
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMR0_INT_DECL(int) CPUMR0SaveHostDebugState(PVMCPU pVCpu)
{
    /*
     * Save the host state.
     */
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
    cpumR0SaveDRx(&pVCpu->cpum.s.Host.dr0);
#else
    pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
    pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
    pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
    pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
#endif
    pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
    /** @todo dr7 might already have been changed to 0x400; don't care right now as it's harmless. */
    pVCpu->cpum.s.Host.dr7 = ASMGetDR7();

    /* Preemption paranoia. */
    ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HOST);

    /*
     * Make sure DR7 is harmless or else we could trigger breakpoints when
     * loading guest or hypervisor DRx values later.
     */
    ASMSetDR7(X86_DR7_INIT_VAL);

    return VINF_SUCCESS;
}
/**
 * Saves the guest DRx state residing in host registers and restores the host
 * register values.
 *
 * The guest DRx state is only saved if CPUMR0LoadGuestDebugState was called,
 * since it's assumed that we're shadowing the guest DRx register values
 * accurately when using the combined hypervisor debug register values
 * (CPUMR0LoadHyperDebugState).
 *
 * @returns true if either guest or hypervisor debug registers were loaded.
 * @param   pVCpu       The cross context CPU structure for the calling EMT.
 * @param   fDr6        Whether to include DR6 or not.
 * @thread  EMT(pVCpu)
 */
VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDr6)
{
    bool const fDrXLoaded = RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER));

    /*
     * Do we need to save the guest DRx registers loaded into host registers?
     * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
     */
    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
    {
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
        {
            uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
            HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
            if (!fDr6)
                pVCpu->cpum.s.Guest.dr[6] = uDr6;
        }
        else
#endif
        {
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
            cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
#else
            pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
            pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
            pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
            pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
#endif
            if (fDr6)
                pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
        }
    }
    ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~(  CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER
                                                | CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER));

    /*
     * Restore the host's debug state.  DR0-3, DR6 and only then DR7!
     */
    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST)
    {
        /* A bit of paranoia first... */
        uint64_t uCurDR7 = ASMGetDR7();
        if (uCurDR7 != X86_DR7_INIT_VAL)
            ASMSetDR7(X86_DR7_INIT_VAL);

#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
        cpumR0LoadDRx(&pVCpu->cpum.s.Host.dr0);
#else
        ASMSetDR0(pVCpu->cpum.s.Host.dr0);
        ASMSetDR1(pVCpu->cpum.s.Host.dr1);
        ASMSetDR2(pVCpu->cpum.s.Host.dr2);
        ASMSetDR3(pVCpu->cpum.s.Host.dr3);
#endif
        /** @todo consider only updating if they differ, esp. DR6. Need to figure how
         *        expensive DRx reads are over DRx writes. */
        ASMSetDR6(pVCpu->cpum.s.Host.dr6);
        ASMSetDR7(pVCpu->cpum.s.Host.dr7);

        ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~CPUM_USED_DEBUG_REGS_HOST);
    }

    return fDrXLoaded;
}
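
//
// A minimal calling sketch (hypothetical caller shown for illustration; the
// real callers live in the HM/raw-mode code):
//
//     /* Leaving guest execution: get the guest DRx values out of the host
//        registers and re-arm the host's own debug setup. */
//     if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* fDr6 */))
//         Log(("CPUM: guest/hyper debug registers were active\n"));
//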
/**
 * Saves the guest DRx state if it resides in host registers.
 *
 * This does NOT clear any use flags, so the host registers remain loaded with
 * the guest DRx state upon return.  The purpose is only to make sure the
 * values in the CPU context structure are up to date.
 *
 * @returns true if the host registers contain guest values, false if not.
 * @param   pVCpu       The cross context CPU structure for the calling EMT.
 * @param   fDr6        Whether to include DR6 or not.
 * @thread  EMT(pVCpu)
 */
VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPU pVCpu, bool fDr6)
{
    /*
     * Do we need to save the guest DRx registers loaded into host registers?
     * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
     */
    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
    {
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
        {
            uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
            HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
            if (!fDr6)
                pVCpu->cpum.s.Guest.dr[6] = uDr6;
        }
        else
#endif
        {
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
            cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
#else
            pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
            pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
            pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
            pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
#endif
            if (fDr6)
                pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
        }
        return true;
    }
    return false;
}
/**
 * Lazily sync in the debug state.
 *
 * @param   pVCpu       The cross context CPU structure for the calling EMT.
 * @param   fDr6        Whether to include DR6 or not.
 * @thread  EMT(pVCpu)
 */
VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDr6)
{
    /*
     * Save the host state and disarm all host BPs.
     */
    CPUMR0SaveHostDebugState(pVCpu);
    Assert(ASMGetDR7() == X86_DR7_INIT_VAL);

    /*
     * Activate the guest state DR0-3.
     * DR7 and DR6 (if fDr6 is true) are left to the caller.
     */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
        ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_GUEST); /* Postpone it to the world switch. */
    else
#endif
    {
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cpumR0LoadDRx(&pVCpu->cpum.s.Guest.dr[0]);
#else
        ASMSetDR0(pVCpu->cpum.s.Guest.dr[0]);
        ASMSetDR1(pVCpu->cpum.s.Guest.dr[1]);
        ASMSetDR2(pVCpu->cpum.s.Guest.dr[2]);
        ASMSetDR3(pVCpu->cpum.s.Guest.dr[3]);
#endif
        if (fDr6)
            ASMSetDR6(pVCpu->cpum.s.Guest.dr[6]);

        ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_GUEST);
    }
}
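
//
// Illustrative usage (hypothetical caller): this function loads DR0-3 (and
// optionally DR6), and the caller finishes the job with DR6/DR7, as the
// function comment above requires:
//
//     CPUMR0LoadGuestDebugState(pVCpu, false /* fDr6 */);
//     ASMSetDR6(pVCpu->cpum.s.Guest.dr[6]);
//     ASMSetDR7(pVCpu->cpum.s.Guest.dr[7]);
//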
/**
 * Lazily sync in the hypervisor debug state.
 *
 * @param   pVCpu       The cross context CPU structure for the calling EMT.
 * @param   fDr6        Whether to include DR6 or not.
 * @thread  EMT(pVCpu)
 */
VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDr6)
{
    /*
     * Save the host state and disarm all host BPs.
     */
    CPUMR0SaveHostDebugState(pVCpu);
    Assert(ASMGetDR7() == X86_DR7_INIT_VAL);

    /*
     * Make sure the hypervisor values are up to date.
     */
    CPUMRecalcHyperDRx(pVCpu, UINT8_MAX /* no loading, please */, true);

    /*
     * Activate the guest state DR0-3.
     * DR7 and DR6 (if fDr6 is true) are left to the caller.
     */
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
        ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_HYPER); /* Postpone it to the world switch. */
    else
#endif
    {
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cpumR0LoadDRx(&pVCpu->cpum.s.Hyper.dr[0]);
#else
        ASMSetDR0(pVCpu->cpum.s.Hyper.dr[0]);
        ASMSetDR1(pVCpu->cpum.s.Hyper.dr[1]);
        ASMSetDR2(pVCpu->cpum.s.Hyper.dr[2]);
        ASMSetDR3(pVCpu->cpum.s.Hyper.dr[3]);
#endif
        if (fDr6)
            ASMSetDR6(X86_DR6_INIT_VAL);

        ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HYPER);
    }
}
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI

/**
 * Per-CPU callback that probes the CPU for APIC support.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     Ignored.
 * @param   pvUser2     Ignored.
 */
static DECLCALLBACK(void) cpumR0MapLocalApicCpuProber(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(pvUser1); NOREF(pvUser2);
    int iCpu = RTMpCpuIdToSetIndex(idCpu);
    AssertReturnVoid(iCpu >= 0 && (unsigned)iCpu < RT_ELEMENTS(g_aLApics));

    /*
     * Check for APIC support.
     */
    uint32_t uMaxLeaf, u32EBX, u32ECX, u32EDX;
    ASMCpuId(0, &uMaxLeaf, &u32EBX, &u32ECX, &u32EDX);
    if (   (   ASMIsIntelCpuEx(u32EBX, u32ECX, u32EDX)
            || ASMIsAmdCpuEx(u32EBX, u32ECX, u32EDX)
            || ASMIsViaCentaurCpuEx(u32EBX, u32ECX, u32EDX))
        && ASMIsValidStdRange(uMaxLeaf))
    {
        uint32_t uDummy;
        ASMCpuId(1, &uDummy, &u32EBX, &u32ECX, &u32EDX);
        if (    (u32EDX & X86_CPUID_FEATURE_EDX_APIC)
            &&  (u32EDX & X86_CPUID_FEATURE_EDX_MSR))
        {
            /*
             * Safe to access the MSR.  Read it and calc the BASE (a little complicated).
             */
            uint64_t u64ApicBase = ASMRdMsr(MSR_IA32_APICBASE);
            uint64_t u64Mask     = UINT64_C(0x0000000ffffff000);

            /* see Intel Manual: Local APIC Status and Location: MAXPHYADDR default is bit 36 */
            uint32_t uMaxExtLeaf;
            ASMCpuId(0x80000000, &uMaxExtLeaf, &u32EBX, &u32ECX, &u32EDX);
            if (   uMaxExtLeaf >= UINT32_C(0x80000008)
                && ASMIsValidExtRange(uMaxExtLeaf))
            {
                uint32_t u32PhysBits;
                ASMCpuId(0x80000008, &u32PhysBits, &u32EBX, &u32ECX, &u32EDX);
                u32PhysBits &= 0xff;
                u64Mask = ((UINT64_C(1) << u32PhysBits) - 1) & UINT64_C(0xfffffffffffff000);
            }

            g_aLApics[iCpu].PhysBase = u64ApicBase & u64Mask;
            g_aLApics[iCpu].fEnabled = RT_BOOL(u64ApicBase & MSR_IA32_APICBASE_EN);
            g_aLApics[iCpu].fX2Apic  =    (u64ApicBase & (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN))
                                       == (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN);
        }
    }
}
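
/* Worked example for the masking above (illustrative values): a CPU that
   reports MAXPHYADDR = 36 in CPUID leaf 0x80000008 gives

       u64Mask = ((1 << 36) - 1) & ~0xfff = 0x0000000FFFFFF000

   so an IA32_APIC_BASE value of 0xfee00900 yields PhysBase = 0xfee00000;
   the low status bits (BSP, EN) are masked off along with the rest of the
   bottom 12 bits. */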
/**
 * Per-CPU callback that verifies our APIC expectations.
 *
 * @param   idCpu       The identifier for the CPU the function is called on.
 * @param   pvUser1     Ignored.
 * @param   pvUser2     Ignored.
 */
static DECLCALLBACK(void) cpumR0MapLocalApicCpuChecker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(pvUser1); NOREF(pvUser2);

    int iCpu = RTMpCpuIdToSetIndex(idCpu);
    AssertReturnVoid(iCpu >= 0 && (unsigned)iCpu < RT_ELEMENTS(g_aLApics));
    if (!g_aLApics[iCpu].fEnabled)
        return;

    /*
     * 0x0X       82489 external APIC
     * 0x1X       Local APIC
     * 0x2X..0xFF reserved
     */
    uint32_t uApicVersion;
    if (g_aLApics[iCpu].fX2Apic)
        uApicVersion = ApicX2RegRead32(APIC_REG_VERSION);
    else
        uApicVersion = ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_VERSION);
    if ((APIC_REG_VERSION_GET_VER(uApicVersion) & 0xF0) == 0x10)
    {
        g_aLApics[iCpu].uVersion    = uApicVersion;
        g_aLApics[iCpu].fHasThermal = APIC_REG_VERSION_GET_MAX_LVT(uApicVersion) >= 5;

#if 0 /* enable if you need it. */
        if (g_aLApics[iCpu].fX2Apic)
            SUPR0Printf("CPUM: X2APIC %02u - ver %#010x, lint0=%#07x lint1=%#07x pc=%#07x thmr=%#07x cmci=%#07x\n",
                        iCpu, uApicVersion,
                        ApicX2RegRead32(APIC_REG_LVT_LINT0), ApicX2RegRead32(APIC_REG_LVT_LINT1),
                        ApicX2RegRead32(APIC_REG_LVT_PC), ApicX2RegRead32(APIC_REG_LVT_THMR),
                        ApicX2RegRead32(APIC_REG_LVT_CMCI));
        else
            SUPR0Printf("CPUM: APIC %02u at %RGp (mapped at %p) - ver %#010x, lint0=%#07x lint1=%#07x pc=%#07x thmr=%#07x cmci=%#07x\n",
                        iCpu, g_aLApics[iCpu].PhysBase, g_aLApics[iCpu].pv, uApicVersion,
                        ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_LINT0), ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_LINT1),
                        ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_PC), ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_THMR),
                        ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_CMCI));
#endif
    }
    else
    {
        g_aLApics[iCpu].fEnabled = false;
        g_aLApics[iCpu].fX2Apic  = false;
        SUPR0Printf("VBox/CPUM: Unsupported APIC version %#x (iCpu=%d)\n", uApicVersion, iCpu);
    }
}
/**
 * Map the MMIO page of each local APIC in the system.
 */
static int cpumR0MapLocalApics(void)
{
    /*
     * Check that we'll always stay within the array bounds.
     */
    if (RTMpGetArraySize() > RT_ELEMENTS(g_aLApics))
    {
        LogRel(("CPUM: Too many real CPUs/cores/threads - %u, max %u\n", RTMpGetArraySize(), RT_ELEMENTS(g_aLApics)));
        return VERR_TOO_MANY_CPUS;
    }

    /*
     * Create mappings for all online CPUs we think have legacy APICs.
     */
    int rc = RTMpOnAll(cpumR0MapLocalApicCpuProber, NULL, NULL);

    for (unsigned iCpu = 0; RT_SUCCESS(rc) && iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
    {
        if (g_aLApics[iCpu].fEnabled && !g_aLApics[iCpu].fX2Apic)
        {
            rc = RTR0MemObjEnterPhys(&g_aLApics[iCpu].hMemObj, g_aLApics[iCpu].PhysBase,
                                     PAGE_SIZE, RTMEM_CACHE_POLICY_MMIO);
            if (RT_SUCCESS(rc))
            {
                rc = RTR0MemObjMapKernel(&g_aLApics[iCpu].hMapObj, g_aLApics[iCpu].hMemObj,
                                         (void *)-1, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                if (RT_SUCCESS(rc))
                {
                    g_aLApics[iCpu].pv = RTR0MemObjAddress(g_aLApics[iCpu].hMapObj);
                    continue;
                }
                RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);
            }
            g_aLApics[iCpu].fEnabled = false;
        }
        g_aLApics[iCpu].pv = NULL;
    }

    /*
     * Check the APICs.
     */
    if (RT_SUCCESS(rc))
        rc = RTMpOnAll(cpumR0MapLocalApicCpuChecker, NULL, NULL);

    if (RT_FAILURE(rc))
    {
        cpumR0UnmapLocalApics();
        return rc;
    }

#ifdef LOG_ENABLED
    /*
     * Log the result (pretty useless, requires enabling CPUM in VBoxDrv
     * and !VBOX_WITH_R0_LOGGING).
     */
    if (LogIsEnabled())
    {
        uint32_t cEnabled = 0;
        uint32_t cX2Apics = 0;
        for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
            if (g_aLApics[iCpu].fEnabled)
            {
                cEnabled++;
                cX2Apics += g_aLApics[iCpu].fX2Apic;
            }
        Log(("CPUM: %u APICs, %u X2APICs\n", cEnabled, cX2Apics));
    }
#endif

    return VINF_SUCCESS;
}
/**
 * Unmap the Local APIC of all host CPUs.
 */
static void cpumR0UnmapLocalApics(void)
{
    for (unsigned iCpu = RT_ELEMENTS(g_aLApics); iCpu-- > 0;)
    {
        if (g_aLApics[iCpu].pv)
        {
            RTR0MemObjFree(g_aLApics[iCpu].hMapObj, true /* fFreeMappings */);
            RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);
            g_aLApics[iCpu].hMapObj  = NIL_RTR0MEMOBJ;
            g_aLApics[iCpu].hMemObj  = NIL_RTR0MEMOBJ;
            g_aLApics[iCpu].fEnabled = false;
            g_aLApics[iCpu].fX2Apic  = false;
            g_aLApics[iCpu].pv       = NULL;
        }
    }
}
/**
 * Updates CPUMCPU::pvApicBase and CPUMCPU::fX2Apic prior to world switch.
 *
 * Writes the Local APIC mapping address of the current host CPU to CPUMCPU so
 * the world switchers can access the APIC registers for the purpose of
 * disabling and re-enabling the NMIs.  Must be called with disabled preemption
 * or disabled interrupts!
 *
 * @param   pVCpu       Pointer to the cross context CPU structure of the
 *                      calling EMT.
 * @param   idHostCpu   The ID of the current host CPU.
 */
VMMR0_INT_DECL(void) CPUMR0SetLApic(PVMCPU pVCpu, RTCPUID idHostCpu)
{
    int idxCpu = RTMpCpuIdToSetIndex(idHostCpu);
    AssertReturnVoid(idxCpu >= 0 && (unsigned)idxCpu < RT_ELEMENTS(g_aLApics));
    pVCpu->cpum.s.pvApicBase = g_aLApics[idxCpu].pv;
    pVCpu->cpum.s.fX2Apic    = g_aLApics[idxCpu].fX2Apic;
//  Log6(("CPUMR0SetLApic: pvApicBase=%p fX2Apic=%d\n", g_aLApics[idxCpu].pv, g_aLApics[idxCpu].fX2Apic));
}
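
//
// Illustrative usage (hypothetical caller; in practice the HM code does this
// with preemption disabled, just before the world switch):
//
//     RTCPUID const idHostCpu = RTMpCpuId();
//     CPUMR0SetLApic(pVCpu, idHostCpu);
//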
#endif /* VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI */