CPUMR3Db.cpp revision aae8a6a38fd27661046ab1d06cb2cb5c096c40ed
/* $Id$ */
/** @file
* CPUM - CPU database part.
*/
/*
* Copyright (C) 2013-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include "CPUMInternal.h"
#include <iprt/asm-amd64-x86.h>
/*******************************************************************************
* Structures and Typedefs *
*******************************************************************************/
typedef struct CPUMDBENTRY
{
/** The CPU name. */
const char *pszName;
/** The full CPU name. */
const char *pszFullName;
    /** The CPU vendor (CPUMCPUVENDOR). */
    uint8_t         enmVendor;
    /** The CPU family. */
    uint8_t         uFamily;
    /** The CPU model. */
    uint8_t         uModel;
    /** The CPU stepping. */
    uint8_t         uStepping;
    /** The microarchitecture. */
    CPUMMICROARCH   enmMicroarch;
    /** Scalable bus frequency used for reporting other frequencies. */
    uint64_t        uScalableBusFreq;
    /** Flags (TBD). */
    uint32_t        fFlags;
    /** The maximum physical address width of the CPU. This should correspond to
     * the value in CPUID leaf 0x80000008 when present. */
    uint8_t         cMaxPhysAddrWidth;
    /** Pointer to an array of CPUID leaves. */
    PCCPUMCPUIDLEAF paCpuIdLeaves;
    /** The number of CPUID leaves in the array paCpuIdLeaves points to. */
    uint32_t        cCpuIdLeaves;
    /** The method used to deal with unknown CPUID leaves. */
    CPUMUNKNOWNCPUID enmUnknownCpuId;
    /** The default unknown CPUID value. */
    uint32_t        uDefUnknownCpuId;
    /** MSR mask. Several microarchitectures ignore the higher bits of the MSR number. */
    uint32_t        fMsrMask;
    /** The number of ranges in the table pointed to by paMsrRanges. */
    uint32_t        cMsrRanges;
    /** MSR ranges for this CPU. */
    PCCPUMMSRRANGE  paMsrRanges;
} CPUMDBENTRY;
/*******************************************************************************
* Defined Constants And Macros *
*******************************************************************************/
/** @def NULL_ALONE
* For eliminating an unnecessary data dependency in standalone builds (for
* VBoxSVC). */
/** @def ZERO_ALONE
* For eliminating an unnecessary data size dependency in standalone builds (for
* VBoxSVC). */
#ifndef CPUM_DB_STANDALONE
# define NULL_ALONE(a_aTable)    a_aTable
# define ZERO_ALONE(a_cTable)    a_cTable
#else
# define NULL_ALONE(a_aTable)    NULL
# define ZERO_ALONE(a_cTable) 0
#endif
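/*
 * Illustrative sketch (not taken from the cpus/*.h headers): a CPUMDBENTRY
 * initializer would typically wrap its MSR table pointer and count like this,
 * so the standalone VBoxSVC build drops the data dependency.  The table name
 * g_aMsrRanges_Example is hypothetical.
 *
 *      NULL_ALONE(g_aMsrRanges_Example),
 *      ZERO_ALONE(RT_ELEMENTS(g_aMsrRanges_Example)),
 */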
/** @name Short macros for the MSR range entries.
*
* These are rather cryptic, but this is to reduce the attack on the right
* margin.
*
* @{ */
/** Alias one MSR onto another (a_uTarget). */
/** Function handlers handle everything. */
#define MFN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
/** Function handlers handle everything, with GP mask. */
RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, a_fWrGpMask, a_szName)
/** Function handlers, read-only. */
RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_ReadOnly, 0, 0, 0, UINT64_MAX, a_szName)
/** Function handlers, ignore all writes. */
RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_IgnoreWrite, 0, 0, UINT64_MAX, 0, a_szName)
/** Function handlers, with value. */
RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, 0, 0, a_szName)
/** Function handlers, with write ignore mask. */
RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, a_fWrIgnMask, 0, a_szName)
/** Function handlers, extended version. */
#define MFX(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** Function handlers, with CPUMCPU storage variable. */
/** Function handlers, with CPUMCPU storage variable, ignore mask and GP mask. */
#define MFZ(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_CpumCpuMember, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, \
         RT_OFFSETOF(CPUMCPU, a_CpumCpuMember), 0, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** Read-only fixed value. */
RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
/** Read-only fixed value, ignores all writes. */
RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
/** Read fixed value, ignore writes outside GP mask. */
RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, 0, a_fWrGpMask, a_szName)
/** Read fixed value, extended version with both GP and ignore masks. */
RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** The short form, no CPUM backing. */
#define MSN(a_uMsr, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uMsr, a_uMsr, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** Range: Function handlers handle everything. */
RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, 0, 0, 0, a_szName)
/** Range: Read fixed value, read-only. */
RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_ReadOnly, 0, a_uValue, 0, UINT64_MAX, a_szName)
/** Range: Read fixed value, ignore writes. */
RINT(a_uFirst, a_uLast, kCpumMsrRdFn_FixedValue, kCpumMsrWrFn_IgnoreWrite, 0, a_uValue, UINT64_MAX, 0, a_szName)
/** Range: The short form, no CPUM backing. */
#define RSN(a_uFirst, a_uLast, a_szName, a_enmRdFnSuff, a_enmWrFnSuff, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask) \
    RINT(a_uFirst, a_uLast, kCpumMsrRdFn_##a_enmRdFnSuff, kCpumMsrWrFn_##a_enmWrFnSuff, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName)
/** Internal form used by the macros. */
#ifdef VBOX_WITH_STATISTICS
# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
{ a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName, \
{ 0 }, { 0 }, { 0 }, { 0 } }
#else
# define RINT(a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName) \
{ a_uFirst, a_uLast, a_enmRdFn, a_enmWrFn, a_offCpumCpu, 0, a_uInitOrReadValue, a_fWrIgnMask, a_fWrGpMask, a_szName }
#endif
/** @} */
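/*
 * Illustrative sketch of how the shorthand macros above are used to build an
 * MSR range table; the real tables live in the cpus/*.h headers included
 * below.  The handler suffixes shown (Ia32ApicBase, Ia32BiosSignId) are
 * assumed to exist as kCpumMsrRdFn_/kCpumMsrWrFn_ values.
 *
 *      static CPUMMSRRANGE const s_aExampleMsrs[] =
 *      {
 *          MFN(0x0000001b, "IA32_APIC_BASE",    Ia32ApicBase,   Ia32ApicBase),
 *          MFX(0x0000008b, "IA32_BIOS_SIGN_ID", Ia32BiosSignId, Ia32BiosSignId, 0, 0, UINT32_MAX),
 *      };
 */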
#include "cpus/Intel_Core_i7_3960X.h"
#include "cpus/Intel_Core_i5_3570.h"
#include "cpus/Intel_Core_i7_2635QM.h"
#include "cpus/Intel_Xeon_X5482_3_20GHz.h"
#include "cpus/Intel_Pentium_M_processor_2_00GHz.h"
#include "cpus/Intel_Pentium_4_3_00GHz.h"
#include "cpus/AMD_FX_8150_Eight_Core.h"
#include "cpus/AMD_Phenom_II_X6_1100T.h"
#include "cpus/Quad_Core_AMD_Opteron_2384.h"
#include "cpus/AMD_Athlon_64_X2_Dual_Core_4200.h"
#include "cpus/AMD_Athlon_64_3200.h"
#include "cpus/VIA_QuadCore_L4700_1_2_GHz.h"
/**
* The database entries.
*
* 1. The first entry is special. It is the fallback for unknown
* processors. Thus, it better be pretty representative.
*
* 2. The first entry for a CPU vendor is likewise important as it is
* the default entry for that vendor.
*
* Generally we put the most recent CPUs first, since these tend to have the
* most complicated and backwards compatible list of MSRs.
*/
static CPUMDBENTRY const * const g_apCpumDbEntries[] =
{
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
#endif
};
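/*
 * Minimal sketch (illustrative only, not the lookup code further down) of how
 * the ordering rules above are typically exploited: the first entry is the
 * global fallback, and the first entry of a given vendor doubles as that
 * vendor's default.  cpumDbExampleDefaultForVendor is a hypothetical helper.
 *
 *      static CPUMDBENTRY const *cpumDbExampleDefaultForVendor(CPUMCPUVENDOR enmVendor)
 *      {
 *          for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
 *              if ((CPUMCPUVENDOR)g_apCpumDbEntries[i]->enmVendor == enmVendor)
 *                  return g_apCpumDbEntries[i];
 *          return g_apCpumDbEntries[0]; // the overall fallback entry
 *      }
 */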
#ifndef CPUM_DB_STANDALONE
/**
* Binary search used by cpumR3MsrRangesInsert; it has some special properties
* with respect to mismatches.
*
* @returns Insert location.
* @param paMsrRanges The MSR ranges to search.
* @param cMsrRanges The number of MSR ranges.
* @param uMsr What to search for.
*/
static uint32_t cpumR3MsrRangesBinSearch(PCCPUMMSRRANGE paMsrRanges, uint32_t cMsrRanges, uint32_t uMsr)
{
    if (!cMsrRanges)
        return 0;

    uint32_t iStart = 0;
    uint32_t iLast  = cMsrRanges - 1;
    for (;;)
    {
        uint32_t i = iStart + (iLast - iStart + 1) / 2;
        if (   uMsr >= paMsrRanges[i].uFirst
            && uMsr <= paMsrRanges[i].uLast)
            return i;
        if (uMsr < paMsrRanges[i].uFirst)
        {
            if (i <= iStart)
                return i;
            iLast = i - 1;
        }
        else
        {
            if (i >= iLast)
            {
                if (i < cMsrRanges)
                    i++;
                return i;
            }
            iStart = i + 1;
        }
    }
}
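/*
 * Usage sketch (illustrative): the return value is the index at which a range
 * for uMsr would have to be inserted; if uMsr falls inside an existing range,
 * the index of that range is returned instead.
 *
 *      uint32_t i = cpumR3MsrRangesBinSearch(paMsrRanges, cMsrRanges, pNewRange->uFirst);
 *      bool fOverlaps =    i < cMsrRanges
 *                       && pNewRange->uFirst >= paMsrRanges[i].uFirst
 *                       && pNewRange->uFirst <= paMsrRanges[i].uLast;
 *      // !fOverlaps: pNewRange can simply be inserted at index i.
 */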
/**
* Ensures that there is space for at least @a cNewRanges in the table,
* reallocating the table if necessary.
*
* @returns Pointer to the MSR ranges on success, NULL on failure. On failure
* @a *ppaMsrRanges is freed and set to NULL.
* @param pVM Pointer to the VM, used as the heap selector.
* Passing NULL uses the host-context heap, otherwise
* the VM's hyper heap is used.
* @param ppaMsrRanges The variable pointing to the ranges (input/output).
* @param cMsrRanges The current number of ranges.
* @param cNewRanges The number of ranges to be added.
*/
static PCPUMMSRRANGE cpumR3MsrRangesEnsureSpace(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t cMsrRanges, uint32_t cNewRanges)
{
if (!pVM)
else
{
/*
* We're using the hyper heap now, but when the range array was copied over to it from
* the host-context heap, we only copy the exact size and not the ensured size.
* See @bugref{7270}.
*/
}
{
void *pvNew;
if (pVM)
{
if (RT_FAILURE(rc))
{
*ppaMsrRanges = NULL;
return NULL;
}
}
else
{
if (!pvNew)
{
*ppaMsrRanges = NULL;
return NULL;
}
}
}
if (pVM)
{
/* Update R0 and RC pointers. */
}
return *ppaMsrRanges;
}
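/*
 * Usage sketch (illustrative): callers pick up the possibly reallocated array
 * pointer from the return value and bail out on NULL.
 *
 *      paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, cMsrRanges, 1);
 *      if (!paMsrRanges)
 *          return VERR_NO_MEMORY;
 */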
/**
* Inserts a new MSR range into a sorted MSR range array.
*
* If the new MSR range overlaps existing ranges, the existing ones will be
* adjusted/removed to fit in the new one.
*
* @returns VBox status code.
* @retval VINF_SUCCESS
* @retval VERR_NO_MEMORY
*
* @param pVM Pointer to the VM, used as the heap selector.
* Passing NULL uses the host-context heap, otherwise
* the hyper heap.
* @param ppaMsrRanges The variable pointing to the ranges (input/output).
* Must be NULL if using the hyper heap.
* @param pcMsrRanges The variable holding number of ranges. Must be NULL
* if using the hyper heap.
* @param pNewRange The new range.
*/
int cpumR3MsrRangesInsert(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange)
{
/*
* Validate and use the VM's MSR ranges array if we are using the hyper heap.
*/
if (pVM)
{
}
/*
* Optimize the linear insertion case where we add new entries at the end.
*/
if ( cMsrRanges > 0
{
if (!paMsrRanges)
return VERR_NO_MEMORY;
*pcMsrRanges += 1;
}
else
{
/*
* Adding an entirely new entry?
*/
if ( i >= cMsrRanges
{
if (!paMsrRanges)
return VERR_NO_MEMORY;
if (i < cMsrRanges)
paMsrRanges[i] = *pNewRange;
*pcMsrRanges += 1;
}
/*
* Replace existing entry?
*/
paMsrRanges[i] = *pNewRange;
/*
* Splitting an existing entry?
*/
{
if (!paMsrRanges)
return VERR_NO_MEMORY;
if (i < cMsrRanges)
*pcMsrRanges += 2;
}
/*
* Complicated scenarios that can affect more than one range.
*
* The current code does not optimize memmove calls when replacing
* one or more existing ranges, because it's tedious to deal with and
* not expected to be a frequent usage scenario.
*/
else
{
/* Adjust start of first match? */
else
{
/* Adjust end of first match? */
{
i++;
}
/* Replace the whole first match (lazy bird). */
else
{
if (i + 1 < cMsrRanges)
}
/* Do the new range affect more ranges? */
while ( i < cMsrRanges
{
{
/* Adjust the start of it, then we're done. */
break;
}
/* Remove it entirely. */
if (i + 1 < cMsrRanges)
}
}
/* Now, perform a normal insertion. */
if (!paMsrRanges)
return VERR_NO_MEMORY;
if (i < cMsrRanges)
paMsrRanges[i] = *pNewRange;
*pcMsrRanges += 1;
}
}
return VINF_SUCCESS;
}
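/*
 * Usage sketch (illustrative): inserting into a host-heap array versus the
 * VM's hyper-heap copy.  The MSR entry uses the MFN shorthand from above; the
 * Ia32ApicBase handler suffix is assumed to exist.
 *
 *      CPUMMSRRANGE NewRange = MFN(0x0000001b, "IA32_APIC_BASE", Ia32ApicBase, Ia32ApicBase);
 *
 *      // Host-context heap array (pVM is NULL, explicit array and count):
 *      rc = cpumR3MsrRangesInsert(NULL, &paMsrRanges, &cMsrRanges, &NewRange);
 *
 *      // VM hyper heap array (ppaMsrRanges and pcMsrRanges must be NULL):
 *      rc = cpumR3MsrRangesInsert(pVM, NULL, NULL, &NewRange);
 */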
/**
* Worker for cpumR3MsrApplyFudge that applies one table.
*
* @returns VBox status code.
* @param pVM Pointer to the cross context VM structure.
* @param paRanges Array of MSRs to fudge.
* @param cRanges Number of MSRs in the array.
*/
static int cpumR3MsrApplyFudgeTable(PVM pVM, PCCPUMMSRRANGE paRanges, size_t cRanges)
{
    for (uint32_t i = 0; i < cRanges; i++)
    {
int rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
&paRanges[i]);
if (RT_FAILURE(rc))
return rc;
}
return VINF_SUCCESS;
}
/**
* Fudges the MSRs that guests are known to access in some odd cases.
*
* A typical example is a VM that has been moved between different hosts where
* for instance the CPU vendor differs.
*
* @returns VBox status code.
* @param pVM Pointer to the cross context VM structure.
*/
int cpumR3MsrApplyFudge(PVM pVM)
{
/*
* Basic.
*/
static CPUMMSRRANGE const s_aFudgeMsrs[] =
{
MFZ(0x000002ff, "IA32_MTRR_DEF_TYPE", Ia32MtrrDefType, Ia32MtrrDefType, GuestMsrs.msr.MtrrDefType, 0, ~(uint64_t)0xc07),
MFN(0x00000400, "IA32_MCi_CTL_STATUS_ADDR_MISC", Ia32McCtlStatusAddrMiscN, Ia32McCtlStatusAddrMiscN),
    };

    int rc = cpumR3MsrApplyFudgeTable(pVM, &s_aFudgeMsrs[0], RT_ELEMENTS(s_aFudgeMsrs));
    AssertLogRelRCReturn(rc, rc);
/*
* XP might mistake opterons and other newer CPUs for P4s.
*/
    if (pVM->cpum.s.GuestFeatures.uFamily >= 0xf)
    {
static CPUMMSRRANGE const s_aP4FudgeMsrs[] =
{
MFX(0x0000002c, "P4_EBC_FREQUENCY_ID", IntelP4EbcFrequencyId, IntelP4EbcFrequencyId, 0xf12010f, UINT64_MAX, 0),
        };
        rc = cpumR3MsrApplyFudgeTable(pVM, &s_aP4FudgeMsrs[0], RT_ELEMENTS(s_aP4FudgeMsrs));
        AssertLogRelRCReturn(rc, rc);
}
return rc;
}
int cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo)
{
int rc;
{
/*
* Create a CPU database entry for the host CPU. This means getting
* the CPUID bits from the real CPU and grabbing the closest matching
* database entry for MSRs.
*/
if (RT_FAILURE(rc))
return rc;
if (RT_FAILURE(rc))
return rc;
/* Lookup database entry for MSRs. */
CPUMMICROARCH const enmMicroarch = CPUMR3CpuIdDetermineMicroarchEx(enmVendor, uFamily, uModel, uStepping);
for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
{
{
/* Match against Family, Microarch, model and stepping. Except
   for family, always match the closer with preference given to
   the earlier/older one. */
{
{
{
{
/* Perfect match. */
break;
}
if ( !pEntry
}
else if ( !pEntry
}
else if ( !pEntry
}
/* We don't do closeness matching on family, we use the first
entry for the CPU vendor instead. (P4 workaround.) */
else if (!pEntry)
}
}
if (pEntry)
LogRel(("CPUM: Matched host CPU %s %#x/%#x/%#x %s with CPU DB entry '%s' (%s %#x/%#x/%#x %s)\n",
pEntry->pszName, CPUMR3CpuVendorName((CPUMCPUVENDOR)pEntry->enmVendor), pEntry->uFamily, pEntry->uModel,
else
{
pEntry = g_apCpumDbEntries[0];
LogRel(("CPUM: No matching processor database entry %s %#x/%#x/%#x %s, falling back on '%s'\n",
}
}
else
{
/*
* We're supposed to be emulating a specific CPU that is included in
* our CPU database. The CPUID tables need to be copied onto the
* heap so the caller can modify them and so they can be freed like
* in the host case above.
*/
for (unsigned i = 0; i < RT_ELEMENTS(g_apCpumDbEntries); i++)
{
pEntry = g_apCpumDbEntries[i];
break;
}
if (!pEntry)
{
return VERR_CPUM_DB_CPU_NOT_FOUND;
}
if (pEntry->cCpuIdLeaves)
{
if (!pInfo->paCpuIdLeavesR3)
return VERR_NO_MEMORY;
}
else
LogRel(("CPUM: Using CPU DB entry '%s' (%s %#x/%#x/%#x %s)\n",
}
/*
* Copy the MSR range.
*/
while (cLeft-- > 0)
{
if (RT_FAILURE(rc))
{
return rc;
}
pCurMsr++;
}
return VINF_SUCCESS;
}
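/*
 * Minimal sketch (not the exact scoring used above) of the closeness idea
 * applied when matching the host CPU: within the same vendor, candidates with
 * matching family are compared on microarchitecture, then model, then
 * stepping, preferring the closest and, on ties, the earlier/older entry.
 * cpumDbExampleDistance is a hypothetical helper.
 *
 *      static uint32_t cpumDbExampleDistance(uint32_t uHost, uint32_t uEntry)
 *      {
 *          // Entries newer than the host are penalized so older ones win ties.
 *          return uHost >= uEntry ? uHost - uEntry : (uEntry - uHost) * 2;
 *      }
 */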
/**
* Insert an MSR range into the VM.
*
* If the new MSR range overlaps existing ranges, the existing ones will be
* adjusted/removed to fit in the new one.
*
* @returns VBox status code.
* @param pVM Pointer to the cross context VM structure.
* @param pNewRange Pointer to the MSR range being inserted.
*/
VMMR3DECL(int) CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange)
{
    AssertReturn(pNewRange, VERR_INVALID_PARAMETER);
    return cpumR3MsrRangesInsert(pVM, NULL /* ppaMsrRanges */, NULL /* pcMsrRanges */, pNewRange);
}
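/*
 * Usage sketch (illustrative) of the public wrapper; the MSR number and the
 * Ia32BiosSignId handler suffix are examples only.
 *
 *      CPUMMSRRANGE Range = MFN(0x0000008b, "IA32_BIOS_SIGN_ID", Ia32BiosSignId, Ia32BiosSignId);
 *      int rc = CPUMR3MsrRangesInsert(pVM, &Range);
 *      AssertRCReturn(rc, rc);
 */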
/**
* Register statistics for the MSRs.
*
* This must not be called before the MSRs have been finalized and moved to the
* hyper heap.
*
* @returns VBox status code.
* @param pVM Pointer to the cross context VM structure.
*/
int cpumR3MsrRegStats(PVM pVM)
{
/*
* Global statistics.
*/
STAMUNIT_OCCURENCES, "All RDMSRs making it to CPUM.");
STAMUNIT_OCCURENCES, "RDMSR raising #GPs, except unknown MSRs.");
STAMUNIT_OCCURENCES, "RDMSR on unknown MSRs (raises #GP).");
STAMUNIT_OCCURENCES, "All RDMSRs making it to CPUM.");
STAMUNIT_OCCURENCES, "WRMSR raising #GPs, except unknown MSRs.");
STAM_REL_REG(pVM, &pCpum->cMsrWritesToIgnoredBits, STAMTYPE_COUNTER, "/CPUM/MSR-Totals/WritesToIgnoredBits",
STAMUNIT_OCCURENCES, "Writing of ignored bits.");
STAMUNIT_OCCURENCES, "WRMSR on unknown MSRs (raises #GP).");
# ifdef VBOX_WITH_STATISTICS
/*
* Per range.
*/
{
char szName[160];
else
STAMR3Register(pVM, &paRanges[i].cReads, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_OCCURENCES, "RDMSR");
STAMR3Register(pVM, &paRanges[i].cWrites, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR");
STAMR3Register(pVM, &paRanges[i].cGps, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "#GPs");
STAMR3Register(pVM, &paRanges[i].cIgnoredBits, STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "WRMSR w/ ignored bits");
}
# endif /* VBOX_WITH_STATISTICS */
return VINF_SUCCESS;
}
#endif /* !CPUM_DB_STANDALONE */