/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
* Copyright 2012 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/asm_linkage.h>
#include <sys/bootconf.h>
#include <sys/controlregs.h>
#include <sys/kobj_impl.h>
#include <sys/machsystm.h>
#include <sys/machparam.h>
#include <sys/sysmacros.h>
#include <sys/x86_archext.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
/*
 * AMD-specific equivalence table
 */
static ucode_eqtbl_amd_t *ucode_eqtbl_amd = NULL;

/*
 * mcpu_ucode_info for the boot CPU. Statically allocated.
 */
static struct cpu_ucode_info cpu_ucode_info0;

static ucode_file_t ucodefile;
static int ucode_capable_amd(cpu_t *);
static int ucode_capable_intel(cpu_t *);
static ucode_errno_t ucode_extract_amd(ucode_update_t *, uint8_t *, int);
static ucode_errno_t ucode_extract_intel(ucode_update_t *, uint8_t *,
    int);

static void ucode_file_reset_amd(ucode_file_t *, processorid_t);
static void ucode_file_reset_intel(ucode_file_t *, processorid_t);

static uint32_t ucode_load_amd(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
static uint32_t ucode_load_intel(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
#ifdef __xpv
static void ucode_load_xpv(ucode_update_t *);
static void ucode_chipset_amd(uint8_t *, int);
#endif
static int ucode_equiv_cpu_id(uint16_t *);

static ucode_errno_t ucode_locate_amd(cpu_t *, cpu_ucode_info_t *,
    ucode_file_t *);
static ucode_errno_t ucode_locate_intel(cpu_t *, cpu_ucode_info_t *,
    ucode_file_t *);
#ifndef __xpv
static ucode_errno_t ucode_match_amd(uint16_t, cpu_ucode_info_t *,
    ucode_file_amd_t *, int);
#endif
static ucode_errno_t ucode_match_intel(int, cpu_ucode_info_t *,
    ucode_header_intel_t *, ucode_ext_table_intel_t *);
static void ucode_read_rev_amd(cpu_ucode_info_t *);
static void ucode_read_rev_intel(cpu_ucode_info_t *);
static const struct ucode_ops ucode_amd = {
	MSR_AMD_PATCHLOADER,
	ucode_capable_amd,
	ucode_file_reset_amd,
	ucode_read_rev_amd,
	ucode_load_amd,
	ucode_validate_amd,
	ucode_extract_amd,
	ucode_locate_amd
};

static const struct ucode_ops ucode_intel = {
	MSR_INTC_UCODE_WRITE,
	ucode_capable_intel,
	ucode_file_reset_intel,
	ucode_read_rev_intel,
	ucode_load_intel,
	ucode_validate_intel,
	ucode_extract_intel,
	ucode_locate_intel
};

static const struct ucode_ops *ucode;
static const char ucode_failure_fmt[] =
"cpu%d: failed to update microcode from version 0x%x to 0x%x\n";
static const char ucode_success_fmt[] =
"?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";
/*
* Force flag. If set, the first microcode binary that matches
* signature and platform id will be used for microcode update,
* regardless of version. Should only be used for debugging.
*/
int ucode_force_update = 0;
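/*
 * For example, the flag can be set at boot time with the standard
 * /etc/system tunable syntax (shown here for illustration only):
 *	set ucode_force_update = 1
 */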
/*
* Allocate space for mcpu_ucode_info in the machcpu structure
* for all non-boot CPUs.
*/
void
ucode_alloc_space(cpu_t *cp)
{
	cp->cpu_m.mcpu_ucode_info =
	    kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
}

void
ucode_free_space(cpu_t *cp)
{
	kmem_free(cp->cpu_m.mcpu_ucode_info,
	    sizeof (*cp->cpu_m.mcpu_ucode_info));
	cp->cpu_m.mcpu_ucode_info = NULL;
}
/*
* Called when we are done with microcode update on all processors to free up
* space allocated for the microcode file.
*/
void
ucode_cleanup()
{
	if (ucode == NULL)
		return;

	ucode->file_reset(&ucodefile, -1);
}
/*
 * Allocate/free a buffer used to hold ucode data. Space for the boot CPU is
 * allocated with BOP_ALLOC() and does not require a free.
*/
static void*
ucode_zalloc(processorid_t id, size_t size)
{
	if (id)
		return (kmem_zalloc(size, KM_NOSLEEP));

	/* BOP_ALLOC() failure results in panic */
	return (BOP_ALLOC(bootops, NULL, size, MMU_PAGESIZE));
}

static void
ucode_free(processorid_t id, void *buf, size_t size)
{
	if (id)
		kmem_free(buf, size);
}
/*
 * Check whether or not a processor is capable of microcode operations.
* Returns 1 if it is capable, 0 if not.
*
* At this point we only support microcode update for:
* - Intel processors family 6 and above, and
* - AMD processors family 0x10 and above.
*
* We also assume that we don't support a mix of Intel and
* AMD processors in the same box.
*
* An i86xpv guest domain or VM can't update the microcode.
*/
#define	XPVDOMU_OR_HVM	\
	((hwenv == HW_XEN_PV && !is_controldom()) || (hwenv & HW_VIRTUAL) != 0)
/*ARGSUSED*/
static int
ucode_capable_amd(cpu_t *cp)
{
	int hwenv = get_hwenv();

	if (XPVDOMU_OR_HVM)
		return (0);

	return (cpuid_getfamily(cp) >= 0x10);
}
static int
ucode_capable_intel(cpu_t *cp)
{
	int hwenv = get_hwenv();

	if (XPVDOMU_OR_HVM)
		return (0);

	return (cpuid_getfamily(cp) >= 6);
}
/*
* Called when it is no longer necessary to keep the microcode around,
* or when the cached microcode doesn't match the CPU being processed.
*/
static void
ucode_file_reset_amd(ucode_file_t *ufp, processorid_t id)
{
return;
}
static void
ucode_file_reset_intel(ucode_file_t *ufp, processorid_t id)
{
return;
}
if (ucodefp->uf_ext_table) {
}
}
/*
* Find the equivalent CPU id in the equivalence table.
*/
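/*
 * The equivalence table maps the installed CPU signature to an equivalent
 * CPU id and is terminated by a zero entry, which is why a returned
 * equivalent id of zero is treated as "no match" below.
 */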
static int
ucode_equiv_cpu_id(uint16_t *eq_sig)
{
int count;
/*
* No kmem_zalloc() etc. available on boot cpu.
*/
return (EM_OPENFILE);
/* ucode_zalloc() cannot fail on boot cpu */
do {
(void) kobj_close(fd);
return (EM_HIGHERREV);
}
(void) kobj_close(fd);
}
/*
* If not already done, load the equivalence table.
* Not done on boot CPU.
*/
return (EM_OPENFILE);
return (EM_OPENFILE);
}
if (ucode_eqtbl_amd == NULL) {
return (EM_NOMEM);
}
return (EM_FILESIZE);
}
/* Get the equivalent CPU id. */
for (eqtbl = ucode_eqtbl_amd;
eqtbl++)
;
/* No equivalent CPU id found, assume outdated microcode file. */
if (*eq_sig == 0)
return (EM_HIGHERREV);
return (EM_OK);
}
/*
* xVM cannot check for the presence of PCI devices. Look for chipset-
* specific microcode patches in the container file and disable them
* by setting their CPU revision to an invalid value.
*/
#ifdef __xpv
static void
ucode_chipset_amd(uint8_t *buf, int size)
{
int len = 0;
/* skip to first microcode patch */
"chipset id %x, revision %x",
}
"chipset id %x, revision %x",
}
}
}
#endif
/*
 * Populate the ucode file structure from the microcode file corresponding to
 * this CPU, if one exists.
*
* Return EM_OK on success, corresponding error code on failure.
*/
/*ARGSUSED*/
static ucode_errno_t
ucode_locate_amd(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
#ifndef __xpv
int i;
/* get equivalent CPU id */
return (rc);
/*
* Allocate a buffer for the microcode patch. If the buffer has been
* allocated before, check for a matching microcode to avoid loading
* the file again.
*/
== EM_OK)
return (EM_OK);
return (EM_NOMEM);
/*
* Find the patch for this CPU. The patch files are named XXXX-YY, where
* XXXX is the equivalent CPU id and YY is the running patch number.
* Patches specific to certain chipsets are guaranteed to have lower
* numbers than less specific patches, so we can just load the first
* patch that matches.
*/
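	/*
	 * For illustration only (a sketch, not necessarily the exact code):
	 * each candidate patch file would be opened roughly as
	 *
	 *	(void) snprintf(name, MAXPATHLEN, "/%s/%s/%04X-%02X",
	 *	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), eq_sig, i);
	 *	fd = kobj_open(name);
	 *
	 * until one matching this CPU is found.
	 */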
for (i = 0; i < 0xff; i++) {
return (EM_NOMATCH);
(void) kobj_close(fd);
return (EM_OK);
}
return (EM_NOMATCH);
#else
int size = 0;
char c;
/*
* The xVM case is special. To support mixed-revision systems, the
* hypervisor will choose which patch to load for which CPU, so the
* whole microcode patch container file will have to be loaded.
*
* Since this code is only run on the boot cpu, we don't have to care
* about failing ucode_zalloc() or freeing allocated memory.
*/
return (EM_INVALIDARG);
return (EM_OPENFILE);
/* get the file size by counting bytes */
do {
} while (count);
/* load the microcode patch container file */
(void) kobj_close(fd);
return (EM_FILESIZE);
/* make sure the container file is valid */
return (rc);
/* disable chipset-specific patches */
return (EM_OK);
#endif
}
static ucode_errno_t
ucode_locate_intel(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
int count;
/*
* If the microcode matches the CPU we are processing, use it.
*/
return (EM_OK);
}
/*
* Look for microcode file with the right name.
*/
uinfop->cui_platid);
return (EM_OPENFILE);
}
/*
* We found a microcode file for the CPU we are processing,
* reset the microcode data structure and read in the new
* file.
*/
return (EM_NOMEM);
switch (count) {
case UCODE_HEADER_SIZE_INTEL: {
/*
* Make sure that the header contains valid fields.
*/
break;
}
rc = EM_FILESIZE;
}
if (rc)
break;
rc = EM_CHECKSUM;
break;
}
/*
* Check to see if there is extended signature table.
*/
if (ext_size <= 0)
break;
break;
}
rc = EM_FILESIZE;
} else if (ucode_checksum_intel(0, ext_size,
rc = EM_CHECKSUM;
} else {
int i;
i++) {
if (ucode_checksum_intel(0,
uet_ext_sig[i])))) {
rc = EM_CHECKSUM;
break;
}
}
}
break;
}
default:
rc = EM_FILESIZE;
break;
}
kobj_close(fd);
return (rc);
return (rc);
}
#ifndef __xpv
static ucode_errno_t
ucode_match_amd(uint16_t eq_sig, cpu_ucode_info_t *uinfop,
    ucode_file_amd_t *ucodefp, int size)
{
return (EM_NOMATCH);
/*
* Don't even think about loading patches that would require code
* execution. Does not apply to patches for family 0x14 and beyond.
*/
return (EM_NOMATCH);
return (EM_NOMATCH);
return (EM_NOMATCH);
}
return (EM_NOMATCH);
}
return (EM_HIGHERREV);
return (EM_OK);
}
#endif
/*
 * Returns EM_OK if the microcode matches this processor, EM_HIGHERREV if the
 * running revision is already equal or higher, and EM_NOMATCH otherwise.
*/
static ucode_errno_t
ucode_match_intel(int cpi_sig, cpu_ucode_info_t *uinfop,
    ucode_header_intel_t *uhp, ucode_ext_table_intel_t *uetp)
{
return (EM_NOMATCH);
return (EM_HIGHERREV);
return (EM_OK);
}
int i;
return (EM_HIGHERREV);
return (EM_OK);
}
}
}
return (EM_NOMATCH);
}
/*ARGSUSED*/
static int
ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
{
#ifndef __xpv
#endif
#ifndef __xpv
/*
* Check one more time to see if it is really necessary to update
* microcode just in case this is a hyperthreaded processor where
* the threads share the same microcode.
*/
if (!ucode_force_update) {
return (0);
}
no_trap();
#endif
return (0);
}
/*ARGSUSED*/
static uint32_t
ucode_load_amd(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
#ifdef __xpv
#else
#endif
#ifndef __xpv
no_trap();
return (0);
}
no_trap();
#else
#endif
}
/*ARGSUSED2*/
static uint32_t
ucode_load_intel(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
#ifdef __xpv
#endif
#ifdef __xpv
/*
	 * The hypervisor wants the header, data, and extended signature
	 * tables. Since we can only get here from the boot CPU (cpu #0),
	 * we don't need to free: ucode_zalloc() will have used BOP_ALLOC().
*/
if (ext_size > 0) {
}
#else
#endif
}
#ifdef __xpv
static void
ucode_load_xpv(ucode_update_t *uusp)
{
	xen_platform_op_t op;
	int e;
/*LINTED: constant in conditional context*/
e = HYPERVISOR_platform_op(&op);
if (e != 0) {
}
}
#endif /* __xpv */
static void
ucode_read_rev_amd(cpu_ucode_info_t *uinfop)
{
	uinfop->cui_rev = rdmsr(MSR_AMD_PATCHLEVEL);
}
static void
ucode_read_rev_intel(cpu_ucode_info_t *uinfop)
{
	struct cpuid_regs crs;

	crs.cp_eax = 1;

	/*
	 * The Intel 64 and IA-32 Architectures Software Developer's Manual
	 * recommends that MSR_INTC_UCODE_REV be loaded with 0 first, then
	 * execute cpuid to guarantee the correct reading of this register.
	 */
	wrmsr(MSR_INTC_UCODE_REV, 0);
	(void) __cpuid_insn(&crs);

	/* The update signature is returned in the high 32 bits of the MSR */
	uinfop->cui_rev = (uint32_t)(rdmsr(MSR_INTC_UCODE_REV) >> 32);
}
static ucode_errno_t
ucode_extract_amd(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
#ifndef __xpv
int count;
int higher = 0;
/* skip over magic number & equivalence table header */
eqtbl++)
;
/* No equivalent CPU id found, assume outdated microcode file. */
if (eq_sig == 0)
return (EM_HIGHERREV);
/* Use the first microcode patch that matches. */
do {
if (!size)
if (rc == EM_HIGHERREV)
higher = 1;
#else
/*
* The hypervisor will choose the patch to load, so there is no way to
* know the "expected revision" in advance. This is especially true on
* mixed-revision systems where more than one patch will be loaded.
*/
uusp->expected_rev = 0;
#endif
return (EM_OK);
}
static ucode_errno_t
ucode_extract_intel(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
int remaining;
int found = 0;
/*
* Go through the whole buffer in case there are
* multiple versions of matching microcode for this
* processor.
*/
if (ext_size > 0)
uetp = (ucode_ext_table_intel_t *)
/*
* Since we are searching through a big file
* containing microcode for pretty much all the
* processors, we are bound to get EM_NOMATCH
* at one point. However, if we return
* EM_NOMATCH to users, it will really confuse
* them. Therefore, if we ever find a match of
* a lower rev, we will set return code to
* EM_HIGHERREV.
*/
if (tmprc == EM_HIGHERREV)
#ifndef __xpv
#else
#endif
found = 1;
}
remaining -= total_size;
}
if (!found)
return (search_rc);
return (EM_OK);
}
/*
* Entry point to microcode update from the ucode_drv driver.
*
* Returns EM_OK on success, corresponding error code on failure.
*/
ucode_errno_t
ucode_update(uint8_t *ucodep, int size)
{
int found = 0;
return (EM_NOTSUP);
/*
* If there is no such CPU or it is not xcall ready, skip it.
*/
continue;
/*
* If the current CPU has the same signature and platform
* id as the previous one we processed, reuse the information.
*/
/*
* Intuitively we should check here to see whether the
* running microcode rev is >= the expected rev, and
* quit if it is. But we choose to proceed with the
* xcall regardless of the running version so that
* the other threads in an HT processor can update
* the cpu_ucode_info structure in machcpu.
*/
== EM_OK) {
found = 1;
}
/* Nothing to do */
continue;
#ifdef __xpv
/*
* for i86xpv, the hypervisor will update all the CPUs.
* the hypervisor wants the header, data, and extended
* signature tables. ucode_write will just read in the
* updated version on all the CPUs after the update has
* completed.
*/
if (id == 0) {
}
#endif
rc = EM_HIGHERREV;
} else {
}
}
if (!found)
return (rc);
}
/*
* Initialize mcpu_ucode_info, and perform microcode update if necessary.
 * This is the entry point from the boot path, where a pointer to the CPU
 * structure is available.
*
* cpuid_info must be initialized before ucode_check can be called.
*/
void
ucode_check(cpu_t *cp)
{
/*
* Space statically allocated for BSP, ensure pointer is set
*/
/* set up function pointers if not already done */
if (!ucode)
switch (cpuid_getvendor(cp)) {
case X86_VENDOR_AMD:
break;
case X86_VENDOR_Intel:
ucode = &ucode_intel;
break;
default:
return;
}
return;
/*
* The MSR_INTC_PLATFORM_ID is supported in Celeron and Xeon
 * processors (family 6, model 5 and above) and all later processors.
*/
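		/*
		 * For illustration (a sketch, not necessarily the exact
		 * expression): the platform id lives in bits 52:50 of
		 * MSR_INTC_PLATFORM_ID and is converted to the bit-mask form
		 * used by the microcode header, roughly
		 *	1 << ((rdmsr(MSR_INTC_PLATFORM_ID) >> 50) & 0x7)
		 * before being stored in cui_platid.
		 */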
}
#ifdef __xpv
/*
 * For i86xpv, the hypervisor will update all the CPUs. We only need
 * to do this on one of the CPUs (and there always is a CPU 0).
*/
return;
}
#endif
/*
* Check to see if we need ucode update
*/
}
/*
* If we fail to find a match for any reason, free the file structure
* just in case we have read in a partial file.
*
* Since the scratch memory for holding the microcode for the boot CPU
* came from BOP_ALLOC, we will reset the data structure as if we
* never did the allocation so we don't have to keep track of this
* special chunk of memory. We free the memory used for the rest
* of the CPUs in start_other_cpus().
*/
}
/*
* Returns microcode revision from the machcpu structure.
*/
ucode_errno_t
ucode_get_rev(uint32_t *revp)
{
int i;
return (EM_NOTSUP);
for (i = 0; i < max_ncpus; i++) {
continue;
}
return (EM_OK);
}