/* us3_cheetah.c revision bb121940c2fe627557326e0143391ace6e6b7372 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E%	SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/vmsystm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/machthread.h>
#include <sys/cpu.h>
#include <sys/cmp.h>
#include <sys/elf_SPARC.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_kmem.h>
#include <sys/cpuvar.h>
#include <sys/cheetahregs.h>
#include <sys/us3_module.h>
#include <sys/async.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dditypes.h>
#include <sys/prom_debug.h>
#include <sys/prom_plat.h>
#include <sys/cpu_module.h>
#include <sys/sysmacros.h>
#include <sys/intreg.h>
#include <sys/clock.h>
#include <sys/platform_module.h>
#include <sys/machtrap.h>
#include <sys/ontrap.h>
#include <sys/panic.h>
#include <sys/memlist.h>
#include <sys/bootconf.h>
#include <sys/ivintr.h>
#include <sys/atomic.h>
#include <sys/fm/protocol.h>
#include <sys/fm/cpu/UltraSPARC-III.h>
#include <vm/vm_dep.h>

#ifdef CHEETAHPLUS_ERRATUM_25
#include <sys/cyclic.h>
#endif /* CHEETAHPLUS_ERRATUM_25 */

/*
 * Note that 'Cheetah PRM' refers to:
 *   SPARC V9 JPS1 Implementation Supplement: Sun UltraSPARC-III
 */

/*
 * Setup trap handlers.
 *
 * Patches the Cheetah-specific handlers into the trap table via
 * CH_SET_TRAP: the PIL 15 cross-trap used for error reporting, and the
 * fast-ECC error handlers for TL=0, TL>0, and the TL>0 continuation
 * (software trap 0).
 */
void
cpu_init_trap(void)
{
	/* PIL 15 interrupt: CPU error reporting cross-trap. */
	CH_SET_TRAP(tt_pil15, ch_pil15_interrupt_instr);

	/* Fast ECC error traps at TL=0 and TL>0, plus TL>0 continuation. */
	CH_SET_TRAP(tt0_fecc, fecc_err_instr);
	CH_SET_TRAP(tt1_fecc, fecc_err_tl1_instr);
	CH_SET_TRAP(tt1_swtrap0, fecc_err_tl1_cont_instr);
}
2N/A
2N/Astatic int
2N/Agetintprop(pnode_t node, char *name, int deflt)
2N/A{
2N/A int value;
2N/A
2N/A switch (prom_getproplen(node, name)) {
2N/A case sizeof (int):
2N/A (void) prom_getprop(node, name, (caddr_t)&value);
2N/A break;
2N/A
2N/A default:
2N/A value = deflt;
2N/A break;
2N/A }
2N/A
2N/A return (value);
2N/A}
2N/A
2N/A/*
2N/A * Set the magic constants of the implementation.
2N/A */
2N/A/*ARGSUSED*/
2N/Avoid
2N/Acpu_fiximp(pnode_t dnode)
2N/A{
2N/A int i, a;
2N/A
2N/A static struct {
2N/A char *name;
2N/A int *var;
2N/A int defval;
2N/A } prop[] = {
2N/A "dcache-size", &dcache_size, CH_DCACHE_SIZE,
2N/A "dcache-line-size", &dcache_linesize, CH_DCACHE_LSIZE,
2N/A "icache-size", &icache_size, CH_ICACHE_SIZE,
2N/A "icache-line-size", &icache_linesize, CH_ICACHE_LSIZE,
2N/A "ecache-size", &ecache_size, CH_ECACHE_MAX_SIZE,
2N/A "ecache-line-size", &ecache_alignsize, CH_ECACHE_MAX_LSIZE,
2N/A "ecache-associativity", &ecache_associativity, CH_ECACHE_NWAY
2N/A };
2N/A
2N/A for (i = 0; i < sizeof (prop) / sizeof (prop[0]); i++)
2N/A *prop[i].var = getintprop(dnode, prop[i].name, prop[i].defval);
2N/A
2N/A ecache_setsize = ecache_size / ecache_associativity;
2N/A
2N/A vac_size = CH_VAC_SIZE;
2N/A vac_mask = MMU_PAGEMASK & (vac_size - 1);
2N/A i = 0; a = vac_size;
2N/A while (a >>= 1)
2N/A ++i;
2N/A vac_shift = i;
2N/A shm_alignment = vac_size;
2N/A vac = 1;
2N/A
2N/A /*
2N/A * Cheetah's large page support has problems with large numbers of
2N/A * large pages, so just disable large pages out-of-the-box.
2N/A * Note that the other defaults are set in sun4u/vm/mach_vm_dep.c.
2N/A */
2N/A max_uheap_lpsize = MMU_PAGESIZE;
2N/A max_ustack_lpsize = MMU_PAGESIZE;
2N/A max_privmap_lpsize = MMU_PAGESIZE;
2N/A max_utext_lpsize = MMU_PAGESIZE;
2N/A max_shm_lpsize = MMU_PAGESIZE;
2N/A max_bootlp_tteszc = TTE8K;
2N/A}
2N/A
/*
 * Send a mondo interrupt (cross-call) to every CPU in 'set'.
 *
 * Ships to at most IDSR_BN_SETS targets at once (the number of busy/nack
 * pairs in the interrupt dispatch status register); when NCPU exceeds
 * that, remaining targets are shipped as earlier dispatches complete.
 * NACKed targets are retried after a short delay.  If a dispatch does not
 * drain before the xc_tick_limit deadline, Cheetah+ erratum 25 recovery
 * (mondo_recover) is attempted once per target set before panicking.
 */
void
send_mondo_set(cpuset_t set)
{
	int lo, busy, nack, shipped = 0;
	uint16_t i, cpuids[IDSR_BN_SETS];
	uint64_t idsr, nackmask = 0, busymask, curnack, curbusy;
	uint64_t starttick, endtick, tick, lasttick;
#if (NCPU > IDSR_BN_SETS)
	int index = 0;
	int ncpuids = 0;
#endif
#ifdef CHEETAHPLUS_ERRATUM_25
	int recovered = 0;
	int cpuid;
#endif

	ASSERT(!CPUSET_ISNULL(set));
	starttick = lasttick = gettick();

#if (NCPU <= IDSR_BN_SETS)
	/* Every CPU fits in the dispatch register; ship to all at once. */
	for (i = 0; i < NCPU; i++)
		if (CPU_IN_SET(set, i)) {
			shipit(i, shipped);
			nackmask |= IDSR_NACK_BIT(shipped);
			cpuids[shipped++] = i;
			CPUSET_DEL(set, i);
			if (CPUSET_ISNULL(set))
				break;
		}
	CPU_STATS_ADDQ(CPU, sys, xcalls, shipped);
#else
	for (i = 0; i < NCPU; i++)
		if (CPU_IN_SET(set, i)) {
			ncpuids++;

			/*
			 * Ship only to the first (IDSR_BN_SETS) CPUs.  If we
			 * find we have shipped to more than (IDSR_BN_SETS)
			 * CPUs, set "index" to the highest numbered CPU in
			 * the set so we can ship to other CPUs a bit later on.
			 */
			if (shipped < IDSR_BN_SETS) {
				shipit(i, shipped);
				nackmask |= IDSR_NACK_BIT(shipped);
				cpuids[shipped++] = i;
				CPUSET_DEL(set, i);
				if (CPUSET_ISNULL(set))
					break;
			} else
				index = (int)i;
		}

	CPU_STATS_ADDQ(CPU, sys, xcalls, ncpuids);
#endif

	busymask = IDSR_NACK_TO_BUSY(nackmask);
	busy = nack = 0;
	endtick = starttick + xc_tick_limit;
	/*
	 * Poll the IDSR until every dispatch has completed (or we time out).
	 */
	for (;;) {
		idsr = getidsr();
#if (NCPU <= IDSR_BN_SETS)
		if (idsr == 0)
			break;
#else
		if (idsr == 0 && shipped == ncpuids)
			break;
#endif
		tick = gettick();
		/*
		 * If there is a big jump between the current tick
		 * count and lasttick, we have probably hit a break
		 * point.  Adjust endtick accordingly to avoid panic.
		 */
		if (tick > (lasttick + xc_tick_jump_limit))
			endtick += (tick - lasttick);
		lasttick = tick;
		if (tick > endtick) {
			if (panic_quiesce)
				return;
#ifdef CHEETAHPLUS_ERRATUM_25
			/*
			 * Find the first CPU still showing NACK or BUSY and
			 * attempt the erratum 25 mondo recovery once per
			 * target set before declaring a timeout.
			 */
			cpuid = -1;
			for (i = 0; i < IDSR_BN_SETS; i++) {
				if (idsr & (IDSR_NACK_BIT(i) |
				    IDSR_BUSY_BIT(i))) {
					cpuid = cpuids[i];
					break;
				}
			}
			if (cheetah_sendmondo_recover && cpuid != -1 &&
			    recovered == 0) {
				if (mondo_recover(cpuid, i)) {
					/*
					 * We claimed the whole memory or
					 * full scan is disabled.
					 */
					recovered++;
				}
				tick = gettick();
				endtick = tick + xc_tick_limit;
				lasttick = tick;
				/*
				 * Recheck idsr
				 */
				continue;
			} else
#endif /* CHEETAHPLUS_ERRATUM_25 */
			{
				/* Log the stuck targets, then panic. */
				cmn_err(CE_CONT, "send mondo timeout "
				    "[%d NACK %d BUSY]\nIDSR 0x%"
				    "" PRIx64 " cpuids:", nack, busy, idsr);
				for (i = 0; i < IDSR_BN_SETS; i++) {
					if (idsr & (IDSR_NACK_BIT(i) |
					    IDSR_BUSY_BIT(i))) {
						cmn_err(CE_CONT, " 0x%x",
						    cpuids[i]);
					}
				}
				cmn_err(CE_CONT, "\n");
				cmn_err(CE_PANIC, "send_mondo_set: timeout");
			}
		}
		curnack = idsr & nackmask;
		curbusy = idsr & busymask;
#if (NCPU > IDSR_BN_SETS)
		if (shipped < ncpuids) {
			uint64_t cpus_left;
			uint16_t next = (uint16_t)index;

			/* Dispatch slots that are neither busy nor nacked. */
			cpus_left = ~(IDSR_NACK_TO_BUSY(curnack) | curbusy) &
			    busymask;

			if (cpus_left) {
				do {
					/*
					 * Sequence through and ship to the
					 * remainder of the CPUs in the system
					 * (e.g. other than the first
					 * (IDSR_BN_SETS)) in reverse order.
					 */
					lo = lowbit(cpus_left) - 1;
					i = IDSR_BUSY_IDX(lo);
					shipit(next, i);
					shipped++;
					cpuids[i] = next;

					/*
					 * If we've processed all the CPUs,
					 * exit the loop now and save
					 * instructions.
					 */
					if (shipped == ncpuids)
						break;

					for ((index = ((int)next - 1));
					    index >= 0; index--)
						if (CPU_IN_SET(set, index)) {
							next = (uint16_t)index;
							break;
						}

					cpus_left &= ~(1ull << lo);
				} while (cpus_left);
#ifdef CHEETAHPLUS_ERRATUM_25
				/*
				 * Clear recovered because we are sending to
				 * a new set of targets.
				 */
				recovered = 0;
#endif
				continue;
			}
		}
#endif
		if (curbusy) {
			busy++;
			continue;
		}

#ifdef SEND_MONDO_STATS
		{
			int n = gettick() - starttick;
			if (n < 8192)
				x_nack_stimes[n >> 7]++;
		}
#endif
		/* Back off ~1us (sys_clock_mhz ticks) before re-shipping. */
		while (gettick() < (tick + sys_clock_mhz))
			;
		/* Re-ship to every target that NACKed us. */
		do {
			lo = lowbit(curnack) - 1;
			i = IDSR_NACK_IDX(lo);
			shipit(cpuids[i], i);
			curnack &= ~(1ull << lo);
		} while (curnack);
		nack++;
		busy = 0;
	}
#ifdef SEND_MONDO_STATS
	{
		int n = gettick() - starttick;
		if (n < 8192)
			x_set_stimes[n >> 7]++;
		else
			x_set_ltimes[(n >> 13) & 0xf]++;
	}
	x_set_cpus[shipped]++;
#endif
}
2N/A
/*
 * Handles error logging for implementation specific error types.
 *
 * Plain Cheetah defines no implementation-specific error types beyond the
 * common us3 set, so this always reports "unknown" back to the shared
 * async error logging code.
 */
/*ARGSUSED*/
int
cpu_impl_async_log_err(void *flt, errorq_elem_t *eqep)
{
	/* There aren't any error types which are specific to cheetah only */
	return (CH_ASYNC_LOG_UNKNOWN);
}
2N/A
/*
 * Return the E$ associativity.
 *
 * On Cheetah+ the E$ can be direct-mapped or 2-way depending on the
 * ECCR_ASSOC bit, and must be probed on the fly; on plain Cheetah (this
 * module) the associativity is fixed, so simply return the compile-time
 * constant CH_ECACHE_NWAY.
 */
int
cpu_ecache_nway(void)
{
	return (CH_ECACHE_NWAY);
}
2N/A
/*
 * Note that these are entered into the table: Fatal Errors (PERR, IERR,
 * ISAP, EMU) first, orphaned UCU/UCC, AFAR Overwrite policy, finally IVU, IVC.
 * Afar overwrite policy is:
 *   UCU,UCC > UE,EDU,WDU,CPU > CE,EDC,EMC,WDC,CPC > TO,BERR
 *
 * NOTE(review): each entry appears to supply, in order: AFSR error bit,
 * short name, valid trap types, fault type, description string, ereport
 * payload class, and ereport class name -- confirm field order against
 * the ecc_type_to_info_t definition in us3_module.h.  The table is
 * terminated by the all-zero/NULL sentinel entry.
 */
ecc_type_to_info_t ecc_type_to_info[] = {

	/* Fatal Errors */
	C_AFSR_PERR,	"PERR ",	ECC_ALL_TRAPS,	CPU_FATAL,
		"PERR Fatal",
		FM_EREPORT_PAYLOAD_SYSTEM2,
		FM_EREPORT_CPU_USIII_PERR,
	C_AFSR_IERR,	"IERR ",	ECC_ALL_TRAPS,	CPU_FATAL,
		"IERR Fatal",
		FM_EREPORT_PAYLOAD_SYSTEM2,
		FM_EREPORT_CPU_USIII_IERR,
	C_AFSR_ISAP,	"ISAP ",	ECC_ALL_TRAPS,	CPU_FATAL,
		"ISAP Fatal",
		FM_EREPORT_PAYLOAD_SYSTEM1,
		FM_EREPORT_CPU_USIII_ISAP,
	C_AFSR_EMU,	"EMU ",		ECC_ASYNC_TRAPS, CPU_FATAL,
		"EMU Fatal",
		FM_EREPORT_PAYLOAD_MEMORY,
		FM_EREPORT_CPU_USIII_EMU,

	/* Orphaned UCC/UCU Errors */
	C_AFSR_UCU,	"OUCU ",	ECC_ORPH_TRAPS, CPU_ORPH,
		"Orphaned UCU",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_UCU,
	C_AFSR_UCC,	"OUCC ",	ECC_ORPH_TRAPS, CPU_ORPH,
		"Orphaned UCC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_UCC,

	/* UCU, UCC */
	C_AFSR_UCU,	"UCU ",		ECC_F_TRAP,	CPU_UE_ECACHE,
		"UCU",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_UCU,
	C_AFSR_UCC,	"UCC ",		ECC_F_TRAP,	CPU_CE_ECACHE,
		"UCC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_UCC,

	/* UE, EDU:ST, EDU:BLD, WDU, CPU */
	C_AFSR_UE,	"UE ",		ECC_ASYNC_TRAPS, CPU_UE,
		"Uncorrectable system bus (UE)",
		FM_EREPORT_PAYLOAD_MEMORY,
		FM_EREPORT_CPU_USIII_UE,
	C_AFSR_EDU,	"EDU ",		ECC_C_TRAP,	CPU_UE_ECACHE_RETIRE,
		"EDU:ST",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_EDUST,
	C_AFSR_EDU,	"EDU ",		ECC_D_TRAP,	CPU_UE_ECACHE_RETIRE,
		"EDU:BLD",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_EDUBL,
	C_AFSR_WDU,	"WDU ",		ECC_C_TRAP,	CPU_UE_ECACHE_RETIRE,
		"WDU",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_WDU,
	C_AFSR_CPU,	"CPU ",		ECC_C_TRAP,	CPU_UE_ECACHE,
		"CPU",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_CPU,

	/* CE, EDC, EMC, WDC, CPC */
	C_AFSR_CE,	"CE ",		ECC_C_TRAP,	CPU_CE,
		"Corrected system bus (CE)",
		FM_EREPORT_PAYLOAD_MEMORY,
		FM_EREPORT_CPU_USIII_CE,
	C_AFSR_EDC,	"EDC ",		ECC_C_TRAP,	CPU_CE_ECACHE,
		"EDC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_EDC,
	C_AFSR_EMC,	"EMC ",		ECC_C_TRAP,	CPU_EMC,
		"EMC",
		FM_EREPORT_PAYLOAD_MEMORY,
		FM_EREPORT_CPU_USIII_EMC,
	C_AFSR_WDC,	"WDC ",		ECC_C_TRAP,	CPU_CE_ECACHE,
		"WDC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_WDC,
	C_AFSR_CPC,	"CPC ",		ECC_C_TRAP,	CPU_CE_ECACHE,
		"CPC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_CPC,

	/* TO, BERR */
	C_AFSR_TO,	"TO ",		ECC_ASYNC_TRAPS, CPU_TO,
		"Timeout (TO)",
		FM_EREPORT_PAYLOAD_IO,
		FM_EREPORT_CPU_USIII_TO,
	C_AFSR_BERR,	"BERR ",	ECC_ASYNC_TRAPS, CPU_BERR,
		"Bus Error (BERR)",
		FM_EREPORT_PAYLOAD_IO,
		FM_EREPORT_CPU_USIII_BERR,

	/* IVU, IVC */
	C_AFSR_IVU,	"IVU ",		ECC_C_TRAP,	CPU_IV,
		"IVU",
		FM_EREPORT_PAYLOAD_SYSTEM1,
		FM_EREPORT_CPU_USIII_IVU,
	C_AFSR_IVC,	"IVC ",		ECC_C_TRAP,	CPU_IV,
		"IVC",
		FM_EREPORT_PAYLOAD_SYSTEM1,
		FM_EREPORT_CPU_USIII_IVC,

	/* Sentinel: marks the end of the table. */
	0,		NULL,		0,		0,
		NULL,
		FM_EREPORT_PAYLOAD_UNKNOWN,
		FM_EREPORT_CPU_USIII_UNKNOWN,
};
2N/A
/*
 * Prioritized list of Error bits for AFAR overwrite.
 * See Cheetah PRM P.6.1
 *   Class 4:  UCC, UCU
 *   Class 3:  UE, EDU, EMU, WDU, CPU
 *   Class 2:  CE, EDC, EMC, WDC, CPC
 *   Class 1:  TO, BERR
 *
 * Highest-priority class first; the list is scanned in order and
 * terminated by the trailing 0 entry.
 */
uint64_t afar_overwrite[] = {
	C_AFSR_UCC | C_AFSR_UCU,
	C_AFSR_UE | C_AFSR_EDU | C_AFSR_EMU | C_AFSR_WDU | C_AFSR_CPU,
	C_AFSR_CE | C_AFSR_EDC | C_AFSR_EMC | C_AFSR_WDC | C_AFSR_CPC,
	C_AFSR_TO | C_AFSR_BERR,
	0
};
2N/A
/*
 * Prioritized list of Error bits for ESYND (ECC syndrome) overwrite.
 * See Cheetah PRM P.6.2
 *   Class 2:  UE, IVU, EDU, WDU, UCU, CPU
 *   Class 1:  CE, IVC, EDC, WDC, UCC, CPC
 *
 * Highest-priority class first; terminated by the trailing 0 entry.
 */
uint64_t esynd_overwrite[] = {
	C_AFSR_UE | C_AFSR_IVU | C_AFSR_EDU | C_AFSR_WDU | C_AFSR_UCU |
	    C_AFSR_CPU,
	C_AFSR_CE | C_AFSR_IVC | C_AFSR_EDC | C_AFSR_WDC | C_AFSR_UCC |
	    C_AFSR_CPC,
	0
};
2N/A
/*
 * Prioritized list of Error bits for MSYND (Mtag syndrome) overwrite.
 * See Cheetah PRM P.6.3
 *   Class 2:  EMU
 *   Class 1:  EMC
 *
 * Highest-priority class first; terminated by the trailing 0 entry.
 */
uint64_t msynd_overwrite[] = {
	C_AFSR_EMU,
	C_AFSR_EMC,
	0
};
2N/A
2N/A/*
2N/A * change cpu speed bits -- new speed will be normal-speed/divisor.
2N/A *
2N/A * The Jalapeno memory controllers are required to drain outstanding
2N/A * memory transactions within 32 JBus clocks in order to be ready
2N/A * to enter Estar mode. In some corner cases however, that time
2N/A * fell short.
2N/A *
2N/A * A safe software solution is to force MCU to act like in Estar mode,
2N/A * then delay 1us (in ppm code) prior to assert J_CHNG_L signal.
2N/A * To reverse the effect, upon exiting Estar, software restores the
2N/A * MCU to its original state.
2N/A */
2N/A/* ARGSUSED1 */
2N/Avoid
2N/Acpu_change_speed(uint64_t divisor, uint64_t arg2)
2N/A{
2N/A bus_config_eclk_t *bceclk;
2N/A uint64_t reg;
2N/A
2N/A for (bceclk = bus_config_eclk; bceclk->divisor; bceclk++) {
2N/A if (bceclk->divisor != divisor)
2N/A continue;
2N/A reg = get_safari_config();
2N/A reg &= ~SAFARI_CONFIG_ECLK_MASK;
2N/A reg |= bceclk->mask;
2N/A set_safari_config(reg);
2N/A CPU->cpu_m.divisor = (uchar_t)divisor;
2N/A return;
2N/A }
2N/A /*
2N/A * We will reach here only if OBP and kernel don't agree on
2N/A * the speeds supported by the CPU.
2N/A */
2N/A cmn_err(CE_WARN, "cpu_change_speed: bad divisor %" PRIu64, divisor);
2N/A}
2N/A
/*
 * Cpu private initialization.  This includes allocating the cpu_private
 * data structure, initializing it, and initializing the scrubber for this
 * cpu.  This function calls cpu_init_ecache_scrub_dr to init the scrubber.
 * We use kmem_cache_create for the cheetah private data structure because
 * it needs to be allocated on a PAGESIZE (8192) byte boundary.
 */
void
cpu_init_private(struct cpu *cp)
{
	cheetah_private_t *chprp;
	int i;

	ASSERT(CPU_PRIVATE(cp) == NULL);

	/*
	 * The TL>0 error data must fit within the page-sized private area
	 * (the TL1 handlers index into it by physical address).
	 */
	/* LINTED: E_TRUE_LOGICAL_EXPR */
	ASSERT((offsetof(cheetah_private_t, chpr_tl1_err_data) +
	    sizeof (ch_err_tl1_data_t) * CH_ERR_TL1_TLMAX) <= PAGESIZE);

	/*
	 * Running with a Cheetah+, Jaguar, or Panther on a Cheetah CPU
	 * machine is not a supported configuration.  Attempting to do so
	 * may result in unpredictable failures (e.g. running Cheetah+
	 * CPUs with Cheetah E$ disp flush) so don't allow it.
	 *
	 * This is just defensive code since this configuration mismatch
	 * should have been caught prior to OS execution.
	 */
	if (!IS_CHEETAH(cpunodes[cp->cpu_id].implementation)) {
		cmn_err(CE_PANIC, "CPU%d: UltraSPARC-III+/IV/IV+ not"
		    " supported on UltraSPARC-III code\n", cp->cpu_id);
	}

	/*
	 * If the ch_private_cache has not been created, create it.
	 */
	if (ch_private_cache == NULL) {
		ch_private_cache = kmem_cache_create("ch_private_cache",
		    sizeof (cheetah_private_t), PAGESIZE, NULL, NULL,
		    NULL, NULL, static_arena, 0);
	}

	chprp = CPU_PRIVATE(cp) = kmem_cache_alloc(ch_private_cache, KM_SLEEP);

	/* Zero everything, then mark all logout areas as unused. */
	bzero(chprp, sizeof (cheetah_private_t));
	chprp->chpr_fecctl0_logout.clo_data.chd_afar = LOGOUT_INVALID;
	chprp->chpr_cecc_logout.clo_data.chd_afar = LOGOUT_INVALID;
	chprp->chpr_async_logout.clo_data.chd_afar = LOGOUT_INVALID;
	for (i = 0; i < CH_ERR_TL1_TLMAX; i++)
		chprp->chpr_tl1_err_data[i].ch_err_tl1_logout.clo_data.chd_afar
		    = LOGOUT_INVALID;

	chprp->chpr_icache_size = CH_ICACHE_SIZE;
	chprp->chpr_icache_linesize = CH_ICACHE_LSIZE;

	cpu_init_ecache_scrub_dr(cp);

	chprp->chpr_ec_set_size = cpunodes[cp->cpu_id].ecache_size /
	    cpu_ecache_nway();

	adjust_hw_copy_limits(cpunodes[cp->cpu_id].ecache_size);
	/*
	 * Record the physical address of the private area so the TL>0
	 * error handlers can reach it without a VA translation.
	 */
	ch_err_tl1_paddrs[cp->cpu_id] = va_to_pa(chprp);
	ASSERT(ch_err_tl1_paddrs[cp->cpu_id] != -1);
}
/*
 * Clear the error state registers for this CPU.
 * For Cheetah, just clear the AFSR.
 *
 * NOTE(review): the fatal error bits are masked out of the value written
 * back -- presumably so that (write-one-to-clear) fatal state is left
 * intact for later inspection; confirm against the AFSR definition in
 * the Cheetah PRM.
 */
void
set_cpu_error_state(ch_cpu_errors_t *cpu_error_regs)
{
	set_asyncflt(cpu_error_regs->afsr & ~C_AFSR_FATAL_ERRS);
}
/*
 * For Cheetah, the error recovery code uses an alternate flush area in the
 * TL>0 fast ECC handler.  ecache_tl1_flushaddr is the physical address of
 * this exclusive displacement flush area.  It stays at the invalid value
 * (uint64_t)-1 until ecache_init_scrub_flush_area() resolves it.
 */
uint64_t ecache_tl1_flushaddr = (uint64_t)-1; /* physaddr for E$ flushing */
/*
 * Allocate and initialize the exclusive displacement flush area.
 * Called twice.  The first time reserves the virtual address range; the
 * second (with a NULL argument) resolves it to a physical address.
 */
caddr_t
ecache_init_scrub_flush_area(caddr_t alloc_base)
{
	static caddr_t ecache_tl1_virtaddr;

	if (alloc_base == NULL) {
		/*
		 * Second call: translate the previously reserved virtual
		 * address into the physical address used by the TL>0
		 * fast ECC handler.
		 */
		ASSERT(ecache_tl1_virtaddr != NULL);
		ecache_tl1_flushaddr = va_to_pa(ecache_tl1_virtaddr);
		ASSERT(ecache_tl1_flushaddr != ((uint64_t)-1));
	} else {
		/*
		 * First call: reserve an exclusive flush area that is twice
		 * the largest supported E$ size, physically contiguous, and
		 * aligned on twice the largest E$ size boundary.
		 */
		unsigned flush_size = 2 * CH_ECACHE_8M_SIZE;

		ecache_tl1_virtaddr =
		    (caddr_t)roundup((uintptr_t)alloc_base, flush_size);
		alloc_base = ecache_tl1_virtaddr + flush_size;
	}
	return (alloc_base);
}
/*
 * Update cpu_offline_set so the scrubber knows which cpus are offline.
 * Registered as a CPU setup callback; always succeeds.
 */
/*ARGSUSED*/
int
cpu_scrub_cpu_setup(cpu_setup_t what, int cpuid, void *arg)
{
	if (what == CPU_ON || what == CPU_INIT)
		CPUSET_DEL(cpu_offline_set, cpuid);
	else if (what == CPU_OFF)
		CPUSET_ADD(cpu_offline_set, cpuid);

	return (0);
}