opl_olympus.c revision febcc4a52c3ed7fe3a106da2c2ba52c56afd5111
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Support for Olympus-C (SPARC64-VI) and Jupiter (SPARC64-VII).
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/machthread.h>
#include <sys/elf_SPARC.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_kmem.h>
#include <sys/opl_olympus_regs.h>
#include <sys/opl_module.h>
#include <sys/dditypes.h>
#include <sys/cpu_module.h>
#include <sys/sysmacros.h>
#include <sys/platform_module.h>
#include <sys/watchpoint.h>
/*
* Internal functions.
*/
static int cpu_sync_log_err(void *flt);
static int prom_SPARC64VII_support_enabled(void);
static void opl_ta3(void);
static int plat_prom_preserve_kctx_is_supported(void);
/*
* Error counters resetting interval.
*/
/*
* PA[22:0] represent Displacement in Jupiter
* configuration space.
*/
int cpu_berr_to_verbose = 0;
/*
* Set to 1 if booted with all Jupiter cpus (all-Jupiter features enabled).
*/
int cpu_alljupiter = 0;
static int min_ecache_size;
static uint_t priv_hcl_1;
static uint_t priv_hcl_2;
static uint_t priv_hcl_4;
static uint_t priv_hcl_8;
/*
* Olympus error log
*/
static opl_errlog_t *opl_err_log;
/*
* OPL ta 3 save area.
*/
char *opl_ta3_save;
/*
* UE is classified into four classes (MEM, CHANNEL, CPU, PATH).
* No other ecc_type_info entries may be inserted between the following
* four UE classes.
*/
"Uncorrectable ECC", FM_EREPORT_PAYLOAD_SYNC,
"Uncorrectable ECC", FM_EREPORT_PAYLOAD_SYNC,
"Uncorrectable ECC", FM_EREPORT_PAYLOAD_SYNC,
"Uncorrectable ECC", FM_EREPORT_PAYLOAD_SYNC,
"Bus Error", FM_EREPORT_PAYLOAD_SYNC,
"Bus Timeout", FM_EREPORT_PAYLOAD_SYNC,
"TLB MultiHit", FM_EREPORT_PAYLOAD_SYNC,
"TLB Parity", FM_EREPORT_PAYLOAD_SYNC,
"IAUG CRE", FM_EREPORT_PAYLOAD_URGENT,
UGESR_IAUG_TSBCTXT, "IAUG_TSBCTXT",
"IAUG TSBCTXT", FM_EREPORT_PAYLOAD_URGENT,
"IUG TSBP", FM_EREPORT_PAYLOAD_URGENT,
"IUG PSTATE", FM_EREPORT_PAYLOAD_URGENT,
"IUG TSTATE", FM_EREPORT_PAYLOAD_URGENT,
"IUG FREG", FM_EREPORT_PAYLOAD_URGENT,
"IUG RREG", FM_EREPORT_PAYLOAD_URGENT,
"AUG SDC", FM_EREPORT_PAYLOAD_URGENT,
"IUG WDT", FM_EREPORT_PAYLOAD_URGENT,
"IUG DTLB", FM_EREPORT_PAYLOAD_URGENT,
"IUG ITLB", FM_EREPORT_PAYLOAD_URGENT,
UGESR_IUG_COREERR, "IUG_COREERR",
"IUG COREERR", FM_EREPORT_PAYLOAD_URGENT,
"MULTI DAE", FM_EREPORT_PAYLOAD_URGENT,
"MULTI IAE", FM_EREPORT_PAYLOAD_URGENT,
"MULTI UGE", FM_EREPORT_PAYLOAD_URGENT,
0, NULL, 0, 0,
NULL, 0, 0,
};
/*
* Set up trap handlers for the 0xA, 0x32, and 0x40 trap types,
* and for "ta 3" and "ta 4".
*/
void
cpu_init_trap(void)
{
}
static int
{
int value;
case sizeof (int):
break;
default:
break;
}
return (value);
}
/*
* Set the magic constants of the implementation.
*/
/*ARGSUSED*/
void
{
int i, a;
static struct {
char *name;
int *var;
int defval;
} prop[] = {
};
i = 0; a = vac_size;
while (a >>= 1)
++i;
vac_shift = i;
vac = 1;
}
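/*
 * Illustrative sketch, not part of the original source: the loop above
 * simply derives vac_shift as log2(vac_size), assuming vac_size is a
 * power of two.  A standalone equivalent would be:
 *
 *	static int
 *	log2_pow2(uint_t size)
 *	{
 *		int shift = 0;
 *
 *		while (size >>= 1)
 *			shift++;
 *		return (shift);
 *	}
 *
 * e.g. log2_pow2(0x20000) returns 17.
 */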
/*
* Enable features for Jupiter-only domains.
*/
void
cpu_fix_alljupiter(void)
{
if (!prom_SPARC64VII_support_enabled()) {
/*
* Do not enable all-Jupiter features and do not turn on
* the cpu_alljupiter flag.
*/
return;
}
cpu_alljupiter = 1;
/*
* Enable ima hwcap for Jupiter-only domains. DR will prevent
* addition of Olympus-C to all-Jupiter domains to preserve ima
* hwcap semantics.
*/
}
#ifdef OLYMPUS_C_REV_B_ERRATA_XCALL
/*
* Quick and dirty way to redefine IDSR_BN_SETS locally in
* OPL as 31 instead of the standard value of 32. This works
* around a problem in Rev. B of the Olympus-C processor when
* handling more than 31 xcall broadcasts.
*/
#define IDSR_BN_SETS 31
#endif /* OLYMPUS_C_REV_B_ERRATA_XCALL */
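/*
 * Hedged sketch, not from the original code: with IDSR_BN_SETS lowered
 * to 31, the broadcast loop below never has more than 31 cross-calls
 * outstanding at once.  The shipping logic amounts to:
 *
 *	shipped = 0;
 *	for (i = 0; i < NCPU; i++) {
 *		if (!CPU_IN_SET(set, i))
 *			continue;
 *		if (shipped < IDSR_BN_SETS) {
 *			ship_mondo_to(i);	(hypothetical helper)
 *			shipped++;
 *			CPUSET_DEL(set, i);
 *		}
 *	}
 *
 * Remaining CPUs are shipped to later, once earlier slots retire.
 */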
void
{
#if (NCPU > IDSR_BN_SETS)
int index = 0;
int ncpuids = 0;
#endif
#ifdef OLYMPUS_C_REV_A_ERRATA_XCALL
int bn_sets = IDSR_BN_SETS;
#endif
#ifdef OLYMPUS_C_REV_A_ERRATA_XCALL
ver = ultra_getver();
bn_sets = 1;
#endif
#if (NCPU <= IDSR_BN_SETS)
for (i = 0; i < NCPU; i++)
if (CPU_IN_SET(set, i)) {
CPUSET_DEL(set, i);
if (CPUSET_ISNULL(set))
break;
}
#else
for (i = 0; i < NCPU; i++)
if (CPU_IN_SET(set, i)) {
ncpuids++;
/*
* Ship only to the first (IDSR_BN_SETS) CPUs. If we
* find we have shipped to more than (IDSR_BN_SETS)
* CPUs, set "index" to the highest numbered CPU in
* the set so we can ship to other CPUs a bit later on.
*/
#ifdef OLYMPUS_C_REV_A_ERRATA_XCALL
#else
if (shipped < IDSR_BN_SETS) {
#endif
CPUSET_DEL(set, i);
if (CPUSET_ISNULL(set))
break;
} else
index = (int)i;
}
#endif
for (;;) {
#if (NCPU <= IDSR_BN_SETS)
if (idsr == 0)
break;
#else
break;
#endif
/*
* If there is a big jump between the current tick
* count and lasttick, we have probably hit a break
* point. Adjust endtick accordingly to avoid panic.
*/
if (panic_quiesce)
return;
#ifdef OLYMPUS_C_REV_A_ERRATA_XCALL
for (i = 0; i < bn_sets; i++) {
#else
for (i = 0; i < IDSR_BN_SETS; i++) {
#endif
if (idsr & (IDSR_NACK_BIT(i) |
IDSR_BUSY_BIT(i))) {
}
}
}
#ifdef OLYMPUS_C_REV_B_ERRATA_XCALL
/*
* Only proceed to send more xcalls if all the
* cpus in the previous IDSR_BN_SETS batch have completed.
*/
if (curbusy) {
busy++;
continue;
}
#endif /* OLYMPUS_C_REV_B_ERRATA_XCALL */
#if (NCPU > IDSR_BN_SETS)
if (cpus_left) {
do {
/*
* Sequence through and ship to the
* remainder of the CPUs in the system
* (i.e. other than the first
* (IDSR_BN_SETS)) in reverse order.
*/
i = IDSR_BUSY_IDX(lo);
shipped++;
/*
* If we've processed all the CPUs,
* exit the loop now and save
* instructions.
*/
break;
break;
}
} while (cpus_left);
continue;
}
}
#endif
#ifndef OLYMPUS_C_REV_B_ERRATA_XCALL
if (curbusy) {
busy++;
continue;
}
#endif /* OLYMPUS_C_REV_B_ERRATA_XCALL */
#ifdef SEND_MONDO_STATS
{
if (n < 8192)
x_nack_stimes[n >> 7]++;
}
#endif
;
do {
i = IDSR_NACK_IDX(lo);
} while (curnack);
nack++;
busy = 0;
}
#ifdef SEND_MONDO_STATS
{
if (n < 8192)
x_set_stimes[n >> 7]++;
else
}
x_set_cpus[shipped]++;
#endif
}
/*
* Cpu private initialization.
*/
void
{
}
}
void
cpu_setup(void)
{
extern int at_flags;
extern int cpc_has_overflow_intr;
extern uint64_t opl_cpu0_err_log;
/*
* Initialize Error log Scratch register for error handling.
*/
/*
* Enable MMU translating multiple page sizes for
* sITLB and sDTLB.
*/
/*
* Setup chip-specific trap handlers.
*/
/*
* Due to the number of entries in the fully-associative TLB,
* this may have to be tuned lower than on Spitfire.
*/
/*
* Block stores do not invalidate all pages of the d$; pagecopy
* et al. need virtual translations with virtual coloring taken
* into account on the load side.
*/
if (use_page_coloring) {
do_pg_coloring = 1;
}
isa_list =
"sparcv9+vis2 sparcv9+vis sparcv9 "
"sparcv8plus+vis2 sparcv8plus+vis sparcv8plus "
"sparcv8 sparcv8-fsmuld sparcv7 sparc";
/*
* On SPARC64-VI, there's no hole in the virtual address space
*/
hole_start = hole_end = 0;
/*
* The kpm mapping window.
* kpm_size:
* The size of a single kpm range.
* The overall size will be: kpm_size * vac_colors.
* kpm_vbase:
* The virtual start address of the kpm range within the kernel
* virtual address space. kpm_vbase has to be kpm_size aligned.
*/
kpm_size_shift = 47;
kpm_smallpages = 1;
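/*
 * For illustration only (not in the original source): with
 * kpm_size_shift set to 47, the kpm range size and overall window work
 * out roughly as:
 *
 *	uint64_t kpm_range = UINT64_C(1) << 47;		  assumed kpm_size
 *	uint64_t kpm_total = kpm_range * vac_colors;	  overall size
 *	ASSERT(((uintptr_t)kpm_vbase & (kpm_range - 1)) == 0);
 *
 * The names other than vac_colors and kpm_vbase are made up for this
 * sketch.
 */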
/*
* The traptrace code uses either %tick or %stick for
* timestamping. We have %stick so we can use it.
*/
traptrace_use_stick = 1;
/*
* SPARC64-VI has a performance counter overflow interrupt
*/
/*
* Declare that this architecture/cpu combination does not support
* fpRAS.
*/
fpras_implemented = 0;
}
/*
* Called by setcpudelay
*/
void
cpu_init_tick_freq(void)
{
/*
* For SPARC64-VI we want to use the system clock rate as
* the basis for low level timing, due to support of mixed
* speed CPUs and power management.
*/
if (system_clock_freq == 0)
}
#ifdef SEND_MONDO_STATS
#endif
/*
* Note: A version of this function is used by the debugger via the KDI,
* and must be kept in sync with this version. Any changes made to this
* function to support new chips or to accommodate errata must also be included
* in the KDI-specific version. See us3_kdi.c.
*/
void
send_one_mondo(int cpuid)
{
for (;;) {
if (idsr == 0)
break;
/*
* If there is a big jump between the current tick
* count and lasttick, we have probably hit a break
* point. Adjust endtick accordingly to avoid panic.
*/
if (panic_quiesce)
return;
}
busy++;
continue;
}
drv_usecwait(1);
nack++;
busy = 0;
}
#ifdef SEND_MONDO_STATS
{
if (n < 8192)
x_one_stimes[n >> 7]++;
else
}
#endif
}
/*
* init_mmu_page_sizes is set to one after the bootup time initialization
* via mmu_init_mmu_page_sizes, to indicate that mmu_page_sizes has a
* valid value.
*
* mmu_disable_ism_large_pages and mmu_disable_large_pages are the mmu-specific
* versions of disable_ism_large_pages and disable_large_pages, and feed back
* into those two hat variables at hat initialization time.
*
*/
int init_mmu_page_sizes = 0;
static uint_t mmu_disable_large_pages = 0;
(1 << TTE512K));
/*
* Re-initialize mmu_page_sizes and friends, for SPARC64-VI mmu support.
* Called during very early bootup from check_cpus_set().
* Can be called to verify that mmu_page_sizes are set up correctly.
*
* Set Olympus defaults. We do not use the function parameter.
*/
/*ARGSUSED*/
int
{
if (!init_mmu_page_sizes) {
init_mmu_page_sizes = 1;
return (0);
}
return (1);
}
/* SPARC64-VI worst case DTLB parameters */
#ifndef LOCKED_DTLB_ENTRIES
#endif
#define TOTAL_DTLB_ENTRIES 32
#define AVAIL_32M_ENTRIES 0
#define AVAIL_256M_ENTRIES 0
/*
* The function returns the mmu-specific values for the
* hat's disable_large_pages, disable_ism_large_pages,
* disable_auto_data_large_pages, and
* disable_auto_text_large_pages variables.
*/
{
uint_t pages_disable = 0;
extern int use_text_pgsz64K;
extern int use_text_pgsz512K;
} else if (flag == HAT_LOAD_SHARE) {
} else if (flag == HAT_AUTO_DATA) {
} else if (flag == HAT_AUTO_TEXT) {
if (use_text_pgsz512K) {
}
if (use_text_pgsz64K) {
}
}
return (pages_disable);
}
/*
* mmu_init_large_pages is called with the desired ism_pagesize parameter.
* It may be called from set_platform_defaults, if some value other than 32M
* is desired. mmu_ism_pagesize is the tunable. If it has a bad value,
* then only warn, since it would be bad form to panic due to a user typo.
*
* The function re-initializes the mmu_disable_ism_large_pages variable.
*/
void
{
switch (ism_pagesize) {
case MMU_PAGESIZE4M:
break;
case MMU_PAGESIZE32M:
break;
case MMU_PAGESIZE256M:
break;
default:
break;
}
}
/*
* Function to reprogram the TLBs when page sizes used
* by a process change significantly.
*/
void
{
/*
* Don't program 2nd dtlb for kernel and ism hat
*/
/*
* hat->sfmmu_pgsz[] is an array whose elements
* contain a sorted order of page sizes. Element
* 0 is the most commonly used page size, followed
* by element 1, and so on.
*
* ttecnt[] is an array of per-page-size page counts
* mapped into the process.
*
* If the HAT's choice for page sizes is unsuitable,
* we can override it here. The new values written
* to the array will be handed back to us later to
* do the actual programming of the TLB hardware.
*
*/
/*
* This implements PAGESIZE programming of the sTLB
* if large TTE counts don't exceed the thresholds.
*/
/* otherwise, accept what the HAT chose for us */
}
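/*
 * Hedged example, not from the original file: the override described
 * above boils down to rewriting the leading entries of the page-size
 * preference array when the large-page TTE counts stay below their
 * thresholds, along the lines of:
 *
 *	if (ttecnt[TTE4M] < threshold_4m && ttecnt[TTE256M] == 0) {
 *		pgsz[0] = TTE8K;	 program PAGESIZE into the sTLB
 *		pgsz[1] = TTE4M;
 *	}
 *
 * "threshold_4m" and "pgsz" are illustrative names only.
 */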
/*
* The HAT calls this function when an MMU context is allocated so that we
* can reprogram the large TLBs appropriately for the new process using
* the context.
*
* The caller must hold the HAT lock.
*/
void
{
/*
* Don't program 2nd dtlb for kernel and ism hat
*/
return;
/*
* If supported, reprogram the TLBs to a larger pagesize.
*/
#ifdef DEBUG
int i;
/*
* Assert that cnum is invalid: the page size can only be
* changed after a process's contexts are invalidated.
*/
for (i = 0; i < max_mmu_ctxdoms; i++) {
}
#endif /* DEBUG */
}
/*
* sfmmu_setctx_sec() will take care of the
* rest of the dirty work for us.
*/
}
/*
* This function assumes that there are either four or six supported page
* sizes and at most two programmable TLBs, so we need to decide which
* page sizes are most important and then adjust the TLB page sizes
* accordingly (if supported).
*
* If these assumptions change, this function will need to be
* updated to support whatever the new limits are.
*/
void
{
/*
* We only consider reprogramming the TLBs if one or more of
* the two most used page sizes changes and we're using
* large pages in this process.
*/
if (SFMMU_LGPGS_INUSE(sfmmup)) {
/* Sort page sizes. */
for (i = 0; i < mmu_page_sizes; i++) {
}
for (j = 0; j < mmu_page_sizes; j++) {
max = i;
}
}
/* Check 2 largest values after the sort. */
}
}
}
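/*
 * Illustrative sketch (not part of the original source): picking the two
 * most heavily used page sizes is a small selection sort over per-size
 * TTE counts, e.g.:
 *
 *	for (i = 0; i < mmu_page_sizes - 1; i++) {
 *		max = i;
 *		for (j = i + 1; j < mmu_page_sizes; j++)
 *			if (sortcnt[j] > sortcnt[max])
 *				max = j;
 *		tmp = sortcnt[i];
 *		sortcnt[i] = sortcnt[max];
 *		sortcnt[max] = tmp;
 *	}
 *
 * "sortcnt" is a hypothetical working copy of the ttecnt[] counts; the
 * parallel page-size array would be swapped the same way, and entries 0
 * and 1 then compared against the sizes currently programmed.
 */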
/*
* Return processor specific async error structure
* size used.
*/
int
cpu_aflt_size(void)
{
return (sizeof (opl_async_flt_t));
}
/*
* The cpu_sync_log_err() function is called via the [uc]e_drain() function to
* post-process CPU events that are dequeued. As such, it can be invoked
* from softint context, from AST processing in the trap() flow, or from the
* panic flow. We decode the CPU-specific data, and take appropriate actions.
* Historically this entry point was used to log the actual cmn_err(9F) text;
* now with FMA it is used to prepare 'flt' to be converted into an ereport.
* With FMA this function now also returns a flag which indicates to the
* caller whether the ereport should be posted (1) or suppressed (0).
*/
/*ARGSUSED*/
static int
cpu_sync_log_err(void *flt)
{
/*
* No extra processing of urgent error events.
* Always generate ereports for these events.
*/
return (1);
/*
* Additional processing for synchronous errors.
*/
case OPL_CPU_INV_SFSR:
return (1);
case OPL_CPU_SYNC_UE:
/*
* The validity: SFSR_MK_UE bit has been checked
* in opl_cpu_sync_error()
* No more check is required.
*
* opl_flt->flt_eid_mod and flt_eid_sid have been set by H/W,
* and they have been retrieved in cpu_queue_events()
*/
/*
* We want to skip logging only if ALL the following
* conditions are true:
*
* 1. We are not panicking already.
* 2. The error is a memory error.
* 3. There is only one error.
* 4. The error is on a retired page.
* 5. The error occurred under on_trap
* protection AFLT_PROT_EC
*/
/*
* Do not log an error from
* the retired page
*/
return (0);
}
if (!panicstr)
}
return (1);
case OPL_CPU_SYNC_OTHERS:
/*
* For the following error cases, the processor HW does
* not set the flt_eid_mod/flt_eid_sid. Instead, SW will attempt
* to assign appropriate values here to reflect what we
* think is the most likely cause of the problem with respect to
* the particular error event. For Buserr and timeout
* error event, we will assign OPL_ERRID_CHANNEL as the
* most likely reason. For TLB parity or multiple hit
* error events, we will assign the reason as
* OPL_ERRID_CPU (cpu related problem) and set the
* flt_eid_sid to point to the cpuid.
*/
/*
* flt_eid_sid will not be used for this case.
*/
}
}
/*
* In case of no effective error bit
*/
}
break;
default:
return (1);
}
return (1);
}
/*
* Retire the bad page that may contain the flushed error.
*/
void
{
}
/*
* Invoked by error_init() early in startup and therefore before
* startup_errorq() is called to drain any error Q -
*
* startup()
* startup_end()
* error_init()
* cpu_error_init()
* errorq_init()
* errorq_drain()
* start_other_cpus()
*
* The purpose of this routine is to create error-related taskqs. Taskqs
* are used for this purpose because cpu_lock can't be grabbed from interrupt
* context.
*
*/
/*ARGSUSED*/
void
cpu_error_init(int items)
{
opl_err_log = (opl_errlog_t *)
"is not page aligned");
}
/*
* We route all errors through a single switch statement.
*/
void
{
case CPU_FAULT:
if (cpu_sync_log_err(aflt))
break;
case BUS_FAULT:
break;
default:
return;
}
}
/*
* Routine for panic hook callback from panic_idle().
*
* Nothing to do here.
*/
void
cpu_async_panic_callb(void)
{
}
/*
* Routine to return a string identifying the physical name
*/
/*ARGSUSED*/
int
{
int synd_code;
int ret;
/*
* An AFSR of -1 defaults to a memory syndrome.
*/
if (&plat_get_mem_unum) {
buf[0] = '\0';
*lenp = 0;
}
return (ret);
}
buf[0] = '\0';
*lenp = 0;
return (ENOTSUP);
}
/*
* Wrapper for cpu_get_mem_unum() routine that takes an
* async_flt struct rather than explicit arguments.
*/
int
{
/*
* We always pass -1 so that cpu_get_mem_unum will interpret this as a
* memory error.
*/
(uint64_t)-1,
}
/*
* This routine is a more generic interface to cpu_get_mem_unum()
* that may be used by other modules (e.g. mm).
*/
/*ARGSUSED*/
int
{
ushort_t flt_status = 0;
char unum[UNUM_NAMLEN];
/*
* Check for an invalid address.
*/
return (ENXIO);
else
if (ret != 0)
return (ret);
return (ENAMETOOLONG);
return (0);
}
/*
* Routine to return memory information associated
* with a physical address and syndrome.
*/
/*ARGSUSED*/
int
{
return (ENXIO);
if (p2get_mem_info != NULL)
else
return (ENOTSUP);
}
/*
* Routine to return a string identifying the physical
* name associated with a cpuid.
*/
int
{
int ret;
char unum[UNUM_NAMLEN];
if (&plat_get_cpu_unum) {
lenp)) != 0)
return (ret);
} else {
return (ENOTSUP);
}
return (ENAMETOOLONG);
return (0);
}
/*
* This routine exports the name buffer size.
*/
{
return (UNUM_NAMLEN);
}
/*
* Flush the entire ecache by ASI_L2_CNTL.U2_FLUSH
*/
void
cpu_flush_ecache(void)
{
}
static uint8_t
{
return (TRAP_TYPE_ECC_I);
return (TRAP_TYPE_ECC_D);
return (TRAP_TYPE_URGENT);
return (TRAP_TYPE_UNKNOWN);
}
/*
* Encode the data saved in the opl_async_flt_t struct into
* the FM ereport payload.
*/
/* ARGSUSED */
static void
{
char unum[UNUM_NAMLEN];
int len;
}
}
}
}
}
}
}
}
switch (opl_flt->flt_eid_mod) {
case OPL_ERRID_CPU:
break;
case OPL_ERRID_CHANNEL:
/*
* No resource is created but the cpumem DE will find
* the defective path by retrieving the EID from the SFSR, which is
* included in the payload.
*/
break;
case OPL_ERRID_MEM:
break;
case OPL_ERRID_PATH:
/*
* No resource is created but the cpumem DE will find
* the defective path by retrieving the EID from the SFSR, which is
* included in the payload.
*/
break;
}
}
/*
* Returns whether fault address is valid for this error bit and
* whether the address is "in memory" (i.e. pf_is_memory returns 1).
*/
/*ARGSUSED*/
static int
{
return ((t_afsr_bit & SFSR_MEMORY) &&
}
return (0);
}
/*
* In OPL SCF does the stick synchronization.
*/
void
sticksync_slave(void)
{
}
/*
* In OPL SCF does the stick synchronization.
*/
void
sticksync_master(void)
{
}
/*
* CPU private uninitialization. OPL cpus do not use the private area.
*/
void
{
}
/*
* Always flush an entire cache.
*/
void
cpu_error_ecache_flush(void)
{
}
void
{
return;
} else {
}
/*
* Create the scheme "cpu" FMRI.
*/
case OLYMPUS_C_IMPL:
break;
case JUPITER_IMPL:
break;
default:
break;
}
sbuf);
/*
* Encode all the common data into the ereport.
*/
/*
* Encode the error specific data that was saved in
* the async_flt structure into the ereport.
*/
} else {
}
}
void
{
int status;
}
void
{
}
void
{
/*
* Set hw copy limits.
*
* /etc/system will be parsed later and can override one or more
* of these settings.
*
* At this time, ecache size seems only mildly relevant.
* We seem to run into issues with the d-cache and stalls
* we see on misses.
*
* Cycle measurement indicates that 2 byte aligned copies fare
* little better than doing things with VIS at around 512 bytes.
* 4 byte aligned shows promise until around 1024 bytes. 8 Byte
* aligned is faster whenever the source and destination data
* in cache and the total size is less than 2 Kbytes. The 2K
* limit seems to be driven by the 2K write cache.
* When more than 2K of copies are done in non-VIS mode, stores
* backup in the write cache. In VIS mode, the write cache is
* bypassed, allowing faster cache-line writes aligned on cache
* boundaries.
*
* In addition, in non-VIS mode, there is no prefetching, so
* for larger copies, the advantage of prefetching to avoid even
* occasional cache misses is enough to justify using the VIS code.
*
* During testing, it was discovered that netbench ran 3% slower
* when hw_copy_limit_8 was 2K or larger. Apparently for server
* applications, data is only used once (copied to the output
* buffer, then copied by the network device off the system). Using
* the VIS copy saves more L2 cache state. Network copies are
* around 1.3K to 1.5K in size for historical reasons.
*
* Therefore, a limit of 1K bytes will be used for the 8 byte
* aligned copy even for large caches and 8 MB ecache. The
* infrastructure to allow different limits for different sized
* caches is kept to allow further tuning in later releases.
*/
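/*
 * Illustrative note, not from the original source: the bcopy/kcopy
 * paths consult these limits roughly as "use the VIS block-copy path
 * once the length crosses the limit for the given alignment", e.g.:
 *
 *	if ((((uintptr_t)src | (uintptr_t)dst | len) & 0x7) == 0 &&
 *	    len > hw_copy_limit_8)
 *		take the VIS/block-copy path;
 *	else
 *		take the plain load/store path;
 *
 * The exact checks live in the SPARC bcopy assembly, not here.
 */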
if (min_ecache_size == 0 && use_hw_bcopy) {
/*
* First time through - should be before /etc/system is read.
* Could skip the checks for zero but this lets us
* preserve any debugger rewrites.
*/
if (hw_copy_limit_1 == 0) {
}
if (hw_copy_limit_2 == 0) {
}
if (hw_copy_limit_4 == 0) {
}
if (hw_copy_limit_8 == 0) {
}
} else {
/*
* MP initialization. Called *after* /etc/system has
* been parsed. One CPU has already been initialized.
* Need to cater for /etc/system having scragged one
* of our values.
*/
if (ecache_size == min_ecache_size) {
/*
* Same size ecache. We do nothing unless we
* have a pessimistic ecache setting. In that
* case we become more optimistic (if the cache is
* large enough).
*/
/*
* Need to adjust hw_copy_limit* from our
* pessimistic uniprocessor value to a more
* optimistic UP value *iff* it hasn't been
* reset.
*/
if ((ecache_size > 1048576) &&
(priv_hcl_8 == hw_copy_limit_8)) {
if (ecache_size <= 2097152)
hw_copy_limit_8 = 4 *
else if (ecache_size <= 4194304)
hw_copy_limit_8 = 4 *
else
hw_copy_limit_8 = 4 *
}
}
} else if (ecache_size < min_ecache_size) {
/*
* A different ecache size. Can this even happen?
*/
if (priv_hcl_8 == hw_copy_limit_8) {
/*
* The previous value that we set
* is unchanged (i.e., it hasn't been overridden).
*/
if (ecache_size <= 1048576)
hw_copy_limit_8 = 8 *
else if (ecache_size <= 2097152)
hw_copy_limit_8 = 8 *
else if (ecache_size <= 4194304)
hw_copy_limit_8 = 8 *
else
hw_copy_limit_8 = 10 *
}
}
}
}
#define VIS_BLOCKSIZE 64
int
{
if (watched)
return (ret);
}
void
{
/*
* We do not need to re-initialize cpu0 registers.
*/
/*
* Support for "ta 3"
*/
opl_ta3();
return;
}
/*
* Initialize Error log Scratch register for error handling.
*/
ERRLOG_BUFSZ * (getprocessorid())));
/*
* Enable MMU translating multiple page sizes for
* sITLB and sDTLB.
*/
}
/*
* Queue one event in ue_queue based on ecc_type_to_info entry.
*/
static void
{
if (reason &&
}
}
/*
* Queue events on async event queue one event per error bit.
* Return number of events queued.
*/
int
{
int nevents = 0;
/*
* Queue expected errors; the error bit and fault type must match
* in the ecc_type_to_info table.
*/
eccp++) {
/*
* UE error event can be further
* classified/broken down into finer granularity
* based on the flt_eid_mod value set by HW. We do
* special handling here so that we can report UE
* error in finer granularity as ue_mem,
* ue_channel, ue_cpu or ue_path.
*/
/*
* Need to advance eccp pointer by flt_eid_mod
* so that we get an appropriate ecc pointer
*
* EID # of advances
* ----------------------------------
* OPL_ERRID_MEM 0
* OPL_ERRID_CHANNEL 1
* OPL_ERRID_CPU 2
* OPL_ERRID_PATH 3
*/
}
nevents++;
}
}
return (nevents);
}
/*
* Sync. error wrapper functions.
* We use these functions to pass information here from the
* nucleus trap handler about the trap type (data or
* instruction) and the trap level (0 or above 0). This way we
* avoid using SFSR's reserved bits.
*/
#define OPL_SYNC_TL0 0
#define OPL_SYNC_TL1 1
#define OPL_ISYNC_ERR 0
#define OPL_DSYNC_ERR 1
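/*
 * Hedged sketch, not the original implementation: each wrapper below is
 * expected to do no more than forward to the common sync-error handler
 * with the trap level and trap type encoded by the constants above,
 * along the lines of:
 *
 *	void
 *	opl_cpu_isync_tl0_error(struct regs *rp, ulong_t p_reg, ulong_t p_tlo)
 *	{
 *		opl_cpu_sync_error(rp, p_reg, p_tlo, OPL_SYNC_TL0,
 *		    OPL_ISYNC_ERR);
 *	}
 *
 * The wrapper and handler names and signatures shown are assumptions.
 */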
void
{
}
void
{
}
void
{
}
void
{
}
/*
* The fj sync err handler transfers control here for UE, BERR, TO, TLB_MUL
* and TLB_PRT.
* This function is modeled on cpu_deferred_error().
*/
static void
{
int trampolined = 0;
char pr_reason[MAX_REASON_STRING];
int expected = DDI_FM_ERR_UNEXPECTED;
/*
* We need to look at p_flag to determine if the thread detected an
* error while dumping core. We can't grab p_lock here, but it's ok
* because we just need a consistent snapshot and we know that everyone
* else will store a consistent set of bits while holding p_lock. We
* don't have to worry about a race because SDOCORE is set once prior
* to doing i/o from the process's address space and is never cleared.
*/
pr_reason[0] = '\0';
/*
* handle the specific error
*/
TSTATE_PRIV) ? 1 : 0));
/*
* So, clear all error bits to avoid mis-handling and force the system
* to panic.
* We skip all the procedures below down to the panic message call.
*/
}
/*
* If either the UE or the MK bit is off, this is not a valid UE error.
* If it is not a valid UE error, clear the UE & MK_UE bits to prevent
* mis-handling below.
* aflt->flt_stat keeps the original bits as a reference.
*/
(SFSR_MK_UE|SFSR_UE)) {
}
/*
* If the trap occurred in privileged mode at TL=0, we need to check to
* see if we were executing in the kernel under on_trap() or t_lofault
* protection. If so, modify the saved registers so that we return
* from the trap to the appropriate trampoline routine.
*/
trampolined = 1;
}
trampolined = 1;
/*
* for peeks and caut_gets errors are expected
*/
if (!hp)
}
trampolined = 1;
}
}
/*
* If we're in user mode or we're doing a protected copy, we either
* want the ASTON code below to send a signal to the user process
* or we want to panic if aft_panic is set.
*
* If we're in privileged mode and we're not doing a copy, then we
* need to check if we've trampolined. If we haven't trampolined,
* we should panic.
*/
} else if (!trampolined) {
}
/*
* If we've trampolined due to a privileged TO or BERR, or if an
* unprivileged TO or BERR occurred, we don't want to enqueue an
* event for that TO or BERR. Queue all other events (if any) besides the TO/BERR.
*/
if (trampolined) {
/*
* User mode, suppress messages if
* cpu_berr_to_verbose is not set.
*/
if (!cpu_berr_to_verbose)
}
}
}
/*
* Panic here if aflt->flt_panic has been set. Enqueued errors will
* be logged as part of the panic flow.
*/
if (pr_reason[0] == 0)
}
/*
* If we queued an error and we are going to return from the trap and
* the error was in user mode or inside of a copy routine, set AST flag
* so the queue will be drained before returning to user mode. The
* AST processing will also act on our failure policy.
*/
int pcb_flag = 0;
pcb_flag |= ASYNC_HWERR;
pcb_flag |= ASYNC_BERR;
}
}
/*ARGSUSED*/
void
{
char pr_reason[MAX_REASON_STRING];
/* normalize tl */
pr_reason[0] = '\0';
1 : 0));
/*
* So we have to set it here.
*/
}
fm_panic("Urgent Error");
}
/*
* Initialization of error counter resetting.
*/
/* ARGSUSED */
static void
{
}
void
cpu_mp_init(void)
{
(void) cyclic_add_omni(&hdlr);
}
int heaplp_use_stlb = -1;
void
{
if (heaplp_use_stlb == 0) {
/* do not reprogram stlb */
} else if (!plat_prom_preserve_kctx_is_supported()) {
/* OBP does not support non-zero primary context */
heaplp_use_stlb = 0;
}
}
{
if (lpsize == 0) {
return (MMU_PAGESIZE4M);
}
return (lpsize);
}
}
/*
* Support for ta 3.
* We allocate a per-cpu buffer here for
* saving the current register window.
*/
typedef struct win_regs {
uint64_t l[8];
uint64_t i[8];
} win_regs_t;
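/*
 * For illustration (not part of the original code): opl_ta3_save is
 * assumed to hold one win_regs_t slot per cpu, so a cpu would locate
 * its slot roughly as:
 *
 *	win_regs_t *slot =
 *	    (win_regs_t *)opl_ta3_save + CPU->cpu_id;
 *
 * The exact layout and alignment of the save area are assumptions.
 */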
static void
opl_ta3(void)
{
}
/*
* The following functions are unused in the
* OPL cpu module. They are defined here to resolve
* dependencies in the "unix" module.
* Unused functions that should never be called in
* OPL are coded with ASSERT(0).
*/
void
cpu_disable_errors(void)
{}
void
cpu_enable_errors(void)
{ ASSERT(0); }
/*ARGSUSED*/
void
{ ASSERT(0); }
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
{ ASSERT(0); }
/*ARGSUSED*/
void
{ ASSERT(0); }
/*ARGSUSED*/
void
{ ASSERT(0); }
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
{}
/* ARGSUSED */
void
{ ASSERT(0); }
void
cpu_init_cache_scrub(void)
{}
/* ARGSUSED */
int
{
if (&plat_get_mem_sid) {
} else {
return (ENOTSUP);
}
}
/* ARGSUSED */
int
{
if (&plat_get_mem_addr) {
} else {
return (ENOTSUP);
}
}
/* ARGSUSED */
int
{
if (&plat_get_mem_offset) {
} else {
return (ENOTSUP);
}
}
/*ARGSUSED*/
void
{ ASSERT(0); }
/*ARGSUSED*/
void
{ ASSERT(0); }
/*ARGSUSED*/
void
{ ASSERT(0); }
/*ARGSUSED*/
int
{
ASSERT(0);
return (0);
}
/*ARGSUSED*/
char *
{
ASSERT(0);
return (NULL);
}
#define PROM_SPARC64VII_MODE_PROPNAME "SPARC64-VII-mode"
/*
* Check for existence of OPL OBP property that indicates
* SPARC64-VII support. By default, only enable Jupiter
* features if the property is present. It will be
* present in all-Jupiter domains by OBP if the domain has
* been selected by the user on the system controller to
* run in Jupiter mode. Basically, this OBP property must
* be present to turn on the cpu_alljupiter flag.
*/
static int
{
int val;
}
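/*
 * Hedged sketch, not the original body: a presence check like this
 * typically just asks OBP for the property length on the relevant node,
 * e.g.:
 *
 *	return (prom_getproplen(prom_rootnode(),
 *	    PROM_SPARC64VII_MODE_PROPNAME) >= 0);
 *
 * The node queried and the exact test are assumptions.
 */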
#define PROM_KCTX_PRESERVED_PROPNAME "context0-page-size-preserved"
/*
* Check for existence of OPL OBP property that indicates support for
* preserving Solaris kernel page sizes when entering OBP. We need to
* check the prom tree since the ddi tree is not yet built when the
* platform startup sequence is called.
*/
static int
{
int val;
/*
* Check for existence of context0-page-size-preserved property
* in virtual-memory prom node.
*/
}