Lines Matching refs:CPU

92  * Per CPU pointers to physical addresses of TL>0 logout data areas.
105 * Per CPU pending error at TL>0, used by level15 softint handler
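
The two comments above (source lines 92 and 105) describe the per-CPU bookkeeping for TL>0 error logout. A minimal sketch of what that bookkeeping could look like, assuming an NCPU constant and modeling the ch_err_tl1_* names that appear later in this listing (line 1511); the element types are illustrative, not the driver's actual declarations:

#include <stdint.h>

#define NCPU 64                     /* assumption: max CPUs */

/* Physical address of each CPU's TL>0 logout data area (0 if unset,
 * as tested at source line 1511). */
uint64_t ch_err_tl1_paddrs[NCPU];

/* Per-CPU flag: a TL>0 error is pending for the level-15 soft
 * interrupt handler to process back at TL=0. */
uint8_t ch_err_tl1_pending[NCPU];
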
227 #define S003 149 /* Syndrome 0x003 => likely from CPU/EDU:ST/FRU/BP */
232 #define S071 150 /* Syndrome 0x071 => likely from WDU/CPU */
779 * 3) when the CPU that holds the TSB mapping locked tries to
781 * us or the CPU we're trying to recover, and will in turn
888 * even for a different target CPU.
919 CHEETAH_LIVELOCK_ENTRY_SET(histp, buddy, CPU->cpu_id);
994 * This is called by the cyclic framework when this CPU comes online
1047 if ((CPU->cpu_next_onln != CPU) && (sendmondo_in_recover == 0)) {
1048 xt_one(CPU->cpu_next_onln->cpu_id, (xcfunc_t *)xt_sync_tl1,
1081 CPU_STATS_ADDQ(CPU, sys, xcalls, 1);
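
Source lines 994-1081 outline a cyclic-driven send-mondo health check: if at least one other CPU is online and no mondo recovery is already in progress, the handler cross-traps a TL1 sync to the next online CPU and accounts for it as an xcall. A hedged reconstruction of that control flow, with every kernel external stubbed for illustration:

#include <stddef.h>

struct cpu { int cpu_id; struct cpu *cpu_next_onln; };
static struct cpu *CPU;                   /* current CPU (stand-in) */
static volatile int sendmondo_in_recover; /* recovery-in-progress flag */

static void xt_sync_tl1(void) { }                        /* TL1 sync stub */
static void xt_one(int cpuid, void (*fn)(void)) { (void)cpuid; (void)fn; }
static void count_xcall(struct cpu *cp) { (void)cp; }    /* CPU_STATS_ADDQ stand-in */

/* Invoked by the cyclic framework once this CPU is online (line 994). */
static void
send_mondo_timeout_check(void)
{
        /* Probe only when another CPU exists and no recovery is
         * active (the test at source line 1047). */
        if (CPU->cpu_next_onln != CPU && sendmondo_in_recover == 0) {
                xt_one(CPU->cpu_next_onln->cpu_id, xt_sync_tl1);
                count_xcall(CPU);
        }
}
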
1242 * Zero out + invalidate CPU logout.
1259 myid = CPU->cpu_id;
1315 * Get the CPU log out info. If we can't find our CPU private
1319 if (CPU_PRIVATE(CPU) == NULL) {
1324 clop = CPU_PRIVATE_PTR(CPU, chpr_fecctl0_logout);
1389 aflt->flt_inst = CPU->cpu_id;
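
The fast-ECC path around source lines 1315-1389 follows a pattern that recurs throughout this listing (also at 1640-1656, 1764-1781, 6299-6302, 6374-6380): fetch the per-CPU logout area via CPU_PRIVATE_PTR(), fall back when CPU private data is not yet set up, then stamp the async-fault structure with the detecting CPU's id. A simplified sketch with the structures reduced to the fields the listing shows:

#include <stddef.h>

struct ch_cpu_logout { int clo_valid; /* captured AFSR/AFAR elided */ };
struct cpu_private   { struct ch_cpu_logout chpr_fecctl0_logout; };
struct cpu           { int cpu_id; struct cpu_private *cpu_private; };
struct async_flt     { int flt_inst; /* other fault fields elided */ };

static struct cpu *CPU;              /* current CPU (stand-in) */

static void
record_fast_ecc(struct async_flt *aflt)
{
        struct ch_cpu_logout *clop = NULL;

        /* No CPU private data yet means no logout area; the real
         * handler then reads the AFSR/AFAR registers directly. */
        if (CPU->cpu_private != NULL)
                clop = &CPU->cpu_private->chpr_fecctl0_logout;

        aflt->flt_inst = CPU->cpu_id;  /* detecting CPU, as at line 1389 */
        (void)clop;                    /* decoding of the logout elided */
}
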
1423 if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
1454 * Zero out + invalidate CPU logout.
1511 if (ch_err_tl1_paddrs[CPU->cpu_id] == 0) {
1514 } else if (CPU_PRIVATE(CPU) != NULL) {
1515 cl1p = CPU_PRIVATE_PTR(CPU, chpr_tl1_err_data[0]);
1606 * EDC, WDU, WDC, CPU, CPC, IVU, IVC events.
1607 * Disrupting errors controlled by NCEEN: EDU:ST, WDU, CPU, IVU
1640 * Get the CPU log out info. If we can't find our CPU private
1644 if (CPU_PRIVATE(CPU) == NULL) {
1656 clop = CPU_PRIVATE_PTR(CPU, chpr_cecc_logout);
1764 * Get the CPU log out info. If we can't find our CPU private
1768 if (CPU_PRIVATE(CPU) == NULL) {
1781 clop = CPU_PRIVATE_PTR(CPU, chpr_async_logout);
1813 aflt->flt_inst = CPU->cpu_id;
1925 * Zero out + invalidate CPU logout.
2109 aflt->flt_inst = CPU->cpu_id;
2139 IS_JALAPENO(cpunodes[CPU->cpu_id].implementation)) {
2187 if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
2214 if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
2238 if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
2370 * post-process CPU events that are dequeued. As such, it can be invoked
2372 * panic flow. We decode the CPU-specific data, and take appropriate actions.
2665 * to try to ensure that the CPU makes progress in the face
2753 * Clear CEEN. CPU CE TL > 0 trap handling will already have done
2755 * avoid CPU migration so that we restore CEEN on the correct
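
The comment at source lines 2753-2755 names an important constraint: CEEN must be restored on the same CPU that cleared it, so the code has to prevent migration in between. A minimal sketch of that discipline, assuming kpreempt_disable()/kpreempt_enable() semantics; the error-enable accessors and the CEEN bit value are hypothetical stand-ins:

static unsigned long err_enable;    /* stand-in error-enable register */
static unsigned long get_error_enable(void) { return err_enable; }
static void set_error_enable(unsigned long v) { err_enable = v; }
static void kpreempt_disable(void) { }   /* no-op stubs for the sketch */
static void kpreempt_enable(void) { }

#define EN_REG_CEEN 0x1UL           /* assumption: CEEN bit */

static void
scrub_with_ceen_cleared(void (*scrub)(void))
{
        kpreempt_disable();   /* pin to this CPU: CEEN must come back
                               * on the CPU that cleared it */
        set_error_enable(get_error_enable() & ~EN_REG_CEEN);
        scrub();
        set_error_enable(get_error_enable() | EN_REG_CEEN);
        kpreempt_enable();
}
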
2856 clop = CPU_PRIVATE(CPU) ? CPU_PRIVATE_PTR(CPU, chpr_cecc_logout) : NULL;
2877 if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
2886 totalsize = cpunodes[CPU->cpu_id].ecache_size;
3180 * look up CPU type; with a small timeout this is unlikely.
3212 * CPU and PCI drain functions. Decide what further classification actions,
3392 aflt->flt_inst = CPU->cpu_id;
3522 * not be the CPU AFSR, and we pass in a -1 to cpu_get_mem_unum()
3597 if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation))
3605 if (cpu_error_is_ecache_data(CPU->cpu_id, t_afsr_errs))
3609 CPU->cpu_id, flt_in_memory, flt_status, unum, UNUM_NAMLEN, lenp);
3736 * has already filled flt_synd. For errors trapped by CPU we only fill
3783 * there is a failing CPU in an error-trap loop which
3806 flush_ecache(ecache_flushaddr, cpunodes[CPU->cpu_id].ecache_size,
3807 cpunodes[CPU->cpu_id].ecache_linesize);
3811 * Return the CPU E$ set size: the E$ size divided by the associativity.
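
Source lines 3806-3811 flush the E$ by total size and line size, and define the set size as total size over associativity. A compile-ready sketch of that arithmetic with a made-up cpunodes[] entry; an 8 MB 2-way E$ yields a 4 MB set size:

#include <stdio.h>

struct cpunode { int ecache_size; int ecache_linesize; int ecache_assoc; };
static struct cpunode cpunodes[] = { { 8 * 1024 * 1024, 64, 2 } };

/* Set size = E$ size / associativity (the comment at source line 3811). */
static int
cpu_ecache_set_size(int cpuid)
{
        return (cpunodes[cpuid].ecache_size / cpunodes[cpuid].ecache_assoc);
}

int
main(void)
{
        printf("set size = %d\n", cpu_ecache_set_size(0));  /* 4194304 */
        return (0);
}
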
3836 int ec_set_size = cpu_ecache_set_size(CPU);
3849 int ec_set_size = cpu_ecache_set_size(CPU);
3862 int lsize = cpunodes[CPU->cpu_id].ecache_linesize;
3863 int ec_set_size = cpu_ecache_set_size(CPU);
3878 int totalsize = cpunodes[CPU->cpu_id].ecache_size;
3879 int ec_set_size = cpu_ecache_set_size(CPU);
3910 int ispanther = IS_PANTHER(cpunodes[CPU->cpu_id].implementation);
3921 ec_set_size = cpu_ecache_set_size(CPU);
3923 totalsize = cpunodes[CPU->cpu_id].ecache_size;
4035 if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
4109 if (CPU_PRIVATE(CPU)) {
4110 ic_set_size = CPU_PRIVATE_VAL(CPU, chpr_icache_size) /
4112 ic_linesize = CPU_PRIVATE_VAL(CPU, chpr_icache_linesize);
4139 if (CPU_PRIVATE(CPU)) {
4140 ic_set_size = CPU_PRIVATE_VAL(CPU, chpr_icache_size) /
4142 ic_linesize = CPU_PRIVATE_VAL(CPU, chpr_icache_linesize);
4152 if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
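
The I$ paths at source lines 4109-4152 compute the same set-size quantity for the I-cache, but pull the geometry from the CPU-private area (chpr_icache_size, chpr_icache_linesize) instead of cpunodes[]. A hedged fragment of that selection; the associativity constant and the fallback defaults are assumptions:

struct cpu_private { int chpr_icache_size; int chpr_icache_linesize; };
struct cpu         { int cpu_id; struct cpu_private *cpu_private; };

#define CH_ICACHE_NWAY 4            /* assumption: I$ associativity */

static void
get_icache_geometry(struct cpu *cp, int *set_size, int *linesize)
{
        if (cp->cpu_private != NULL) {
                *set_size = cp->cpu_private->chpr_icache_size /
                    CH_ICACHE_NWAY;
                *linesize = cp->cpu_private->chpr_icache_linesize;
        } else {
                *set_size = (16 * 1024) / CH_ICACHE_NWAY; /* illustrative */
                *linesize = 32;
        }
}
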
4518 * E$ data type; otherwise, return CPU type.
4735 * Initialize the info in the CPU logout structure.
4824 * Check if the CPU log out captured was valid.
5349 * Slave CPU stick synchronization.
5419 stick_sync_stats[CPU->cpu_id].skew_val[tries] =
5435 * Master CPU side of stick synchronization.
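
Source lines 5349-5435 refer to the master/slave synchronization of the stick (system tick) counters: the slave samples its skew against the master several times, recording each observation in stick_sync_stats before applying a correction. A rough sketch of the slave-side loop, with the handshake and register reads stubbed:

#include <stdint.h>

#define NCPU             64   /* assumption */
#define STICK_SYNC_TRIES 4    /* assumption: samples per sync */

struct stick_sync_stats { int64_t skew_val[STICK_SYNC_TRIES]; };
static struct stick_sync_stats stick_sync_stats[NCPU];

static int64_t read_master_stick(void) { return (0); } /* handshake stub */
static int64_t read_local_stick(void)  { return (0); } /* %stick read stub */
static void    adjust_local_stick(int64_t skew) { (void)skew; }

/* Slave side (source line 5349): sample, record (line 5419), correct. */
static void
sticksync_slave(int cpuid)
{
        for (int tries = 0; tries < STICK_SYNC_TRIES; tries++) {
                int64_t skew = read_master_stick() - read_local_stick();
                stick_sync_stats[cpuid].skew_val[tries] = skew;
                adjust_local_stick(skew);
        }
}
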
5475 * in cpu_async_panic_callb, each CPU checks for CPU events on its way to
5640 ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(CPU, chpr_scrub_misc);
5646 xt_one_unchecked(CPU->cpu_id, setsoftint_tl1,
5659 ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(CPU, chpr_scrub_misc);
5847 * Called periodically on each CPU to scrub the D$.
5853 ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(CPU, chpr_scrub_misc);
5889 ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(CPU, chpr_scrub_misc);
5913 * Called periodically on each CPU to scrub the I$. The I$ is scrubbed
5922 ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(CPU, chpr_scrub_misc);
5963 ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(CPU, chpr_scrub_misc);
5988 * Called periodically on each CPU to scrub the E$.
5993 ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(CPU, chpr_scrub_misc);
5995 int cpuid = CPU->cpu_id;
5999 int ec_set_size = cpu_ecache_set_size(CPU);
6042 ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(CPU, chpr_scrub_misc);
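
The scrubber entries at source lines 5640-6042 all share one shape: a periodic handler reads this CPU's ch_scrub_misc state, advances a scan index through one cache's sets, and kicks the actual flush work via a TL1 soft interrupt (the xt_one_unchecked/setsoftint_tl1 pair at line 5646). A condensed sketch of the per-CPU state and the index-advance step; field names are modeled on the listing but the layout is an assumption:

typedef struct ch_scrub_misc {           /* per-CPU scrubber bookkeeping */
        int chsm_flush_index[3];         /* next set to scrub: D$, I$, E$ */
        int chsm_outstanding[3];         /* scrub requests not yet done */
} ch_scrub_misc_t;

enum { SCRUB_DCACHE, SCRUB_ICACHE, SCRUB_ECACHE };

/* One periodic tick for one cache: advance the scan window and note
 * how many lines the softint handler should flush on this CPU. */
static void
scrub_tick(ch_scrub_misc_t *csmp, int cache, int set_size, int lines)
{
        csmp->chsm_flush_index[cache] =
            (csmp->chsm_flush_index[cache] + lines) % set_size;
        csmp->chsm_outstanding[cache] += lines;
        /* real code: xt_one_unchecked(CPU->cpu_id, setsoftint_tl1, ...) */
}
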
6082 * When the CPU gets a disrupting trap for any of the errors
6087 * after a CE is logged on a CPU, we schedule a timeout function,
6090 * have occurred on that CPU, and if none have, will reenable CEEN.
6093 * timeout will be scheduled. This is to ensure that the CPU can
6125 * If we are running on the CPU that got the
6128 if (cp->cpu_id == CPU->cpu_id) {
6137 * send an x-call to get the CPU that originally
6140 * lose CEEN forever on that CPU.
6148 * When the CPU is not accepting xcalls, or
6156 * a CE, disable CEEN, offline the CPU during the
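
Source lines 6082-6156 describe the CE-storm damping policy: after a correctable error is logged, CEEN is cleared and a timeout is scheduled; the callback re-enables CEEN only if no further CEs arrived in the window, retries if the owning CPU cannot take the x-call needed to restore CEEN (so CEEN is never lost forever, lines 6137-6140), and in the persistent case can offline the CPU. A pseudocode-level C sketch of that callback decision, with every policy helper hypothetical:

struct cpu { int cpu_id; int cpu_flags; };
#define CPU_READY 0x1                  /* accepting x-calls? (assumption) */

static int  ce_count_since_timeout(struct cpu *cp) { (void)cp; return (0); }
static void xcall_reenable_ceen(struct cpu *cp) { (void)cp; }
static void reschedule_ceen_timeout(struct cpu *cp) { (void)cp; }

/* Timeout callback scheduled after a CE was logged on cp (line 6087). */
static void
cpu_ceen_delay_timeout(struct cpu *cp)
{
        if (ce_count_since_timeout(cp) > 0) {
                reschedule_ceen_timeout(cp);  /* still storming: stay off */
                return;
        }
        if ((cp->cpu_flags & CPU_READY) == 0) {
                reschedule_ceen_timeout(cp);  /* can't x-call; never lose
                                               * CEEN forever on that CPU */
                return;
        }
        xcall_reenable_ceen(cp);  /* restore CEEN on the owning CPU */
}
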
6299 if (CPU_PRIVATE(CPU) == NULL)
6302 clop = CPU_PRIVATE_PTR(CPU, chpr_cecc_logout);
6367 aflt->flt_inst = CPU->cpu_id;
6374 * Get the CPU log out info for Disrupting Trap.
6376 if (CPU_PRIVATE(CPU) == NULL) {
6380 clop = CPU_PRIVATE_PTR(CPU, chpr_cecc_logout);
6457 * Zero out + invalidate CPU logout.
6597 if (IS_JAGUAR(cpunodes[CPU->cpu_id].implementation))
6599 else if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation))
6611 if (IS_JAGUAR(cpunodes[CPU->cpu_id].implementation))
6613 else if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation))
6627 if (IS_JAGUAR(cpunodes[CPU->cpu_id].implementation))
6629 else if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation))
6642 if (IS_JAGUAR(cpunodes[CPU->cpu_id].implementation))
6644 else if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation))
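
The repeated IS_JAGUAR()/IS_PANTHER() chains at source lines 6597-6644 dispatch on the implementation number recorded in cpunodes[]. The macros are plain equality tests against per-chip implementation values; the numbers below are from memory of cheetahregs.h and should be treated as assumptions:

#define JAGUAR_IMPL  0x18   /* assumption */
#define PANTHER_IMPL 0x19   /* assumption */

#define IS_JAGUAR(impl)  ((impl) == JAGUAR_IMPL)
#define IS_PANTHER(impl) ((impl) == PANTHER_IMPL)

struct cpunode { int implementation; /* from the VER register */ };
struct cpunode cpunodes[64];         /* indexed by cpu_id, as in the listing */
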
6884 * been parsed. One CPU has already been initialized.
6959 cgp = &fpras_chkfngrps[CPU->cpu_id];
6980 * check. In all cases we are still bound to CPU.
6994 * We're running on a sick CPU. Avoid further FPU use at least for
7010 aflt->flt_inst = CPU->cpu_id;
7027 cfp = &fpras_chkfngrps[CPU->cpu_id].fpras_fn[op];
7042 fm_panic("FPU failure on CPU %d", CPU->cpu_id);
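
Source lines 6884-7042 belong to fpRAS: each CPU owns a group of FPU check functions, one per checked operation; a check reruns a known FP computation while still bound to the CPU, and a mismatch is treated as FPU failure (fault report, then fm_panic at line 7042). A heavily simplified, runnable sketch of that dispatch, with the check reduced to a trivial computation and all sizes assumed:

#include <stdio.h>
#include <stdlib.h>

#define NCPU      64   /* assumption */
#define FPRAS_NFN 4    /* assumption: number of checked operations */

typedef int (*fpras_chkfn_t)(void);
struct fpras_chkfngrp { fpras_chkfn_t fpras_fn[FPRAS_NFN]; };
static struct fpras_chkfngrp fpras_chkfngrps[NCPU];

/* Trivial stand-in check: returns 0 when the FP result is as expected. */
static int
fpras_chkfn_sample(void)
{
        return ((2.0 * 0.5 == 1.0) ? 0 : -1);
}

static void
fpras_check(int cpuid, int op)
{
        fpras_chkfn_t fn = fpras_chkfngrps[cpuid].fpras_fn[op];

        if (fn != NULL && fn() != 0) {
                /* real code: fm_panic("FPU failure on CPU %d", cpuid) */
                (void) fprintf(stderr, "FPU failure on CPU %d\n", cpuid);
                abort();
        }
}
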
7279 if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {