Lines Matching refs:aflt

112 static void cpu_read_paddr(struct async_flt *aflt, short verbose, short ce_err);
116 static void log_ce_err(struct async_flt *aflt, char *unum);
117 static void log_ue_err(struct async_flt *aflt, char *unum);
120 static int check_ecc(struct async_flt *aflt);
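
All of the prototypes above, and every reference that follows, operate on struct async_flt, the common asynchronous-fault record shared between this CPU module and the bus nexus code. The sketch below lists only the fields this listing actually touches; the field types are inferred from how the fields are used here, and the struct name is changed to make clear this is not the authoritative definition, which lives in a shared header rather than in this file.

	/*
	 * Sketch only: the async_flt fields exercised in this listing.
	 * Types are guesses based on usage; see the platform's async-fault
	 * header for the real definition.
	 */
	#include <stdint.h>

	struct async_flt_sketch {
		uint64_t	flt_id;		/* unique id, gethrtime_waitfree() */
		uint64_t	flt_stat;	/* captured AFSR */
		uint64_t	flt_addr;	/* captured AFAR, or AFLT_INV_ADDR */
		uint64_t	flt_synd;	/* ECC syndrome (UDBH/UDBL) */
		uint32_t	flt_bus_id;	/* reporting agent id */
		uint32_t	flt_inst;	/* CPU instance (CPU->cpu_id) */
		void		*flt_pc;	/* %pc at the time of the trap */
		uint8_t		flt_prot;	/* AFLT_PROT_NONE/EC/ACCESS/COPY */
		uint8_t		flt_class;	/* CPU_FAULT or BUS_FAULT */
		uint8_t		flt_priv;	/* taken in privileged mode */
		uint8_t		flt_tl;		/* trap level at the error */
		uint8_t		flt_panic;	/* nonzero: this fault must panic */
		uint8_t		flt_core;	/* dump core for protected copy */
		uint8_t		flt_in_memory;	/* AFAR falls in real memory */
		uint16_t	flt_status;	/* ECC_D_TRAP, ECC_I_TRAP, ... */
		const char	*flt_erpt_class; /* ereport class string */
	};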
926 log_ce_err(struct async_flt *aflt, char *unum)
930 if ((aflt->flt_stat & P_AFSR_CE) && (ce_verbose_memory == 0)) {
934 spf_flt.cmn_asyncflt = *aflt;
951 flt_to_error_type(struct async_flt *aflt)
953 if (aflt->flt_status & ECC_INTERMITTENT)
955 if (aflt->flt_status & ECC_PERSISTENT)
957 if (aflt->flt_status & ECC_STICKY)
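
flt_to_error_type() at 951-957 classifies a correctable error by the persistence bits recorded in flt_status. A minimal sketch of that pattern follows; the return labels are placeholders for illustration and are not the strings the driver reports, and the ECC_* bits are the driver's own definitions.

	/* Sketch: classify a CE by the persistence bits in flt_status. */
	static const char *
	ce_persistence(const struct async_flt *aflt)
	{
		if (aflt->flt_status & ECC_INTERMITTENT)
			return ("intermittent");	/* placeholder label */
		if (aflt->flt_status & ECC_PERSISTENT)
			return ("persistent");		/* placeholder label */
		if (aflt->flt_status & ECC_STICKY)
			return ("sticky");		/* placeholder label */
		return ("unknown");
	}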
972 struct async_flt *aflt = (struct async_flt *)spf_flt;
974 status = aflt->flt_status;
1172 cpu_ue_log_err(struct async_flt *aflt)
1175 switch (aflt->flt_class) {
1177 cpu_async_log_err(aflt);
1181 bus_async_log_err(aflt);
1186 "fault class (0x%x)", (void *)aflt, aflt->flt_class);
1248 struct async_flt *aflt;
1290 * aflt->flt_priv from %tstate, instead of from the AFSR.PRIV bit. The
1291 * initial setting of aflt->flt_panic is based on TL: we must panic if
1296 aflt = (struct async_flt *)&spf_flt;
1297 aflt->flt_id = gethrtime_waitfree();
1298 aflt->flt_stat = t_afsr;
1299 aflt->flt_addr = t_afar;
1300 aflt->flt_bus_id = getprocessorid();
1301 aflt->flt_inst = CPU->cpu_id;
1302 aflt->flt_pc = (caddr_t)rp->r_pc;
1303 aflt->flt_prot = AFLT_PROT_NONE;
1304 aflt->flt_class = CPU_FAULT;
1305 aflt->flt_priv = (rp->r_tstate & TSTATE_PRIV) ? 1 : 0;
1306 aflt->flt_tl = (uchar_t)tl;
1307 aflt->flt_panic = (tl != 0 || aft_testfatal != 0);
1308 aflt->flt_core = (pflag & SDOCORE) ? 1 : 0;
1316 aflt->flt_status = ECC_D_TRAP;
1319 aflt->flt_status = ECC_I_TRAP;
1337 if (aflt->flt_priv && tl == 0) {
1342 aflt->flt_prot = AFLT_PROT_EC;
1351 aflt->flt_prot = AFLT_PROT_ACCESS;
1368 aflt->flt_prot = AFLT_PROT_COPY;
1397 if (aflt->flt_prot == AFLT_PROT_NONE) {
1402 if (aflt->flt_priv || aft_panic)
1403 aflt->flt_panic = 1;
1407 if (aflt->flt_priv)
1408 aflt->flt_panic = 1;
1410 } else if (aflt->flt_prot == AFLT_PROT_COPY && aft_panic) {
1411 aflt->flt_panic = 1;
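
The comment fragments at 1290-1291 carry the policy behind the assignments that follow: flt_priv is taken from the saved %tstate rather than from AFSR.PRIV, and flt_panic starts out set whenever the trap was taken at TL>0; 1397-1411 then escalate flt_panic for unprotected or copy-protected accesses. A condensed, hedged sketch of that decision using only names visible in the listing (the error-type tests that separate the two inner checks at 1402-1403 and 1407-1408 are not captured here):

	aflt->flt_priv  = (rp->r_tstate & TSTATE_PRIV) ? 1 : 0; /* trust %tstate, not AFSR.PRIV */
	aflt->flt_panic = (tl != 0 || aft_testfatal != 0);      /* TL > 0 is always fatal */

	if (aflt->flt_prot == AFLT_PROT_NONE) {
		/*
		 * Unprotected access: privileged faults (and, for some error
		 * types, the aft_panic tunable) force a panic.
		 */
		if (aflt->flt_priv || aft_panic)
			aflt->flt_panic = 1;
	} else if (aflt->flt_prot == AFLT_PROT_COPY && aft_panic) {
		aflt->flt_panic = 1;	/* protected copy, but aft_panic escalates it */
	}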
1419 cpu_run_bus_error_handlers(aflt, expected);
1436 aflt->flt_in_memory = (pf_is_memory(aflt->flt_addr >>
1444 if (aflt->flt_in_memory) {
1448 uint64_t faultpa = P2ALIGN(aflt->flt_addr, 64);
1461 (aflt->flt_addr % ec_set_size);
1472 ((uint64_t)aflt->flt_addr >>
1486 ((uint64_t)aflt->flt_addr >> cpu_ec_tag_shift)) {
1510 flushecacheline(P2ALIGN(aflt->flt_addr, 64),
1532 aflt->flt_panic = 1;
1535 aflt->flt_synd = sdbh & P_DER_E_SYND;
1538 aflt->flt_panic);
1541 aflt->flt_synd = sdbl & P_DER_E_SYND;
1542 aflt->flt_synd |= UDBL_REG; /* indicates UDBL */
1543 if (!(aflt->flt_stat & P_AFSR_ME))
1544 aflt->flt_addr |= 0x8;
1547 aflt->flt_panic);
1555 if (aflt->flt_panic && aflt->flt_in_memory) {
1556 panic_aflt = *aflt;
1578 aflt->flt_addr = AFLT_INV_ADDR;
1579 scan_ecache(&aflt->flt_addr, &spf_flt.flt_ec_data[0],
1588 if (aflt->flt_addr != AFLT_INV_ADDR) {
1589 aflt->flt_in_memory = (pf_is_memory(aflt->flt_addr >>
1594 aflt->flt_panic = 1;
1599 aflt->flt_panic);
1621 if (aflt->flt_priv && aflt->flt_prot == AFLT_PROT_NONE) {
1625 aflt->flt_panic);
1643 aflt->flt_panic = 1;
1650 aflt->flt_panic);
1662 aflt->flt_panic = 1;
1666 aflt->flt_panic);
1683 aflt->flt_addr = AFLT_INV_ADDR;
1684 scan_ecache(&aflt->flt_addr, &spf_flt.flt_ec_data[0],
1693 if (aflt->flt_addr != AFLT_INV_ADDR) {
1694 aflt->flt_in_memory =
1695 (pf_is_memory(aflt->flt_addr >>
1701 aflt->flt_panic);
1720 (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY)) {
1746 * aflt->flt_panic is already set above.
1748 ASSERT((aflt->flt_panic != 0) || (action != ACTION_NONE) ||
1773 if ((aflt->flt_stat & P_AFSR_UE) && aflt->flt_addr == t_afar)
1774 acc_afsr |= aflt->flt_stat & ~P_AFSR_UE;
1776 acc_afsr |= aflt->flt_stat;
1788 aflt->flt_panic = 1;
1791 aflt->flt_stat = acc_afsr;
1794 aflt->flt_panic);
1798 * If aflt->flt_panic is set at this point, we need to panic as the
1803 if (aflt->flt_panic) {
1824 struct async_flt *aflt = (struct async_flt *)spf_flt;
1832 if (aflt->flt_stat & P_AFSR_ISAP)
1834 else if (aflt->flt_stat & P_AFSR_ETP)
1836 else if (aflt->flt_stat & P_AFSR_IVUE)
1926 cpu_get_mem_unum_aflt(int synd_status, struct async_flt *aflt,
1929 return (cpu_get_mem_unum(synd_status, SYND(aflt->flt_synd),
1930 aflt->flt_stat, aflt->flt_addr, aflt->flt_bus_id,
1931 aflt->flt_in_memory, aflt->flt_status, buf, buflen, lenp));
2008 log_ue_err(struct async_flt *aflt, char *unum)
2010 spitf_async_flt *spf_flt = (spitf_async_flt *)aflt;
2014 int afsr_priv = (aflt->flt_stat & P_AFSR_PRIV) ? 1 : 0;
2020 if (afsr_priv != aflt->flt_priv) {
2027 "TSTATE.PRIV=%d used", (aflt->flt_priv) ? 1 : 0);
2030 aflt->flt_stat &= ~P_AFSR_PRIV;
2031 if (aflt->flt_priv)
2032 aflt->flt_stat |= P_AFSR_PRIV;
2036 (void) cpu_get_mem_unum_aflt(AFLT_STAT_VALID, aflt, unum,
2042 if (SYND(aflt->flt_synd) == 0x3) {
2048 if (aflt->flt_in_memory)
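
log_ue_err() at 2008-2048 reconciles the captured AFSR.PRIV bit against the flt_priv value derived from %tstate: when the two disagree, the TSTATE-derived value wins and flt_stat is rewritten to match, ahead of the unum lookup at 2036 and the syndrome 0x3 special case at 2042. A condensed sketch of the reconciliation step, using only what the listing shows:

	/* Sketch of the PRIV reconciliation at 2014-2032. */
	int afsr_priv = (aflt->flt_stat & P_AFSR_PRIV) ? 1 : 0;

	if (afsr_priv != aflt->flt_priv) {
		/*
		 * AFSR.PRIV disagrees with the mode recorded from %tstate;
		 * the driver notes "TSTATE.PRIV=%d used" and forces flt_stat
		 * to agree with flt_priv.
		 */
		aflt->flt_stat &= ~P_AFSR_PRIV;
		if (aflt->flt_priv)
			aflt->flt_stat |= P_AFSR_PRIV;
	}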
2063 struct async_flt *aflt = (struct async_flt *)flt;
2091 (aflt->flt_stat & S_AFSR_ALL_ERRS) == P_AFSR_UE &&
2092 aflt->flt_prot == AFLT_PROT_EC) {
2093 if (page_retire_check(aflt->flt_addr, NULL) == 0) {
2095 softcall(ecc_page_zero, (void *)aflt->flt_addr);
2105 log_ue_err(aflt, unum);
2106 if (aflt->flt_in_memory)
2107 cpu_check_allcpus(aflt);
2111 if (aflt->flt_stat & P_AFSR_EDP)
2115 if (aflt->flt_stat & P_AFSR_LDP)
2151 if (aflt->flt_stat & P_AFSR_BERR) {
2152 cpu_aflt_log(CE_WARN, aflt->flt_panic ? 1 : 2,
2155 aflt->flt_priv ? "privileged" : "user");
2158 if (aflt->flt_stat & P_AFSR_TO) {
2159 cpu_aflt_log(CE_WARN, aflt->flt_panic ? 1 : 2,
2162 aflt->flt_priv ? "privileged" : "user");
2174 ASSERT(aflt->flt_id == panic_aflt.flt_id);
2187 aflt->flt_inst, space, (panic_aflt.flt_status & ECC_IOBUS) ?
2207 "on PCIBus)", aflt->flt_inst);
2300 if (aflt->flt_addr != AFLT_INV_ADDR && aflt->flt_in_memory) {
2302 (void) page_retire(aflt->flt_addr, PR_UE);
2309 clearphys(P2ALIGN(aflt->flt_addr, 64),
2321 if (!aflt->flt_priv) {
2326 } else if (aflt->flt_prot == AFLT_PROT_COPY && aflt->flt_core) {
2330 } else if (aflt->flt_prot == AFLT_PROT_COPY) {
2335 } else if (aflt->flt_prot == AFLT_PROT_EC) {
2349 cpu_check_allcpus(struct async_flt *aflt)
2356 cpflt->flt_id = aflt->flt_id;
2357 cpflt->flt_addr = aflt->flt_addr;
2368 if (aflt->flt_status & ECC_D_TRAP)
2370 else if (aflt->flt_status & ECC_I_TRAP)
2379 (aflt->flt_status & ECC_IOBUS) ?
2380 "IOBUS" : "CPU", aflt->flt_bus_id);
2404 struct async_flt *aflt = (struct async_flt *)arg;
2427 aflt->flt_stat = afsr;
2451 flt_addr_tag = aflt->flt_addr >> cpu_ec_tag_shift;
2456 for (i = 0, ec_idx = (aflt->flt_addr % ec_set_size);
2495 struct async_flt *aflt = (struct async_flt *)&cp;
2499 aflt->flt_addr = panic_aflt.flt_addr;
2500 (void) get_cpu_status((uint64_t)aflt);
2505 aflt->flt_stat |= *scrub_afsr;
2509 if (aflt->flt_stat & P_AFSR_CP) {
2510 aflt->flt_id = panic_aflt.flt_id;
2511 aflt->flt_panic = 1;
2512 aflt->flt_inst = CPU->cpu_id;
2513 aflt->flt_class = CPU_FAULT;
2517 aflt->flt_panic);
2708 struct async_flt *aflt =
2713 aflt->flt_stat = t_afsr;
2714 aflt->flt_addr = t_afar;
2922 struct async_flt *aflt = (struct async_flt *)spf_flt;
2932 aflt->flt_stat = afsr;
2933 get_asyncaddr(&aflt->flt_addr);
2934 aflt->flt_addr &= SABRE_AFAR_PA;
3048 struct async_flt *aflt = (struct async_flt *)spf_flt;
3086 "E$parity 0x%02x %s", (uint32_t)(aflt->flt_addr >> 32),
3087 (uint32_t)aflt->flt_addr, (uint32_t)(ecache_tag >> 32),
3094 (uint32_t)(P2ALIGN(aflt->flt_addr, 64) >> 32),
3095 (uint32_t)P2ALIGN(aflt->flt_addr, 64));
3181 struct async_flt *aflt = (struct async_flt *)spflt;
3186 if ((aflt == NULL) || ((aflt->flt_class == CPU_FAULT) &&
3187 (aflt->flt_stat & P_AFSR_LEVEL1)) ||
3188 (aflt->flt_panic)) {
3191 int verbose = ((aflt->flt_class == BUS_FAULT) ||
3192 (aflt->flt_stat & P_AFSR_CE)) ?
3211 (uint32_t)(aflt->flt_id >> 32), (uint32_t)aflt->flt_id);
3228 aflt->flt_inst);
3233 if (aflt->flt_status & ECC_D_TRAP)
3236 else if (aflt->flt_status & ECC_I_TRAP)
3244 aflt->flt_tl ? ">0" : "=0");
3251 (uint32_t)(aflt->flt_id >> 32),
3252 (uint32_t)aflt->flt_id);
3259 (uint32_t)(aflt->flt_stat >> 32), AFSR_FMTSTR0,
3260 (uint32_t)aflt->flt_stat, AFSR_FMTSTR1);
3266 (uint32_t)(aflt->flt_addr >> 32),
3267 (uint32_t)aflt->flt_addr);
3273 (aflt->flt_stat & P_AFSR_P_SYND);
3283 (uchar_t)((aflt->flt_stat & P_AFSR_ETS) >> 16));
3289 (void *)aflt->flt_pc);
3310 ushort_t synd = SYND(aflt->flt_synd);
3314 UDBL(aflt->flt_synd) ? "UDBL" : "UDBH", synd);
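
cpu_aflt_log() consistently prints 64-bit fault data (flt_id, flt_stat, flt_addr) as two 32-bit halves, as at 3211, 3251-3252, 3259-3260 and 3266-3267; the AFSR additionally gets per-half decode strings (AFSR_FMTSTR0/1) that are not shown here. A small standalone illustration of the same high/low split, with printf standing in for the driver's logging routine and an arbitrary example value:

	#include <stdio.h>
	#include <stdint.h>

	/*
	 * Print a 64-bit register the way the listing does: high word,
	 * then low word.  Illustrative only; the exact format strings
	 * used by cpu_aflt_log() are not shown in the listing.
	 */
	static void
	print_hi_lo(const char *tag, uint64_t val)
	{
		(void) printf("%s 0x%08x.%08x\n", tag,
		    (uint32_t)(val >> 32), (uint32_t)val);
	}

	int
	main(void)
	{
		print_hi_lo("AFAR", 0x000001fe00000040ULL);
		return (0);
	}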
3891 struct async_flt *aflt;
3896 aflt = &spf_flt.cmn_asyncflt;
3908 aflt->flt_inst = CPU->cpu_id;
3909 aflt->flt_class = CPU_FAULT;
3910 aflt->flt_id = gethrtime_waitfree();
3911 aflt->flt_addr = paddr;
3912 aflt->flt_stat = afsr;
3913 aflt->flt_panic = (uchar_t)ecache_scrub_panic;
3928 ue_queue, aflt->flt_panic);
3930 if (aflt->flt_panic)
3944 struct async_flt *aflt;
3948 aflt = &spf_flt.cmn_asyncflt;
3954 aflt->flt_addr = AFLT_INV_ADDR;
3955 scan_ecache(&aflt->flt_addr, &spf_flt.flt_ec_data[0],
3967 if (aflt->flt_addr != AFLT_INV_ADDR) {
3968 aflt->flt_in_memory = (pf_is_memory(aflt->flt_addr >>
3974 aflt->flt_inst = CPU->cpu_id;
3975 aflt->flt_class = CPU_FAULT;
3976 aflt->flt_id = gethrtime_waitfree();
3977 aflt->flt_status = afsr;
3978 aflt->flt_panic = (uchar_t)ecache_scrub_panic;
3985 flushecacheline(P2ALIGN(aflt->flt_addr, 64),
3993 (void *)&spf_flt, sizeof (spf_flt), ue_queue, aflt->flt_panic);
4088 cpu_run_bus_error_handlers(struct async_flt *aflt, int expected)
4096 de.fme_ena = fm_ena_generate_cpu(aflt->flt_id, aflt->flt_inst,
4099 de.fme_bus_specific = (void *)aflt->flt_addr;
4102 if ((aflt->flt_prot == AFLT_PROT_NONE) && (status == DDI_FM_FATAL))
4103 aflt->flt_panic = 1;
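
cpu_run_bus_error_handlers() at 4088-4103 hands the fault to the DDI fault-management framework: an ENA is generated from flt_id and flt_inst, fme_bus_specific carries the fault address, and if the access was unprotected and the handlers report DDI_FM_FATAL the fault is escalated to a panic. A hedged sketch of that flow; the ENA format argument, any other fields of the error descriptor, and the call that actually dispatches it to the registered handlers are not shown in the listing and are assumptions or placeholders here:

	ddi_fm_error_t de;

	de.fme_ena = fm_ena_generate_cpu(aflt->flt_id, aflt->flt_inst,
	    FM_ENA_FMT1);			/* format argument assumed */
	de.fme_bus_specific = (void *)aflt->flt_addr;

	/* ... dispatch 'de' to the registered handlers, yielding 'status' ... */

	if ((aflt->flt_prot == AFLT_PROT_NONE) && (status == DDI_FM_FATAL))
		aflt->flt_panic = 1;	/* unprotected access + fatal verdict */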
4111 struct async_flt *aflt = (struct async_flt *)payload;
4113 aflt->flt_erpt_class = error_class;