Lines matching refs:cpu (each match below is prefixed with its line number in the source file)
326 * now have per-cpu vectors.
492 * This variable tracks the last place events were disabled on each cpu
505 * Set cpu's base SPL level to the highest active interrupt level
510 struct cpu *cpu = CPU;
511 uint16_t active = (uint16_t)cpu->cpu_intr_actv;
513 cpu->cpu_base_spl = active == 0 ? 0 : bsrw_insn(active);
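The set_base_spl() fragments above capture the whole mechanism: the low 16 bits of cpu_intr_actv hold one "active" bit per PIL, and the base SPL is simply the index of the highest set bit, or 0 when nothing is active. A minimal stand-alone sketch of that computation, with highest_bit() as a hypothetical portable stand-in for the bsrw_insn() x86 intrinsic:

    #include <stdint.h>

    /*
     * Hypothetical portable stand-in for the bsrw_insn() x86 intrinsic:
     * returns the index of the highest set bit (mask must be nonzero).
     */
    static int
    highest_bit(uint16_t mask)
    {
        int bit = 0;

        while (mask >>= 1)
            bit++;
        return (bit);
    }

    /*
     * Model of set_base_spl(): with one "active" bit per PIL in the low
     * 16 bits of cpu_intr_actv, the base SPL is the highest active level.
     */
    static int
    model_base_spl(uint16_t active)
    {
        return (active == 0 ? 0 : highest_bit(active));
    }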
517 * Do all the work necessary to set up the cpu and thread structures
527 hilevel_intr_prolog(struct cpu *cpu, uint_t pil, uint_t oldpil, struct regs *rp)
529 struct machcpu *mcpu = &cpu->cpu_m;
537 cpu->cpu_profile_pil = oldpil;
539 cpu->cpu_profile_pc = 0;
540 cpu->cpu_profile_upc = rp->r_pc;
541 cpu->cpu_cpcprofile_pc = 0;
542 cpu->cpu_cpcprofile_upc = rp->r_pc;
544 cpu->cpu_profile_pc = rp->r_pc;
545 cpu->cpu_profile_upc = 0;
546 cpu->cpu_cpcprofile_pc = rp->r_pc;
547 cpu->cpu_cpcprofile_upc = 0;
551 mask = cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK;
565 cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
573 kthread_t *t = cpu->cpu_thread;
583 cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
593 ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);
602 uint16_t *refcntp = (uint16_t *)&cpu->cpu_intr_actv + 1;
606 mask = cpu->cpu_intr_actv;
608 cpu->cpu_intr_actv |= (1 << pil);
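Two details of the prolog are worth modeling. Each PIL owns one bit of cpu_intr_actv, and only PIL 15 may nest on itself, so the line taking (uint16_t *)&cpu->cpu_intr_actv + 1 aliases the upper half of the word (on little-endian x86) as a nesting refcount for level 15. A sketch under those assumptions; actv_enter()/actv_exit() are illustrative names, not the kernel's API:

    #include <assert.h>
    #include <stdint.h>

    #define PIL_15  15      /* the only level allowed to nest on itself */

    /*
     * Model of the cpu_intr_actv discipline: the low 16 bits hold one
     * "active" bit per PIL; the upper 16 bits, reached by the same
     * (uint16_t *) + 1 aliasing as above (little-endian only), count
     * how deeply PIL-15 handlers are nested.
     */
    static void
    actv_enter(uint32_t *actv, unsigned int pil)
    {
        if (pil == PIL_15) {
            uint16_t *refcntp = (uint16_t *)actv + 1;
            (*refcntp)++;                   /* one more nested level-15 */
        } else {
            assert((*actv & (1u << pil)) == 0);
        }
        *actv |= (1u << pil);
    }

    static void
    actv_exit(uint32_t *actv, unsigned int pil)
    {
        if (pil == PIL_15) {
            uint16_t *refcntp = (uint16_t *)actv + 1;
            if (--(*refcntp) > 0)
                return;                     /* still nested; keep bit 15 */
        }
        *actv &= ~(1u << pil);
    }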
623 hilevel_intr_epilog(struct cpu *cpu, uint_t pil, uint_t oldpil, uint_t vecnum)
625 struct machcpu *mcpu = &cpu->cpu_m;
632 cpu->cpu_stats.sys.intr[pil - 1]++;
634 ASSERT(cpu->cpu_intr_actv & (1 << pil));
643 uint16_t *refcntp = (uint16_t *)&cpu->cpu_intr_actv + 1;
648 cpu->cpu_intr_actv &= ~(1 << pil);
650 cpu->cpu_intr_actv &= ~(1 << pil);
657 cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
664 mask = cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK;
686 kthread_t *t = cpu->cpu_thread;
695 return (cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK);
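A line that recurs in every prolog and epilog above is cpu->cpu_intracct[cpu->cpu_mstate] += intrtime: elapsed interrupt time is charged to a per-cpu bucket indexed by the cpu's current microstate. A toy model of just that accounting step (xcpu_t and XNSTATES are invented for illustration):

    #include <stdint.h>

    #define XNSTATES 4      /* hypothetical microstate count, for the model */

    typedef struct xcpu {
        int      cpu_mstate;                /* microstate the cpu is in now */
        uint64_t cpu_intracct[XNSTATES];    /* interrupt time per microstate */
    } xcpu_t;

    /*
     * Charge the elapsed interrupt time to the bucket for the cpu's
     * current microstate, as the fragments above do with intrtime.
     */
    static void
    charge_intrtime(xcpu_t *cpu, uint64_t start, uint64_t now)
    {
        cpu->cpu_intracct[cpu->cpu_mstate] += now - start;
    }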
699 * Set up the cpu, thread and interrupt thread structures for
704 intr_thread_prolog(struct cpu *cpu, caddr_t stackptr, uint_t pil)
706 struct machcpu *mcpu = &cpu->cpu_m;
711 ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);
712 cpu->cpu_intr_actv |= (1 << pil);
721 t = cpu->cpu_thread;
725 cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
734 * unlink the interrupt thread from the cpu
741 it = cpu->cpu_intr_thread;
742 cpu->cpu_intr_thread = it->t_link;
753 cpu->cpu_thread = it; /* new curthread on this cpu */
770 intr_thread_epilog(struct cpu *cpu, uint_t vec, uint_t oldpil)
772 struct machcpu *mcpu = &cpu->cpu_m;
774 kthread_t *it = cpu->cpu_thread; /* curthread */
780 cpu->cpu_stats.sys.intr[pil - 1]++;
785 cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
787 ASSERT(cpu->cpu_intr_actv & (1 << pil));
788 cpu->cpu_intr_actv &= ~(1 << pil);
811 cpu->cpu_stats.sys.intrblk++;
816 basespl = cpu->cpu_base_spl;
825 it->t_link = cpu->cpu_intr_thread;
826 cpu->cpu_intr_thread = it;
835 it->t_link = cpu->cpu_intr_thread;
836 cpu->cpu_intr_thread = it;
839 basespl = cpu->cpu_base_spl;
844 cpu->cpu_thread = t;
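The intr_thread_prolog()/intr_thread_epilog() fragments show each cpu keeping a singly linked free list of interrupt threads, headed by cpu_intr_thread and chained through t_link: the prolog pops one and installs it as curthread, and the epilog pushes it back and resumes the pinned thread. A self-contained sketch of that list discipline (xthread_t and xcpu_t are pared-down stand-ins, not the kernel's types):

    /* Pared-down stand-ins for kthread_t and cpu_t, for illustration only. */
    typedef struct xthread {
        struct xthread *t_link;             /* free-list linkage */
    } xthread_t;

    typedef struct xcpu {
        xthread_t *cpu_intr_thread;         /* head of the free list */
        xthread_t *cpu_thread;              /* current thread on this cpu */
    } xcpu_t;

    /*
     * Prolog side: pop an interrupt thread off the cpu's free list and
     * install it as the current thread (the list is assumed non-empty,
     * as it is in the kernel, which preallocates them per cpu).
     */
    static xthread_t *
    intr_thread_pop(xcpu_t *cpu)
    {
        xthread_t *it = cpu->cpu_intr_thread;

        cpu->cpu_intr_thread = it->t_link;
        cpu->cpu_thread = it;               /* new curthread on this cpu */
        return (it);
    }

    /*
     * Epilog side: push the interrupt thread back on the free list and
     * resume the thread that was pinned underneath it.
     */
    static void
    intr_thread_push(xcpu_t *cpu, xthread_t *it, xthread_t *pinned)
    {
        it->t_link = cpu->cpu_intr_thread;
        cpu->cpu_intr_thread = it;
        cpu->cpu_thread = pinned;
    }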
907 struct cpu *cpu;
914 cpu = CPU;
915 mcpu = &cpu->cpu_m;
916 t = cpu->cpu_thread;
918 ASSERT((cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK) == 0);
931 cpu->cpu_intracct[cpu->cpu_mstate] += delta;
939 struct cpu *cpu,
945 struct machcpu *mcpu = &cpu->cpu_m;
953 if (pil <= oldpil || pil <= cpu->cpu_base_spl)
998 it = cpu->cpu_intr_thread;
999 cpu->cpu_intr_thread = it->t_link;
1002 t = cpu->cpu_thread;
1006 cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
1030 cpu->cpu_thread = it;
1035 ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);
1036 cpu->cpu_intr_actv |= (1 << pil);
1049 dosoftint_epilog(struct cpu *cpu, uint_t oldpil)
1051 struct machcpu *mcpu = &cpu->cpu_m;
1057 it = cpu->cpu_thread;
1060 cpu->cpu_stats.sys.intr[pil - 1]++;
1062 ASSERT(cpu->cpu_intr_actv & (1 << pil));
1063 cpu->cpu_intr_actv &= ~(1 << pil);
1066 cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
1080 it->t_link = cpu->cpu_intr_thread;
1081 cpu->cpu_intr_thread = it;
1088 it->t_link = cpu->cpu_intr_thread;
1089 cpu->cpu_intr_thread = it;
1091 cpu->cpu_thread = t;
1094 basespl = cpu->cpu_base_spl;
1142 intr_ksp = kstat_create_zone("cpu", cp->cpu_id, "intrstat", "misc",
1171 kstat_delete_byname_zone("cpu", cp->cpu_id, "intrstat", ALL_ZONES);
1208 cpu_t *cpu;
1231 cpu = CPU;
1232 cpu->cpu_m.intrstat[t->t_pil][0] += interval;
1234 atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate],
1276 struct cpu *cpu = CPU;
1279 av_dispatch_softvect((int)cpu->cpu_thread->t_pil);
1286 dosoftint_epilog(cpu, oldpil);
1295 struct cpu *cpu = CPU;
1305 intr_thread_epilog(cpu, vector, oldipl);
1315 struct cpu *cpu = CPU;
1319 while (cpu->cpu_softinfo.st_pending) {
1320 oldipl = cpu->cpu_pri;
1321 newsp = dosoftint_prolog(cpu, (caddr_t)regs,
1322 cpu->cpu_softinfo.st_pending, oldipl);
1341 struct cpu *cpu = CPU;
1342 int newipl, oldipl = cpu->cpu_pri;
1350 ttp->ttr_spl = cpu->cpu_base_spl;
1356 ++*(uint16_t *)&cpu->cpu_m.mcpu_istamp;
1380 cpu->cpu_pri = newipl;
1387 * High priority interrupts run on this cpu's interrupt stack.
1389 if (hilevel_intr_prolog(cpu, newipl, oldipl, rp) == 0) {
1390 newsp = cpu->cpu_intr_stack;
1395 (void) hilevel_intr_epilog(cpu, newipl, oldipl, vector);
1400 newsp = intr_thread_prolog(cpu, (caddr_t)rp, newipl);
1408 if (cpu->cpu_softinfo.st_pending)
1528 bsrw_insn((uint16_t)cpu->cpu_softinfo.st_pending) > (newpri)))
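The bsrw_insn() fragment just above is the pending-soft-interrupt guard: bit n of st_pending means a soft interrupt is pending at PIL n, so comparing the index of the highest set bit against the new priority says whether lowering the SPL must first dispatch a softint. A hedged sketch, reusing the portable highest_bit() stand-in from the earlier example:

    #include <stdint.h>

    /* Same portable stand-in for bsrw_insn() as in the earlier sketch. */
    static int
    highest_bit(uint16_t mask)
    {
        int bit = 0;

        while (mask >>= 1)
            bit++;
        return (bit);
    }

    /*
     * Nonzero if some pending soft interrupt sits above newpri: bit n of
     * st_pending set means a softint is pending at PIL n, so the highest
     * set bit is the highest pending level.
     */
    static int
    softint_pending_above(uint16_t st_pending, int newpri)
    {
        return (st_pending != 0 && highest_bit(st_pending) > newpri);
    }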
1542 cpu_t *cpu;
1546 cpu = CPU; /* ints are disabled, now safe to cache cpu ptr */
1547 curpri = cpu->cpu_m.mcpu_pri;
1548 basepri = cpu->cpu_base_spl;
1551 cpu->cpu_m.mcpu_pri = newpri;
1572 cpu_t *cpu;
1576 cpu = CPU; /* ints are disabled, now safe to cache cpu ptr */
1577 curpri = cpu->cpu_m.mcpu_pri;
1582 basepri = cpu->cpu_base_spl;
1585 cpu->cpu_m.mcpu_pri = newpri;
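The final two groups of matches share one pattern: cache the cpu pointer only after interrupts are disabled, then never let the new priority drop below cpu_base_spl, which set_base_spl() keeps at the highest still-active interrupt level. A minimal model of that clamp; model_splx() is illustrative, not the kernel's splx():

    /*
     * Model of the priority update suggested by the fragments above: the
     * cpu's priority may be raised freely but may never drop below its
     * base SPL. Returns the old priority so the caller can restore it.
     */
    static int
    model_splx(int *mcpu_pri, int base_spl, int newpri)
    {
        int curpri = *mcpu_pri;

        if (newpri < base_spl)
            newpri = base_spl;
        *mcpu_pri = newpri;
        return (curpri);
    }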