mach_locore.s revision 2f0fcb93196badcdd803715656c809058d9f3114
* show in which TRACE_PTR the assertion failure happened. * Our contract with the boot prom specifies that the MMU is on and the * first 16 meg of memory is mapped with a level-1 pte. We are called * with p1275cis ptr in %o0 and kdi_dvec in %o1; we start execution * directly from physical memory, so we need to get up into our proper * addresses quickly: all code before we do this must be position * NB: Above is not true for boot/stick kernel, the only thing mapped is * the text+data+bss. The kernel is loaded directly into KERNELBASE. * entry, the romvec pointer (romp) is the first argument; * the bootops vector is in the third argument (%o1) * construct mappings for KERNELBASE (not needed for boot/stick kernel) * hop up into high memory (not needed for boot/stick kernel) * initialize stack pointer * initialize trap base register * initialize window invalid mask * initialize psr (with traps enabled) * figure out all the module type stuff * tear down the 1-1 mappings set t0stacktop, %g1 ! setup kernel stack pointer sub %g1, SA(KFPUSIZE+GSR_SIZE), %g2 sub %o1, SA(MPCBSIZE) + STACK_BIAS, %sp ! Initialize global thread register. ! Fill in enough of the cpu structure so that ! the wbuf management code works. Make sure the ! boot cpu is inserted in cpu[] based on cpuid. sll %g2, CPTRSHIFT, %g2 ! convert cpuid to cpu[] offset stn %o0, [%g1 + %g2] ! cpu[cpuid] = &cpu0 stn %o0, [THREAD_REG + T_CPU] ! threadp()->t_cpu = cpu[cpuid] stn THREAD_REG, [%o0 + CPU_THREAD] ! cpu[cpuid]->cpu_thread = threadp() ! We do NOT need to bzero our BSS...boot has already done it for us. ! Just need to reference edata so that we don't
break /dev/ksyms #
error "hole in struct machpcb between frame and regs?" * Generic system trap handler. * Some kernel trap handlers save themselves from buying a window by * borrowing some of sys_trap's unused locals. %l0 thru %l3 may be used * for this purpose, as user_rtt and priv_rtt do not depend on them. * %l4 thru %l7 should NOT be used this way. * %pstate am:0 priv:1 ie:0 * %g2, %g3 args for handler * %g4 desired %pil (-1 means current %pil) * %l6 curthread for user traps, %pil for priv traps * Called function prototype variants: * func(struct regs *rp, uintptr_t arg1 [%g2], uintptr_t arg2 [%g3]) * func(struct regs *rp, uintptr_t arg1 [%g2], * uint32_t arg2 [%g3.l], uint32_t arg3 [%g3.h]) * func(struct regs *rp, uint32_t arg1 [%g2.l], * uint32_t arg2 [%g3.l], uint32_t arg3 [%g3.h], uint32_t [%g2.h]) bne,a,pn %xcc, ptl1_panic sethi %hi(nwin_minus_one), %g5 ld [%g5 + %lo(nwin_minus_one)], %g5 ldn [%g5 + CPU_THREAD], %g5 ! set window registers so that current windows are "other" windows sllx %l1, WSTATE_SHIFT, %l1 wrpr %l1, WSTATE_K64, %wstate ! set pcontext to run kernel stxa %l0, [%l1]ASI_MMU_CTX ! Ensure new ctx takes effect by the time the "done" (below) completes set utl0, %g6 ! bounce to utl0 ld [%o1 + MCPU_KWBUF_FULL], %o2 bnz,a,pn %icc, ptl1_panic SYSTRAP_TRACE(%o1, %o2, %o3) ! at this point we have a new window we can play in, ! and %g6 is the label we want done to bounce to ! save needed current globals srlx %g3, 32, %o3 ! pseudo arg #3 srlx %g2, 32, %o4 ! pseudo arg #4 mov %g5, %l6 ! curthread if user trap, %pil if priv trap ! save trap state on stack add %sp, REGOFF + STACK_BIAS, %l7 stx %l2, [%l7 + TSTATE_OFF] nop ! yes, nop; to avoid anull set sys_trap_wrong_pil, %o1 ! arg #1 ba 1f ! stay at the current %pil ! set trap regs to execute in kernel at %g6 ! 
! done resumes execution there

!
! KWBUF64_TO_STACK: copy a 64-bit-wide saved register window (16
! doublewords) from the kernel window-save buffer at SBP onto the
! V9-biased stack frame at SPP, using TMP as the scratch register.
!
#define KWBUF64_TO_STACK(SBP,SPP,TMP) \
	ldx	[SBP + (0*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 0]; \
	ldx	[SBP + (1*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 8]; \
	ldx	[SBP + (2*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 16]; \
	ldx	[SBP + (3*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 24]; \
	ldx	[SBP + (4*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 32]; \
	ldx	[SBP + (5*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 40]; \
	ldx	[SBP + (6*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 48]; \
	ldx	[SBP + (7*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 56]; \
	ldx	[SBP + (8*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 64]; \
	ldx	[SBP + (9*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 72]; \
	ldx	[SBP + (10*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 80]; \
	ldx	[SBP + (11*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 88]; \
	ldx	[SBP + (12*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 96]; \
	ldx	[SBP + (13*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 104]; \
	ldx	[SBP + (14*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 112]; \
	ldx	[SBP + (15*8)], TMP; \
	stx	TMP, [SPP + V9BIAS64 + 120];

!
! KWBUF32_TO_STACK: copy a 32-bit-wide saved register window (16 words)
! from the kernel window-save buffer at SBP onto the (unbiased) 32-bit
! stack frame at SPP, using TMP as the scratch register.
!
! FIX(review): the store of word 0 was missing -- word 0 was loaded into
! TMP and immediately overwritten by the load of word 1, so the first
! word of the window was never written to the stack.  Restored the
! lduw/stw pair for index 0 to match every other index.
!
#define KWBUF32_TO_STACK(SBP,SPP,TMP) \
	lduw	[SBP + (0 * 4)], TMP; \
	stw	TMP, [SPP + (0 * 4)]; \
	lduw	[SBP + (1 * 4)], TMP; \
	stw	TMP, [SPP + (1 * 4)]; \
	lduw	[SBP + (2 * 4)], TMP; \
	stw	TMP, [SPP + (2 * 4)]; \
	lduw	[SBP + (3 * 4)], TMP; \
	stw	TMP, [SPP + (3 * 4)]; \
	lduw	[SBP + (4 * 4)], TMP; \
	stw	TMP, [SPP + (4 * 4)]; \
	lduw	[SBP + (5 * 4)], TMP; \
	stw	TMP, [SPP + (5 * 4)]; \
	lduw	[SBP + (6 * 4)], TMP; \
	stw	TMP, [SPP + (6 * 4)]; \
	lduw	[SBP + (7 * 4)], TMP; \
	stw	TMP, [SPP + (7 * 4)]; \
	lduw	[SBP + (8 * 4)], TMP; \
	stw	TMP, [SPP + (8 * 4)]; \
	lduw	[SBP + (9 * 4)], TMP; \
	stw	TMP, [SPP + (9 * 4)]; \
	lduw	[SBP + (10 * 4)], TMP; \
	stw	TMP, [SPP + (10 * 4)]; \
	lduw	[SBP + (11 * 4)], TMP; \
	stw	TMP, [SPP + (11 * 4)]; \
	lduw	[SBP + (12 * 4)], TMP; \
	stw	TMP, [SPP + (12 * 4)]; \
	lduw	[SBP + (13 * 4)], TMP; \
	stw	TMP, [SPP + (13 * 4)]; \
	lduw	[SBP + (14 * 4)], TMP; \
	stw	TMP, [SPP + (14 * 4)]; \
	lduw	[SBP + (15 * 4)], TMP; \
	stw	TMP, [SPP + (15 * 4)];
#define
COPY_KWBUF_TO_STACK(TMP1,TMP2,TMP3) \ add TMP2, CPU_MCPU, TMP2 ;\ ld [TMP2 + MCPU_KWBUF_FULL], TMP3 ;\ st %g0, [TMP2 + MCPU_KWBUF_FULL] ;\ set MCPU_KWBUF_SP, TMP3 ;\ ldn [TMP2 + TMP3], TMP3 ;\ KWBUF64_TO_STACK(TMP2, TMP3, TMP1) ;\ KWBUF32_TO_STACK(TMP2, TMP3, TMP1) ;\ ! prom trap switches the stack to 32-bit ! if we took a trap from a 64-bit window ! Then buys a window on the current stack. save %sp, -SA64(REGOFF + REGSIZE), %sp /* 32 bit frame, 64 bit sized */ COPY_KWBUF_TO_STACK(%o1, %o2, %o3) ! buy a window on the current stack ! is the trap PC in the range allocated to Open Firmware? save %sp, -SA(REGOFF + REGSIZE), %sp COPY_KWBUF_TO_STACK(%o1, %o2, %o3) * from a 32/64-bit * wide address space via the designated asi. * It is used to fill windows in user_rtt to avoid going above TL 2. #define FILL_32bit_rtt(asi_num) \ lda [%sp + 0]%asi, %l0 ;\ lda [%sp + 4]%asi, %l1 ;\ lda [%sp + 8]%asi, %l2 ;\ lda [%sp + 12]%asi, %l3 ;\ lda [%sp + 16]%asi, %l4 ;\ lda [%sp + 20]%asi, %l5 ;\ lda [%sp + 24]%asi, %l6 ;\ lda [%sp + 28]%asi, %l7 ;\ lda [%sp + 32]%asi, %i0 ;\ lda [%sp + 36]%asi, %i1 ;\ lda [%sp + 40]%asi, %i2 ;\ lda [%sp + 44]%asi, %i3 ;\ lda [%sp + 48]%asi, %i4 ;\ lda [%sp + 52]%asi, %i5 ;\ lda [%sp + 56]%asi, %i6 ;\ lda [%sp + 60]%asi, %i7 ;\ #define FILL_64bit_rtt(asi_num) \ ldxa [%sp + V9BIAS64 + 0]%asi, %l0 ;\ ldxa [%sp + V9BIAS64 + 8]%asi, %l1 ;\ ldxa [%sp + V9BIAS64 + 16]%asi, %l2 ;\ ldxa [%sp + V9BIAS64 + 24]%asi, %l3 ;\ ldxa [%sp + V9BIAS64 + 32]%asi, %l4 ;\ ldxa [%sp + V9BIAS64 + 40]%asi, %l5 ;\ ldxa [%sp + V9BIAS64 + 48]%asi, %l6 ;\ ldxa [%sp + V9BIAS64 + 56]%asi, %l7 ;\ ldxa [%sp + V9BIAS64 + 64]%asi, %i0 ;\ ldxa [%sp + V9BIAS64 + 72]%asi, %i1 ;\ ldxa [%sp + V9BIAS64 + 80]%asi, %i2 ;\ ldxa [%sp + V9BIAS64 + 88]%asi, %i3 ;\ ldxa [%sp + V9BIAS64 + 96]%asi, %i4 ;\ ldxa [%sp + V9BIAS64 + 104]%asi, %i5 ;\ ldxa [%sp + V9BIAS64 + 112]%asi, %i6 ;\ ldxa [%sp + V9BIAS64 + 120]%asi, %i7 ;\ wrpr %g0, PSTATE_KERN, %pstate ! enable ints jmpl %l3, %o7 ! 
call trap handler ! disable interrupts and check for ASTs and wbuf restores ! keep cpu_base_spl in %l4 ldn [THREAD_REG + T_CPU], %l0 ld [%l0 + CPU_BASE_SPL], %l4 ldub [THREAD_REG + T_ASTFLAG], %l2 ld [%sp + STACK_BIAS + MPCB_WBCNT], %l3 ! call trap to do ast processing wrpr %g0, %l4, %pil ! pil = cpu_base_spl ! call restore_wbuf to push wbuf windows to stack wrpr %g0, %l4, %pil ! pil = cpu_base_spl TRACE_RTT(TT_SYS_RTT_USER, %l0, %l1, %l2, %l3) ld [%sp + STACK_BIAS + MPCB_WSTATE], %l3 ! get wstate ! restore user globals and outs wrpr %l1, PSTATE_IE, %pstate ! switch to global set 1, saving THREAD_REG in %l6 mov %sp, %g6 ! remember the mpcb pointer in %g6 ! set %pil from cpu_base_spl ! raise tl (now using nucleus context) ! set pcontext to scontext for user execution ldxa [%g1]ASI_MMU_CTX, %g2 stxa %g2, [%g1]ASI_MMU_CTX ! If shared context support is not enabled, then the next four ! instructions will be patched with nop instructions. .global sfmmu_shctx_user_rtt_patch sfmmu_shctx_user_rtt_patch: ldxa [%g1]ASI_MMU_CTX, %g2 stxa %g2, [%g1]ASI_MMU_CTX ! Ensure new ctxs take effect by the time the "retry" (below) completes ldx [%l7 + TSTATE_OFF], %l0 andn %l0, TSTATE_CWP, %g7 ! switch "other" windows back to "normal" windows and ! restore to window we originally trapped in add %l3, WSTATE_CLEAN_OFFSET, %l3 ! convert to "clean" wstate wrpr %g0, %g1, %canrestore ! First attempt to restore from the watchpoint saved register window clrn [%g6 + STACK_BIAS + MPCB_RSP0] clrn [%g6 + STACK_BIAS + MPCB_RSP0] ! test for user return window in pcb ldn [%g6 + STACK_BIAS + MPCB_RSP0], %g1 clrn [%g6 + STACK_BIAS + MPCB_RSP0] ! restore from user return window RESTORE_V9WINDOW(%g6 + STACK_BIAS + MPCB_RWIN0) ! Attempt to restore from the scond watchpoint saved register window clrn [%g6 + STACK_BIAS + MPCB_RSP1] ldn [%g6 + STACK_BIAS + MPCB_RSP1], %g1 clrn [%g6 + STACK_BIAS + MPCB_RSP1] RESTORE_V9WINDOW(%g6 + STACK_BIAS + MPCB_RWIN1) nop ! 
no trap, use restore directly wrpr %g1, %g7, %tstate ! needed by wbuf recovery code ! hand craft the restore to avoid getting to TL > 2 restore ! should not trap ! set %cleanwin to %canrestore ! set %tstate to the correct %cwp ! retry resumes user execution ldn [%g5 + CPU_THREAD], THREAD_REG wrpr %g0, PSTATE_KERN, %pstate ! enable ints jmpl %l3, %o7 ! call trap handler TRACE_RTT(TT_SYS_RTT_PROM, %l0, %l1, %l2, %l3) * THREAD_REG cannot be restored in fault_32bit_fn1 since * sun4v cannot safely lower %gl then raise it again. ldn [%l0 + CPU_THREAD], THREAD_REG SAVE_OUTS(%l7) ! for the call bug workaround wrpr %g0, PSTATE_KERN, %pstate ! enable ints jmpl %l3, %o7 ! call trap handler TRACE_RTT(TT_SYS_RTT_PRIV, %l0, %l1, %l2, %l3) ! Check for a kernel preemption request ldn [THREAD_REG + T_CPU], %l0 ldub [%l0 + CPU_KPRUNRUN], %l0 ldstub [THREAD_REG + T_PREEMPT_LK], %l0 ! load preempt lock .
asciz "sys_trap: %g4(%d) is lower than %pil(%d)" * sys_tl1_panic can be called by traps at tl1 which * really want to panic, but need the rearrangement of * the args as provided by this wrapper routine. * Flush all windows to memory, except for the one we entered in. * We do this by doing NWINDOW-2 saves then the same number of restores. * This leaves the WIM immediately before window entered in. * This is used for context switching. * flush user windows to memory. * Throw out any user windows in the register file. * Used by setregs (exec) to clean out old user. * Used by sigcleanup to remove extraneous windows when returning from a ! Also, this sets up a nice underflow when first returning to the wrpr %g2, PSTATE_IE, %pstate rdpr %otherwin, %g1 ! re-read in case of interrupt clr [%g5 + MPCB_WBCNT] ! zero window buffer cnt SET_SIZE(trash_user_windows) * Setup g7 via the CPU data structure. ta 72 ! no tbr, stop simulation * These need to be defined somewhere to lint and there is no "hicore.s"... 
!
! PTL1_SAVE_WINDOW: store one full register window (%l0-%l7, %i0-%i7)
! into the ptl1_state save area addressed by RP, using stxa via %asi.
! NOTE(review): presumably %asi is ASI_MEM here (ptl1_panic sets it so
! for physical-address access) so these stores cannot take MMU miss
! traps -- confirm at the call sites.
!
#define PTL1_SAVE_WINDOW(RP) \
	stxa	%l0, [RP + RW64_LOCAL + (0 * RW64_LOCAL_INCR)] %asi; \
	stxa	%l1, [RP + RW64_LOCAL + (1 * RW64_LOCAL_INCR)] %asi; \
	stxa	%l2, [RP + RW64_LOCAL + (2 * RW64_LOCAL_INCR)] %asi; \
	stxa	%l3, [RP + RW64_LOCAL + (3 * RW64_LOCAL_INCR)] %asi; \
	stxa	%l4, [RP + RW64_LOCAL + (4 * RW64_LOCAL_INCR)] %asi; \
	stxa	%l5, [RP + RW64_LOCAL + (5 * RW64_LOCAL_INCR)] %asi; \
	stxa	%l6, [RP + RW64_LOCAL + (6 * RW64_LOCAL_INCR)] %asi; \
	stxa	%l7, [RP + RW64_LOCAL + (7 * RW64_LOCAL_INCR)] %asi; \
	stxa	%i0, [RP + RW64_IN + (0 * RW64_IN_INCR)] %asi; \
	stxa	%i1, [RP + RW64_IN + (1 * RW64_IN_INCR)] %asi; \
	stxa	%i2, [RP + RW64_IN + (2 * RW64_IN_INCR)] %asi; \
	stxa	%i3, [RP + RW64_IN + (3 * RW64_IN_INCR)] %asi; \
	stxa	%i4, [RP + RW64_IN + (4 * RW64_IN_INCR)] %asi; \
	stxa	%i5, [RP + RW64_IN + (5 * RW64_IN_INCR)] %asi; \
	stxa	%i6, [RP + RW64_IN + (6 * RW64_IN_INCR)] %asi; \
	stxa	%i7, [RP + RW64_IN + (7 * RW64_IN_INCR)] %asi
! NOTE(review): the bodies of PTL1_NEXT_WINDOW and PTL1_RESET_RWINDOWS
! appear truncated in this copy -- a continuation backslash runs
! straight into the next #define.  Confirm their full bodies against
! the upstream mach_locore.s before relying on them.
#define PTL1_NEXT_WINDOW(scr) \
#define PTL1_RESET_RWINDOWS(scr) \
	sethi	%hi(nwin_minus_one), scr; \
	ld	[scr + %lo(nwin_minus_one)], scr; \
#define PTL1_DCACHE_LINE_SIZE 4 /* small enough for all CPUs */
!
! ptl1_panic is called when the kernel detects that it is in an invalid state
! and the trap level is greater than 0.  ptl1_panic is responsible for saving
! the current CPU state, restoring the CPU state to normal, and calling panic.
! The CPU state must be saved reliably without causing traps.  ptl1_panic saves
! it in the ptl1_state structure, which is a member of the machcpu structure.
! In order to access the ptl1_state structure without causing traps, physical
! addresses are used so that we can avoid MMU miss traps.  The restriction of
! physical memory accesses is that the ptl1_state structure must be on a single
! physical page.  This is because (1) a single physical address for each
! ptl1_state structure is needed and (2) it simplifies physical address
! calculation for each member of the structure.
* ptl1_panic is a likely spot for stack overflows to wind up; thus, the current * stack may not be usable. In order to call panic reliably in such a state, * each CPU needs a dedicated ptl1 panic stack. * CPU_ALLOC_SIZE, which is defined to be MMU_PAGESIZE, is used to allocate the * cpu structure and a ptl1 panic stack. They are put together on the same page * for memory space efficiency. The low address part is used for the cpu * structure, and the high address part is for a ptl1 panic stack. * The cpu_pa array holds the physical addresses of the allocated cpu structures, * as the cpu array holds their virtual addresses. * %g1 reason to be called ! increment the entry counter. ! save CPU state if this is the first entry. add %g2, CPU_PTL1, %g2 ! pstate = &CPU->mcpu.ptl1_state wr %g0, ASI_MEM, %asi ! physical address access ! pstate->ptl1_entry_count++ lduwa [%g2 + PTL1_ENTRY_COUNT] %asi, %g3 stuwa %g3, [%g2 + PTL1_ENTRY_COUNT] %asi ! CPU state saving is skipped from the 2nd entry to ptl1_panic since we ! do not want to clobber the state from the original failure. panic() ! is responsible for handling multiple or recursive panics. cmp %g3, 2 ! if (ptl1_entry_count >= 2) bge,pn %icc, state_saved ! goto state_saved add %g2, PTL1_REGS, %g3 ! %g3 = &pstate->ptl1_regs[0] ! save current global registers ! so that all them become available for use stxa %o1, [%g3 + PTL1_RWINDOW] %asi ! save %o1 stxa %o2, [%g3 + PTL1_RWINDOW + 8] %asi ! save %o2 stxa %o3, [%g3 + PTL1_RWINDOW + 16] %asi ! save %o3 add %g3, PTL1_GREGS, %o2 ! %o4 = &ptl1_gregs[0] stxa %o1, [%o2 + PTL1_GL] %asi stxa %g1, [%o2 + PTL1_G1] %asi stxa %g2, [%o2 + PTL1_G2] %asi stxa %g3, [%o2 + PTL1_G3] %asi stxa %g4, [%o2 + PTL1_G4] %asi stxa %g5, [%o2 + PTL1_G5] %asi stxa %g6, [%o2 + PTL1_G6] %asi stxa %g7, [%o2 + PTL1_G7] %asi add %o2, PTL1_GREGS_INCR, %o2 ! restore %g3, %o1, %o2 and %o3 ldxa [%g3 + PTL1_RWINDOW] %asi, %o1 ldxa [%g3 + PTL1_RWINDOW + 8] %asi, %o2 ldxa [%g3 + PTL1_RWINDOW + 16] %asi, %o3 ! 
%tl, %tt, %tstate, %tpc, %tnpc for each TL brz %g1, 1f ! if(trap_level == 0) -------+ add %g3, PTL1_TRAP_REGS, %g4 ! %g4 = &ptl1_trap_regs[0]; ! 0: ! -----------<----------+ ! stwa %g1, [%g4 + PTL1_TL] %asi ! ! stwa %g5, [%g4 + PTL1_TT] %asi ! ! stxa %g5, [%g4 + PTL1_TSTATE] %asi ! ! stxa %g5, [%g4 + PTL1_TPC] %asi ! ! stxa %g5, [%g4 + PTL1_TNPC] %asi ! ! add %g4, PTL1_TRAP_REGS_INCR, %g4 ! ! bnz,a,pt %icc, 0b ! if(trap_level != 0) --+ ! 1: ! ----------<----------------+ ! %pstate, %pil, SOFTINT, (S)TICK ! Pending interrupts is also cleared in order to avoid a recursive call ! to ptl1_panic in case the interrupt handler causes a panic. stba %g1, [%g3 + PTL1_PIL] %asi stha %g1, [%g3 + PTL1_PSTATE] %asi sta %g1, [%g3 + PTL1_SOFTINT] %asi sethi %hi(traptrace_use_stick), %g1 ld [%g1 + %lo(traptrace_use_stick)], %g1 2: stxa %g1, [%g3 + PTL1_TICK] %asi MMU_FAULT_STATUS_AREA(%g1) ldx [%g1 + MMFSA_D_TYPE], %g4 stxa %g4, [%g3 + PTL1_DMMU_TYPE] %asi ldx [%g1 + MMFSA_D_ADDR], %g4 stxa %g4, [%g3 + PTL1_DMMU_ADDR] %asi ldx [%g1 + MMFSA_D_CTX], %g4 stxa %g4, [%g3 + PTL1_DMMU_CTX] %asi ldx [%g1 + MMFSA_I_TYPE], %g4 stxa %g4, [%g3 + PTL1_IMMU_TYPE] %asi ldx [%g1 + MMFSA_I_ADDR], %g4 stxa %g4, [%g3 + PTL1_IMMU_ADDR] %asi ldx [%g1 + MMFSA_I_CTX], %g4 stxa %g4, [%g3 + PTL1_IMMU_CTX] %asi ! Save register window state and register windows. stba %g1, [%g3 + PTL1_CWP] %asi stba %g1, [%g3 + PTL1_WSTATE] %asi stba %g1, [%g3 + PTL1_OTHERWIN] %asi stba %g1, [%g3 + PTL1_CLEANWIN] %asi stba %g1, [%g3 + PTL1_CANSAVE] %asi stba %g1, [%g3 + PTL1_CANRESTORE] %asi add %g3, PTL1_RWINDOW, %g4 ! %g4 = &ptl1_rwindow[0]; 3: PTL1_SAVE_WINDOW(%g4) ! <-------------+ cmp %g1, %g2 ! saturation check PTL1_NEXT_WINDOW(%g4) ! ------+ ! most crucial CPU state was saved. ! Proceed to go back to TL = 0. wrpr %g0, WSTATE_KERN, %wstate ! Set pcontext to run kernel. stxa %g0, [%g1]ASI_MMU_CTX ptl1_panic_tl0: ! ----<-----+ TL:0 CPU_ADDR(%l0, %l1) ! %l0 = cpu[cpuid] add %l0, CPU_PTL1, %l1 ! 
%l1 = &CPU->mcpu.ptl1_state ! prepare to call panic() ldn [%l0 + CPU_THREAD], THREAD_REG ! restore %g7 ldn [%l1 + PTL1_STKTOP], %l2 ! %sp = ptl1_stktop sub %l2, SA(MINFRAME) + STACK_BIAS, %sp clr %fp ! no frame below this window ! enable limited interrupts wrpr %g0, CLOCK_LEVEL, %pil wrpr %g0, PSTATE_KERN, %pstate ba,pt %xcc, ptl1_panic_handler * ptl1_recurse() calls itself a number of times to either set up a known * stack or to cause a kernel stack overflow. It decrements the arguments * Initialize Out Registers to Known Values * Asm function to handle a cross trap to call ptl1_panic() #
endif /* PTL1_PANIC_DEBUG */ * The interface for a 32-bit client program that takes over the TBA * calling the 64-bit romvec OBP. .
asciz "hypervisor call 0x%x returned an unexpected error %d" * panic_bad_hcall is called when a hcall returns