/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
#if defined(lint)
#include <sys/types.h>
#include <sys/thread.h>
#else /* lint */
#include "assym.h"
#endif /* lint */
#include <sys/asm_linkage.h>
#include <sys/machthread.h>
#include <sys/machcpuvar.h>
#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */
#if defined(lint)
/* ARGSUSED */
void
pil_interrupt(int level)
{}
#else /* lint */
/*
* (TT 0x40..0x4F, TL>0) Interrupt Level N Handler (N == 1..15)
* Register passed from LEVEL_INTERRUPT(level)
* %g4 - interrupt request level
*/
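/*
 * In rough C terms, the dispatch is (a sketch only; the actual hand-off
 * is done in registers, and sys_trap is not a C function):
 *
 *	if (pil <= LOCK_LEVEL)
 *		sys_trap(intr_thread, regs, iv, pil);	 // separate thread
 *	else
 *		sys_trap(current_thread, regs, iv, pil); // current stack
 */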
!
!
!
!
0:
1:
2:
#ifdef TRAPTRACE
#endif /* TRAPTRACE */
!
!
! iv->iv_xpil_next
!
!
!
!
3:
#endif /* lint */
#ifndef lint
.asciz "!interrupt 0x%x at level %d not serviced"
/*
* SERVE_INTR_PRE is called once, just before the first invocation
* of SERVE_INTR.
*
* Registers on entry:
*
* iv_p, cpu, regs: may be out-registers
* ls1, ls2: local scratch registers
* os1, os2, os3: scratch registers, may be out
*/
/*
* SERVE_INTR is called immediately after either SERVE_INTR_PRE or
* SERVE_INTR_NEXT, without intervening code. No register values
* may be modified.
*
* After calling SERVE_INTR, the caller must check if os3 is set. If
* so, there is another interrupt to process. The caller must call
* SERVE_INTR_NEXT, immediately followed by SERVE_INTR.
*
* Before calling SERVE_INTR_NEXT, the caller may perform accounting
* and other actions which need to occur after invocation of an interrupt
* handler. However, the values of ls1 and os3 *must* be preserved and
* passed unmodified into SERVE_INTR_NEXT.
*
* Registers on return from SERVE_INTR:
*
* ls1 - the pil just processed
* ls2 - the pointer to intr_vec_t (iv) just processed
* os3 - if set, another interrupt needs to be processed
* cpu, ls1, os3 - must be preserved if os3 is set
*/
/*
 * SERVE_INTR_NEXT is called after SERVE_INTR when os3 indicates that
 * another interrupt request is pending.
 *
 * Registers on entry:
*
* cpu - cpu pointer (clobbered, set to cpu upon completion)
* ls1, os3 - preserved from prior call to SERVE_INTR
* ls2 - local scratch reg (not preserved)
* os1, os2, os4, os5 - scratch reg, can be out (not preserved)
*/
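/*
 * The intended calling sequence, as a C-like sketch of the protocol
 * described above (the macros expand to assembly; argument lists elided):
 *
 *	SERVE_INTR_PRE(...);
 *	for (;;) {
 *		SERVE_INTR(...);
 *		if (!os3)
 *			break;		// no more interrupts to process
 *		// accounting may go here; ls1 and os3 must be preserved
 *		SERVE_INTR_NEXT(...);
 *	}
 */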
#ifdef TRAPTRACE
/*
* inum - not modified, _spurious depends on it.
*/
#else /* TRAPTRACE */
#endif /* TRAPTRACE */
#ifdef TRAPTRACE
/*
* inum - not modified, _spurious depends on it.
*/
#else /* TRAPTRACE */
#endif /* TRAPTRACE */
#endif /* lint */
#if defined(lint)
/*ARGSUSED*/
void
intr_thread(struct regs *regs, uint64_t iv_p, uint_t pil)
{}
#else /* lint */
#define INTRCNT_LIMIT 16
/*
* Handle an interrupt in a new thread.
* Entry:
* %o0 = pointer to regs structure
* %o1 = pointer to current intr_vec_t (iv) to be processed
* %o2 = pil
* %sp = on current thread's kernel stack
* %o7 = return linkage to trap code
* %g7 = current thread
* %pstate = normal globals, interrupts enabled,
* privileged, fp disabled
* %pil = DISP_LEVEL
*
* Register Usage
* %l0 = return linkage
* %l1 = pil
* %l2 - %l3 = scratch
* %l4 - %l7 = reserved for sys_trap
* %o2 = cpu
* %o3 = intr thread
* %o0 = scratch
* %o4 - %o5 = scratch
*/
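!
! In outline, a C-like sketch of the flow below (field names follow the
! comments in this file):
!
!	account for the interrupted (pinned) thread's time slice;
!	it = cpu->cpu_intr_thread;	! unlink a thread from the pool
!	cpu->cpu_intr_thread = it->t_link;
!	cpu->cpu_intr_actv |= 1 << pil;
!	it->t_intr = curthread;		! push the pinned thread
!	curthread = it;			! switch threads and stacks
!	it->t_pil = pil;
!	it->t_intr_start = %tick;
!	lower %pil to the new level;
!	serve requests via SERVE_INTR / SERVE_INTR_NEXT;
!	update intrstat/intracct, unpin, and return to the trap code
!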
!
!
0:
! t_intr_start is zeroed when an interrupt thread is taken off CPU by
! swtch() after its slice has been accounted for.
! Only account for the time slice if the starting timestamp is non-zero.
RD_CLOCK_TICK(%o4,%l2,%l3,__LINE__)
sub %o4, %o3, %o4 ! o4 has interval
! A high-level interrupt in current_thread() interrupting here
! will account for the interrupted thread's slice: it will load the
! starting timestamp, calculate the interval with %tick, and zero
! its starting timestamp.
! To do this, we do a casx on the t_intr_start field, and store 0 to it.
! If it has changed since we loaded it above, we need to re-compute the
! interval, since a changed t_intr_start implies current_thread placed
! a new, later timestamp there after running a high-level interrupt,
! and the %tick val in %o4 had become stale.
mov %g0, %l2
casx [%o5], %o3, %l2
! If %l2 == %o3, our casx was successful. If not, the starting timestamp
! changed between loading it (after label 0b) and computing the
! interval above.
cmp %l2, %o3
bne,pn %xcc, 0b
! Check for Energy Star mode
lduh [%o2 + CPU_DIVISOR], %l2 ! delay -- %l2 = clock divisor
cmp %l2, 1
bg,a,pn %xcc, 2f
mulx %o4, %l2, %o4 ! multiply interval by clock divisor iff > 1
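!
! The sequence above, as a C-like sketch (casx is the 64-bit
! compare-and-swap; t is the interrupted interrupt thread):
!
!	do {
!		start = t->t_intr_start;
!		interval = %tick - start;
!	} while (casx(&t->t_intr_start, start, 0) != start);
!	if (cpu->cpu_divisor > 1)
!		interval *= cpu->cpu_divisor;
!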
2:
! We now know that a valid interval for the interrupted interrupt
! thread is in %o4. Update its cumulative counter.
ldub [THREAD_REG + T_PIL], %l3 ! load PIL
sllx %l3, 4, %l3 ! convert PIL index to byte offset
add %l3, CPU_MCPU, %l3 ! CPU_INTRSTAT is too big for use
add %l3, MCPU_INTRSTAT, %l3 ! as const, add offsets separately
ldx [%o2 + %l3], %o5 ! old counter in o5
add %o5, %o4, %o5 ! new counter in o5
stx %o5, [%o2 + %l3] ! store new counter
! Also update intracct[]
lduh [%o2 + CPU_MSTATE], %l3
sllx %l3, 3, %l3
add %l3, CPU_INTRACCT, %l3
add %l3, %o2, %l3
0:
ldx [%l3], %o5
add %o5, %o4, %o3
casx [%l3], %o5, %o3
cmp %o5, %o3
bne,pn %xcc, 0b
nop
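!
! I.e. a lock-free 64-bit add, roughly:
!
!	do {
!		old = cpu->cpu_intracct[cpu->cpu_mstate];
!	} while (casx(&cpu->cpu_intracct[cpu->cpu_mstate],
!	    old, old + interval) != old);
!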
1:
!
! Get set to run interrupt thread.
! There should always be an interrupt thread since we allocate one
! for each level on the CPU.
!
! Note that the code in kcpc_overflow_intr -relies- on the ordering
! of events here -- in particular that t->t_lwp of the interrupt thread
! is set to the pinned thread *before* curthread is changed.
!
ldn [%o2 + CPU_INTR_THREAD], %o3 ! interrupt thread pool
ldn [%o3 + T_LINK], %o4 ! unlink thread from CPU's list
stn %o4, [%o2 + CPU_INTR_THREAD] ! list now has next thread
!
! Set bit for this level in CPU's active interrupt bitmask.
!
ld [%o2 + CPU_INTR_ACTV], %o5
mov 1, %o4
sll %o4, %l1, %o4
#ifdef DEBUG
!
! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
!
andcc %o5, %o4, %g0
bz,pt %xcc, 0f
nop
! Do not call panic if a panic is already in progress.
sethi %hi(panic_quiesce), %l2
ld [%l2 + %lo(panic_quiesce)], %l2
brnz,pn %l2, 0f
nop
sethi %hi(intr_thread_actv_bit_set), %o0
call panic
or %o0, %lo(intr_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
or %o5, %o4, %o5
st %o5, [%o2 + CPU_INTR_ACTV]
!
! Consider the new thread part of the same LWP so that
! window overflow code can find the PCB.
!
ldn [THREAD_REG + T_LWP], %o4
stn %o4, [%o3 + T_LWP]
!
! Threads on the interrupt thread free list could have state already
! set to TS_ONPROC, which is fine for our purposes here.
!
! Push the interrupted thread onto the new thread's chain and make
! the new thread current. Set the interrupted thread's T_SP because
! if it is the idle thread, resume may use that stack between threads.
!
stn %o7, [THREAD_REG + T_PC] ! mark pc for resume
stn %sp, [THREAD_REG + T_SP] ! mark stack for resume
stn THREAD_REG, [%o3 + T_INTR] ! push old thread
stn %o3, [%o2 + CPU_THREAD] ! set new thread
mov %o3, THREAD_REG ! set global curthread register
ldn [%o3 + T_STACK], %o4 ! interrupt stack pointer
sub %o4, STACK_BIAS, %sp
!
! Initialize thread priority level from intr_pri
!
sethi %hi(intr_pri), %o4
ldsh [%o4 + %lo(intr_pri)], %o4 ! grab base interrupt priority
add %l1, %o4, %o4 ! convert level to dispatch priority
sth %o4, [THREAD_REG + T_PRI]
stub %l1, [THREAD_REG + T_PIL] ! save pil for intr_passivate
! Store starting timestamp in thread structure.
add THREAD_REG, T_INTR_START, %o3
1:
ldx [%o3], %o5
RD_CLOCK_TICK(%o4,%l2,%l3,__LINE__)
casx [%o3], %o5, %o4
cmp %o4, %o5
! If a high-level interrupt occurred while we were attempting to store
! the timestamp, try again.
bne,pn %xcc, 1b
nop
wrpr %g0, %l1, %pil ! lower %pil to new level
!
! Fast event tracing.
!
ld [%o2 + CPU_FTRACE_STATE], %o4 ! %o2 = curthread->t_cpu
btst FTRACE_ENABLED, %o4
be,pt %icc, 1f ! skip if ftrace disabled
mov %l1, %o5
!
! Tracing is enabled - write the trace entry.
!
save %sp, -SA(MINFRAME), %sp
set ftrace_intr_thread_format_str, %o0
mov %i0, %o1
mov %i1, %o2
mov %i5, %o3
call ftrace_3
ldn [%i0 + PC_OFF], %o4
restore
1:
!
! call the handler
!
SERVE_INTR_PRE(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
!
! %o0 and %o1 are now available as scratch registers.
!
0:
SERVE_INTR(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
!
! If %o3 is set, we must call serve_intr_next, and both %l1 and %o3
! must be preserved. %l1 holds our pil, %l3 holds our inum.
!
! blocked.
!
!
!
1:
#ifdef DEBUG
9:
#endif /* DEBUG */
!
!
!
!
!
2:
!
! A high-level interrupt could arrive and update the same counters
! we're updating. Need to use casx.
!
sllx %l1, 4, %o1 ! delay - PIL as byte offset
add %o1, CPU_MCPU, %o1 ! CPU_INTRSTAT const too big
add %o1, MCPU_INTRSTAT, %o1 ! add parts separately
add %o1, %o2, %o1
1:
ldx [%o1], %o5 ! old counter in o5
add %o5, %l2, %o0 ! new counter in o0
stx %o0, [%o1 + 8] ! store into intrstat[pil][1]
casx [%o1], %o5, %o0 ! and into intrstat[pil][0]
cmp %o5, %o0
bne,pn %xcc, 1b
nop
! Also update intracct[]
lduh [%o2 + CPU_MSTATE], %o1
sllx %o1, 3, %o1
add %o1, CPU_INTRACCT, %o1
add %o1, %o2, %o1
1:
ldx [%o1], %o5
add %o5, %l2, %o0
casx [%o1], %o5, %o0
cmp %o5, %o0
bne,pn %xcc, 1b
nop
!
! If we've handled enough consecutive interrupts without returning to
! the pinned thread, we've crossed the threshold and we should unpin
! the pinned threads by preempt()ing ourselves, which will bubble up
! the t_intr chain until hitting the non-interrupt thread, which will
! then in turn preempt itself allowing the interrupt processing to
! resume. Finally, the scheduler takes over and picks the next thread
! to run.
!
! If our CPU is quiesced, we cannot preempt because the idle thread
! cannot be preempted, so the interrupt processing would remain
! blocked.
!
! In that case, don't use the regular preemption-counting
! algorithm. Just check for cpu_kprunrun, and if set then preempt.
! This ensures we enter the scheduler if a higher-priority thread
! has become runnable.
!
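!
! Roughly, as a C-like sketch of the code below (cpu_intrcnt is zeroed
! later, once no further interrupts are pending):
!
!	if (++cpu->cpu_intrcnt >= INTRCNT_LIMIT) {
!		cpu->cpu_kprunrun = 1;
!		cpu->cpu_stats.sys.intrunpin++;
!	}
!	...
!	if (cpu->cpu_kprunrun)
!		preempt();
!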
ldub [%o2 + CPU_INTRCNT], %o5 ! delay - %o5 = cpu_intrcnt
inc %o5
cmp %o5, INTRCNT_LIMIT ! have we hit the limit?
bl,a,pt %xcc, 1f ! no preempt if < INTRCNT_LIMIT
stub %o5, [%o2 + CPU_INTRCNT] ! delay annul - inc CPU_INTRCNT
!
! We've reached the limit. Set cpu_intrcnt and cpu_kprunrun, and do
! CPU_STATS_ADDQ(cp, sys, intrunpin, 1). Then call preempt.
!
mov 1, %o4 ! delay
stub %o4, [%o2 + CPU_KPRUNRUN]
ldx [%o2 + CPU_STATS_SYS_INTRUNPIN], %o4
inc %o4
stx %o4, [%o2 + CPU_STATS_SYS_INTRUNPIN]
ba 2f
stub %o5, [%o2 + CPU_INTRCNT] ! delay
3:
! Code for t_intr == NULL
ldub [%o2 + CPU_KPRUNRUN], %o5
2:
1:
!
!
!
!
0:
!
! Clear bit for this level in CPU's interrupt active bitmask.
!
ld [%o2 + CPU_INTR_ACTV], %o5
mov 1, %o4
sll %o4, %l1, %o4
#ifdef DEBUG
!
! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
!
andcc %o4, %o5, %g0
bnz,pt %xcc, 0f
nop
! Do not call panic if a panic is already in progress.
sethi %hi(panic_quiesce), %l2
ld [%l2 + %lo(panic_quiesce)], %l2
brnz,pn %l2, 0f
nop
sethi %hi(intr_thread_actv_bit_not_set), %o0
call panic
or %o0, %lo(intr_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
andn %o5, %o4, %o5
st %o5, [%o2 + CPU_INTR_ACTV]
!
! If there is still an interrupted thread underneath this one,
! then the interrupt was never blocked and the return is fairly
! simple. Otherwise jump to intr_thread_exit.
!
ldn [THREAD_REG + T_INTR], %o4 ! pinned thread
brz,pn %o4, intr_thread_exit ! branch if none
nop
!
! link the thread back onto the interrupt thread pool
!
ldn [%o2 + CPU_INTR_THREAD], %o3
stn %o3, [THREAD_REG + T_LINK]
stn THREAD_REG, [%o2 + CPU_INTR_THREAD]
!
!
!
!
0:
1:
! If there is no interrupted thread and no softints
! are pending, zero out cpu_intrcnt
ldn [THREAD_REG + T_INTR], %o4
brnz,pn %o4, 2f
rd SOFTINT, %o4 ! delay
set SOFTINT_MASK, %o5
andcc %o4, %o5, %g0
bz,a,pt %xcc, 2f
stub %g0, [%o2 + CPU_INTRCNT] ! delay annul
2:
jmp %l0 + 8
nop
SET_SIZE(intr_thread)
/* Not Reached */
!
! An interrupt returned on what was once (and still might be)
! an interrupt thread stack, but the interrupted process is no longer
! there. This means the interrupt must have blocked.
!
! There is no longer a thread under this one, so put this thread back
! on the CPU's interrupt thread pool and call swtch() to resume
! another thread.
!
!
#ifdef TRAPTRACE
#endif /* TRAPTRACE */
!
!
!
!
#ifdef DEBUG
!
! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
!
ld [%o2 + CPU_INTR_ACTV], %o5
mov 1, %o4
sll %o4, %l1, %o4
and %o5, %o4, %o4
brz,pt %o4, 0f
nop
! Do not call panic if a panic is already in progress.
sethi %hi(panic_quiesce), %l2
ld [%l2 + %lo(panic_quiesce)], %l2
brnz,pn %l2, 0f
nop
sethi %hi(intr_thread_exit_actv_bit_set), %o0
call panic
or %o0, %lo(intr_thread_exit_actv_bit_set), %o0
0:
#endif /* DEBUG */
!
! Set the thread state to free so kernel debuggers don't see it
!
mov TS_FREE, %o4
st %o4, [THREAD_REG + T_STATE]
!
! Put thread on either the interrupt pool or the free pool and
! call swtch() to resume another thread.
!
ldn [%o2 + CPU_INTR_THREAD], %o5 ! get list pointer
stn %o5, [THREAD_REG + T_LINK]
call swtch ! switch to best thread
stn THREAD_REG, [%o2 + CPU_INTR_THREAD] ! delay - put thread on list
ba,a,pt %xcc, . ! swtch() shouldn't return
.asciz "intr_thread(): regs=0x%lx, int=0x%lx, pil=0x%lx"
#ifdef DEBUG
.asciz "intr_thread(): cpu_intr_actv bit already set for PIL"
.asciz "intr_thread(): cpu_intr_actv bit not set for PIL"
.asciz "intr_thread_exit(): cpu_intr_actv bit erroneously set for PIL"
.asciz "intr_thread(): t_intr_start zero upon handler return"
#endif /* DEBUG */
#endif /* lint */
#if defined(lint)
/*
* Handle an interrupt in the current thread
* Entry:
* %o0 = pointer to regs structure
* %o1 = pointer to current intr_vec_t (iv) to be processed
* %o2 = pil
* %sp = on current thread's kernel stack
* %o7 = return linkage to trap code
* %g7 = current thread
* %pstate = normal globals, interrupts enabled,
* privileged, fp disabled
* %pil = PIL_MAX
*
* Register Usage
* %l0 = return linkage
* %l1 = old stack
* %l2 - %l3 = scratch
* %l4 - %l7 = reserved for sys_trap
* %o3 = cpu
* %o0 = scratch
* %o4 - %o5 = scratch
*/
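/*
 * In outline, a C-like sketch of the assembly below:
 *
 *	cpu->cpu_intr_actv |= 1 << pil;
 *	if (this is the only active high-level pil)
 *		switch to cpu->cpu_intr_stack;
 *	else
 *		account the interrupted high-level pil's elapsed time;
 *	cpu->cpu_m.pil_high_start[pil - (LOCK_LEVEL + 1)] = %tick;
 *	set %pil and serve requests via SERVE_INTR / SERVE_INTR_NEXT;
 *	then raise %pil to PIL_MAX, account the time spent at this pil,
 *	restore the previous stack and %pil, and return to the trap code.
 */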
/* ARGSUSED */
void
current_thread(struct regs *regs, uint64_t iv_p, uint_t pil)
{}
#else /* lint */
!
!
6: ld [%o3 + CPU_INTR_ACTV], %o5 ! o5 has cpu_intr_actv b4 chng
mov 1, %o4
sll %o4, %o2, %o4 ! construct mask for level
#ifdef DEBUG
!
! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
!
andcc %o5, %o4, %g0
bz,pt %xcc, 0f
nop
! Do not call panic if a panic is already in progress.
sethi %hi(panic_quiesce), %l2
ld [%l2 + %lo(panic_quiesce)], %l2
brnz,pn %l2, 0f
nop
sethi %hi(current_thread_actv_bit_set), %o0
call panic
or %o0, %lo(current_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
or %o5, %o4, %o4
!
! See if we are interrupting another high-level interrupt.
!
srl %o5, LOCK_LEVEL + 1, %o5 ! only look at high-level bits
brz,pt %o5, 1f
st %o4, [%o3 + CPU_INTR_ACTV] ! delay - store active mask
!
! We have interrupted another high-level interrupt. Find its PIL,
! compute the interval it ran for, and update its cumulative counter.
!
! Register usage:
! o2 = PIL of this interrupt
! o5 = high PIL bits of INTR_ACTV (not including this PIL)
! l1 = bitmask used to find other active high-level PIL
! o4 = index of bit set in l1
! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
! interrupted high-level interrupt.
! Create mask for cpu_intr_actv. Begin by looking for bits set
! at one level below the current PIL. Since %o5 contains the active
! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
! at bit (current_pil - (LOCK_LEVEL + 2)).
sub %o2, LOCK_LEVEL + 2, %o4
mov 1, %l1
sll %l1, %o4, %l1
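!
! The scan below, as a C-like sketch (actv_hi is %o5, the active mask
! already shifted right by LOCK_LEVEL + 1):
!
!	bit = pil - (LOCK_LEVEL + 2);
!	mask = 1 << bit;
!	while (!(actv_hi & mask)) {
!		mask >>= 1;
!		bit--;
!	}
!	! bit is now the pil_high_start[] index of the interrupted PIL
!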
2:
#ifdef DEBUG
!
! ASSERT(%l1 != 0) - we must find another active high-level PIL
! before shifting the mask off the right edge
!
brnz,pt %l1, 9f
nop
! Don't panic if a panic is already in progress.
sethi %hi(panic_quiesce), %l3
ld [%l3 + %lo(panic_quiesce)], %l3
brnz,pn %l3, 9f
nop
sethi %hi(current_thread_nested_PIL_not_found), %o0
call panic
or %o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
andcc %l1, %o5, %g0 ! test mask against high-level bits of
bnz %xcc, 3f ! cpu_intr_actv
nop
srl %l1, 1, %l1 ! No match. Try next lower PIL.
ba,pt %xcc, 2b
sub %o4, 1, %o4 ! delay - decrement PIL
3:
sll %o4, 3, %o4 ! index to byte offset
add %o4, CPU_MCPU, %l1 ! CPU_PIL_HIGH_START is too large
add %l1, MCPU_PIL_HIGH_START, %l1
ldx [%o3 + %l1], %l3 ! load starting timestamp
#ifdef DEBUG
!
! ASSERT(cpu.cpu_m.pil_high_start[pil] != 0)
!
brnz,pt %l3, 9f
nop
! Don't panic if a panic is already in progress.
sethi %hi(panic_quiesce), %l1
ld [%l1 + %lo(panic_quiesce)], %l1
brnz,pn %l1, 9f
nop
sethi %hi(current_thread_nested_timestamp_zero), %o0
call panic
or %o0, %lo(current_thread_nested_timestamp_zero), %o0
9:
#endif /* DEBUG */
!
!
2:
!
!
1:
! Check to see if we are interrupting a low level interrupt thread.
! If so, compute its interval
! and update its cumulative counter.
lduh [THREAD_REG + T_FLAGS], %o4
andcc %o4, T_INTR_THREAD, %g0
bz,pt %xcc, 4f
nop
! We have interrupted an interrupt thread. Take timestamp, compute
! interval, update cumulative counter.
! Check t_intr_start. If it is zero, either intr_thread() or
! current_thread() (at a lower PIL, of course) already did
! the accounting for the underlying interrupt thread.
ldx [THREAD_REG + T_INTR_START], %o5
brz,pn %o5, 4f
nop
stx %g0, [THREAD_REG + T_INTR_START]
RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o4, %l2)
sub %o4, %o5, %o5 ! o5 has the interval
! Check for Energy Star mode
lduh [%o3 + CPU_DIVISOR], %o4 ! %o4 = clock divisor
cmp %o4, 1
bg,a,pn %xcc, 2f
mulx %o5, %o4, %o5 ! multiply interval by clock divisor iff > 1
2:
ldub [THREAD_REG + T_PIL], %o4
sllx %o4, 4, %o4 ! PIL index to byte offset
add %o4, CPU_MCPU, %o4 ! CPU_INTRSTAT const too large
add %o4, MCPU_INTRSTAT, %o4 ! add parts separately
ldx [%o3 + %o4], %l2 ! old counter in l2
add %l2, %o5, %l2 ! new counter in l2
stx %l2, [%o3 + %o4] ! store new counter
! Also update intracct[]
lduh [%o3 + CPU_MSTATE], %o4
sllx %o4, 3, %o4
add %o4, CPU_INTRACCT, %o4
ldx [%o3 + %o4], %l2
add %l2, %o5, %l2
stx %l2, [%o3 + %o4]
4:
!
! Handle high-level interrupts on separate interrupt stack.
! No other high-level interrupts are active, so switch to int stack.
!
mov %sp, %l1
ldn [%o3 + CPU_INTR_STACK], %l3
sub %l3, STACK_BIAS, %sp
5:
#ifdef DEBUG
!
! ASSERT(%o2 > LOCK_LEVEL)
!
cmp %o2, LOCK_LEVEL
bg,pt %xcc, 3f
nop
mov CE_PANIC, %o0
sethi %hi(current_thread_wrong_pil), %o1
call cmn_err ! %o2 has the %pil already
or %o1, %lo(current_thread_wrong_pil), %o1
#endif /* DEBUG */
3:
! Store starting timestamp for this PIL in CPU structure at
! cpu.cpu_m.pil_high_start[PIL - (LOCK_LEVEL + 1)]
sub %o2, LOCK_LEVEL + 1, %o4 ! convert PIL to array index
sllx %o4, 3, %o4 ! index to byte offset
add %o4, CPU_MCPU, %o4 ! CPU_PIL_HIGH_START is too large
add %o4, MCPU_PIL_HIGH_START, %o4
RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o5, %l2)
stx %o5, [%o3 + %o4]
wrpr %g0, %o2, %pil ! enable interrupts
!
! call the handler
!
SERVE_INTR_PRE(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
1:
SERVE_INTR(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
brz,a,pt %o2, 0f ! branch if no more intrs await
rdpr %pil, %o2 ! delay - executed only if branch taken
SERVE_INTR_NEXT(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
ba 1b
nop
0:
wrpr %g0, PIL_MAX, %pil ! disable interrupts (1-15)
cmp %o2, PIL_15
bne,pt %xcc, 3f
nop
sethi %hi(cpc_level15_inum), %o1
ldx [%o1 + %lo(cpc_level15_inum)], %o1 ! arg for intr_enqueue_req
brz %o1, 3f
nop
rdpr %pstate, %g5
andn %g5, PSTATE_IE, %g1
wrpr %g0, %g1, %pstate ! Disable vec interrupts
call intr_enqueue_req ! preserves %g5
mov PIL_15, %o0
! clear perfcntr overflow
mov 1, %o0
sllx %o0, PIL_15, %o0
wr %o0, CLEAR_SOFTINT
wrpr %g0, %g5, %pstate ! Enable vec interrupts
3:
cmp %o2, PIL_14
be tick_rtt ! cpu-specific tick processing
nop
.global current_thread_complete
current_thread_complete:
!
! Register usage:
!
! %l1 = stack pointer
! %l2 = CPU_INTR_ACTV >> (LOCK_LEVEL + 1)
! %o2 = PIL
! %o3 = CPU pointer
! %o4, %o5, %l3, %l4, %l5 = scratch
!
ldn [THREAD_REG + T_CPU], %o3
!
!
#ifdef DEBUG
!
!
0:
#endif /* DEBUG */
#ifdef DEBUG
!
! ASSERT(the starting timestamp just loaded into %o0 is non-zero)
!
brnz,pt %o0, 9f
nop
! Don't panic if a panic is already in progress.
sethi %hi(panic_quiesce), %l2
ld [%l2 + %lo(panic_quiesce)], %l2
brnz,pn %l2, 9f
nop
sethi %hi(current_thread_timestamp_zero), %o0
call panic
or %o0, %lo(current_thread_timestamp_zero), %o0
9:
#endif /* DEBUG */
stx %g0, [%o3 + %o4]
sub %o5, %o0, %o5 ! interval in o5
! Check for Energy Star mode
lduh [%o3 + CPU_DIVISOR], %o4 ! %o4 = clock divisor
cmp %o4, 1
bg,a,pn %xcc, 2f
mulx %o5, %o4, %o5 ! multiply interval by clock divisor iff > 1
2:
sllx %o2, 4, %o4 ! PIL index to byte offset
add %o4, CPU_MCPU, %o4 ! CPU_INTRSTAT too large
add %o4, MCPU_INTRSTAT, %o4 ! add parts separately
ldx [%o3 + %o4], %o0 ! old counter in o0
add %o0, %o5, %o0 ! new counter in o0
stx %o0, [%o3 + %o4] ! store new counter
! Also update intracct[]
lduh [%o3 + CPU_MSTATE], %o4
sllx %o4, 3, %o4
add %o4, CPU_INTRACCT, %o4
ldx [%o3 + %o4], %o0
add %o0, %o5, %o0
stx %o0, [%o3 + %o4]
!
! get back on current thread's stack
!
!
!
!
!
!
1:
#ifdef DEBUG
!
! ASSERT(%l1 != 0) - we must find another active high-level PIL
!
brnz,pt %l1, 9f
nop
sethi %hi(current_thread_nested_PIL_not_found), %o0
call panic
or %o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
andcc %l1, %l2, %g0 ! test mask against high-level bits of
bnz %xcc, 2f ! cpu_intr_actv
nop
srl %l1, 1, %l1 ! No match. Try next lower PIL.
ba,pt %xcc, 1b
sub %o5, 1, %o5 ! delay - decrement PIL
2:
sll %o5, 3, %o5 ! convert array index to byte offset
add %o5, CPU_MCPU, %o5 ! CPU_PIL_HIGH_START is too large
add %o5, MCPU_PIL_HIGH_START, %o5
RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o4, %l2)
! Another high-level interrupt is active below this one, so
! there is no need to check for an interrupt thread. That will be
! done by the lowest priority high-level interrupt active.
ba,pt %xcc, 7f
stx %o4, [%o3 + %o5] ! delay - store timestamp
3:
7:
8:
#ifdef DEBUG
current_thread_wrong_pil:
.asciz "current_thread: unexpected pil level: %d"
current_thread_actv_bit_set:
.asciz "current_thread(): cpu_intr_actv bit already set for PIL"
current_thread_actv_bit_not_set:
.asciz "current_thread(): cpu_intr_actv bit not set for PIL"
current_thread_nested_timestamp_zero:
.asciz "current_thread(): timestamp zero for nested PIL %d"
current_thread_timestamp_zero:
.asciz "current_thread(): timestamp zero upon handler return"
current_thread_nested_PIL_not_found:
.asciz "current_thread: couldn't find nested high-level PIL"
#endif /* DEBUG */
#endif /* lint */
/*
* Return a thread's interrupt level.
* Since this isn't saved anywhere but in %l4 on interrupt entry, we
* must dig it out of the save area.
*
* Caller 'swears' that this really is an interrupt thread.
*
* int
* intr_level(t)
* kthread_id_t t;
*/
#if defined(lint)
/* ARGSUSED */
int
intr_level(kthread_id_t t)
{ return (0); }
#else /* lint */
#endif /* lint */
#if defined(lint)
/* ARGSUSED */
int
disable_pil_intr(void)
{ return (0); }
#else /* lint */
#endif /* lint */
#if defined(lint)
/* ARGSUSED */
void
enable_pil_intr(int pil_save)
{}
#else /* lint */
#endif /* lint */
#if defined(lint)
/* ARGSUSED */
uint64_t
disable_vec_intr(void)
{ return (0); }
#else /* lint */
#endif /* lint */
#if defined(lint)
/* ARGSUSED */
void
enable_vec_intr(uint64_t pstate_save)
{}
#else /* lint */
#endif /* lint */
#if defined(lint)
void
cbe_level14(void)
{}
#else /* lint */
!
!
2:
#endif /* lint */
#if defined(lint)
/* ARGSUSED */
void
setsoftint(uint64_t iv_p)
{}
#else /* lint */
!
! Queue the given intr_vec_t on this cpu's softint priority list and
! set %set_softint.
!
! Register usage
! %i0 - pointer to intr_vec_t (iv)
! %l2 - requested pil
! %l4 - cpu
! %l5 - pstate
! %l1, %l3, %l6 - temps
!
! check if a softint is pending for this softint;
! if one is pending, don't bother queueing another.
!
sll %l2, CPTRSHIFT, %l0 ! %l0 = offset to pil entry
add %l4, INTR_TAIL, %l6 ! %l6 = &cpu->m_cpu.intr_tail
ldn [%l6 + %l0], %l1 ! %l1 = cpu->m_cpu.intr_tail[pil]
! current tail (ct)
brz,pt %l1, 2f ! branch if current tail is NULL
stn %i0, [%l6 + %l0] ! make intr_vec_t (iv) as new tail
!
!
1:
!
!
2:
!
!
3:
!
!
4:
#endif /* lint */
#if defined(lint)
/*ARGSUSED*/
void
setsoftint_tl1(uint64_t iv_p, uint64_t dummy)
{}
#else /* lint */
!
! Arguments:
!	%g1 - pointer to intr_vec_t (iv)
!
! Internal:
!	%g2 - pil of intr_vec_t
!	%g4 - cpu
!	%g3, %g5 - %g7 - temps
!
! Queue the intr_vec_t on the cpu's softint priority list and
! set %set_softint.
!
CPU_ADDR(%g4, %g2) ! %g4 = cpu
lduh [%g1 + IV_PIL], %g2 ! %g2 = iv->iv_pil
sll %g2, CPTRSHIFT, %g7 ! %g7 = offset to pil entry
add %g4, INTR_TAIL, %g6 ! %g6 = &cpu->m_cpu.intr_tail
ldn [%g6 + %g7], %g5 ! %g5 = cpu->m_cpu.intr_tail[pil]
! current tail (ct)
brz,pt %g5, 1f ! branch if current tail is NULL
stn %g1, [%g6 + %g7] ! make intr_vec_t (iv) as new tail
!
! there's pending intr_vec_t already
!
lduh [%g5 + IV_FLAGS], %g6 ! %g6 = ct->iv_flags
and %g6, IV_SOFTINT_MT, %g6 ! %g6 = ct->iv_flags & IV_SOFTINT_MT
brz,pt %g6, 0f ! check for Multi target softint flag
add %g5, IV_PIL_NEXT, %g3 ! %g3 = &ct->iv_pil_next
ld [%g4 + CPU_ID], %g6 ! for multi target softint, use cpuid
sll %g6, CPTRSHIFT, %g6 ! calculate offset address from cpuid
add %g3, %g6, %g3 ! %g3 = &ct->iv_xpil_next[cpuid]
0:
!
! update old tail
!
ba,pt %xcc, 2f
stn %g1, [%g3] ! [%g3] = iv, set pil_next field
1:
!
! no pending intr_vec_t; make intr_vec_t as new head
!
add %g4, INTR_HEAD, %g6 ! %g6 = &cpu->m_cpu.intr_head[pil]
stn %g1, [%g6 + %g7] ! cpu->m_cpu.intr_head[pil] = iv
2:
#ifdef TRAPTRACE
TRACE_PTR(%g5, %g6)
GET_TRACE_TICK(%g6, %g3)
stxa %g6, [%g5 + TRAP_ENT_TICK]%asi ! trap_tick = %tick
TRACE_SAVE_TL_GL_REGS(%g5, %g6)
rdpr %tt, %g6
stha %g6, [%g5 + TRAP_ENT_TT]%asi ! trap_type = %tt
rdpr %tpc, %g6
stna %g6, [%g5 + TRAP_ENT_TPC]%asi ! trap_pc = %tpc
rdpr %tstate, %g6
stxa %g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
stna %sp, [%g5 + TRAP_ENT_SP]%asi ! trap_sp = %sp
stna %g1, [%g5 + TRAP_ENT_TR]%asi ! trap_tr = iv
ldn [%g1 + IV_PIL_NEXT], %g6 !
stna %g6, [%g5 + TRAP_ENT_F1]%asi ! trap_f1 = iv->iv_pil_next
add %g4, INTR_HEAD, %g6
ldn [%g6 + %g7], %g6 ! %g6=cpu->m_cpu.intr_head[pil]
stna %g6, [%g5 + TRAP_ENT_F2]%asi ! trap_f2 = intr_head[pil]
add %g4, INTR_TAIL, %g6
ldn [%g6 + %g7], %g6 ! %g6=cpu->m_cpu.intr_tail[pil]
stna %g6, [%g5 + TRAP_ENT_F3]%asi ! trap_f3 = intr_tail[pil]
stna %g2, [%g5 + TRAP_ENT_F4]%asi ! trap_f4 = pil
TRACE_NEXT(%g5, %g6, %g3)
#endif /* TRAPTRACE */
!
! Write %set_softint with (1<<pil) to cause a "pil" level trap
!
mov 1, %g5 ! %g5 = 1
sll %g5, %g2, %g5 ! %g5 = 1 << pil
wr %g5, SET_SOFTINT ! trigger required pil softint
retry
SET_SIZE(setsoftint_tl1)
#endif /* lint */
#if defined(lint)
/*ARGSUSED*/
void
setvecint_tl1(uint64_t inum, uint64_t dummy)
{}
#else /* lint */
!
! Register usage
! Arguments:
! %g1 - inumber
!
! Internal:
! %g1 - softint pil mask
! %g2 - pil of intr_vec_t
! %g3 - pointer to current intr_vec_t (iv)
! %g4 - cpu
! %g5, %g6, %g7 - temps
!
ENTRY_NP(setvecint_tl1)
!
! Verify the inumber received (should be inum < MAXIVNUM).
!
set MAXIVNUM, %g2
cmp %g1, %g2
bgeu,pn %xcc, .no_ivintr
clr %g2 ! expected in .no_ivintr
!
! Fetch data from intr_vec_table according to the inum.
!
! We have an interrupt number. Fetch the interrupt vector requests
! from the interrupt vector table for a given interrupt number and
! insert them into the cpu's softint priority lists and set
! %set_softint.
!
!
!
0:
!
!
! At this point:
! %g1 = softint pil mask
! %g3 = pointer to next intr_vec_t (iv)
! %g4 = cpu
!
lduh [%g3 + IV_PIL], %g2 ! %g2 = iv->iv_pil
sll %g2, CPTRSHIFT, %g7 ! %g7 = offset to pil entry
add %g4, INTR_TAIL, %g6 ! %g6 = &cpu->m_cpu.intr_tail
ldn [%g6 + %g7], %g5 ! %g5 = cpu->m_cpu.intr_tail[pil]
! current tail (ct)
brz,pt %g5, 2f ! branch if current tail is NULL
stn %g3, [%g6 + %g7] ! make intr_vec_t (iv) as new tail
! cpu->m_cpu.intr_tail[pil] = iv
!
!
1:
!
!
2:
!
!
3:
#ifdef TRAPTRACE
#endif /* TRAPTRACE */
#endif /* lint */
#if defined(lint)
/*ARGSUSED*/
void
{}
#else /* lint */
#endif /* lint */
#if defined(lint)
/*ARGSUSED*/
void
{}
#else /* lint */
/*
* intr_enqueue_req
*
* %o0 - pil
* %o1 - pointer to intr_vec_t (iv)
* %o5 - preserved
* %g5 - preserved
*/
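!
! The enqueue, as a C-like sketch (the same tail-insert is used by
! setsoftint() and setvecint_tl1(); multi-target softints link through
! iv_xpil_next[cpuid] instead of iv_pil_next):
!
!	ct = cpu->m_cpu.intr_tail[pil];
!	cpu->m_cpu.intr_tail[pil] = iv;
!	if (ct != NULL)
!		ct->iv_pil_next = iv;		! append to old tail
!	else
!		cpu->m_cpu.intr_head[pil] = iv;	! list was empty
!	write (1 << pil) to %set_softint;
!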
ENTRY_NP(intr_enqueue_req)
!
! Get the CPU pointer.
!
CPU_ADDR(%g4, %g1) ! %g4 = cpu
sll %o0, CPTRSHIFT, %o0 ! %o0 = offset to pil entry
add %g4, INTR_TAIL, %g6 ! %g6 = &cpu->m_cpu.intr_tail
ldn [%o0 + %g6], %g1 ! %g1 = cpu->m_cpu.intr_tail[pil]
! current tail (ct)
brz,pt %g1, 2f ! branch if current tail is NULL
stn %o1, [%g6 + %o0] ! make intr_vec_t (iv) as new tail
!
!
1:
!
!
2:
!
! no pending intr_vec_t; make intr_vec_t as new head
!
add %g4, INTR_HEAD, %g6 ! %g6 = &cpu->m_cpu.intr_head[pil]
stn %o1, [%g6 + %o0] ! cpu->m_cpu.intr_head[pil] = iv
3:
retl
nop
SET_SIZE(intr_enqueue_req)
#endif /* lint */
/*
 * Set CPU's base SPL level, based on which interrupt levels are active.
 * Called at spl7 or above.
 */
#if defined(lint)
void
set_base_spl(void)
{}
#else /* lint */
/*
* WARNING: non-standard calling sequence; do not call from C
* %o2 = pointer to CPU
* %o5 = updated CPU_INTR_ACTV
*/
!
!
add %o3, 11-1, %o3 ! delay - add bit number - 1
sra %o5, 6, %o3 ! test bits 10-6
tst %o3
ldub [%o1 + %o3], %o3
bnz,a,pn %xcc, 1f
add %o3, 6-1, %o3
sra %o5, 1, %o3 ! test bits 5-1
ldub [%o1 + %o3], %o3
!
! highest interrupt level number active is in %o3
!
1:
retl
st %o3, [%o2 + CPU_BASE_SPL] ! delay - store base priority
SET_SIZE(set_base_spl)
/*
* Table that finds the most significant bit set in a five bit field.
*/
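/*
 * In C terms, set_base_spl() above uses it roughly as follows (a sketch;
 * table[v] is taken to be the 1-based position of the highest bit set in
 * the 5-bit value v, with table[0] == 0, and actv is cpu_intr_actv):
 *
 *	if (table[(actv >> 11) & 0x1f])
 *		spl = table[(actv >> 11) & 0x1f] + 11 - 1;
 *	else if (table[(actv >> 6) & 0x1f])
 *		spl = table[(actv >> 6) & 0x1f] + 6 - 1;
 *	else
 *		spl = table[(actv >> 1) & 0x1f] + 1 - 1;
 *	cpu->cpu_base_spl = spl;
 */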
.align 4
#endif /* lint */
/*
* int
* intr_passivate(from, to)
* kthread_id_t from; interrupt thread
* kthread_id_t to; interrupted thread
*/
#if defined(lint)
/* ARGSUSED */
int
intr_passivate(kthread_id_t from, kthread_id_t to)
{ return (0); }
#else /* lint */
!
! Copy the registers to the save area on the interrupted
! thread's stack, pointed to by %l7 in the save area just loaded.
!
ldn [%i1 + T_SP], %i3 ! get stack save area pointer
stn %l0, [%i3 + STACK_BIAS + (0*GREGSIZE)] ! save locals
stn %l1, [%i3 + STACK_BIAS + (1*GREGSIZE)]
stn %l2, [%i3 + STACK_BIAS + (2*GREGSIZE)]
stn %l3, [%i3 + STACK_BIAS + (3*GREGSIZE)]
stn %l4, [%i3 + STACK_BIAS + (4*GREGSIZE)]
stn %l5, [%i3 + STACK_BIAS + (5*GREGSIZE)]
stn %l6, [%i3 + STACK_BIAS + (6*GREGSIZE)]
stn %l7, [%i3 + STACK_BIAS + (7*GREGSIZE)]
stn %o0, [%i3 + STACK_BIAS + (8*GREGSIZE)] ! save ins using outs
stn %o1, [%i3 + STACK_BIAS + (9*GREGSIZE)]
stn %o2, [%i3 + STACK_BIAS + (10*GREGSIZE)]
stn %o3, [%i3 + STACK_BIAS + (11*GREGSIZE)]
stn %o4, [%i3 + STACK_BIAS + (12*GREGSIZE)]
stn %o5, [%i3 + STACK_BIAS + (13*GREGSIZE)]
stn %i4, [%i3 + STACK_BIAS + (14*GREGSIZE)] ! fp, %i7 copied using %i4
stn %i5, [%i3 + STACK_BIAS + (15*GREGSIZE)]
stn %g0, [%i2 + ((8+6)*GREGSIZE)] ! clear fp in save area
! load saved pil for return
ldub [%i0 + T_PIL], %i0
ret
restore
SET_SIZE(intr_passivate)
#endif /* lint */
#if defined(lint)
/*
* intr_get_time() is a resource for interrupt handlers to determine how
* much time has been spent handling the current interrupt. Such a function
* is needed because higher level interrupts can arrive during the
* processing of an interrupt, thus making direct comparisons of %tick by
* the handler inaccurate. intr_get_time() only returns time spent in the
* current interrupt handler.
*
* The caller must be calling from an interrupt handler running at a pil
* below or at lock level. Timings are not provided for high-level
* interrupts.
*
* The first time intr_get_time() is called while handling an interrupt,
* it returns the time since the interrupt handler was invoked. Subsequent
* calls will return the time since the prior call to intr_get_time(). Time
* is returned as ticks, adjusted for any clock divisor due to power
* management. Use tick2ns() to convert ticks to nsec. Warning: ticks may
* not be the same across CPUs.
*
* Theory Of Intrstat[][]:
*
* uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
* uint64_ts per pil.
*
* intrstat[pil][0] is a cumulative count of the number of ticks spent
* handling all interrupts at the specified pil on this CPU. It is
* exported via kstats to the user.
*
* intrstat[pil][1] is always a count of ticks less than or equal to the
* value in [0]. The difference between [1] and [0] is the value returned
* by a call to intr_get_time(). At the start of interrupt processing,
* [0] and [1] will be equal (or nearly so). As the interrupt consumes
* time, [0] will increase, but [1] will remain the same. A call to
* intr_get_time() will return the difference, then update [1] to be the
* same as [0]. Future calls will return the time since the last call.
* Finally, when the interrupt completes, [1] is updated to the same as [0].
*
* Implementation:
*
* intr_get_time() works much like a higher level interrupt arriving. It
* "checkpoints" the timing information by incrementing intrstat[pil][0]
* to include elapsed running time, and by setting t_intr_start to %tick.
* It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
* and updates intrstat[pil][1] to be the same as the new value of
* intrstat[pil][0].
*
* In the normal handling of interrupts, after an interrupt handler returns
* and the code in intr_thread() updates intrstat[pil][0], it then sets
* intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
* the timings are reset, i.e. intr_get_time() will return [0] - [1] which
* is 0.
*
 * Whenever interrupts arrive on a CPU which is handling a lower pil
 * interrupt, they update the lower pil's [0] to show time spent in the
 * handler that they've interrupted. This results in a growing discrepancy
 * between [0] and [1], which is returned the next time intr_get_time() is
 * called. Time spent in the higher-pil interrupt will not be returned in
 * the next intr_get_time() call from the original interrupt, because
 * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
 */
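/*
 * As a C-like sketch of the checkpointing step described above (t is
 * curthread, pil is t->t_pil; the divisor scaling matches the Energy
 * Star handling elsewhere in this file):
 *
 *	uint64_t
 *	intr_get_time(void)
 *	{
 *		uint64_t now = %tick, delta, ret;
 *
 *		delta = now - t->t_intr_start;	// since last checkpoint
 *		t->t_intr_start = now;
 *		if (cpu->cpu_divisor > 1)
 *			delta *= cpu->cpu_divisor;
 *		intrstat[pil][0] += delta;
 *		ret = intrstat[pil][0] - intrstat[pil][1];
 *		intrstat[pil][1] = intrstat[pil][0];
 *		return (ret);
 *	}
 */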
/*ARGSUSED*/
uint64_t
intr_get_time(void)
{ return 0; }
#else /* lint */
#ifdef DEBUG
!
!
!
!
0:
!
!
1:
0:
!
! ASSERT(t_intr_start != 0)
!
2:
#endif /* DEBUG */
!
!
!
!
1:
!
!
#ifdef DEBUG
.asciz "intr_get_time(): %pil > LOCK_LEVEL"
.asciz "intr_get_time(): not called from an interrupt thread"
.asciz "intr_get_time(): t_intr_start == 0"
#endif /* DEBUG */
#endif /* lint */