interrupt.s revision fd71cd2f6f10f4a2e3af3efe8b145d645bb80d85
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
/* Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T */
/* All Rights Reserved */
/* Copyright (c) 1987, 1988 Microsoft Corporation */
/* All Rights Reserved */
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/x86_archext.h>
#if defined(__lint)
#else /* __lint */
#include <sys/segments.h>
#include <sys/traptrace.h>
#include "assym.h"
.string "intr_thread(): regs=0x%lx, int=0x%x, pil=0x%x"
#endif /* __lint */
#if defined(__i386)
#if defined(__lint)
void
patch_tsc(void)
{}
#else /* __lint */
/*
* To cope with processors that do not implement the rdtsc instruction,
* we patch the kernel to use rdtsc if that feature is detected on the CPU.
* On an unpatched kernel, all locations requiring rdtsc are nop's.
*
* This function patches the nop's to rdtsc.
*/
#endif /* __lint */
#endif /* __i386 */
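/*
 * In C terms, each patch simply overwrites a two-byte "nop; nop" pair
 * with the two-byte rdtsc opcode. A minimal sketch, using a
 * hypothetical helper name:
 *
 *	static void
 *	patch_nop_pair_to_rdtsc(uint8_t *site)
 *	{
 *		site[0] = 0x0f;		(rdtsc encodes as 0x0f 0x31)
 *		site[1] = 0x31;
 *	}
 *
 * applied to each _tsc_patchN location below once TSC support is
 * detected (cf. x86_archext.h).
 */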
#if defined(__lint)
void
_interrupt(void)
{}
#else /* __lint */
#if defined(__amd64)
/*
* Common register usage:
*
* %rbx cpu pointer
* %r12 trap trace pointer -and- stash of
* vec across intr_thread dispatch.
* %r13d ipl of isr
* %r14d old ipl (ipl level we entered on)
* %r15 interrupted thread stack pointer
*/
/*
* At the end of TRACE_PTR %r12 points to the current TRAPTRACE entry
*/
/* Uses labels 8 and 9 */
DISABLE_INTR_FLAGS /* (and set kernel flag values) */
#ifdef TRAPTRACE
#endif
/*
* Check to see if the trap number is T_SOFTINT; if it is,
* jump straight to dosoftint now.
*/
/*
* Raise the interrupt priority level, returns newpil.
* (The vector address is in %rsi so setlvl can update it.)
*/
/* &vector */
#ifdef TRAPTRACE
#endif
/*
* check for spurious interrupt
*/
#ifdef TRAPTRACE
#endif
#ifdef TRAPTRACE
#endif
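/*
 * The sequence above, restated as a C sketch (setlvl is the PSM
 * set-level upcall; its shape is inferred from the calling sequence,
 * so treat the names as approximate):
 *
 *	newipl = (*setlvl)(oldipl, &vector);
 *	if (newipl == -1)
 *		return;			(spurious; just iret)
 *	cpu->cpu_pri = newipl;
 */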
/*
* At this point we can take one of two paths.
* If the new level is at or below lock level, we will
* run this interrupt in a separate thread.
*/
jnz 1f
/*
* Save the thread stack and get on the cpu's interrupt stack
*/
1:
/*
* Walk the list of handlers for this vector, calling
* them as we go until no more interrupts are claimed.
*/
jnz 2f
2: /*
* Check for, and execute, softints before we iret.
*
* (dosoftint expects oldipl in %r14d (which is where it is)
* the cpu pointer in %rbx (which is where it is) and the
* softinfo in %edx (which is where we'll put it right now))
*/
/*NOTREACHED*/
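/*
 * The handler walk above is the usual autovector loop; roughly, in C
 * (names approximate those in avintr.c):
 *
 *	struct autovec *av;
 *	uint_t claimed = 0;
 *	for (av = autovect[vec].avh_link; av != NULL; av = av->av_link)
 *		claimed |= (*av->av_vector)(av->av_intarg);
 *
 * and the pass is repeated while the interrupt is still claimed, so
 * shared level-triggered lines are fully drained.
 */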
/*
* Handle an interrupt in a new thread
*
* As we branch here, interrupts are still masked,
* %rbx still contains the cpu pointer,
* %r14d contains the old ipl that we came in on, and
* %eax contains the new ipl that we got from the setlvl routine
*/
jz 1f
/*
* ftracing support. do we need this on x86?
*/
1:
/*
* If we return from here (we might not if the interrupted thread
* has exited or blocked, in which case we'll have quietly swtch()ed
* away) then we need to switch back to our old %rsp
*/
/*
* Check for, and execute, softints before we iret.
*
 * (dosoftint expects oldipl in %r14d, the cpu pointer in %rbx, and
 * the mcpu_softinfo.st_pending field in %edx.)
*/
/*FALLTHROUGH*/
/*
* Process soft interrupts.
* Interrupts are masked, and we have a minimal frame on the stack.
* %edx should contain the mcpu_softinfo.st_pending field
*/
/* cpu->cpu_m.mcpu_softinfo.st_pending */
/*
* dosoftint_prolog() usually returns a stack pointer for the
* interrupt thread that we must switch to. However, if the
* returned stack pointer is NULL, then the software interrupt was
* too low in priority to run now; we'll catch it another time.
*/
/*
* Enabling interrupts (above) could raise the current ipl
* and base spl. But, we continue processing the current soft
* interrupt and we will check the base spl next time around
* so that blocked interrupt threads get a chance to run.
*/
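/*
 * The two comments above describe this control flow; in C it would
 * read roughly as below (dosoftint_prolog's return convention is as
 * stated; the final helper name is illustrative only):
 *
 *	if ((newsp = dosoftint_prolog(cpu, st_pending, oldipl)) == NULL)
 *		return;			(outranked; catch it later)
 *	switch_stack_and_dispatch(newsp);
 */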
#elif defined(__i386)

/*
 * One day, this should just invoke the C routines that know how to
 * do all the interrupt bookkeeping. In the meantime, try
 * and make the assembler a little more comprehensible.
 */
/*
* The following macros assume the time value is in %edx:%eax
* e.g. from a rdtsc instruction.
*/
/*
* basereg - pointer to cpu struct
* pilreg - pil or converted pil (pil - (LOCK_LEVEL + 1))
*
* Returns (base + pil * 8) in pilreg
*/
/*
* Returns (base + (pil - (LOCK_LEVEL + 1)) * 8) in pilreg
*/
/*
* Returns (base + pil * 16) in pilreg
*/
/*
* Returns (cpu + cpu_mstate * 8) in tgt
*/
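/*
 * In C terms, the three lookups above resolve to addresses like these
 * (a sketch; field names follow cpu_t/machcpu):
 *
 *	hp = &cpu->cpu_m.pil_high_start[pil - (LOCK_LEVEL + 1)];
 *	is = &cpu->cpu_m.intrstat[pil][0];	(two uint64_ts per pil)
 *	ia = &cpu->cpu_intracct[cpu->cpu_mstate];
 */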
/*
* cpu_stats.sys.intr[PIL]++
*/
/*
* Unlink thread from CPU's list
*/
/*
* Link a thread into CPU's list
*/
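/*
 * The list in question is the per-CPU pool of interrupt threads,
 * singly linked through t_link; sketched in C:
 *
 *	unlink:	it = cpu->cpu_intr_thread;
 *		cpu->cpu_intr_thread = it->t_link;
 *
 *	link:	it->t_link = cpu->cpu_intr_thread;
 *		cpu->cpu_intr_thread = it;
 */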
#if defined(DEBUG)
/*
* Do not call panic, if panic is already in progress.
*/
cmpl $0, panic_quiesce; \
/*
* ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
*/
jnc 4f; \
4:
/*
* ASSERT(CPU->cpu_intr_actv & (1 << PIL))
*/
jc 5f; \
5:
/*
* ASSERT(CPU->cpu_pil_high_start != 0)
*/
#define ASSERT_CPU_PIL_HIGH_START_NZ(basereg) \
6:
/*
* ASSERT(t->t_intr_start != 0)
*/
#define ASSERT_T_INTR_START_NZ(basereg) \
7:
.string "_interrupt(): cpu_intr_actv bit already set for PIL"
.string "_interrupt(): cpu_intr_actv bit not set for PIL"
.string "_interrupt(): timestamp zero upon handler return"
.string "intr_thread(): cpu_intr_actv bit not set for PIL"
.string "intr_thread(): t_intr_start zero upon handler return"
.string "dosoftint(): cpu_intr_actv bit already set for PIL"
.string "dosoftint(): cpu_intr_actv bit not set for PIL"
#else
#define ASSERT_CPU_PIL_HIGH_START_NZ(basereg)
#define ASSERT_T_INTR_START_NZ(basereg)
#endif
/*
* At the end of TRACE_PTR %esi points to the current TRAPTRACE entry
*/
/* Uses labels 8 and 9 */
/
/ Check to see if the trap number is T_SOFTINT; if it is,
/ jump straight to dosoftint now.
/
cmpl $T_SOFTINT, (%ecx)
je dosoftint
/ raise interrupt priority level
/ oldipl is in %edi, vectorp is in %ecx
/ newipl is returned in %eax
pushl %ecx
pushl %edi
call *setlvl
popl %edi /* restore old ipl to %edi */
popl %ecx
#ifdef TRAPTRACE
movb %al, TTR_IPL(%esi)
#endif
/ check for spurious interrupt
cmp $-1, %eax
je _sys_rtt
#ifdef TRAPTRACE
movl CPU_PRI(%ebx), %edx
movb %dl, TTR_PRI(%esi)
movl CPU_BASE_SPL(%ebx), %edx
movb %dl, TTR_SPL(%esi)
#endif
movl %eax, CPU_PRI(%ebx) /* update ipl */
movl REGOFF_TRAPNO(%ebp), %ecx /* reload the interrupt vector */
#ifdef TRAPTRACE
movb %cl, TTR_VECTOR(%esi)
#endif
/ At this point we can take one of two paths. If the new priority
/ level is less than or equal to LOCK_LEVEL then we jump to code that
/ will run this interrupt as a separate thread. Otherwise the
/ interrupt is NOT run as a separate thread.
/ %edi - old priority level
/ %ebp - pointer to REGS
/ %ecx - translated vector
/ %eax - ipl of isr
/ %ebx - cpu pointer
cmpl $LOCK_LEVEL, %eax /* compare to highest thread level */
jbe intr_thread /* process as a separate thread */
cmpl $CBE_HIGH_PIL, %eax /* Is this a CY_HIGH_LEVEL interrupt? */
jne 2f
movl REGOFF_PC(%ebp), %esi
movl %edi, CPU_PROFILE_PIL(%ebx) /* record interrupted PIL */
testw $CPL_MASK, REGOFF_CS(%ebp) /* trap from supervisor mode? */
jz 1f
movl %esi, CPU_PROFILE_UPC(%ebx) /* record user PC */
movl $0, CPU_PROFILE_PC(%ebx) /* zero kernel PC */
jmp 2f
1:
movl %esi, CPU_PROFILE_PC(%ebx) /* record kernel PC */
movl $0, CPU_PROFILE_UPC(%ebx) /* zero user PC */
2:
pushl %ecx /* vec */
pushl %eax /* newpil */
/
/ See if we are interrupting another high-level interrupt.
/
movl CPU_INTR_ACTV(%ebx), %eax
andl $CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax
jz 0f
/
/ We have interrupted another high-level interrupt.
/ Load starting timestamp, compute interval, update cumulative counter.
/
bsrl %eax, %ecx /* find PIL of interrupted handler */
movl %ecx, %esi /* save PIL for later */
HIGHPILBASE(%ebx, %ecx)
_tsc_patch1:
nop; nop /* patched to rdtsc if available */
TSC_SUB_FROM(%ecx, CPU_PIL_HIGH_START)
PILBASE_INTRSTAT(%ebx, %esi)
TSC_ADD_TO(%esi, CPU_INTRSTAT)
INTRACCTBASE(%ebx, %ecx)
TSC_ADD_TO(%ecx, CPU_INTRACCT) /* cpu_intracct[cpu_mstate] += tsc */
/
/ Another high-level interrupt is active below this one, so
/ there is no need to check for an interrupt thread. That will be
/ done by the lowest priority high-level interrupt active.
/
jmp 1f
0:
/
/ See if we are interrupting a low-level interrupt thread.
/
movl CPU_THREAD(%ebx), %esi
testw $T_INTR_THREAD, T_FLAGS(%esi)
jz 1f
/
/ We have interrupted an interrupt thread. Account for its time slice
/ only if its time stamp is non-zero.
/
cmpl $0, T_INTR_START+4(%esi)
jne 0f
cmpl $0, T_INTR_START(%esi)
je 1f
0:
movzbl T_PIL(%esi), %ecx /* %ecx has PIL of interrupted handler */
PILBASE_INTRSTAT(%ebx, %ecx)
_tsc_patch2:
nop; nop /* patched to rdtsc if available */
TSC_SUB_FROM(%esi, T_INTR_START)
TSC_CLR(%esi, T_INTR_START)
TSC_ADD_TO(%ecx, CPU_INTRSTAT)
INTRACCTBASE(%ebx, %ecx)
TSC_ADD_TO(%ecx, CPU_INTRACCT) /* cpu_intracct[cpu_mstate] += tsc */
1:
/ Store starting timestamp in CPU structure for this PIL.
popl %ecx /* restore new PIL */
pushl %ecx
HIGHPILBASE(%ebx, %ecx)
_tsc_patch3:
nop; nop /* patched to rdtsc if available */
TSC_STORE(%ecx, CPU_PIL_HIGH_START)
popl %eax /* restore new pil */
popl %ecx /* vec */
/
/
jne 0f
0:
/
/
/* sti to save on AGI later */
sti /* enable interrupts */
/
/
/* bl is DDI_INTR_CLAIMED status of chain */
pushl $0
.intr_ret:
/
/
ASSERT_CPU_INTR_ACTV(%esi, %ebx, _interrupt_actv_bit_not_set)
cmpl $15, %esi
jne 0f
/ Only clear bit if reference count is now zero.
decw CPU_INTR_ACTV_REF(%ebx)
jnz 1f
0:
btrl %esi, CPU_INTR_ACTV(%ebx)
1:
/
/ Take timestamp, compute interval, update cumulative counter.
/ esi = PIL
_tsc_patch4:
nop; nop /* patched to rdtsc if available */
movl %esi, %ecx /* save for later */
HIGHPILBASE(%ebx, %esi)
ASSERT_CPU_PIL_HIGH_START_NZ(%esi)
TSC_SUB_FROM(%esi, CPU_PIL_HIGH_START)
PILBASE_INTRSTAT(%ebx, %ecx)
TSC_ADD_TO(%ecx, CPU_INTRSTAT)
INTRACCTBASE(%ebx, %esi)
TSC_ADD_TO(%esi, CPU_INTRACCT) /* cpu_intracct[cpu_mstate] += tsc */
/
/ Check for lower-PIL nested high-level interrupt beneath current one
/ If so, place a starting timestamp in its pil_high_start entry.
/
movl CPU_INTR_ACTV(%ebx), %eax
movl %eax, %esi
andl $CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax
jz 0f
bsrl %eax, %ecx /* find PIL of nested interrupt */
HIGHPILBASE(%ebx, %ecx)
_tsc_patch5:
nop; nop /* patched to rdtsc if available */
TSC_STORE(%ecx, CPU_PIL_HIGH_START)
/
/ Another high-level interrupt is active below this one, so
/ there is no need to check for an interrupt thread. That will be
/ done by the lowest priority high-level interrupt active.
/
jmp 1f
0:
/ Check to see if there is a low-level interrupt active. If so,
/ place a starting timestamp in the thread structure.
movl CPU_THREAD(%ebx), %esi
testw $T_INTR_THREAD, T_FLAGS(%esi)
jz 1f
_tsc_patch6:
nop; nop /* patched to rdtsc if available */
TSC_STORE(%esi, T_INTR_START)
1:
movl %edi, CPU_PRI(%ebx)
/* interrupt vector already on stack */
pushl %edi /* old ipl */
call *setlvlx
addl $8, %esp /* eax contains the current ipl */
movl CPU_INTR_ACTV(%ebx), %esi /* reset stack pointer if no more */
shrl $LOCK_LEVEL + 1, %esi /* HI PRI intrs. */
jnz .intr_ret2
popl %esp /* restore the thread stack pointer */
.intr_ret2:
movl CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
orl %edx, %edx
jz _sys_rtt
jmp dosoftint /* check for softints before we return. */
SET_SIZE(cmnint)
SET_SIZE(_interrupt)
#endif /* __i386 */
/*
 * Declare a uintptr_t holding the size of _interrupt, so that stack
 * traceback code can tell when a regs structure is on the stack.
*/
.globl _interrupt_size
.align CLONGSIZE
_interrupt_size:
.NWORD . - _interrupt
.type _interrupt_size, @object
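/*
 * A traceback consumer can then test, roughly:
 *
 *	extern char _interrupt[];
 *	extern uintptr_t _interrupt_size;
 *
 *	if ((uintptr_t)pc - (uintptr_t)_interrupt < _interrupt_size)
 *		(pc is inside _interrupt; a regs structure is on the stack)
 */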
#endif /* __lint */
#if defined(__i386)
/*
* Handle an interrupt in a new thread.
* Entry: traps disabled.
* %edi - old priority level
* %ebp - pointer to REGS
* %ecx - translated vector
* %eax - ipl of isr.
* %ebx - pointer to CPU struct
* Uses:
*/
#if !defined(__lint)
ENTRY_NP(intr_thread)
/
/
/
/
/
/
jz 0f
/
/
jne 1f
je 0f
1:
0:
/
/
movl $ONPROC_THREAD, T_STATE(%esi)
/
/ chain the interrupted thread onto list from the interrupt thread.
/ Set the new interrupt thread as the current one.
/
/
/
/* is loaded on some other cpu. */
/
/
sti /* enable interrupts */
jz 1f
1:
/* bl is DDI_INTR_CLAIMED status of chain */
pushl $0
TSC_ADD_TO(%ecx, 0)
cli /* protect interrupt thread pool and intr_actv */
/
/
/
/
ASSERT_CPU_INTR_ACTV(%eax, %ebx, _intr_thread_actv_bit_not_set)
btrl %eax, CPU_INTR_ACTV(%ebx)
/ if there is still an interrupted thread underneath this one
/ then the interrupt was never blocked and the return is fairly
/ simple. Otherwise jump to intr_thread_exit
cmpl $0, T_INTR(%esi)
je intr_thread_exit
/
/ link the thread back onto the interrupt thread pool
LINK_INTR_THREAD(%ebx, %esi, %edx)
movl CPU_BASE_SPL(%ebx), %eax /* used below. */
/* intr vector already on stack */
/
_tsc_patch10:
nop; nop /* patched to rdtsc if available */
TSC_STORE(%ecx, T_INTR_START)
movl T_SP(%ecx), %esp /* restore stack pointer */
movl %esp, %ebp
movl %ecx, CPU_THREAD(%ebx)
movl CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
orl %edx, %edx
jz _sys_rtt
jmp dosoftint /* check for softints before we return. */
/
/ An interrupt returned on what was once (and still might be)
/ an interrupt thread stack, but the interrupted process is no longer
/ there. This means the interrupt must have blocked.
/
/ There is no longer a thread under this one, so put this thread back
/
/
#ifdef DEBUG
#endif
/
call set_base_spl
movl CPU_BASE_SPL(%ebx), %edi
movl %edi, CPU_PRI(%ebx)
/* interrupt vector already on stack */
pushl %edi
call *setlvlx
/* we are ready to switch */
/
/
movl $FREE_THREAD, T_STATE(%esi)
/
/ Put thread on either the interrupt pool or the free pool and
/ call swtch() to resume another thread.
/
LINK_INTR_THREAD(%ebx, %esi, %edx)
call swtch
/ swtch() shouldn't return
#endif /* __lint */
#endif /* __i386 */
/*
 * Set the CPU's base SPL level, based on which interrupt levels are active.
* Called at spl7 or above.
*/
#if defined(__lint)
void
set_base_spl(void)
{}
#else /* __lint */
/* the 1st bit faster */
#endif /* __lint */
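/*
 * Logically, in C (the highest set bit of cpu_intr_actv is the highest
 * active level; this is a sketch, not the tuned assembly):
 *
 *	uint_t actv = cpu->cpu_intr_actv;
 *	cpu->cpu_base_spl = (actv == 0) ? 0 : highbit(actv) - 1;
 */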
#if defined(__i386)
/*
* int
* intr_passivate(from, to)
* thread_id_t from; interrupt thread
* thread_id_t to; interrupted thread
*
* intr_passivate(t, itp) makes the interrupted thread "t" runnable.
*
 * Since t->t_sp has already been saved, t->t_pc is all that needs
 * to be set in this function.
*
* Returns interrupt level of the thread.
*/
#if defined(__lint)
/* ARGSUSED */
int
intr_passivate(kthread_id_t t, kthread_id_t itp)
{ return (0); }
#else /* __lint */
/* of the interrupt thread stack */
/* thing pushed onto the stack */
#endif /* __lint */
#endif /* __i386 */
#if defined(__lint)
void
fakesoftint(void)
{}
#else /* __lint */
/
/
#if defined(__amd64)
/*
 * In 64-bit mode, iretq -always- pops all five regs
 * (%rip, %cs, %rflags, %rsp, %ss). Imitate the 16-byte auto-align
 * of the stack, and the zero-ed out %ss value.
*/
pushf /* rflags */
pushq $0 /* err */
push $0
#endif /* __amd64 */
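/*
 * As a C picture of the frame built above, lowest address first, i.e.
 * the order iretq consumes it (the struct name is illustrative):
 *
 *	struct iret_frame {
 *		uint64_t rip, cs, rflags, rsp, ss;
 *	};
 */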
.NWORD . - fakesoftint
/*
* dosoftint(old_pil in %edi, softinfo in %edx, CPU pointer in %ebx)
* Process software interrupts
* Interrupts are disabled here.
*/
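/*
 * The per-iteration shape in C (st_pending is the bitmask of pending
 * soft levels; av_dispatch_softvect() runs the handlers for one level,
 * as in the code below; a sketch):
 *
 *	pil = highbit(st_pending) - 1;		(highest pending level)
 *	(claim the bit and switch onto a softint thread, then)
 *	av_dispatch_softvect(pil);
 */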
#if defined(__i386)
lock /* MP protect */
/
/
jz 0f
/
/
jne 1f
je 0f
1:
0:
/
/ Could eliminate the next two instructions with a little work.
/
movl $ONPROC_THREAD, T_STATE(%esi)
/
/ Push interrupted thread onto list from new thread.
/ Set the new thread as the current one.
/
/* see intr_passivate() */
/
/
ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _dosoftint_actv_bit_set)
btsl %eax, CPU_INTR_ACTV(%ebx)
/
/ Initialize thread priority level from intr_pri
/
movb %al, T_PIL(%esi) /* store pil */
movzwl intr_pri, %ecx
addl %eax, %ecx /* convert level to dispatch priority */
movw %cx, T_PRI(%esi)
/
/ Store starting timestamp in thread structure.
/ esi = thread, ebx = cpu pointer, eax = PIL
/
movl %eax, %ecx /* save PIL from rdtsc clobber */
_tsc_patch12:
nop; nop /* patched to rdtsc if available */
TSC_STORE(%esi, T_INTR_START)
sti /* enable interrupts */
/
/ Enabling interrupts (above) could raise the current
/ IPL and base SPL. But, we continue processing the current soft
/ interrupt and we will check the base SPL next time in the loop
/ so that blocked interrupt threads get a chance to run.
/
/
/ dispatch soft interrupts
/
pushl %ecx
call av_dispatch_softvect
addl $4, %esp
cli /* protect interrupt thread pool */
/* and softinfo & sysinfo */
movl CPU_THREAD(%ebx), %esi /* restore thread pointer */
movzbl T_PIL(%esi), %ecx
/ cpu_stats.sys.intr[PIL]++
INC_CPU_STATS_INTR(%ecx, %edx, %edx, %ebx)
/
/
/
/
/
movl $FREE_THREAD, T_STATE(%esi)
/
/ Switch back to the interrupted thread
movl T_INTR(%esi), %ecx
movl %ecx, CPU_THREAD(%ebx)
movl T_SP(%ecx), %esp /* restore stack pointer */
movl %esp, %ebp
/ If we are returning to an interrupt thread, store a starting
/ timestamp in the thread structure.
testw $T_INTR_THREAD, T_FLAGS(%ecx)
jz 0f
_tsc_patch14:
nop; nop /* patched to rdtsc if available */
TSC_STORE(%ecx, T_INTR_START)
0:
movl CPU_BASE_SPL(%ebx), %eax
cmpl %eax, %edi /* if (oldipl >= basespl) */
jae softintr_restore_ipl /* then use oldipl */
movl %eax, %edi /* else use basespl */
softintr_restore_ipl:
movl %edi, CPU_PRI(%ebx) /* set IPL to old level */
pushl %edi
call *setspl
popl %eax
dosoftint_again:
movl CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
orl %edx, %edx
jz _sys_rtt
jmp dosoftint /* process more software interrupts */
softintr_thread_exit:
/
/ Put thread back on the interrupt thread list.
/ As a reminder, the regs at this point are
/ %esi interrupt thread
/
/
/
/
movl $FREE_THREAD, T_STATE(%esi)
/
/ Put thread on either the interrupt pool or the free pool and
/ call swtch() to resume another thread.
/
LOADCPU(%ebx)
LINK_INTR_THREAD(%ebx, %esi, %edx)
call splhigh /* block all intrs below lock lvl */
call swtch
/ swtch() shouldn't return
#endif /* __i386 */
#endif /* __lint */
#if defined(lint)
/*
* intr_get_time() is a resource for interrupt handlers to determine how
* much time has been spent handling the current interrupt. Such a function
* is needed because higher level interrupts can arrive during the
 * processing of an interrupt, thus making direct comparisons of the TSC
 * by the handler inaccurate. intr_get_time() only returns time spent in the
* current interrupt handler.
*
* The caller must be calling from an interrupt handler running at a pil
* below or at lock level. Timings are not provided for high-level
* interrupts.
*
* The first time intr_get_time() is called while handling an interrupt,
* it returns the time since the interrupt handler was invoked. Subsequent
* calls will return the time since the prior call to intr_get_time(). Time
* is returned as ticks. Use tsc_scalehrtime() to convert ticks to nsec.
*
* Theory Of Intrstat[][]:
*
* uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
* uint64_ts per pil.
*
* intrstat[pil][0] is a cumulative count of the number of ticks spent
* handling all interrupts at the specified pil on this CPU. It is
* exported via kstats to the user.
*
* intrstat[pil][1] is always a count of ticks less than or equal to the
* value in [0]. The difference between [1] and [0] is the value returned
* by a call to intr_get_time(). At the start of interrupt processing,
* [0] and [1] will be equal (or nearly so). As the interrupt consumes
* time, [0] will increase, but [1] will remain the same. A call to
* intr_get_time() will return the difference, then update [1] to be the
* same as [0]. Future calls will return the time since the last call.
* Finally, when the interrupt completes, [1] is updated to the same as [0].
*
* Implementation:
*
* intr_get_time() works much like a higher level interrupt arriving. It
* "checkpoints" the timing information by incrementing intrstat[pil][0]
* to include elapsed running time, and by setting t_intr_start to rdtsc.
* It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
* and updates intrstat[pil][1] to be the same as the new value of
* intrstat[pil][0].
*
* In the normal handling of interrupts, after an interrupt handler returns
* and the code in intr_thread() updates intrstat[pil][0], it then sets
* intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
* the timings are reset, i.e. intr_get_time() will return [0] - [1] which
* is 0.
*
* Whenever interrupts arrive on a CPU which is handling a lower pil
* interrupt, they update the lower pil's [0] to show time spent in the
* handler that they've interrupted. This results in a growing discrepancy
* between [0] and [1], which is returned the next time intr_get_time() is
* called. Time spent in the higher-pil interrupt will not be returned in
* the next intr_get_time() call from the original interrupt, because
* the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
*/
/*ARGSUSED*/
hrtime_t
intr_get_time(void)
{ return 0; }
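/*
 * The checkpoint step described above, written out as a C sketch of
 * the bookkeeping (not the exact routine; interrupts are blocked for
 * the duration):
 *
 *	hrtime_t
 *	intr_get_time(void)
 *	{
 *		kthread_t *t = curthread;
 *		uint_t pil = t->t_pil;
 *		uint64_t now = tsc_read(), delta, ret;
 *
 *		delta = now - t->t_intr_start;
 *		t->t_intr_start = now;
 *		CPU->cpu_m.intrstat[pil][0] += delta;
 *		ret = CPU->cpu_m.intrstat[pil][0] -
 *		    CPU->cpu_m.intrstat[pil][1];
 *		CPU->cpu_m.intrstat[pil][1] = CPU->cpu_m.intrstat[pil][0];
 *		return ((hrtime_t)ret);
 *	}
 */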
#else /* lint */
#if defined(__amd64)
cli /* make this easy -- block intrs */
#elif defined(__i386)

#ifdef DEBUG
.string "intr_get_time(): %pil > LOCK_LEVEL"
.string "intr_get_time(): not called from an interrupt thread"
.string "intr_get_time(): t_intr_start == 0"
/*
* ASSERT(%pil <= LOCK_LEVEL)
*/
#define ASSERT_PIL_BELOW_LOCK_LEVEL(cpureg) \
jz 0f; \
__PANIC(_intr_get_time_high_pil, 0f); \
0:
/*
* ASSERT((t_flags & T_INTR_THREAD) != 0 && t_pil > 0)
*/
#define ASSERT_NO_PIL_0_INTRS(thrreg) \
jz 1f; \
jne 0f; \
1: \
__PANIC(_intr_get_time_not_intr, 0f); \
0:
/*
* ASSERT(t_intr_start != 0)
*/
#define ASSERT_INTR_START_NOT_0(thrreg) \
jnz 0f; \
jnz 0f; \
__PANIC(_intr_get_time_no_start_time, 0f); \
0:
#endif /* DEBUG */
cli /* make this easy -- block intrs */
#ifdef DEBUG
#endif /* DEBUG */
/* %edx/%eax contain difference between old and new intrstat[1] */
#endif /* __i386 */
#endif /* lint */