/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/systm.h>
#include <sys/psw.h>
#include <sys/trap.h>
#include <sys/segments.h>
#include <sys/traptrace.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/kstat.h>
#include <sys/atomic.h>
#include <sys/smp_impldefs.h>
#include <sys/pool_pset.h>
#include <sys/zone.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/x86_archext.h>
/*
* Set cpu's base SPL level to the highest active interrupt level
*/
void
set_base_spl(void)
{
	struct cpu *cpu = CPU;
	uint16_t active = (uint16_t)cpu->cpu_intr_actv;

	cpu->cpu_base_spl = active == 0 ? 0 : bsrw_insn(active);
}
/*
* Do all the work necessary to set up the cpu and thread structures
* to dispatch a high-level interrupt.
*
* Returns 0 if we're -not- already on the high-level interrupt stack,
* (and *must* switch to it), non-zero if we are already on that stack.
*
* Called with interrupts masked.
* The 'pil' is already set to the appropriate level for rp->r_trapno.
*/
static int
hilevel_intr_prolog(struct cpu *cpu, uint_t pil, uint_t oldpil, struct regs *rp)
{
	struct machcpu *mcpu = &cpu->cpu_m;
	uint_t mask;
	hrtime_t intrtime;
	hrtime_t now = tsc_read();

	ASSERT(pil > LOCK_LEVEL);

	if (pil == CBE_HIGH_PIL) {
		cpu->cpu_profile_pil = oldpil;
		if (USERMODE(rp->r_cs)) {
			cpu->cpu_profile_pc = 0;
			cpu->cpu_profile_upc = rp->r_pc;
		} else {
			cpu->cpu_profile_pc = rp->r_pc;
			cpu->cpu_profile_upc = 0;
		}
	}
	mask = cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK;
	if (mask != 0) {
		int nestpil;

		/*
		 * We have interrupted another high-level interrupt.
		 * Load starting timestamp, compute interval, update
		 * cumulative counter.
		 */
		nestpil = bsrw_insn((uint16_t)mask);
		intrtime = now -
		    mcpu->pil_high_start[nestpil - (LOCK_LEVEL + 1)];
		mcpu->intrstat[nestpil][0] += intrtime;
		cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
/*
* Another high-level interrupt is active below this one, so
* there is no need to check for an interrupt thread. That
* will be done by the lowest priority high-level interrupt
* active.
*/
	} else {
		kthread_t *t = cpu->cpu_thread;

		/*
		 * See if we are interrupting a low-level interrupt thread.
		 * If so, account for its time slice only if its time stamp
		 * is non-zero.
		 */
		if ((t->t_flag & T_INTR_THREAD) != 0 && t->t_intr_start != 0) {
			intrtime = now - t->t_intr_start;
			mcpu->intrstat[t->t_pil][0] += intrtime;
			cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
			t->t_intr_start = 0;
		}
	}
	/*
	 * Store starting timestamp in CPU structure for this PIL.
	 */
	mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)] = now;

	if (pil == 15) {
		/*
		 * To support reentrant level 15 interrupts, we maintain a
		 * recursion count in the top half of cpu_intr_actv.  Only
		 * when this count hits zero do we clear the PIL 15 bit from
		 * the lower half of cpu_intr_actv.
		 */
		uint16_t *refcntp = (uint16_t *)&cpu->cpu_intr_actv + 1;

		(*refcntp)++;
	}

	mask = cpu->cpu_intr_actv;
	cpu->cpu_intr_actv |= (1 << pil);

	return (mask & CPU_INTR_ACTV_HIGH_LEVEL_MASK);
}
/*
* Does most of the work of returning from a high level interrupt.
*
* Returns 0 if there are no more high level interrupts (in which
* case we must switch back to the interrupted thread stack) or
* non-zero if there are more (in which case we should stay on it).
*
* Called with interrupts masked
*/
static int
hilevel_intr_epilog(struct cpu *cpu, uint_t pil, uint_t oldpil, uint_t vecnum)
{
	struct machcpu *mcpu = &cpu->cpu_m;
	uint_t mask;
	hrtime_t intrtime;
	hrtime_t now = tsc_read();

	cpu->cpu_stats.sys.intr[pil - 1]++;

	if (pil == 15) {
		/*
		 * To support reentrant level 15 interrupts, we maintain a
		 * recursion count in the top half of cpu_intr_actv.  Only
		 * when this count hits zero do we clear the PIL 15 bit from
		 * the lower half of cpu_intr_actv.
		 */
		uint16_t *refcntp = (uint16_t *)&cpu->cpu_intr_actv + 1;

		if (--(*refcntp) == 0)
			cpu->cpu_intr_actv &= ~(1 << pil);
	} else {
		cpu->cpu_intr_actv &= ~(1 << pil);
	}

	intrtime = now - mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)];
	mcpu->intrstat[pil][0] += intrtime;
	cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
/*
* Check for lower-pil nested high-level interrupt beneath
* current one. If so, place a starting timestamp in its
* pil_high_start entry.
*/
	mask = cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK;
	if (mask != 0) {
		int nestpil;

		/*
		 * find PIL of nested interrupt
		 */
		nestpil = bsrw_insn((uint16_t)mask);
		mcpu->pil_high_start[nestpil - (LOCK_LEVEL + 1)] = now;
/*
* (Another high-level interrupt is active below this one,
* so there is no need to check for an interrupt
* thread. That will be done by the lowest priority
* high-level interrupt active.)
*/
	} else {
		kthread_t *t = cpu->cpu_thread;

		/*
		 * Check to see if there is a low-level interrupt active.
		 * If so, place a starting timestamp in the thread
		 * structure.
		 */
		if (t->t_flag & T_INTR_THREAD)
			t->t_intr_start = now;
	}

	mcpu->mcpu_pri = oldpil;
	(void) (*setlvlx)(oldpil, vecnum);

	return (cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK);
}
/*
* Set up the cpu, thread and interrupt thread structures for
* executing an interrupt thread. The new stack pointer of the
* interrupt thread (which *must* be switched to) is returned.
*/
static caddr_t
intr_thread_prolog(struct cpu *cpu, caddr_t stackptr, uint_t pil)
{
	struct machcpu *mcpu = &cpu->cpu_m;
	kthread_t *t, *volatile it = cpu->cpu_intr_thread;
	hrtime_t now = tsc_read();

	cpu->cpu_intr_actv |= (1 << pil);

	/*
	 * Get set to run an interrupt thread.
	 * There should always be an interrupt thread, since we
	 * allocate one for each level on each CPU.
	 *
	 * t_intr_start could be zero due to cpu_intr_swtch_enter.
	 */
	t = cpu->cpu_thread;
	if ((t->t_flag & T_INTR_THREAD) && t->t_intr_start != 0) {
		hrtime_t intrtime = now - t->t_intr_start;
		mcpu->intrstat[t->t_pil][0] += intrtime;
		cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
		t->t_intr_start = 0;
	}
}
/*
* unlink the interrupt thread off the cpu
*
* Note that the code in kcpc_overflow_intr -relies- on the
* ordering of events here - in particular that t->t_lwp of
* the interrupt thread is set to the pinned thread *before*
* curthread is changed.
*/
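	/*
	 * A minimal sketch of the elided unlink step (field names follow
	 * the fragments used elsewhere in this file):
	 */
	cpu->cpu_intr_thread = it->t_link;
	it->t_intr = t;
	it->t_lwp = t->t_lwp;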
/*
* (threads on the interrupt thread free list could have state
* preset to TS_ONPROC, but it helps in debugging if
* they're TS_FREE.)
*/
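	/*
	 * Sketch of the elided thread switch: mark the interrupt thread
	 * ONPROC, make it curthread, and give it its PIL and priority
	 * (intr_pri is the base interrupt priority referenced later in
	 * this file).
	 */
	it->t_state = TS_ONPROC;

	t->t_sp = (uintptr_t)stackptr;	/* mark stack for resume */
	cpu->cpu_thread = it;		/* new curthread on this cpu */
	it->t_pil = (uchar_t)pil;
	it->t_pri = intr_pri + (pri_t)pil;
	it->t_intr_start = now;

	return (it->t_stk);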
}
#ifdef DEBUG
int intr_thread_cnt;
#endif
/*
* Called with interrupts disabled
*/
static void
intr_thread_epilog(struct cpu *cpu, uint_t vec, uint_t oldpil)
{
	struct machcpu *mcpu = &cpu->cpu_m;
	kthread_t *t;
	kthread_t *it = cpu->cpu_thread;	/* curthread */
	uint_t pil, basespl;
	hrtime_t intrtime;
	hrtime_t now = tsc_read();

	pil = it->t_pil;
	cpu->cpu_stats.sys.intr[pil - 1]++;

	intrtime = now - it->t_intr_start;
	mcpu->intrstat[pil][0] += intrtime;
	cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;

	cpu->cpu_intr_actv &= ~(1 << pil);

	/*
	 * If there is still an interrupted thread underneath this one
	 * then the interrupt was never blocked and the return is
	 * fairly simple.  Otherwise it isn't.
	 */
	if ((t = it->t_intr) == NULL) {
/*
* The interrupted thread is no longer pinned underneath
* the interrupt thread. This means the interrupt must
* have blocked, and the interrupted thread has been
* unpinned, and has probably been running around the
* system for a while.
*
* Since there is no longer a thread under this one, put
* this interrupt thread back on the CPU's free list and
* resume the idle thread which will dispatch the next
* thread to run.
*/
#ifdef DEBUG
		intr_thread_cnt++;
#endif
		cpu->cpu_stats.sys.intrblk++;
/*
* Set CPU's base SPL based on active interrupts bitmask
*/
		set_base_spl();
		basespl = cpu->cpu_base_spl;
		mcpu->mcpu_pri = basespl;
		(*setlvlx)(basespl, vec);
		(void) splhigh();
		sti();
		it->t_state = TS_FREE;
/*
* Return interrupt thread to pool
*/
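		/* Likely elided step: push this thread onto the free list. */
		it->t_link = cpu->cpu_intr_thread;
		cpu->cpu_intr_thread = it;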
swtch();
panic("intr_thread_epilog: swtch returned");
/*NOTREACHED*/
}
/*
* Return interrupt thread to the pool
*/
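	it->t_link = cpu->cpu_intr_thread;
	cpu->cpu_intr_thread = it;
	it->t_state = TS_FREE;

	/*
	 * Sketch of the elided priority restore: never drop below the
	 * base SPL or the pre-interrupt level.
	 */
	basespl = cpu->cpu_base_spl;
	pil = MAX(oldpil, basespl);
	mcpu->mcpu_pri = pil;
	(*setlvlx)(pil, vec);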
t->t_intr_start = now;
cpu->cpu_thread = t;
}
/*
* intr_get_time() is a resource for interrupt handlers to determine how
* much time has been spent handling the current interrupt. Such a function
* is needed because higher level interrupts can arrive during the
* processing of an interrupt. intr_get_time() only returns time spent in the
* current interrupt handler.
*
* The caller must be calling from an interrupt handler running at a pil
* below or at lock level. Timings are not provided for high-level
* interrupts.
*
* The first time intr_get_time() is called while handling an interrupt,
* it returns the time since the interrupt handler was invoked. Subsequent
* calls will return the time since the prior call to intr_get_time(). Time
* is returned as ticks. Use tsc_scalehrtime() to convert ticks to nsec.
*
* Theory Of Intrstat[][]:
*
* uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
* uint64_ts per pil.
*
* intrstat[pil][0] is a cumulative count of the number of ticks spent
* handling all interrupts at the specified pil on this CPU. It is
* exported via kstats to the user.
*
* intrstat[pil][1] is always a count of ticks less than or equal to the
* value in [0]. The difference between [1] and [0] is the value returned
* by a call to intr_get_time(). At the start of interrupt processing,
* [0] and [1] will be equal (or nearly so). As the interrupt consumes
* time, [0] will increase, but [1] will remain the same. A call to
* intr_get_time() will return the difference, then update [1] to be the
* same as [0]. Future calls will return the time since the last call.
* Finally, when the interrupt completes, [1] is updated to the same as [0].
*
* Implementation:
*
* intr_get_time() works much like a higher level interrupt arriving. It
* "checkpoints" the timing information by incrementing intrstat[pil][0]
* to include elapsed running time, and by setting t_intr_start to rdtsc.
* It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
* and updates intrstat[pil][1] to be the same as the new value of
* intrstat[pil][0].
*
* In the normal handling of interrupts, after an interrupt handler returns
* and the code in intr_thread() updates intrstat[pil][0], it then sets
* intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
* the timings are reset, i.e. intr_get_time() will return [0] - [1] which
* is 0.
*
* Whenever interrupts arrive on a CPU which is handling a lower pil
* interrupt, they update the lower pil's [0] to show time spent in the
* handler that they've interrupted. This results in a growing discrepancy
* between [0] and [1], which is returned the next time intr_get_time() is
* called. Time spent in the higher-pil interrupt will not be returned in
* the next intr_get_time() call from the original interrupt, because
* the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
*/
uint64_t
intr_get_time(void)
{
	struct cpu *cpu;
	struct machcpu *mcpu;
	kthread_t *t;
	uint64_t time, delta, ret;
	uint_t pil;

	cli();
	cpu = CPU;
	mcpu = &cpu->cpu_m;
	t = cpu->cpu_thread;
	pil = t->t_pil;
	ASSERT(t->t_intr_start != 0);

	/* Checkpoint: fold the running interval into intrstat[pil][0]. */
	time = tsc_read();
	delta = time - t->t_intr_start;
	t->t_intr_start = time;
	time = mcpu->intrstat[pil][0] + delta;
	ret = time - mcpu->intrstat[pil][1];
	mcpu->intrstat[pil][0] = time;
	mcpu->intrstat[pil][1] = time;
	cpu->cpu_intracct[cpu->cpu_mstate] += delta;

	sti();
	return (ret);
}
static caddr_t
dosoftint_prolog(struct cpu *cpu, caddr_t stackptr, uint32_t st_pending,
    uint_t oldpil)
{
	kthread_t *t, *volatile it;
	struct machcpu *mcpu = &cpu->cpu_m;
	uint_t pil;
	hrtime_t now;

top:
	/*
	 * Pending softints at or below both the current and base
	 * priority cannot run yet.
	 */
	pil = bsrw_insn((uint16_t)st_pending);
	if (pil <= oldpil || pil <= cpu->cpu_base_spl)
		return (0);
/*
* XX64 Sigh.
*
* This is a transliteration of the i386 assembler code for
* soft interrupts. One question is "why does this need
* to be atomic?" One possible race is -other- processors
* posting soft interrupts to us in set_pending() i.e. the
* CPU might get preempted just after the address computation,
* but just before the atomic transaction, so another CPU would
* actually set the original CPU's st_pending bit. However,
* it looks like it would be simpler to disable preemption there.
* Are there other races for which preemption control doesn't work?
*
* The i386 assembler version -also- checks to see if the bit
* being cleared was actually set; if it wasn't, it rechecks
* for more. This seems a bit strange, as the only code that
* ever clears the bit is -this- code running with interrupts
* disabled on -this- CPU. This code would probably be cheaper:
*
* atomic_and_32((uint32_t *)&mcpu->mcpu_softinfo.st_pending,
* ~(1 << pil));
*
* and t->t_preempt--/++ around set_pending() even cheaper,
* but at this point, correctness is critical, so we slavishly
* emulate the i386 port.
*/
	if (atomic_btr32((uint32_t *)&mcpu->mcpu_softinfo.st_pending,
	    pil) == 0) {
		st_pending = mcpu->mcpu_softinfo.st_pending;
		goto top;
	}

	mcpu->mcpu_pri = pil;
	(*setspl)(pil);

	now = tsc_read();
/*
* Get set to run interrupt thread.
* There should always be an interrupt thread since we
* allocate one for each level on the CPU.
*/
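	/* Sketch of the elided unlink from the per-CPU free list. */
	it = cpu->cpu_intr_thread;
	cpu->cpu_intr_thread = it->t_link;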
/* t_intr_start could be zero due to cpu_intr_swtch_enter. */
	t = cpu->cpu_thread;
	if ((t->t_flag & T_INTR_THREAD) && t->t_intr_start != 0) {
		hrtime_t intrtime = now - t->t_intr_start;
		mcpu->intrstat[t->t_pil][0] += intrtime;
		cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
		t->t_intr_start = 0;
	}
/*
* Note that the code in kcpc_overflow_intr -relies- on the
* ordering of events here - in particular that t->t_lwp of
* the interrupt thread is set to the pinned thread *before*
* curthread is changed.
*/
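	/* Order matters here, per the comment above: t_lwp first. */
	it->t_lwp = t->t_lwp;
	it->t_state = TS_ONPROC;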
/*
* Push interrupted thread onto list from new thread.
* Set the new thread as the current one.
* Set interrupted thread's T_SP because if it is the idle thread,
* resume() may use that stack between threads.
*/
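	t->t_sp = (uintptr_t)stackptr;
	it->t_intr = t;
	cpu->cpu_thread = it;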
/*
* Set bit for this pil in CPU's interrupt active bitmask.
*/
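	cpu->cpu_intr_actv |= (1 << pil);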
/*
* Initialize thread priority level from intr_pri
*/
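	it->t_pil = (uchar_t)pil;
	it->t_pri = (pri_t)pil + intr_pri;
	it->t_intr_start = now;

	return (it->t_stk);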
}
static void
dosoftint_epilog(struct cpu *cpu, uint_t oldpil)
{
	struct machcpu *mcpu = &cpu->cpu_m;
	kthread_t *t, *it;
	uint_t pil, basespl;
	hrtime_t intrtime;
	hrtime_t now = tsc_read();

	it = cpu->cpu_thread;
	pil = it->t_pil;

	cpu->cpu_stats.sys.intr[pil - 1]++;
	cpu->cpu_intr_actv &= ~(1 << pil);
	intrtime = now - it->t_intr_start;
	mcpu->intrstat[pil][0] += intrtime;
	cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;

	/*
	 * If there is still an interrupted thread underneath this one
	 * then the interrupt was never blocked and the return is
	 * fairly simple.  Otherwise it isn't.
	 */
	if ((t = it->t_intr) == NULL) {
/*
* Put thread back on the interrupt thread list.
* This was an interrupt thread, so set CPU's base SPL.
*/
		set_base_spl();
		it->t_state = TS_FREE;
		it->t_link = cpu->cpu_intr_thread;
		cpu->cpu_intr_thread = it;
		(void) splhigh();
		sti();
		swtch();
		/*NOTREACHED*/
		panic("dosoftint_epilog: swtch returned");
	}

	it->t_link = cpu->cpu_intr_thread;
	cpu->cpu_intr_thread = it;
	it->t_state = TS_FREE;
	cpu->cpu_thread = t;
	if (t->t_flag & T_INTR_THREAD)
		t->t_intr_start = now;
	basespl = cpu->cpu_base_spl;
	pil = MAX(oldpil, basespl);
	mcpu->mcpu_pri = pil;
	(*setspl)(pil);
}
/*
 * Make the interrupted thread 't' runnable.
*
* Since t->t_sp has already been saved, t->t_pc is all
* that needs to be set in this function.
*
* Returns the interrupt level of the interrupt thread.
*/
int
intr_passivate(
	kthread_t *it,		/* interrupt thread */
	kthread_t *t)		/* interrupted thread */
{
	extern void _sys_rtt();

	t->t_pc = (uintptr_t)_sys_rtt;

	return (it->t_pil);
}
/*
* Create interrupt kstats for this CPU.
*/
void
cpu_create_intrstat(cpu_t *cp)
{
	int		i;
	kstat_t		*intr_ksp;
	kstat_named_t	*knp;
	char		name[KSTAT_STRLEN];
	zoneid_t	zoneid;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (pool_pset_enabled())
		zoneid = GLOBAL_ZONEID;
	else
		zoneid = ALL_ZONES;

	intr_ksp = kstat_create_zone("cpu", cp->cpu_id, "intrstat", "misc",
	    KSTAT_TYPE_NAMED, PIL_MAX * 2, NULL, zoneid);

	if (intr_ksp != NULL) {
		intr_ksp->ks_update = cpu_kstat_intrstat_update;
		knp = (kstat_named_t *)intr_ksp->ks_data;
		intr_ksp->ks_private = cp;
		/*
		 * Initialize each PIL's named kstat
		 */
		for (i = 0; i < PIL_MAX; i++) {
			(void) snprintf(name, KSTAT_STRLEN, "level-%d-time",
			    i + 1);
			kstat_named_init(&knp[i * 2], name, KSTAT_DATA_UINT64);
			(void) snprintf(name, KSTAT_STRLEN, "level-%d-count",
			    i + 1);
			kstat_named_init(&knp[(i * 2) + 1], name,
			    KSTAT_DATA_UINT64);
		}
		kstat_install(intr_ksp);
	}
}
/*
* Delete interrupt kstats for this CPU.
*/
void
cpu_delete_intrstat(cpu_t *cp)
{
	kstat_delete_byname_zone("cpu", cp->cpu_id, "intrstat", ALL_ZONES);
}
/*
* Convert interrupt statistics from CPU ticks to nanoseconds and
* update kstat.
*/
int
cpu_kstat_intrstat_update(kstat_t *ksp, int rw)
{
	kstat_named_t	*knp = ksp->ks_data;
	cpu_t		*cpup = (cpu_t *)ksp->ks_private;
	int		i;
	hrtime_t	hrt;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	for (i = 0; i < PIL_MAX; i++) {
		hrt = (hrtime_t)cpup->cpu_m.intrstat[i + 1][0];
		tsc_scalehrtime(&hrt);
		knp[i * 2].value.ui64 = (uint64_t)hrt;
		knp[(i * 2) + 1].value.ui64 = cpup->cpu_stats.sys.intr[i];
	}
return (0);
}
/*
* An interrupt thread is ending a time slice, so compute the interval it
* ran for and update the statistic for its PIL.
*/
void
cpu_intr_swtch_enter(kthread_id_t t)
{
	uint64_t	interval;
	uint64_t	start;
	cpu_t		*cpu;
/*
* We could be here with a zero timestamp. This could happen if:
* an interrupt thread which no longer has a pinned thread underneath
* it (i.e. it blocked at some point in its past) has finished running
* its handler. intr_thread() updated the interrupt statistic for its
* PIL and zeroed its timestamp. Since there was no pinned thread to
* return to, swtch() gets called and we end up here.
*
* Note that we use atomic ops below (cas64 and atomic_add_64), which
 * we don't use in the functions above, because we're not called
 * with interrupts blocked, but the epilog/prolog functions are.
 */
	if (t->t_intr_start) {
		do {
			start = t->t_intr_start;
			interval = tsc_read() - start;
		} while (cas64(&t->t_intr_start, start, 0) != start);
		cpu = CPU;
		cpu->cpu_m.intrstat[t->t_pil][0] += interval;
		atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate],
		    interval);
	} else
		ASSERT(t->t_intr == NULL);
}
/*
* An interrupt thread is returning from swtch(). Place a starting timestamp
* in its thread structure.
*/
void
cpu_intr_swtch_exit(kthread_id_t t)
{
	uint64_t ts;

	do {
		ts = t->t_intr_start;
	} while (cas64(&t->t_intr_start, ts, tsc_read()) != ts);
}
/*
* Dispatch a hilevel interrupt (one above LOCK_LEVEL)
*/
/*ARGSUSED*/
static void
dispatch_hilevel(uint_t vector, uint_t arg2)
{
	sti();
	av_dispatch_autovect(vector);
	cli();
}
/*
* Dispatch a soft interrupt
*/
/*ARGSUSED*/
static void
dispatch_softint(uint_t oldpil, uint_t arg2)
{
	struct cpu *cpu = CPU;

	sti();
	av_dispatch_softvect((int)cpu->cpu_thread->t_pil);
	cli();
/*
* Must run softint_epilog() on the interrupt thread stack, since
* there may not be a return from it if the interrupt thread blocked.
*/
	dosoftint_epilog(cpu, oldpil);
}
/*
* Dispatch a normal interrupt
*/
static void
dispatch_hardint(uint_t vector, uint_t oldipl)
{
	struct cpu *cpu = CPU;

	sti();
	av_dispatch_autovect(vector);
	cli();
/*
* Must run intr_thread_epilog() on the interrupt thread stack, since
* there may not be a return from it if the interrupt thread blocked.
*/
	intr_thread_epilog(cpu, vector, oldipl);
}
/*
* Deliver any softints the current interrupt priority allows.
* Called with interrupts disabled.
*/
void
dosoftint(struct regs *regs)
{
	struct cpu *cpu = CPU;
	int oldipl;
	caddr_t newsp;

	while (cpu->cpu_softinfo.st_pending) {
		oldipl = cpu->cpu_pri;
		newsp = dosoftint_prolog(cpu, (caddr_t)regs,
		    cpu->cpu_softinfo.st_pending, oldipl);
/*
* If returned stack pointer is NULL, priority is too high
* to run any of the pending softints now.
* Break out and they will be run later.
*/
		if (newsp == NULL)
			break;
		switch_sp_and_call(newsp, dispatch_softint, oldipl, 0);
	}
}
/*
* Interrupt service routine, called with interrupts disabled.
*/
/*ARGSUSED*/
void
do_interrupt(struct regs *rp, trap_trace_rec_t *ttp)
{
	struct cpu *cpu = CPU;
	int newipl, oldipl = cpu->cpu_pri;
	uint_t vector;
	caddr_t newsp;

#ifdef TRAPTRACE
	ttp->ttr_marker = TT_INTERRUPT;
	ttp->ttr_ipl = 0xff;
	ttp->ttr_pri = oldipl;
	ttp->ttr_spl = cpu->cpu_base_spl;
	ttp->ttr_vector = 0xff;
#endif	/* TRAPTRACE */
/*
* Handle any pending TLB flushing
*/
tlb_service();
/*
* If it's a softint go do it now.
*/
	if (rp->r_trapno == T_SOFTINT) {
		dosoftint(rp);
		ASSERT(!interrupts_enabled());
		return;
	}
/*
* Raise the interrupt priority.
*/
	newipl = (*setlvl)(oldipl, (int *)&rp->r_trapno);
#ifdef TRAPTRACE
	ttp->ttr_ipl = (uchar_t)newipl;
#endif	/* TRAPTRACE */
/*
* Bail if it is a spurious interrupt
*/
if (newipl == -1)
return;
	cpu->cpu_pri = newipl;
	vector = rp->r_trapno;
#ifdef TRAPTRACE
	ttp->ttr_vector = vector;
#endif	/* TRAPTRACE */
if (newipl > LOCK_LEVEL) {
/*
* High priority interrupts run on this cpu's interrupt stack.
*/
		if (hilevel_intr_prolog(cpu, newipl, oldipl, rp) == 0) {
			newsp = cpu->cpu_intr_stack;
			switch_sp_and_call(newsp, dispatch_hilevel,
			    vector, oldipl);
		} else { /* already on the interrupt stack */
			dispatch_hilevel(vector, 0);
		}
		(void) hilevel_intr_epilog(cpu, newipl, oldipl, vector);
} else {
/*
* Run this interrupt in a separate thread.
*/
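		/* Sketch of the elided dispatch via an interrupt thread. */
		newsp = intr_thread_prolog(cpu, (caddr_t)rp, newipl);
		switch_sp_and_call(newsp, dispatch_hardint, vector, oldipl);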
}
/*
* Deliver any pending soft interrupts.
*/
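	if (cpu->cpu_softinfo.st_pending)
		dosoftint(rp);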
}
/*
* Common tasks always done by _sys_rtt, called with interrupts disabled.
* Returns 1 if returning to userland, 0 if returning to system mode.
*/
int
sys_rtt_common(struct regs *rp)
{
	kthread_t *tp;
	extern void mutex_exit_critical_start();
	extern long mutex_exit_critical_size;
loop:
	/*
	 * Check if returning to user
	 */
	tp = CPU->cpu_thread;
	if (USERMODE(rp->r_cs)) {
		/*
		 * Check if AST pending.
		 */
		if (tp->t_astflag) {
/*
* Let trap() handle the AST
*/
			sti();
			rp->r_trapno = T_AST;
			trap(rp, (caddr_t)0, CPU->cpu_id);
			cli();
			goto loop;
		}
#if defined(__amd64)
/*
* We are done if segment registers do not need updating.
*/
		if (tp->t_lwp->lwp_pcb.pcb_rupdate == 0)
			return (1);

		if (update_sregs(rp, tp->t_lwp)) {
			/*
			 * 1 or more of the selectors is bad.
			 * Deliver a SIGSEGV.
			 */
			proc_t *p = ttoproc(tp);
			sti();
			mutex_enter(&p->p_lock);
			tp->t_lwp->lwp_cursig = SIGSEGV;
			mutex_exit(&p->p_lock);
			psig();
			tp->t_lwp->lwp_cursig = 0;
			cli();
		}
		tp->t_lwp->lwp_pcb.pcb_rupdate = 0;
#endif /* __amd64 */
return (1);
}
/*
* Here if we are returning to supervisor mode.
* Check for a kernel preemption request.
*/
	if (CPU->cpu_kprunrun && (rp->r_ps & PS_IE)) {
		/*
		 * Do nothing if already in kpreempt
		 */
		if (!tp->t_preempt_lk) {
			tp->t_preempt_lk = 1;
			sti();
			kpreempt(1);	/* asynchronous kpreempt call */
			cli();
			tp->t_preempt_lk = 0;
		}
	}
	/*
	 * If we interrupted the mutex_exit() critical region we must
	 * reset the PC back to the beginning to prevent missed wakeups.
	 * See the comments in mutex_exit() for details.
	 */
	if ((uintptr_t)rp->r_pc - (uintptr_t)mutex_exit_critical_start <
	    mutex_exit_critical_size) {
		rp->r_pc = (greg_t)mutex_exit_critical_start;
	}
return (0);
}
void
send_dirint(
	int cpuid,		/* cpu to be interrupted */
	int int_level)		/* interrupt level */
{
	(*send_dirintf)(cpuid, int_level);
}
/*
* do_splx routine, takes new ipl to set
* returns the old ipl.
* We are careful not to set priority lower than CPU->cpu_base_pri,
* even though it seems we're raising the priority, it could be set
* higher at any time by an interrupt routine, so we must block interrupts
* and look at CPU->cpu_base_pri
*/
int
do_splx(int newpri)
{
	ulong_t	flag;
	cpu_t	*cpu;
	int	curpri, basepri;

	flag = intr_clear();
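	/*
	 * Sketch of the elided clamp: cache the cpu pointer now that
	 * interrupts are off, and never set priority below the base SPL.
	 */
	cpu = CPU;
	curpri = cpu->cpu_m.mcpu_pri;
	basepri = cpu->cpu_base_spl;
	if (newpri < basepri)
		newpri = basepri;
	cpu->cpu_m.mcpu_pri = newpri;
	(*setspl)(newpri);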
/*
* If we are going to reenable interrupts see if new priority level
* allows pending softint delivery.
*/
	if ((flag & PS_IE) &&
	    bsrw_insn((uint16_t)cpu->cpu_softinfo.st_pending) > newpri)
		fakesoftint();
	ASSERT(!interrupts_enabled());
	intr_restore(flag);
	return (curpri);
}
/*
* Common spl raise routine, takes new ipl to set
* returns the old ipl, will not lower ipl.
*/
int
splr(int newpri)
{
	ulong_t	flag;
	cpu_t	*cpu;
	int	curpri, basepri;

	flag = intr_clear();
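	/* As in do_splx(): cache the cpu and current priority (sketch). */
	cpu = CPU;
	curpri = cpu->cpu_m.mcpu_pri;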
	/*
	 * Only do something if new priority is larger
	 */
	if (newpri > curpri) {
		basepri = cpu->cpu_base_spl;
		if (newpri < basepri)
			newpri = basepri;
		cpu->cpu_m.mcpu_pri = newpri;
		(*setspl)(newpri);
		/*
		 * See if new priority level allows pending softint delivery
		 */
		if ((flag & PS_IE) &&
		    bsrw_insn((uint16_t)cpu->cpu_softinfo.st_pending) >
		    newpri)
			fakesoftint();
	}
	intr_restore(flag);
return (curpri);
}
int
getpil(void)
{
	return (CPU->cpu_m.mcpu_pri);
}
int
interrupts_enabled(void)
{
	ulong_t	flag;

	flag = getflags();
	return ((flag & PS_IE) == PS_IE);
}
#ifdef DEBUG
void
assert_ints_enabled(void)
{
	ASSERT(!interrupts_unleashed || interrupts_enabled());
}
#endif /* DEBUG */