apix_intr.c revision 636dfb4b6ac0749387c883053011a3afb4b4893b
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
#include <sys/cpu_event.h>
#include <sys/machlock.h>
#include <sys/archsystm.h>
#include <sys/processor.h>
#include <sys/smp_impldefs.h>
#include <sys/ddi_impldefs.h>
#include <sys/x86_archext.h>
#include <sys/cpc_impl.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/rm_platter.h>
#include <sys/privregs.h>
#include <sys/pci_intr_lib.h>
#include <sys/dditypes.h>
static void apix_post_hardint(int);
/*
* Insert a vector into the tail of the interrupt pending list
*/
static __inline__ void
{
return;
}
}
/*
* Remove and return a vector from the head of the hardware interrupt
* pending list.
*/
static __inline__ struct autovec *
{
return (NULL);
/*
* If there are blocked higher-level interrupts, return
* NULL to quit handling the current IPL level.
*/
return (NULL);
}
return (avp);
}
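/*
 * Illustrative sketch of the two list operations above, assuming per-IPL
 * head/tail arrays of singly linked pending entries.  All names here
 * (pend_av_t, pq_head, pq_tail, PQ_MAXIPL) are hypothetical stand-ins,
 * not the structures this file really uses; compiled out.
 */
#if 0
#define	PQ_MAXIPL	16

typedef struct pend_av {
	struct pend_av	*pa_link;	/* next entry at the same IPL */
} pend_av_t;

static pend_av_t *pq_head[PQ_MAXIPL], *pq_tail[PQ_MAXIPL];

/* Insert a vector's entry at the tail of its IPL's pending list. */
static void
pq_insert(pend_av_t *avp, int ipl)
{
	avp->pa_link = NULL;
	if (pq_tail[ipl] == NULL) {		/* empty queue */
		pq_head[ipl] = pq_tail[ipl] = avp;
		return;
	}
	pq_tail[ipl]->pa_link = avp;
	pq_tail[ipl] = avp;
}

/*
 * Remove the head entry for the given IPL; return NULL if the queue is
 * empty or a higher IPL still has entries pending, so the caller quits
 * handling the current level.
 */
static pend_av_t *
pq_remove(int ipl)
{
	pend_av_t *avp;
	int i;

	if ((avp = pq_head[ipl]) == NULL)
		return (NULL);
	for (i = ipl + 1; i < PQ_MAXIPL; i++)
		if (pq_head[i] != NULL)
			return (NULL);		/* higher level blocked */
	if ((pq_head[ipl] = avp->pa_link) == NULL)
		pq_tail[ipl] = NULL;
	return (avp);
}
#endif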
/*
* add_pending_hardint:
*
* Add hardware interrupts to the interrupt pending list.
*/
static void
{
int ipl;
/*
* An MSI interrupt that does not support per-vector masking
* can be triggered on a stale vector during a rebind, because
* the rebind operation cannot program the MSI address and
* data atomically.  Add the ISR of such a suspicious
* interrupt to the pending list.
*/
return;
continue; /* skip freed entry */
ipl = p->av_prilevel;
prevp = p;
/* set pending at specified priority level */
if (p->av_flags & AV_PENTRY_PEND)
continue; /* already in the pending list */
p->av_flags |= AV_PENTRY_PEND;
/* insert into the pending list by its original IPL */
}
/* last one of the linked list */
}
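/*
 * Illustrative sketch of the pending-marking walk above: skip freed
 * entries, skip handlers already on the pending list, otherwise flag
 * them pending and queue them at their original IPL.  The chain and
 * flag names here are hypothetical; compiled out.
 */
#if 0
#define	HP_PEND	0x1

typedef struct hp_av {
	struct hp_av	*ha_next;	/* next handler on this vector */
	void		(*ha_func)(void);
	int		ha_ipl;		/* handler's original IPL */
	unsigned	ha_flags;
} hp_av_t;

extern void hp_enqueue(hp_av_t *, int);	/* e.g. pq_insert() above */

static void
hp_mark_pending(hp_av_t *chain)
{
	hp_av_t *p;

	for (p = chain; p != NULL; p = p->ha_next) {
		if (p->ha_func == NULL)
			continue;		/* skip freed entry */
		if (p->ha_flags & HP_PEND)
			continue;		/* already pending */
		p->ha_flags |= HP_PEND;
		hp_enqueue(p, p->ha_ipl);	/* by its original IPL */
	}
}
#endif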
/*
* Walk pending hardware interrupts at the given priority level, invoking
* each interrupt handler as we go.
*/
extern uint64_t intr_get_time(void);
static void
{
uint_t r;
continue;
/* Don't enable interrupts during x-calls */
sti();
cli();
if (vector) {
}
/* mark it as idle */
}
}
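/*
 * Illustrative sketch of the dispatch walk above: pop each pending
 * handler, run it with interrupts enabled (unless it services cross
 * calls), then mark it idle again.  All names are hypothetical
 * stand-ins; compiled out.
 */
#if 0
#define	PD_PEND	0x1

typedef struct pd_av {
	struct pd_av	*pd_link;
	void		(*pd_func)(void);
	unsigned	pd_flags;	/* PD_PEND while queued */
	int		pd_is_xcall;	/* handler services cross calls */
} pd_av_t;

extern pd_av_t *pd_dequeue(int);	/* head-remove, as sketched above */
extern void intr_enable(void);		/* sti */
extern void intr_disable(void);		/* cli */

/* Called with interrupts disabled; returns with them disabled. */
static void
pd_dispatch_pending(int ipl)
{
	pd_av_t *p;

	while ((p = pd_dequeue(ipl)) != NULL) {
		/* don't enable interrupts while running an x-call */
		if (!p->pd_is_xcall)
			intr_enable();
		if (p->pd_func != NULL)
			p->pd_func();
		intr_disable();
		p->pd_flags &= ~PD_PEND;	/* mark it as idle */
	}
}
#endif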
static caddr_t
{
/*
* Get set to run interrupt thread.
* There should always be an interrupt thread since we
* allocate one for each level on the CPU.
*/
/* t_intr_start could be zero due to cpu_intr_swtch_enter. */
t = cpu->cpu_thread;
t->t_intr_start = 0;
}
/*
* Note that the code in kcpc_overflow_intr -relies- on the
* ordering of events here - in particular that t->t_lwp of
* the interrupt thread is set to the pinned thread *before*
* curthread is changed.
*/
/*
* Push interrupted thread onto list from new thread.
* Set the new thread as the current one.
* Set interrupted thread's T_SP because if it is the idle thread,
* resume() may use that stack between threads.
*/
/*
* Set bit for this pil in CPU's interrupt active bitmask.
*/
/*
* Initialize thread priority level from intr_pri
*/
}
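/*
 * Illustrative sketch of the hand-off above: the interrupt thread
 * borrows the interrupted thread's lwp and pins it *before* cpu_thread
 * is switched, which is the ordering kcpc_overflow_intr depends on.
 * Structure and field names here are hypothetical; compiled out.
 */
#if 0
typedef struct sk_thread {
	struct sk_thread *t_intr;	/* thread pinned beneath this one */
	void		*t_lwp;
	unsigned long	t_intr_start;
} sk_thread_t;

typedef struct sk_cpu {
	sk_thread_t	*cpu_thread;
	sk_thread_t	*cpu_intr_thread;	/* per-level intr thread */
} sk_cpu_t;

static sk_thread_t *
sk_intr_prolog(sk_cpu_t *cpu, unsigned long now)
{
	sk_thread_t *t = cpu->cpu_thread;
	sk_thread_t *it = cpu->cpu_intr_thread;

	t->t_intr_start = 0;	/* close the interrupted thread's slice */
	it->t_lwp = t->t_lwp;	/* must precede the curthread switch */
	it->t_intr = t;		/* push interrupted thread under it */
	cpu->cpu_thread = it;	/* only now does "curthread" change */
	it->t_intr_start = now;
	return (it);
}
#endif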
static void
{
/*
* If there is still an interrupted thread underneath this one
* then the interrupt was never blocked and the return is
* fairly simple. Otherwise it isn't.
*/
/*
* Put thread back on the interrupt thread list.
* This was an interrupt thread, so set CPU's base SPL.
*/
set_base_spl();
/* mcpu->mcpu_pri = cpu->cpu_base_spl; */
(void) splhigh();
sti();
swtch();
/*NOTREACHED*/
panic("dosoftint_epilog: swtch returned");
}
cpu->cpu_thread = t;
if (t->t_flag & T_INTR_THREAD)
t->t_intr_start = now;
}
/*
* Dispatch a soft interrupt
*/
static void
{
sti();
cli();
/*
* Must run softint_epilog() on the interrupt thread stack, since
* there may not be a return from it if the interrupt thread blocked.
*/
}
/*
* Deliver any softints the current interrupt priority allows.
* Called with interrupts disabled.
*/
int
{
int oldipl;
int newipl;
return (-1);
}
return (0);
}
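/*
 * Illustrative sketch of soft interrupt delivery as described above:
 * keep dispatching the highest pending soft level until nothing pending
 * exceeds the current priority.  Function names are hypothetical
 * stand-ins; compiled out.
 */
#if 0
extern int softint_highest_pending(void);	/* -1 if none pending */
extern int current_ipl(void);
extern void softint_dispatch(int ipl);		/* runs handler at ipl */

/* Called with interrupts disabled; returns with them disabled. */
static void
sk_do_softint(void)
{
	int newipl;

	while ((newipl = softint_highest_pending()) > current_ipl())
		softint_dispatch(newipl);
}
#endif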
static int
{
cpu->cpu_profile_pc = 0;
cpu->cpu_cpcprofile_pc = 0;
} else {
cpu->cpu_profile_upc = 0;
cpu->cpu_cpcprofile_upc = 0;
}
}
if (mask != 0) {
int nestpil;
/*
* We have interrupted another high-level interrupt.
* Load starting timestamp, compute interval, update
* cumulative counter.
*/
} else {
/*
* See if we are interrupting a low-level interrupt thread.
* If so, account for its time slice only if its time stamp
* is non-zero.
*/
t->t_intr_start = 0;
}
}
/* store starting timestamp in CPU structure for this IPL */
if (pil == 15) {
/*
* To support reentrant level 15 interrupts, we maintain a
* recursion count in the top half of cpu_intr_actv. Only
* when this count hits zero do we clear the PIL 15 bit from
* the lower half of cpu_intr_actv.
*/
(*refcntp)++;
}
/* clear pending ipl level bit */
return (mask);
}
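/*
 * Illustrative sketch of the PIL 15 accounting described above: the low
 * 16 bits of a 32-bit "active" word are per-PIL bits, and the high 16
 * bits count PIL 15 recursion.  Taking the address of the high half
 * with "+ 1" assumes a little-endian layout, as on x86.  Variable names
 * are hypothetical; compiled out.
 */
#if 0
#include <stdint.h>

static uint32_t sk_intr_actv;		/* stand-in for cpu_intr_actv */

static uint32_t
sk_hilevel_enter(int pil)
{
	/* bits for interrupts active below this pil */
	uint32_t mask = sk_intr_actv & ((1u << pil) - 1);

	if (pil == 15) {
		/* high half of the word holds the recursion count */
		uint16_t *refcntp = (uint16_t *)&sk_intr_actv + 1;
		(*refcntp)++;
	}
	sk_intr_actv |= (1u << pil);
	return (mask);
}
#endif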
static int
{
if (pil == 15) {
/*
* To support reentrant level 15 interrupts, we maintain a
* recursion count in the top half of cpu_intr_actv. Only
* when this count hits zero do we clear the PIL 15 bit from
* the lower half of cpu_intr_actv.
*/
if (--(*refcntp) == 0)
} else {
}
/*
* Check for lower-pil nested high-level interrupt beneath
* current one. If so, place a starting timestamp in its
* pil_high_start entry.
*/
if (mask != 0) {
int nestpil;
/*
* find PIL of nested interrupt
*/
/*
* (Another high-level interrupt is active below this one,
* so there is no need to check for an interrupt
* thread. That will be done by the lowest priority
* high-level interrupt active.)
*/
} else {
/*
* Check to see if there is a low-level interrupt active.
* If so, place a starting timestamp in the thread
* structure.
*/
if (t->t_flag & T_INTR_THREAD)
t->t_intr_start = now;
}
if (pil < CBE_HIGH_PIL)
return (mask);
}
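/*
 * Illustrative companion to the sketch above for the exit path: the
 * PIL 15 bit clears only when its recursion count drops to zero, and
 * the highest remaining bit identifies the interrupted nested level.
 * Names are hypothetical; compiled out.
 */
#if 0
#include <stdint.h>

static uint32_t sk_actv;	/* same layout as the previous sketch */

static int
sk_hilevel_exit(int pil)
{
	uint32_t mask;
	int nestpil = -1;

	if (pil == 15) {
		uint16_t *refcntp = (uint16_t *)&sk_actv + 1;
		if (--(*refcntp) == 0)
			sk_actv &= ~(1u << 15);
	} else {
		sk_actv &= ~(1u << pil);
	}
	/* PIL of the highest remaining nested interrupt, if any */
	for (mask = sk_actv & ((1u << pil) - 1); mask != 0; mask >>= 1)
		nestpil++;
	return (nestpil);	/* -1 when nothing was nested */
}
#endif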
/*
* Dispatch a hilevel interrupt (one above LOCK_LEVEL)
*/
static void
{
}
static __inline__ int
{
return (-1);
/*
* High priority interrupts run on this cpu's interrupt stack.
*/
newipl, 0);
} else { /* already on the interrupt stack */
}
}
return (0);
}
/*
* Get an interrupt thread and switch to it. It's called from do_interrupt().
* The IF flag is cleared and thus all maskable interrupts are blocked at
* the time of calling.
*/
static caddr_t
{
/*
* Get set to run interrupt thread.
* There should always be an interrupt thread since we
* allocate one for each level on the CPU.
*/
/* t_intr_start could be zero due to cpu_intr_swtch_enter. */
t = cpu->cpu_thread;
t->t_intr_start = 0;
}
/*
* Push interrupted thread onto list from new thread.
* Set the new thread as the current one.
* Set interrupted thread's T_SP because if it is the idle thread,
* resume() may use that stack between threads.
*/
/*
* Note that the code in kcpc_overflow_intr -relies- on the
* ordering of events here - in particular that t->t_lwp of
* the interrupt thread is set to the pinned thread *before*
* curthread is changed.
*/
/*
* (threads on the interrupt thread free list could have state
* preset to TS_ONPROC, but it helps in debugging if
* they're TS_FREE.)
*/
/*
* Initialize thread priority level from intr_pri
*/
}
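/*
 * Illustrative sketch of taking an interrupt thread as the prolog above
 * does: one thread exists per level on the CPU, and its state moves
 * from TS_FREE (kept that way to aid debugging) to TS_ONPROC, with its
 * priority derived from intr_pri.  All names here are hypothetical
 * stand-ins; compiled out.
 */
#if 0
enum sk_tstate { SK_TS_FREE, SK_TS_ONPROC };

typedef struct sk_ithread {
	struct sk_ithread *it_link;	/* free-list linkage */
	enum sk_tstate	it_state;
	int		it_pri;
} sk_ithread_t;

static sk_ithread_t *sk_intr_freelist;	/* per-CPU in the real thing */
extern int sk_intr_pri_base;		/* stand-in for intr_pri */

static sk_ithread_t *
sk_ithread_get(int pil)
{
	sk_ithread_t *it = sk_intr_freelist;

	/* there is always a thread here: one is allocated per level */
	sk_intr_freelist = it->it_link;
	it->it_state = SK_TS_ONPROC;
	it->it_pri = sk_intr_pri_base + pil;	/* priority from intr_pri */
	return (it);
}
#endif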
static void
{
/*
* If there is still an interrupted thread underneath this one
* then the interrupt was never blocked and the return is
* fairly simple. Otherwise it isn't.
*/
/*
* The interrupted thread is no longer pinned underneath
* the interrupt thread. This means the interrupt must
* have blocked, and the interrupted thread has been
* unpinned, and has probably been running around the
* system for a while.
*
* Since there is no longer a thread under this one, put
* this interrupt thread back on the CPU's free list and
* resume the idle thread which will dispatch the next
* thread to run.
*/
/*
* Put thread back on the interrupt thread list.
* This was an interrupt thread, so set CPU's base SPL.
*/
set_base_spl();
/*
* Return interrupt thread to pool
*/
(void) splhigh();
sti();
swtch();
/*NOTREACHED*/
panic("dosoftint_epilog: swtch returned");
}
/*
* Return interrupt thread to the pool
*/
cpu->cpu_thread = t;
if (t->t_flag & T_INTR_THREAD)
t->t_intr_start = now;
}
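/*
 * Illustrative sketch of the epilog decision above: if a thread is
 * still pinned beneath the interrupt thread, unpin and resume it;
 * otherwise the handler blocked at some point, so the thread goes back
 * on the free list and we switch away for good.  Hypothetical names
 * throughout; compiled out.
 */
#if 0
typedef struct ep_thread {
	struct ep_thread *et_intr;	/* pinned thread, if any */
	struct ep_thread *et_link;	/* free-list linkage */
} ep_thread_t;

typedef struct ep_cpu {
	ep_thread_t	*ec_thread;
	ep_thread_t	*ec_freelist;
} ep_cpu_t;

extern void ep_set_base_spl(void);
extern void ep_swtch(void);		/* does not return here */

static void
ep_intr_epilog(ep_cpu_t *cpu)
{
	ep_thread_t *it = cpu->ec_thread;
	ep_thread_t *t = it->et_intr;

	if (t == NULL) {
		/* blocked and unpinned: back to the pool, then switch */
		it->et_link = cpu->ec_freelist;
		cpu->ec_freelist = it;
		ep_set_base_spl();
		ep_swtch();
		/* NOTREACHED */
		return;
	}
	it->et_intr = NULL;		/* unpin the interrupted thread */
	cpu->ec_thread = t;		/* resume it as curthread */
}
#endif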
static void
{
/*
* Must run intr_thread_epilog() on the interrupt thread stack, since
* there may not be a return from it if the interrupt thread blocked.
*/
}
static __inline__ int
{
return (-1);
/*
* Run this interrupt in a separate thread.
*/
oldipl, 0);
}
return (0);
}
/*
* Unmask level-triggered interrupts
*/
static void
apix_post_hardint(int vector)
{
}
static void
{
return;
sti();
cli();
}
static void
{
}
static void
{
/*
* Must run intr_thread_epilog() on the interrupt thread stack, since
* there may not be a return from it if the interrupt thread blocked.
*/
}
/*
* Interrupt service routine, called with interrupts disabled.
*/
void
{
#ifdef TRAPTRACE
#endif /* TRAPTRACE */
/*
* If it's a softint go do it now.
*/
/*
* It might be the case that when an interrupt is triggered,
* the spl is raised to the highest level by splhigh(). Later,
* when do_splx() is called to restore the spl, both hardware
* and software interrupt pending flags are checked, and a
* SOFTINT is faked accordingly.
*/
(void) apix_do_softint(rp);
ASSERT(!interrupts_enabled());
#ifdef TRAPTRACE
#endif
return;
}
/*
* Send EOI to local APIC
*/
#ifdef TRAPTRACE
#endif /* TRAPTRACE */
/*
* Bail if it is a spurious interrupt
*/
if (newipl == -1)
return;
#ifdef TRAPTRACE
#endif /* TRAPTRACE */
/*
* Direct dispatch for IPI, MSI, MSI-X
*/
if (newipl > LOCK_LEVEL) {
== 0) {
vector, 0);
} else {
}
} else {
}
} else {
/* Add to per-pil pending queue */
return;
}
return;
do {
/*
* Deliver any pending soft interrupts.
*/
(void) apix_do_softint(rp);
}
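/*
 * Illustrative summary of the dispatch policy in the routine above:
 * after the level-set/EOI step, a spurious vector bails out; IPI, MSI
 * and MSI-X vectors are dispatched directly, on the interrupt stack
 * above LOCK_LEVEL or in an interrupt thread below it; fixed vectors
 * are queued per-PIL; and pending soft interrupts are delivered last.
 * All names are hypothetical stand-ins; compiled out.
 */
#if 0
#define	SK_LOCK_LEVEL	10

extern int sk_setlvl(int vector);	/* EOIs; new IPL, -1 if spurious */
extern int sk_is_direct(int vector);	/* IPI, MSI or MSI-X? */
extern void sk_dispatch_hilevel(int vector);	/* on interrupt stack */
extern void sk_dispatch_lowlevel(int vector);	/* in interrupt thread */
extern void sk_queue_pending(int vector);	/* per-PIL pending list */
extern void sk_do_softints(void);

static void
sk_do_interrupt(int vector)
{
	int newipl = sk_setlvl(vector);	/* sends the EOI as a side effect */

	if (newipl == -1)
		return;			/* spurious interrupt */

	if (sk_is_direct(vector)) {
		/* direct dispatch for IPI, MSI, MSI-X */
		if (newipl > SK_LOCK_LEVEL)
			sk_dispatch_hilevel(vector);
		else
			sk_dispatch_lowlevel(vector);
	} else {
		/* fixed interrupts go to the per-PIL pending queue */
		sk_queue_pending(vector);
	}

	/* deliver any soft interrupts the new priority allows */
	sk_do_softints();
}
#endif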