/* softint.c revision f1fa5dcf799749330b7bd6cdee34de6a17ea3fd4 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/kdi_impl.h>
#include <sys/archsystm.h>
/*
* Handle software interrupts through 'softcall' mechanism
*
* At present softcall mechanism uses a global list headed by softhead.
* Entries are added to tail and removed from head so as to preserve FIFO
* nature of entries in the softcall list. softcall() takes care of adding
* entries to the softtail.
*
* softint must take care of executing the entries in the FIFO
* order. It could be called simultaneously from multiple cpus, however only
* one instance of softint should process the softcall list with the exception
* when CPU is stuck due to high interrupt load and can't execute callbacks.
* State diagram is as follows :-
*
* - Upper half which is same as old state machine
* (IDLE->PEND->DRAIN->IDLE)
*
* - Lower half which steals the entries from softcall queue and execute
* in the context of softint interrupt handler. The interrupt handler
* is fired on a different CPU by sending a cross-call.
*
* Starting state is IDLE.
*
* softint()
*
*
* (c)
* ____________________________________________________
* | ^ ^
* v (a) | (b) |
* IDLE--------------------->PEND--------------------->DRAIN
* ^ | |
* | | |
* | | |
* | | |
* | | |
* | d d
* | | |
* | v v
* | PEND DRAIN
* | (e) & &
* |<-----------------------STEAL STEAL
* ^ |
* | |
* | (e) v
* |_________________________<__________________________|
*
*
*
* Edge (a)->(b)->(c) are same as old state machine and these
* are mutually exclusive state.
*
* a - When an entry is being enqueued to softcall queue then the state
* moves from IDLE to PEND.
*
* b - When interrupt handler has started processing softcall queue.
*
* c - When the interrupt handler has finished processing the softcall
* queue, the state machine goes back to IDLE.
*
* d - softcall() generates another softlevel1 iff interrupt handler
* hasn't run recently.
*
* e - Either PEND|STEAL or DRAIN|STEAL is set. We let softlevel1
* handler exit because we have processed all the entries.
*
* When CPU is being pinned by higher level interrupts for more than
* softcall_delay clock ticks, SOFT_STEAL is OR'ed so that softlevel1
* handler on the other CPU can drain the queue.
*
* These states are needed for softcall mechanism since Solaris has only
* one interface (ie. siron ) as of now for :
*
* - raising a soft interrupt architecture independently (ie not through
* setsoftint(..) )
* - to process the softcall queue.
*/
#define NSOFTCALLS 200
/*
 * Defined states for softcall processing.
 *
 * NOTE(review): the SOFT_* state #defines (SOFT_IDLE, SOFT_PEND,
 * SOFT_DRAIN, SOFT_STEAL) referenced throughout this file appear to
 * have been dropped from this copy -- only the softcall_t typedef
 * follows.  Recover them from upstream before building.
 */
/*
 * One pending softcall: a deferred function invocation queued by
 * softcall() and executed later at soft-interrupt level by softint().
 * NOTE(review): upstream versions also carry a sc_next link for the
 * free/pending lists; it is absent in this copy -- confirm against
 * the original before use.
 */
typedef struct softcall {
void (*sc_func)(void *); /* function to call */
void *sc_arg; /* arg to pass to func */
} softcall_t;
/*
* softcall list and state variables.
*/
static softcall_t *softcalls;
static uint_t softcall_state;
static clock_t softcall_tick;
/*
* This ensures that softcall entries don't get stuck for long. It's expressed
* in 10 milliseconds as 1 unit. When hires_tick is set or other clock frequency
* is used, softcall_init() ensures that it's still expressed as 1 = 10 milli
* seconds.
*/
static int softcall_delay = 1;
/*
* The last CPU which will drain softcall queue.
*/
static int softcall_latest_cpuid = -1;
/*
 * CPUSET to hold the CPU which is processing softcall queue
 * currently. There can be more than one CPU having bit set
 * but it will happen only when they are stuck.
 *
 * NOTE(review): the softcall_cpuset declaration this comment describes
 * is missing here, yet softcall_cpuset is referenced below in softint()
 * -- the declaration appears to have been dropped from this copy.
 */
/*
* protects softcall lists and control variable softcall_state.
*/
static kmutex_t softcall_lock;
static void (*kdi_softcall_func)(void);
extern void siron_poke_cpu(cpuset_t);
extern void siron(void);
extern void kdi_siron(void);
/*
 * One-time initialization of the softcall machinery.
 *
 * NOTE(review): this function body has been truncated in this copy of
 * the file.  The closing brace immediately after the local declaration
 * ends the function early, leaving the softcall_delay clamp below
 * stranded outside any function (this will not compile).  The missing
 * middle -- allocation/initialization of the softcalls array, list
 * heads, softcall_lock, initial softcall_state/softcall_tick, and the
 * conversion of softcall_delay into clock ticks -- must be recovered
 * from the upstream source.
 */
void
softcall_init(void)
{
softcall_t *sc;
}
if (softcall_delay < 0)
softcall_delay = 1;
/*
 * Since softcall_delay is expressed as 1 = 10 milliseconds.
 */
}
/*
* Gets called when softcall queue is not moving forward. We choose
* a CPU and poke except the ones which are already poked.
*/
/*
 * Pick a CPU to poke so it will drain the softcall queue, skipping
 * CPUs already poked.  Returns 1 if at least one CPU was poked,
 * 0 if there was nothing to poke.
 *
 * NOTE(review): the function name line (softcall_choose_cpu, per the
 * call site in softcall()) and large parts of the body -- the CPU
 * iteration setup, the cpuset tests, the poke itself -- are missing
 * from this copy; what remains will not compile.  Recover the missing
 * lines from upstream.
 */
static int
{
int cpuid = -1;
int s;
/*
 * The hint is to start from current CPU.
 */
do {
/*
 * Don't select this CPU if :
 * - in cpuset already
 * - CPU is not accepting interrupts
 * - CPU is being offlined
 */
(cp == cpu_inmotion))
continue;
/* if CPU is not busy */
if (cp->cpu_intrload == 0) {
break;
}
/*
 * We want to poke CPUs having similar
 * load because we don't know which CPU
 * can acknowledge level1 interrupt. The
 * list of such CPUs should not be large.
 */
if (cpuid != -1) {
/*
 * Put the last CPU chosen because
 * it also has same interrupt load.
 */
cpuid = -1;
}
}
/* if we found a CPU which suits best to poke */
if (cpuid != -1) {
}
if (CPUSET_ISNULL(poke)) {
return (0);
}
/*
 * We first set the bit in cpuset and then poke.
 */
/*
 * If softcall() was called at low pil then we may
 * get preempted before we raise PIL. It should be okay
 * because we are just going to poke CPUs now or at most
 * another thread may start choosing CPUs in this routine.
 */
s = splhigh();
splx(s);
return (1);
}
/*
* Call function func with argument arg
* at some later time at software interrupt priority
*/
/*
 * Queue (func, arg) for deferred execution at soft-interrupt priority.
 * Identical already-pending entries are coalesced; the queue is drained
 * in FIFO order by softint().
 *
 * NOTE(review): the signature line (softcall(void (*func)(void *),
 * void *arg) per the file's description) and much of the body -- the
 * mutex acquisition, the free-list removal, the enqueue to softtail,
 * and the state transitions -- are missing from this copy; what
 * remains will not compile.  Recover the missing lines from upstream.
 */
void
{
softcall_t *sc;
clock_t w;
/*
 * protect against cross-calls
 */
/* coalesce identical softcalls */
goto intr;
}
}
panic("too many softcalls");
if (softhead) {
} else
intr:
if (softcall_state & SOFT_IDLE) {
siron();
w = lbolt - softcall_tick;
return;
}
if (!(softcall_state & SOFT_STEAL)) {
/*
 * We want to give some more chance before
 * fishing around again.
 */
}
/* softcall_lock will be released by this routine */
(void) softcall_choose_cpu();
}
}
}
void
kdi_softcall(void (*func)(void))
{
kdi_siron();
}
/*
* Called to process software interrupts take one off queue, call it,
* repeat.
*
* Note queue may change during call; softcall_lock, state variables
* softcall_state and softcall_latest_cpuid ensures that -
* - we don't have multiple cpus pulling from the list (thus causing
* a violation of FIFO order with an exception when we are stuck).
* - we don't miss a new entry having been added to the head.
* - we don't miss a wakeup.
*/
/*
 * Soft-interrupt (softlevel1) handler: drain the softcall queue in
 * FIFO order, dropping softcall_lock around each callback.
 *
 * NOTE(review): large parts of this body are missing from this copy --
 * the cpuset membership tests on the two 'if (softcall_cpuset != NULL'
 * lines are cut mid-condition, the state-machine transitions and the
 * dequeue of (func, arg) inside the for (;;) loop are gone, and the
 * trailing loop that invokes func() is truncated; what remains will
 * not compile.  Recover the missing lines from upstream.
 */
void
softint(void)
{
void (*func)();
/*
 * Don't process softcall queue if current CPU is quiesced or
 * offlined. This can happen when a CPU is running pause
 * thread but softcall already sent a xcall.
 */
if (softcall_cpuset != NULL &&
goto out;
}
}
} else {
/*
 * The check for softcall_cpuset being
 * NULL is required because it may get
 * called very early during boot.
 */
if (softcall_cpuset != NULL &&
goto out;
}
/*
 * Setting softcall_latest_cpuid to current CPU ensures
 * that there is only one active softlevel1 handler to
 * process softcall queues.
 *
 * Since softcall_lock is dropped before calling
 * func (callback), we need softcall_latest_cpuid
 * to prevent two softlevel1 handlers working on the
 * queue when the first softlevel1 handler gets
 * stuck due to high interrupt load.
 */
/* add ourself to the cpuset */
for (;;) {
}
softcall_latest_cpuid = -1;
break;
}
/*
 * No longer need softcall processing from current
 * interrupt handler because either
 * (a) softcall is in SOFT_IDLE state or
 * (b) There is a CPU already draining softcall
 * queue and the current softlevel1 is no
 * longer required.
 */
if (softcall_latest_cpuid != cpu_id) {
break;
}
}
out:
func();
}
}