x_call.c revision 8b9d661eebb0946e8334fdbe8813867ff6b7c838
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/privregs.h>
#include <sys/sysmacros.h>
#ifdef TRAPTRACE
#endif /* TRAPTRACE */
static int xc_serv_inum; /* software interrupt number for xc_serv() */
static int xc_loop_inum; /* software interrupt number for xc_loop() */
/*
* Mail box for handshaking and xcall request; protected by xc_sys_mutex
*/
static struct xc_mbox {
/* timeout value for xcalls to be received by the target CPU */
/* timeout value for xcall functions to be executed on the target CPU */
/*
* sending x-calls
*/
void send_one_mondo(int cpuid);
/*
* Adjust xc_attention timeout if a faster cpu is dynamically added.
* Ignore the dynamic removal of a cpu that would lower these timeout
* values.
*/
static int
switch (what) {
case CPU_ON:
case CPU_INIT:
case CPU_CONFIG:
case CPU_CPUPART_IN:
}
break;
case CPU_OFF:
case CPU_UNCONFIG:
case CPU_CPUPART_OUT:
default:
break;
}
return (0);
}
/*
* xc_init - initialize x-call related locks
*/
void
xc_init(void)
{
int pix;
#ifdef TRAPTRACE
/* Initialize for all possible CPUs. */
}
#endif /* TRAPTRACE */
/*
* Initialize the calibrated tick limit for send_mondo.
* The value represents the maximum tick count to wait.
*/
/*
* Maximum number of loops to wait before timing out in xc_attention.
*/
}
/*
* Maximum number of loops to wait for a xcall function to be
* executed on the target CPU.
*/
}
/*
* The following routines basically provide callers with two kinds of
* inter-processor interrupt services:
* 1. cross calls (x-calls) - requests are handled at target cpu's TL=0
 * 2. cross traps (x-traps) - requests are handled at target cpu's TL>0
*
* Although these routines protect the services from migrating to other cpus
* "after" they are called, it is the caller's choice or responsibility to
* prevent the cpu migration "before" calling them.
*
* X-call routines:
*
* xc_one() - send a request to one processor
* xc_some() - send a request to some processors
* xc_all() - send a request to all processors
*
* Their common parameters:
* func - a TL=0 handler address
* arg1 and arg2 - optional
*
* The services provided by x-call routines allow callers
* to send a request to target cpus to execute a TL=0
* handler.
* The interface of the registers of the TL=0 handler:
* %o0: arg1
* %o1: arg2
*
* X-trap routines:
*
* xt_one() - send a request to one processor
* xt_some() - send a request to some processors
* xt_all() - send a request to all processors
*
* Their common parameters:
* func - a TL>0 handler address or an interrupt number
* arg1, arg2
* optional when "func" is an address;
* 0 when "func" is an interrupt number
*
* If the request of "func" is a kernel address, then
* the target cpu will execute the request of "func" with
* args at "TL>0" level.
* The interface of the registers of the TL>0 handler:
* %g1: arg1
* %g2: arg2
*
* If the request of "func" is not a kernel address, then it has
* to be an assigned interrupt number through add_softintr().
* An interrupt number is an index to the interrupt vector table,
* which entry contains an interrupt handler address with its
* corresponding interrupt level and argument.
* The target cpu will arrange the request to be serviced according
* to its pre-registered information.
* args are assumed to be zeros in this case.
*
* In addition, callers are allowed to capture and release cpus by
* calling the routines: xc_attention() and xc_dismissed().
*/
/*
* xt_one - send a "x-trap" to a cpu
*/
void
{
return;
}
}
/*
* xt_one_unchecked - send a "x-trap" to a cpu without checking for its
 * existence in cpu_ready_set
*/
void
{
int lcx;
int opl;
/*
* Make sure the function address will not be interpreted as a
* dmv interrupt
*/
/*
* It's illegal to send software inums through the cross-trap
* interface.
*/
/*
* same cpu - use software fast trap
*/
} else { /* other cpu - send a mondo to the target cpu */
/*
* other cpu - send a mondo to the target cpu
*/
}
}
/*
* xt_some - send a "x-trap" to some cpus
*/
void
{
int lcx;
int opl;
/*
* Make sure the function address will not be interpreted as a
* dmv interrupt
*/
/*
* It's illegal to send software inums through the cross-trap
* interface.
*/
/*
* only send to the CPU_READY ones
*/
/*
* send to nobody; just return
*/
if (CPUSET_ISNULL(xc_cpuset)) {
return;
}
/*
* don't send mondo to self
*/
/*
* same cpu - use software fast trap
*/
if (CPUSET_ISNULL(xc_cpuset)) {
return;
}
}
}
/*
* xt_all - send a "x-trap" to all cpus
*/
void
{
int lcx;
int opl;
/*
* Make sure the function address will not be interpreted as a
* dmv interrupt
*/
/*
* It's illegal to send software inums through the cross-trap
* interface.
*/
/*
* same cpu - use software fast trap
*/
/*
* don't send mondo to self
*/
if (CPUSET_ISNULL(xc_cpuset)) {
return;
}
}
/*
* xc_one - send a "x-call" to a cpu
*/
void
{
int lcx;
int opl;
int first_time = 1;
/*
* send to nobody; just return
*/
return;
return;
}
/*
* target processor's xc_loop should be waiting
* for the work to do; just set up the xc_mbox
*/
membar_stld();
if (loop_cnt++ > xc_func_time_limit) {
if (sendmondo_in_recover) {
drv_usecwait(1);
loop_cnt = 0;
continue;
}
"xc_state[%d] != XC_WAIT", cix);
}
}
return;
}
/*
 * Avoid deadlock if someone has sent us an xc_loop request while
* we are trying to grab xc_sys_mutex.
*/
/*
* At this point, since we don't own xc_sys_mutex,
* our pil shouldn't run at or above the XCALL_PIL.
*/
/*
* Since xc_holder is not owned by us, it could be that
* no one owns it, or we are not informed to enter into
* xc_loop(). In either case, we need to grab the
* xc_sys_mutex before we write to the xc_mbox, and
* we shouldn't release it until the request is finished.
*/
/*
* Since we own xc_sys_mutex now, we are safe to
 * write to the xc_mbox.
*/
/* xc_serv does membar_stld */
if (loop_cnt++ > xc_func_time_limit) {
if (sendmondo_in_recover) {
drv_usecwait(1);
loop_cnt = 0;
continue;
}
if (first_time) {
first_time = 0;
loop_cnt = 0;
continue;
}
"xc_state[%d] != XC_IDLE", cix);
}
}
xc_spl_enter[lcx] = 0;
}
/*
* xc_some - send a "x-call" to some cpus; sending to self is excluded
*/
void
{
int lcx;
int opl;
/*
* only send to the CPU_READY ones
*/
/*
* send to nobody; just return
*/
if (CPUSET_ISNULL(xc_cpuset)) {
return;
}
/*
* same cpu just do it
*/
if (CPUSET_ISNULL(xc_cpuset)) {
return;
}
}
return;
}
/*
 * Avoid deadlock if someone has sent us an xc_loop request while
* we are trying to grab xc_sys_mutex.
*/
/*
* At this point, since we don't own xc_sys_mutex,
* our pil shouldn't run at or above the XCALL_PIL.
*/
/*
* grab xc_sys_mutex before writing to the xc_mbox
*/
xc_spl_enter[lcx] = 0;
}
/*
* xc_all - send a "x-call" to all cpus
*/
void
{
int lcx;
int opl;
/*
* same cpu just do it
*/
if (CPUSET_ISNULL(xc_cpuset)) {
return;
}
return;
}
/*
 * Avoid deadlock if someone has sent us an xc_loop request while
* we are trying to grab xc_sys_mutex.
*/
/*
* At this point, since we don't own xc_sys_mutex,
* our pil shouldn't run at or above the XCALL_PIL.
*/
/*
* grab xc_sys_mutex before writing to the xc_mbox
*/
xc_spl_enter[lcx] = 0;
}
/*
* xc_attention - paired with xc_dismissed()
*
 * xc_attention() holds the xc_sys_mutex and xc_dismissed() releases it
 * at the end of the critical session.
*/
void
{
int first_time = 1;
/*
* don't migrate the cpu until xc_dismissed() is finished
*/
/*
* only send to the CPU_READY ones
*/
/*
* don't send mondo to self
*/
if (CPUSET_ISNULL(xc_cpuset))
return;
/*
* inform the target processors to enter into xc_loop()
*/
xc_spl_enter[lcx] = 0;
/*
* make sure target processors have entered into xc_loop()
*/
/*
* membar_stld() is done in xc_loop
*/
}
if (CPUSET_ISNULL(tmpset)) {
break;
}
}
}
if (loop_cnt++ > xc_mondo_time_limit) {
if (sendmondo_in_recover) {
drv_usecwait(1);
loop_cnt = 0;
continue;
}
if (first_time) {
first_time = 0;
loop_cnt = 0;
continue;
}
}
}
/*
* xc_sys_mutex remains held until xc_dismissed() is finished
*/
}
/*
* xc_dismissed - paired with xc_attention()
*
* Called after the critical session is finished.
*/
void
{
int pix;
membar_stld();
/*
* only send to the CPU_READY ones
*/
/*
* exclude itself
*/
if (CPUSET_ISNULL(xc_cpuset)) {
xc_holder = -1;
return;
}
/*
* inform other processors to get out of xc_loop()
*/
membar_stld();
if (CPUSET_ISNULL(tmpset)) {
break;
}
}
}
/*
* make sure target processors have exited from xc_loop()
*/
/*
* membar_stld() is done in xc_loop
*/
}
if (CPUSET_ISNULL(tmpset)) {
break;
}
}
}
if (loop_cnt++ > xc_func_time_limit) {
if (sendmondo_in_recover) {
drv_usecwait(1);
loop_cnt = 0;
continue;
}
}
}
xc_holder = -1;
}
/*
* xc_serv - "x-call" handler at TL=0; serves only one x-call request
* runs at XCALL_PIL level.
*/
xc_serv(void)
{
}
membar_stld();
return (1);
}
/*
* if == 1, an xc_loop timeout will cause a panic
* otherwise print a warning
*/
uint_t xc_loop_panic = 0;
/*
 * xc_loop - "x-call" handler at TL=0; capture the cpu for a critical
* session, or serve multiple x-call requests runs at XCALL_PIL level.
*/
xc_loop(void)
{
/*
 * Someone must have owned the xc_sys_mutex;
* no further interrupt (at XCALL_PIL or below) can
* be taken by this processor until xc_loop exits.
*
* The owner of xc_sys_mutex (or xc_holder) can expect
* xc requests use xc_mbox's handshaking for their services
* xt requests at TL>0 will be handled immediately
* xt requests at TL=0:
 * if their handlers' pils are <= XCALL_PIL, then
* they will be handled after xc_loop exits
* (so, they probably should not be used)
* else they will be handled immediately
*
 * For those who are not informed to enter xc_loop,
 * the requests will be handled as follows:
* xc requests will be handled after they grab xc_sys_mutex
* xt requests at TL>0 will be handled immediately
* xt requests at TL=0:
 * if their handlers' pils are <= XCALL_PIL, then
* they will be handled after xc_loop exits
* else they will be handled immediately
*/
membar_stld();
membar_stld();
/*
* reset the timeout counter
* since some work was done
*/
loop_cnt = 0;
} else {
/* patience is a virtue... */
loop_cnt++;
}
if (loop_cnt > xc_func_time_limit) {
if (sendmondo_in_recover) {
drv_usecwait(1);
loop_cnt = 0;
continue;
}
"xc_loop() timeout");
/*
* if the above displayed a warning,
* reset the timeout counter and be patient
*/
loop_cnt = 0;
}
}
membar_stld();
return (1);
}