apix_utils.c revision 1053f4b7f43c9e146005c0eaf3739db935d3987a
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
/*
* Copyright (c) 2010, Intel Corporation.
* All rights reserved.
*/
#include <sys/processor.h>
#include <sys/smp_impldefs.h>
#include <sys/psm_common.h>
#include <sys/ddi_impldefs.h>
#include <sys/x86_archext.h>
#include <sys/cpc_impl.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/rm_platter.h>
#include <sys/privregs.h>
#include <sys/pci_intr_lib.h>
#include <sys/dditypes.h>
static int apix_get_avail_vector_oncpu(uint32_t, int, int);
static void apix_cleanup_vector(apix_vector_t *);
uint64_t *, int, dev_info_t *);
static void apix_clear_dev_map(dev_info_t *, int, int);
static void apix_wait_till_seen(processorid_t, int);
#define GET_INTR_INUM(ihdlp) \
/*
* Allocate IPI
*
* Return vector number or 0 on error
*/
apix_alloc_ipi(int ipl)
{
int cpun;
int nproc;
if (vector == 0) {
return (0);
}
goto fail;
}
}
}
return (vector);
fail:
while (--cpun >= 0)
return (0);
}
/*
* Add IPI service routine
*/
static int
{
int cpun;
int nproc;
}
return (1);
}
/*
* Find and return first free vector in range (start, end)
*/
static int
{
int i;
if (APIC_CHECK_RESERVE_VECTORS(i))
continue;
return (i);
}
return (0);
}
/*
* Allocate a vector on specified cpu
*
* Return NULL on error
*/
static apix_vector_t *
{
int vector;
/* find free vector */
if (vector == 0)
return (NULL);
return (vecp);
}
/*
 * Allocates "count" contiguous MSI vectors starting at the proper alignment.
 * The caller must ensure that count is a power of 2 and is not < 1.
 *
 * Return first vector number
 */
{
/*
* msibits is the no. of lower order message data bits for the
* allocated MSI vectors and is used to calculate the aligned
* starting vector
*/
/* It has to be contiguous */
for (i = APIX_AVINTR_MIN; i <= APIX_AVINTR_MAX; i++) {
continue;
/*
* starting vector has to be aligned accordingly for
* multiple MSIs
*/
if (msibits)
break;
if (APIC_CHECK_RESERVE_VECTORS(i))
break;
goto done;
}
}
return (NULL);
done:
for (i = 0; i < count; i++) {
goto fail;
if (i == 0)
}
return (startp);
fail:
while (i-- > 0) { /* Free allocated vectors */
}
return (NULL);
}
do {\
if ((_ctrl) & PCI_MSI_64BIT_MASK)\
else\
static void
{
void *intrmap_tbl[PCI_MSI_MAX_INTRS];
"\tdriver = %s, inum=0x%x vector=0x%x apicid=0x%x\n", (void *)dip,
count, 0xff);
for (i = 0; i < count; i++)
&msi_regs);
/* MSI Address */
/* MSI Data: MSI is edge triggered according to spec */
if (type == APIX_TYPE_MSI) {
/* Set the bits to inform how many MSIs are enabled */
if (msi_ctrl & PCI_MSI_64BIT_MASK)
} else if (type == APIX_TYPE_MSIX) {
/* Offset into the "inum"th entry in the MSI-X table */
(inum * PCI_MSIX_VECTOR_SIZE);
}
}
static void
{
if (type == APIX_TYPE_MSI) {
if ((msi_ctrl & PCI_MSI_ENABLE_BIT))
return;
} else if (type == DDI_INTR_TYPE_MSIX) {
/* Offset into "inum"th entry in the MSI-X table & clear mask */
if (!(msi_ctrl & PCI_MSIX_ENABLE_BIT)) {
msi_ctrl);
}
}
}
/*
*/
void
{
else
iflag = intr_clear();
} else {
/* last one */
"apix_pci_msi_enable_vector\n"));
"apix_pci_msi_enable_mode\n"));
}
} else { /* MSI-X */
}
}
}
/*
* Disable the interrupt
*/
void
{
iflag = intr_clear();
case APIX_TYPE_MSI:
/*
* Disable the MSI vector
* Make sure we only disable on the last
* of the multi-MSI support
*/
}
break;
case APIX_TYPE_MSIX:
/*
* Disable the MSI-X vector
*/
/*
* Make sure we only disable on the last MSI-X
*/
}
break;
default:
break;
}
}
/*
 * Mark vector as obsoleted or freed. The vector is marked
 * obsoleted if there are pending requests on it. Otherwise,
 * free the vector. The obsoleted vectors get freed after
 * being serviced.
 *
 * Return 1 on being obsoleted and 0 on being freed.
 */
(AV_PENTRY_PEND | AV_PENTRY_ONPROC)) != 0)
#define LOCAL_WITH_INTR_DISABLED(_cpuid)\
static uint64_t dummy_tick;
int
{
continue;
if (LOCAL_WITH_INTR_DISABLED(cpuid)) {
busy++;
continue;
}
/* check IRR for pending interrupts */
busy++;
if (!busy)
continue;
}
repeats = 0;
do {
repeats++;
tries++)
break;
busy++;
else {
/*
* Interrupt is not in pending list or being serviced.
* However it might be cached in Local APIC's IRR
* register. It's impossible to check another CPU's
* IRR register. Then wait till lower levels finish
* running.
*/
busy++;
}
if (!busy)
}
if (busy) {
return (1);
else {
}
return (1);
}
/* interrupt is not busy */
/* remove from obsoleted list */
}
return (0);
}
/*
* Duplicate number of continuous vectors to specified target vectors.
*/
static void
{
int i, inum;
for (i = 0; i < count; i++) {
/* copy over original one */
continue;
}
}
}
}
static apix_vector_t *
{
return (NULL);
}
}
return (vecp);
}
static void
{
}
static void
{
#ifdef DEBUG
}
case APIX_TYPE_FIXED:
break;
case APIX_TYPE_MSI:
break;
case APIX_TYPE_MSIX:
break;
default:
break;
}
#endif /* DEBUG */
}
/*
* Operations on avintr
*/
do { \
(p)->av_intr_id = intr_id; \
(p)->av_vector = f; \
(p)->av_intarg1 = arg1; \
(p)->av_intarg2 = arg2; \
(p)->av_prilevel = ipl; \
(p)->av_flags = 0; \
/*
* Insert an interrupt service routine into chain by its priority from
* high to low
*/
static void
{
return;
}
/* Free the following autovect chain */
p = prep;
}
return;
}
/* find where it goes in list */
break;
prep = p;
}
return;
}
} else {
/* insert new intpt at beginning of chain */
}
}
/*
* After having made a change to an autovector list, wait until we have
* seen specified cpu not executing an interrupt at that level--so we
* know our change has taken effect completely (no old state in registers,
* etc).
*/
#define APIX_CPU_ENABLED(_cp) \
(quiesce_active == 0 && \
static void
{
return;
/*
* Don't wait if the CPU is quiesced or offlined. This can happen
* when a CPU is running pause thread but hardware triggered an
* interrupt and the interrupt gets queued.
*/
for (;;) {
(!APIX_CPU_ENABLED(cp) ||
return;
}
}
static void
{
int hi_pri = 0;
struct autovec *p;
return;
continue;
}
/*
* This drops the handler from the chain, it can no longer be called.
* However, there is no guarantee that the handler is not currently
* still executing.
*/
/*
* There is a race where we could be just about to pick up the ticksp
* pointer to increment it after returning from the service routine
* in av_dispatch_autovect. Rather than NULL it out let's just point
* it off to something safe so that any final tick update attempt
* won't fault.
*/
}
static struct autovec *
{
struct autovec *p;
/* found the handler */
return (p);
}
}
return (NULL);
}
static apix_vector_t *
{
uchar_t v;
for (n = 0; n < apic_nproc; n++) {
if (!apix_is_cpu_enabled(n))
continue;
for (v = APIX_AVINTR_MIN; v <= APIX_AVINTR_MIN; v++) {
continue;
return (vecp);
}
}
return (NULL);
}
/*
* Add interrupt service routine.
*
* For legacy interrupts (HPET timer, ACPI SCI), the vector is actually
* IRQ no. A vector is then allocated. Otherwise, the vector is already
* allocated. The input argument virt_vect is virtual vector of format
* APIX_VIRTVEC_VECTOR(cpuid, vector).
*
* Return 1 on success, 0 on failure.
*/
int
{
int cpuid;
"on vector 0x%x,0x%x", name,
return (0);
}
if (v >= APIX_IPI_MIN) /* IPIs */
	/*
	 * Scenarios include:
	 * a. add_avintr() is called before irqp initialized (legacy)
	 * b. irqp is initialized, vector is not allocated (fixed)
	 * c. irqp is initialized, vector is allocated (fixed & shared)
	 */
return (0);
} else { /* got virtual vector */
}
/*
* Basically the allocated but not enabled interrupts
* will not get re-targeted. But MSIs in allocated state
* could be re-targeted due to group re-targeting.
*/
}
return (0);
}
}
return (1);
}
/*
* Remove avintr
*
* For fixed, if it's the last one of shared interrupts, free the vector.
* For msi/x, only disable the interrupt but not free the vector, which
* is freed by PSM_XXX_FREE_XXX.
*/
void
{
avfunc f;
return;
} else /* got virtual vector */
return;
}
/*
* It's possible that the interrupt is rebound to a
* different cpu before rem_avintr() is called. Search
* through all vectors once it happens.
*/
== NULL) {
return;
}
}
/* disable interrupt */
/* remove ISR entry */
}
/*
* Device to vector mapping table
*/
static void
{
char *name;
int found = 0;
found++;
break;
}
}
if (!found) {
return;
}
}
void
{
char *name;
found++;
break;
}
}
if (found == 0) { /* not found */
}
"inum=0x%x vector=0x%x/0x%x\n",
}
{
char *name;
return (NULL);
return (vecp);
}
}
return (NULL);
}
/*
* Get minimum inum for specified device, used for MSI
*/
int
{
char *name;
int inum = -1;
if (inum == -1)
else
}
}
return (inum);
}
int
{
char *name;
int inum = -1;
if (inum == -1)
else
}
}
return (inum);
}
/*
* Major to cpu binding, for INTR_ROUND_ROBIN_WITH_AFFINITY cpu
* binding policy
*/
static uint32_t
{
char *name;
}
return (cpu);
}
static void
{
char *name;
/* setup major to cpu mapping */
}
}
/*
* return the cpu to which this intr should be bound.
* Check properties or any other mechanism to see if user wants it
* bound to a specific CPU. If so, return the cpu id with high bit set.
* If not, use the policy to choose a cpu and return the id.
*/
{
char prop_name[32];
if (apic_intr_policy == INTR_LOWEST_PRIORITY) {
"LOWEST PRIORITY, use ROUND ROBIN instead");
}
if (apic_nproc == 1) {
return (0);
}
if (bind_cpu != IRQ_UNINIT) {
return (bind_cpu);
}
}
/*
* search for "drvname"_intpt_bind_cpus property first, the
* syntax of the property should be "a[,b,c,...]" where
* instance 0 binds to cpu a, instance 1 binds to cpu b,
* instance 3 binds to cpu c...
* ddi_getlongprop() will search /option first, then /
* if "drvname"_intpt_bind_cpus doesn't exist, then find
* intpt_bind_cpus property. The syntax is the same, and
* it applies to all the devices if its "drvname" specific
* property doesn't exist
*/
if (rc != DDI_PROP_SUCCESS) {
}
}
if (rc == DDI_PROP_SUCCESS) {
if (prop_val[i] == ',')
count++;
count++;
/*
* if somehow the binding instances defined in the
* property are not enough for this instno., then
* reuse the pattern for the next instance until
* it reaches the requested instno
*/
i = 0;
while (i < instno)
if (*cptr++ == ',')
i++;
/* if specific cpu is bogus, then default to cpu 0 */
if (bind_cpu >= apic_nproc) {
bind_cpu = 0;
} else {
/* indicate that we are bound at user request */
}
/*
* no need to check apic_cpus[].aci_status, if specific cpu is
* not up, then post_cpu_start will handle it.
*/
} else {
}
}
static boolean_t
{
return (B_FALSE);
return (B_TRUE);
}
/*
* Must be called with apix_lock held. This function can be
* called from above lock level by apix_intr_redistribute().
*
* Arguments:
* vecp : Vector to be rebound
* tocpu : Target cpu. IRQ_UNINIT means target is vecp->v_cpuid.
* count : Number of continuous vectors
*
* Return new vector being bound to
*/
{
int i;
if (!apix_is_cpu_enabled(newcpu))
return (NULL);
return (vecp);
/* allocate vector */
if (count == 1)
else {
}
return (NULL);
}
int inum;
/* undo duplication */
inum);
}
}
"interrupt 0x%x to cpu %d failed\n",
return (NULL);
}
(void) apix_obsolete_vector(vecp);
" 0x%x/0x%x to 0x%x/0x%x\n",
return (newp);
}
for (i = 0; i < count; i++) {
}
(void) apix_obsolete_vector(oldp);
}
"to 0x%x/0x%x, count=%d\n",
}
/*
 * Scenarios include:
 * a. add_avintr() is called before irqp initialized (legacy)
 * b. irqp is initialized, vector is not allocated (fixed interrupts)
 * c. irqp is initialized, vector is allocated (shared interrupts)
 */
{
/*
* Allocate IRQ. Caller is later responsible for the
* initialization
*/
/* allocate irq */
}
}
/*
* allocate vector
*/
/* select cpu by system policy */
/* allocate vector */
APIX_TYPE_FIXED)) == NULL) {
irqno);
return (NULL);
}
} else {
}
return (vecp);
}
int
{
"inum=0x%x count=0x%x behavior=%d\n",
if (count > 1) {
if (behavior == DDI_INTR_ALLOC_STRICT &&
apic_multi_msi_enable == 0)
return (0);
if (apic_multi_msi_enable == 0)
count = 1;
}
/* Check whether it supports per-vector masking */
/* bind to cpu */
/* if not ISP2, then round it down */
break;
}
"apix_alloc_msi: no %d cont vectors found on cpu 0x%x\n",
return (0);
}
/* major to cpu binding */
if ((apic_intr_policy == INTR_ROUND_ROBIN_WITH_AFFINITY) &&
return (rcount);
}
int
{
int i;
for (i = 0; i < count; i++) {
/* select cpu by system policy */
/* allocate vector */
APIX_TYPE_MSIX)) == NULL) {
"allocate msix for device dip=%p, inum=%d on"
break;
}
/* major to cpu mapping */
if ((i == 0) &&
}
"strictly allocate %d vectors failed, got %d\n",
count, i));
i = 0;
}
return (i);
}
/*
* A rollback free for vectors allocated by apix_alloc_xxx().
*/
void
{
int i, cpuid;
"count: %x type: %x\n",
"dip=0x%p inum=0x%x type=0x%x apix_find_intr() "
continue;
}
"dip=0x%p inum=0x%x type=0x%x vector 0x%x (share %d)\n",
/* tear down device interrupt to vector mapping */
continue;
}
/* Free apic_irq_table entry */
}
/* free vector */
}
}
/*
* Must be called with apix_lock held
*/
{
int ret;
/*
* Interrupts are enabled on the CPU, programme IOAPIC RDT
*/
return (vecp);
}
	/*
	 * CPU is not up or interrupts are disabled. Fall back to the
	 * first available CPU.
	 */
}
/*
* For interrupts which call add_avintr() before apic is initialized.
* ioapix_setup_intr() will
* - allocate vector
* - copy over ISR
*/
static void
{
} else {
irqp->airq_share++;
}
/* copy over autovect */
/* Program I/O APIC */
iflag = intr_clear();
(void) apix_setup_io_intr(vecp);
"(ioapic %x, ipin %x) is bound to cpu %x, vector %x\n",
}
void
{
int ioapicindex;
int i, j;
/* mask interrupt vectors */
for (j = 0; j < apic_io_max && mask_apic; j++) {
int intin_max;
ioapicindex = j;
/* Bits 23-16 define the maximum redirection entries */
& 0xff;
for (i = 0; i <= intin_max; i++)
AV_MASK);
}
/*
*/
if (apic_sci_vect > 0)
/*
*/
if (apic_hpet_vect > 0)
}