/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2010, Intel Corporation.
* All rights reserved.
*/
/*
* PSMI 1.1 extensions are supported only in 2.6 and later versions.
* PSMI 1.2 extensions are supported only in 2.7 and later versions.
* PSMI 1.3 and 1.4 extensions are supported in Solaris 10.
* PSMI 1.5 extensions are supported in Solaris Nevada.
* PSMI 1.6 extensions are supported in Solaris Nevada.
* PSMI 1.7 extensions are supported in Solaris Nevada.
*/
#define PSMI_1_7
#include <sys/processor.h>
#include <sys/smp_impldefs.h>
#include <sys/psm_common.h>
#include <sys/ddi_impldefs.h>
#include <sys/x86_archext.h>
#include <sys/cpc_impl.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/rm_platter.h>
#include <sys/privregs.h>
#include <sys/pci_intr_lib.h>
#include <sys/dditypes.h>
#include <sys/mach_intr.h>
#include <sys/apix_irm_impl.h>
static int apix_probe();
static void apix_init();
static void apix_picinit(void);
static int apix_intr_enter(int, int *);
static void apix_intr_exit(int, int);
static void apix_setspl(int);
static int apix_disable_intr(processorid_t);
static void apix_enable_intr(processorid_t);
static int apix_get_clkvect(int);
static int apix_get_ipivect(int, int);
static void apix_post_cyclic_setup(void *);
static int apix_post_cpu_start();
static int apix_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
    psm_intr_op_t, int *);
/*
* Helper functions for apix_intr_ops()
*/
static void apix_redistribute_compute(void);
static int apix_get_pending(apix_vector_t *);
static char *apix_get_apic_type(void);
static int apix_intx_get_pending(int);
static void apix_intx_set_mask(int irqno);
static void apix_intx_clear_mask(int irqno);
static int apix_intx_get_shared(int irqno);
static void apix_intx_set_shared(int irqno, int delta);
static apix_vector_t *apix_intx_xlate_vector(dev_info_t *, int,
    struct intrspec *);
extern int apic_clkinit(int);
/* IRM initialization for APIX PSM module */
extern void apix_irm_init(void);
extern int irm_enable;
/*
* Local static data
*/
NULL, /* psm_softlvl_to_irq */
NULL, /* psm_set_softintr */
NULL, /* psm_hrtimeinit */
NULL, /* psm_translate_irq */
NULL, /* psm_notify_error */
NULL, /* psm_notify_func */
apix_intr_ops, /* Advanced DDI Interrupt framework */
apic_state, /* save, restore apic state for S3 */
apic_cpu_ops, /* CPU control interface. */
};
static struct psm_info apix_psm_info = {
PSM_INFO_VER01_7, /* version */
PSM_OWN_EXCLUSIVE, /* ownership */
&apix_ops, /* operation */
APIX_NAME, /* machine name */
"apix MPv1.4 compatible",
};
static void *apix_hdlp;
static int apix_is_enabled = 0;
/*
* Flag to indicate if APIX is to be enabled only for platforms
* with specific hw feature(s).
*/
int apix_hw_chk_enable = 1;
/*
* Hw features that are checked for enabling APIX support.
*/
#define APIX_SUPPORT_X2APIC 0x00000001
/*
* apix_lock is used for cpu selection and vector re-binding
*/
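/* sketch of the elided declaration; the exact lock type is an assumption */
lock_t apix_lock;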
/*
* Mapping between device interrupt and the allocated vector. Indexed
* by major number.
*/
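/* sketch of the elided table; apix_dev_vector_t is assumed from the apix headers */
apix_dev_vector_t **apix_dev_vector = NULL;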
/*
 * Mapping between device major number and cpu id. It is used when the
 * round-robin-with-affinity interrupt binding policy is applied. With
 * that policy, devices with the same major number are bound to the
 * same CPU.
*/
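/* sketch of the elided table; the name comes from its use in apix_softinit() */
int *apix_major_to_cpu = NULL;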
/*
* Maximum number of vectors in a CPU that can be used for interrupt
* allocation (including IPIs and the reserved vectors).
*/
int apix_cpu_nvectors = APIX_NVECTOR;
/* gcpu.h */
extern void apic_change_eoi();
/*
* This is the loadable module wrapper
*/
int
_init(void)
{
	if (apic_coarse_hrtime)
		apix_ops.psm_gethrtime = &apic_gettime;
	return (psm_mod_init(&apix_hdlp, &apix_psm_info));
}
int
_fini(void)
{
	return (psm_mod_fini(&apix_hdlp, &apix_psm_info));
}
int
_info(struct modinfo *modinfop)
{
	return (psm_mod_info(&apix_hdlp, &apix_psm_info, modinfop));
}
static int
apix_probe()
{
int rval;
if (apix_enable == 0)
return (PSM_FAILURE);
/* check for hw features if specified */
if (apix_hw_chk_enable) {
/* check if x2APIC mode is supported */
		if ((apix_supported_hw & APIX_SUPPORT_X2APIC) ==
		    APIX_SUPPORT_X2APIC) {
if (!((apic_local_mode() == LOCAL_X2APIC) ||
apic_detect_x2apic())) {
/* x2APIC mode is not supported in the hw */
apix_enable = 0;
}
}
if (apix_enable == 0)
return (PSM_FAILURE);
	}

	rval = apic_probe_common(apix_psm_info.p_mach_idstring);
	if (rval == PSM_SUCCESS)
apix_is_enabled = 1;
else
apix_is_enabled = 0;
return (rval);
}
/*
 * Initialize the data structures needed by the pcplusmpx module.
 * Specifically, the data structures used by the addspl() and delspl()
 * routines.
 */
static void
apix_softinit()
{
int i, *iptr;
int nproc;
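	/* sketch of the elided sizing; apic_max_nproc is an assumption */
	nproc = max(apic_nproc, apic_max_nproc);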
for (i = 0; i < nproc; i++) {
}
/* cpu 0 is always up (for now) */
iptr = (int *)&apic_irq_table[0];
for (i = 0; i <= APIC_MAX_VECTOR; i++) {
		apic_level_intr[i] = 0;
		*iptr++ = 0;
}
	/* allocate memory for mapping dev instance to vector */
	apix_dev_vector = kmem_zalloc(sizeof (apix_dev_vector_t *) * devcnt,
	    KM_SLEEP);

	/* allocate memory for mapping dev major to cpu id */
	if (apic_intr_policy == INTR_ROUND_ROBIN_WITH_AFFINITY) {
		apix_major_to_cpu = kmem_zalloc(sizeof (int) * devcnt,
		    KM_SLEEP);
for (i = 0; i < devcnt; i++)
apix_major_to_cpu[i] = IRQ_UNINIT;
}
}
static int
apix_get_pending_spl(void)
{
}
static uintptr_t
apix_get_intr_handler(int cpu, short vec)
{
if (cpu >= apic_nproc)
return (NULL);
}
#if defined(__amd64)
/* dummy priority table pointed at by cpu_pri_data; the name is assumed */
static unsigned char dummy_cpu_pri[MAXIPL + 1] = {
	0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0
};
#endif
static void
apix_init()
{
#if defined(__amd64)
/*
* Make cpu-specific interrupt info point to cr8pri vector
*/
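	/*
	 * Sketch of the elided assignment, pointing at the dummy table
	 * declared above; the cpu_pri_data field is an assumption.
	 */
	CPU->cpu_pri_data = dummy_cpu_pri;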
#else
if (cpuid_have_cr8access(CPU))
apic_have_32bit_cr8 = 1;
#endif /* __amd64 */
/*
* Initialize IRM pool parameters
*/
if (irm_enable) {
int i;
int lowest_irq;
int highest_irq;
/* number of CPUs present */
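		/* sketch of the elided assignment; field name assumed */
		apix_irminfo.apix_ncpus = apic_nproc;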
/* total number of entries in all of the IOAPICs present */
lowest_irq = apic_io_vectbase[0];
highest_irq = apic_io_vectend[0];
for (i = 1; i < apic_io_max; i++) {
if (apic_io_vectbase[i] < lowest_irq)
lowest_irq = apic_io_vectbase[i];
if (apic_io_vectend[i] > highest_irq)
highest_irq = apic_io_vectend[i];
}
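		/* sketch of the elided computation; field name assumed */
		apix_irminfo.apix_ioapic_max_vectors =
		    highest_irq - lowest_irq + 1;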
/*
* Number of available per-CPU vectors excluding
* reserved vectors for Dtrace, int80, system-call,
* fast-trap, etc.
*/
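		/*
		 * Sketch; APIX_NAVINTR and APIX_SW_RESERVED_VECTORS are
		 * assumptions about the apix headers.
		 */
		apix_irminfo.apix_per_cpu_vectors = APIX_NAVINTR -
		    APIX_SW_RESERVED_VECTORS;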
		/* Number of vectors (pre) allocated (SCI and HPET) */
		apix_irminfo.apix_vectors_allocated = 0;
		if (apic_hpet_vect != -1)
			apix_irminfo.apix_vectors_allocated++;
		if (apic_sci_vect != -1)
			apix_irminfo.apix_vectors_allocated++;
}
}
static void
apix_init_intr()
{
	processorid_t cpun = psm_get_cpu_id();
	uint_t nlvt;
	uint32_t svr = AV_UNIT_ENABLE | APIC_SPUR_INTR;
	extern void cmi_cmci_trap(void);
if (apic_mode == LOCAL_APIC) {
/*
* We are running APIC in MMIO mode.
*/
		if (apic_flat_model) {
			apic_reg_ops->apic_write(APIC_FORMAT_REG,
			    APIC_FLAT_MODEL);
		} else {
			apic_reg_ops->apic_write(APIC_FORMAT_REG,
			    APIC_CLUSTER_MODEL);
		}

		apic_reg_ops->apic_write(APIC_DEST_REG,
		    AV_HIGH_ORDER >> cpun);
	}
if (apic_directed_EOI_supported()) {
/*
* Setting the 12th bit in the Spurious Interrupt Vector
* Register suppresses broadcast EOIs generated by the local
* APIC. The suppression of broadcast EOIs happens only when
* interrupts are level-triggered.
*/
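		/* sketch of the elided statement; the bit name is assumed */
		svr |= APIC_SVR_SUPPRESS_BROADCAST_EOI;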
}
/* need to enable APIC before unmasking NMI */
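	/* sketch of the elided writes: program the SVR, then unmask NMI */
	apic_reg_ops->apic_write(APIC_SPUR_INT_REG, svr);
	apic_reg_ops->apic_write(APIC_INT_VECT1, AV_NMI);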
/*
* Presence of an invalid vector with delivery mode AV_FIXED can
* cause an error interrupt, even if the entry is masked...so
* write a valid vector to LVT entries along with the mask bit
*/
/* All APICs have timer and LINT0/1 */
/*
* On integrated APICs, the number of LVT entries is
* 'Max LVT entry' + 1; on 82489DX's (non-integrated
* APICs), nlvt is "3" (LINT0, LINT1, and timer)
*/
	if (apic_cpus[cpun].aci_local_ver < APIC_INTEGRATED_VERS) {
		nlvt = 3;
	} else {
		nlvt = ((apic_reg_ops->apic_read(APIC_VERS_REG) >> 16) &
		    0xFF) + 1;
}
if (nlvt >= 5) {
/* Enable performance counter overflow interrupt */
if (apic_enable_cpcovf_intr) {
if (apic_cpcovf_vect == 0) {
				int ipl = APIC_PCINT_IPL;

				apic_cpcovf_vect = apix_get_ipivect(ipl, -1);
				(void) add_avintr(NULL, ipl,
				    (avfunc)apic_cpcovf_mask_clear,
				    "apic pcint", apic_cpcovf_vect,
				    NULL, NULL, NULL, NULL);
}
}
}
if (nlvt >= 6) {
/* Only mask TM intr if the BIOS apparently doesn't use it */
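		/*
		 * Sketch of the elided check: mask the thermal LVT unless
		 * its delivery mode is SMI (register names assumed).
		 */
		uint32_t lvtval;

		lvtval = apic_reg_ops->apic_read(APIC_THERM_VECT);
		if (((lvtval & AV_MASK) == AV_MASK) ||
		    ((lvtval & AV_DELIV_MODE) != AV_SMI))
			apic_reg_ops->apic_write(APIC_THERM_VECT,
			    AV_MASK | APIC_RESV_IRQ);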
}
}
/* Enable error interrupt */
if (apic_errvect == 0) {
/*
* Not PSMI compliant, but we are going to merge
* with ON anyway
*/
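		/*
		 * Sketch of the elided body: allocate a high-priority IPI
		 * vector and register the error handler.
		 */
		apic_errvect = apix_get_ipivect(0xf, -1);
		(void) add_avintr(NULL, 0xf, (avfunc)apic_error_intr,
		    "apic error intr", apic_errvect,
		    NULL, NULL, NULL, NULL);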
}
}
/* Enable CMCI interrupt */
if (cmi_enable_cmci) {
if (cmci_cpu_setup_registered == 0) {
}
if (apic_cmci_vect == 0) {
int ipl = 0x2;
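			/* sketch of the elided registration */
			apic_cmci_vect = apix_get_ipivect(ipl, -1);
			(void) add_avintr(NULL, ipl, (avfunc)cmi_cmci_trap,
			    "apic cmci intr", apic_cmci_vect,
			    NULL, NULL, NULL, NULL);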
}
}
}
static void
apix_picinit(void)
{
	int i, j;
	uint_t isr;
/*
* initialize interrupt remapping before apic
* hardware initialization
*/
	apic_intrmap_init(apic_mode);
	if (apic_vt_ops == psm_vt_ops)
		apix_mul_ioapic_method = APIC_MUL_IOAPIC_IIR;
	/*
	 * On UniSys Model 6520, the BIOS leaves vector 0x20 isr
	 * bit on without clearing it with EOI. Since softint
	 * uses vector 0x20 to interrupt itself, softint will
	 * not work on this machine. In order to fix this problem
	 * a check is made to verify all the isr bits are clear.
	 * If not, EOIs are issued to clear the bits.
	 */
for (i = 7; i >= 1; i--) {
		if ((isr = apic_reg_ops->apic_read(APIC_ISR_REG +
		    (i * 4))) != 0)
for (j = 0; ((j < 32) && (isr != 0)); j++)
if (isr & (1 << j)) {
					apic_reg_ops->apic_write(
					    APIC_EOI_REG, 0);
isr &= ~(1 << j);
}
}
/* set a flag so we know we have run apic_picinit() */
apic_picinit_called = 1;
picsetup(); /* initialise the 8259 */
/* add nmi handler - least priority nmi handler */
/* enable apic mode if imcr present */
if (apic_imcrp) {
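		/* sketch: switch the IMCR from 8259 to APIC delivery */
		outb(APIC_IMCR_P1, (uchar_t)APIC_IMCR_SELECT);
		outb(APIC_IMCR_P2, (uchar_t)APIC_IMCR_APIC);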
}
/* setup global IRM pool if applicable */
	if (irm_enable)
		apix_irm_init();
}
static __inline__ void
apix_send_eoi(void)
{
	if (apic_mode == LOCAL_APIC)
		LOCAL_APIC_WRITE_REG(APIC_EOI_REG, 0);
	else
X2APIC_WRITE(APIC_EOI_REG, 0);
}
/*
* platform_intr_enter
*
* Called at the beginning of the interrupt service routine to
 * mask all levels equal to and below the interrupt priority
* of the interrupting vector. An EOI should be given to
* the interrupt controller to enable other HW interrupts.
*
* Return -1 for spurious interrupts
*
*/
static int
apix_intr_enter(int ipl, int *vectorp)
{
	uchar_t vector;
	int nipl = -1;
/*
* The real vector delivered is (*vectorp + 0x20), but our caller
* subtracts 0x20 from the vector before passing it to us.
* (That's why APIC_BASE_VECT is 0x20.)
*/
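	/* sketch of the elided rebias back to the real vector */
	vector = *vectorp = (uchar_t)*vectorp + APIC_BASE_VECT;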
if (vector == APIC_SPUR_INTR) {
return (APIC_INT_SPURIOUS);
}
if (APIX_IS_FAKE_INTR(vector))
return (nipl);
}
/* if interrupted by the clock, increment apic_nsec_since_boot */
if (!apic_oneshot) {
/* NOTE: this is not MT aware */
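			/* sketch of the elided clock bookkeeping */
			apic_hrtime_stamp++;
			apic_nsec_since_boot += apic_nsec_per_intr;
			apic_hrtime_stamp++;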
}
return (nipl);
}
/* pre-EOI handling for level-triggered interrupts */
/* send back EOI */
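	/* sketch of the elided call, using the helper defined above */
	apix_send_eoi();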
}
#ifdef DEBUG
if (vector >= APIX_IPI_MIN)
return (nipl); /* skip IPI */
#endif /* DEBUG */
return (nipl);
}
/*
* Any changes made to this function must also change X2APIC
* version of intr_exit.
*/
static void
apix_intr_exit(int prev_ipl, int arg2)
{
int cpuid = psm_get_cpu_id();
/* ISR above current pri could not be in progress */
if (APIX_CPU_LOCK_HELD(cpuid))
return;
}
}
/*
* Mask all interrupts below or equal to the given IPL.
* Any changes made to this function must also change X2APIC
* version of setspl.
*/
static void
apix_setspl(int ipl)
{
/* interrupts at ipl above this cannot be in progress */
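	/* sketch of the elided bookkeeping (field from apic_cpus_info) */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;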
/*
* Mask all interrupts for XC_HI_PIL (i.e set TPR to 0xf).
* Otherwise, enable all interrupts (i.e. set TPR to 0).
*/
	if (ipl != XC_HI_PIL)
		ipl = 0;
#if defined(__amd64)
	/* sketch: with XC_HI_PIL == 0xf this masks all, 0 unmasks all */
	setcr8((ulong_t)ipl);
#else
	if (apic_have_32bit_cr8)
		setcr8((ulong_t)ipl);
	else
		LOCAL_APIC_WRITE_REG(APIC_TASK_REG,
		    (uint32_t)ipl << APIC_IPL_SHIFT);
#endif
/*
* this is a patch fix for the ALR QSMP P5 machine, so that interrupts
* have enough time to come in before the priority is raised again
* during the idle() loop.
*/
if (apic_setspl_delay)
(void) apic_reg_ops->apic_get_pri();
}
/*
* X2APIC version of setspl.
*/
static void
x2apix_setspl(int ipl)
{
/* interrupts at ipl above this cannot be in progress */
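	/* sketch of the elided bookkeeping, as in apix_setspl() above */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;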
/*
* Mask all interrupts for XC_HI_PIL (i.e set TPR to 0xf).
* Otherwise, enable all interrupts (i.e. set TPR to 0).
*/
	if (ipl != XC_HI_PIL)
		ipl = 0;

	/* sketch: program the TPR via the x2APIC interface (form assumed) */
	X2APIC_WRITE(APIC_TASK_REG, (uint32_t)ipl << APIC_IPL_SHIFT);
}
int
apix_addspl(int virtvec, int ipl, int min_ipl, int max_ipl)
{
	/* xv_vector() lookup is assumed from the apix headers */
	uint32_t cpuid = APIX_VIRTVEC_CPU(virtvec);
	uchar_t vector = (uchar_t)APIX_VIRTVEC_VECTOR(virtvec);
	apix_vector_t *vecp = xv_vector(cpuid, vector);
	/* There are more interrupts, so it's already been enabled */
	if (vecp->v_share > 1)
		return (PSM_SUCCESS);

	/* return if it is not hardware interrupt */
	if (vecp->v_type == APIX_TYPE_IPI)
		return (PSM_SUCCESS);
/*
* if apix_picinit() has not been called yet, just return.
* At the end of apic_picinit(), we will call setup_io_intr().
*/
if (!apic_picinit_called)
return (PSM_SUCCESS);
(void) apix_setup_io_intr(vecp);
return (PSM_SUCCESS);
}
int
apix_delspl(int virtvec, int ipl, int min_ipl, int max_ipl)
{
	uint32_t cpuid = APIX_VIRTVEC_CPU(virtvec);
	uchar_t vector = (uchar_t)APIX_VIRTVEC_VECTOR(virtvec);
	apix_vector_t *vecp = xv_vector(cpuid, vector);
	/* There are more interrupts */
	if (vecp->v_share > 1)
		return (PSM_SUCCESS);

	/* return if it is not hardware interrupt */
	if (vecp->v_type == APIX_TYPE_IPI)
		return (PSM_SUCCESS);
if (!apic_picinit_called) {
virtvec);
return (PSM_SUCCESS);
}
return (PSM_SUCCESS);
}
/*
 * Try to disable all interrupts. We just assign interrupts to other
 * processors based on policy. If any were bound by user request, we
 * let them continue and return failure. We do not bother to check
 * for cache affinity while rebinding.
 */
static int
apix_disable_intr(processorid_t cpun)
{
	int i, hardbound = 0, errbound = 0;
	int loop = 0, type = 0;
	apix_vector_t *vecp;
/* if this is for SUSPEND operation, skip rebinding */
for (i = APIX_AVINTR_MIN; i <= APIX_AVINTR_MAX; i++) {
if (!IS_VECT_ENABLED(vecp))
continue;
}
return (PSM_SUCCESS);
}
for (i = APIX_AVINTR_MIN; i <= APIX_AVINTR_MAX; i++) {
if (!IS_VECT_ENABLED(vecp))
continue;
hardbound++;
continue;
}
/*
* If there are bound interrupts on this cpu, then
* rebind them to other processors.
*/
loop = 0;
do {
if (type != APIX_TYPE_MSI)
else
if (loop >= apic_nproc) {
errbound++;
}
	}

	if (hardbound || errbound) {
		cmn_err(CE_WARN, "Could not disable interrupts on CPU %d "
		    "due to user bound interrupts or failed operation",
		    cpun);
return (PSM_FAILURE);
}
return (PSM_SUCCESS);
}
/*
* Bind interrupts to specified CPU
*/
static void
apix_enable_intr(processorid_t cpun)
{
	int i, ret;
	apix_vector_t *vecp;
/* interrupt enabling for system resume */
for (i = APIX_AVINTR_MIN; i <= APIX_AVINTR_MAX; i++) {
if (!IS_VECT_ENABLED(vecp))
continue;
}
}
for (n = 0; n < apic_nproc; n++) {
if (!apic_cpu_in_range(n) || n == cpun ||
continue;
for (i = APIX_AVINTR_MIN; i <= APIX_AVINTR_MAX; i++) {
if (!IS_VECT_ENABLED(vecp) ||
continue;
else
}
}
}
/*
* Allocate vector for IPI
* type == -1 indicates it is an internal request. Do not change
* resv_vector for these requests.
*/
static int
apix_get_ipivect(int ipl, int type)
{
	uchar_t vector;
if (type != -1)
return (vector);
}
return (-1); /* shouldn't happen */
}
static int
apix_get_clkvect(int ipl)
{
int vector;
	if ((vector = apix_get_ipivect(ipl, -1)) == -1)
		return (-1);

	apic_clkvect = vector - APIC_BASE_VECT;
	APIC_VERBOSE(IPI, (CE_CONT, "apix: clock vector = %x\n",
	    apic_clkvect));
return (vector);
}
static int
apix_post_cpu_start()
{
int cpun;
static int cpus_started = 1;
/* We know this CPU + BSP started successfully. */
cpus_started++;
/*
* On BSP we would have enabled X2APIC, if supported by processor,
* in acpi_probe(), but on AP we do it here.
*
 * We enable X2APIC mode only if the BSP is running in X2APIC mode and
 * the local APIC mode of the current CPU is MMIO (xAPIC).
*/
	if (apic_mode == LOCAL_X2APIC &&
	    apic_local_mode() == LOCAL_APIC) {
		/* sketch of the elided body: switch this AP into x2APIC */
		apic_enable_x2apic();
	}
/*
* Switch back to x2apic IPI sending method for performance when target
* CPU has entered x2apic mode.
*/
if (apic_mode == LOCAL_X2APIC) {
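		/* sketch of the elided call; the callback name is assumed */
		apic_switch_ipi_callback(B_FALSE);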
}
	/*
	 * Some systems don't enable the internal cache on the non-boot
	 * CPUs, so we have to enable it here.
	 */
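	/* sketch of the elided cache enable: clear CD and NW in %cr0 */
	setcr0(getcr0() & ~(CR0_CD | CR0_NW));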
#ifdef DEBUG
	APIC_AV_PENDING_SET();
#else
	if (apic_mode == LOCAL_APIC)
		APIC_AV_PENDING_SET();
#endif	/* DEBUG */
/*
* We may be booting, or resuming from suspend; aci_status will
* be APIC_CPU_INTR_ENABLE if coming from suspend, so we add the
* APIC_CPU_ONLINE flag here rather than setting aci_status completely.
*/
cpun = psm_get_cpu_id();
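	/* sketch of the elided status update described above */
	apic_cpus[cpun].aci_status |= APIC_CPU_ONLINE;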
return (PSM_SUCCESS);
}
/*
* If this module needs a periodic handler for the interrupt distribution, it
* can be added here. The argument to the periodic handler is not currently
 * used, but is reserved for future use.
*/
static void
apix_post_cyclic_setup(void *arg)
{
/* cpu_lock is held */
/* set up a periodic handler for intr redistribution */
/*
 * In periodic mode, intr redistribution processing is done in
 * apix_intr_enter during clock intr processing.
*/
if (!apic_oneshot)
return;
/*
 * Register a periodic handler for the redistribution processing.
* On X86, CY_LOW_LEVEL is mapped to the level 2 interrupt, so
* DDI_IPL_2 should be passed to ddi_periodic_add() here.
*/
	apic_periodic_id = ddi_periodic_add(
	    (void (*)(void *))apix_redistribute_compute, NULL,
	    apic_redistribute_sample_interval, DDI_IPL_2);
}
void
x2apic_update_psm(void)
{
	struct psm_ops *pops = &apix_ops;

	ASSERT(pops != NULL);
/*
* The xxx_intr_exit() sets TPR and sends back EOI. The
* xxx_setspl() sets TPR. These two routines are not
* needed in new design.
*
* pops->psm_intr_exit = x2apic_intr_exit;
* pops->psm_setspl = x2apic_setspl;
*/
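	/* sketch of the remaining update: switch to the x2APIC IPI routine */
	pops->psm_send_ipi = x2apic_send_ipi;
	send_dirintf = pops->psm_send_ipi;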
}
/*
* This function provides external interface to the nexus for all
* functionalities related to the new DDI interrupt framework.
*
* Input:
* dip - pointer to the dev_info structure of the requested device
* hdlp - pointer to the internal interrupt handle structure for the
* requested interrupt
* intr_op - opcode for this call
* result - pointer to the integer that will hold the result to be
* passed back if return value is PSM_SUCCESS
*
* Output:
* return value is either PSM_SUCCESS or PSM_FAILURE
*/
static int
apix_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
	int cap;
	int target;
	apix_vector_t *vecp;
	struct intrspec *ispec;
	switch (intr_op) {
	case PSM_INTR_OP_ALLOC_VECTORS:
		switch (hdlp->ih_type) {
		case DDI_INTR_TYPE_MSI:
/* allocate MSI vectors */
break;
case DDI_INTR_TYPE_MSIX:
/* allocate MSI-X vectors */
break;
case DDI_INTR_TYPE_FIXED:
/* allocate or share vector for fixed */
			if ((ispec = (struct intrspec *)hdlp->ih_private) ==
			    NULL) {
				return (PSM_FAILURE);
			}
			*result = apix_intx_alloc_vector(dip, hdlp->ih_inum,
			    ispec);
break;
default:
return (PSM_FAILURE);
}
break;
case PSM_INTR_OP_FREE_VECTORS:
break;
case PSM_INTR_OP_XLATE_VECTOR:
/*
* Vectors are allocated by ALLOC and freed by FREE.
* XLATE finds and returns APIX_VIRTVEC_VECTOR(cpu, vector).
*/
break;
}
		/*
		 * No vector-to-device mapping exists. If this is a FIXED
		 * type interrupt, check whether this IRQ is already mapped
		 * for another device and, if so, return that vector number
		 * (the shared IRQ case). Otherwise, return PSM_FAILURE.
		 */
ispec);
}
if (*result == APIX_INVALID_VECT)
return (PSM_FAILURE);
break;
case PSM_INTR_OP_GET_PENDING:
		if ((vecp = apix_get_req_vector(hdlp, hdlp->ih_flags)) ==
		    NULL)
			return (PSM_FAILURE);
		*result = apix_get_pending(vecp);
break;
case PSM_INTR_OP_CLEAR_MASK:
return (PSM_FAILURE);
return (PSM_FAILURE);
break;
case PSM_INTR_OP_SET_MASK:
return (PSM_FAILURE);
return (PSM_FAILURE);
break;
case PSM_INTR_OP_GET_SHARED:
return (PSM_FAILURE);
return (PSM_FAILURE);
break;
case PSM_INTR_OP_SET_PRI:
/*
* Called prior to adding the interrupt handler or when
* an interrupt handler is unassigned.
*/
return (PSM_SUCCESS);
return (PSM_FAILURE);
break;
case PSM_INTR_OP_SET_CPU:
case PSM_INTR_OP_GRP_SET_CPU:
/*
* The interrupt handle given here has been allocated
* specifically for this command, and ih_private carries
* a CPU value.
*/
if (!apic_cpu_in_range(target)) {
"[grp_]set_cpu: cpu out of range: %d\n", target));
return (PSM_FAILURE);
}
if (!IS_VECT_ENABLED(vecp)) {
"[grp]_set_cpu: invalid vector 0x%x\n",
return (PSM_FAILURE);
}
*result = 0;
if (intr_op == PSM_INTR_OP_SET_CPU)
else
return (PSM_FAILURE);
}
break;
case PSM_INTR_OP_GET_INTR:
/*
* The interrupt handle given here has been allocated
* specifically for this command, and ih_private carries
		 * a pointer to an apic_get_intr_t.
*/
return (PSM_FAILURE);
break;
case PSM_INTR_OP_CHECK_MSI:
		/*
		 * Check whether MSI/X is supported at the APIC level; if
		 * not, mask off the MSI/X bits in hdlp->ih_type before
		 * returning. If MSI/X is supported, leave ih_type
		 * unchanged and return.
		 *
		 * hdlp->ih_type passed in from the nexus has all the
		 * interrupt types supported by the device.
		 */
if (apic_support_msi == 0) { /* uninitialized */
/*
* if apic_support_msi is not set, call
* apic_check_msi_support() to check whether msi
* is supported first
*/
			if (apic_check_msi_support() == PSM_SUCCESS)
				apic_support_msi = 1;
			else
				apic_support_msi = -1;
		}
		if (apic_support_msi == 1) {
			if (apic_msix_enable)
				*result = hdlp->ih_type;
			else
				*result = hdlp->ih_type &
				    ~DDI_INTR_TYPE_MSIX;
		} else
			*result = hdlp->ih_type &
			    ~(DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX);
		break;
	case PSM_INTR_OP_GET_CAP:
		cap = DDI_INTR_FLAG_PENDING;
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			cap |= DDI_INTR_FLAG_MASKABLE;
		*result = cap;
		break;
case PSM_INTR_OP_APIC_TYPE:
break;
case PSM_INTR_OP_SET_CAP:
default:
return (PSM_FAILURE);
}
return (PSM_SUCCESS);
}
static void
apix_cleanup_busy(void)
{
	int i, j;
	apix_vector_t *vecp;
for (i = 0; i < apic_nproc; i++) {
if (!apic_cpu_in_range(i))
continue;
for (j = APIX_AVINTR_MIN; j < APIX_AVINTR_MAX; j++) {
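			/* sketch of the elided reset; xv_vector() assumed */
			if ((vecp = xv_vector(i, j)) != NULL)
				vecp->v_busy = 0;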
}
}
}
static void
apix_redistribute_compute(void)
{
	int i, j, max_busy;

	if (!apic_enable_dynamic_migration)
		return;
if (++apic_nticks == apic_sample_factor_redistribution) {
/*
* Time to call apic_intr_redistribute().
* reset apic_nticks. This will cause max_busy
* to be calculated below and if it is more than
* apic_int_busy, we will do the whole thing
*/
apic_nticks = 0;
}
max_busy = 0;
for (i = 0; i < apic_nproc; i++) {
if (!apic_cpu_in_range(i))
continue;
/*
* Check if curipl is non zero & if ISR is in
* progress
*/
		if (((j = apic_cpus[i].aci_curipl) != 0) &&
		    (apic_cpus[i].aci_ISR_in_progress & (1 << j))) {
			int vect;

			/* sketch of the elided accounting (assumed) */
			apic_cpus[i].aci_busy++;
			vect = apic_cpus[i].aci_current[j];
			apixs[i]->x_vectbl[vect]->v_busy++;
		}
		if (!apic_nticks &&
		    (apic_cpus[i].aci_busy > max_busy))
			max_busy = apic_cpus[i].aci_busy;
	}
if (!apic_nticks) {
if (max_busy > apic_int_busy_mark) {
/*
* We could make the following check be
* skipped > 1 in which case, we get a
* redistribution at half the busy mark (due to
* double interval). Need to be able to collect
* more empirical data to decide if that is a
* good strategy. Punt for now.
*/
		} else
			apic_skipped_redistribute = 1;
}
}
/*
* intr_ops() service routines
*/
static int
apix_get_pending(apix_vector_t *vecp)
{
	int pending = 0;
/* need to get on the bound cpu */
return (pending);
}
static apix_vector_t *
apix_get_req_vector(ddi_intr_handle_impl_t *hdlp, ushort_t flags)
{
	apix_vector_t *vecp = NULL;
	processorid_t cpuid;
switch (flags & PSMGI_INTRBY_FLAGS) {
case PSMGI_INTRBY_IRQ:
case PSMGI_INTRBY_VEC:
if (!apic_cpu_in_range(cpuid))
return (NULL);
break;
case PSMGI_INTRBY_DEFAULT:
break;
default:
return (NULL);
}
return (vecp);
}
static int
apix_get_intr_info(ddi_intr_handle_impl_t *hdlp,
    apic_get_intr_t *intr_params_p)
{
	apix_vector_t *vecp;
int i;
if (IS_VECT_FREE(vecp)) {
intr_params_p->avgi_num_devs = 0;
intr_params_p->avgi_cpu_id = 0;
intr_params_p->avgi_req_flags = 0;
return (PSM_SUCCESS);
}
/* Return user bound info for intrd. */
}
}
if (intr_params_p->avgi_req_flags &
/* Get number of devices from apic_irq table shared field. */
/* Some devices have NULL dip. Don't count these. */
if (intr_params_p->avgi_num_devs > 0) {
i++;
}
}
/* There are no viable dips to return. */
if (intr_params_p->avgi_num_devs == 0) {
} else { /* Return list of dips */
/* Allocate space in array for that number of devs. */
			intr_params_p->avgi_dip_list = kmem_zalloc(
			    intr_params_p->avgi_num_devs *
			    sizeof (dev_info_t *), KM_NOSLEEP);
			if (intr_params_p->avgi_dip_list == NULL) {
				DDI_INTR_IMPLDBG((CE_CONT,
				    "apix_get_vector_intr_info: no memory"));
return (PSM_FAILURE);
}
/*
* Loop through the device list of the autovec table
* filling in the dip array.
*
* Note that the autovect table may have some special
* entries which contain NULL dips. These will be
* ignored.
*/
intr_params_p->avgi_dip_list[i++] =
}
}
}
return (PSM_SUCCESS);
}
static char *
apix_get_apic_type(void)
{
return (apix_psm_info.p_mach_idstring);
}
static apix_vector_t *
apix_set_cpu(apix_vector_t *vecp, int new_cpu, int *result)
{
	apix_vector_t *newp = NULL;
/* Fail if this is an MSI intr and is part of a group. */
return (NULL);
else
}
/*
* Mask MSI-X. It's unmasked when MSI-X gets enabled.
*/
return (NULL);
if ((msix_ctrl & PCI_MSIX_FUNCTION_MASK) == 0) {
		/*
		 * The function is not masked, so mask the "inum"th
		 * entry in the MSI-X table.
		 */
mask | 1);
}
}
*result = 0;
/* Restore mask bit */
return (newp);
}
/*
* Set cpu for MSIs
*/
static apix_vector_t *
apix_grp_set_cpu(apix_vector_t *vecp, int new_cpu, int *result)
{
	int i, num_vectors;
	ushort_t msi_ctrl;
	apix_vector_t *newp = NULL;
return (NULL);
}
return (NULL);
"set_grp: base vec not part of a grp or not aligned: "
return (NULL);
}
return (NULL);
for (i = 1; i < num_vectors; i++) {
return (NULL);
#ifdef DEBUG
		/*
		 * Sanity check: the CPU and dip are the same for all
		 * entries. This may be called when the first MSI is being
		 * enabled; at that point add_avintr() has not yet been
		 * called for the other MSIs.
		 */
"set_grp: cpu or dip for vec 0x%x difft than for "
" cpu: %d vs %d, dip: 0x%p vs 0x%p\n", orig_cpu,
(void *)APIX_GET_DIP(vp)));
return (NULL);
}
#endif /* DEBUG */
}
/* MSI Per vector masking is supported. */
if (msi_ctrl & PCI_MSI_PVM_MASK) {
if (msi_ctrl & PCI_MSI_64BIT_MASK)
else
"set_grp: pvm supported. Mask set to 0x%x\n",
}
*result = 0;
/* Reenable vectors if per vector masking is supported. */
if (msi_ctrl & PCI_MSI_PVM_MASK) {
"set_grp: pvm supported. Mask restored to 0x%x\n",
}
return (newp);
}
void
{
}
{
return (NULL);
}
}
/*
 * Must be called with interrupts disabled and apic_ioapic_lock held
*/
void
apix_intx_enable(int irqno)
{
/* write RDT entry high dword - destination */
/* Write the vector, trigger, and polarity portion of the RDT */
" intin 0x%x rdt_low 0x%x rdt_high 0x%x\n",
}
/*
 * Must be called with interrupts disabled and apic_ioapic_lock held
*/
void
apix_intx_disable(int irqno)
{
int ioapicindex, intin;
/*
* The assumption here is that this is safe, even for
* systems with IOAPICs that suffer from the hardware
* erratum because all devices have been quiesced before
* they unregister their interrupt handlers. If that
* assumption turns out to be false, this mask operation
* can induce the same erratum result we're trying to
* avoid.
*/
}
void
apix_intx_free(int irqno)
{
	apic_irq_t *irqp = apic_irq_table[irqno];
if (IS_IRQ_FREE(irqp)) {
return;
}
}
#ifdef DEBUG
int apix_intr_deliver_timeouts = 0;
int apix_intr_rirr_timeouts = 0;
int apix_intr_rirr_reset_failure = 0;
#endif
int apix_max_reps_irr_pending = 10;
int
apix_intx_rebind(int irqno, processorid_t cpuid, uchar_t vector)
{
	int ioapic_ix, intin_no;
	int repeats, waited, level;
	uint32_t rdt_entry;
	ulong_t iflag;
iflag = intr_clear();
/*
* Wait for the delivery status bit to be cleared. This should
* be a very small amount of time.
*/
repeats = 0;
do {
repeats++;
waited++) {
break;
}
if (!level)
break;
/*
* Mask the RDT entry for level-triggered interrupts.
*/
intin_no);
/* Mask it */
}
/*
* If there was a race and an interrupt was injected
* just before we masked, check for that case here.
* Then, unmask the RDT entry and try again. If we're
* on our last try, don't unmask (because we want the
* RDT entry to remain masked for the rest of the
* function).
*/
intin_no);
/* Unmask it */
}
	} while ((rdt_entry & AV_PENDING) &&
	    (repeats < apic_max_reps_clear_pending));
#ifdef DEBUG
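	/* sketch: bump the debug counter declared above */
	if (rdt_entry & AV_PENDING)
		apix_intr_deliver_timeouts++;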
#endif
	if (!level)
		goto done;
/*
* wait for remote IRR to be cleared for level-triggered
* interrupts
*/
repeats = 0;
do {
repeats++;
waited++) {
== 0)
break;
}
iflag = intr_clear();
}
} while (repeats < apix_max_reps_irr_pending);
if (repeats >= apix_max_reps_irr_pending) {
#ifdef DEBUG
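		apix_intr_rirr_timeouts++;	/* counter declared above */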
#endif
/*
* If we waited and the Remote IRR bit is still not cleared,
* AND if we've invoked the timeout APIC_REPROGRAM_MAX_TIMEOUTS
* times for this interrupt, try the last-ditch workaround:
*/
/*
* Trying to clear the bit through normal
* channels has failed. So as a last-ditch
* effort, try to set the trigger mode to
* edge, then to level. This has been
* observed to work on many systems.
*/
}
#ifdef DEBUG
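			apix_intr_rirr_reset_failure++;	/* declared above */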
#endif
prom_printf("apix: Remote IRR still "
"not clear for IOAPIC %d intin %d.\n"
"\tInterrupts to this pin may cease "
return (1); /* return failure */
}
}
done:
/* change apic_irq_table */
iflag = intr_clear();
/* reprogramme IO-APIC RDT entry */
return (0);
}
static int
apix_intx_get_pending(int irqno)
{
	apic_irq_t *irqp = apic_irq_table[irqno];
	ulong_t iflag;
	int pending = 0;
if (IS_IRQ_FREE(irqp)) {
return (0);
}
	/* check IO-APIC delivery status */
	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	/* sketch: read the RDT low dword (field names assumed) */
	pending = (READ_IOAPIC_RDT_ENTRY_LOW_DWORD(irqp->airq_ioapicindex,
	    irqp->airq_intin_no) & AV_PENDING) ? 1 : 0;

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
return (pending);
}
static void
apix_intx_set_mask(int irqno)
{
	ulong_t iflag;
iflag = intr_clear();
	/* set mask */
}
static void
apix_intx_clear_mask(int irqno)
{
	ulong_t iflag;
iflag = intr_clear();
/* clear mask */
}
/*
 * For a level-triggered interrupt, mask the IRQ line. Mask means
 * new interrupts will not be delivered. An interrupt already
 * accepted by a local APIC is not affected.
 */
void
apix_level_intr_pre_eoi(int irq)
{
return;
return;
}
	/*
	 * This is an IOxAPIC and there is an EOI register:
	 * Change the vector to a reserved, unused vector, so that
	 * the EOI from the local APIC won't clear the Remote IRR for
	 * this level-triggered interrupt. Instead, we'll manually
	 * clear it in apix_post_hardint() after ISR handling.
	 */
} else {
}
}
/*
 * For a level-triggered interrupt, unmask the IRQ line
* or restore the original vector number.
*/
void
apix_level_intr_post_dispatch(int irq)
{
return;
/*
* Already sent EOI back to Local APIC.
* Send EOI to IO-APIC
*/
} else {
/* clear the mask or restore the vector */
/* send EOI to IOxAPIC */
}
}
static int
apix_intx_get_shared(int irqno)
{
	apic_irq_t *irqp = apic_irq_table[irqno];
int share;
return (0);
}
return (share);
}
static void
apix_intx_set_shared(int irqno, int delta)
{
	apic_irq_t *irqp = apic_irq_table[irqno];
if (IS_IRQ_FREE(irqp)) {
return;
}
}
/*
* Setup IRQ table. Return IRQ no or -1 on failure
*/
static int
apix_intx_setup(dev_info_t *dip, int inum, int irqno,
    struct apic_io_intr *intrp, struct intrspec *ispec, iflag_t *iflagp)
{
int newirq;
short intr_index;
/* Find ioapicindex. If destid was ALL, we will exit with 0. */
break;
(ioapic == INTR_ALL_APIC));
/* check whether this intin# has been used by another irqno */
return (newirq);
if (apic_irq_table[irqno] &&
return (irqno);
}
} else { /* default configuration */
ioapicindex = 0;
}
/* allocate a new IRQ no */
} else {
if (newirq == -1) {
return (-1);
}
}
}
irqp->airq_vector = 0;
return (irqno);
}
/*
* Setup IRQ table for non-pci devices. Return IRQ no or -1 on error
*/
static int
apix_intx_setup_nonpci(dev_info_t *dip, int inum, int bustype,
    struct intrspec *ispec)
{
int newirq, i;
struct apic_io_intr *intrp;
if (!apic_enable_acpi || apic_use_acpi_madt_only) {
int busid;
if (bustype == 0)
for (i = 0; i < 2; i++) {
!= NULL)) {
}
}
/* fall back to default configuration */
return (-1);
}
/* search iso entries first */
if (acpi_iso_cnt != 0) {
i = 0;
while (i < acpi_iso_cnt) {
ACPI_MADT_TRIGGER_MASK) >> 2;
}
i++;
}
}
}
}
/*
* Setup IRQ table for pci devices. Return IRQ no or -1 on error
*/
static int
apix_intx_setup_pci(dev_info_t *dip, int inum, int bustype,
    struct intrspec *ispec)
{
	int busid, pci_irq;
struct apic_io_intr *intrp;
return (-1);
busid = (int)apic_single_pci_busid;
return (-1);
return (-1);
&intr_flag));
}
/* MP configuration table */
if (pci_irq == -1)
return (-1);
}
}
/*
* Translate and return IRQ no
*/
static int
apix_intx_xlate_irq(dev_info_t *dip, int inum, struct intrspec *ispec)
{
	int newirq = -1, bustype, dev_len;
int parent_is_pci_or_pciex = 0, child_is_pciex = 0;
char dev_type[16];
if (apic_defconf) {
goto defconf;
}
goto nonpci;
}
/*
* use ddi_getlongprop_buf() instead of ddi_prop_lookup_string()
* to avoid extra buffer allocation.
*/
&dev_len) == DDI_PROP_SUCCESS) {
}
&dev_len) == DDI_PROP_SUCCESS) {
child_is_pciex = 1;
}
if (parent_is_pci_or_pciex) {
if (newirq != -1)
goto done;
bustype = 0;
if (newirq != -1)
goto done;
if (newirq == -1) {
return (-1);
}
done:
return (newirq);
}
static int
apix_intx_alloc_vector(dev_info_t *dip, int inum, struct intrspec *ispec)
{
	int irqno;
	apix_vector_t *vecp;

	if ((irqno = apix_intx_xlate_irq(dip, inum, ispec)) == -1)
		return (0);

	/* sketch: apix_alloc_intx() is assumed from the apix headers */
	if ((vecp = apix_alloc_intx(dip, inum, irqno)) == NULL)
		return (0);

	DDI_INTR_IMPLDBG((CE_CONT, "apix_intx_alloc_vector: dip=0x%p "
	    "irqno=0x%x cpuid=%d vector=0x%x\n", (void *)dip, irqno,
	    vecp->v_cpuid, vecp->v_vector));
return (1);
}
/*
* Return the vector number if the translated IRQ for this device
* has a vector mapping setup. If no IRQ setup exists or no vector is
* allocated to it then return 0.
*/
static apix_vector_t *
apix_intx_xlate_vector(dev_info_t *dip, int inum, struct intrspec *ispec)
{
	apix_vector_t *vecp;
int irqno;
/* get the IRQ number */
return (NULL);
/* get the vector number if a vector is allocated to this irqno */
return (vecp);
}
/* stub function */
int
apix_loaded(void)
{
return (apix_is_enabled);
}