/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
/*
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2016 by Delphix. All rights reserved.
*/
/*
* PSMI 1.1 extensions are supported only in 2.6 and later versions.
* PSMI 1.2 extensions are supported only in 2.7 and later versions.
* PSMI 1.3 and 1.4 extensions are supported in Solaris 10.
* PSMI 1.5 extensions are supported in Solaris Nevada.
* PSMI 1.6 extensions are supported in Solaris Nevada.
* PSMI 1.7 extensions are supported in Solaris Nevada.
*/
#define PSMI_1_7
#include <sys/processor.h>
#include <sys/smp_impldefs.h>
#include <sys/psm_common.h>
#include <sys/ddi_impldefs.h>
#include <sys/x86_archext.h>
#include <sys/cpc_impl.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/sysmacros.h>
#include <sys/rm_platter.h>
#include <sys/privregs.h>
#include <sys/pci_intr_lib.h>
#include <sys/dditypes.h>
#include <sys/apic_common.h>
#include <sys/apic_timer.h>
static void apic_record_ioapic_rdt(void *intrmap_private,
ioapic_rdt_t *irdt);
/*
* Common routines between pcplusmp & apix (taken from apic.c).
*/
int apic_clkinit(int);
hrtime_t apic_gethrtime(void);
void apic_send_ipi(int, int);
void apic_set_idlecpu(processorid_t);
void apic_unset_idlecpu(processorid_t);
void apic_shutdown(int, int);
void apic_preshutdown(int, int);
/* Now the ones for Dynamic Interrupt distribution */
int apic_enable_dynamic_migration = 0;
/* maximum loop count when sending Start IPIs. */
/*
* These variables are frequently accessed in apic_intr_enter(),
* apic_intr_exit and apic_setspl, so group them together
*/
int apic_clkvect;
/* vector at which error interrupts come in */
int apic_errvect;
/* vector at which performance counter overflow interrupts come in */
int apic_cpcovf_vect;
/* vector at which CMCI interrupts come in */
int apic_cmci_vect;
extern int cmi_enable_cmci;
extern void cmi_cmci_trap(void);
/* number of CPUs in power-on transition state */
static int apic_poweron_cnt = 0;
/*
* Patchable global variables.
*/
int apic_forceload = 0;
int apic_panic_on_nmi = 0;
int apic_panic_on_apic_error = 0;
#ifdef DEBUG
int apic_debug = 0;
int apic_restrict_vector = 0;
int apic_debug_msgbufindex = 0;
#endif /* DEBUG */
volatile int apic_hrtime_stamp = 0;
int apic_hrtime_error = 0;
int apic_remote_hrterr = 0;
int apic_num_nmis = 0;
int apic_apic_error = 0;
int apic_num_apic_errors = 0;
int apic_num_cksum_errors = 0;
int apic_error = 0;
static int apic_cmos_ssb_set = 0;
/* use to make sure only one cpu handles the nmi */
/* use to make sure only one cpu handles the error interrupt */
static struct {
} aspen_bmc[] = {
};
static struct {
int port;
} sitka_bmc[] = {
};
/* Patchable global variables. */
/* default apic ops without interrupt remapping */
(int (*)(int))return_instr,
(void (*)(int))return_instr,
(void (*)(void *, void *, uint16_t, int))return_instr,
(void (*)(void **))return_instr,
};
/* Flag to indicate that we need to shut down all processors */
/*
* Probe the ioapic method for apix module. Called in apic_probe_common()
*/
int
{
if (apix_enable == 0)
return (PSM_SUCCESS);
/*
* Set IOAPIC EOI handling method. The priority from low to high is:
* 1. IOxAPIC: with EOI register
* 2. IOMMU interrupt mapping
* 3. Mask-Before-EOI method for systems without boot
* interrupt routing, such as systems with only one IOAPIC;
* which disables the boot interrupt routing already.
* 4. Directed EOI
*/
if (apic_io_ver[0] >= 0x20)
if (apic_directed_EOI_supported())
/* fall back to pcplusmp */
apix_enable = 0; /* go ahead with pcplusmp install next */
return (PSM_FAILURE);
}
return (PSM_SUCCESS);
}
/*
* handler for APIC Error interrupt. Just print a warning and continue
*/
int
{
uint_t i;
/*
* We need to write before read as per 7.4.17 of system prog manual.
* We do both and or the results to be safe
*/
/*
* Clear the APIC error status (do this on all cpus that enter here)
* (two writes are required due to the semantics of accessing the
* error status register.)
*/
/*
* Prevent more than 1 CPU from handling error interrupt causing
* double printing (interleave of characters from multiple
* CPU's when using prom_printf)
*/
if (lock_try(&apic_error_lock) == 0)
if (error) {
#if DEBUG
if (apic_debug)
debug_enter("pcplusmp: APIC Error interrupt received");
#endif /* DEBUG */
"APIC Error interrupt on CPU %d. Status = %x",
psm_get_cpu_id(), error);
else {
if ((error & ~APIC_CS_ERRORS) == 0) {
/* cksum error only */
apic_apic_error |= error;
} else {
/*
* prom_printf is the best shot we have of
* something which is problem free from
*/
prom_printf("APIC Error interrupt on CPU %d. "
"Status 0 = %x, Status 1 = %x\n",
apic_apic_error |= error;
for (i = 0; i < apic_error_display_delay; i++) {
tenmicrosec();
}
/*
* provide more delay next time limited to
* roughly 1 clock tick time
*/
if (apic_error_display_delay < 500)
apic_error_display_delay *= 2;
}
}
return (DDI_INTR_CLAIMED);
} else {
return (DDI_INTR_UNCLAIMED);
}
}
/*
* Turn off the mask bit in the performance counter Local Vector Table entry.
*/
void
apic_cpcovf_mask_clear(void)
{
	/*
	 * NOTE(review): the block comment above says this clears the mask
	 * bit in the performance-counter Local Vector Table entry, but the
	 * body here is empty -- the register write appears to have been
	 * elided from this copy of the file.  Confirm against the complete
	 * source before relying on this function.
	 */
}
/*ARGSUSED*/
static int
{
return (0);
}
/*ARGSUSED*/
static int
{
return (0);
}
/*ARGSUSED*/
int
{
switch (what) {
case CPU_ON:
break;
case CPU_OFF:
break;
default:
break;
}
return (0);
}
/*
 * Mask the local APIC's interrupt sources (LINT0, NMI, error and
 * performance-counter LVT entries, per the comments below).
 * NOTE(review): only the per-entry comments remain in this copy; the
 * actual LVT register writes appear to have been stripped, so as written
 * this function is a no-op -- confirm against the complete source.
 */
static void
apic_disable_local_apic(void)
{
	/* local intr reg 0 */
	/* disable NMI */
	/* and error interrupt */
	/* and perf counter intr */
}
static void
{
int loop_count;
/*
* Interrupts on current CPU will be disabled during the
* steps in order to avoid unwanted side effects from
* executing interrupt handlers on a problematic BIOS.
*/
iflag = intr_clear();
if (start) {
}
/*
* According to X2APIC specification in section '2.3.5.1' of
* Interrupt Command Register Semantics, the semantics of
* programming the Interrupt Command Register to dispatch an interrupt
* is simplified. A single MSR write to the 64-bit ICR is required
* for dispatching an interrupt. Specifically, with the 64-bit MSR
* interface to ICR, system software is not required to check the
* status of the delivery status bit prior to writing to the ICR
* to send an IPI. With the removal of the Delivery Status bit,
* system software no longer has a reason to read the ICR. It remains
* readable only to aid in debugging.
*/
#ifdef DEBUG
#else
if (apic_mode == LOCAL_APIC) {
}
#endif /* DEBUG */
/* for integrated - make sure there is one INIT IPI in buffer */
/* for external - it will wake up the cpu */
/* If only 1 CPU is installed, PENDING bit will not go low */
if (apic_mode == LOCAL_APIC &&
apic_ret();
else
break;
}
/* integrated apic */
/* to offset the INIT IPI queue up in the buffer */
/*
* send the second SIPI (Startup IPI) as recommended by Intel
* software development manual.
*/
}
}
/*ARGSUSED1*/
int
{
if (!apic_cpu_in_range(cpun)) {
return (EINVAL);
}
/*
* Switch to apic_common_send_ipi for safety during starting other CPUs.
*/
if (apic_mode == LOCAL_X2APIC) {
}
apic_cmos_ssb_set = 1;
return (0);
}
/*
* Put CPU into halted state with interrupts disabled.
*/
/*ARGSUSED1*/
int
{
int rc;
extern cpuset_t cpu_ready_set;
if (!apic_cpu_in_range(cpun)) {
return (EINVAL);
}
return (ENOTSUP);
}
/* Clear CPU_READY flag to disable cross calls. */
if (rc != 0) {
return (rc);
}
/* Intercept target CPU at a safe point before powering it off. */
return (0);
}
int
{
return (EINVAL);
}
case PSM_CPU_ADD:
return (apic_cpu_add(reqp));
case PSM_CPU_REMOVE:
return (apic_cpu_remove(reqp));
case PSM_CPU_STOP:
default:
return (ENOTSUP);
}
}
#ifdef DEBUG
int apic_stretch_interrupts = 0;
#endif /* DEBUG */
/*
* generates an interprocessor interrupt to another CPU. Any changes made to
* this routine must be accompanied by similar changes to
* apic_common_send_ipi().
*/
void
{
int vector;
flag = intr_clear();
vector);
}
/*ARGSUSED*/
void
{
}
/*ARGSUSED*/
void
{
}
/*
 * apic_ret: deliberately empty function.  It is invoked from the
 * busy-wait/poll loops in this file (e.g. the hrtime read retry path and
 * the IPI delivery-status polling loops); presumably it acts as a tiny
 * delay and an optimization barrier that the compiler will not elide --
 * TODO(review): confirm intent against the full source.
 *
 * Declared with (void) rather than an empty parameter list: "()" is an
 * old-style, unchecked declaration, while "(void)" gives callers full
 * prototype checking.
 */
void
apic_ret(void)
{
}
/*
* If apic_coarse_time == 1, then apic_gettime() is used instead of
* apic_gethrtime(). This is used for performance instead of accuracy.
*/
{
int old_hrtime_stamp;
/*
* In one-shot mode, we do not keep time, so if anyone
* calls psm_gettime() directly, we vector over to
* gethrtime().
* one-shot mode MUST NOT be enabled if this psm is the source of
* hrtime.
*/
if (apic_oneshot)
return (gethrtime());
apic_ret();
goto gettime_again;
}
return (temp);
}
/*
* Here we return the number of nanoseconds since booting. Note every
* clock interrupt increments apic_nsec_since_boot by the appropriate
* amount.
*/
apic_gethrtime(void)
{
/*
* In one-shot mode, we do not keep time, so if anyone
* calls psm_gethrtime() directly, we vector over to
* gethrtime().
* one-shot mode MUST NOT be enabled if this psm is the source of
* hrtime.
*/
if (apic_oneshot)
return (gethrtime());
if (apic_mode == LOCAL_APIC)
apic_ret();
/*
* Check to see which CPU we are on. Note the time is kept on
* the local APIC of CPU 0. If on CPU 0, simply read the current
* counter. If on another CPU, issue a remote read command to CPU 0.
*/
} else {
#ifdef DEBUG
#else
if (apic_mode == LOCAL_APIC)
#endif /* DEBUG */
& AV_READ_PENDING) {
apic_ret();
}
else { /* 0 = invalid */
/*
* return last hrtime right now, will need more
* testing if change to retry
*/
return (temp);
}
}
if (countval > last_count_read)
countval = 0;
else
/* we might have clobbered last_count_read. Restore it */
goto gethrtime_again;
}
if (temp < apic_last_hrtime) {
/* return last hrtime if error occurs */
}
else
return (temp);
}
/* apic NMI handler */
/*ARGSUSED*/
void
{
if (apic_shutdown_processors) {
return;
}
if (!lock_try(&apic_nmi_lock))
return;
if (apic_kmdb_on_nmi && psm_debugger()) {
debug_enter("NMI received: entering kmdb\n");
} else if (apic_panic_on_nmi) {
/* Keep panic from entering kmdb. */
nopanicdebug = 1;
panic("NMI received\n");
} else {
/*
* prom_printf is the best shot we have of something which is
*/
prom_printf("NMI received\n");
}
}
{
int i;
if (cpu_id == -1)
return ((processorid_t)0);
if (apic_cpu_in_range(i))
return (i);
}
return ((processorid_t)-1);
}
int
{
int i, rv = 0;
/* Check whether CPU hotplug is supported. */
return (ENOTSUP);
}
case MACH_CPU_ARG_LOCAL_APIC:
"!apic: apicid(%u) or procid(%u) is invalid.",
return (EINVAL);
}
break;
if (localid >= UINT32_MAX) {
"!apic: x2apicid(%u) is invalid.", localid);
return (EINVAL);
"can't support x2APIC processor.");
return (ENOTSUP);
}
break;
default:
"!apic: unknown argument type %d to apic_cpu_add().",
return (EINVAL);
}
/* Use apic_ioapic_lock to sync with apic_get_next_bind_cpu. */
iflag = intr_clear();
/* Check whether local APIC id already exists. */
for (i = 0; i < apic_nproc; i++) {
if (!CPU_IN_SET(apic_cpumask, i))
continue;
"!apic: local apic id %u already exists.",
localid);
return (EEXIST);
"!apic: processor id %u already exists.",
(int)procid);
return (EEXIST);
}
/*
* There's no local APIC version number available in MADT table,
* so assume that all CPUs are homogeneous and use local APIC
* version number of the first existing CPU.
*/
if (first) {
}
}
/*
* Try to assign the same cpuid if APIC id exists in the dirty cache.
*/
for (i = 0; i < apic_max_nproc; i++) {
if (CPU_IN_SET(apic_cpumask, i)) {
continue;
}
cpuid = i;
break;
}
}
/* Avoid the dirty cache and allocate fresh slot if possible. */
for (i = 0; i < apic_max_nproc; i++) {
cpuid = i;
break;
}
}
}
/* Try to find any free slot as last resort. */
for (i = 0; i < apic_max_nproc; i++) {
cpuid = i;
break;
}
}
}
"!apic: failed to allocate cpu id for processor %u.",
procid);
"!apic: failed to build mapping for processor %u.",
procid);
} else {
if (cpuid >= apic_nproc) {
}
}
return (rv);
}
int
{
int i;
/* Check whether CPU hotplug is supported. */
return (ENOTSUP);
}
/* Use apic_ioapic_lock to sync with apic_get_next_bind_cpu. */
iflag = intr_clear();
if (!apic_cpu_in_range(cpuid)) {
"!apic: cpuid %d doesn't exist in apic_cpus array.",
cpuid);
return (ENODEV);
}
return (ENOENT);
}
/*
* We are removing the highest numbered cpuid so we need to
* find the next highest cpuid as the new value for apic_nproc.
*/
for (i = apic_nproc; i > 0; i--) {
apic_nproc = i;
break;
}
}
/* at least one CPU left */
ASSERT(i > 0);
}
/* mark slot as free and keep it in the dirty cache */
return (0);
}
/*
 * Return the number of APIC clock ticks elapsed for the 8254 to decrement
* (APIC_TIME_COUNT + pit_ticks_adj) ticks.
*/
{
iflag = intr_clear();
do {
} while (pit_tick < APIC_TIME_MIN ||
/*
* Wait for the 8254 to decrement by 5 ticks to ensure
* we didn't start in the middle of a tick.
* Compare with 0x10 for the wrap around case.
*/
do {
/*
* Wait for the 8254 to decrement by
* (APIC_TIME_COUNT + pit_ticks_adj) ticks
*/
do {
return (start_apic_tick - end_apic_tick);
}
/*
* Initialise the APIC timer on the local APIC of CPU 0 to the desired
* frequency. Note at this stage in the boot sequence, the boot processor
* is the only active processor.
* hertz value of 0 indicates a one-shot mode request. In this case
* the function returns the resolution (in nanoseconds) for the hardware
* timer interrupt. If one-shot mode capability is not available,
* the return value will be 0. apic_enable_oneshot is a global switch
* for disabling the functionality.
* A non-zero positive value for hertz indicates a periodic mode request.
* In this case the hardware will be programmed to generate clock interrupts
* at hertz frequency and returns the resolution of interrupts in
* nanosecond.
*/
int
{
int ret;
return (ret);
}
/*
* apic_preshutdown:
* Called early in shutdown whilst we can still access filesystems to do
* things like loading modules which will be required to complete shutdown
* after filesystems are all unmounted.
*/
void
{
APIC_VERBOSE_POWEROFF(("apic_preshutdown(%d,%d); m=%d a=%d\n",
}
void
{
int i;
/* Send NMI to all CPUs except self to do per processor shutdown */
iflag = intr_clear();
#ifdef DEBUG
#else
if (apic_mode == LOCAL_APIC)
#endif /* DEBUG */
/* restore cmos shutdown byte before reboot */
if (apic_cmos_ssb_set) {
}
/* disable apic mode if imcr present */
if (apic_imcrp) {
}
/* remainder of function is for shutdown cases only */
if (cmd != A_SHUTDOWN)
return;
/*
* Switch system back into Legacy-Mode if using ACPI and
* not powering-off. Some BIOSes need to remain in ACPI-mode
* for power-off to succeed (Dell Dimension 4600)
* Do not disable ACPI while doing fastreboot
*/
(void) AcpiDisable();
if (fcn == AD_FASTREBOOT) {
}
/* remainder of function is for shutdown+poweroff case only */
if (fcn != AD_POWEROFF)
return;
switch (apic_poweroff_method) {
case APIC_POWEROFF_VIA_RTC:
/* select the extended NVRAM bank in the RTC */
/* for Predator must toggle the PAB bit */
/*
* clear power active bar, wakeup alarm and
* kickstart
*/
/* delay before next write */
drv_usecwait(1000);
/* for S40 the following would suffice */
/* power active bar control bit */
break;
restarts = 0;
if (++restarts == 3)
break;
attempts = 0;
do {
byte &= MISMIC_BUSY_MASK;
if (byte != 0) {
drv_usecwait(1000);
if (attempts >= 3)
goto restart_aspen_bmc;
++attempts;
}
} while (byte != 0);
byte |= 0x1;
i = 0;
i++) {
attempts = 0;
do {
byte &= MISMIC_BUSY_MASK;
if (byte != 0) {
drv_usecwait(1000);
if (attempts >= 3)
goto restart_aspen_bmc;
++attempts;
}
} while (byte != 0);
byte |= 0x1;
}
break;
restarts = 0;
if (++restarts == 3)
break;
attempts = 0;
do {
byte &= SMS_STATE_MASK;
if ((byte == SMS_READ_STATE) ||
(byte == SMS_WRITE_STATE)) {
drv_usecwait(1000);
if (attempts >= 3)
goto restart_sitka_bmc;
++attempts;
}
} while ((byte == SMS_READ_STATE) ||
(byte == SMS_WRITE_STATE));
i = 0;
i++) {
attempts = 0;
do {
byte &= SMS_IBF_MASK;
if (byte != 0) {
drv_usecwait(1000);
if (attempts >= 3)
goto restart_sitka_bmc;
++attempts;
}
} while (byte != 0);
}
break;
case APIC_POWEROFF_NONE:
/* If no APIC direct method, we will try using ACPI */
if (apic_enable_acpi) {
if (acpi_poweroff() == 1)
return;
} else
return;
break;
}
/*
* Wait a limited time here for power to go off.
* If the power does not go off, then there was a
* problem and we should continue to the halt which
* prints a message for the user to press a key to
* reboot.
*/
}
/*
* The following functions are in the platform specific file so that they
* can be different functions depending on whether we are running on
* bare metal or a hypervisor.
*/
/*
* map an apic for memory-mapped access
*/
uint32_t *
{
}
uint32_t *
{
}
/*
* unmap an apic
*/
void
{
}
void
{
}
{
return (ioapic[APIC_IO_DATA]);
}
void
{
}
void
{
}
/*
* Round-robin algorithm to find the next CPU with interrupts enabled.
* It can't share the same static variable apic_next_bind_cpu with
* apic_get_next_bind_cpu(), since that will cause all interrupts to be
* bound to CPU1 at boot time. During boot, only CPU0 is online with
* interrupts enabled when apic_get_next_bind_cpu() and apic_find_cpu()
* are called. However, the pcplusmp driver assumes that there will be
* boot_ncpus CPUs configured eventually so it tries to distribute all
* interrupts among CPU0 - CPU[boot_ncpus - 1]. Thus to prevent all
 * interrupts being targeted at CPU1, we need to use a dedicated static
* variable for find_next_cpu() instead of sharing apic_next_bind_cpu.
*/
{
int i;
/* Find the first CPU with the passed-in flag set */
for (i = 0; i < apic_nproc; i++) {
if (++acid >= apic_nproc) {
acid = 0;
}
if (apic_cpu_in_range(acid) &&
break;
}
}
return (acid);
}
/*
* Switch between safe and x2APIC IPI sending method.
* CPU may power on in xapic mode or x2apic mode. If CPU needs to send IPI to
 * other CPUs before entering x2APIC mode, it still needs to use the
 * xAPIC method.
* Before sending StartIPI to target CPU, psm_send_ipi will be changed to
* apic_common_send_ipi, which detects current local APIC mode and use right
* method to send IPI. If some CPUs fail to start up, apic_poweron_cnt
* won't return to zero, so apic_common_send_ipi will always be used.
* psm_send_ipi can't be simply changed back to x2apic_send_ipi if some CPUs
* failed to start up because those failed CPUs may recover itself later at
* unpredictable time.
*/
void
{
iflag = intr_clear();
if (enter) {
ASSERT(apic_poweron_cnt >= 0);
if (apic_poweron_cnt == 0) {
}
} else {
ASSERT(apic_poweron_cnt > 0);
if (apic_poweron_cnt == 0) {
}
}
}
void
{
int suppress_brdcst_eoi = 0;
/*
* Intel Software Developer's Manual 3A, 10.12.7:
*
* Routing of device interrupts to local APIC units operating in
* x2APIC mode requires use of the interrupt-remapping architecture
* specified in the Intel Virtualization Technology for Directed
* I/O, Revision 1.3. Because of this, BIOS must enumerate support
* for and software must enable this interrupt remapping with
 * Extended Interrupt Mode Enabled before enabling x2APIC mode in
* the local APIC units.
*
*
* In other words, to use the APIC in x2APIC mode, we need interrupt
* remapping. Since we don't start up the IOMMU by default, we
* won't be able to do any interrupt remapping and therefore have to
* use the APIC in traditional 'local APIC' mode with memory mapped
* I/O.
*/
if (psm_vt_ops != NULL) {
if (((apic_intrmap_ops_t *)psm_vt_ops)->
/*
* We leverage the interrupt remapping engine to
* suppress broadcast EOI; thus we must send the
* directed EOI with the directed-EOI handler.
*/
if (apic_directed_EOI_supported() == 0) {
suppress_brdcst_eoi = 1;
}
if (apic_detect_x2apic()) {
}
if (apic_directed_EOI_supported() == 0) {
}
}
}
}
/*ARGSUSED*/
static void
{
}
/*ARGSUSED*/
static void
{
}
/*
* Functions from apic_introp.c
*
* Those functions are used by apic_intr_ops().
*/
/*
* MSI support flag:
* reflects whether MSI is supported at APIC level
*
* 0 = default value - don't know and need to call apic_check_msi_support()
* to find out then set it accordingly
* 1 = supported
* -1 = not supported
*/
int apic_support_msi = 0;
/* Multiple vector support for MSI-X */
/* Multiple vector support for MSI */
/*
* Check whether the system supports MSI.
*
* MSI is required for PCI-E and for PCI versions later than 2.2, so if we find
* a PCI-E bus or we find a PCI bus whose version we know is >= 2.2, then we
* return PSM_SUCCESS to indicate this system supports MSI.
*
* (Currently the only way we check whether a given PCI bus supports >= 2.2 is
* by detecting if we are running inside the KVM hypervisor, which guarantees
* this version number.)
*/
int
{
int dev_len;
/*
* check whether the first level children of root_node have
* PCI-E or PCI capability.
*/
" driver: %s, binding: %s, nodename: %s\n", (void *)cdip,
ddi_node_name(cdip)));
!= DDI_PROP_SUCCESS)
continue;
return (PSM_SUCCESS);
return (PSM_SUCCESS);
}
/* MSI is not supported on this system */
"device_type found\n"));
return (PSM_FAILURE);
}
/*
* apic_pci_msi_unconfigure:
*
* This and next two interfaces are copied from pci_intr_lib.c
* Do ensure that these two files stay in sync.
* These needed to be copied over here to avoid a deadlock situation on
* certain mp systems that use MSI interrupts.
*
* IMPORTANT regards next three interfaces:
* i) are called only for MSI/X interrupts.
* ii) called with interrupts disabled, and must not block
*/
void
{
if (type == DDI_INTR_TYPE_MSI) {
msi_ctrl &= (~PCI_MSI_MME_MASK);
if (msi_ctrl & PCI_MSI_64BIT_MASK) {
cap_ptr + PCI_MSI_64BIT_DATA, 0);
} else {
cap_ptr + PCI_MSI_32BIT_DATA, 0);
}
} else if (type == DDI_INTR_TYPE_MSIX) {
/* Offset into "inum"th entry in the MSI-X table & mask it */
/* Offset into the "inum"th entry in the MSI-X table */
(inum * PCI_MSIX_VECTOR_SIZE);
/* Reset the "data" and "addr" bits */
}
}
/*
* apic_pci_msi_disable_mode:
*/
void
{
if (type == DDI_INTR_TYPE_MSI) {
if (!(msi_ctrl & PCI_MSI_ENABLE_BIT))
return;
} else if (type == DDI_INTR_TYPE_MSIX) {
if (msi_ctrl & PCI_MSIX_ENABLE_BIT) {
msi_ctrl);
}
}
}
{
}
{
return (apic_io_id[ioapicindex]);
}