hxge_main.c revision 57c5371a68b0454ec94109f38027ab6099bad130
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
 * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
*/
#include <hxge_impl.h>
#include <hxge_pfc.h>
/*
* PSARC/2007/453 MSI-X interrupt limit override
* (This PSARC case is limited to MSI-X vectors
* and SPARC platforms only).
*/
#if defined(_BIG_ENDIAN)
#else
#endif
static hxge_os_mutex_t hxgedebuglock;
static int hxge_debug_init = 0;
/*
 * Debugging flags:
 *	hxge_no_tx_lb:		transmit load balancing
 *	hxge_tx_lb_policy:	0 - TCP/UDP port (default)
 *				1 - From the Stack
 *				2 - Destination IP Address
 */
uint32_t hxge_no_tx_lb = 0;
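/*
 * Illustrative sketch (not from the original source): how a transmit
 * load-balancing policy switch of the kind described above might pick
 * a TX DMA channel.  The function and parameter names are invented;
 * only the policy values mirror the comment.
 */
static uint32_t
hxge_tx_lb_ring_sketch(uint32_t port_hash, uint32_t stack_hint,
    uint32_t dst_ip_hash, uint32_t policy, uint32_t nrings)
{
	switch (policy) {
	case 1:					/* from the stack */
		return (stack_hint % nrings);
	case 2:					/* destination IP address */
		return (dst_ip_hash % nrings);
	default:				/* TCP/UDP port (default) */
		return (port_hash % nrings);
	}
}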
/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
#if defined(__sparc)
#else
#endif
/*
* Tunables to manage the receive buffer blocks.
*
* hxge_rx_threshold_hi: copy all buffers.
* hxge_rx_bcopy_size_type: receive buffer block size type.
* hxge_rx_threshold_lo: copy only up to tunable block size type.
*/
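/*
 * Illustrative sketch (assumption, not driver code): how the two copy
 * thresholds above are typically applied on receive.  Packets at or
 * below the "lo" cutoff are always bcopy'd into a fresh mblk; packets
 * up to the "hi" cutoff are copied only when receive buffers run low;
 * anything larger is loaned up the stack.
 */
static int
hxge_rx_should_bcopy_sketch(size_t pkt_len, size_t threshold_lo,
    size_t threshold_hi, int bufs_low)
{
	if (pkt_len <= threshold_lo)
		return (1);		/* small packet: always copy */
	if (pkt_len <= threshold_hi && bufs_low)
		return (1);		/* ring is low on buffers: copy */
	return (0);			/* otherwise loan the DMA buffer */
}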
/*
* Function Prototypes
*/
static void hxge_unattach(p_hxge_t);
static void hxge_destroy_mutexes(p_hxge_t);
static void hxge_suspend(p_hxge_t);
static void hxge_destroy_dev(p_hxge_t);
static void hxge_free_mem_pool(p_hxge_t);
static void hxge_free_rx_mem_pool(p_hxge_t);
static void hxge_free_tx_mem_pool(p_hxge_t);
static void hxge_dma_mem_free(p_hxge_dma_common_t);
static int hxge_init_common_dev(p_hxge_t);
static void hxge_uninit_common_dev(p_hxge_t);
/*
* The next declarations are for the GLDv3 interface.
*/
static int hxge_m_start(void *);
static void hxge_m_stop(void *);
static int hxge_m_unicst(void *, const uint8_t *);
static int hxge_m_promisc(void *, boolean_t);
static void hxge_link_poll(void *arg);
{"_rxdma_intr_time", MAC_PROP_PERM_RW},
{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW}
};
#define HXGE_MAX_PRIV_PROPS \
(sizeof (hxge_priv_props)/sizeof (mac_priv_prop_t))
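/*
 * Minimal usage sketch (assumption): HXGE_MAX_PRIV_PROPS above is the
 * classic sizeof-array / sizeof-element idiom, so a lookup over the
 * private-property table reads like this.  The mpp_name member is
 * assumed from the mac_priv_prop_t definition of this era.
 */
static boolean_t
hxge_is_priv_prop_sketch(const char *pr_name)
{
	int i;

	for (i = 0; i < HXGE_MAX_PRIV_PROPS; i++) {
		if (strcmp(hxge_priv_props[i].mpp_name, pr_name) == 0)
			return (B_TRUE);
	}
	return (B_FALSE);
}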
#define HXGE_MAGIC 0x4E584745UL
#define MAX_DUMP_SZ 256
#define	HXGE_M_CALLBACK_FLAGS	\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
static mac_callbacks_t hxge_m_callbacks = {
NULL,
NULL,
};
/* PSARC/2007/453 MSI-X interrupt limit override. */
#define HXGE_MSIX_REQUEST_10G 8
static int hxge_create_msi_property(p_hxge_t);
/* Enable debug messages as necessary. */
uint64_t hxge_debug_level = 0;
/*
* This list contains the instance structures for the Hydra
* devices present in the system. The lock exists to guarantee
* mutually exclusive access to the list.
*/
void *hxge_hw_list = NULL;
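/*
 * Illustrative sketch (assumption): walking the hardware list above.
 * The node type and its fields are invented for illustration; the
 * real list node (with its parent dip and device count) is defined in
 * hxge_impl.h, and hxge_init_common_dev() below does the real walk.
 */
typedef struct hxge_hw_node_sketch {
	struct hxge_hw_node_sketch	*next;
	void				*parent_dip;
	int				ndevs;
} hxge_hw_node_sketch_t;

static hxge_hw_node_sketch_t *
hxge_hw_list_find_sketch(hxge_hw_node_sketch_t *head, void *dip)
{
	hxge_hw_node_sketch_t *p;

	/* The caller must hold the list lock described above. */
	for (p = head; p != NULL; p = p->next) {
		if (p->parent_dip == dip)
			return (p);
	}
	return (NULL);
}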
extern uint64_t hpi_debug_level;
extern hxge_status_t hxge_ldgv_init();
extern hxge_status_t hxge_ldgv_uninit();
extern hxge_status_t hxge_intr_ldgv_init();
/*
 * Count used to maintain the number of buffers being used
 * by Hydra instances and loaned up to the upper layers.
 */
uint32_t hxge_mblks_pending = 0;
/*
* Device register access attributes for PIO.
*/
static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
};
/*
* Device descriptor access attributes for DMA.
*/
static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
};
/*
* Device buffer access attributes for DMA.
*/
static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
};
DMA_ATTR_V0, /* version number. */
0, /* low address */
0xffffffffffffffff, /* high address */
0xffffffffffffffff, /* address counter max */
0x80000, /* alignment */
0xfc00fc, /* dlim_burstsizes */
0x1, /* minimum transfer size */
0xffffffffffffffff, /* maximum transfer size */
0xffffffffffffffff, /* maximum segment size */
(unsigned int)1, /* granularity */
0 /* attribute flags */
};
DMA_ATTR_V0, /* version number. */
0, /* low address */
0xffffffffffffffff, /* high address */
0xffffffffffffffff, /* address counter max */
0x100000, /* alignment */
0xfc00fc, /* dlim_burstsizes */
0x1, /* minimum transfer size */
0xffffffffffffffff, /* maximum transfer size */
0xffffffffffffffff, /* maximum segment size */
(unsigned int)1, /* granularity */
0 /* attribute flags */
};
DMA_ATTR_V0, /* version number. */
0, /* low address */
0xffffffffffffffff, /* high address */
0xffffffffffffffff, /* address counter max */
0x40000, /* alignment */
0xfc00fc, /* dlim_burstsizes */
0x1, /* minimum transfer size */
0xffffffffffffffff, /* maximum transfer size */
0xffffffffffffffff, /* maximum segment size */
(unsigned int)1, /* granularity */
0 /* attribute flags */
};
DMA_ATTR_V0, /* version number. */
0, /* low address */
0xffffffffffffffff, /* high address */
0xffffffffffffffff, /* address counter max */
#if defined(_BIG_ENDIAN)
0x2000, /* alignment */
#else
0x1000, /* alignment */
#endif
0xfc00fc, /* dlim_burstsizes */
0x1, /* minimum transfer size */
0xffffffffffffffff, /* maximum transfer size */
0xffffffffffffffff, /* maximum segment size */
(unsigned int)1, /* granularity */
0 /* attribute flags */
};
DMA_ATTR_V0, /* version number. */
0, /* low address */
0xffffffffffffffff, /* high address */
0xffffffffffffffff, /* address counter max */
#if defined(_BIG_ENDIAN)
0x2000, /* alignment */
#else
0x1000, /* alignment */
#endif
0xfc00fc, /* dlim_burstsizes */
0x1, /* minimum transfer size */
0xffffffffffffffff, /* maximum transfer size */
0xffffffffffffffff, /* maximum segment size */
(unsigned int)1, /* granularity */
0 /* attribute flags */
};
DMA_ATTR_V0, /* version number. */
0, /* low address */
0xffffffffffffffff, /* high address */
0xffffffffffffffff, /* address counter max */
0x10000, /* alignment */
0xfc00fc, /* dlim_burstsizes */
0x1, /* minimum transfer size */
0xffffffffffffffff, /* maximum transfer size */
0xffffffffffffffff, /* maximum segment size */
(unsigned int)1, /* granularity */
DDI_DMA_RELAXED_ORDERING /* attribute flags */
};
(uint_t)0, /* dlim_addr_lo */
0x1, /* dlim_minxfer */
1024 /* dlim_speed */
};
/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size so that
 * fewer DMA chunks need to be managed.
 */
size_t alloc_sizes[] = {
0x1000, 0x2000, 0x4000, 0x8000,
0x10000, 0x20000, 0x40000, 0x80000,
0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
};
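/*
 * Illustrative sketch (assumption): how the alloc_sizes[] table above
 * is consumed.  Start from the largest entry that does not exceed the
 * outstanding requirement and fall back to smaller entries on
 * allocation failure, so the pool is covered by as few chunks as
 * possible (see hxge_alloc_rx_buf_dma()/hxge_alloc_tx_buf_dma()).
 */
static size_t
hxge_pick_chunk_size_sketch(size_t remaining)
{
	int i = (int)(sizeof (alloc_sizes) / sizeof (alloc_sizes[0])) - 1;

	while (i > 0 && alloc_sizes[i] > remaining)
		i--;
	return (alloc_sizes[i]);
}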
/*
* Translate "dev_t" to a pointer to the associated "dev_info_t".
*/
static int
{
int instance;
int status = DDI_SUCCESS;
/*
 * Get the device instance, since we'll need to set up or retrieve a
 * soft state for this instance.
 */
switch (cmd) {
case DDI_ATTACH:
break;
case DDI_RESUME:
break;
}
break;
}
} else {
(void) hxge_resume(hxgep);
}
goto hxge_attach_exit;
case DDI_PM_RESUME:
break;
}
break;
}
(void) hxge_resume(hxgep);
goto hxge_attach_exit;
default:
goto hxge_attach_exit;
}
"ddi_soft_state_zalloc failed"));
goto hxge_attach_exit;
}
status = HXGE_ERROR;
"ddi_get_soft_state failed"));
goto hxge_attach_fail2;
}
goto hxge_attach_fail3;
}
/* Scrub the MSI-X memory */
"hxge_init_common_dev failed"));
goto hxge_attach_fail4;
}
/*
* Setup the Ndd parameters for this instance.
*/
/*
* Setup Register Tracing Buffer.
*/
/* init stats ptr */
goto hxge_attach_fail;
}
goto hxge_attach_fail;
}
/*
* Setup the Kstats for the driver.
*/
goto hxge_attach_fail;
}
if (status != DDI_SUCCESS) {
goto hxge_attach_fail;
}
if (status != DDI_SUCCESS) {
goto hxge_attach_fail;
}
if (status != DDI_SUCCESS) {
goto hxge_attach_fail;
}
/*
* Enable interrupts.
*/
/* Keep copy of MSIx table written */
"unable to register to mac layer (%d)", status));
goto hxge_attach_fail;
}
instance));
goto hxge_attach_exit;
goto hxge_attach_fail1;
/*
* Tear down the ndd parameters setup.
*/
/*
* Tear down the kstat setup.
*/
}
/*
* Unmap the register setup.
*/
status));
return (status);
}
static int
{
int status = DDI_SUCCESS;
int instance;
goto hxge_detach_exit;
}
switch (cmd) {
case DDI_DETACH:
break;
case DDI_PM_SUSPEND:
break;
case DDI_SUSPEND:
}
break;
default:
break;
}
if (cmd != DDI_DETACH)
goto hxge_detach_exit;
/*
* Stop the xcvr polling.
*/
"<== hxge_detach status = 0x%08X", status));
return (DDI_FAILURE);
}
"<== hxge_detach (mac_unregister) status = 0x%08X", status));
status));
return (status);
}
static void
{
return;
}
}
if (hxgep->hxge_timerid) {
hxgep->hxge_timerid = 0;
}
/* Stop any further interrupts. */
/* Remove soft interrupts. */
/* Stop the device and free resources. */
/* Tear down the ndd parameters setup. */
/* Tear down the kstat setup. */
/*
* Remove the list of ndd parameters which were setup during attach.
*/
" hxge_unattach: remove all properties"));
}
/*
* Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
* previous state before unmapping the registers.
*/
HXGE_DELAY(1000);
/*
* Unmap the register setup.
*/
/* Destroy all mutexes. */
/*
* Free the soft state data structures allocated with this instance.
*/
}
static hxge_status_t
{
int ddi_status = DDI_SUCCESS;
#ifdef HXGE_DEBUG
char *sysname;
#endif
int nregs;
return (HXGE_ERROR);
"hxge_map_regs: pci config size 0x%x", regsize));
if (ddi_status != DDI_SUCCESS) {
"ddi_map_regs, hxge bus config regs failed"));
goto hxge_map_regs_fail0;
}
"hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
dev_regs->hxge_pciregh));
"hxge_map_regs: pio size 0x%x", regsize));
/* set up the device mapped register */
if (ddi_status != DDI_SUCCESS) {
"ddi_map_regs for Hydra global reg failed"));
goto hxge_map_regs_fail1;
}
"hxge_map_regs: msix size 0x%x", regsize));
if (ddi_status != DDI_SUCCESS) {
"ddi_map_regs for msi reg failed"));
goto hxge_map_regs_fail2;
}
goto hxge_map_regs_exit;
if (dev_regs->hxge_msix_regh) {
}
}
if (dev_regs->hxge_pciregh) {
}
if (ddi_status != DDI_SUCCESS)
return (status);
}
static void
{
"==> hxge_unmap_regs: bus"));
}
"==> hxge_unmap_regs: device registers"));
}
"==> hxge_unmap_regs: device interrupts"));
}
}
}
static hxge_status_t
{
int ddi_status = DDI_SUCCESS;
/*
 * Get the interrupt cookie so the mutexes can be initialized.
 */
if (ddi_status != DDI_SUCCESS) {
"<== hxge_setup_mutexes: failed 0x%x", ddi_status));
goto hxge_setup_mutexes_exit;
}
/*
 * Initialize mutexes for this device.
 */
"<== hxge_setup_mutexes status = %x", status));
if (ddi_status != DDI_SUCCESS)
return (status);
}
static void
{
if (hxge_debug_init == 1) {
hxge_debug_init = 0;
}
}
{
return (status);
}
goto hxge_init_fail1;
}
/*
* Initialize and enable TXDMA channels.
*/
goto hxge_init_fail3;
}
/*
* Initialize and enable RXDMA channels.
*/
goto hxge_init_fail4;
}
/*
* Initialize TCAM
*/
goto hxge_init_fail5;
}
/*
* Initialize the VMAC block.
*/
goto hxge_init_fail5;
}
/* Bringup - this may be unnecessary when PXE and FCODE are available */
"Default Address Failure\n"));
goto hxge_init_fail5;
}
/* Keep copy of MSIx table written */
/*
* Enable hardware interrupts.
*/
goto hxge_init_exit;
"<== hxge_init status (failed) = 0x%08x", status));
return (status);
status));
return (status);
}
{
}
return (NULL);
}
/*ARGSUSED*/
void
{
if (timerid) {
}
}
void
{
"==> hxge_uninit: not initialized"));
return;
}
/* Stop timer */
if (hxgep->hxge_timerid) {
hxgep->hxge_timerid = 0;
}
(void) hxge_intr_hw_disable(hxgep);
/* Reset the receive VMAC side. */
(void) hxge_rx_vmac_disable(hxgep);
/* Free classification resources */
(void) hxge_classify_uninit(hxgep);
/* Reset the transmit VMAC side. */
(void) hxge_tx_vmac_disable(hxgep);
}
void
{
#if defined(__i386)
#else
#endif
int i, retry;
regdata = 0;
retry = 1;
for (i = 0; i < retry; i++) {
}
}
void
{
#if defined(__i386)
#else
#endif
#if defined(__i386)
#else
#endif
}
/*ARGSUSED*/
/*VARARGS*/
void
{
char msg_buffer[1048];
char prefix_buffer[32];
int instance;
(level == HXGE_ERR_CTL)) {
/* do the msg processing */
if (hxge_debug_init == 0) {
hxge_debug_init = 1;
}
}
if (level & HXGE_ERR_CTL) {
}
instance = -1;
} else {
(void) sprintf(prefix_buffer,
}
}
}
char *
{
int i;
static char etherbuf[1024];
char digits[] = "0123456789abcdef";
if (!size)
size = 60;
if (size > MAX_DUMP_SZ) {
/* Dump the leading bytes */
for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
	if (*ap > 0x0f)
		*cp++ = digits[*ap >> 4];
	*cp++ = digits[*ap++ & 0xf];
	*cp++ = ':';
}
for (i = 0; i < 20; i++)
*cp++ = '.';
/* Dump the last MAX_DUMP_SZ/2 bytes */
ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
	if (*ap > 0x0f)
		*cp++ = digits[*ap >> 4];
	*cp++ = digits[*ap++ & 0xf];
	*cp++ = ':';
}
} else {
for (i = 0; i < size; i++) {
	if (*ap > 0x0f)
		*cp++ = digits[*ap >> 4];
	*cp++ = digits[*ap++ & 0xf];
	*cp++ = ':';
}
}
*--cp = 0;
return (etherbuf);
}
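/*
 * Usage sketch (assumption, not an original call site): the routine
 * above formats up to MAX_DUMP_SZ bytes as colon-separated hex into a
 * static buffer, so a debug call would look like:
 *
 *	HXGE_DEBUG_MSG((hxgep, TX_CTL, "pkt: %s",
 *	    hxge_dump_packet((char *)mp->b_rptr, MBLKL(mp))));
 *
 * The static buffer makes it unsafe to call from two threads at once.
 */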
static void
{
/*
 * Stop the link status timer before hxge_intrs_disable() to avoid
 * accessing the MSIX table simultaneously. Note that the timer
 * routine polls for MSIX parity errors.
 */
}
static hxge_status_t
{
(void) hxge_rx_vmac_enable(hxgep);
(void) hxge_tx_vmac_enable(hxgep);
/* Keep copy of MSIx table written */
/*
 * Resume the link status timer after hxge_intrs_enable() to avoid
 * accessing the MSIX table simultaneously.
 */
"<== hxge_resume status = 0x%x", status));
return (status);
}
{
"Bad register acc handle"));
status = HXGE_ERROR;
}
" hxge_setup_dev status (link init 0x%08x)", status));
goto hxge_setup_dev_exit;
}
"<== hxge_setup_dev status = 0x%08x", status));
return (status);
}
static void
{
(void) hxge_hw_stop(hxgep);
}
static hxge_status_t
{
int ddi_status = DDI_SUCCESS;
" hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
" default_block_size %d iommu_pagesize %d",
if (iommu_pagesize != 0) {
/* Hydra supports up to 8K pages */
if (iommu_pagesize > 0x2000)
	hxgep->sys_page_sz = 0x2000;
} else {
}
}
"==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
"default_block_size %d page mask %d",
switch (hxgep->sys_page_sz) {
default:
break;
case 0x1000:
break;
case 0x2000:
break;
}
/*
* Get the system DMA burst size.
*/
if (ddi_status != DDI_SUCCESS) {
"ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
}
if (ddi_status != DDI_DMA_MAPPED) {
"Binding spare handle to find system burstsize failed."));
}
if (ddi_status != DDI_SUCCESS)
"<== hxge_setup_system_dma_pages status = 0x%08x", status));
return (status);
}
{
return (HXGE_ERROR);
}
return (HXGE_ERROR);
}
return (HXGE_OK);
}
static void
{
}
static hxge_status_t
{
int i, j;
/*
* Allocate memory for each receive DMA channel.
*/
KM_SLEEP);
KM_SLEEP);
/*
 * Assume that each DMA channel will be configured with the default
 * block size. RBR block counts are a multiple of the batch count (16).
 */
if (!hxge_port_rbr_size) {
}
if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
	hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
	    (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
}
}
/*
 * Addresses of the receive block ring, receive completion ring and
 * the mailbox must all be cache-aligned (64 bytes).
 */
rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);
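	/*
	 * Worked sketch (assumption): the 64-byte cache alignment
	 * required above amounts to rounding each control-structure
	 * size up to the next cache line, i.e.
	 *
	 *	size = (size + 63) & ~63;
	 */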
"hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
"hxge_port_rcr_size = %d rx_cntl_alloc_size = %d",
/*
* Allocate memory for receive buffers and descriptor rings. Replace
* allocation functions with interface functions provided by the
* partition manager when it is available.
*/
/*
* Allocate memory for the receive buffer blocks.
*/
for (i = 0; i < ndmas; i++) {
" hxge_alloc_rx_mem_pool to alloc mem: "
" dma %d dma_buf_p %llx &dma_buf_p %llx",
num_chunks[i] = 0;
&num_chunks[i]);
break;
}
st_rdc++;
" hxge_alloc_rx_mem_pool DONE alloc mem: "
"dma %d dma_buf_p %llx &dma_buf_p %llx", i,
}
if (i < ndmas) {
goto hxge_alloc_rx_mem_fail1;
}
/*
* Allocate memory for descriptor rings and mailbox.
*/
for (j = 0; j < ndmas; j++) {
rx_rbr_cntl_alloc_size)) != HXGE_OK) {
break;
}
rx_rcr_cntl_alloc_size)) != HXGE_OK) {
break;
}
rx_mbox_cntl_alloc_size)) != HXGE_OK) {
break;
}
st_rdc++;
}
if (j < ndmas) {
goto hxge_alloc_rx_mem_fail2;
}
/* Free control buffers */
"==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
for (; j >= 0; j--) {
"==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
}
"==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
/* Free data buffers */
i--;
"==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
for (; i >= 0; i--) {
num_chunks[i]);
}
"==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
"<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
return (status);
}
static void
{
"(null rx buf pool or buf not allocated"));
return;
}
if (dma_rbr_cntl_poolp == NULL ||
(!dma_rbr_cntl_poolp->buf_allocated)) {
"<== hxge_free_rx_mem_pool "
"(null rbr cntl buf pool or rbr cntl buf not allocated"));
return;
}
if (dma_rcr_cntl_poolp == NULL ||
(!dma_rcr_cntl_poolp->buf_allocated)) {
"<== hxge_free_rx_mem_pool "
"(null rcr cntl buf pool or rcr cntl buf not allocated"));
return;
}
if (dma_mbox_cntl_poolp == NULL ||
(!dma_mbox_cntl_poolp->buf_allocated)) {
"<== hxge_free_rx_mem_pool "
"(null mbox cntl buf pool or mbox cntl buf not allocated"));
return;
}
for (i = 0; i < ndmas; i++) {
}
for (i = 0; i < ndmas; i++) {
}
for (i = 0; i < ndmas; i++) {
sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
}
}
static hxge_status_t
{
int i, size_index, array_size;
" alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
i = 0;
size_index = 0;
while ((size_index < array_size) &&
size_index++;
if (size_index >= array_size) {
}
while ((allocated < total_alloc_size) &&
(size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
rx_dmap[i].dma_chunk_index = i;
"alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
"i %d nblocks %d alength %d",
(p_hxge_dma_common_t)(&rx_dmap[i]));
" hxge_alloc_rx_buf_dma: Alloc Failed: "
size_index--;
} else {
" alloc_rx_buf_dma allocated rdc %d "
"chunk %d size %x dvma %x bufp %llx ",
i++;
}
}
if (allocated < total_alloc_size) {
" hxge_alloc_rx_buf_dma failed due to"
" allocated(%d) < required(%d)",
goto hxge_alloc_rx_mem_fail1;
}
" alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
*num_chunks = i;
goto hxge_alloc_rx_mem_exit;
"<== hxge_alloc_rx_buf_dma status 0x%08x", status));
return (status);
}
/*ARGSUSED*/
static void
{
int i;
"==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
for (i = 0; i < num_chunks; i++) {
"==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
}
}
/*ARGSUSED*/
static hxge_status_t
{
" hxge_alloc_rx_cntl_dma: Alloc Failed: "
" for size: %d", size));
}
"<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
return (status);
}
/*ARGSUSED*/
static void
{
}
static hxge_status_t
{
int i, j;
"p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
/*
* Allocate memory for each transmit DMA channel.
*/
KM_SLEEP);
/*
 * Assume that each DMA channel will be configured with the default
 * transmit buffer size for copying transmit data. (For packet payloads
 * over this limit, packets will not be copied.)
 */
/*
 * Addresses of the transmit descriptor ring and the mailbox must
 * all be cache-aligned (64 bytes).
 */
tx_cntl_alloc_size *= (sizeof (tx_desc_t));
tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
KM_SLEEP);
/*
* Allocate memory for transmit buffers and descriptor rings. Replace
* allocation functions with interface functions provided by the
* partition manager when it is available.
*
* Allocate memory for the transmit buffer pool.
*/
for (i = 0; i < ndmas; i++) {
num_chunks[i] = 0;
break;
}
st_tdc++;
}
if (i < ndmas) {
}
/*
* Allocate memory for descriptor rings and mailbox.
*/
for (j = 0; j < ndmas; j++) {
break;
}
st_tdc++;
}
if (j < ndmas) {
}
"==> hxge_alloc_tx_mem_pool: start_tdc %d "
/* Free control buffers */
j--;
for (; j >= 0; j--) {
}
/* Free data buffers */
i--;
for (; i >= 0; i--) {
num_chunks[i]);
}
"<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
return (status);
}
static hxge_status_t
{
int i, size_index, array_size;
i = 0;
size_index = 0;
while ((size_index < array_size) &&
size_index++;
if (size_index >= array_size) {
}
while ((allocated < total_alloc_size) &&
(size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
tx_dmap[i].dma_chunk_index = i;
(p_hxge_dma_common_t)(&tx_dmap[i]));
" hxge_alloc_tx_buf_dma: Alloc Failed: "
size_index--;
} else {
i++;
}
}
if (allocated < total_alloc_size) {
" hxge_alloc_tx_buf_dma: failed due to"
" allocated(%d) < required(%d)",
goto hxge_alloc_tx_mem_fail1;
}
*num_chunks = i;
"==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
*dmap, i));
goto hxge_alloc_tx_mem_exit;
"<== hxge_alloc_tx_buf_dma status 0x%08x", status));
return (status);
}
/*ARGSUSED*/
static void
{
int i;
for (i = 0; i < num_chunks; i++) {
}
}
/*ARGSUSED*/
static hxge_status_t
{
KM_SLEEP);
" hxge_alloc_tx_cntl_dma: Alloc Failed: "
" for size: %d", size));
}
"<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
return (status);
}
/*ARGSUSED*/
static void
{
}
static void
{
"<== hxge_free_tx_mem_pool "
"(null rx buf pool or buf not allocated"));
return;
}
"<== hxge_free_tx_mem_pool "
"(null tx cntl buf pool or cntl buf not allocated"));
return;
}
for (i = 0; i < ndmas; i++) {
}
for (i = 0; i < ndmas; i++) {
}
for (i = 0; i < ndmas; i++) {
sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
}
}
/*ARGSUSED*/
static hxge_status_t
struct ddi_dma_attr *dma_attrp,
{
int ddi_status = DDI_SUCCESS;
if (ddi_status != DDI_SUCCESS) {
"hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
return (HXGE_ERROR | HXGE_DDI_FAILED);
}
&dma_p->acc_handle);
if (ddi_status != DDI_SUCCESS) {
/* The caller will decide whether it is fatal */
"hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
return (HXGE_ERROR | HXGE_DDI_FAILED);
}
"hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
return (HXGE_ERROR);
}
if (ddi_status != DDI_DMA_MAPPED) {
"hxge_dma_mem_alloc:di_dma_addr_bind failed "
if (dma_p->acc_handle) {
}
return (HXGE_ERROR | HXGE_DDI_FAILED);
}
"hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie"
if (dma_p->acc_handle) {
}
return (HXGE_ERROR);
}
#if defined(__i386)
#else
#endif
"dma buffer allocated: dma_p $%p "
"return dmac_ladress from cookie $%p dmac_size %d "
"dma_p->ioaddr_p $%p "
"dma_p->orig_ioaddr_p $%p "
"orig_vatopa $%p "
"alength %d (0x%x) "
"kaddrp $%p "
"length %d (0x%x)",
return (HXGE_OK);
}
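/*
 * Condensed sketch (assumption, not the driver's exact code) of the
 * DDI DMA allocation sequence used by hxge_dma_mem_alloc() above:
 * allocate a handle, allocate the memory, bind it, and unwind on each
 * failure.  A bind that returns more than one cookie is rejected,
 * matching the single-cookie check above.
 */
static int
hxge_dma_alloc_sketch(dev_info_t *dip, ddi_dma_attr_t *attrp,
    ddi_device_acc_attr_t *accp, size_t len, caddr_t *kaddrp,
    ddi_dma_handle_t *dmah, ddi_acc_handle_t *acch,
    ddi_dma_cookie_t *cookiep)
{
	size_t real_len;
	uint_t ncookies;
	int rc;

	if (ddi_dma_alloc_handle(dip, attrp, DDI_DMA_DONTWAIT, NULL,
	    dmah) != DDI_SUCCESS)
		return (-1);

	if (ddi_dma_mem_alloc(*dmah, len, accp, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, kaddrp, &real_len,
	    acch) != DDI_SUCCESS) {
		ddi_dma_free_handle(dmah);
		return (-1);
	}

	rc = ddi_dma_addr_bind_handle(*dmah, NULL, *kaddrp, real_len,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
	    cookiep, &ncookies);
	if (rc == DDI_DMA_MAPPED && ncookies == 1)
		return (0);

	if (rc == DDI_DMA_MAPPED)
		(void) ddi_dma_unbind_handle(*dmah);
	ddi_dma_mem_free(acch);
	ddi_dma_free_handle(dmah);
	return (-1);
}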
static void
{
return;
}
}
}
}
/*
 * hxge_m_start() -- start transmitting and receiving.
 *
 * This function is called by the MAC layer when the first
 * stream is opened, to prepare the hardware for sending
 * and receiving packets.
 */
static int
hxge_m_start(void *arg)
{
"<== hxge_m_start: initialization failed"));
return (EIO);
}
/*
 * Start the timer to check for system errors and TX hangs.
 */
/* Start the link status timer to check the link status */
}
return (0);
}
/*
* hxge_m_stop(): stop transmitting and receiving.
*/
static void
hxge_m_stop(void *arg)
{
if (hxgep->hxge_timerid) {
hxgep->hxge_timerid = 0;
}
/* Stop the link status timer before unregistering */
}
}
static int
{
struct ether_addr addrp;
"<== hxge_m_unicst: set unitcast failed"));
return (EINVAL);
}
return (0);
}
static int
{
struct ether_addr addrp;
if (add) {
"<== hxge_m_multicst: add multicast failed"));
return (EINVAL);
}
} else {
"<== hxge_m_multicst: del multicast failed"));
return (EINVAL);
}
}
return (0);
}
static int
{
"<== hxge_m_promisc: set promisc failed"));
return (EINVAL);
}
return (0);
}
static void
{
int err;
int cmd;
switch (cmd) {
default:
return;
case LB_GET_INFO_SIZE:
case LB_GET_INFO:
case LB_GET_MODE:
break;
case LB_SET_MODE:
break;
case ND_GET:
break;
case ND_SET:
break;
case HXGE_GET64:
case HXGE_PUT64:
case HXGE_GET_TX_RING_SZ:
case HXGE_GET_TX_DESC:
case HXGE_TX_SIDE_RESET:
case HXGE_RX_SIDE_RESET:
case HXGE_GLOBAL_RESET:
case HXGE_RESET_MAC:
case HXGE_PUT_TCAM:
case HXGE_GET_TCAM:
case HXGE_RTRACE:
break;
}
if (need_privilege) {
if (err != 0) {
"<== hxge_m_ioctl: no priv"));
return;
}
}
switch (cmd) {
case ND_GET:
case ND_SET:
break;
case LB_GET_MODE:
case LB_SET_MODE:
case LB_GET_INFO_SIZE:
case LB_GET_INFO:
break;
case HXGE_PUT_TCAM:
case HXGE_GET_TCAM:
case HXGE_GET64:
case HXGE_PUT64:
case HXGE_GET_TX_RING_SZ:
case HXGE_GET_TX_DESC:
case HXGE_TX_SIDE_RESET:
case HXGE_RX_SIDE_RESET:
case HXGE_GLOBAL_RESET:
case HXGE_RESET_MAC:
"==> hxge_m_ioctl: cmd 0x%x", cmd));
break;
}
}
/*ARGSUSED*/
{
switch (cap) {
case MAC_CAPAB_HCKSUM:
break;
default:
return (B_FALSE);
}
return (B_TRUE);
}
static boolean_t
{
/*
* All adv_* parameters are locked (read-only) while
* the device is in any sort of loopback mode ...
*/
switch (pr_num) {
case MAC_PROP_ADV_1000FDX_CAP:
case MAC_PROP_EN_1000FDX_CAP:
case MAC_PROP_ADV_1000HDX_CAP:
case MAC_PROP_EN_1000HDX_CAP:
case MAC_PROP_ADV_100FDX_CAP:
case MAC_PROP_EN_100FDX_CAP:
case MAC_PROP_ADV_100HDX_CAP:
case MAC_PROP_EN_100HDX_CAP:
case MAC_PROP_ADV_10FDX_CAP:
case MAC_PROP_EN_10FDX_CAP:
case MAC_PROP_ADV_10HDX_CAP:
case MAC_PROP_EN_10HDX_CAP:
case MAC_PROP_AUTONEG:
case MAC_PROP_FLOWCTRL:
return (B_TRUE);
}
return (B_FALSE);
}
/*
*/
static int
{
int err = 0;
/*
* All adv_* parameters are locked (read-only)
* while the device is in any sort of loopback mode.
*/
"==> hxge_m_setprop: loopback mode: read only"));
return (EBUSY);
}
switch (pr_num) {
/*
 * These properties either do not exist or are read-only.
 */
case MAC_PROP_EN_1000FDX_CAP:
case MAC_PROP_EN_100FDX_CAP:
case MAC_PROP_EN_10FDX_CAP:
case MAC_PROP_EN_1000HDX_CAP:
case MAC_PROP_EN_100HDX_CAP:
case MAC_PROP_EN_10HDX_CAP:
case MAC_PROP_ADV_1000FDX_CAP:
case MAC_PROP_ADV_1000HDX_CAP:
case MAC_PROP_ADV_100FDX_CAP:
case MAC_PROP_ADV_100HDX_CAP:
case MAC_PROP_ADV_10FDX_CAP:
case MAC_PROP_ADV_10HDX_CAP:
case MAC_PROP_STATUS:
case MAC_PROP_SPEED:
case MAC_PROP_DUPLEX:
case MAC_PROP_AUTONEG:
/*
 * Flow control is handled in the shared domain and
 * it is read-only here.
 */
case MAC_PROP_FLOWCTRL:
"==> hxge_m_setprop: read only property %d",
pr_num));
break;
case MAC_PROP_MTU:
"==> hxge_m_setprop: set MTU: %d", new_mtu));
err = 0;
break;
}
break;
}
if (new_framesize < MIN_FRAME_SIZE ||
break;
}
if (hxge_vmac_set_framesize(hxgep)) {
break;
}
if (err) {
(void) hxge_vmac_set_framesize(hxgep);
}
"==> hxge_m_setprop: set MTU: %d maxframe %d",
break;
case MAC_PROP_PRIVATE:
"==> hxge_m_setprop: private property"));
pr_val);
break;
default:
break;
}
"<== hxge_m_setprop (return %d)", err));
return (err);
}
/* ARGSUSED */
static int
void *pr_val)
{
int err = 0;
switch (pr_num) {
case MAC_PROP_DUPLEX:
break;
case MAC_PROP_AUTONEG:
break;
case MAC_PROP_FLOWCTRL:
if (pr_valsize < sizeof (link_flowctrl_t))
return (EINVAL);
break;
default:
break;
}
return (err);
}
static int
{
int err = 0;
"==> hxge_m_getprop: pr_num %d", pr_num));
if (pr_valsize == 0)
return (EINVAL);
*perm = MAC_PROP_PERM_RW;
return (err);
}
switch (pr_num) {
case MAC_PROP_DUPLEX:
"==> hxge_m_getprop: duplex mode %d",
break;
case MAC_PROP_SPEED:
if (pr_valsize < sizeof (uint64_t))
return (EINVAL);
break;
case MAC_PROP_STATUS:
if (pr_valsize < sizeof (link_state_t))
return (EINVAL);
else
ls = LINK_STATE_UP;
break;
case MAC_PROP_FLOWCTRL:
/*
 * Flow control is supported by the shared domain and
 * it is currently transmit-only.
 */
if (pr_valsize < sizeof (link_flowctrl_t))
return (EINVAL);
break;
case MAC_PROP_AUTONEG:
/* 10G link only and it is not negotiable */
break;
case MAC_PROP_ADV_1000FDX_CAP:
case MAC_PROP_ADV_100FDX_CAP:
case MAC_PROP_ADV_10FDX_CAP:
case MAC_PROP_ADV_1000HDX_CAP:
case MAC_PROP_ADV_100HDX_CAP:
case MAC_PROP_ADV_10HDX_CAP:
case MAC_PROP_EN_1000FDX_CAP:
case MAC_PROP_EN_100FDX_CAP:
case MAC_PROP_EN_10FDX_CAP:
case MAC_PROP_EN_1000HDX_CAP:
case MAC_PROP_EN_100HDX_CAP:
case MAC_PROP_EN_10HDX_CAP:
break;
case MAC_PROP_PRIVATE:
pr_valsize, pr_val);
break;
default:
break;
}
return (err);
}
/* ARGSUSED */
static int
const void *pr_val)
{
int err = 0;
return (EINVAL);
}
/* Blanking */
/* Classification */
} else {
}
"<== hxge_set_priv_prop: err %d", err));
return (err);
}
static int
{
char valstr[MAXNAMELEN];
int err = 0;
int value = 0;
"==> hxge_get_priv_prop: property %s", pr_name));
if (pr_flags & MAC_PROP_DEFAULT) {
/* Receive Interrupt Blanking Parameters */
/* Classification and Load Distribution Configuration */
} else {
}
} else {
/* Receive Interrupt Blanking Parameters */
/* Classification and Load Distribution Configuration */
} else {
}
}
if (err == 0) {
if (pr_valsize < strsize) {
} else {
}
}
"<== hxge_get_priv_prop: return %d", err));
return (err);
}
/*
* Module loading and removing entry points.
*/
extern struct mod_ops mod_driverops;
#define HXGE_DESC_VER "HXGE 10Gb Ethernet Driver"
/*
* Module linkage information for the kernel.
*/
static struct modldrv hxge_modldrv = {
	&mod_driverops,
	HXGE_DESC_VER,
	&hxge_dev_ops
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *) &hxge_modldrv, NULL
};
int
_init(void)
{
int status;
if (status != 0) {
"failed to init device soft state"));
goto _init_exit;
}
if (status != 0) {
goto _init_exit;
}
return (status);
}
int
_fini(void)
{
int status;
if (hxge_mblks_pending)
return (EBUSY);
if (status != DDI_SUCCESS) {
"Module removal failed 0x%08x", status));
goto _fini_exit;
}
return (status);
}
int
{
int status;
return (status);
}
/*ARGSUSED*/
{
int intr_types;
int type = 0;
int ddi_status = DDI_SUCCESS;
if (hxge_msi_enable) {
}
/* Get the supported interrupt types */
!= DDI_SUCCESS) {
"ddi_intr_get_supported_types failed: status 0x%08x",
ddi_status));
return (HXGE_ERROR | HXGE_DDI_FAILED);
}
"ddi_intr_get_supported_types: 0x%08x", intr_types));
/*
 * Pick the interrupt type to use (MSI-X, MSI, or INTX), based on
 * hxge_msi_enable:
 *	1 - MSI
 *	2 - MSI-X
 *	others - FIXED (INTX emulation)
 */
switch (hxge_msi_enable) {
default:
"use fixed (intx emulation) type %08x", type));
break;
case 2:
"ddi_intr_get_supported_types: 0x%08x", intr_types));
if (intr_types & DDI_INTR_TYPE_MSIX) {
"==> hxge_add_intrs: "
"ddi_intr_get_supported_types: MSIX 0x%08x", type));
} else if (intr_types & DDI_INTR_TYPE_MSI) {
"==> hxge_add_intrs: "
"ddi_intr_get_supported_types: MSI 0x%08x", type));
} else if (intr_types & DDI_INTR_TYPE_FIXED) {
"ddi_intr_get_supported_types: MSXED0x%08x", type));
}
break;
case 1:
if (intr_types & DDI_INTR_TYPE_MSI) {
"==> hxge_add_intrs: "
"ddi_intr_get_supported_types: MSI 0x%08x", type));
} else if (intr_types & DDI_INTR_TYPE_MSIX) {
"==> hxge_add_intrs: "
"ddi_intr_get_supported_types: MSIX 0x%08x", type));
} else if (intr_types & DDI_INTR_TYPE_FIXED) {
"==> hxge_add_intrs: "
"ddi_intr_get_supported_types: MSXED0x%08x", type));
}
}
type == DDI_INTR_TYPE_FIXED) &&
" hxge_add_intrs: "
" hxge_add_intrs_adv failed: status 0x%08x",
status));
return (status);
} else {
"interrupts registered : type %d", type));
"\nAdded advanced hxge add_intr_adv "
"intr type 0x%x\n", type));
return (status);
}
}
"==> hxge_add_intrs: failed to register interrupts"));
return (HXGE_ERROR | HXGE_DDI_FAILED);
}
return (status);
}
/*ARGSUSED*/
static hxge_status_t
{
int ddi_status = DDI_SUCCESS;
if (ddi_status != DDI_SUCCESS) {
"ddi_add_softintrs failed: status 0x%08x", ddi_status));
return (HXGE_ERROR | HXGE_DDI_FAILED);
}
return (status);
}
/*ARGSUSED*/
static hxge_status_t
{
int intr_type;
intr_type));
switch (intr_type) {
case DDI_INTR_TYPE_MSI: /* 0x2 */
case DDI_INTR_TYPE_MSIX: /* 0x4 */
break;
case DDI_INTR_TYPE_FIXED: /* 0x1 */
break;
default:
status = HXGE_ERROR;
break;
}
return (status);
}
/*ARGSUSED*/
static hxge_status_t
{
int behavior;
int inum = 0;
int loop = 0;
int x, y;
int ddi_status = DDI_SUCCESS;
"ddi_intr_get_nintrs() failed, status: 0x%x%, "
return (HXGE_ERROR | HXGE_DDI_FAILED);
}
"ddi_intr_get_navail() failed, status: 0x%x%, "
return (HXGE_ERROR | HXGE_DDI_FAILED);
}
"ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
/* PSARC/2007/453 MSI-X interrupt limit override */
if (int_type == DDI_INTR_TYPE_MSIX) {
"hxge_add_intrs_adv_type: nintrs %d "
"navail %d (nrequest %d)",
}
}
/* MSI must be power of 2 */
if ((navail & 16) == 16) {
	navail = 16;
} else if ((navail & 8) == 8) {
	navail = 8;
} else if ((navail & 4) == 4) {
	navail = 4;
} else if ((navail & 2) == 2) {
	navail = 2;
} else {
	navail = 1;
}
"ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
}
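	/*
	 * Equivalent sketch (assumption): the ladder above simply
	 * clamps navail down to a power of two, which could also be
	 * written as:
	 *
	 *	for (p = 16; p > 1; p >>= 1)
	 *		if (navail >= p)
	 *			break;
	 *	navail = p;
	 */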
"requesting: intr type %d nintrs %d, navail %d",
" ddi_intr_alloc() failed: %d", ddi_status));
return (HXGE_ERROR | HXGE_DDI_FAILED);
}
"ddi_intr_alloc() returned: navail %d nactual %d",
" ddi_intr_get_pri() failed: %d", ddi_status));
/* Free already allocated interrupts */
for (y = 0; y < nactual; y++) {
}
return (HXGE_ERROR | HXGE_DDI_FAILED);
}
nrequired = 0;
"hxge_add_intrs_adv_typ:hxge_ldgv_init "
"failed: 0x%x", status));
/* Free already allocated interrupts */
for (y = 0; y < nactual; y++) {
}
return (status);
}
else
"hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
"1-1 int handler (entry %d)\n",
"hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
"nldevs %d int handler (entry %d)\n",
}
"==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
DDI_SUCCESS) {
"==> hxge_add_intrs_adv_type: failed #%d "
"status 0x%x", x, ddi_status));
for (y = 0; y < intrp->intr_added; y++) {
(void) ddi_intr_remove_handler(
}
/* Free already allocated intr */
for (y = 0; y < nactual; y++) {
}
(void) hxge_ldgv_uninit(hxgep);
return (HXGE_ERROR | HXGE_DDI_FAILED);
}
intrp->intr_added++;
}
"Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
(void) hxge_intr_ldgv_init(hxgep);
return (status);
}
/*ARGSUSED*/
static hxge_status_t
{
int behavior;
int inum = 0;
int x, y;
int ddi_status = DDI_SUCCESS;
"ddi_intr_get_nintrs() failed, status: 0x%x%, "
return (HXGE_ERROR | HXGE_DDI_FAILED);
}
"ddi_intr_get_navail() failed, status: 0x%x%, "
return (HXGE_ERROR | HXGE_DDI_FAILED);
}
"ddi_intr_get_navail() returned: nintrs %d, naavail %d",
" ddi_intr_alloc() failed: %d", ddi_status));
return (HXGE_ERROR | HXGE_DDI_FAILED);
}
" ddi_intr_get_pri() failed: %d", ddi_status));
/* Free already allocated interrupts */
for (y = 0; y < nactual; y++) {
}
return (HXGE_ERROR | HXGE_DDI_FAILED);
}
nrequired = 0;
"hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
"failed: 0x%x", status));
/* Free already allocated interrupts */
for (y = 0; y < nactual; y++) {
}
return (status);
}
"hxge_add_intrs_adv_type_fix: "
"1-1 int handler(%d) ldg %d ldv %d "
"arg1 $%p arg2 $%p\n",
"hxge_add_intrs_adv_type_fix: "
"shared ldv %d int handler(%d) ldv %d ldg %d"
"arg1 0x%016llx arg2 0x%016llx\n",
}
DDI_SUCCESS) {
"==> hxge_add_intrs_adv_type_fix: failed #%d "
"status 0x%x", x, ddi_status));
for (y = 0; y < intrp->intr_added; y++) {
(void) ddi_intr_remove_handler(
}
for (y = 0; y < nactual; y++) {
}
/* Free already allocated intr */
(void) hxge_ldgv_uninit(hxgep);
return (HXGE_ERROR | HXGE_DDI_FAILED);
}
intrp->intr_added++;
}
return (status);
}
/*ARGSUSED*/
static void
{
int i, inum;
if (!intrp->intr_registered) {
"<== hxge_remove_intrs: interrupts not registered"));
return;
}
intrp->intr_added);
} else {
for (i = 0; i < intrp->intr_added; i++) {
}
}
}
}
"hxge_remove_intrs: ddi_intr_free inum %d "
"msi_intx_cnt %d intr_added %d",
}
}
intrp->msi_intx_cnt = 0;
intrp->intr_added = 0;
(void) hxge_ldgv_uninit(hxgep);
}
/*ARGSUSED*/
static void
{
if (hxgep->resched_id) {
"==> hxge_remove_soft_intrs: removed"));
}
}
/*ARGSUSED*/
void
{
int i;
int status;
if (!intrp->intr_registered) {
"interrupts are not registered"));
return;
}
if (intrp->intr_enabled) {
"<== hxge_intrs_enable: already enabled"));
return;
}
intrp->intr_added);
"block enable - status 0x%x total inums #%d\n",
} else {
for (i = 0; i < intrp->intr_added; i++) {
"ddi_intr_enable:enable - status 0x%x "
"total inums %d enable inum #%d\n",
if (status == DDI_SUCCESS) {
}
}
}
}
/*ARGSUSED*/
static void
{
int i;
if (!intrp->intr_registered) {
"interrupts are not registered"));
return;
}
intrp->intr_added);
} else {
for (i = 0; i < intrp->intr_added; i++) {
}
}
}
static hxge_status_t
{
int status;
return (HXGE_ERROR);
"hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
macp->m_src_addr[0],
if (status != 0) {
"hxge_mac_register failed (status %d instance %d)",
return (HXGE_ERROR);
}
return (HXGE_OK);
}
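/*
 * Sketch (assumption, not the driver's exact code): the shape of the
 * GLDv3 registration done by hxge_mac_register() above -- allocate a
 * mac_register_t, fill in the driver, dip, MAC address and callback
 * table, register, then free the template.  The SDU values below are
 * placeholders.
 */
static int
hxge_mac_register_sketch(void *drv, dev_info_t *dip, uint8_t *macaddr,
    mac_callbacks_t *cbs, mac_handle_t *mhp)
{
	mac_register_t *macp;
	int status;

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (HXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = drv;
	macp->m_dip = dip;
	macp->m_src_addr = macaddr;
	macp->m_callbacks = cbs;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = 1500;		/* placeholder MTU */

	status = mac_register(macp, mhp);
	mac_free(macp);

	return (status == 0 ? HXGE_OK : HXGE_ERROR);
}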
static int
{
/*
 * Loop through the existing per-Hydra hardware list.
 */
"==> hxge_init_common_dev: hw_p $%p parent dip $%p",
"==> hxge_init_common_device: "
"hw_p $%p parent dip $%p ndevs %d (found)",
break;
}
}
"==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
hxge_hw_list = hw_p;
}
"==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
return (HXGE_OK);
}
static void
{
"<== hxge_uninit_common_dev (no common)"));
return;
}
"==> hxge_uninit_common_dev: "
"hw_p $%p parent dip $%p ndevs %d (found)",
}
"==> hxge_uninit_common_dev: "
"hw_p $%p parent dip $%p ndevs %d (last)",
if (hw_p == hxge_hw_list) {
"==> hxge_uninit_common_dev:"
"remove head "
"hw_p $%p parent dip $%p "
"ndevs %d (head)",
} else {
"==> hxge_uninit_common_dev:"
"remove middle "
"hw_p $%p parent dip $%p "
"ndevs %d (middle)",
}
}
break;
} else {
}
}
"==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));
}
#define HXGE_MSIX_ENTRIES 32
#define HXGE_MSIX_WAIT_COUNT 10
#define HXGE_MSIX_PARITY_CHECK_COUNT 30
static void
hxge_link_poll(void *arg)
{
if (to->report_link_status ||
} else {
}
}
hxgep->msix_count = 0;
hxgep->msix_index++;
hxgep->msix_index = 0;
}
/* Restart the link status timer to check the link status */
}
static void
{
if (state == LINK_STATE_UP) {
} else {
}
}
static void
{
int i;
/* Change to use MSIx bar instead of indirect access */
for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
data0 = 0xffffffff - i;
}
/* Initialize ram data out buffer. */
for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
}
}
static void
{
int i;
for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
&msix_entry1);
&msix_entry2);
}
}
static void
{
"==> hxge_check_1entry_msix_table: "
"eic_msix_parerr at index: %d", i));
}
}
}
/*
 * The following function supports the
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
static int
{
int nmsi;
extern int ncpus;
/*
 * The maximum MSI-X requested will be 8.
 * If the # of CPUs is less than 8, we will request
 * # MSI-X based on the # of CPUs.
 */
if (ncpus >= HXGE_MSIX_REQUEST_10G) {
	nmsi = HXGE_MSIX_REQUEST_10G;
} else {
	nmsi = ncpus;
}
"==>hxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
return (nmsi);
}