/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 QLogic Corporation. All rights reserved.
*/
#include <qlge.h>
/*
* Local variables
*/
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/*
* Local function prototypes
*/
static void ql_free_resources(qlge_t *);
static void ql_fini_kstats(qlge_t *);
static void ql_read_conf(qlge_t *);
static int ql_bringdown_adapter(qlge_t *);
static int ql_bringup_adapter(qlge_t *);
static int ql_asic_reset(qlge_t *);
static void ql_wake_mpi_reset_soft_intr(qlge_t *);
/*
* TX DMA mapping handlers allow multiple scatter-gather lists
*/
DMA_ATTR_V0, /* dma_attr_version */
QL_DMA_LOW_ADDRESS, /* low DMA address range */
QL_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */
QL_DMA_XFER_COUNTER, /* DMA counter register */
QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment, default - 8 */
QL_DMA_BURSTSIZES, /* DMA burstsizes */
QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
QL_MAX_TX_DMA_HANDLES, /* s/g list length */
QL_DMA_GRANULARITY, /* granularity of device */
DDI_DMA_RELAXED_ORDERING /* DMA transfer flags */
};
/*
*/
DMA_ATTR_V0, /* dma_attr_version */
QL_DMA_LOW_ADDRESS, /* low DMA address range */
QL_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */
QL_DMA_XFER_COUNTER, /* DMA counter register */
QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment, default - 8 */
QL_DMA_BURSTSIZES, /* DMA burstsizes */
QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
1, /* s/g list length, i.e no sg list */
QL_DMA_GRANULARITY, /* granularity of device */
QL_DMA_XFER_FLAGS /* DMA transfer flags */
};
/*
* Receive buffers do not allow scatter-gather lists
*/
DMA_ATTR_V0, /* dma_attr_version */
QL_DMA_LOW_ADDRESS, /* low DMA address range */
QL_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */
QL_DMA_XFER_COUNTER, /* DMA counter register */
0x1, /* DMA address alignment, byte aligned */
QL_DMA_BURSTSIZES, /* DMA burstsizes */
QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
1, /* s/g list length, i.e no sg list */
QL_DMA_GRANULARITY, /* granularity of device */
DDI_DMA_RELAXED_ORDERING /* DMA transfer flags */
};
/*
* DMA access attribute structure.
*/
/* device register access from host */
};
/* host ring descriptors */
};
/* host ring buffer */
};
/*
* Hash key table for Receive Side Scaling (RSS) support
*/
0x23, 0x64, 0xa1, 0xaa, 0x37, 0xc0, 0xed, 0x05, 0x2b, 0x36,
0x50, 0x5c, 0x45, 0x1e, 0x7e, 0xc8, 0x5d, 0x2a, 0x54, 0x2f,
0xe4, 0x3d, 0x0f, 0xbb, 0x91, 0xd9, 0x25, 0x60, 0xd4, 0xf8,
0x12, 0xa0, 0x59, 0x4b, 0x9e, 0x8a, 0x51, 0xda, 0xcd, 0x49};
/*
* Shadow Registers:
* Outbound queues have a consumer index that is maintained by the chip.
* Inbound queues have a producer index that is maintained by the chip.
* For lower overhead, these registers are "shadowed" to host memory
* which allows the device driver to track the queue progress without
* PCI reads. When an entry is placed on an inbound queue, the chip will
* update the relevant index register and then copy the value to the
* shadow register in host memory.
* Currently, ql_read_sh_reg only reads the inbound queues' producer index.
*/
static inline unsigned int
{
/* re-synchronize shadow prod index dma buffer before reading */
sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
return (rtn);
}
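/*
 * Illustrative sketch (not part of the original source): a shadow-register
 * read typically re-synchronizes the host copy with ddi_dma_sync() and then
 * fetches the 32-bit index through the DMA access handle.  The handle and
 * pointer names below are placeholders, not the driver's real fields.
 */
#if 0	/* example only */
static uint32_t
example_read_sh_reg(ddi_dma_handle_t sh_dma_handle,
    ddi_acc_handle_t sh_acc_handle, uint32_t *prod_idx_sh_reg)
{
	/* make the chip's latest write visible to the CPU */
	(void) ddi_dma_sync(sh_dma_handle, 0, sizeof (uint32_t),
	    DDI_DMA_SYNC_FORKERNEL);
	/* read the shadowed producer index without a PCI read */
	return (ddi_get32(sh_acc_handle, prod_idx_sh_reg));
}
#endif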
/*
* Read 32 bit atomically
*/
{
/*
* atomic_add_32_nv returns the new value after the add,
* we are adding 0 so we should get the original value
*/
return (atomic_add_32_nv(target, 0));
}
/*
* Set 32 bit atomically
*/
void
{
}
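/*
 * Illustrative sketch (example only): a 32-bit value can be stored
 * atomically with atomic_swap_32() from <sys/atomic.h>; the previous
 * value returned by the swap is simply discarded.
 */
#if 0	/* example only */
static void
example_atomic_set_32(volatile uint32_t *target, uint32_t newval)
{
	(void) atomic_swap_32(target, newval);
}
#endif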
/*
* Setup device PCI configuration registers.
* Kernel context.
*/
static void
{
uint16_t w;
/*
* we want to respect framework's setting of PCI
* configuration space command register and also
* want to make sure that all bits of interest to us
* are properly set in PCI Command register(0x04).
* PCI_COMM_IO 0x1 I/O access enable
* PCI_COMM_MAE 0x2 Memory access enable
* PCI_COMM_ME 0x4 bus master enable
* PCI_COMM_MEMWR_INVAL 0x10 memory write and invalidate enable.
*/
w = (uint16_t)(w & (~PCI_COMM_IO));
/* PCI_COMM_MEMWR_INVAL | */
w = (uint16_t)(w & (~0x7000));
w = (uint16_t)(w | 0x5000);
}
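/*
 * Illustrative sketch (example only): a read-modify-write of the PCI
 * command register using the DDI PCI config access routines.  The exact
 * bit policy shown here is for illustration; the driver's intent is
 * described in the comment above.
 */
#if 0	/* example only */
static void
example_pci_cfg(ddi_acc_handle_t pci_handle)
{
	uint16_t w;

	w = pci_config_get16(pci_handle, PCI_CONF_COMM);
	w = (uint16_t)(w | PCI_COMM_MAE | PCI_COMM_ME);	/* mem + master */
	w = (uint16_t)(w & ~PCI_COMM_IO);		/* no I/O space */
	pci_config_put16(pci_handle, PCI_CONF_COMM, w);
}
#endif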
/*
* This routine performs the necessary steps to set GLD mac information
* such as Function number, xgmac mask and shift bits
*/
static int
{
/* set default value */
goto exit;
} else {
"nic0 function number %d,"
"nic1 function number %d "
"use default\n",
goto exit;
} else {
}
}
/* Get the function number that the driver is associated with */
/* The driver is loaded on a non-NIC function? */
goto exit;
}
/* network port 0? */
} else {
}
rval = DDI_SUCCESS;
exit:
return (rval);
}
/*
* write to doorbell register
*/
void
{
}
/*
* read from doorbell register
*/
{
return (ret);
}
/*
* This function waits for a specific bit to come ready
* in a given register. It is used mostly by the initialization
* process, but is also used by kernel thread APIs such as
* netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
*/
static int
{
while (count) {
/* check for errors */
break;
return (DDI_SUCCESS);
count--;
}
"Waiting for reg %x to come ready failed.", reg);
}
return (DDI_FAILURE);
}
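/*
 * Illustrative sketch (example only): a bounded poll of a device register
 * until a "ready" bit is set, checking an error bit on each pass.  The
 * register handle, bit masks and retry/delay constants are placeholders,
 * not the driver's real definitions.
 */
#if 0	/* example only */
static int
example_wait_reg_rdy(ddi_acc_handle_t regs, uint32_t *reg,
    uint32_t rdy_bit, uint32_t err_bit)
{
	int count = 1000;		/* placeholder retry count */
	uint32_t val;

	while (count) {
		val = ddi_get32(regs, reg);
		if (val & err_bit)
			break;		/* device reported an error */
		if (val & rdy_bit)
			return (DDI_SUCCESS);
		drv_usecwait(10);	/* placeholder delay, microseconds */
		count--;
	}
	return (DDI_FAILURE);
}
#endif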
/*
* The CFG register is used to download TX and RX control blocks
* to the chip. This function waits for an operation to complete.
*/
static int
{
}
/*
* Used to issue init control blocks to hw. Maps control block,
* sets address, triggers download, waits for completion.
*/
static int
{
if (status != DDI_SUCCESS) {
goto exit;
}
if (status != DDI_SUCCESS) {
goto exit;
}
/*
* Wait for the bit to clear after signaling hw.
*/
exit:
return (status);
}
/*
* Initialize adapter instance
*/
static int
{
int i;
/* Default value */
for (i = 0; i < MAX_RX_RINGS; i++) {
qlge->rx_interrupts[i] = 0;
}
/*
* Set up the operating parameters.
*/
qlge->multicast_list_count = 0;
/*
* Set up the max number of unicast addresses in the list
*/
/*
* read user defined properties in .conf file
*/
/* choose Memory Space mapping and get Vendor Id, Device ID etc */
qlge->ip_hdr_offset = 0;
/* Schultz card */
/* enable just ipv4 chksum offload for Schultz */
/*
* Schultz firmware does not do pseudo IP header checksum
* calculation; it needs to be done by the driver
*/
if (qlge->lso_enable)
/* Schultz must split packet header */
}
/* Set Function Number and some of the iocb mac information */
return (DDI_FAILURE);
/* Read network settings from NVRAM */
/* After nvram is read successfully, update dev_addr */
for (i = 0; i < ETHERADDRL; i++) {
}
} else {
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* This hardware semaphore provides the mechanism for exclusive access to
* resources shared between the NIC driver, MPI firmware,
* FCOE firmware and the FC driver.
*/
static int
{
switch (sem_mask) {
case SEM_XGMAC0_MASK:
break;
case SEM_XGMAC1_MASK:
break;
case SEM_ICB_MASK:
break;
case SEM_MAC_ADDR_MASK:
break;
case SEM_FLASH_MASK:
break;
case SEM_PROBE_MASK:
break;
case SEM_RT_IDX_MASK:
break;
case SEM_PROC_REG_MASK:
break;
default:
return (DDI_FAILURE);
}
}
/*
* Lock a specific bit of Semaphore register to gain
* access to a particular shared register
*/
int
{
while (wait_count) {
return (DDI_SUCCESS);
qlge_delay(100);
wait_count--;
}
return (DDI_FAILURE);
}
/*
* Unlock a specific bit of Semaphore register to release
* access to a particular shared register
*/
void
{
}
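/*
 * Illustrative usage sketch (example only): the usual pattern for the
 * semaphore helpers above is acquire, touch the shared resource, release.
 * The flash access in the middle is a placeholder.
 */
#if 0	/* example only */
	if (ql_sem_spinlock(qlge, SEM_FLASH_MASK) != DDI_SUCCESS)
		return (DDI_FAILURE);
	/* ... access the flash registers here ... */
	ql_sem_unlock(qlge, SEM_FLASH_MASK);
#endif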
/*
* Get property value from configuration file.
*
* string = property string pointer.
*
* Returns:
* 0xFFFFFFFF = no property else property value.
*/
static uint32_t
{
/* Get adapter instance parameter. */
(int)0xffffffff);
/* Adapter instance parameter found? */
if (data == 0xffffffff) {
/* No, get default parameter. */
string, (int)0xffffffff);
}
return (data);
}
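/*
 * Illustrative sketch (example only): a per-instance .conf property lookup
 * with ddi_prop_get_int(), falling back to the global property name when
 * the instance-specific one is absent.  The "hba%d-%s" naming convention
 * shown here is an assumption for illustration.
 */
#if 0	/* example only */
	char buf[256];
	uint32_t data;

	(void) snprintf(buf, sizeof (buf), "hba%d-%s", instance, string);
	data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, buf,
	    (int)0xffffffff);
	if (data == 0xffffffff) {
		/* instance-specific property not found, use the global one */
		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
		    string, (int)0xffffffff);
	}
#endif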
/*
* Read user setting from configuration file.
*/
static void
{
/* clear configuration flags */
/* Set up the default ring sizes. */
/* if data is valid */
}
}
/* if data is valid */
}
}
/* if data is valid */
}
}
/* if data is valid */
}
}
data == 0) {
} else if (data == 1) {
}
}
/* Get mtu packet size. */
}
}
}
/* Get pause mode, default is Per Priority mode. */
if (data <= PAUSE_MODE_PER_PRIORITY) {
}
}
/* Receive interrupt delay */
/* if data is valid */
}
}
/* Rx inter-packet delay. */
/* if data is valid */
}
}
/* Transmit interrupt delay */
/* if data is valid */
}
}
/* Tx inter-packet delay. */
/* if data is valid */
}
}
/* Get split header payload_copy_thresh. */
/* if data is valid */
}
}
/* large send offload (LSO) capability. */
/* if data is valid */
}
}
/* dcbx capability. */
/* if data is valid */
}
}
/* fault management enable */
}
}
/*
* Enable global interrupt
*/
static void
{
}
/*
* Disable global interrupt
*/
static void
{
}
/*
* Enable one ring interrupt
*/
void
{
/*
* Always enable if we're MSIX multi interrupts and
* it's not the default (zeroth) interrupt.
*/
return;
}
("%s(%d): write %x to intr enable register \n",
}
}
/*
* ql_forced_disable_completion_interrupt
* Used by call from OS, may be called without
* a pending interrupt so force the disable
*/
{
return (var);
}
return (var);
}
/*
* Disable a completion interrupt
*/
void
{
/*
* HW disables for us if we're MSIX multi interrupts and
* it's not the default (zeroth) interrupt.
*/
return;
}
}
/*
* Enable all completion interrupts
*/
static void
{
int i;
/*
* Set the count to 1 for Legacy / MSI interrupts or for the
* default interrupt (0)
*/
}
}
}
/*
* Disable all completion interrupts
*/
static void
{
int i;
/*
* Set the count to 0 for Legacy / MSI interrupts or for the
* default interrupt (0)
*/
}
}
/*
* Update small buffer queue producer index
*/
static void
{
/* Update the buffer producer index */
rx_ring->sbq_prod_idx));
}
/*
* Update large buffer queue producer index
*/
static void
{
/* Update the buffer producer index */
rx_ring->lbq_prod_idx));
}
/*
* Adds a small buffer descriptor to end of its in use list,
* assumes sbq_lock is already taken
*/
static void
{
inuse_idx++;
inuse_idx = 0;
}
/*
* Get a small buffer descriptor from its in use list
*/
static struct bq_desc *
{
/* Pick from head of in use list */
inuse_idx++;
inuse_idx = 0;
/* if mp is NULL */
/* try to remap mp again */
}
}
return (sbq_desc);
}
/*
* Add a small buffer descriptor to its free list
*/
static void
{
/* Add to the end of free list */
free_idx++;
free_idx = 0;
}
/*
* Get a small buffer descriptor from its free list
*/
static struct bq_desc *
{
/* Pick from top of free list */
free_idx++;
free_idx = 0;
}
return (sbq_desc);
}
/*
* Add a large buffer descriptor to its in use list
*/
static void
{
inuse_idx++;
inuse_idx = 0;
}
/*
* Get a large buffer descriptor from in use list
*/
static struct bq_desc *
{
/* Pick from head of in use list */
inuse_idx++;
inuse_idx = 0;
/* if mp is NULL */
/* try to remap mp again */
}
}
return (lbq_desc);
}
/*
* Add a large buffer descriptor to free list
*/
static void
{
/* Add to the end of free list */
free_idx++;
free_idx = 0;
}
/*
* Get a large buffer descriptor from its free list
*/
static struct bq_desc *
{
/* Pick from head of free list */
free_idx++;
free_idx = 0;
}
return (lbq_desc);
}
/*
* Add a small buffer descriptor to free list
*/
static void
{
/*
* Sync access
*/
/*
* If we are freeing the buffers as a result of adapter unload, get out
*/
return;
}
#ifdef QLGE_LOAD_UNLOAD
if (rx_ring->rx_indicate == 0)
#endif
#ifdef QLGE_TRACK_BUFFER_USAGE
if (sb_consumer_idx > sb_producer_idx)
else
#endif
#ifdef QLGE_LOAD_UNLOAD
" sbq_desc index %d.",
#endif
if (alloc_memory) {
}
}
/* Got the packet back from the stack; decrement rx_indicate count */
/* Rearm if possible */
/* Get first one from free list */
sbq_entry++;
rx_ring->sbq_prod_idx++;
rx_ring->sbq_prod_idx = 0;
}
/* Add to end of in use list */
}
/* Update small buffer queue producer index */
}
}
/*
* rx recycle call back function
*/
static void
{
return;
}
/*
* Add a large buffer descriptor to free list
*/
static void
{
/* Sync access */
/*
* If we are freeing the buffers as a result of adapter unload, get out
*/
return;
}
#ifdef QLGE_LOAD_UNLOAD
if (rx_ring->rx_indicate == 0)
#endif
#ifdef QLGE_TRACK_BUFFER_USAGE
if (lb_consumer_idx > lb_producer_idx)
else
}
#endif
#ifdef QLGE_LOAD_UNLOAD
"lbq_desc index %d",
#endif
if (alloc_memory) {
}
}
/* Got the packet back from the stack; decrement rx_indicate count */
/* Rearm if possible */
/* Get first one from free list */
lbq_entry++;
rx_ring->lbq_prod_idx++;
rx_ring->lbq_prod_idx = 0;
}
/* Add to end of in use list */
}
/* Update large buffer queue producer index */
}
}
/*
* rx recycle call back function
*/
static void
{
return;
}
/*
* free small buffer queue buffers
*/
static void
{
uint32_t i;
int force_cnt = 0;
for (i = 0; i < rx_ring->sbuf_free_count; i++) {
j++;
j = 0;
}
}
}
rx_ring->sbuf_free_count = 0;
j = rx_ring->sbq_use_head;
for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
j++;
j = 0;
}
}
}
rx_ring->sbuf_in_use_count = 0;
/*
* Set flag so that the callback does not allocate a new buffer
*/
force_cnt++;
}
}
}
#ifdef QLGE_LOAD_UNLOAD
#endif
sizeof (struct bq_desc *)));
}
sizeof (struct bq_desc *)));
}
}
/* Allocate small buffers */
static int
{
int i;
rx_ring->sbq_use_head = 0;
rx_ring->sbq_use_tail = 0;
rx_ring->sbuf_in_use_count = 0;
rx_ring->sbq_free_head = 0;
rx_ring->sbq_free_tail = 0;
rx_ring->sbuf_free_count = 0;
sizeof (struct bq_desc *), KM_NOSLEEP);
"!%s: sbuf_free_list alloc: failed",
__func__);
goto alloc_sbuf_err;
}
sizeof (struct bq_desc *), KM_NOSLEEP);
"!%s: sbuf_inuse_list alloc: failed",
__func__);
goto alloc_sbuf_err;
}
/* Allocate buffer */
(size_t)0, /* default alignment */
&dma_cookie) != 0) {
"!%s: ddi_dma_alloc_handle: failed",
__func__);
goto alloc_sbuf_err;
}
/* Set context for Return buffer callback */
goto alloc_sbuf_err;
}
}
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
static void
{
uint32_t i, j;
int force_cnt = 0;
j = rx_ring->lbq_free_head;
for (i = 0; i < rx_ring->lbuf_free_count; i++) {
j++;
j = 0;
}
}
rx_ring->lbuf_free_count = 0;
j = rx_ring->lbq_use_head;
for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
j++;
j = 0;
}
}
}
rx_ring->lbuf_in_use_count = 0;
/* Set flag so that callback will not allocate a new buffer */
force_cnt++;
}
}
}
#ifdef QLGE_LOAD_UNLOAD
if (force_cnt) {
}
#endif
sizeof (struct bq_desc *)));
}
sizeof (struct bq_desc *)));
}
}
/* Allocate large buffers */
static int
{
int i;
rx_ring->lbq_use_head = 0;
rx_ring->lbq_use_tail = 0;
rx_ring->lbuf_in_use_count = 0;
rx_ring->lbq_free_head = 0;
rx_ring->lbq_free_tail = 0;
rx_ring->lbuf_free_count = 0;
sizeof (struct bq_desc *), KM_NOSLEEP);
"!%s: lbuf_free_list alloc: failed",
__func__);
goto alloc_lbuf_err;
}
sizeof (struct bq_desc *), KM_NOSLEEP);
"!%s: lbuf_inuse_list alloc: failed",
__func__);
goto alloc_lbuf_err;
}
/* Allocate buffer */
(size_t)0, /* default alignment */
&dma_cookie) != 0) {
"!%s: ddi_dma_alloc_handle: failed",
__func__);
goto alloc_lbuf_err;
}
/* Set context for Return buffer callback */
goto alloc_lbuf_err;
}
} /* For all large buffers */
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
/*
* Free rx buffers
*/
static void
{
int i;
for (i = 0; i < qlge->rx_ring_count; i++) {
}
}
}
/*
* Allocate rx buffers
*/
static int
{
int i;
for (i = 0; i < qlge->rx_ring_count; i++) {
goto alloc_err;
goto alloc_err;
}
}
#ifdef QLGE_TRACK_BUFFER_USAGE
for (i = 0; i < qlge->rx_ring_count; i++) {
}
}
#endif
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
/*
* Initialize large buffer queue ring
*/
static void
{
uint16_t i;
}
}
/*
* Initialize small buffer queue ring
*/
static void
{
uint16_t i;
}
}
/*
* Calculate the pseudo-header checksum if the hardware cannot do it
*/
static void
{
/*
* update the checksum field.
*/
}
/*
* Transmit an incoming packet.
*/
mblk_t *
{
int rval;
/* cannot send messages while the link is down */
}
goto exit;
}
/* if the MAC is not started, the driver is not ready and cannot send */
" return packets",
goto exit;
}
/* we must try to send all */
/*
* if number of available slots is less than a threshold,
* then quit
*/
rval = DDI_FAILURE;
#ifdef QLGE_LOAD_UNLOAD
#endif
/*
* If we return the buffer back we are expected to call
* mac_tx_ring_update() when resources are available
*/
break;
}
if (rval != DDI_SUCCESS) {
break;
}
tx_count++;
}
/*
* After all msg blocks are mapped or copied to tx buffer,
* trigger the hardware to send!
*/
if (tx_count > 0) {
}
exit:
return (mp);
}
/*
* This function builds an mblk list for the given inbound
* completion.
*/
static mblk_t *
struct ib_mac_iocb_rsp *ib_mac_rsp)
{
/*
* Check if error flags are set
*/
if (err_flag != 0) {
if ((err_flag & IB_MAC_IOCB_RSP_ERR_OVERSIZE) != 0)
if ((err_flag & IB_MAC_IOCB_RSP_ERR_UNDERSIZE) != 0)
if ((err_flag & IB_MAC_IOCB_RSP_ERR_CRC) != 0)
#ifdef QLGE_LOAD_UNLOAD
#endif
(size_t)sizeof (struct ib_mac_iocb_rsp));
}
/* header should not be in large buffer */
err_flag |= 1;
}
/* if the whole packet is bigger than the rx buffer size */
err_flag |= 1;
}
}
/* if using rx copy mode, we need to allocate a big enough buffer */
if (rx_copy) {
BPRI_MED);
} else {
}
}
/*
* Handle the header buffer if present.
* packet header must be valid and saved in one small buffer
*/
header_len));
/* Sync access */
/*
* Validate addresses from the ASIC with the
* expected sbuf address
*/
!= ib_mac_rsp->hdr_addr) {
/* Small buffer address mismatch */
" in wrong small buffer",
goto fatal_error;
}
/* get this packet */
/* Flush DMA'd data */
/* failed on this packet, put it back for re-arming */
#ifdef QLGE_LOAD_UNLOAD
#endif
} else if (rx_copy) {
}
} else {
if ((qlge->ip_hdr_offset != 0)&&
(header_len < SMALL_BUFFER_SIZE)) {
/*
* copy the entire header to a 2-byte-aligned
* address for 8100 adapters so that the IP
* header lands on a 4-byte boundary address
*/
}
/*
* Adjust the mp payload_len to match
* the packet header payload_len
*/
}
}
/*
* the packet data, or the whole packet, can be in a small
* buffer or in one or more large buffers
*/
/*
* The data is in a single small buffer.
*/
("%d bytes in a single small buffer, sbq_desc = %p, "
"sbq_desc->bd_dma.dma_addr = %x,"
" ib_mac_rsp->data_addr = %x, mp = %p\n",
/*
* Validate addresses from the ASIC with the
* expected sbuf address
*/
!= ib_mac_rsp->data_addr) {
/* Small buffer address mismatch */
" in wrong small buffer",
goto fatal_error;
}
/* get this packet */
#ifdef QLGE_LOAD_UNLOAD
/* failed on this packet, put it back for re-arming */
#endif
} else if (rx_copy) {
}
} else {
/* Adjust the buffer length to match the payload_len */
/* Flush DMA'd data */
/*
* if the payload is too small, copy it to
* the end of the packet header
*/
(pkt_len <
}
}
/*
* The data is in a single large buffer.
*/
("%d bytes in a single large buffer, lbq_desc = %p, "
"lbq_desc->bd_dma.dma_addr = %x,"
" ib_mac_rsp->data_addr = %x, mp = %p\n",
/*
* Validate addresses from the ASIC with
* the expected lbuf address
*/
!= ib_mac_rsp->data_addr) {
/* Large buffer address mismatch */
" in wrong large buffer",
goto fatal_error;
}
/* Flush DMA'd data */
#ifdef QLGE_LOAD_UNLOAD
#endif
/* failed on this packet, put it back for re-arming */
} else if (rx_copy) {
}
} else {
/*
* Adjust the buffer length to match
* the packet payload_len
*/
/*
* if the payload is too small, copy it to
* the end of the packet header
*/
(pkt_len<
}
}
} else if (payload_len) { /* ial case */
/*
* payload available but in neither a small nor a large
* buffer, so it is saved in an IAL
*/
#ifdef QLGE_LOAD_UNLOAD
#endif
/* lrg buf addresses are saved in one small buffer */
done = 0;
cp_offset = 0;
while (!done) {
0xFFFFFFFE);
/* check if this is the last packet fragment */
curr_ial_ptr++;
/*
* The data is in one or several large buffer(s).
*/
0xFFFFFFFE);
if (ial_data_addr_low != actual_data_addr_low) {
"packet saved in wrong ial lrg buffer"
" expected %x, actual %lx",
goto fatal_error;
}
payload_len -= size;
#ifdef QLGE_LOAD_UNLOAD
"ignore bad data from large buffer");
#endif
} else if (rx_copy) {
(void) ddi_dma_sync(
0, size, DDI_DMA_SYNC_FORKERNEL);
size);
}
} else {
} else {
}
/* Flush DMA'd data */
0, size, DDI_DMA_SYNC_FORKERNEL);
size));
}
}
if (err_flag != 0) {
#ifdef QLGE_LOAD_UNLOAD
/* failed on this packet, put it back for re-arming */
#endif
} else {
}
}
/*
* some packets' headers are not split; in that case send mp2 upstream,
* otherwise concatenate message block mp2 to the tail of the message header, mp1
*/
if (!err_flag) {
if (rx_copy) {
}
} else {
if (mp1) {
if (mp2) {
("packet in mp1 and mp2\n"));
/* mp1->b_cont = mp2; */
} else {
("packet in mp1 only\n"));
}
} else if (mp2) {
}
}
}
return (mp);
/* fatal Error! */
}
if (tp) {
}
/* *mp->b_wptr = 0; */
return (NULL);
}
/*
* Bump completion queue consumer index.
*/
static void
{
rx_ring->curr_entry++;
}
}
/*
* Update completion queue consumer index.
*/
static void
{
}
/*
* Processes a SYS-Chip Event Notification Completion Event, which the chip
* posts when the link changes state or some sort of error happens.
*/
static void
{
switch (eventType) {
case SYS_EVENT_PORT_LINK_UP: /* 0x0h */
break;
case SYS_EVENT_PORT_LINK_DOWN: /* 0x1h */
break;
case SYS_EVENT_MULTIPLE_CAM_HITS : /* 0x6h */
"occurred");
break;
case SYS_EVENT_SOFT_ECC_ERR: /* 0x7h */
break;
case SYS_EVENT_MGMT_FATAL_ERR: /* 0x8h */
" error occured");
break;
case SYS_EVENT_MAC_INTERRUPT: /* 0x9h */
break;
case SYS_EVENT_PCI_ERR_READING_SML_LRG_BUF: /* 0x40h */
"buffers occured");
break;
default:
"type 0x%x occured",
break;
}
if ((soft_req & NEED_MPI_RESET) != 0) {
}
} else if ((soft_req & NEED_HW_RESET) != 0) {
}
}
}
/*
* set received packet checksum flag
*/
void
{
/* Not TCP or UDP packet? nothing more to do */
return;
/* No CKO support for IPv6 */
return;
/*
* If checksum error, don't set flags; stack will calculate
* checksum, detect the error and update statistics
*/
return;
/* TCP or UDP packet and checksum valid */
}
}
}
/*
* This function goes through h/w descriptor in one specified rx ring,
* receives the data if the descriptor status shows the data is ready.
* It returns a chain of mblks containing the received data, to be
* passed up to mac_rx_ring().
*/
mblk_t *
{
#ifdef QLGE_PERFORMANCE
#endif
#ifdef QLGE_TRACK_BUFFER_USAGE
if (consumer_idx > producer_idx)
else
#endif
("%s cq_id = %d, prod = %d, cnsmr = %d.\n",
case OPCODE_IB_MAC_IOCB:
/* Adding length of pkt header and payload */
if ((poll_bytes != QLGE_POLL_ALL) &&
continue;
}
received_bytes += length;
#ifdef QLGE_PERFORMANCE
pkt_ct++;
#endif
/*
* Increment number of packets we have
* indicated to the stack, should be
* decremented when we get it back
* or when freemsg is called
*/
#ifdef QLGE_LOAD_UNLOAD
" mac_flags %d, indicate %d",
#endif
("cq_id = %d, packet "
"dropped, mac not "
"enabled.\n",
/* rx_lock is expected to be held */
}
/*
* IP full packet has been
* successfully verified by
* H/W and is correct
*/
rx_ring->rx_packets++;
}
} else {
("cq_id = %d, packet dropped\n",
}
break;
case OPCODE_IB_SYS_EVENT_IOCB:
(struct ib_sys_event_iocb_rsp *)
net_rsp);
break;
default:
"%s Ring(%d)Hit default case, not handled!"
" dropping the packet, "
break;
}
/* increment cnsmr_idx and curr_entry */
}
#ifdef QLGE_PERFORMANCE
if (pkt_ct >= 7)
else if (pkt_ct == 6)
else if (pkt_ct == 5)
else if (pkt_ct == 4)
else if (pkt_ct == 3)
else if (pkt_ct == 2)
else if (pkt_ct == 1)
else if (pkt_ct == 0)
#endif
/* update cnsmr_idx */
/* do not enable interrupt for polling mode */
if (poll_bytes == QLGE_POLL_ALL)
return (mblk_head);
}
/* Process an outbound completion from an rx ring. */
static void
{
int j;
/*
* Release the DMA resource that is used for
* DMA binding.
*/
for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
(void) ddi_dma_unbind_handle(
tx_ring_desc->tx_dma_handle[j]);
}
/*
* Free the mblk after sending completed
*/
}
}
/* EMPTY */
("Total descriptor length did not match "
"transfer length.\n"));
}
/* EMPTY */
("Frame too short to be legal, not sent.\n"));
}
/* EMPTY */
("Frame too long, but sent anyway.\n"));
}
/* EMPTY */
("PCI backplane error. Frame not sent.\n"));
}
}
}
/*
* clean up tx completion iocbs
*/
int
{
int count = 0;
#ifdef QLGE_TRACK_BUFFER_USAGE
{
if (consumer_idx > producer_idx)
else
(producer_idx - consumer_idx);
}
#endif
/* While there are entries in the completion queue. */
("%s cq_id = %d, prod = %d, cnsmr = %d.\n", __func__,
"response packet data\n",
case OPCODE_OB_MAC_IOCB:
break;
default:
"%s Hit default case, not handled! "
"dropping the packet,"
" opcode = %x.",
break;
}
count++;
}
if (tx_ring->queue_stopped &&
/*
* The queue got stopped because the tx_ring was full.
* Wake it up, because it's now at least 25% empty.
*/
tx_ring->queue_stopped = 0;
}
/* Don't hold the lock during OS callback */
if (resume_tx)
return (count);
}
/*
* reset asic when error happens
*/
/* ARGSUSED */
static uint_t
{
int status;
(void) ql_do_stop(qlge);
/*
* Write default ethernet address to chip register Mac
* Address slot 0 and Enable Primary Mac Function.
*/
(void) ql_unicst_set(qlge,
if (status != DDI_SUCCESS)
goto error;
return (DDI_INTR_CLAIMED);
}
return (DDI_INTR_CLAIMED);
}
/*
* Reset MPI
*/
/* ARGSUSED */
static uint_t
{
(void) ql_reset_mpi_risc(qlge);
return (DDI_INTR_CLAIMED);
}
/*
* Process MPI mailbox messages
*/
/* ARGSUSED */
static uint_t
{
return (DDI_INTR_CLAIMED);
}
/* Fire up a handler to reset the MPI processor. */
void
{
}
static void
{
}
static void
{
}
/*
* This handles a fatal error, MPI activity, and the default
* rx_ring in an MSI-X multiple interrupt vector environment.
* In an MSI or legacy interrupt environment it also processes the rest of
* the rx_rings.
*/
/* ARGSUSED */
static uint_t
{
int i;
int work_done = 0;
return (DDI_INTR_CLAIMED);
}
/*
* process send completes on first stride tx ring if available
*/
if (qlge->isr_stride) {
(void) ql_clean_outbound_rx_ring(ob_ring);
}
}
/*
* Check the default queue and wake handler if active.
*/
/* check if interrupt is due to incoming packet */
work_done++;
} else {
/*
* If interrupt is not due to incoming packet, read status
* register to see if error happens or mailbox interrupt.
*/
}
"Resetting chip. Error Status Register = 0x%x",
var);
return (DDI_INTR_CLAIMED);
}
/*
* Check MPI processor activity.
*/
/*
* We've got an async event or mailbox completion.
* Handle it and clear the source of the interrupt.
*/
work_done++;
}
}
/*
* Start the DPC for each active queue.
*/
("Waking handler for rx_ring[%d].\n", i));
(void) ql_clean_outbound_rx_ring(
rx_ring);
} else {
#ifdef QLGE_LOAD_UNLOAD
"%s rx_indicate(%d) %d\n",
__func__, i,
#endif
}
work_done++;
}
}
}
}
/*
* MSI-X Multiple Vector Interrupt Handler for outbound (TX) completions.
*/
/* ARGSUSED */
static uint_t
{
(void) ql_clean_outbound_rx_ring(rx_ring);
return (DDI_INTR_CLAIMED);
}
/*
* MSI-X Multiple Vector Interrupt Handler
*/
/* ARGSUSED */
static uint_t
{
/*
* process send completes on stride tx ring if available
*/
if (qlge->isr_stride) {
(void) ql_clean_outbound_rx_ring(ob_ring);
}
}
return (DDI_INTR_CLAIMED);
}
/*
* Poll n_bytes of chained incoming packets
*/
mblk_t *
{
if (n_bytes == 0)
return (mp);
/*
* Check for fatal error.
*/
}
}
/*
* Check MPI processor activity.
*/
/*
* We've got an async event or mailbox completion.
* Handle it and clear the source of the interrupt.
*/
}
}
return (mp);
}
/*
* MSI-X Multiple Vector Interrupt Handler for inbound (RX) completions.
*/
/* ARGSUSED */
static uint_t
{
return (DDI_INTR_CLAIMED);
}
/*
*
* Allocate DMA Buffer for ioctl service
*
*/
static int
{
(size_t)0, /* alignment */
&dma_cookie) != 0) {
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
"phy_addr = 0x%lx\n",
return (DDI_SUCCESS);
}
/*
* Function to free physical memory.
*/
static void
{
if (*dma_handle != NULL) {
(void) ddi_dma_unbind_handle(*dma_handle);
if (*acc_handle != NULL)
*acc_handle = NULL;
*dma_handle = NULL;
}
}
/*
* Function to free ioctl dma buffer.
*/
static void
{
}
}
/*
* Free shadow register space used for request and completion queues
*/
static void
{
sizeof (qlge->host_copy_shadow_dma_attr));
}
sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
}
}
/*
* Allocate shadow register space for request and completion queues
*/
static int
{
&dma_cookie) != 0) {
sizeof (qlge->host_copy_shadow_dma_attr));
return (DDI_FAILURE);
}
&dma_cookie) != 0) {
sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
"for request shadow registers",
goto err_wqp_sh_area;
}
return (DDI_SUCCESS);
sizeof (qlge->host_copy_shadow_dma_attr));
return (DDI_FAILURE);
}
/*
* Initialize a tx ring
*/
static void
{
int i;
tx_ring_desc->index = i;
mac_iocb_ptr++;
tx_ring_desc++;
}
tx_ring->queue_stopped = 0;
}
/*
* Free one tx ring resources
*/
static void
{
int i, j;
}
for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
if (tx_ring_desc->tx_dma_handle[j]) {
/*
* The unbinding will happen in tx
* completion, here we just free the
* handles
*/
&(tx_ring_desc->tx_dma_handle[j]));
}
}
tx_ring_desc->oal_dma_addr = 0;
}
}
}
/* free the wqicb struct */
}
}
/*
* Allocate work (request) queue memory and transmit
* descriptors for this transmit ring
*/
static int
{
int i, j;
/* allocate dma buffers for obiocbs */
&dma_cookie) != 0) {
return (DDI_FAILURE);
}
goto err;
} else {
/*
* Allocate a large enough structure to hold the following
* 1. oal buffer MAX_SGELEMENTS * sizeof (oal_entry) bytes
* 2. copy buffer of QL_MAX_COPY_LENGTH bytes
*/
(size_t)0, /* default alignment:8 bytes boundary */
&dma_cookie) != 0) {
sizeof (tx_ring_desc->oal_dma));
"oal alloc failed.",
goto err;
}
+ (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
+ (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
/* Allocate dma handles for transmit buffers */
for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
0, &tx_ring_desc->tx_dma_handle[j])
!= DDI_SUCCESS) {
"!%s: ddi_dma_alloc_handle: "
"tx_dma_handle "
"alloc failed", __func__);
goto err;
}
}
}
}
/* alloc a wqicb control block to load this tx ring to hw */
(size_t)0, /* alignment:128 bytes boundary */
&dma_cookie) != 0) {
goto err;
}
return (DDI_SUCCESS);
err:
return (DDI_FAILURE);
}
/*
* Free one rx ring resources
*/
static void
{
/* Free the small buffer queue. */
}
/* Free the small buffer queue control blocks. */
sizeof (struct bq_desc));
}
/* Free the large buffer queue. */
}
/* Free the large buffer queue control blocks. */
sizeof (struct bq_desc));
}
/* Free cqicb struct */
}
/* Free the rx queue. */
}
}
/*
* Allocate queues and buffers for this completions queue based
* on the values in the parameter structure.
*/
static int
{
&dma_cookie) != 0) {
return (DDI_FAILURE);
}
/*
* Allocate small buffer queue.
*/
&dma_cookie) != 0) {
"%s(%d): small buffer queue allocation failed.",
goto err_mem;
}
/*
* Allocate small buffer queue control blocks.
*/
"sbq control block allocation failed.");
goto err_mem;
}
}
/*
* Allocate large buffer queue.
*/
&dma_cookie) != 0) {
goto err_mem;
}
/*
* Allocate large buffer queue control blocks.
*/
"Large buffer queue control block allocation "
"failed.");
goto err_mem;
}
}
(size_t)0, /* alignment:128 bytes boundary */
&dma_cookie) != 0) {
goto err_mem;
}
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
/*
*/
static void
{
int i;
/* free the ricb struct */
}
for (i = 0; i < qlge->tx_ring_count; i++)
for (i = 0; i < qlge->rx_ring_count; i++)
}
/*
* Allocate buffer queues, large buffers and small buffers etc
*
* This API is called in the gld_attach member function. It is called
* only once. Later reset/reboot should not re-allocate all rings and
* buffers.
*/
static int
{
int i;
/* Allocate space for our shadow registers */
if (ql_alloc_shadow_space(qlge))
return (DDI_FAILURE);
for (i = 0; i < qlge->rx_ring_count; i++) {
goto err_mem;
}
}
/* Allocate tx queue resources */
for (i = 0; i < qlge->tx_ring_count; i++) {
goto err_mem;
}
}
goto err_mem;
}
goto err_mem;
}
(size_t)0, /* alignment:128 bytes boundary */
&dma_cookie) != 0) {
goto err_mem;
}
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
/*
* Function used to allocate physical memory and zero it.
*/
static int
{
/*
* Workaround for SUN XMITS: the buffer must start and end on an 8-byte
* boundary, else the hardware will overrun the buffer. The simple fix is
* to make sure the buffer has enough room for the overrun.
*/
if (size & 7) {
}
/* Adjust the alignment if requested */
if (alignment) {
}
/*
* Allocate DMA handle
*/
dma_handle) != DDI_SUCCESS) {
__func__);
*dma_handle = NULL;
return (QL_ERROR);
}
/*
* Allocate DMA memory
*/
*acc_handle = NULL;
*dma_handle = NULL;
return (QL_ERROR);
}
__func__);
*acc_handle = NULL;
*dma_handle = NULL;
return (QL_ERROR);
}
if (cnt != 1) {
__func__);
return (QL_ERROR);
}
return (0);
}
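/*
 * Illustrative sketch (example only): the canonical DDI sequence for a
 * DMA-able buffer is alloc handle -> alloc memory -> bind address, with
 * each step unwound on failure.  Attribute, handle and variable names
 * are placeholders.
 */
#if 0	/* example only */
	if (ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_DONTWAIT, NULL,
	    &dma_handle) != DDI_SUCCESS)
		return (QL_ERROR);

	if (ddi_dma_mem_alloc(dma_handle, size, &acc_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
	    &kaddr, &real_len, &acc_handle) != DDI_SUCCESS) {
		ddi_dma_free_handle(&dma_handle);
		return (QL_ERROR);
	}

	if (ddi_dma_addr_bind_handle(dma_handle, NULL, kaddr, real_len,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
	    &dma_cookie, &cnt) != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&acc_handle);
		ddi_dma_free_handle(&dma_handle);
		return (QL_ERROR);
	}
#endif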
/*
* Function used to allocate physical memory and zero it.
*/
static int
{
/*
* Workaround for SUN XMITS: the buffer must start and end on an 8-byte
* boundary, else the hardware will overrun the buffer. The simple fix is
* to make sure the buffer has enough room for the overrun.
*/
if (size & 7) {
}
/* Adjust the alignment if requested */
if (alignment) {
}
/*
* Allocate DMA handle
*/
dma_handle) != DDI_SUCCESS) {
__func__);
*dma_handle = NULL;
return (QL_ERROR);
}
/*
* Allocate DMA memory
*/
*acc_handle = NULL;
*dma_handle = NULL;
return (QL_ERROR);
}
__func__);
*acc_handle = NULL;
*dma_handle = NULL;
return (QL_ERROR);
}
if (cnt != 1) {
__func__);
return (QL_ERROR);
}
return (0);
}
/*
* Add interrupt handlers based on the interrupt type.
* Before adding the interrupt handlers, the interrupt vectors should
* already have been allocated.
*/
static int
{
int vector = 0;
int rc, i;
case DDI_INTR_TYPE_MSIX:
/*
* Add interrupt handler for rx and tx rings: vector[0 -
* (qlge->intr_cnt -1)].
*/
value = 0;
/*
* associate interrupt vector with interrupt handler
*/
if (rc != DDI_SUCCESS) {
("Add rx interrupt handler failed. "
(void) ddi_intr_remove_handler(
}
return (DDI_FAILURE);
}
intr_ctx++;
}
break;
case DDI_INTR_TYPE_MSI:
/*
* Add interrupt handlers for the only vector
*/
if (rc != DDI_SUCCESS) {
("Add MSI interrupt handler failed: %d\n", rc));
return (DDI_FAILURE);
}
break;
case DDI_INTR_TYPE_FIXED:
/*
* Add interrupt handlers for the only vector
*/
if (rc != DDI_SUCCESS) {
("Add legacy interrupt handler failed: %d\n", rc));
return (DDI_FAILURE);
}
break;
default:
return (DDI_FAILURE);
}
/* Enable interrupts */
/* Block enable */
} else { /* Non block enable */
}
}
return (DDI_SUCCESS);
}
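/*
 * Illustrative sketch (example only): attaching a handler to an allocated
 * vector and enabling it.  Whether ddi_intr_block_enable() or per-vector
 * ddi_intr_enable() is used depends on the DDI_INTR_FLAG_BLOCK capability,
 * as noted above.  The handler name and handle array are placeholders.
 */
#if 0	/* example only */
	if (ddi_intr_add_handler(htable[0], example_isr,
	    (caddr_t)qlge, NULL) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (intr_cap & DDI_INTR_FLAG_BLOCK)
		(void) ddi_intr_block_enable(htable, intr_cnt);
	else
		(void) ddi_intr_enable(htable[0]);
#endif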
/*
* Here we build the intr_ctx structures based on
* our rx_ring count and intr vector count.
* The intr_ctx structure is used to hook each vector
* to possibly different handlers.
*/
static void
{
int i = 0;
/*
* Each rx_ring has its own intr_ctx since we
* have separate vectors for each queue.
* This is only true when MSI-X is enabled.
*/
/*
*/
INTR_EN_IHD | i;
INTR_EN_IHD | i;
| i;
if (i == 0) {
/*
* async events.
*/
/*
* Outbound queue is for outbound completions
* only.
*/
if (qlge->isr_stride)
else
} else {
/*
* Inbound queues handle unicast frames only.
*/
if (qlge->isr_stride)
else
}
}
/*
*/
INTR_EN_IHD | iv;
INTR_EN_IHD | iv;
| iv;
/*
* Outbound queue is for outbound completions
* only.
*/
} else {
/*
* Inbound queues handle unicast frames only.
*/
}
}
} else {
/*
* All rx_rings use the same intr_ctx since
* there is only one vector.
*/
/*
*/
/*
* Single interrupt means one handler for all rings.
*/
for (i = 0; i < qlge->rx_ring_count; i++)
}
}
/*
* Free allocated interrupts.
*/
static void
{
int i;
int rc;
/* Disable all interrupts */
/* Call ddi_intr_block_disable() */
} else {
}
}
}
if (rc != DDI_SUCCESS) {
/* EMPTY */
rc));
}
}
}
}
}
/*
* Allocate interrupt vectors
* For legacy and MSI, only 1 handle is needed.
* For MSI-X, if fewer than 2 vectors are available, return failure.
* Upon success, this maps the vectors to rx and tx rings for
* interrupts.
*/
static int
{
int minimum;
int rc;
switch (intr_type) {
case DDI_INTR_TYPE_FIXED:
minimum = 1;
break;
case DDI_INTR_TYPE_MSI:
minimum = 1;
break;
case DDI_INTR_TYPE_MSIX:
/*
* Ideal number of vectors for the adapter is
* # rss rings + tx completion rings for default completion
* queue.
*/
if (request > (MAX_RX_RINGS))
minimum = 2;
break;
default:
return (DDI_FAILURE);
}
/*
* Get number of supported interrupts
*/
return (DDI_FAILURE);
}
/*
* Get number of available interrupts
*/
("Get interrupt available number failed. Return:"
return (DDI_FAILURE);
}
}
actual = 0;
/*
* Allocate an array of interrupt handles
*/
if (rc != DDI_SUCCESS) {
" %d, request: %d, actual: %d",
goto ql_intr_alloc_fail;
}
/*
* If the actual number of vectors is less than the minimum
* then fail.
*/
"Insufficient interrupt handles available: %d", actual);
goto ql_intr_alloc_fail;
}
/*
* For MSI-X, actual might force us to reduce number of tx & rx rings
*/
} else if (actual < MAX_RX_RINGS) {
}
}
/*
* Get priority for first vector, assume remaining are all the same
*/
if (rc != DDI_SUCCESS) {
goto ql_intr_alloc_fail;
}
if (rc != DDI_SUCCESS) {
goto ql_intr_alloc_fail;
}
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
/*
* Allocate interrupt vector(s) for one of the following interrupt types, MSI-X,
* MSI or Legacy. In MSI and Legacy modes we only support a single receive and
* transmit queue.
*/
int
{
int intr_types;
int rval;
/*
* Get supported interrupt types
*/
!= DDI_SUCCESS) {
return (DDI_FAILURE);
}
/* Install MSI-X interrupts */
if ((intr_types & DDI_INTR_TYPE_MSIX) != 0) {
if (rval == DDI_SUCCESS) {
return (rval);
}
}
/*
* We will have 2 completion queues in MSI / Legacy mode,
* Queue 0 for default completions
* Queue 1 for transmit completions
*/
/*
* Add the h/w interrupt handler and initialise mutexes
*/
rval = DDI_FAILURE;
/*
* If OS supports MSIX interrupt but fails to allocate, then try
* MSI interrupt. If MSI interrupt allocation fails also, then roll
* back to fixed interrupt.
*/
if (intr_types & DDI_INTR_TYPE_MSI) {
if (rval == DDI_SUCCESS) {
}
}
/* Try Fixed interrupt Legacy mode */
if (rval != DDI_SUCCESS) {
if (rval != DDI_SUCCESS) {
"allocation failed",
} else {
}
}
return (rval);
}
static void
{
int i;
for (i = 0; i < qlge->tx_ring_count; i++) {
}
for (i = 0; i < qlge->rx_ring_count; i++) {
}
}
/*
* Frees all resources allocated during attach.
*
* Input:
* dip = pointer to device information structure.
* sequence = bits indicating resources to free.
*
* Context:
* Kernel context.
*/
static void
{
/* Disable driver timer */
}
/* Nothing to do, macp is already freed */
}
}
}
}
}
}
}
}
}
}
/*
* free flash flt table that allocated in attach stage
*/
}
}
/* finally, free qlge structure */
}
}
/*
* Set promiscuous mode of the driver
* Caller must hold HW_LOCK
*/
void
{
if (mode) {
RT_IDX_VALID, 1);
} else {
RT_IDX_VALID, 0);
}
}
/*
* Write 'data1' to Mac Protocol Address Index Register and
* 'data2' to Mac Protocol Address Data Register
* Assuming that the Mac Protocol semaphore lock has been acquired.
*/
static int
{
"timeout.");
goto out;
}
out:
return (return_value);
}
/*
* Enable the 'index'ed multicast address in the host memory's multicast_list
*/
int
{
/* Acquire the required semaphore */
return (rtn_val);
}
/* Program Offset0 - lower 32 bits of the MAC address */
offset = 0;
goto out;
/* Program offset1: upper 16 bits of the MAC address */
offset = 1;
goto out;
}
out:
return (rtn_val);
}
/*
* Disable the 'index'ed multicast address in the host memory's multicast_list
*/
int
{
/* Acquire the required semaphore */
return (rtn_val);
}
/* Program Offset0 - lower 32 bits of the MAC address */
offset = 0;
value2 =
goto out;
}
/* Program offset1: upper 16 bits of the MAC address */
offset = 1;
value2 = 0;
goto out;
}
out:
return (rtn_val);
}
/*
* Add a new multicast address to the list of supported multicast addresses
* This API is called after OS called gld_set_multicast (GLDv2)
* or m_multicst (GLDv3)
*
* Restriction:
* The number of maximum multicast address is limited by hardware.
*/
int
{
int status;
if ((ep[0] & 01) == 0) {
goto exit;
}
/* if there is available space in multicast_list, then add it */
if (index < MAX_MULTICAST_LIST_SIZE) {
/* increment the total number of addresses in multicast list */
("%s(%d): added to index of multicast list= 0x%x, "
if (index > MAX_MULTICAST_HW_SIZE) {
if (!qlge->multicast_promisc) {
RT_IDX_MCAST, 1);
if (status) {
"Failed to init routing reg "
"for mcast promisc mode.");
goto exit;
}
}
}
} else {
}
exit:
return (rval);
}
/*
* Remove an old multicast address from the list of supported multicast
* addresses. This API is called after OS called gld_set_multicast (GLDv2)
* or m_multicst (GLDv3)
* The number of maximum multicast address is limited by hardware.
*/
int
{
int i = 0;
int rmv_index = 0;
int status;
for (i = 0; i < total; i++) {
continue;
}
rmv_index = i;
/* block-move the rest of the other multicast addresses forward */
if (length > 0) {
}
/*
* there is a deletion in multicast list table,
* re-enable them
*/
i++) {
(void) ql_add_multicast_address(qlge, i);
}
/* and disable the last one */
(void) ql_remove_multicast_address(qlge, i);
/* disable multicast promiscuous mode */
if (qlge->multicast_promisc) {
RT_IDX_MCAST, 0);
if (status) {
"Failed to init routing reg for "
"mcast promisc mode.");
goto exit;
}
/* write to config register */
}
}
break;
}
exit:
return (DDI_SUCCESS);
}
/*
* Read a XGMAC register
*/
int
{
/* wait for XGMAC Address register RDY bit set */
goto out;
}
/* start rx transaction */
/*
* wait for XGMAC Address register RDY bit set,
* which indicates data is ready
*/
goto out;
}
/* read data from XGAMC_DATA register */
out:
return (rtn_val);
}
/*
* Implement checksum offload for IPv4 IP packets
*/
static void
struct ob_mac_iocb_req *mac_iocb_ptr)
{
/* Is this vlan packet? */
mac_hdr_len = sizeof (struct ether_vlan_header);
} else {
mac_hdr_len = sizeof (struct ether_header);
}
/* Is this IPv4 or IPv6 packet? */
IPV4_VERSION) {
} else {
/* EMPTY */
("%s(%d) : IPv4 None IP packet type 0x%x\n",
}
}
/* ipV4 packets */
("%s(%d) : IPv4 header length using IPH_HDR_LENGTH:"
if (pflags & HCK_IPV4_HDRCKSUM) {
}
if (pflags & HCK_FULLCKSUM) {
tcp_hdr =
(struct tcphdr *)(void *)
tcp_udp_hdr_off <<= 6;
/*
* if the chip is unable to do the pseudo-header
* checksum calculation, do it here, then put the
* result in the data passed to the chip
*/
}
tcp_udp_hdr_len = sizeof (struct udphdr);
tcp_udp_hdr_off <<= 6;
+ tcp_udp_hdr_len);
/*
* if the chip is unable to calculate the pseudo-header
* checksum, do it here, then put the result in
* the data passed to the chip
*/
}
}
}
}
}
/*
* MAC frame transmission with TCP large segment offload is performed in the
* same way as the MAC frame transmission with checksum offload with the
* exception that the maximum TCP segment size (MSS) must be specified to
* allow the chip to segment the data into legal sized frames.
* The host also needs to calculate a pseudo-header checksum over the
* following fields:
* Source IP Address, Destination IP Address, and the Protocol.
* The TCP length is not included in the pseudo-header calculation.
* The pseudo-header checksum is placed in the TCP checksum field of the
* prototype header.
*/
static void
{
/*
* Calculate the LSO pseudo-header checksum.
*/
/*
* update the checksum field.
*/
}
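/*
 * Illustrative sketch (example only): the IPv4 pseudo-header sum over the
 * source address, destination address and protocol, folded to 16 bits and
 * stored in the TCP checksum field as described above.  Structure layout
 * follows <netinet/ip.h> and <netinet/tcp.h>; the pointer names are
 * placeholders.
 */
#if 0	/* example only */
	uint32_t sum;
	uint16_t *src = (uint16_t *)(void *)&iphdr->ip_src;
	uint16_t *dst = (uint16_t *)(void *)&iphdr->ip_dst;

	sum = src[0] + src[1] + dst[0] + dst[1] + htons(IPPROTO_TCP);
	sum = (sum & 0xFFFF) + (sum >> 16);	/* fold the carries */
	sum = (sum & 0xFFFF) + (sum >> 16);
	tcp_hdr->th_sum = (uint16_t)sum;
#endif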
/*
* For IPv4 IP packets, distribute the tx packets evenly among tx rings
*/
#define mix(a, b, c) \
{ \
a -= b; a -= c; a ^= (c>>13); \
b -= c; b -= a; b ^= (a<<8); \
c -= a; c -= b; c ^= (b>>13); \
a -= b; a -= c; a ^= (c>>12); \
b -= c; b -= a; b ^= (a<<16); \
c -= a; c -= b; c ^= (b>>5); \
a -= b; a -= c; a ^= (c>>3); \
b -= c; b -= a; b ^= (a<<10); \
c -= a; c -= b; c ^= (b>>15); \
}
register ub1 *k; /* the key */
{
/* Set up the internal state */
a = b = 0x9e3779b9; /* the golden ratio; an arbitrary value */
c = initval; /* the previous hash value */
/* handle most of the key */
while (len >= 12) {
mix(a, b, c);
k += 12;
len -= 12;
}
/* handle the last 11 bytes */
c += length;
/* all the case statements fall through */
switch (len) {
/* FALLTHRU */
/* FALLTHRU */
/* FALLTHRU */
/* the first byte of c is reserved for the length */
/* FALLTHRU */
/* FALLTHRU */
/* FALLTHRU */
/* FALLTHRU */
case 5 : b += k[4];
/* FALLTHRU */
/* FALLTHRU */
/* FALLTHRU */
/* FALLTHRU */
case 1 : a += k[0];
/* case 0: nothing left to add */
}
mix(a, b, c);
/* report the result */
return (c);
}
{
uint32_t h = 0; /* 0 by default */
return (tx_ring_id);
/* Is this vlan packet? */
mac_hdr_len = sizeof (struct ether_vlan_header);
} else {
mac_hdr_len = sizeof (struct ether_header);
}
/* Is this IPv4 or IPv6 packet? */
== IPV4_VERSION) {
}
if (((unsigned long)iphdr) & 0x3) {
/* IP hdr not 4-byte aligned */
return (tx_ring_id);
}
}
/* ipV4 packets */
if (iphdr) {
}
tx_ring_id = 0;
}
}
return (tx_ring_id);
}
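/*
 * Illustrative sketch (example only): one way to spread flows across tx
 * rings is to hash the IP address pair with the Jenkins hash routine above
 * (called hash_key() here as a placeholder) and reduce the result modulo
 * the ring count.
 */
#if 0	/* example only */
	uint32_t key[2];
	uint32_t h;

	key[0] = iphdr->ip_src.s_addr;
	key[1] = iphdr->ip_dst.s_addr;
	h = hash_key((ub1 *)key, sizeof (key), 0);
	tx_ring_id = (uint16_t)(h % qlge->tx_ring_count);
#endif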
/*
* Tell the hardware to do Large Send Offload (LSO)
*
* Some fields in ob_mac_iocb need to be set so hardware can know what is
* the incoming packet, TCP or UDP, whether a VLAN tag needs to be inserted
* in the right place of the packet etc, thus, hardware can process the
* packet correctly.
*/
static void
struct ob_mac_iocb_req *mac_iocb_ptr)
{
/* Is this vlan packet? */
mac_hdr_len = sizeof (struct ether_vlan_header);
} else {
mac_hdr_len = sizeof (struct ether_header);
}
/* Is this IPv4 or IPv6 packet? */
IPV4_VERSION) {
} else {
/* EMPTY */
" type 0x%x\n",
}
}
("%s(%d) : IPv4 header length using IPH_HDR_LENGTH: %d"
"packet\n",
tcp_udp_hdr_off <<= 6;
/*
* if the chip is unable to calculate the pseudo-header
* checksum, do it here, then put the result
* in the data passed to the chip
*/
+ ip_hdr_len);
"packet\n",
tcp_udp_hdr_off <<= 6;
/*
* if the chip is unable to do the pseudo-header
* checksum calculation, do it here, then put the
* result in the data passed to the chip
*/
}
}
}
}
/*
* Generic packet sending function which is used to send one packet.
*/
int
{
uint_t i = 0;
int j = 0, frags = 0;
int status;
int force_pullup = 0;
/* Calculate number of data and segments in the incoming message */
frags++;
}
if (total_len >= QL_LSO_MAX) {
#ifdef QLGE_LOAD_UNLOAD
#endif
return (NULL);
}
if (bp[0] & 1) {
ETHERADDRL) == 0) {
} else {
}
}
/* claim a free slot in tx ring */
/* get the tx descriptor */
/*
* Decide to use DMA map or copy mode.
* DMA map mode must be used when the total msg length is more than the
* tx buffer length.
*/
if (total_len > tx_buf_len)
else if (max_seg_len > QL_MAX_COPY_LENGTH)
else
if (qlge->chksum_cap) {
if (qlge->lso_enable) {
}
}
/* concatenate all frags into one large packet if too fragmented */
force_pullup) {
frags = 1;
} else {
goto bad;
}
}
tx_cb->tx_dma_handle_used = 0;
/* if too many tx dma handles needed */
if (j >= QL_MAX_TX_DMA_HANDLES) {
if (!force_pullup) {
force_pullup = 1;
goto do_pullup;
} else {
goto bad;
}
}
if (nbyte == 0)
continue;
0, &dma_cookie, &ncookies);
"length: %d, spans in %d cookies\n",
if (status != DDI_DMA_MAPPED) {
goto bad;
}
/*
* Each fragment can span several cookies. One cookie
* will use one tx descriptor to transmit.
*/
oal_entries++) {
/*
* The number of TX descriptors that can be
* saved in tx iocb and oal list is limited
*/
if (oal_entries > max_oal_entries) {
if (!force_pullup) {
force_pullup = 1;
goto do_pullup;
} else {
goto bad;
}
}
if ((oal_entries == TX_DESC_PER_IOCB) &&
!oal_entry) {
/*
* Time to switch to an OAL list.
* The last entry should be copied
* to the first entry in the OAL list
*/
tx_entry =
TX_DESC_PER_IOCB-1];
sizeof (*oal_entry));
/*
* last entry should be updated to
* point to the extended oal list itself
*/
/*
* Point tx_entry to the oal list
* second entry
*/
}
if (i > 1)
tx_cb->tx_dma_handle[j],
&dma_cookie);
}
j++;
}
/*
* if OAL is used, the last oal entry in the tx iocb indicates the
* number of additional address/length pairs in the OAL list
*/
if (oal_entries > TX_DESC_PER_IOCB) {
(cpu_to_le32((sizeof (struct oal_entry) *
}
tx_cb->tx_dma_handle_used = j;
j, oal_entries));
}
off = 0;
nbyte = 0;
frags = 0;
/*
* Copy up to tx_buf_len of the transmit data
* from mp to tx buffer
*/
frags ++;
}
}
} /* End of Copy Mode */
/* Do checksum offloading */
}
/* let device know the latest outbound IOCB */
/* let device know the latest outbound OAL if necessary */
if (oal_entries > TX_DESC_PER_IOCB) {
(off_t)0,
(sizeof (struct oal_entry) *
}
} else { /* for USE_COPY mode, tx buffer has changed */
/* let device know the latest change */
/* copy buf offset */
}
/* save how the packet was sent */
/* reduce the number of available tx slot */
now = ddi_get_lbolt();
return (DDI_SUCCESS);
bad:
/*
* if for any reason the driver cannot send, free
* the message, mp
*/
now = ddi_get_lbolt();
for (i = 0; i < j; i++)
return (DDI_SUCCESS);
}
/*
* Initializes hardware and driver software flags before the driver
* is finally ready to work.
*/
int
{
int i;
int rings_done;
/* Reset adapter */
(void) ql_asic_reset(qlge);
lbq_buf_size = (uint16_t)
#ifdef QLGE_LOAD_UNLOAD
#endif
/*
* Check if any ring has buffers still with upper layers
* If buffers are pending with upper layers, we use the
* existing buffers and don't reallocate new ones
* Unfortunately there is no way to evict buffers from
* upper layers. Using buffers with the current size may
* cause slightly sub-optimal performance, but that seems
* to be the easiest way to handle this situation.
*/
rings_done = 0;
for (i = 0; i < qlge->rx_ring_count; i++) {
if (rx_ring->rx_indicate == 0)
rings_done++;
else
break;
}
/*
* No buffers pending with upper layers;
* reallocate them for the new MTU size
*/
/* free large buffer pool */
for (i = 0; i < qlge->rx_ring_count; i++) {
}
}
/* reallocate large buffer pool */
for (i = 0; i < qlge->rx_ring_count; i++) {
}
}
}
}
}
return (DDI_FAILURE);
}
/* if adapter is up successfully but was bad before */
}
}
/* Get current link state */
/* If driver detects a carrier on */
} else {
/* If driver detects a lack of carrier */
}
return (DDI_SUCCESS);
}
/*
* Stop currently running driver
* Driver needs to stop routing new packets to the driver and wait until
* all pending tx/rx buffers have been freed
*/
int
{
uint32_t i, j, k;
if (rc != DDI_SUCCESS) {
} else
rc = DDI_SUCCESS;
for (k = 0; k < qlge->rx_ring_count; k++) {
j = rx_ring->lbq_use_head;
#ifdef QLGE_LOAD_UNLOAD
" to free list %d\n total %d\n",
k, rx_ring->lbuf_in_use_count,
#endif
for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
j++;
j = 0;
}
}
}
rx_ring->lbq_use_head = j;
rx_ring->lbq_use_tail = j;
rx_ring->lbuf_in_use_count = 0;
j = rx_ring->sbq_use_head;
#ifdef QLGE_LOAD_UNLOAD
" to free list %d\n total %d \n",
k, rx_ring->sbuf_in_use_count,
#endif
for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
j++;
j = 0;
}
}
}
rx_ring->sbq_use_head = j;
rx_ring->sbq_use_tail = j;
rx_ring->sbuf_in_use_count = 0;
}
}
return (rc);
}
/*
* Support
*/
void
{
/*
* disable the hardware interrupt
*/
}
/*
* busy wait for 'usecs' microseconds.
*/
void
{
}
/*
* retrieve firmware details.
*/
{
}
/*
* Get current Link status
*/
static uint32_t
{
} else {
}
if (temp & bitToCheck) {
} else {
}
/* for Schultz, link Speed is fixed to 10G, full duplex */
}
return (linkState);
}
/*
* Get current link status and report to OS
*/
static void
{
/* Get current link state */
/* if link state has changed */
/* If driver detects a carrier on */
} else {
/* If driver detects a lack of carrier */
}
}
}
/*
* timer callback function executed after timer expires
*/
static void
{
}
/*
* stop the running timer if activated
*/
static void
{
/* Disable driver timer */
}
}
/*
* stop then restart timer
*/
void
{
}
/* ************************************************************************* */
/*
* Hardware K-Stats Data Structures and Subroutines
*/
/* ************************************************************************* */
/* PCI related hardware information */
{ 0, "Vendor Id" },
{ 1, "Device Id" },
{ 2, "Command" },
{ 3, "Status" },
{ 4, "Revision Id" },
{ 5, "Cache Line Size" },
{ 6, "Latency Timer" },
{ 7, "Header Type" },
{ 9, "I/O base addr" },
{ 10, "Control Reg Base addr low" },
{ 11, "Control Reg Base addr high" },
{ 12, "Doorbell Reg Base addr low" },
{ 13, "Doorbell Reg Base addr high" },
{ 14, "Subsystem Vendor Id" },
{ 15, "Subsystem Device ID" },
{ 16, "PCIe Device Control" },
{ 17, "PCIe Link Status" },
{ -1, NULL },
};
/*
* kstat update function for PCI registers
*/
static int
{
if (flag != KSTAT_READ)
return (EACCES);
return (0);
}
{ 0, "mtu"},
{ -1, NULL},
};
/*
* kstat update function for MII related information.
*/
static int
{
if (flag != KSTAT_READ)
return (EACCES);
return (0);
}
/* Register information */
{ 0, "System (0x08)" },
{ 2, "Function Specific Control(0x10)" },
{ 3, "Status (0x30)" },
{ 4, "Intr Enable (0x34)" },
{ 5, "Intr Status1 (0x3C)" },
{ 6, "Error Status (0x54)" },
{ 7, "XGMAC Flow Control(0x11C)" },
{ 8, "XGMAC Tx Pause Frames(0x230)" },
{ 9, "XGMAC Rx Pause Frames(0x388)" },
{ 10, "XGMAC Rx FIFO Drop Count(0x5B8)" },
{ 11, "interrupts actually allocated" },
{ 12, "interrupts on rx ring 0" },
{ 13, "interrupts on rx ring 1" },
{ 14, "interrupts on rx ring 2" },
{ 15, "interrupts on rx ring 3" },
{ 16, "interrupts on rx ring 4" },
{ 17, "interrupts on rx ring 5" },
{ 18, "interrupts on rx ring 6" },
{ 19, "interrupts on rx ring 7" },
{ 20, "polls on rx ring 0" },
{ 21, "polls on rx ring 1" },
{ 22, "polls on rx ring 2" },
{ 23, "polls on rx ring 3" },
{ 24, "polls on rx ring 4" },
{ 25, "polls on rx ring 5" },
{ 26, "polls on rx ring 6" },
{ 27, "polls on rx ring 7" },
{ 28, "tx no resource on ring 0" },
{ 29, "tx dma bind fail on ring 0" },
{ 30, "tx dma no handle on ring 0" },
{ 31, "tx dma no cookie on ring 0" },
{ 32, "MPI firmware major version" },
{ 33, "MPI firmware minor version" },
{ 34, "MPI firmware sub version" },
{ 35, "rx no resource" },
{ -1, NULL},
};
/*
* kstat update function for device register set
*/
static int
{
int i = 0;
if (flag != KSTAT_READ)
return (EACCES);
return (0);
}
for (i = 0; i < 8; i++) {
}
for (i = 0; i < 8; i++) {
}
for (i = 0; i < qlge->rx_ring_count; i++) {
}
return (0);
}
static kstat_t *
{
char *np;
int type;
size /= sizeof (ql_ksindex_t);
return (NULL);
switch (*np) {
default:
break;
case '&':
np += 1;
break;
}
}
return (ksp);
}
/*
* Setup various kstat
*/
int
{
/* Hardware KStats */
sizeof (ql_kstats_hw), ql_kstats_get_pci_regs);
return (DDI_FAILURE);
}
/* MII KStats */
sizeof (ql_kstats_mii), ql_kstats_mii_update);
return (DDI_FAILURE);
}
/* REG KStats */
sizeof (ql_kstats_reg), ql_kstats_get_reg_and_dev_stats);
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* delete all kstat
*/
void
{
int i;
for (i = 0; i < QL_KSTAT_COUNT; i++) {
}
}
/* ************************************************************************* */
/*
* kstat end
*/
/* ************************************************************************* */
/*
* Setup the parameters for receive and transmit rings including buffer sizes
* and completion queue sizes
*/
static int
{
uint8_t i;
lbq_buf_size = (uint16_t)
/*
* rx_ring[0] is always the default queue.
*/
/*
* qlge->rx_ring_count:
* Total number of rx_rings. This includes a number
* of outbound completion handler rx_rings, and a
* number of inbound completion handler rx_rings.
* rss is only enabled if we have more than 1 rx completion
* queue. If we have a single rx completion queue
* then all rx completions go to this queue, and the last
* completion queue handles outbound (tx) completions
*/
for (i = 0; i < qlge->tx_ring_count; i++) {
/*
* The completion queue ID for the tx rings start
* immediately after the last rss completion queue.
*/
}
for (i = 0; i < qlge->rx_ring_count; i++) {
if (i != 0)
else
if (i < qlge->rss_ring_count) {
/*
* Inbound completions (RSS) queues
* Default queue is queue 0, which also handles
* broadcast/multicast frames and async events.
* Other inbound queues handle unicast frames only.
*/
("%s(%d)Allocating rss completion queue %d "
} else {
/*
* Outbound queue handles outbound completions only
*/
/* outbound cq is same size as tx_ring it services. */
rx_ring->lbq_buf_size = 0;
rx_ring->sbq_buf_size = 0;
("%s(%d)Allocating TX completion queue %d on"
}
}
return (DDI_SUCCESS);
}
static int
{
/* first shadow area is used by wqicb's host copy of consumer index */
+ sizeof (uint64_t);
+ sizeof (uint64_t);
int err = 0;
int page_entries;
/* Set up the shadow registers for this ring. */
/* PCI doorbell mem area + 0x00 for consumer index register */
*rx_ring->prod_idx_sh_reg = 0;
/* PCI doorbell mem area + 0x04 for valid register */
/* PCI doorbell mem area + 0x18 for large buffer consumer */
/* PCI doorbell mem area + 0x1c */
/*
* Set up the control block load flags.
*/
FLAGS_LV | /* Load MSI-X vector */
FLAGS_LI; /* Load irq delay values */
/* Load lbq values */
page_entries = 0;
do {
tmp += VM_PAGE_SIZE;
page_entries++;
} while (page_entries < (int)(
cqicb->lbq_addr_lo =
cqicb->lbq_addr_hi =
rx_ring->lbq_prod_idx = 0;
rx_ring->lbq_curr_idx = 0;
}
/* Load sbq values */
page_entries = 0;
do {
tmp += VM_PAGE_SIZE;
page_entries++;
} while (page_entries < (uint32_t)
cqicb->sbq_addr_lo =
cqicb->sbq_addr_hi =
rx_ring->sbq_prod_idx = 0;
rx_ring->sbq_curr_idx = 0;
}
case TX_Q:
break;
case DEFAULT_Q:
break;
case RX_Q:
/*
* Inbound completion handling rx_rings run in
* separate NAPI contexts.
*/
break;
default:
}
/* QL_DUMP_CQICB(qlge, cqicb); */
if (err) {
return (err);
}
rx_ring->rx_failed_sbq_allocs = 0;
rx_ring->rx_failed_lbq_allocs = 0;
rx_ring->rx_packets = 0;
rx_ring->frame_too_long = 0;
rx_ring->frame_too_short = 0;
return (err);
}
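/*
 * Illustrative sketch of the low/high address split used when the
 * cqicb lbq/sbq base addresses are loaded above (lbq_addr_lo and
 * lbq_addr_hi are the fields referenced in the code; any endianness
 * conversion is omitted here, and the helper name is hypothetical):
 * a 64-bit DMA address is programmed as two 32-bit register words.
 */
static void
ql_split_dma_addr_sketch(uint64_t dma_addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(dma_addr & 0xffffffffULL);	/* low 32 bits */
	*hi = (uint32_t)(dma_addr >> 32);		/* high 32 bits */
}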
/*
* start RSS
*/
static int
{
int status = 0;
int i;
RSS_RT6);
/*
* Fill out the Indirection Table.
*/
for (i = 0; i < RSS_HASH_CQ_ID_MAX; i++)
if (status) {
return (status);
}
return (status);
}
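/*
 * Illustrative sketch of the indirection-table fill performed above
 * (the hash_cq_id[] field name and the round-robin mapping are
 * assumptions): each of the RSS_HASH_CQ_ID_MAX hash buckets is mapped
 * onto one of the rss_ring_count inbound completion queues, so the
 * hardware spreads flows across the rx rings.
 */
static void
ql_fill_rss_indirection_sketch(uint8_t *hash_cq_id, uint32_t table_size,
    uint32_t rss_ring_count)
{
	uint32_t i;

	/* hash bucket i is served by inbound completion queue (i mod N) */
	for (i = 0; i < table_size; i++)
		hash_cq_id[i] = (uint8_t)(i % rss_ring_count);
}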
/*
 * Load a tx ring control block (wqicb) into hardware and start this ring
*/
static int
{
int err = 0;
/*
* Assign doorbell registers for this tx_ring.
*/
/* TX PCI doorbell mem area for tx producer index */
/* TX PCI doorbell mem area + 0x04 */
/*
* Assign shadow registers for this tx_ring.
*/
*tx_ring->cnsmr_idx_sh_reg = 0;
" phys_addr 0x%lx\n",
/* QL_DUMP_WQICB(qlge, wqicb); */
if (err) {
return (err);
}
return (err);
}
/*
 * Set up a MAC, multicast or VLAN address for
 * inbound frame matching.
*/
int
{
switch (type) {
case MAC_ADDR_TYPE_MULTI_MAC:
case MAC_ADDR_TYPE_CAM_MAC: {
(addr[5]);
"MULTICAST" : "UNICAST"));
("addr %02x %02x %02x %02x %02x %02x at index %d in "
"the CAM.\n",
if (status)
goto exit;
/* offset 0 - lower 32 bits of the MAC address */
(offset++) |
type); /* type */
if (status)
goto exit;
/* offset 1 - upper 16 bits of the MAC address */
(offset++) |
type); /* type */
if (status)
goto exit;
/* offset 2 - CQ ID associated with this MAC address */
type); /* type */
/*
* This field should also include the queue id
* and possibly the function id. Right now we hardcode
* the route field to NIC core.
*/
if (type == MAC_ADDR_TYPE_CAM_MAC) {
(0 <<
/* route to NIC core */
}
break;
}
default:
"Address type %d not yet supported.", type);
}
exit:
return (status);
}
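/*
 * Illustrative sketch of the CAM encoding described by the offset
 * comments above ("lower 32 bits" at offset 0, "upper 16 bits" at
 * offset 1).  This is a sketch of the conventional packing, not the
 * verified register layout; the helper name is hypothetical.
 */
static void
ql_mac_to_cam_words_sketch(const uint8_t *addr, uint32_t *lower,
    uint32_t *upper)
{
	/* bytes 2..5 of the MAC address form the lower 32-bit word */
	*lower = ((uint32_t)addr[2] << 24) | ((uint32_t)addr[3] << 16) |
	    ((uint32_t)addr[4] << 8) | (uint32_t)addr[5];
	/* bytes 0..1 form the upper 16-bit word */
	*upper = ((uint32_t)addr[0] << 8) | (uint32_t)addr[1];
}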
/*
* The NIC function for this chip has 16 routing indexes. Each one can be used
 * to route different frame types to various inbound queues. We send broadcast
 * and multicast frames to the default queue and use CAM and RSS matches to
 * steer unicast frames to the other inbound queues.
*/
static int
{
int status;
("%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
((index ==
switch (mask) {
case RT_IDX_CAM_HIT:
RT_IDX_TYPE_NICQ | /* type */
break;
case RT_IDX_VALID: /* Promiscuous Mode frames. */
RT_IDX_TYPE_NICQ | /* type */
break;
case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
RT_IDX_TYPE_NICQ | /* type */
break;
case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
RT_IDX_TYPE_NICQ | /* type */
break;
case RT_IDX_MCAST: /* Pass up All Multicast frames. */
RT_IDX_TYPE_NICQ | /* type */
break;
case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
RT_IDX_TYPE_NICQ | /* type */
break;
case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
RT_IDX_TYPE_NICQ | /* type */
break;
case 0: /* Clear the E-bit on an entry. */
RT_IDX_TYPE_NICQ | /* type */
break;
default:
mask);
goto exit;
}
if (value != 0) {
if (status)
goto exit;
}
exit:
return (status);
}
/*
* Clear all the entries in the routing table.
* Caller must get semaphore in advance.
*/
static int
{
int status = 0;
int i;
/* Clear all the entries in the routing table. */
for (i = 0; i < 16; i++) {
if (status) {
}
}
return (status);
}
/* Initialize the frame-to-queue routing. */
int
{
int status = 0;
if (status != DDI_SUCCESS)
return (status);
/* Clear all the entries in the routing table. */
if (status) {
goto exit;
}
if (status) {
"Failed to init routing register for broadcast packets.");
goto exit;
}
/*
* If we have more than one inbound queue, then turn on RSS in the
* routing block.
*/
RT_IDX_RSS_MATCH, 1);
if (status) {
"Failed to init routing register for MATCH RSS "
"packets.");
goto exit;
}
}
RT_IDX_CAM_HIT, 1);
if (status) {
"Failed to init routing register for CAM packets.");
goto exit;
}
RT_IDX_MCAST_MATCH, 1);
if (status) {
"Failed to init routing register for Multicast "
"packets.");
}
exit:
return (status);
}
/*
* Initialize hardware
*/
static int
{
int i;
int status = 0;
/*
* Set up the System register to halt on errors.
*/
/* Set the default queue. */
/* Enable the MPI interrupt. */
| INTR_MASK_PI);
/* Enable the function, set pagesize, enable error checking. */
}
/*
 * Check the current port max frame size; if it differs from the OS
 * setting, it needs to be changed.
*/
if (status == DDI_SUCCESS) {
/* if current frame size is smaller than required size */
qlge->max_frame_size) {
("update frame size, current %d, new %d\n",
qlge->max_frame_size));
}
/* clear pause bits */
}
if (qlge->dcbx_enable)
}
/* if need to update port configuration */
if (update_port_config) {
(void) ql_set_mpi_port_config(qlge,
}
} else
/* Start up the rx queues. */
for (i = 0; i < qlge->rx_ring_count; i++) {
if (status) {
"Failed to start rx ring[%d]", i);
return (status);
}
}
/*
* If there is more than one inbound completion queue
* then download a RICB to configure RSS.
*/
if (status) {
return (status);
}
}
/* Start up the tx queues. */
for (i = 0; i < qlge->tx_ring_count; i++) {
if (status) {
"Failed to start tx ring[%d]", i);
return (status);
}
}
qlge->selected_tx_ring = 0;
/* Set the frame routing filter. */
if (status) {
return (status);
}
return (status);
}
/*
* Issue soft reset to chip.
*/
static int
{
BIT_RESET, 0) != DDI_SUCCESS) {
"TIMEOUT!!! errored out of resetting the chip!");
}
return (status);
}
/*
 * If there are more than MIN_BUFFERS_ARM_COUNT small buffer descriptors in
 * its free list, move MIN_BUFFERS_ARM_COUNT descriptors to its in-use list
 * to be used by the hardware.
*/
static void
{
int i;
else {
/* Adjust to a multiple of 16 */
#ifdef QLGE_LOAD_UNLOAD
#endif
}
for (i = 0; i < arm_count; i++) {
break;
/* Arm asic */
sbq_entry++;
/* link the descriptors to in_use_list */
rx_ring->sbq_prod_idx++;
}
}
/*
 * If there are more than MIN_BUFFERS_ARM_COUNT large buffer descriptors in
 * its free list, move MIN_BUFFERS_ARM_COUNT descriptors to its in-use list
 * to be used by the hardware.
*/
static void
{
int i;
else {
/* Adjust to a multiple of 16 */
#ifdef QLGE_LOAD_UNLOAD
#endif
}
for (i = 0; i < arm_count; i++) {
break;
/* Arm asic */
lbq_entry++;
/* link the descriptors to in_use_list */
rx_ring->lbq_prod_idx++;
}
}
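/*
 * Illustrative sketch of the "Adjust to a multiple of 16" step noted in
 * the two arming routines above (a sketch, not the original logic;
 * MIN_BUFFERS_ARM_COUNT is the threshold named in the comments and the
 * helper name is hypothetical): only whole groups of 16 descriptors are
 * handed to the chip, so the producer index advances in the units the
 * hardware expects.
 */
static uint32_t
ql_arm_count_sketch(uint32_t free_count)
{
	if (free_count < MIN_BUFFERS_ARM_COUNT)
		return (0);		/* not enough free buffers to arm */
	return ((free_count / 16) * 16);	/* round down to multiple of 16 */
}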
/*
 * Initializes the adapter by configuring the request and response queues,
 * then allocates and arms the small and large receive buffers for the
 * hardware
*/
static int
{
int i;
goto err_bringup;
}
#ifdef QLGE_TRACK_BUFFER_USAGE
for (i = 0; i < qlge->rx_ring_count; i++) {
}
}
#endif
/* Arm buffers */
for (i = 0; i < qlge->rx_ring_count; i++) {
}
}
for (i = 0; i < qlge->tx_ring_count; i++) {
}
/* Enable completion queues */
for (i = 0; i < qlge->rx_ring_count; i++) {
}
for (i = 0; i < qlge->tx_ring_count; i++) {
}
for (i = 0; i < qlge->rx_ring_count; i++) {
}
/* This mutex will get re-acquired in enable_completion interrupt */
/* Traffic can start flowing now */
return (DDI_SUCCESS);
(void) ql_asic_reset(qlge);
return (DDI_FAILURE);
}
/*
*/
static int
{
int i;
for (i = 0; i < qlge->tx_ring_count; i++) {
}
for (i = 0; i < qlge->rx_ring_count; i++) {
}
return (DDI_SUCCESS);
}
/*ARGSUSED*/
/*
* Simply call pci_ereport_post which generates ereports for errors
* that occur in the PCI local bus configuration status registers.
*/
static int
{
return (err->fme_status);
}
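/*
 * Illustrative sketch of the error-callback body described above (not
 * the original code): pci_ereport_post(9F) posts ereports for errors
 * latched in the PCI configuration/status registers, and the callback
 * returns the framework's assessment of the fault.  The _sketch name
 * is hypothetical.
 */
/*ARGSUSED*/
static int
ql_fm_error_cb_sketch(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data)
{
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}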
static void
{
/*
* Register capabilities with IO Fault Services. The capabilities
* set above may not be supported by the parent nexus, in that case
* some capability bits may be cleared.
*/
if (qlge->fm_capabilities)
/*
* Initialize pci ereport capabilities if ereport capable
*/
}
/* Register error callback if error callback capable */
ql_fm_error_cb, (void*) qlge);
}
/*
 * DDI_FLAGERR_ACC indicates:
 * Driver will check its access handle(s) for faults on
 * a regular basis by calling ddi_fm_acc_err_get
 * Driver is able to cope with incorrect results of I/O
 * operations resulting from an I/O fault
*/
}
/*
* DDI_DMA_FLAGERR indicates:
* Driver will check its DMA handle(s) for faults on a
* regular basis using ddi_fm_dma_err_get
* Driver is able to cope with incorrect results of DMA
 * operations resulting from an I/O fault
*/
}
}
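/*
 * Illustrative sketch of the registration sequence described above (not
 * the original body), using the standard fault-management entry points:
 * ddi_fm_init(9F), pci_ereport_setup(9F) when ereport-capable, and
 * ddi_fm_handler_register(9F) when callback-capable.  ql_fm_error_cb is
 * the callback registered earlier in this file; treating
 * fm_capabilities as an int and the _sketch name are assumptions.
 */
static void
ql_fm_init_sketch(qlge_t *qlge, dev_info_t *dip)
{
	ddi_iblock_cookie_t iblk;
	int fm_cap = qlge->fm_capabilities;

	if (fm_cap == 0)
		return;		/* fault management not requested */

	ddi_fm_init(dip, &fm_cap, &iblk);

	/* set up ereport plumbing if ereport or callback capable */
	if (DDI_FM_EREPORT_CAP(fm_cap) || DDI_FM_ERRCB_CAP(fm_cap))
		pci_ereport_setup(dip);

	/* register the per-driver error callback if callback capable */
	if (DDI_FM_ERRCB_CAP(fm_cap))
		ddi_fm_handler_register(dip, ql_fm_error_cb, (void *)qlge);

	qlge->fm_capabilities = fm_cap;
}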
static void
{
/* Only unregister FMA capabilities if we registered some */
if (qlge->fm_capabilities) {
/*
* Release any resources allocated by pci_ereport_setup()
*/
/*
* Un-register error callback if error callback capable
*/
/* Unregister from IO Fault Services */
}
}
/*
* ql_attach - Driver attach.
*/
static int
{
int instance;
int rval;
uint16_t w;
rval = DDI_FAILURE;
/* first get the instance */
switch (cmd) {
case DDI_ATTACH:
/*
* Allocate our per-device-instance structure
*/
/* Set up the coalescing parameters. */
qlge->ql_dbgprnt = 0;
#if QL_DEBUG
#endif /* QL_DEBUG */
/*
* Initialize for fma support
*/
/* fault management (fm) capabilities. */
if (data <= 0xf) {
}
/*
* Setup the ISP8x00 registers address mapping to be
* accessed by this particular driver.
* 0x0 Configuration Space
* 0x1 I/O Space
* 0x2 1st Memory Space address - Control Register Set
* 0x3 2nd Memory Space address - Doorbell Memory Space
*/
w = 2;
sizeof (dev_reg_t), &ql_dev_acc_attr,
break;
}
/* map Doorbell memory space */
w = 3;
if (ddi_regs_map_setup(dip, w,
0x100000 /* sizeof (dev_doorbell_reg_t) */,
"registers",
break;
}
/*
* Allocate a macinfo structure for this instance
*/
break;
}
/* save adapter status to dip private data */
ADAPTER_NAME, instance));
/*
* Attach this instance of the device
*/
/* Setup PCI Local Bus Configuration resource. */
}
break;
}
instance));
}
break;
}
instance));
/* Setup interrupt vectors */
break;
}
instance));
/* Configure queues */
break;
}
instance));
/*
* Allocate memory resources
*/
break;
}
instance));
/*
* Map queues to interrupt vectors
*/
/* Initialize mutex, need the interrupt priority */
(void) ql_init_rx_tx_locks(qlge);
ADAPTER_NAME, instance));
/*
* Use a soft interrupt to do something that we do not want
 * to do in regular network functions or with mutexes held
*/
!= DDI_SUCCESS) {
break;
}
!= DDI_SUCCESS) {
break;
}
!= DDI_SUCCESS) {
break;
}
ADAPTER_NAME, instance));
/*
* mutex to protect the adapter state structure.
* initialize mutexes according to the interrupt priority
*/
/* Mailbox wait and interrupt conditional variable. */
ADAPTER_NAME, instance));
/*
* KStats
*/
break;
}
ADAPTER_NAME, instance));
/*
* Initialize gld macinfo structure
*/
/*
* Add interrupt handlers
*/
"handlers");
break;
}
ADAPTER_NAME, instance));
/*
* MAC Register
*/
break;
}
ADAPTER_NAME, instance));
rval = DDI_SUCCESS;
break;
/*
* DDI_RESUME
* When called with cmd set to DDI_RESUME, attach() must
* restore the hardware state of a device (power may have been
 * removed from the device), allow pending requests to
 * continue, and service new requests. In this case, the driver
* must not make any assumptions about the state of the
* hardware, but must restore the state of the device except
* for the power level of components.
*
*/
case DDI_RESUME:
return (DDI_FAILURE);
break;
default:
break;
}
/* if failed to attach */
}
return (rval);
}
/*
* Unbind all pending tx dma handles during driver bring down
*/
static void
{
int i, j;
for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
if (tx_ring_desc->tx_dma_handle[j]) {
(void) ddi_dma_unbind_handle(
tx_ring_desc->tx_dma_handle[j]);
}
}
} /* end of for loop */
}
}
/*
* Wait for all the packets sent to the chip to finish transmission
 * to prevent buffers from being unmapped before or during a transmit operation
*/
static int
{
int rings_done;
int done = 0;
while (!done) {
rings_done = 0;
for (i = 0; i < qlge->tx_ring_count; i++) {
if (qlge->isr_stride) {
(void) ql_clean_outbound_rx_ring(
ob_ring);
}
}
/*
 * Get the count of pending iocbs, i.e. those that have not yet
 * been pulled down by the chip
*/
if (producer_idx >= consumer_idx)
else
rings_done++;
else {
done = 1;
break;
}
}
/* If all the rings are done */
#ifdef QLGE_LOAD_UNLOAD
#endif
rval = DDI_SUCCESS;
break;
}
qlge_delay(100);
count--;
if (!count) {
#ifdef QLGE_LOAD_UNLOAD
" Transmits on queue %d to complete .\n",
i);
" Producer %d, Consumer %d\n",
i+1,
" Producer %d, Consumer %d\n",
#endif
/* For now move on */
break;
}
}
/* Stop the request queue */
for (i = 0; i < qlge->tx_ring_count; i++) {
}
}
return (rval);
}
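/*
 * Illustrative sketch of the pending-entry computation used in the wait
 * loop above (producer/consumer index names follow the surrounding
 * code; the ring-length parameter and helper name are assumptions):
 * when the producer index has wrapped past the end of the request ring,
 * the count must account for the wrap-around.
 */
static uint32_t
ql_pending_entries_sketch(uint32_t producer_idx, uint32_t consumer_idx,
    uint32_t ring_entries)
{
	if (producer_idx >= consumer_idx)
		return (producer_idx - consumer_idx);
	/* producer has wrapped around the end of the ring */
	return (ring_entries - consumer_idx + producer_idx);
}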
/*
 * Wait for all the receive buffers indicated up to the stack to be returned
*/
static int
{
int i;
/* Disable all the completion queues */
for (i = 0; i < qlge->rx_ring_count; i++) {
}
}
/* Wait for OS to return all rx buffers */
return (DDI_SUCCESS);
}
/*
* stop the driver
*/
static int
{
int i;
/* stop forwarding external packets to driver */
if (status)
return (status);
(void) ql_stop_routing(qlge);
/*
* Set the flag for receive and transmit
* operations to cease
*/
for (i = 0; i < qlge->tx_ring_count; i++) {
}
for (i = 0; i < qlge->rx_ring_count; i++) {
}
/*
* Need interrupts to be running while the transmit
* completions are cleared. Wait for the packets
* queued to the chip to be sent out
*/
(void) ql_wait_tx_quiesce(qlge);
/* Interrupts not needed from now */
/* Disable Global interrupt */
/* Wait for all the indicated packets to come back */
/* Reset adapter */
(void) ql_asic_reset(qlge);
/*
* Unbind all tx dma handles to prevent pending tx descriptors'
* dma handles from being re-used.
*/
for (i = 0; i < qlge->tx_ring_count; i++) {
}
}
return (status);
}
/*
* ql_detach
 * Used to remove all state associated with a given
 * instance of a device node prior to the removal of that
* instance from the system.
*/
static int
{
int rval;
rval = DDI_SUCCESS;
switch (cmd) {
case DDI_DETACH:
return (DDI_FAILURE);
if (rval != DDI_SUCCESS)
break;
/* free memory resources */
}
break;
case DDI_SUSPEND:
return (DDI_FAILURE);
(void) ql_do_stop(qlge);
}
break;
default:
rval = DDI_FAILURE;
break;
}
return (rval);
}
/*
* quiesce(9E) entry point.
*
* This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
*
* This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
*/
int
{
int i;
return (DDI_FAILURE);
/* stop forwarding external packets to driver */
(void) ql_stop_routing(qlge);
/* Stop all the request queues */
for (i = 0; i < qlge->tx_ring_count; i++) {
}
}
/* Interrupts not needed from now */
/* Disable MPI interrupt */
(INTR_MASK_PI << 16));
/* Disable all the rx completion queues */
for (i = 0; i < qlge->rx_ring_count; i++) {
}
}
/* Reset adapter */
(void) ql_asic_reset(qlge);
qlge_delay(100);
}
return (DDI_SUCCESS);
}
/*
* Loadable Driver Interface Structures.
* Declare and initialize the module configuration section...
*/
&mod_driverops, /* type of module: driver */
version, /* name of module */
&ql_ops /* driver dev_ops */
};
};
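/*
 * Illustrative sketch of the conventional linkage these initializers
 * feed into (the variable names carry a _sketch suffix because the
 * original names are not shown here): a struct modldrv describing the
 * driver is wrapped in a struct modlinkage that the _init/_fini/_info
 * entry points hand to the module framework.
 */
static struct modldrv ql_modldrv_sketch = {
	&mod_driverops,		/* type of module: driver */
	version,		/* name of module */
	&ql_ops			/* driver dev_ops */
};

static struct modlinkage ql_modlinkage_sketch = {
	MODREV_1, &ql_modldrv_sketch, NULL
};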
/*
* Loadable Module Routines
*/
/*
* _init
* Initializes a loadable module. It is called before any other
* routine in a loadable module.
*/
int
_init(void)
{
int rval;
if (rval != DDI_SUCCESS) {
}
return (rval);
}
/*
* _fini
* Prepares a module for unloading. It is called when the system
* wants to unload a module. If the module determines that it can
* be unloaded, then _fini() returns the value returned by
* mod_remove(). Upon successful return from _fini() no other
* routine in the module will be called before _init() is called.
*/
int
_fini(void)
{
int rval;
if (rval == DDI_SUCCESS) {
}
return (rval);
}
/*
* _info
* Returns information about loadable module.
*/
int
{
}
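/*
 * Illustrative sketch of the conventional bodies for the three loadable
 * module entry points above (not the original code; the use of
 * mac_init_ops(9F)/mac_fini_ops(9F) with ADAPTER_NAME and the
 * ql_modlinkage_sketch linkage are assumptions): _init wires the GLDv3
 * mac entry points into ql_ops and installs the module, _fini undoes
 * that on successful removal, and _info simply reports the linkage.
 */
int
ql_init_sketch(void)
{
	int rval;

	mac_init_ops(&ql_ops, ADAPTER_NAME);
	rval = mod_install(&ql_modlinkage_sketch);
	if (rval != DDI_SUCCESS)
		mac_fini_ops(&ql_ops);	/* undo on failure */
	return (rval);
}

int
ql_fini_sketch(void)
{
	int rval;

	rval = mod_remove(&ql_modlinkage_sketch);
	if (rval == DDI_SUCCESS)
		mac_fini_ops(&ql_ops);
	return (rval);
}

int
ql_info_sketch(struct modinfo *modinfop)
{
	return (mod_info(&ql_modlinkage_sketch, modinfop));
}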