/* nxge_rxdma.c, revision 0a8e077a7aaa5fa8697989cd69445512a997fc66 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_rxdma.h>

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
extern uint32_t nxge_rbr_size;
extern uint32_t nxge_rcr_size;
extern uint32_t nxge_rbr_spare_size;
extern uint32_t nxge_mblks_pending;
/*
* Tunable to reduce the amount of time spent in the
* ISR doing Rx Processing.
*/
extern uint32_t nxge_max_rx_pkts;
/*
* Tunables to manage the receive buffer blocks.
*
* nxge_rx_threshold_hi: copy all buffers.
* nxge_rx_bcopy_size_type: receive buffer block size type.
* nxge_rx_threshold_lo: copy only up to tunable block size type.
 */
extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;
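/*
 * Illustrative sketch (not part of the driver): how the three tunables
 * above combine when deciding whether a consumed receive buffer should
 * be bcopy'd rather than loaned up.  The helper and its scalar
 * arguments are hypothetical; the real test in nxge_receive_packet()
 * operates on the rx_rbr_ring_t fields (rbr_consumed,
 * rbr_threshold_lo, rbr_threshold_hi, rbr_bufsize_type).
 */
#ifdef NXGE_EXAMPLE_SKETCH
static boolean_t
nxge_example_use_bcopy(uint32_t consumed, uint32_t threshold_lo,
    uint32_t threshold_hi, uint32_t bufsize_type, uint32_t pktbufsz_type)
{
	/* NXGE_RX_COPY_ALL sets a threshold of 0, i.e. copy always. */
	if (consumed >= threshold_hi)
		return (B_TRUE);	/* past hi: copy every packet */
	/*
	 * Under the hi threshold: copy once the lo threshold is
	 * reached, and then only buffers whose size type is no
	 * larger than the configured bcopy size type.
	 */
	if (threshold_lo == 0 ||
	    (consumed >= threshold_lo && bufsize_type >= pktbufsz_type))
		return (B_TRUE);
	return (B_FALSE);
}
#endif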
static nxge_status_t nxge_map_rxdma(p_nxge_t);
static void nxge_unmap_rxdma(p_nxge_t);
static void nxge_rxdma_hw_stop_common(p_nxge_t);
static void nxge_rxdma_hw_stop(p_nxge_t);
static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *, uint32_t,
    p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
    p_rx_mbox_t *);
static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
    uint16_t, p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    p_rx_rcr_ring_t *, p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
    p_rx_rcr_ring_t, p_rx_mbox_t);
static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
    uint16_t, p_nxge_dma_common_t *,
    p_rx_rbr_ring_t *, uint32_t);
static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
    p_rx_rbr_ring_t);
mblk_t *
nxge_rx_pkts(p_nxge_t, uint_t, p_nxge_ldv_t,
    p_rx_rcr_ring_t *, rx_dma_ctl_stat_t);
static void nxge_receive_packet(p_nxge_t,
    p_rx_rcr_ring_t, p_rcr_entry_t,
    boolean_t *, mblk_t **, mblk_t **);
static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
static void nxge_freeb(p_rx_msg_t);
static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
    p_rx_rbr_ring_t);
static uint16_t nxge_get_pktbuf_size(p_nxge_t, int, uint32_t);
{
"<== nxge_init_rxdma: status 0x%x", status));
return (status);
}
}
}
"<== nxge_init_rxdma_channels: status 0x%x", status));
return (status);
}
void
{
"<== nxge_uinit_rxdma_channels"));
}
{
if (rs != NPI_SUCCESS) {
}
return (status);
}
void
{
int i, ndmas;
(void) npi_rxdma_dump_fzc_regs(handle);
if (rx_rbr_rings == NULL) {
"<== nxge_rxdma_regs_dump_channels: "
"NULL ring pointer"));
return;
}
"<== nxge_rxdma_regs_dump_channels: "
" NULL rbr rings pointer"));
return;
}
if (!ndmas) {
"<== nxge_rxdma_regs_dump_channels: no channel"));
return;
}
"==> nxge_rxdma_regs_dump_channels (ndmas %d)", ndmas));
for (i = 0; i < ndmas; i++) {
continue;
}
}
}
{
if (rs != NPI_SUCCESS) {
}
return (status);
}
{
"<== nxge_init_rxdma_channel_event_mask"));
if (rs != NPI_SUCCESS) {
}
return (status);
}
{
"<== nxge_init_rxdma_channel_cntl_stat"));
if (rs != NPI_SUCCESS) {
}
return (status);
}
{
" ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
" <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
return (NXGE_OK);
}
{
" ==> nxge_rxdma_cfg_port_default_rdc"));
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
" <== nxge_rxdma_cfg_port_default_rdc"));
return (NXGE_OK);
}
{
" ==> nxge_rxdma_cfg_rcr_threshold"));
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
return (NXGE_OK);
}
{
if (enable == 0) {
} else {
tout);
}
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
return (NXGE_OK);
}
{
/*
* Use configuration data composed at init time.
* Write to hardware the receive ring configurations.
*/
"==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
switch (nxgep->rx_bksize_code) {
case RBR_BKSIZE_4K:
break;
case RBR_BKSIZE_8K:
break;
case RBR_BKSIZE_16K:
break;
case RBR_BKSIZE_32K:
break;
}
"rbr_len qlen %d pagesize code %d rcr_len %d",
"size 0 %d size 1 %d size 2 %d",
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
/*
* Enable the timeout and threshold.
*/
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
/* Enable the DMA */
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
/* Kick the DMA engine. */
/* Clear the rbr empty bit */
return (NXGE_OK);
}
{
/* disable the DMA */
if (rs != NPI_SUCCESS) {
"<== nxge_disable_rxdma_channel:failed (0x%x)",
rs));
return (NXGE_ERROR | rs);
}
return (NXGE_OK);
}
{
"<== nxge_init_rxdma_channel_rcrflush"));
"<== nxge_init_rxdma_channel_rcrflsh"));
return (status);
}
#define TO_LEFT -1
#define TO_RIGHT 1
#define NO_HINT 0xffffffff
/*ARGSUSED*/
{
int bufsize;
int found, search_done;
int max_iterations, iteration;
"==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
switch (pktbufsz_type) {
case 0:
break;
case 1:
break;
case 2:
break;
case RCR_SINGLE_BLOCK:
bufsize = 0;
anchor_index = 0;
break;
default:
return (NXGE_ERROR);
}
anchor_index = 0;
"==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
"buf_pp $%p btype %d anchor_index %d "
"bufinfo $%p",
bufinfo));
goto found_index;
}
"==> nxge_rxbuf_pp_to_vp: "
"buf_pp $%p btype %d anchor_index %d",
anchor_index));
iteration = 0;
/*
* First check if this block has been seen
* recently. This is indicated by a hint which
* is initialized when the first buffer of the block
* is seen. The hint is reset when the last buffer of
* the block has been processed.
* As three block sizes are supported, three hints
* are kept. The idea behind the hints is that once
* the hardware uses a block for a buffer of that
* size, it will use it exclusively for that size
* and will use it until it is exhausted. It is assumed
* that there would be a single block in use for the same
* buffer size at any given time.
*/
/*
* check if this is the last buffer in the block
* If so, then reset the hint for the size;
*/
}
}
"==> nxge_rxbuf_pp_to_vp: (!found)"
"buf_pp $%p btype %d anchor_index %d",
anchor_index));
/*
* This is the first buffer of the block of this
* size. Need to search the whole information
* array.
* The search uses a binary search algorithm, which
* assumes that the information is already sorted in
* increasing order:
* info[0] < info[1] < info[2] .... < info[n-1]
* where n is the size of the information array.
* (A standalone sketch of this lookup follows this
* function.)
*/
l_index = 0;
while (search_done == B_FALSE) {
(iteration >= max_iterations))
/* read the DVMA address information and sort it */
"==> nxge_rxbuf_pp_to_vp: (searching)"
"buf_pp $%p btype %d "
"anchor_index %d chunk_size %d dvmaaddr $%p",
dvma_addr));
case IN_MIDDLE:
/* found */
(dvma_addr + chunk_size))
break;
case BOTH_RIGHT:
/* not found: go to the right */
break;
case BOTH_LEFT:
/* not found: go to the left */
l_index);
break;
default: /* should not come here */
return (NXGE_ERROR);
}
iteration++;
}
"==> nxge_rxbuf_pp_to_vp: (search done)"
"buf_pp $%p btype %d anchor_index %d",
anchor_index));
}
"==> nxge_rxbuf_pp_to_vp: (search failed)"
"buf_pp $%p btype %d anchor_index %d",
anchor_index));
return (NXGE_ERROR);
}
"==> nxge_rxbuf_pp_to_vp: (FOUND1)"
"buf_pp $%p btype %d bufsize %d anchor_index %d",
anchor_index));
/* index of the first block in this chunk */
"==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
"buf_pp $%p btype %d bufsize %d "
"anchor_index %d chunk_index %d dvma $%p",
dvma_addr));
"==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
"buf_pp $%p btype %d bufsize %d "
"anchor_index %d chunk_index %d dvma $%p "
"offset %d block_size %d",
block_size));
"==> nxge_rxbuf_pp_to_vp: "
"total_index %d dvma_addr $%p "
"offset %d block_size %d "
"block_index %d ",
block_index));
+ offset);
"==> nxge_rxbuf_pp_to_vp: "
"total_index %d dvma_addr $%p "
"offset %d block_size %d "
"block_index %d "
"*pkt_buf_addr_p $%p",
*pkt_buf_addr_p));
*msg_index = total_index;
"==> nxge_rxbuf_pp_to_vp: get msg index: "
"msg_index %d bufoffset_index %d",
*bufoffset));
return (NXGE_OK);
}
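/*
 * Illustrative sketch (not part of the driver): the two-stage lookup
 * performed by nxge_rxbuf_pp_to_vp() above.  A per-size hint caches
 * the index of the chunk most recently matched for each packet buffer
 * size; only on a hint miss is the rxbuf_index_info_t array, sorted
 * by DVMA address, binary searched.  The helper is hypothetical and
 * the field names (dvma_addr, buf_size) approximate the real ones.
 */
#ifdef NXGE_EXAMPLE_SKETCH
static int
nxge_example_find_chunk(rxbuf_index_info_t *info, int n,
    uint32_t *hint, int pktbufsz_type, uint64_t dvma)
{
	int l = 0, r = n - 1, m;
	uint32_t h = hint[pktbufsz_type];

	/* Stage 1: try the cached hint for this buffer size type. */
	if (h != NO_HINT && dvma >= info[h].dvma_addr &&
	    dvma < info[h].dvma_addr + info[h].buf_size)
		return ((int)h);

	/* Stage 2: binary search, assuming ascending dvma_addr. */
	while (l <= r) {
		m = (l + r) / 2;
		if (dvma < info[m].dvma_addr)
			r = m - 1;			/* go left */
		else if (dvma >= info[m].dvma_addr + info[m].buf_size)
			l = m + 1;			/* go right */
		else {
			hint[pktbufsz_type] = (uint32_t)m;
			return (m);			/* found */
		}
	}
	return (-1);					/* not found */
}
#endif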
/*
* comparison function used by nxge_ksort()
* to order entries by DVMA address
*/
static int
nxge_sort_compare(const void *p1, const void *p2)
{
	rxbuf_index_info_t *a, *b;

	a = (rxbuf_index_info_t *)p1;
	b = (rxbuf_index_info_t *)p2;

	if (a->dvma_addr > b->dvma_addr)
		return (1);
	if (a->dvma_addr < b->dvma_addr)
		return (-1);
	return (0);
}
/*
* Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
* n = # objs in the array
* s = size of each obj (must be multiples of a word size)
* f = ptr to function to compare two objs
* returns (-1 = less than, 0 = equal, 1 = greater than)
*/
void
nxge_ksort(caddr_t v, int n, int s, int (*f)())
{
	int g, i, j, ii;
	unsigned int *p1, *p2;
	unsigned int tmp;

	/* No work to do */
	if (v == NULL || n <= 1)
		return;

	/* Sanity check on arguments */
	ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
	ASSERT(s > 0);

	for (g = n / 2; g > 0; g /= 2) {
		for (i = g; i < n; i++) {
			for (j = i - g; j >= 0 &&
			    (*f)(v + j * s, v + (j + g) * s) == 1;
			    j -= g) {
				p1 = (unsigned *)(v + j * s);
				p2 = (unsigned *)(v + (j + g) * s);
				/* Swap the two objects word by word. */
				for (ii = 0; ii < s / 4; ii++) {
					tmp = *p1;
					*p1++ = *p2;
					*p2++ = tmp;
				}
			}
		}
	}
}
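/*
 * Usage sketch: nxge_rxbuf_index_info_init() below sorts the
 * per-chunk DVMA records exactly this way, so that the binary search
 * in nxge_rxbuf_pp_to_vp() can assume ascending order.  The
 * ring-info field name "buffer" and the rxring_info_t type are
 * approximations of the real ring state.
 */
#ifdef NXGE_EXAMPLE_SKETCH
static void
nxge_example_sort_chunks(rxring_info_t *ring_info, int num_chunks)
{
	nxge_ksort((caddr_t)ring_info->buffer, num_chunks,
	    sizeof (rxbuf_index_info_t), nxge_sort_compare);
}
#endif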
/*
* Initialize data structures required for rxdma
* buffer dvma->vmem address lookup
*/
/*ARGSUSED*/
static nxge_status_t
{
int index;
int max_iteration = 0, max_index = 0;
/* read the DVMA address information and sort it */
/* do init of the information array */
" nxge_rxbuf_index_info_init Sort ptrs"));
/* sort the array */
sizeof (rxbuf_index_info_t), nxge_sort_compare);
" nxge_rxbuf_index_info_init: sorted chunk %d "
" ioaddr $%p kaddr $%p size %x",
}
max_iteration = 0;
" nxge_rxbuf_index_info_init Find max iter %d",
return (NXGE_OK);
}
/* ARGSUSED */
void
{
#ifdef NXGE_DEBUG
"\trcr entry $%p "
"\trcr entry 0x%0llx "
"\trcr entry 0x%08x "
"\trcr entry 0x%08x "
"\tvalue 0x%0llx\n"
"\tmulti = %d\n"
"\tpkt_type = 0x%x\n"
"\tzero_copy = %d\n"
"\tnoport = %d\n"
"\tpromis = %d\n"
"\terror = 0x%04x\n"
"\tdcf_err = 0x%01x\n"
"\tl2_len = %d\n"
"\tpktbufsize = %d\n"
"\tpkt_buf_addr = $%p\n"
"\tpkt_buf_addr (<< 6) = $%p\n",
bptr,
#endif
}
void
{
"==> nxge_rxdma_regs_dump: rdc channel %d", rdc));
/* RBR head */
printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
/* RBR stats */
/* RCR tail */
printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
/* RCR qlen */
"<== nxge_rxdma_regs_dump: rdc rdc %d", rdc));
}
void
{
(void) nxge_rx_mac_disable(nxgep);
}
void
{
(void) nxge_rxdma_stop(nxgep);
(void) nxge_uninit_rxdma_channels(nxgep);
(void) nxge_init_rxdma_channels(nxgep);
#ifndef AXIS_DEBUG_LB
(void) nxge_xcvr_init(nxgep);
#endif
(void) nxge_rx_mac_enable(nxgep);
}
{
int i, ndmas;
"==> nxge_rxdma_hw_mode: mode %d", enable));
"<== nxge_rxdma_mode: not initialized"));
return (NXGE_ERROR);
}
if (rx_rbr_rings == NULL) {
"<== nxge_rxdma_mode: NULL ring pointer"));
return (NXGE_ERROR);
}
"<== nxge_rxdma_mode: NULL rbr rings pointer"));
return (NXGE_ERROR);
}
if (!ndmas) {
"<== nxge_rxdma_mode: no channel"));
return (NXGE_ERROR);
}
"==> nxge_rxdma_mode (ndmas %d)", ndmas));
for (i = 0; i < ndmas; i++) {
continue;
}
if (enable) {
"==> nxge_rxdma_hw_mode: channel %d (enable)",
channel));
} else {
"==> nxge_rxdma_hw_mode: channel %d (disable)",
channel));
}
}
"<== nxge_rxdma_hw_mode: status 0x%x", status));
return (status);
}
void
{
"==> nxge_rxdma_enable_channel: channel %d", channel));
}
void
{
"==> nxge_rxdma_disable_channel: channel %d", channel));
}
void
{
(void) nxge_rx_mac_enable(nxgep);
}
/*ARGSUSED*/
void
{
int i, ndmas;
if (rx_rbr_rings == NULL) {
"<== nxge_fixup_rxdma_rings: NULL ring pointer"));
return;
}
if (!ndmas) {
"<== nxge_fixup_rxdma_rings: no channel"));
return;
}
if (rx_rcr_rings == NULL) {
"<== nxge_fixup_rxdma_rings: NULL ring pointer"));
return;
}
"==> nxge_fixup_rxdma_rings (ndmas %d)", ndmas));
for (i = 0; i < ndmas; i++) {
"==> nxge_fixup_rxdma_rings: channel %d "
}
}
void
{
int i;
if (i < 0) {
"<== nxge_rxdma_fix_channel: no entry found"));
return;
}
}
void
{
int ndmas;
"<== nxge_rxdma_fixup_channel: buf not allocated"));
return;
}
if (!ndmas) {
"<== nxge_rxdma_fixup_channel: no dma allocated"));
return;
}
/* Reinitialize the receive block and completion rings */
rbrp->rbr_rd_index = 0;
rcrp->comp_rd_index = 0;
rcrp->comp_wt_index = 0;
}
}
"==> nxge_rxdma_fixup_channel: failed (0x%08x)", status));
}
int
{
int i, ndmas;
"==> nxge_rxdma_get_ring_index: channel %d", channel));
if (rx_rbr_rings == NULL) {
"<== nxge_rxdma_get_ring_index: NULL ring pointer"));
return (-1);
}
if (!ndmas) {
"<== nxge_rxdma_get_ring_index: no channel"));
return (-1);
}
"==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas));
for (i = 0; i < ndmas; i++) {
"==> nxge_rxdma_get_rbr_ring: "
"channel %d (index %d) "
"ring %d", channel, i,
rbr_rings[i]));
return (i);
}
}
"<== nxge_rxdma_get_rbr_ring_index: not found"));
return (-1);
}
{
int i, ndmas;
"==> nxge_rxdma_get_rbr_ring: channel %d", channel));
if (rx_rbr_rings == NULL) {
"<== nxge_rxdma_get_rbr_ring: NULL ring pointer"));
return (NULL);
}
if (!ndmas) {
"<== nxge_rxdma_get_rbr_ring: no channel"));
return (NULL);
}
"==> nxge_rxdma_get_ring (ndmas %d)", ndmas));
for (i = 0; i < ndmas; i++) {
"==> nxge_rxdma_get_rbr_ring: channel %d "
return (rbr_rings[i]);
}
}
"<== nxge_rxdma_get_rbr_ring: not found"));
return (NULL);
}
{
int i, ndmas;
"==> nxge_rxdma_get_rcr_ring: channel %d", channel));
if (rx_rcr_rings == NULL) {
"<== nxge_rxdma_get_rcr_ring: NULL ring pointer"));
return (NULL);
}
if (!ndmas) {
"<== nxge_rxdma_get_rcr_ring: no channel"));
return (NULL);
}
"==> nxge_rxdma_get_rcr_ring (ndmas %d)", ndmas));
for (i = 0; i < ndmas; i++) {
"==> nxge_rxdma_get_rcr_ring: channel %d "
return (rcr_rings[i]);
}
}
"<== nxge_rxdma_get_rcr_ring: not found"));
return (NULL);
}
/*
* Static functions start here.
*/
static p_rx_msg_t
{
"Allocation of a rx msg failed."));
goto nxge_allocb_exit;
}
if (dmabuf_p) {
} else {
"Allocation of a receive page failed."));
goto nxge_allocb_fail1;
}
}
goto nxge_allocb_fail2;
}
goto nxge_allocb_exit;
if (!nxge_mp->use_buf_pool) {
}
return (nxge_mp);
}
{
"offset = 0x%08X "
"size = 0x%08X",
goto nxge_dupb_exit;
}
nxge_mp));
return (mp);
}
{
goto nxge_dupb_bcopy_exit;
}
nxge_mp));
return (mp);
}
void
{
/* Reuse this buffer */
rx_msg_p->cur_usage_cnt = 0;
rx_msg_p->max_usage_cnt = 0;
rx_msg_p->pkt_buf_size = 0;
if (rx_rbr_p->rbr_use_bcopy) {
}
/*
* Get the rbr header pointer and its offset index.
*/
"<== nxge_post_page (channel %d post_next_index %d)",
}
void
{
int ref_cnt;
"nxge_freeb:rx_msg_p = $%p (block pending %d)",
if (!ref_cnt) {
"will free: rx_msg_p = $%p (block pending %d)",
(long long)rx_msg_p, nxge_mblks_pending));
if (!rx_msg_p->use_buf_pool) {
}
return;
}
/*
* Repost buffer.
*/
"nxge_freeb: post page $%p:", rx_msg_p));
rx_msg_p);
}
}
{
#ifdef NXGE_DEBUG
#endif
"<== nxge_rx_intr: arg2 $%p arg1 $%p",
return (DDI_INTR_CLAIMED);
}
}
"==> nxge_rx_intr: arg2 $%p arg1 $%p",
/*
* This interrupt handler is for a specific
* receive dma channel.
*/
/*
* Get the control and status for this channel.
*/
"cs 0x%016llx rcrto 0x%x rcrthres %x",
/* error events. */
}
/*
* Enable the mailbox update interrupt if we want
* to use mailbox. We probably don't need to use
* mailbox as it only saves us one pio read.
* Also write 1 to rcrthres and rcrto to clear
* these two edge triggered bits.
*/
/*
* Rearm this logical group if this is a single device
* group.
*/
}
serviced));
return (serviced);
}
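/*
 * Illustrative sketch (not part of the driver): the control/status
 * write described by the comment in nxge_rx_intr() above.  The
 * rx_dma_ctl_stat_t bit names and the RXDMA_REG_WRITE64 macro are
 * recalled from the NPI layer and should be treated as approximate;
 * the helper itself is hypothetical.
 */
#ifdef NXGE_EXAMPLE_SKETCH
static void
nxge_example_rearm_rcr_intr(npi_handle_t handle, uint8_t channel,
    rx_dma_ctl_stat_t cs)
{
	cs.bits.hdw.mex = 1;		/* mailbox update interrupt */
	cs.bits.ldw.rcrto = 1;		/* write 1 to clear RCR timeout */
	cs.bits.ldw.rcrthres = 1;	/* write 1 to clear RCR threshold */
	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, cs.value);
}
#endif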
/*
* Process the packets received in the specified logical device
* and pass up a chain of message blocks to the upper layer.
*/
static void
{
"<== nxge_rx_pkts_vring: no mp"));
return;
}
mp));
#ifdef NXGE_DEBUG
"==> nxge_rx_pkts_vring:calling mac_rx "
"LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p "
"mac_handle $%p",
"==> nxge_rx_pkts_vring: dump packets "
"(mp $%p b_rptr $%p b_wptr $%p):\n %s",
mp,
"==> nxge_rx_pkts_vring: dump b_cont packets "
"(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
}
"==> nxge_rx_pkts_vring: dump next packets "
"(b_rptr $%p): %s",
}
#endif
}
/*
* This routine is the main packet receive processing function.
* It gets the packet type, error code, and buffer related
* information from the receive completion entry.
* How many completion entries to process is based on the number of packets
* queued by the hardware, a hardware maintained tail pointer
* and a configurable receive packet count.
*
* A chain of message blocks will be created as result of processing
* the completion entries. This chain of message blocks will be returned and
* a hardware control status register will be updated with the number of
* packets were removed from the hardware queue.
*
*/
mblk_t *
{
#if defined(_BIG_ENDIAN)
#endif
return (NULL);
}
"channel %d, and rcr channel %d not matched.",
return (NULL);
}
"==> nxge_rx_pkts: START: rcr channel %d "
"head_p $%p head_pp $%p index %d ",
rcr_p->comp_rd_index));
#if !defined(_BIG_ENDIAN)
#else
if (rs != NPI_SUCCESS) {
"channel %d, get qlen failed 0x%08x",
return (NULL);
}
#endif
if (!qlen) {
"==> nxge_rx_pkts:rcr channel %d "
return (NULL);
}
/*
* Number of packets queued
* (A jumbo or multi packet is counted as only one
* packet, though it may take up more than one completion entry.)
*/
while (qlen_hw) {
#ifdef NXGE_DEBUG
#endif
/*
* Process one completion ring entry.
*/
/*
* message chaining modes
*/
if (nmp) {
}
}
"==> nxge_rx_pkts: loop: rcr channel %d "
"before updating: multi %d "
"nrcr_read %d "
"npk read %d "
"head_pp $%p index %d ",
if (!multi) {
qlen_hw--;
npkt_read++;
}
/*
* Update the next read entry.
*/
nrcr_read++;
"<== nxge_rx_pkts: (SAM, process one packet) "
"nrcr_read %d",
nrcr_read));
"==> nxge_rx_pkts: loop: rcr channel %d "
"multi %d "
"nrcr_read %d "
"npk read %d "
"head_pp $%p index %d ",
}
if (rcr_p->intr_timeout)
}
"==> nxge_rx_pkts: EXIT: rcr channel %d "
"head_pp $%p index %016llx ",
rcr_p->comp_rd_index));
/*
* Update RCR buffer pointer read and number of packets
* read.
*/
return (head_mp);
}
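/*
 * Illustrative sketch (not part of the driver): the overall shape of
 * the completion loop in nxge_rx_pkts() above.  Work is bounded by
 * the hardware queue length and by the nxge_max_rx_pkts tunable, and
 * a multi-entry (jumbo) packet decrements the packet count only on
 * its final entry.  example_process_one_entry() is hypothetical.
 */
#ifdef NXGE_EXAMPLE_SKETCH
static mblk_t *example_process_one_entry(boolean_t *multi);

static mblk_t *
nxge_example_drain_rcr(int qlen, int max_pkts)
{
	mblk_t *head = NULL, **tailp = &head;
	int npkts = 0;
	boolean_t multi;

	while (qlen > 0 && npkts < max_pkts) {
		mblk_t *mp = example_process_one_entry(&multi);

		if (mp != NULL) {		/* chain via b_next */
			*tailp = mp;
			tailp = &mp->b_next;
		}
		if (!multi) {			/* last entry of a packet */
			qlen--;
			npkts++;
		}
		/* the real loop also advances the RCR read pointers */
	}
	return (head);
}
#endif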
void
{
#ifdef NXGE_DEBUG
int dump_len;
#endif
"==> nxge_receive_packet: entryp $%p entry 0x%0llx "
"pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
"error_type 0x%x pkt_type 0x%x "
"pktbufsz_type %d ",
"==> nxge_receive_packet: entryp $%p entry 0x%0llx "
"pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
"error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
pkt_type));
"==> (rbr) nxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
/* get the stats ptr */
if (!l2_len) {
"<== nxge_receive_packet: failed: l2 length is 0."));
return;
}
/* shift 6 bits to get the full io address */
"==> (rbr) nxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
if (first_entry) {
"==> nxge_receive_packet: first entry 0x%016llx "
"pkt_buf_addr_pp $%p l2_len %d hdr %d",
hdr_size));
}
"==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
/*
* Packet buffer address in the completion entry points
* to the starting buffer address (offset 0).
* Use the starting buffer address to locate the corresponding
* kernel address.
*/
&msg_index);
"==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
"<== nxge_receive_packet: found vaddr failed %d",
status));
return;
}
"==> (rbr 3) nxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
"==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
"==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
switch (pktbufsz_type) {
case RCR_PKTBUFSZ_0:
"==> nxge_receive_packet: 0 buf %d", bsize));
break;
case RCR_PKTBUFSZ_1:
"==> nxge_receive_packet: 1 buf %d", bsize));
break;
case RCR_PKTBUFSZ_2:
"==> nxge_receive_packet: 2 buf %d", bsize));
break;
case RCR_SINGLE_BLOCK:
"==> nxge_receive_packet: single %d", bsize));
break;
default:
return;
}
"==> nxge_receive_packet: after first dump:usage count"));
if (rx_msg_p->cur_usage_cnt == 0) {
if (rx_rbr_p->rbr_use_bcopy) {
if (rx_rbr_p->rbr_consumed <
if (rx_rbr_p->rbr_threshold_lo == 0 ||
((rx_rbr_p->rbr_consumed >=
rx_rbr_p->rbr_threshold_lo) &&
(rx_rbr_p->rbr_bufsize_type >=
pktbufsz_type))) {
}
} else {
}
}
"==> nxge_receive_packet: buf %d (new block) ",
bsize));
if (pktbufsz_type == RCR_SINGLE_BLOCK) {
"==> nxge_receive_packet: buf %d "
"(single block) ",
bsize));
/*
* Buffer can be reused once the free function
* is called.
*/
} else {
}
}
} else {
}
}
"msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
if ((error_type) || (dcf_err)) {
if (dcf_err) {
#ifdef NXGE_DEBUG
"nxge_receive_packet: channel %d dcf_err rcr"
}
#endif
} else {
/* Update error stats */
switch (error_type) {
case RCR_L2_ERROR:
" nxge_receive_packet:"
" channel %d RCR L2_ERROR",
channel));
break;
case RCR_L4_CSUM_ERROR:
if (rdc_stats->l4_cksum_err <
" nxge_receive_packet:"
" channel %d"
" RCR L4_CSUM_ERROR",
channel));
break;
case RCR_FFLP_SOFT_ERROR:
if (rdc_stats->fflp_soft_err <
" nxge_receive_packet:"
" channel %d"
" RCR FFLP_SOFT_ERROR",
channel));
break;
case RCR_ZCP_SOFT_ERROR:
if (rdc_stats->zcp_soft_err <
" nxge_receive_packet:"
" Channel %d"
" RCR ZCP_SOFT_ERROR",
channel));
break;
default:
" nxge_receive_packet:"
" Channel %d"
" RCR entry 0x%llx"
" error 0x%x",
error_type));
break;
}
}
/*
* Update and repost buffer block if max usage
* count is reached.
*/
if (error_send_up == B_FALSE) {
if (buffer_free == B_TRUE) {
}
return;
}
}
"==> nxge_receive_packet: DMA sync second "));
if (!rx_msg_p->rx_use_bcopy) {
} else {
"==> nxge_receive_packet: use bcopy "
"rbr consumed %d "
"pktbufsz_type %d "
"offset %d "
"hdr_size %d l2_len %d "
"nmp->b_rptr $%p",
}
if (!rx_msg_p->rx_use_bcopy) {
if (first_entry) {
bytes_read = 0;
- skip_len];
else
} else {
else
}
"==> nxge_receive_packet after dupb: "
"rbr consumed %d "
"pktbufsz_type %d "
"nmp $%p rptr $%p wptr $%p "
"buf_offset %d bzise %d l2_len %d skip_len %d",
}
} else {
"update stats (error)");
}
if (buffer_free == B_TRUE) {
}
/*
* ERROR, FRAG and PKT_TYPE are only reported
* in the first entry.
* If a packet is not fragmented and no error bit is set, then
* L4 checksum is OK.
*/
}
if (is_valid) {
if (first_entry) {
} else
}
/*
* Update stats and hardware checksumming.
*/
pkt_type == RCR_PKT_IS_UDP) ?
"is_valid 0x%x multi 0x%llx pkt %d frag %d error %d",
HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0);
"is_valid 0x%x multi 0x%llx pkt %d frag %d "
"error %d",
}
}
"==> nxge_receive_packet: *mp 0x%016llx", *mp));
"multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
}
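/*
 * Usage sketch: how the full-checksum result is attached to the
 * message before it is passed up, per the logic at the end of
 * nxge_receive_packet() above.  hcksum_assoc() is the standard
 * (Open)Solaris DDI call of this era; the helper and its condition
 * are a paraphrase, not the driver's exact code.
 */
#ifdef NXGE_EXAMPLE_SKETCH
static void
nxge_example_mark_cksum_ok(mblk_t *mp, boolean_t is_valid,
    boolean_t fragmented, uint16_t error_type)
{
	/*
	 * ERROR, FRAG and PKT_TYPE are only reported in the first
	 * RCR entry; if the packet is unfragmented and error free,
	 * the hardware has verified the L4 checksum.
	 */
	if (is_valid && !fragmented && error_type == 0) {
		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0,
		    HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0);
	}
}
#endif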
/*ARGSUSED*/
static nxge_status_t
{
"==> nxge_rx_err_evnts: rx_rbr_timeout"));
}
rdc_stats->rsp_cnt_err++;
"==> nxge_rx_err_evnts(channel %d): "
"rsp_cnt_err", channel));
}
rdc_stats->byte_en_bus++;
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: byte_en_bus", channel));
}
rdc_stats->rsp_dat_err++;
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: rsp_dat_err", channel));
}
rdc_stats->rcr_ack_err++;
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: rcr_ack_err", channel));
}
rdc_stats->dc_fifo_err++;
/* This is not a fatal error! */
"==> nxge_rx_err_evnts(channel %d): "
"dc_fifo_err", channel));
}
!= NPI_SUCCESS) {
"==> nxge_rx_err_evnts(channel %d): "
"rcr_sha_par: get perr", channel));
return (NXGE_ERROR | rs);
}
rdc_stats->rcr_sha_par++;
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: rcr_sha_par", channel));
}
rdc_stats->rbr_pre_par++;
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: rbr_pre_par", channel));
}
}
"==> nxge_rx_err_evnts (channel %d): "
"port_drop_pkt", channel));
}
"==> nxge_rx_err_evnts(channel %d): "
"wred_drop", channel));
}
"==> nxge_rx_err_evnts(channel %d): "
"rbr_pre_empty", channel));
}
"==> nxge_rx_err_evnts(channel %d): "
"rcr_shadow_full", channel));
}
rdc_stats->config_err++;
"==> nxge_rx_err_evnts(channel %d): "
"config error", channel));
}
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: rcrincon error", channel));
}
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: rcrfull error", channel));
}
"==> nxge_rx_err_evnts(channel %d): "
"rbr empty error", channel));
}
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: rbr_full error", channel));
}
rdc_stats->rbrlogpage++;
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: rbr logical page error", channel));
}
rdc_stats->cfiglogpage++;
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: cfig logical page error", channel));
}
if (rxport_fatal) {
" nxge_rx_err_evnts: "
" fatal error on Port #%d\n",
portn));
}
}
if (rxchan_fatal) {
" nxge_rx_err_evnts: "
" fatal error on Channel #%d\n",
channel));
}
}
return (status);
}
static nxge_status_t
{
int i, ndmas;
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
#endif
"<== nxge_map_rxdma: buf not allocated"));
return (NXGE_ERROR);
}
if (!ndmas) {
"<== nxge_map_rxdma: no dma allocated"));
return (NXGE_ERROR);
}
rbr_rings = (p_rx_rbr_ring_t *)
rcr_rings = (p_rx_rcr_ring_t *)
rx_mbox_p = (p_rx_mbox_t *)
/*
* Timeout should be set based on the system clock divider.
* The following timeout value of 1 assumes that the
* granularity (1000) is 3 microseconds running at 300MHz.
*/
/*
* Map descriptors from the buffer pools for each dma channel.
*/
for (i = 0; i < ndmas; i++) {
/*
* Set up and prepare buffer blocks, descriptors
* and mailbox.
*/
(p_nxge_dma_common_t *)&dma_buf_p[i],
(p_rx_rbr_ring_t *)&rbr_rings[i],
num_chunks[i],
(p_nxge_dma_common_t *)&dma_cntl_p[i],
(p_rx_rcr_ring_t *)&rcr_rings[i],
(p_rx_mbox_t *)&rx_mbox_p[i]);
goto nxge_map_rxdma_fail1;
}
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
"==> nxge_map_rxdma_channel: "
"channel %d "
"data buf base io $%p ($%p) "
"size 0x%llx (%d 0x%x)",
"==> nxge_map_rxdma_channel: "
"channel %d "
"cntl base io $%p ($%p) "
"size 0x%llx (%d 0x%x)",
}
#endif /* sun4v and NIU_LP_WORKAROUND */
}
goto nxge_map_rxdma_exit;
"==> nxge_map_rxdma: unmap rbr,rcr "
"(status 0x%x channel %d i %d)",
for (; i >= 0; i--) {
rbr_rings[i],
rcr_rings[i],
rx_mbox_p[i]);
}
"<== nxge_map_rxdma: "
"(status 0x%x channel %d)",
return (status);
}
static void
{
int i, ndmas;
"<== nxge_unmap_rxdma: NULL buf pointers"));
return;
}
"<== nxge_unmap_rxdma: NULL ring pointers"));
return;
}
if (!ndmas) {
"<== nxge_unmap_rxdma: no channel"));
return;
}
"==> nxge_unmap_rxdma (ndmas %d)", ndmas));
for (i = 0; i < ndmas; i++) {
"==> nxge_unmap_rxdma (ndmas %d) channel %d",
(p_rx_rbr_ring_t)rbr_rings[i],
(p_rx_rcr_ring_t)rcr_rings[i],
(p_rx_mbox_t)rx_mbox_p[i]);
}
"<== nxge_unmap_rxdma"));
}
{
/*
* Set up and prepare buffer blocks, descriptors
* and mailbox.
*/
"==> nxge_map_rxdma_channel (channel %d)", channel));
/*
* Receive buffer blocks
*/
"==> nxge_map_rxdma_channel (channel %d): "
}
/*
* Receive block ring, completion ring and mailbox.
*/
"==> nxge_map_rxdma_channel (channel %d): "
}
/* Free rbr, rcr */
"(status 0x%x channel %d)",
/* Free buffer blocks */
"==> nxge_map_rxdma_channel: free rx buffers"
"(nxgep 0x%x status 0x%x channel %d)",
"<== nxge_map_rxdma_channel: "
"(nxgep 0x%x status 0x%x channel %d)",
return (status);
}
/*ARGSUSED*/
static void
{
"==> nxge_unmap_rxdma_channel (channel %d)", channel));
/*
* unmap receive block ring, completion ring and mailbox.
*/
/* unmap buffer blocks */
}
/*ARGSUSED*/
static nxge_status_t
{
int i;
"==> nxge_map_rxdma_channel_cfg_ring"));
cntl_dmap = *dma_cntl_p;
/* Map in the receive block ring */
/*
* Zero out buffer block ring descriptors.
*/
/*
* For each buffer block, enter receive block address to the ring.
*/
"==> nxge_map_rxdma_channel_cfg_ring: channel %d "
rx_msg_p = rx_msg_ring[i];
>> RBR_BKADDR_SHIFT));
*rbr_vaddrp++ = bkaddr;
}
rbrp->rbr_rd_index = 0;
rbrp->rbr_consumed = 0;
/*
* Do bcopy on packets greater than bcopy size once
* the lo threshold is reached.
* This lo threshold should be less than the hi threshold.
*
* Do bcopy on every packet once the hi threshold is reached.
*/
if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) {
/* default it to use hi */
}
if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) {
}
switch (nxge_rx_threshold_hi) {
default:
case NXGE_RX_COPY_NONE:
/* Do not do bcopy at all */
break;
case NXGE_RX_COPY_1:
case NXGE_RX_COPY_2:
case NXGE_RX_COPY_3:
case NXGE_RX_COPY_4:
case NXGE_RX_COPY_5:
case NXGE_RX_COPY_6:
case NXGE_RX_COPY_7:
break;
case NXGE_RX_COPY_ALL:
rbrp->rbr_threshold_hi = 0;
break;
}
switch (nxge_rx_threshold_lo) {
default:
case NXGE_RX_COPY_NONE:
/* Do not do bcopy at all */
if (rbrp->rbr_use_bcopy) {
}
break;
case NXGE_RX_COPY_1:
case NXGE_RX_COPY_2:
case NXGE_RX_COPY_3:
case NXGE_RX_COPY_4:
case NXGE_RX_COPY_5:
case NXGE_RX_COPY_6:
case NXGE_RX_COPY_7:
break;
case NXGE_RX_COPY_ALL:
rbrp->rbr_threshold_lo = 0;
break;
}
"nxge_map_rxdma_channel_cfg_ring: channel %d "
"rbb_max %d "
"rbrp->rbr_bufsize_type %d "
"rbb_threshold_hi %d "
"rbb_threshold_lo %d",
rbrp->rbr_threshold_lo));
/* Map in the receive completion ring */
rcrp = (p_rx_rcr_ring_t)
sizeof (rcr_entry_t));
rcrp->comp_rd_index = 0;
rcrp->comp_wt_index = 0;
(nxge_port_rcr_size - 1);
(nxge_port_rcr_size - 1);
"==> nxge_map_rxdma_channel_cfg_ring: "
"channel %d "
"rbr_vaddrp $%p "
"rcr_desc_rd_head_p $%p "
"rcr_desc_rd_head_pp $%p "
"rcr_desc_rd_last_p $%p "
"rcr_desc_rd_last_pp $%p ",
rcrp->rcr_desc_last_pp));
/*
* Zero out buffer block ring descriptors.
*/
rcrp->sw_priv_hdr_len = 0;
/*
* Timeout should be set based on the system clock divider.
* The following timeout value of 1 assumes that the
* granularity (1000) is 3 microseconds running at 300MHz.
*/
/* Map in the mailbox */
mboxp = (p_rx_mbox_t)
"==> nxge_map_rxdma_channel_cfg_ring: "
"channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
& 0xfff);
"==> nxge_map_rxdma_channel_cfg_ring: "
"channel %d damaddrp $%p "
"cfg1 0x%016llx cfig2 0x%016llx",
"<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
return (status);
}
/*ARGSUSED*/
static void
{
"==> nxge_unmap_rxdma_channel_cfg_ring: channel %d",
"<== nxge_unmap_rxdma_channel_cfg_ring"));
}
static nxge_status_t
{
int i, j, index;
"==> nxge_map_rxdma_channel_buf_ring: channel %d",
channel));
" nxge_map_rxdma_channel_buf_ring: channel %d to map %d "
"chunks bufp 0x%016llx",
nmsgs = 0;
for (i = 0; i < num_chunks; i++, tmp_bufp++) {
"==> nxge_map_rxdma_channel_buf_ring: channel %d "
"bufp 0x%016llx nblocks %d nmsgs %d",
}
if (!nmsgs) {
"<== nxge_map_rxdma_channel_buf_ring: channel %d "
"no msg blocks",
channel));
status = NXGE_ERROR;
}
rbrp = (p_rx_rbr_ring_t)
KM_SLEEP);
(void *)nxgep->interrupt_cookie);
(void *)nxgep->interrupt_cookie);
/*
* Buffer sizes suggested by NIU architect.
* 256, 512 and 2K.
*/
} else {
} else {
}
}
"==> nxge_map_rxdma_channel_buf_ring: channel %d "
"actual rbr max %d rbb_max %d nmsgs %d "
"rbrp->block_size %d default_block_size %d "
"(config nxge_rbr_size %d nxge_rbr_spare_size %d)",
/* Map in buffers from the buffer pool. */
index = 0;
" nxge_map_rxdma_channel_buf_ring: map channel %d "
"chunk %d"
" nblocks %d chunk_size %x block_size 0x%x "
"dma_bufp $%p", channel, i,
dma_bufp));
for (j = 0; j < nblocks; j++) {
"allocb failed"));
break;
}
"index %d j %d rx_msg_p $%p",
index++;
}
}
if (i < rbrp->num_blocks) {
}
"nxge_map_rxdma_channel_buf_ring: done buf init "
"channel %d msg block entries %d",
" nxge_map_rxdma_channel_buf_ring: "
"channel %d done buf info init", channel));
" nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)",
index--;
}
}
"<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));
return (status);
}
/*ARGSUSED*/
static void
{
int i;
#ifdef NXGE_DEBUG
int num_chunks;
#endif
"==> nxge_unmap_rxdma_channel_buf_ring"));
"<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
return;
}
"==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
"<== nxge_unmap_rxdma_channel_buf_ring: "
"rx_msg_ring $%p ring_info $%p",
return;
}
#ifdef NXGE_DEBUG
#endif
" nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
"tnblocks %d (max %d) size ptrs %d ",
rx_msg_p = rx_msg_ring[i];
" nxge_unmap_rxdma_channel_buf_ring: "
"rx_msg_p $%p",
rx_msg_p));
rx_msg_ring[i] = NULL;
}
}
"<== nxge_unmap_rxdma_channel_buf_ring"));
}
static nxge_status_t
{
/*
* Load the sharable parameters by writing to the
* function zero control registers. These FZC registers
* should be initialized only once for the entire chip.
*/
(void) nxge_init_fzc_rx_common(nxgep);
/*
* Initialize the RXDMA port specific FZC control configurations.
* These FZC registers are pertaining to each port.
*/
(void) nxge_init_fzc_rxdma_port(nxgep);
return (status);
}
/*ARGSUSED*/
static void
{
}
static nxge_status_t
{
int i, ndmas;
"<== nxge_rxdma_hw_start: NULL ring pointers"));
return (NXGE_ERROR);
}
if (ndmas == 0) {
"<== nxge_rxdma_hw_start: no dma channel allocated"));
return (NXGE_ERROR);
}
"==> nxge_rxdma_hw_start (ndmas %d)", ndmas));
if (rx_mbox_areas_p) {
}
for (i = 0; i < ndmas; i++) {
"==> nxge_rxdma_hw_start (ndmas %d) channel %d",
(p_rx_rbr_ring_t)rbr_rings[i],
(p_rx_rcr_ring_t)rcr_rings[i],
(p_rx_mbox_t)rx_mbox_p[i]);
}
}
"rx_rbr_rings 0x%016llx rings 0x%016llx",
goto nxge_rxdma_hw_start_exit;
"==> nxge_rxdma_hw_start: disable "
for (; i >= 0; i--) {
}
"==> nxge_rxdma_hw_start: (status 0x%x)", status));
return (status);
}
static void
{
int i, ndmas;
"<== nxge_rxdma_hw_stop: NULL ring pointers"));
return;
}
if (!ndmas) {
"<== nxge_rxdma_hw_stop: no dma channel allocated"));
return;
}
"==> nxge_rxdma_hw_stop (ndmas %d)", ndmas));
for (i = 0; i < ndmas; i++) {
"==> nxge_rxdma_hw_stop (ndmas %d) channel %d",
}
"rx_rbr_rings 0x%016llx rings 0x%016llx",
}
static nxge_status_t
{
"npi handle addr $%p acc $%p",
/* Reset RXDMA channel */
if (rs != NPI_SUCCESS) {
"==> nxge_rxdma_start_channel: "
"reset rxdma failed (0x%08x channel %d)",
return (NXGE_ERROR | rs);
}
"==> nxge_rxdma_start_channel: reset done: channel %d",
channel));
/*
* Initialize the RXDMA channel specific FZC control
* configurations. These FZC registers are pertaining
* to each RX channel (logical pages).
*/
"==> nxge_rxdma_start_channel: "
"init fzc rxdma failed (0x%08x channel %d)",
return (status);
}
"==> nxge_rxdma_start_channel: fzc done"));
/*
* Zero out the shadow and prefetch ram.
*/
"ram done"));
/* Set up the interrupt event masks. */
&ent_mask);
if (rs != NPI_SUCCESS) {
"==> nxge_rxdma_start_channel: "
"init rxdma event masks failed (0x%08x channel %d)",
return (NXGE_ERROR | rs);
}
"event done: channel %d (mask 0x%016llx)",
/* Initialize the receive DMA control and status register */
"==> nxge_rxdma_start_channel: "
"init rxdma control register failed (0x%08x channel %d",
return (status);
}
/*
* Load RXDMA descriptors, buffers, mailbox,
* initialise the receive DMA channels and
* enable each DMA channel.
*/
" nxge_rxdma_start_channel: "
" init enable rxdma failed (0x%08x channel %d)",
return (status);
}
&ent_mask);
if (rs != NPI_SUCCESS) {
"==> nxge_rxdma_start_channel: "
"init rxdma event masks failed (0x%08x channel %d)",
return (NXGE_ERROR | rs);
}
"==> nxge_rxdma_start_channel: enable done"));
return (NXGE_OK);
}
static nxge_status_t
{
"npi handle addr $%p acc $%p",
/* Reset RXDMA channel */
if (rs != NPI_SUCCESS) {
" nxge_rxdma_stop_channel: "
" reset rxdma failed (0x%08x channel %d)",
return (NXGE_ERROR | rs);
}
"==> nxge_rxdma_stop_channel: reset done"));
/* Set up the interrupt event masks. */
&ent_mask);
if (rs != NPI_SUCCESS) {
"==> nxge_rxdma_stop_channel: "
"set rxdma event masks failed (0x%08x channel %d)",
return (NXGE_ERROR | rs);
}
"==> nxge_rxdma_stop_channel: event done"));
/* Initialize the receive DMA control and status register */
&cs);
" nxge_rxdma_stop_channel: init rxdma"
" control register failed (0x%08x channel %d",
return (status);
}
"==> nxge_rxdma_stop_channel: control done"));
/* disable dma channel */
" nxge_rxdma_stop_channel: "
" init enable rxdma failed (0x%08x channel %d)",
return (status);
}
RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));
return (NXGE_OK);
}
{
if (rs != NPI_SUCCESS)
return (NXGE_ERROR | rs);
statsp->id_mismatch++;
/* Global fatal error encountered */
}
case 0:
}
break;
case 1:
}
break;
case 2:
}
break;
case 3:
}
break;
default:
return (NXGE_ERROR);
}
}
if (my_err) {
return (status);
}
return (NXGE_OK);
}
static nxge_status_t
{
statsp->ipp_eop_err++;
}
statsp->zcp_eop_err++;
}
if (rxport_fatal) {
" nxge_rxdma_handle_port_error: "
" fatal error on Port #%d\n",
portn));
}
}
return (status);
}
static nxge_status_t
{
int ring_idx;
int i;
"Recovering from RxDMAChannel#%d error...", channel));
/*
* Stop the dma channel and wait for the stop-done bit.
* If the stop done bit is not set, then return
* an error.
*/
if (rs != NPI_SUCCESS) {
"nxge_disable_rxdma_channel:failed"));
goto fail;
}
/* Disable interrupt */
if (rs != NPI_SUCCESS) {
"nxge_rxdma_stop_channel: "
"set rxdma event masks failed (channel %d)",
channel));
}
/* Reset RXDMA channel */
if (rs != NPI_SUCCESS) {
"nxge_rxdma_fatal_err_recover: "
" reset rxdma failed (channel %d)", channel));
goto fail;
}
mboxp =
rbrp->rbr_rd_index = 0;
rcrp->comp_rd_index = 0;
rcrp->comp_wt_index = 0;
(nxge_port_rcr_size - 1);
(nxge_port_rcr_size - 1);
for (i = 0; i < rbrp->rbr_max_size; i++) {
if (ref_cnt != 1) {
if (rx_msg_p->cur_usage_cnt !=
"buf[%d]: cur_usage_cnt = %d "
"max_usage_cnt = %d\n", i,
} else {
/* Buffer can be re-posted */
rx_msg_p->cur_usage_cnt = 0;
rx_msg_p->pkt_buf_size = 0;
}
}
}
goto fail;
}
"Recovery Successful, RxDMAChannel#%d Restored",
channel));
return (NXGE_OK);
fail:
return (NXGE_ERROR | rs);
}
{
int ndmas;
int i;
"Recovering from RxPort error..."));
/* Disable RxMAC */
goto fail;
NXGE_DELAY(1000);
for (i = 0; i < ndmas; i++) {
"Could not recover channel %d",
channel));
}
}
/* Reset IPP */
"nxge_rx_port_fatal_err_recover: "
"Failed to reset IPP"));
goto fail;
}
/* Reset RxMAC */
"nxge_rx_port_fatal_err_recover: "
"Failed to reset RxMAC"));
goto fail;
}
/* Re-Initialize IPP */
"nxge_rx_port_fatal_err_recover: "
"Failed to init IPP"));
goto fail;
}
/* Re-Initialize RxMAC */
"nxge_rx_port_fatal_err_recover: "
"Failed to reset RxMAC"));
goto fail;
}
/* Re-enable RxMAC */
"nxge_rx_port_fatal_err_recover: "
"Failed to enable RxMAC"));
goto fail;
}
"Recovery Successful, RxPort Restored"));
return (NXGE_OK);
fail:
return (status);
}
void
{
switch (err_id) {
else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
break;
else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
"!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
break;
break;
break;
}
}
static uint16_t
{
switch (bufsz_type) {
case RCR_PKTBUFSZ_0:
case RBR_BUFSZ0_256B:
break;
case RBR_BUFSZ0_512B:
break;
case RBR_BUFSZ0_1K:
break;
case RBR_BUFSZ0_2K:
break;
default:
"nxge_get_pktbug_size: bad bufsz0"));
break;
}
break;
case RCR_PKTBUFSZ_1:
case RBR_BUFSZ1_1K:
break;
case RBR_BUFSZ1_2K:
break;
case RBR_BUFSZ1_4K:
break;
case RBR_BUFSZ1_8K:
break;
default:
"nxge_get_pktbug_size: bad bufsz1"));
break;
}
break;
case RCR_PKTBUFSZ_2:
case RBR_BUFSZ2_2K:
break;
case RBR_BUFSZ2_4K:
break;
case RBR_BUFSZ2_8K:
break;
case RBR_BUFSZ2_16K:
break;
default:
"nxge_get_pktbug_size: bad bufsz2"));
break;
}
break;
case RCR_SINGLE_BLOCK:
case BKSIZE_4K:
break;
case BKSIZE_8K:
break;
case BKSIZE_16K:
break;
case BKSIZE_32K:
break;
default:
"nxge_get_pktbug_size: bad bksize"));
break;
}
break;
default:
"nxge_get_pktbug_size: bad bufsz_type"));
break;
}
return (sz);
}
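/*
 * Illustrative sketch (not part of the driver): the bufsz0 branch of
 * nxge_get_pktbuf_size() above, restated as byte counts.  It assumes
 * the RBR_BUFSZ0_* codes map to the sizes their names state (256B,
 * 512B, 1K, 2K), as the comments and debug messages in this file
 * suggest.
 */
#ifdef NXGE_EXAMPLE_SKETCH
static uint16_t
nxge_example_bufsz0_bytes(uint8_t code)
{
	switch (code) {
	case RBR_BUFSZ0_256B:
		return (256);
	case RBR_BUFSZ0_512B:
		return (512);
	case RBR_BUFSZ0_1K:
		return (1024);
	case RBR_BUFSZ0_2K:
		return (2048);
	default:
		return (0);	/* bad bufsz0 code */
	}
}
#endif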