/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#if !defined(_BIG_ENDIAN)
#include <npi_rx_rd32.h>
#endif
#include <npi_rx_rd64.h>
#include <npi_rx_wr64.h>
/*
 * Globals: tunable parameters (/etc/system or adb)
 */
extern uint32_t nxge_rbr_size;
extern uint32_t nxge_rcr_size;
extern uint32_t nxge_rbr_spare_size;
extern uint16_t nxge_rdc_buf_offset;
extern uint32_t nxge_mblks_pending;
/*
* Tunable to reduce the amount of time spent in the
* ISR doing Rx Processing.
*/
extern uint32_t nxge_max_rx_pkts;
/*
* Tunables to manage the receive buffer blocks.
*
* nxge_rx_threshold_hi: copy all buffers.
* nxge_rx_bcopy_size_type: receive buffer block size type.
* nxge_rx_threshold_lo: copy only up to tunable block size type.
*/
extern uint32_t nxge_cksum_offload;
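/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * how the copy thresholds described above are intended to interact, assuming
 * the ring has bcopy enabled at all.  Below the hi threshold a packet is
 * copied only if the lo threshold is disabled, or has been reached and the
 * ring's bcopy size type covers the packet's buffer size type; at or above
 * the hi threshold every packet is copied.
 */
static boolean_t
nxge_example_should_bcopy(uint32_t consumed, uint32_t threshold_lo,
    uint32_t threshold_hi, uint32_t bufsize_type, uint32_t pktbufsz_type)
{
	if (consumed < threshold_hi) {
		if (threshold_lo == 0 ||
		    (consumed >= threshold_lo &&
		    bufsize_type >= pktbufsz_type))
			return (B_TRUE);
		return (B_FALSE);
	}
	return (B_TRUE);
}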
static void nxge_unmap_rxdma(p_nxge_t, int);
static void nxge_rxdma_hw_stop(p_nxge_t, int);
p_rx_mbox_t *);
p_rx_rcr_ring_t *, p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
p_rx_rbr_ring_t *, uint32_t);
static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
static mblk_t *
static void nxge_receive_packet(p_nxge_t,
boolean_t *,
static void nxge_freeb(p_rx_msg_t);
static nxge_status_t
static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);
{
int dev_gindex;
if (!isLDOMguest(nxgep)) {
return (NXGE_ERROR);
}
}
/*
* NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
* We only have 8 hardware RDC tables, but we may have
* up to 16 logical (software-defined) groups of RDCs,
* if we make use of layer 3 & 4 hardware classification.
*/
for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
if ((nxge_grp_dc_add(nxgep,
goto init_rxdma_channels_exit;
}
}
}
break;
}
return (NXGE_OK);
for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
}
}
}
break;
}
return (NXGE_ERROR);
}
{
"<== nxge_init_rxdma: status 0x%x", status));
return (status);
}
#if defined(sun4v)
if (isLDOMguest(nxge)) {
/* set rcr_ring */
return (status);
}
}
#endif
}
"<== nxge_init_rxdma_channel: status 0x%x", status));
return (status);
}
void
{
int rdc;
"nxge_uninit_rxdma_channels: no channels"));
return;
}
}
}
}
void
{
}
}
{
if (rs != NPI_SUCCESS) {
}
return (status);
}
void
{
int rdc;
if (!isLDOMguest(nxgep)) {
(void) npi_rxdma_dump_fzc_regs(handle);
}
"nxge_rxdma_regs_dump_channels: "
"NULL ring pointer(s)"));
return;
}
"nxge_rxdma_regs_dump_channels: no channels"));
return;
}
if (ring) {
}
}
}
}
{
if (rs != NPI_SUCCESS) {
}
return (status);
}
{
"<== nxge_init_rxdma_channel_event_mask"));
if (rs != NPI_SUCCESS) {
}
return (status);
}
{
"<== nxge_init_rxdma_channel_cntl_stat"));
if (rs != NPI_SUCCESS) {
}
return (status);
}
/*
* nxge_rxdma_cfg_rdcgrp_default_rdc
*
* Set the default RDC for an RDC Group (Table)
*
* Arguments:
* nxgep
* rdcgrp The group to modify
* rdc The new default RDC.
*
* Notes:
*
* npi_rxdma_cfg_rdc_table_default_rdc()
*
* Registers accessed:
* RDC_TBL_REG: FZC_ZCP + 0x10000
*
* Context:
* Service domain
*/
{
" ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
/*
* This has to be rewritten. Do we even allow this anymore?
*/
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
" <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
return (NXGE_OK);
}
{
" ==> nxge_rxdma_cfg_port_default_rdc"));
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
" <== nxge_rxdma_cfg_port_default_rdc"));
return (NXGE_OK);
}
{
" ==> nxge_rxdma_cfg_rcr_threshold"));
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
return (NXGE_OK);
}
{
if (enable == 0) {
} else {
tout);
}
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
return (NXGE_OK);
}
{
/*
* Use configuration data composed at init time.
* Write to hardware the receive ring configurations.
*/
"==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
switch (nxgep->rx_bksize_code) {
case RBR_BKSIZE_4K:
break;
case RBR_BKSIZE_8K:
break;
case RBR_BKSIZE_16K:
break;
case RBR_BKSIZE_32K:
break;
}
/* For now, disable this timeout in a guest domain. */
if (isLDOMguest(nxgep)) {
rdc_desc.rcr_timeout = 0;
} else {
}
"rbr_len qlen %d pagesize code %d rcr_len %d",
"size 0 %d size 1 %d size 2 %d",
else
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
/*
* Enable the timeout and threshold.
*/
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
if (!isLDOMguest(nxgep)) {
/* Enable the DMA */
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
}
/* Kick the DMA engine. */
if (!isLDOMguest(nxgep)) {
/* Clear the rbr empty bit */
}
return (NXGE_OK);
}
{
/* disable the DMA */
if (rs != NPI_SUCCESS) {
"<== nxge_disable_rxdma_channel:failed (0x%x)",
rs));
return (NXGE_ERROR | rs);
}
return (NXGE_OK);
}
{
"<== nxge_init_rxdma_channel_rcrflush"));
"<== nxge_init_rxdma_channel_rcrflsh"));
return (status);
}
/*ARGSUSED*/
{
int bufsize;
"==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
#if defined(__i386)
#else
#endif
switch (pktbufsz_type) {
case 0:
break;
case 1:
break;
case 2:
break;
case RCR_SINGLE_BLOCK:
bufsize = 0;
anchor_index = 0;
break;
default:
return (NXGE_ERROR);
}
anchor_index = 0;
"==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
"buf_pp $%p btype %d anchor_index %d "
"bufinfo $%p",
bufinfo));
goto found_index;
}
"==> nxge_rxbuf_pp_to_vp: "
"buf_pp $%p btype %d anchor_index %d",
anchor_index));
iteration = 0;
/*
* First check if this block has been seen
* recently. This is indicated by a hint which
* is initialized when the first buffer of the block
* is seen. The hint is reset when the last buffer of
* the block has been processed.
* As three block sizes are supported, three hints
* are kept. The idea behind the hints is that once
* the hardware uses a block for a buffer of that
* size, it will use it exclusively for that size
* and will use it until it is exhausted. It is assumed
* that there would be a single block in use for the same
* buffer size at any given time.
*/
/*
* Check if this is the last buffer in the block.
* If so, then reset the hint for that size.
*/
}
}
"==> nxge_rxbuf_pp_to_vp: (!found)"
"buf_pp $%p btype %d anchor_index %d",
anchor_index));
/*
* This is the first buffer of the block of this
* size. We need to search the whole information
* array, using a binary search. It assumes that
* the information is already sorted in increasing
* order:
*	info[0] < info[1] < info[2] .... < info[n-1]
* where n is the size of the information array.
* (See the illustrative sketch following this function.)
*/
l_index = 0;
while (search_done == B_FALSE) {
(iteration >= max_iterations))
/* read the DVMA address information and sort it */
"==> nxge_rxbuf_pp_to_vp: (searching)"
"buf_pp $%p btype %d "
"anchor_index %d chunk_size %d dvmaaddr $%p",
dvma_addr));
case IN_MIDDLE:
/* found */
(dvma_addr + chunk_size))
break;
case BOTH_RIGHT:
/* not found: go to the right */
break;
case BOTH_LEFT:
/* not found: go to the left */
break;
default: /* should not come here */
return (NXGE_ERROR);
}
iteration++;
}
"==> nxge_rxbuf_pp_to_vp: (search done)"
"buf_pp $%p btype %d anchor_index %d",
anchor_index));
}
"==> nxge_rxbuf_pp_to_vp: (search failed)"
"buf_pp $%p btype %d anchor_index %d",
anchor_index));
return (NXGE_ERROR);
}
"==> nxge_rxbuf_pp_to_vp: (FOUND1)"
"buf_pp $%p btype %d bufsize %d anchor_index %d",
anchor_index));
/* index of the first block in this chunk */
"==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
"buf_pp $%p btype %d bufsize %d "
"anchor_index %d chunk_index %d dvma $%p",
dvma_addr));
"==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
"buf_pp $%p btype %d bufsize %d "
"anchor_index %d chunk_index %d dvma $%p "
"offset %d block_size %d",
block_size));
"==> nxge_rxbuf_pp_to_vp: "
"total_index %d dvma_addr $%p "
"offset %d block_size %d "
"block_index %d ",
block_index));
#if defined(__i386)
#else
#endif
"==> nxge_rxbuf_pp_to_vp: "
"total_index %d dvma_addr $%p "
"offset %d block_size %d "
"block_index %d "
"*pkt_buf_addr_p $%p",
*pkt_buf_addr_p));
*msg_index = total_index;
"==> nxge_rxbuf_pp_to_vp: get msg index: "
"msg_index %d bufoffset_index %d",
*bufoffset));
return (NXGE_OK);
}
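/*
 * Illustrative sketch only (hypothetical types and helper, not driver code):
 * the DVMA-to-kernel-address lookup performed by nxge_rxbuf_pp_to_vp() above
 * amounts to a binary search over chunk records sorted by ascending DVMA
 * base address, looking for the chunk whose [base, base + size) range
 * contains the packet buffer address.
 */
typedef struct {
	uint64_t	base;	/* chunk DVMA base address */
	uint64_t	size;	/* chunk size in bytes */
} nxge_example_chunk_t;

static int
nxge_example_find_chunk(const nxge_example_chunk_t *chunks, int nchunks,
    uint64_t addr)
{
	int lo = 0;
	int hi = nchunks - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (addr < chunks[mid].base)
			hi = mid - 1;
		else if (addr >= chunks[mid].base + chunks[mid].size)
			lo = mid + 1;
		else
			return (mid);	/* addr falls inside this chunk */
	}
	return (-1);			/* not found */
}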
/*
* Comparison function used by the sort routine below
* (qsort-style contract: returns -1, 0, or 1).
*/
static int
{
rxbuf_index_info_t *a, *b;
a = (rxbuf_index_info_t *)p1;
b = (rxbuf_index_info_t *)p2;
return (1);
return (-1);
return (0);
}
/*
*
*/
/*
* Generic shellsort, from K&R (1st ed, p. 58), somewhat modified.
* n = # objs in the array
* s = size of each obj (must be a multiple of the word size)
* f = ptr to function to compare two objs
*	returns (-1 = less than, 0 = equal, 1 = greater than)
*/
void
{
int g, i, j, ii;
unsigned int *p1, *p2;
unsigned int tmp;
/* No work to do */
if (v == NULL || n <= 1)
return;
/* Sanity check on arguments */
ASSERT(s > 0);
for (g = n / 2; g > 0; g /= 2) {
for (i = g; i < n; i++) {
for (j = i - g; j >= 0 &&
(*f)(v + j * s, v + (j + g) * s) == 1;
j -= g) {
p1 = (unsigned *)(v + j * s);
p2 = (unsigned *)(v + (j + g) * s);
/* Swap the two objects one word at a time. */
for (ii = 0; ii < s / 4; ii++) {
	tmp = *p1;
	*p1++ = *p2;
	*p2++ = tmp;
}
}
}
}
}
/*
* Initialize data structures required for rxdma
* buffer dvma->vmem address lookup
*/
/*ARGSUSED*/
static nxge_status_t
{
int index;
/* read the DVMA address information and sort it */
/* do init of the information array */
" nxge_rxbuf_index_info_init Sort ptrs"));
/* sort the array */
sizeof (rxbuf_index_info_t), nxge_sort_compare);
" nxge_rxbuf_index_info_init: sorted chunk %d "
" ioaddr $%p kaddr $%p size %x",
}
max_iteration = 0;
" nxge_rxbuf_index_info_init Find max iter %d",
return (NXGE_OK);
}
/* ARGSUSED */
void
{
#ifdef NXGE_DEBUG
"\trcr entry $%p "
"\trcr entry 0x%0llx "
"\trcr entry 0x%08x "
"\trcr entry 0x%08x "
"\tvalue 0x%0llx\n"
"\tmulti = %d\n"
"\tpkt_type = 0x%x\n"
"\tzero_copy = %d\n"
"\tnoport = %d\n"
"\tpromis = %d\n"
"\terror = 0x%04x\n"
"\tdcf_err = 0x%01x\n"
"\tl2_len = %d\n"
"\tpktbufsize = %d\n"
"\tpkt_buf_addr = $%p\n"
"\tpkt_buf_addr (<< 6) = $%p\n",
bptr,
#endif
}
void
{
"==> nxge_rxdma_regs_dump: rdc channel %d", rdc));
/* RBR head */
#if defined(__i386)
printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
#else
printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
#endif
/* RBR stats */
/* RCR tail */
#if defined(__i386)
printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
#else
printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
#endif
/* RCR qlen */
"<== nxge_rxdma_regs_dump: rdc rdc %d", rdc));
}
{
int rdc;
"==> nxge_rxdma_hw_mode: mode %d", enable));
"<== nxge_rxdma_mode: not initialized"));
return (NXGE_ERROR);
}
"<== nxge_tx_port_fatal_err_recover: "
"NULL ring pointer(s)"));
return (NXGE_ERROR);
}
"nxge_rxdma_regs_dump_channels: no channels"));
return (NULL);
}
if (ring) {
if (enable) {
"==> nxge_rxdma_hw_mode: "
"channel %d (enable)", rdc));
} else {
"==> nxge_rxdma_hw_mode: "
"channel %d disable)", rdc));
}
}
}
}
"<== nxge_rxdma_hw_mode: status 0x%x", status));
return (status);
}
void
{
"==> nxge_rxdma_enable_channel: channel %d", channel));
}
void
{
"==> nxge_rxdma_disable_channel: channel %d", channel));
}
void
{
(void) nxge_rx_mac_enable(nxgep);
}
/*ARGSUSED*/
void
{
int rdc;
"<== nxge_tx_port_fatal_err_recover: "
"NULL ring pointer(s)"));
return;
}
"nxge_rxdma_regs_dump_channels: no channels"));
return;
}
if (ring) {
"==> nxge_fixup_rxdma_rings: "
"channel %d ring $%px",
}
}
}
}
void
{
int ndmas;
"<== nxge_rxdma_fix_channel: buf not allocated"));
return;
}
if (!ndmas) {
"<== nxge_rxdma_fix_channel: no dma allocated"));
return;
}
/* Reinitialize the receive block and completion rings */
rbrp->rbr_rd_index = 0;
rcrp->comp_rd_index = 0;
rcrp->comp_wt_index = 0;
}
"<== nxge_rxdma_fix_channel: success (0x%08x)", status));
return;
"<== nxge_rxdma_fix_channel: failed (0x%08x)", status));
}
{
"==> nxge_rxdma_get_rbr_ring: channel %d", channel));
"<== nxge_rxdma_get_rbr_ring: "
"NULL ring pointer(s)"));
return (NULL);
}
"<== nxge_rxdma_get_rbr_ring: no channels"));
return (NULL);
}
if (ring) {
"==> nxge_rxdma_get_rbr_ring: "
return (ring);
}
}
}
}
"<== nxge_rxdma_get_rbr_ring: not found"));
return (NULL);
}
{
"==> nxge_rxdma_get_rcr_ring: channel %d", channel));
"<== nxge_rxdma_get_rcr_ring: "
"NULL ring pointer(s)"));
return (NULL);
}
"<== nxge_rxdma_get_rbr_ring: no channels"));
return (NULL);
}
if (ring) {
"==> nxge_rxdma_get_rcr_ring: "
return (ring);
}
}
}
}
"<== nxge_rxdma_get_rcr_ring: not found"));
return (NULL);
}
/*
* Static functions start here.
*/
static p_rx_msg_t
{
"Allocation of a rx msg failed."));
goto nxge_allocb_exit;
}
if (dmabuf_p) {
} else {
"Allocation of a receive page failed."));
goto nxge_allocb_fail1;
}
}
goto nxge_allocb_fail2;
}
goto nxge_allocb_exit;
if (!nxge_mp->use_buf_pool) {
}
return (nxge_mp);
}
{
"offset = 0x%08X "
"size = 0x%08X",
goto nxge_dupb_exit;
}
nxge_mp));
return (mp);
}
{
goto nxge_dupb_bcopy_exit;
}
nxge_mp));
return (mp);
}
void
{
/* Reuse this buffer */
rx_msg_p->cur_usage_cnt = 0;
rx_msg_p->max_usage_cnt = 0;
rx_msg_p->pkt_buf_size = 0;
if (rx_rbr_p->rbr_use_bcopy) {
}
/*
* Get the rbr header pointer and its offset index.
*/
"<== nxge_post_page (channel %d post_next_index %d)",
}
void
{
int ref_cnt;
"nxge_freeb:rx_msg_p = $%p (block pending %d)",
/*
* First we need to get the free state, then
* atomically decrement the reference count to prevent
* a race condition with the interrupt thread that
* is processing a loaned-up buffer block.  (See the
* illustrative sketch following this function.)
*/
if (!ref_cnt) {
"will free: rx_msg_p = $%p (block pending %d)",
if (!rx_msg_p->use_buf_pool) {
}
if (ring) {
/*
* Decrement the receive buffer ring's reference
* count, too.
*/
/*
* Free the receive buffer ring, if
* 1. all the receive buffers have been freed
* 2. and we are in the proper state (that is,
* we are not UNMAPPING).
*/
if (ring->rbr_ref_cnt == 0 &&
/*
* Free receive data buffers,
* buffer index information
* (rxring_info) and
* the message block ring.
*/
"nxge_freeb:rx_msg_p = $%p "
"(block pending %d) free buffers",
sizeof (rxring_info_t));
}
if (ring->rx_msg_ring) {
sizeof (p_rx_msg_t));
}
}
}
return;
}
/*
* Repost buffer.
*/
"nxge_freeb: post page $%p:", rx_msg_p));
}
}
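/*
 * Illustrative sketch only (hypothetical helper, assumes <sys/atomic.h>):
 * the release pattern used by nxge_freeb() above.  The free state is sampled
 * before the reference count is atomically dropped, so the thread that takes
 * the count to zero knows whether the block may be freed now or is still
 * loaned up to the stack.
 */
static boolean_t
nxge_example_release_ref(volatile uint32_t *ref_cnt, boolean_t free_state)
{
	uint32_t remaining;

	/* Sample the state first, then drop our reference atomically. */
	remaining = atomic_dec_32_nv(ref_cnt);

	return ((remaining == 0 && free_state) ? B_TRUE : B_FALSE);
}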
{
"<== nxge_rx_intr: arg2 $%p arg1 $%p",
return (DDI_INTR_CLAIMED);
}
}
"<== nxge_rx_intr: interface not started or intialized"));
return (DDI_INTR_CLAIMED);
}
"==> nxge_rx_intr: arg2 $%p arg1 $%p",
/*
* Get the PIO handle.
*/
/*
* Get the ring to enable us to process packets.
*/
/*
* The RCR ring lock must be held when packets
* are being processed and the hardware registers are
* being read or written, to prevent race conditions
* among the interrupt thread, the polling thread
* (which would cause fatal errors such as the rcrincon
* bit being set) and the setting of the poll_flag.
*/
/*
* Get the control and status for this channel.
*/
"<== nxge_rx_intr: channel is not started"));
/*
* We received an interrupt before the ring is started.
*/
/*
* Rearm this logical group if this is a single device
* group.
*/
if (isLDOMguest(nxgep)) {
} else {
}
}
return (DDI_INTR_CLAIMED);
}
"cs 0x%016llx rcrto 0x%x rcrthres %x",
}
/* error events. */
}
/*
* Enable the mailbox update interrupt if we want
* to use the mailbox. We probably don't need to use
* the mailbox as it only saves us one PIO read.
* Also write 1 to rcrthres and rcrto to clear
* these two edge-triggered bits.
*/
/*
* If the polling mode is enabled, disable the interrupt.
*/
"==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p "
/*
* Disarm this logical group if this is a single device
* group.
*/
if (isLDOMguest(nxgep)) {
} else {
}
}
} else {
/*
* Rearm this logical group if this is a single device
* group.
*/
if (isLDOMguest(nxgep)) {
} else {
}
}
"==> nxge_rx_intr: rdc %d ldgp $%p "
}
rcrp->rcr_gen_num);
}
return (DDI_INTR_CLAIMED);
}
/*
* This routine is the main packet receive processing function.
* It gets the packet type, error code, and buffer related
* information from the receive completion entry.
* How many completion entries to process is based on the number of packets
* queued by the hardware, a hardware maintained tail pointer
* and a configurable receive packet count.
*
* A chain of message blocks will be created as a result of processing
* the completion entries. This chain of message blocks will be returned and
* a hardware control status register will be updated with the number of
* packets that were removed from the hardware queue.  (See the
* illustrative sketch after this function.)
*
* The RCR ring lock is held when entering this function.
*/
static mblk_t *
int bytes_to_pickup)
{
int totallen = 0;
#if defined(_BIG_ENDIAN)
#endif
return (NULL);
}
"==> nxge_rx_pkts: START: rcr channel %d "
"head_p $%p head_pp $%p index %d ",
rcr_p->comp_rd_index));
#if !defined(_BIG_ENDIAN)
#else
if (rs != NPI_SUCCESS) {
"channel %d, get qlen failed 0x%08x",
return (NULL);
}
#endif
if (!qlen) {
"==> nxge_rx_pkts:rcr channel %d "
return (NULL);
}
/*
* Number of packets queued
* (A jumbo or multi-buffer packet will be counted as only one
* packet, though it may take up more than one completion entry).
*/
while (qlen_hw) {
#ifdef NXGE_DEBUG
#endif
/*
* Process one completion ring entry.
*/
/*
* message chaining modes
*/
if (nmp) {
}
}
"==> nxge_rx_pkts: loop: rcr channel %d "
"before updating: multi %d "
"nrcr_read %d "
"npk read %d "
"head_pp $%p index %d ",
if (!multi) {
qlen_hw--;
npkt_read++;
}
/*
* Update the next read entry.
*/
nrcr_read++;
"<== nxge_rx_pkts: (SAM, process one packet) "
"nrcr_read %d",
nrcr_read));
"==> nxge_rx_pkts: loop: rcr channel %d "
"multi %d "
"nrcr_read %d "
"npk read %d "
"head_pp $%p index %d ",
if ((bytes_to_pickup != -1) &&
(totallen >= bytes_to_pickup)) {
break;
}
}
}
"==> nxge_rx_pkts: EXIT: rcr channel %d "
"head_pp $%p index %016llx ",
rcr_p->comp_rd_index));
/*
* Update RCR buffer pointer read and number of packets
* read.
*/
return (head_mp);
}
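/*
 * Illustrative sketch only (hypothetical helper): the chaining and byte
 * budget pattern used by nxge_rx_pkts() above.  Completed packets are linked
 * through b_next, and the walk stops once bytes_to_pickup has been satisfied
 * (-1 means no byte limit).
 */
static mblk_t *
nxge_example_chain_pkts(mblk_t **pkts, int npkts, int bytes_to_pickup)
{
	mblk_t	*head = NULL;
	mblk_t	**tailp = &head;
	int	totallen = 0;
	int	i;

	for (i = 0; i < npkts; i++) {
		*tailp = pkts[i];
		tailp = &pkts[i]->b_next;
		totallen += (int)(pkts[i]->b_wptr - pkts[i]->b_rptr);
		if ((bytes_to_pickup != -1) &&
		    (totallen >= bytes_to_pickup))
			break;
	}
	*tailp = NULL;

	return (head);
}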
void
{
#ifdef NXGE_DEBUG
int dump_len;
#endif
#if defined(__i386)
#else
#endif
"==> nxge_receive_packet: entryp $%p entry 0x%0llx "
"pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
"error_type 0x%x pkt_type 0x%x "
"pktbufsz_type %d ",
"==> nxge_receive_packet: entryp $%p entry 0x%0llx "
"pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
"error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
pkt_type));
"==> (rbr) nxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
/* get the stats ptr */
if (!l2_len) {
"<== nxge_receive_packet: failed: l2 length is 0."));
return;
}
/*
* Software workaround for a BMAC hardware limitation that allows a
* maxframe size of 1526 instead of 1522 for non-jumbo frames, and
* 0x2406 instead of 0x2400 for jumbo frames.
*/
}
/* Hardware sends us 4 bytes of CRC as no stripping is done. */
/* shift 6 bits to get the full io address */
#if defined(__i386)
#else
#endif
"==> (rbr) nxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
if (first_entry) {
"==> nxge_receive_packet: first entry 0x%016llx "
"pkt_buf_addr_pp $%p l2_len %d hdr %d",
hdr_size));
}
"==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
/*
* Packet buffer address in the completion entry points
* to the starting buffer address (offset 0).
* Use the starting buffer address to locate the corresponding
* kernel address.
*/
&msg_index);
"==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
"<== nxge_receive_packet: found vaddr failed %d",
status));
return;
}
"==> (rbr 3) nxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
"==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
"==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
switch (pktbufsz_type) {
case RCR_PKTBUFSZ_0:
"==> nxge_receive_packet: 0 buf %d", bsize));
break;
case RCR_PKTBUFSZ_1:
"==> nxge_receive_packet: 1 buf %d", bsize));
break;
case RCR_PKTBUFSZ_2:
"==> nxge_receive_packet: 2 buf %d", bsize));
break;
case RCR_SINGLE_BLOCK:
"==> nxge_receive_packet: single %d", bsize));
break;
default:
return;
}
switch (nxge_rdc_buf_offset) {
case SW_OFFSET_NO_OFFSET:
sw_offset_bytes = 0;
break;
case SW_OFFSET_64:
sw_offset_bytes = 64;
break;
case SW_OFFSET_128:
sw_offset_bytes = 128;
break;
case SW_OFFSET_192:
sw_offset_bytes = 192;
break;
case SW_OFFSET_256:
sw_offset_bytes = 256;
break;
case SW_OFFSET_320:
sw_offset_bytes = 320;
break;
case SW_OFFSET_384:
sw_offset_bytes = 384;
break;
case SW_OFFSET_448:
sw_offset_bytes = 448;
break;
default:
sw_offset_bytes = 0;
break;
}
"==> nxge_receive_packet: after first dump:usage count"));
if (rx_msg_p->cur_usage_cnt == 0) {
if (rx_rbr_p->rbr_use_bcopy) {
if (rx_rbr_p->rbr_consumed <
if (rx_rbr_p->rbr_threshold_lo == 0 ||
((rx_rbr_p->rbr_consumed >=
rx_rbr_p->rbr_threshold_lo) &&
(rx_rbr_p->rbr_bufsize_type >=
pktbufsz_type))) {
}
} else {
}
}
"==> nxge_receive_packet: buf %d (new block) ",
bsize));
if (pktbufsz_type == RCR_SINGLE_BLOCK) {
"==> nxge_receive_packet: buf %d "
"(single block) ",
bsize));
/*
* Buffer can be reused once the free function
* is called.
*/
} else {
}
}
} else {
}
}
"msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
if (dcf_err) {
#ifdef NXGE_DEBUG
"nxge_receive_packet: channel %d dcf_err rcr"
}
#endif
} else if (pkt_too_long_err) {
" channel %d packet length [%d] > "
} else {
/* Update error stats */
switch (error_type) {
/*
* Do not send FMA ereport for RCR_L2_ERROR and
* RCR_L4_CSUM_ERROR because most likely they indicate
* back pressure rather than HW failures.
*/
case RCR_L2_ERROR:
" nxge_receive_packet:"
" channel %d RCR L2_ERROR",
channel));
}
break;
case RCR_L4_CSUM_ERROR:
if (rdc_stats->l4_cksum_err <
" nxge_receive_packet:"
" channel %d"
" RCR L4_CSUM_ERROR", channel));
}
break;
/*
* Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and
* RCR_ZCP_SOFT_ERROR because they reflect the same
* FFLP and ZCP errors that have been reported by
* nxge_fflp.c and nxge_zcp.c.
*/
case RCR_FFLP_SOFT_ERROR:
if (rdc_stats->fflp_soft_err <
" nxge_receive_packet:"
" channel %d"
" RCR FFLP_SOFT_ERROR", channel));
}
break;
case RCR_ZCP_SOFT_ERROR:
if (rdc_stats->zcp_soft_err <
" nxge_receive_packet: Channel %d"
" RCR ZCP_SOFT_ERROR", channel));
break;
default:
if (rdc_stats->rcr_unknown_err
< error_disp_cnt) {
" nxge_receive_packet: Channel %d"
" RCR entry 0x%llx error 0x%x",
}
break;
}
}
/*
* Update and repost buffer block if max usage
* count is reached.
*/
if (error_send_up == B_FALSE) {
if (buffer_free == B_TRUE) {
}
return;
}
}
"==> nxge_receive_packet: DMA sync second "));
if (!rx_msg_p->rx_use_bcopy) {
/*
* For loaned up buffers, the driver reference count
* will be incremented first and then the free state.
*/
if (first_entry) {
} else {
- skip_len];
}
} else {
} else {
}
}
}
} else {
if (first_entry) {
} else {
}
}
if (first_entry) {
/*
* Jumbo packets may be received with more than one
* buffer, increment ipackets for the first entry only.
*/
/* Update ibytes for kstat. */
/*
* Update the number of bytes read so far for the
* current frame.
*/
} else {
}
"==> nxge_receive_packet after dupb: "
"rbr consumed %d "
"pktbufsz_type %d "
"nmp $%p rptr $%p wptr $%p "
"buf_offset %d bzise %d l2_len %d skip_len %d",
} else {
"update stats (error)");
if (buffer_free == B_TRUE) {
}
return;
}
if (buffer_free == B_TRUE) {
}
}
if (is_valid) {
if (first_entry) {
} else {
}
}
/*
* ERROR, FRAG and PKT_TYPE are only reported in the first entry.
* If a packet is not fragmented and no error bit is set, then
* L4 checksum is OK.
*/
/*
* If the checksum flag nxge_cksum_offload
* is 1, TCP and UDP packets can be sent
* up with a good checksum. If the checksum flag
* is set to 0, checksum reporting will apply to
* TCP packets only (workaround for a hardware bug).
* If the checksum flag nxge_cksum_offload is
* greater than 1, neither TCP nor UDP packets
* will have their hardware checksum results reported.
* (See the illustrative sketch following this function.)
*/
if (nxge_cksum_offload == 1) {
pkt_type == RCR_PKT_IS_UDP) ?
} else if (!nxge_cksum_offload) {
/* TCP checksum only. */
}
"is_valid 0x%x multi 0x%llx pkt %d frag %d error %d",
"is_valid 0x%x multi 0x%llx pkt %d frag %d "
"error %d",
}
}
"==> nxge_receive_packet: *mp 0x%016llx", *mp));
"multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
}
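/*
 * Illustrative sketch only (hypothetical helper): the nxge_cksum_offload
 * policy described inside nxge_receive_packet() above.  Returns B_TRUE when
 * the hardware checksum result may be reported upstream for this packet.
 */
static boolean_t
nxge_example_report_hw_cksum(uint32_t cksum_offload, boolean_t is_tcp,
    boolean_t is_udp)
{
	if (cksum_offload == 1)
		return ((is_tcp || is_udp) ? B_TRUE : B_FALSE);
	if (cksum_offload == 0)
		return (is_tcp);	/* TCP only: HW bug workaround */
	return (B_FALSE);		/* > 1: report nothing */
}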
/*
* Enable polling for a ring. Interrupt for the ring is disabled when
* the nxge interrupt comes (see nxge_rx_intr).
*/
int
{
if (ring_handle == NULL) {
return (0);
}
"==> nxge_enable_poll: rdc %d NULL ldgp: no change",
return (0);
}
/* enable polling */
"==> nxge_enable_poll: rdc %d set poll flag to 1",
}
return (0);
}
/*
* Disable polling for a ring and enable its interrupt.
*/
int
{
if (ring_handle == NULL) {
return (0);
}
/* disable polling: enable interrupt */
/*
* Get the control and status for this channel.
*/
/*
* Enable the mailbox update.
* Since packets were not read and the hardware uses
* bits pktread and ptrread to update the queue
* length, we need to set both bits to 0.
*/
/*
* Rearm this logical group if this is a single device
* group.
*/
"==> nxge_disable_poll: no ldgp rdc %d "
return (0);
}
"==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)",
if (isLDOMguest(nxgep)) {
} else {
}
}
}
return (0);
}
/*
* Poll 'bytes_to_pickup' bytes of message from the rx ring.
*/
mblk_t *
{
/*
* Get the control and status for this channel.
*/
"==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d",
/* error events. */
}
return (mblk);
}
/*ARGSUSED*/
static nxge_status_t
{
"==> nxge_rx_err_evnts: rx_rbr_timeout"));
}
rdc_stats->rsp_cnt_err++;
"==> nxge_rx_err_evnts(channel %d): "
"rsp_cnt_err", channel));
}
rdc_stats->byte_en_bus++;
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: byte_en_bus", channel));
}
rdc_stats->rsp_dat_err++;
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: rsp_dat_err", channel));
}
rdc_stats->rcr_ack_err++;
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: rcr_ack_err", channel));
}
rdc_stats->dc_fifo_err++;
/* This is not a fatal error! */
"==> nxge_rx_err_evnts(channel %d): "
"dc_fifo_err", channel));
}
!= NPI_SUCCESS) {
"==> nxge_rx_err_evnts(channel %d): "
"rcr_sha_par: get perr", channel));
return (NXGE_ERROR | rs);
}
rdc_stats->rcr_sha_par++;
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: rcr_sha_par", channel));
}
rdc_stats->rbr_pre_par++;
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: rbr_pre_par", channel));
}
}
/*
* The following 4 status bits are for information; the system
* is running fine. There is no need to send FMA ereports or
* log messages.
*/
}
}
}
}
rdc_stats->config_err++;
"==> nxge_rx_err_evnts(channel %d): "
"config error", channel));
}
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: rcrincon error", channel));
}
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: rcrfull error", channel));
}
/*
* This bit is for information only; there is no need
* to send an FMA ereport or log a message.
*/
}
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: rbr_full error", channel));
}
rdc_stats->rbrlogpage++;
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: rbr logical page error", channel));
}
rdc_stats->cfiglogpage++;
"==> nxge_rx_err_evnts(channel %d): "
"fatal error: cfig logical page error", channel));
}
if (rxport_fatal) {
" nxge_rx_err_evnts: fatal error on Port #%d\n",
portn));
if (isLDOMguest(nxgep)) {
status = NXGE_ERROR;
} else {
}
}
}
if (rxchan_fatal) {
" nxge_rx_err_evnts: fatal error on Channel #%d\n",
channel));
if (isLDOMguest(nxgep)) {
status = NXGE_ERROR;
} else {
}
}
}
return (status);
}
/*
* nxge_rdc_hvio_setup
*
* This code appears to set up some hypervisor variables.
*
* Arguments:
* nxgep
* channel
*
* Notes:
* What does NIU_LP_WORKAROUND mean?
*
* na
*
* Context:
* Any domain
*/
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
static void
{
"channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)",
"channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)",
}
#endif
/*
* nxge_map_rxdma
*
* Map an RDC into our kernel space.
*
* Arguments:
* nxgep
* channel The channel to map.
*
* Notes:
* 1. Allocate & initialise a memory pool, if necessary.
* 2. Allocate however many receive buffers are required.
* 3. Setup buffers, descriptors, and mailbox.
*
* nxge_alloc_rx_mem_pool()
* nxge_alloc_rbb()
* nxge_map_rxdma_channel()
*
* Registers accessed:
*
* Context:
* Any domain
*/
static nxge_status_t
{
if (!nxgep->rx_buf_pool_p) {
"<== nxge_map_rxdma: buf not allocated"));
return (NXGE_ERROR);
}
}
return (NXGE_ERROR);
/*
* Map descriptors from the buffer pools for each DMA channel.
*/
/*
* Set up and prepare buffer blocks, descriptors
* and mailbox.
*/
"==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) "
"returned 0x%x",
return (status);
}
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
if (!isLDOMguest(nxgep))
#endif
return (status);
}
static void
{
return;
return;
(void) nxge_unmap_rxdma_channel(
}
{
/*
* Set up and prepare buffer blocks, descriptors
* and mailbox.
*/
"==> nxge_map_rxdma_channel (channel %d)", channel));
/*
* Receive buffer blocks
*/
"==> nxge_map_rxdma_channel (channel %d): "
}
/*
* Receive block ring, completion ring and mailbox.
*/
"==> nxge_map_rxdma_channel (channel %d): "
}
/* Free rbr, rcr */
"(status 0x%x channel %d)",
/* Free buffer blocks */
"==> nxge_map_rxdma_channel: free rx buffers"
"(nxgep 0x%x status 0x%x channel %d)",
status = NXGE_ERROR;
"<== nxge_map_rxdma_channel: "
"(nxgep 0x%x status 0x%x channel %d)",
return (status);
}
/*ARGSUSED*/
static void
{
"==> nxge_unmap_rxdma_channel (channel %d)", channel));
/*
* unmap receive block ring, completion ring and mailbox.
*/
/* unmap buffer blocks */
}
/*ARGSUSED*/
static nxge_status_t
{
int i;
"==> nxge_map_rxdma_channel_cfg_ring"));
cntl_dmap = *dma_cntl_p;
/* Map in the receive block ring */
/*
* Zero out buffer block ring descriptors.
*/
/*
* For each buffer block, enter receive block address to the ring.
*/
"==> nxge_map_rxdma_channel_cfg_ring: channel %d "
rx_msg_p = rx_msg_ring[i];
>> RBR_BKADDR_SHIFT));
*rbr_vaddrp++ = bkaddr;
}
rbrp->rbr_rd_index = 0;
rbrp->rbr_consumed = 0;
/*
* Do bcopy on packets greater than bcopy size once
* the lo threshold is reached.
* This lo threshold should be less than the hi threshold.
*
* Do bcopy on every packet once the hi threshold is reached.
*/
if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) {
/* default it to use hi */
}
if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) {
}
switch (nxge_rx_threshold_hi) {
default:
case NXGE_RX_COPY_NONE:
/* Do not do bcopy at all */
break;
case NXGE_RX_COPY_1:
case NXGE_RX_COPY_2:
case NXGE_RX_COPY_3:
case NXGE_RX_COPY_4:
case NXGE_RX_COPY_5:
case NXGE_RX_COPY_6:
case NXGE_RX_COPY_7:
break;
case NXGE_RX_COPY_ALL:
rbrp->rbr_threshold_hi = 0;
break;
}
switch (nxge_rx_threshold_lo) {
default:
case NXGE_RX_COPY_NONE:
/* Do not do bcopy at all */
if (rbrp->rbr_use_bcopy) {
}
break;
case NXGE_RX_COPY_1:
case NXGE_RX_COPY_2:
case NXGE_RX_COPY_3:
case NXGE_RX_COPY_4:
case NXGE_RX_COPY_5:
case NXGE_RX_COPY_6:
case NXGE_RX_COPY_7:
break;
case NXGE_RX_COPY_ALL:
rbrp->rbr_threshold_lo = 0;
break;
}
"nxge_map_rxdma_channel_cfg_ring: channel %d "
"rbb_max %d "
"rbrp->rbr_bufsize_type %d "
"rbb_threshold_hi %d "
"rbb_threshold_lo %d",
rbrp->rbr_threshold_lo));
/* Map in the receive completion ring */
rcrp = (p_rx_rcr_ring_t)
sizeof (rcr_entry_t));
rcrp->comp_rd_index = 0;
rcrp->comp_wt_index = 0;
#if defined(__i386)
#else
#endif
(nxge_port_rcr_size - 1);
(nxge_port_rcr_size - 1);
"==> nxge_map_rxdma_channel_cfg_ring: "
"channel %d "
"rbr_vaddrp $%p "
"rcr_desc_rd_head_p $%p "
"rcr_desc_rd_head_pp $%p "
"rcr_desc_rd_last_p $%p "
"rcr_desc_rd_last_pp $%p ",
rcrp->rcr_desc_last_pp));
/*
* Zero out buffer block ring descriptors.
*/
/*
* Timeout should be set based on the system clock divider.
* A timeout value of 1 assumes that the
* granularity (1000) is 3 microseconds running at 300MHz.
*/
/* Map in the mailbox */
mboxp = (p_rx_mbox_t)
"==> nxge_map_rxdma_channel_cfg_ring: "
"channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
& 0xfff);
"==> nxge_map_rxdma_channel_cfg_ring: "
"channel %d damaddrp $%p "
"cfg1 0x%016llx cfig2 0x%016llx",
switch (rcrp->sw_priv_hdr_len) {
case SW_OFFSET_NO_OFFSET:
case SW_OFFSET_64:
case SW_OFFSET_128:
case SW_OFFSET_192:
break;
case SW_OFFSET_256:
case SW_OFFSET_320:
case SW_OFFSET_384:
case SW_OFFSET_448:
break;
default:
}
} else {
}
"<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
return (status);
}
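/*
 * Illustrative sketch only (hypothetical helper): the RCR timeout granularity
 * noted inside nxge_map_rxdma_channel_cfg_ring() above.  One timeout unit is
 * 1000 system-clock ticks, i.e. roughly 3 microseconds on a 300 MHz clock,
 * so a desired timeout in microseconds converts as shown here.
 */
static uint32_t
nxge_example_usec_to_rcr_timeout(uint32_t usec, uint32_t clock_mhz)
{
	uint32_t usec_per_unit;

	if (clock_mhz == 0)
		clock_mhz = 300;		/* default assumption */
	usec_per_unit = 1000 / clock_mhz;	/* ~3 us at 300 MHz */
	if (usec_per_unit == 0)
		usec_per_unit = 1;

	/* Round up so short timeouts still map to at least one unit. */
	return ((usec + usec_per_unit - 1) / usec_per_unit);
}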
/*ARGSUSED*/
static void
{
"==> nxge_unmap_rxdma_channel_cfg_ring: channel %d",
"<== nxge_unmap_rxdma_channel_cfg_ring"));
}
static nxge_status_t
{
int i, j, index;
"==> nxge_map_rxdma_channel_buf_ring: channel %d",
channel));
" nxge_map_rxdma_channel_buf_ring: channel %d to map %d "
"chunks bufp 0x%016llx",
nmsgs = 0;
for (i = 0; i < num_chunks; i++, tmp_bufp++) {
"==> nxge_map_rxdma_channel_buf_ring: channel %d "
"bufp 0x%016llx nblocks %d nmsgs %d",
}
if (!nmsgs) {
"<== nxge_map_rxdma_channel_buf_ring: channel %d "
"no msg blocks",
channel));
status = NXGE_ERROR;
}
KM_SLEEP);
(void *)nxgep->interrupt_cookie);
(void *)nxgep->interrupt_cookie);
/*
* Buffer sizes suggested by the NIU architect:
* 256, 512 and 2K.  (See the illustrative sketch
* following this function.)
*/
} else {
} else {
}
}
"==> nxge_map_rxdma_channel_buf_ring: channel %d "
"actual rbr max %d rbb_max %d nmsgs %d "
"rbrp->block_size %d default_block_size %d "
"(config nxge_rbr_size %d nxge_rbr_spare_size %d)",
/* Map in buffers from the buffer pool. */
index = 0;
#if defined(__i386)
#else
#endif
#if defined(__i386)
#else
#endif
" nxge_map_rxdma_channel_buf_ring: map channel %d "
"chunk %d"
" nblocks %d chunk_size %x block_size 0x%x "
"dma_bufp $%p", channel, i,
dma_bufp));
for (j = 0; j < nblocks; j++) {
"allocb failed (index %d i %d j %d)",
index, i, j));
}
"index %d j %d rx_msg_p $%p mblk %p",
rbrp->rbr_ref_cnt++;
index++;
}
if (dma_bufp->contig_alloc_type) {
}
if (dma_bufp->kmem_alloc_type) {
}
" nxge_map_rxdma_channel_buf_ring: map channel %d "
"chunk %d"
" nblocks %d chunk_size %x block_size 0x%x "
"dma_bufp $%p",
channel, i,
dma_bufp));
}
if (i < rbrp->num_blocks) {
}
"nxge_map_rxdma_channel_buf_ring: done buf init "
"channel %d msg block entries %d",
" nxge_map_rxdma_channel_buf_ring: "
"channel %d done buf info init", channel));
/*
* Finally, permit nxge_freeb() to call nxge_post_page().
*/
" nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)",
index--;
}
}
status = NXGE_ERROR;
"<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));
return (status);
}
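/*
 * Illustrative sketch only (hypothetical helper): the buffer carving implied
 * by the size comment inside nxge_map_rxdma_channel_buf_ring() above,
 * assuming packet buffer size types 0, 1 and 2 map to 256, 512 and 2K bytes.
 * Each receive block is split into equal-sized packet buffers of one size.
 */
static int
nxge_example_bufs_per_block(uint32_t block_size, int pktbufsz_type)
{
	uint32_t bufsz;

	switch (pktbufsz_type) {
	case 0:
		bufsz = 256;
		break;
	case 1:
		bufsz = 512;
		break;
	case 2:
		bufsz = 2048;
		break;
	default:
		return (0);	/* unknown size type */
	}

	return ((int)(block_size / bufsz));
}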
/*ARGSUSED*/
static void
{
int i;
#ifdef NXGE_DEBUG
int num_chunks;
#endif
"==> nxge_unmap_rxdma_channel_buf_ring"));
"<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
return;
}
"==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
"<== nxge_unmap_rxdma_channel_buf_ring: "
"rx_msg_ring $%p ring_info $%p",
return;
}
#ifdef NXGE_DEBUG
#endif
" nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
"tnblocks %d (max %d) size ptrs %d ",
rx_msg_p = rx_msg_ring[i];
" nxge_unmap_rxdma_channel_buf_ring: "
"rx_msg_p $%p",
rx_msg_p));
rx_msg_ring[i] = NULL;
}
}
/*
* We may no longer use the mutex <post_lock>. By setting
* <rbr_state> to anything but POSTING, we prevent
* nxge_post_page() from accessing a dead mutex.
*/
if (rbr_p->rbr_ref_cnt == 0) {
/*
* This is the normal state of affairs.
* Need to free the following buffers:
* - data buffers
* - rx_msg ring
* - ring_info
* - rbr ring
*/
"unmap_rxdma_buf_ring: No outstanding - freeing "));
} else {
/*
* Some of our buffers are still being used.
* Therefore, tell nxge_freeb() this ring is
* unmapped, so it may free <rbr_p> for us.
*/
"unmap_rxdma_buf_ring: %d %s outstanding.",
}
"<== nxge_unmap_rxdma_channel_buf_ring"));
}
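/*
 * Illustrative sketch only (hypothetical type and helper): the deferred-free
 * pattern described above.  Whichever path drops the last reference, the
 * unmap path here or the buffer-free path in nxge_freeb(), sees a zero count
 * together with the unmapped state and knows it must free the ring structure.
 */
typedef struct {
	uint32_t	ref_cnt;	/* outstanding buffer references */
	boolean_t	unmapped;	/* set by the unmap path */
} nxge_example_ring_t;

static boolean_t
nxge_example_ring_release(nxge_example_ring_t *ring)
{
	/* Real code must hold the ring lock or use atomics here. */
	if (ring->ref_cnt > 0)
		ring->ref_cnt--;

	return ((ring->ref_cnt == 0 && ring->unmapped) ? B_TRUE : B_FALSE);
}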
/*
* nxge_rxdma_hw_start_common
*
* Arguments:
* nxgep
*
* Notes:
*
* nxge_init_fzc_rx_common();
* nxge_init_fzc_rxdma_port();
*
* Registers accessed:
*
* Context:
* Service domain
*/
static nxge_status_t
{
/*
* Load the sharable parameters by writing to the
* function zero control registers. These FZC registers
* should be initialized only once for the entire chip.
*/
(void) nxge_init_fzc_rx_common(nxgep);
/*
* Initialize the RXDMA port specific FZC control configurations.
* These FZC registers are pertaining to each port.
*/
(void) nxge_init_fzc_rxdma_port(nxgep);
return (status);
}
static nxge_status_t
{
int i, ndmas;
"<== nxge_rxdma_hw_start: NULL ring pointers"));
return (NXGE_ERROR);
}
if (ndmas == 0) {
"<== nxge_rxdma_hw_start: no dma channel allocated"));
return (NXGE_ERROR);
}
"==> nxge_rxdma_hw_start (ndmas %d)", ndmas));
if (rx_mbox_areas_p) {
}
i = channel;
"==> nxge_rxdma_hw_start (ndmas %d) channel %d",
(p_rx_rbr_ring_t)rbr_rings[i],
(p_rx_rcr_ring_t)rcr_rings[i],
(p_rx_mbox_t)rx_mbox_p[i]);
"==> nxge_rxdma_hw_start: disable "
return (status);
}
"rx_rbr_rings 0x%016llx rings 0x%016llx",
"==> nxge_rxdma_hw_start: (status 0x%x)", status));
return (status);
}
static void
{
"<== nxge_rxdma_hw_stop: NULL ring pointers"));
return;
}
"==> nxge_rxdma_hw_stop(channel %d)",
channel));
"rx_rbr_rings 0x%016llx rings 0x%016llx",
}
static nxge_status_t
{
"npi handle addr $%p acc $%p",
/* Reset RXDMA channel, but not if you're a guest. */
if (!isLDOMguest(nxgep)) {
if (rs != NPI_SUCCESS) {
"==> nxge_init_fzc_rdc: "
"npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x",
return (NXGE_ERROR | rs);
}
"==> nxge_rxdma_start_channel: reset done: channel %d",
channel));
}
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
if (isLDOMguest(nxgep))
#endif
/*
* Initialize the RXDMA channel specific FZC control
* configurations. These FZC registers are pertaining
* to each RX channel (logical pages).
*/
if (!isLDOMguest(nxgep)) {
"==> nxge_rxdma_start_channel: "
"init fzc rxdma failed (0x%08x channel %d)",
return (status);
}
"==> nxge_rxdma_start_channel: fzc done"));
}
/* Set up the interrupt event masks. */
&ent_mask);
if (rs != NPI_SUCCESS) {
"==> nxge_rxdma_start_channel: "
"init rxdma event masks failed "
"(0x%08x channel %d)",
return (NXGE_ERROR | rs);
}
"==> nxge_rxdma_start_channel: "
"event done: channel %d (mask 0x%016llx)",
/* Initialize the receive DMA control and status register */
"==> nxge_rxdma_start_channel: "
"init rxdma control register failed (0x%08x channel %d",
return (status);
}
/*
* Load RXDMA descriptors, buffers, mailbox,
* initialise the receive DMA channels and
* enable each DMA channel.
*/
" nxge_rxdma_start_channel: "
" enable rxdma failed (0x%08x channel %d)",
return (status);
}
"==> nxge_rxdma_start_channel: enabled channel %d"));
if (isLDOMguest(nxgep)) {
/* Add interrupt handler for this channel. */
" nxge_rxdma_start_channel: "
" nxge_hio_intr_add failed (0x%08x channel %d)",
return (status);
}
}
&ent_mask);
if (rs != NPI_SUCCESS) {
"==> nxge_rxdma_start_channel: "
"init rxdma event masks failed (0x%08x channel %d)",
return (NXGE_ERROR | rs);
}
return (NXGE_OK);
}
static nxge_status_t
{
"npi handle addr $%p acc $%p",
if (!isLDOMguest(nxgep)) {
/*
* Stop RxMAC = A.9.2.6
*/
"nxge_rxdma_stop_channel: "
"Failed to disable RxMAC"));
}
/*
* Drain IPP Port = A.9.3.6
*/
(void) nxge_ipp_drain(nxgep);
}
/* Reset RXDMA channel */
if (rs != NPI_SUCCESS) {
" nxge_rxdma_stop_channel: "
" reset rxdma failed (0x%08x channel %d)",
return (NXGE_ERROR | rs);
}
"==> nxge_rxdma_stop_channel: reset done"));
/* Set up the interrupt event masks. */
&ent_mask);
if (rs != NPI_SUCCESS) {
"==> nxge_rxdma_stop_channel: "
"set rxdma event masks failed (0x%08x channel %d)",
return (NXGE_ERROR | rs);
}
"==> nxge_rxdma_stop_channel: event done"));
/*
* Initialize the receive DMA control and status register
*/
" nxge_rxdma_stop_channel: init rxdma"
" control register failed (0x%08x channel %d",
return (status);
}
"==> nxge_rxdma_stop_channel: control done"));
/*
* Make sure channel is disabled.
*/
" nxge_rxdma_stop_channel: "
" init enable rxdma failed (0x%08x channel %d)",
return (status);
}
if (!isLDOMguest(nxgep)) {
/*
* Enable RxMAC = A.9.2.10
*/
"nxge_rxdma_stop_channel: Rx MAC still disabled"));
}
}
RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));
return (NXGE_OK);
}
{
if (rs != NPI_SUCCESS)
return (NXGE_ERROR | rs);
statsp->id_mismatch++;
/* Global fatal error encountered */
}
case 0:
}
break;
case 1:
}
break;
case 2:
}
break;
case 3:
}
break;
default:
return (NXGE_ERROR);
}
}
if (my_err) {
return (status);
}
return (NXGE_OK);
}
static nxge_status_t
{
statsp->ipp_eop_err++;
}
statsp->zcp_eop_err++;
}
if (rxport_fatal) {
" nxge_rxdma_handle_port_error: "
" fatal error on Port #%d\n",
portn));
}
}
return (status);
}
static nxge_status_t
{
int i;
"Recovering from RxDMAChannel#%d error...", channel));
/*
* Stop the DMA channel and wait for the stop-done bit.
* If the stop-done bit is not set, then flag
* an error.
*/
if (rs != NPI_SUCCESS) {
"nxge_disable_rxdma_channel:failed"));
goto fail;
}
/* Disable interrupt */
if (rs != NPI_SUCCESS) {
"nxge_rxdma_stop_channel: "
"set rxdma event masks failed (channel %d)",
channel));
}
/* Reset RXDMA channel */
if (rs != NPI_SUCCESS) {
"nxge_rxdma_fatal_err_recover: "
" reset rxdma failed (channel %d)", channel));
goto fail;
}
rbrp->rbr_rd_index = 0;
rcrp->comp_rd_index = 0;
rcrp->comp_wt_index = 0;
#if defined(__i386)
#else
#endif
(nxge_port_rcr_size - 1);
(nxge_port_rcr_size - 1);
for (i = 0; i < rbrp->rbr_max_size; i++) {
if (ref_cnt != 1) {
if (rx_msg_p->cur_usage_cnt !=
"buf[%d]: cur_usage_cnt = %d "
"max_usage_cnt = %d\n", i,
} else {
/* Buffer can be re-posted */
rx_msg_p->cur_usage_cnt = 0;
rx_msg_p->pkt_buf_size = 0;
}
}
}
goto fail;
}
"Recovery Successful, RxDMAChannel#%d Restored",
channel));
return (NXGE_OK);
fail:
return (NXGE_ERROR | rs);
}
{
int rdc;
"Recovering from RxPort error..."));
goto fail;
NXGE_DELAY(1000);
"Could not recover "
"channel %d", rdc));
}
}
}
}
/* Reset IPP */
"nxge_rx_port_fatal_err_recover: "
"Failed to reset IPP"));
goto fail;
}
/* Reset RxMAC */
"nxge_rx_port_fatal_err_recover: "
"Failed to reset RxMAC"));
goto fail;
}
/* Re-Initialize IPP */
"nxge_rx_port_fatal_err_recover: "
"Failed to init IPP"));
goto fail;
}
/* Re-Initialize RxMAC */
"nxge_rx_port_fatal_err_recover: "
"Failed to reset RxMAC"));
goto fail;
}
/* Re-enable RxMAC */
"nxge_rx_port_fatal_err_recover: "
"Failed to enable RxMAC"));
goto fail;
}
"Recovery Successful, RxPort Restored"));
return (NXGE_OK);
fail:
return (status);
}
void
{
switch (err_id) {
else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
#if defined(__i386)
#else
#endif
break;
else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
#if defined(__i386)
"!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
#else
"!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
#endif
break;
break;
break;
}
}
static void
{
int index;
"==> nxge_rxdma_databuf_free: NULL rbr pointer"));
return;
}
"<== nxge_rxdma_databuf_free: DDI"));
return;
}
"==> nxge_rxdma_databuf_free: NULL ring info"));
return;
}
"==> nxge_rxdma_databuf_free: free chunk %d "
"kaddrp $%p chunk size %d",
}
}
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
extern void contig_mem_free(void *, size_t);
#endif
void
{
"==> nxge_free_buf: invalid kaddr $%p size to free %d",
return;
}
switch (alloc_type) {
case KMEM_ALLOC:
"==> nxge_free_buf: freeing kmem $%p size %d",
#if defined(__i386)
#else
#endif
break;
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
case CONTIG_MEM_ALLOC:
"==> nxge_free_buf: freeing contig_mem kaddr $%p size %d",
break;
#endif
default:
"<== nxge_free_buf: unsupported alloc type %d",
alloc_type));
return;
}
}