/* hxge_rxdma.c revision 14648441d6138f071fa68189d8b67500158ef61d */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <hxge_impl.h>
#include <hxge_rxdma.h>
/*
* Number of blocks to accumulate before re-enabling DMA
* when we get RBR empty.
*/
#define HXGE_RBR_EMPTY_THRESHOLD 64
/*
 * Globals: tunable parameters.
 */
extern uint32_t hxge_rbr_size;
extern uint32_t hxge_rcr_size;
extern uint32_t hxge_rbr_spare_size;
extern uint32_t hxge_mblks_pending;
/*
* Tunable to reduce the amount of time spent in the
* ISR doing Rx Processing.
*/
extern uint32_t hxge_max_rx_pkts;
/*
* Tunables to manage the receive buffer blocks.
*
* hxge_rx_threshold_hi: copy all buffers.
* hxge_rx_bcopy_size_type: receive buffer block size type.
* hxge_rx_threshold_lo: copy only up to tunable block size type.
*/
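/*
 * Illustrative sketch (not driver code): one way the copy thresholds
 * described above could gate a per-packet copy-vs-loan decision.  The
 * function and parameter names below are hypothetical, and the exact
 * comparison against the bcopy size is an assumption based on the
 * descriptions of hxge_rx_threshold_hi/lo above.
 */
static boolean_t
hxge_rx_should_bcopy_sketch(uint32_t rbr_consumed, uint32_t threshold_hi,
    uint32_t threshold_lo, uint32_t pkt_len, uint32_t bcopy_size)
{
    if (threshold_hi != 0 && rbr_consumed >= threshold_hi)
        return (B_TRUE);    /* copy every packet */
    if (threshold_lo != 0 && rbr_consumed >= threshold_lo &&
        pkt_len <= bcopy_size)
        return (B_TRUE);    /* copy only packets up to the bcopy size */
    return (B_FALSE);       /* otherwise loan the buffer up */
}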
/*
* Static local functions.
*/
int n_init_kick);
static void hxge_freeb(p_rx_msg_t);
{
/* Reset RDC block from PEU to clear any previous state */
HXGE_DELAY(1000);
"<== hxge_init_rxdma: status 0x%x", status));
return (status);
}
}
}
"<== hxge_init_rxdma_channels: status 0x%x", status));
return (status);
}
void
{
}
{
"<== hxge_init_rxdma_channel_cntl_stat"));
if (rs != HPI_SUCCESS) {
}
return (status);
}
int n_init_kick)
{
/*
* Use configuration data composed at init time. Write to hardware the
* receive ring configurations.
*/
"==> hxge_enable_rxdma_channel: mboxp $%p($%p)",
switch (hxgep->rx_bksize_code) {
case RBR_BKSIZE_4K:
break;
case RBR_BKSIZE_8K:
break;
}
"rbr_len qlen %d pagesize code %d rcr_len %d",
"size 0 %d size 1 %d size 2 %d",
if (rs != HPI_SUCCESS) {
return (HXGE_ERROR | rs);
}
/*
* Enable the timeout and threshold.
*/
if (rs != HPI_SUCCESS) {
return (HXGE_ERROR | rs);
}
if (rs != HPI_SUCCESS) {
return (HXGE_ERROR | rs);
}
/* Enable the DMA */
if (rs != HPI_SUCCESS) {
return (HXGE_ERROR | rs);
}
/* Kick the DMA engine */
/* Clear the rbr empty bit */
return (HXGE_OK);
}
static hxge_status_t
{
/* disable the DMA */
if (rs != HPI_SUCCESS) {
"<== hxge_disable_rxdma_channel:failed (0x%x)", rs));
return (HXGE_ERROR | rs);
}
return (HXGE_OK);
}
{
"==> hxge_rxdma_channel_rcrflush"));
"<== hxge_rxdma_channel_rcrflush"));
return (status);
}
#define TO_LEFT -1
#define TO_RIGHT 1
#define NO_HINT 0xffffffff
/*ARGSUSED*/
{
int bufsize;
int found, search_done;
int max_iterations, iteration;
"==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
#if defined(__i386)
#else
#endif
switch (pktbufsz_type) {
case 0:
break;
case 1:
break;
case 2:
break;
case RCR_SINGLE_BLOCK:
bufsize = 0;
anchor_index = 0;
break;
default:
return (HXGE_ERROR);
}
anchor_index = 0;
"==> hxge_rxbuf_pp_to_vp: (found, 1 block) "
"buf_pp $%p btype %d anchor_index %d bufinfo $%p",
goto found_index;
}
"==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d anchor_index %d",
iteration = 0;
/*
     * First check if this block has been seen recently. This is indicated
* by a hint which is initialized when the first buffer of the block is
* seen. The hint is reset when the last buffer of the block has been
* processed. As three block sizes are supported, three hints are kept.
* The idea behind the hints is that once the hardware uses a block
* for a buffer of that size, it will use it exclusively for that size
     * and will use it until it is exhausted. It is assumed that there
     * would be a single block being used for the same buffer size at any
     * given time.
*/
/*
         * Check if this is the last buffer in the block. If so,
         * then reset the hint for that size.
*/
}
}
"==> hxge_rxbuf_pp_to_vp: (!found)"
"buf_pp $%p btype %d anchor_index %d",
/*
         * This is the first buffer of the block of this size. We need to
         * search the whole information array. The search uses a binary
         * search algorithm and assumes that the information is already
         * sorted in increasing order: info[0] < info[1] < ... < info[n-1],
         * where n is the size of the information array.
*/
l_index = 0;
while (search_done == B_FALSE) {
(iteration >= max_iterations))
/* read the DVMA address information and sort it */
"==> hxge_rxbuf_pp_to_vp: (searching)"
"buf_pp $%p btype %d "
"anchor_index %d chunk_size %d dvmaaddr $%p",
chunk_size, dvma_addr));
case IN_MIDDLE:
/* found */
(dvma_addr + chunk_size))
break;
case BOTH_RIGHT:
/* not found: go to the right */
break;
case BOTH_LEFT:
/* not found: go to the left */
break;
default: /* should not come here */
return (HXGE_ERROR);
}
iteration++;
}
"==> hxge_rxbuf_pp_to_vp: (search done)"
"buf_pp $%p btype %d anchor_index %d",
}
"==> hxge_rxbuf_pp_to_vp: (search failed)"
"buf_pp $%p btype %d anchor_index %d",
return (HXGE_ERROR);
}
"==> hxge_rxbuf_pp_to_vp: (FOUND1)"
"buf_pp $%p btype %d bufsize %d anchor_index %d",
/* index of the first block in this chunk */
"==> hxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
"buf_pp $%p btype %d bufsize %d "
"anchor_index %d chunk_index %d dvma $%p",
"==> hxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
"buf_pp $%p btype %d bufsize %d "
"anchor_index %d chunk_index %d dvma $%p "
"offset %d block_size %d",
"==> hxge_rxbuf_pp_to_vp: "
"total_index %d dvma_addr $%p "
"offset %d block_size %d "
"block_index %d ",
#if defined(__i386)
#else
offset);
#endif
"==> hxge_rxbuf_pp_to_vp: "
"total_index %d dvma_addr $%p "
"offset %d block_size %d "
"block_index %d "
"*pkt_buf_addr_p $%p",
*msg_index = total_index;
"==> hxge_rxbuf_pp_to_vp: get msg index: "
"msg_index %d bufoffset_index %d",
return (HXGE_OK);
}
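/*
 * Illustrative sketch (not driver code) of the lookup hxge_rxbuf_pp_to_vp()
 * performs above: binary-search a sorted array of chunk records for the
 * chunk containing a packet's DVMA address, then derive the block index
 * from the offset into that chunk.  The structure and names below are
 * hypothetical stand-ins for the driver's rxbuf_index_info_t records.
 */
typedef struct {
    uint64_t    dvma_addr;      /* chunk start (IO address) */
    uint32_t    chunk_size;     /* chunk length in bytes */
    uint32_t    first_block;    /* index of first block in this chunk */
} chunk_info_sketch_t;

static int
hxge_dvma_to_block_sketch(chunk_info_sketch_t *info, int nchunks,
    uint64_t dvma, uint32_t block_size)
{
    int lo = 0, hi = nchunks - 1;

    while (lo <= hi) {
        int mid = lo + (hi - lo) / 2;

        if (dvma < info[mid].dvma_addr) {
            hi = mid - 1;       /* BOTH_LEFT: go left */
        } else if (dvma >=
            info[mid].dvma_addr + info[mid].chunk_size) {
            lo = mid + 1;       /* BOTH_RIGHT: go right */
        } else {
            /* IN_MIDDLE: found the containing chunk */
            return ((int)(info[mid].first_block +
                (dvma - info[mid].dvma_addr) / block_size));
        }
    }
    return (-1);                /* search failed */
}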
/*
 * Comparison function used by the sort routine (hxge_ksort)
 * below to order buffer index records.
*/
static int
hxge_sort_compare(const void *p1, const void *p2)
{
rxbuf_index_info_t *a, *b;
a = (rxbuf_index_info_t *)p1;
b = (rxbuf_index_info_t *)p2;
    if (a->dvma_addr > b->dvma_addr)
        return (1);
    if (a->dvma_addr < b->dvma_addr)
        return (-1);
return (0);
}
/*
*
* Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
* n = # objs in the array
 * s = size of each obj (must be a multiple of the word size)
 * f = ptr to function to compare two objs
 *     which returns (-1 = less than, 0 = equal, 1 = greater than)
*/
void
hxge_ksort(caddr_t v, int n, int s, int (*f) ())
{
    int g, i, j, ii;
    unsigned int *p1, *p2;
    unsigned int tmp;
/* No work to do */
if (v == NULL || n <= 1)
return;
/* Sanity check on arguments */
ASSERT(s > 0);
for (g = n / 2; g > 0; g /= 2) {
for (i = g; i < n; i++) {
for (j = i - g; j >= 0 &&
(*f) (v + j * s, v + (j + g) * s) == 1; j -= g) {
                p1 = (unsigned int *)(v + j * s);
                p2 = (unsigned int *)(v + (j + g) * s);
                for (ii = 0; ii < s / 4; ii++) {
                    tmp = *p1;
                    *p1++ = *p2;
                    *p2++ = tmp;
}
}
}
}
}
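/*
 * Usage sketch (not driver code): hxge_ksort() sorts an arbitrary array
 * in place given the element count, element size and a comparator.  The
 * driver uses it below to order rxbuf_index_info_t records by DVMA
 * address so that hxge_rxbuf_pp_to_vp() can binary-search them; the
 * wrapper name here is hypothetical.
 */
static void
hxge_sort_chunk_info_sketch(rxbuf_index_info_t *info, int nchunks)
{
    hxge_ksort((caddr_t)info, nchunks, sizeof (rxbuf_index_info_t),
        hxge_sort_compare);
}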
/*
* Initialize data structures required for rxdma
* buffer dvma->vmem address lookup
*/
/*ARGSUSED*/
static hxge_status_t
{
int index;
int max_iteration = 0, max_index = 0;
/* read the DVMA address information and sort it */
/* do init of the information array */
" hxge_rxbuf_index_info_init Sort ptrs"));
/* sort the array */
sizeof (rxbuf_index_info_t), hxge_sort_compare);
" hxge_rxbuf_index_info_init: sorted chunk %d "
" ioaddr $%p kaddr $%p size %x",
}
max_iteration = 0;
" hxge_rxbuf_index_info_init Find max iter %d",
return (HXGE_OK);
}
/*ARGSUSED*/
void
{
#ifdef HXGE_DEBUG
"\trcr entry $%p "
"\trcr entry 0x%0llx "
"\trcr entry 0x%08x "
"\trcr entry 0x%08x "
"\tvalue 0x%0llx\n"
"\tmulti = %d\n"
"\tpkt_type = 0x%x\n"
"\terror = 0x%04x\n"
"\tl2_len = %d\n"
"\tpktbufsize = %d\n"
"\tpkt_buf_addr = $%p\n"
"\tpkt_buf_addr (<< 6) = $%p\n",
bptr,
#endif
}
/*ARGSUSED*/
void
{
(void) hxge_rx_vmac_disable(hxgep);
}
void
{
(void) hxge_rxdma_stop(hxgep);
(void) hxge_uninit_rxdma_channels(hxgep);
(void) hxge_init_rxdma_channels(hxgep);
(void) hxge_rx_vmac_enable(hxgep);
}
{
int i, ndmas;
"==> hxge_rxdma_hw_mode: mode %d", enable));
"<== hxge_rxdma_mode: not initialized"));
return (HXGE_ERROR);
}
if (rx_rbr_rings == NULL) {
"<== hxge_rxdma_mode: NULL ring pointer"));
return (HXGE_ERROR);
}
"<== hxge_rxdma_mode: NULL rbr rings pointer"));
return (HXGE_ERROR);
}
if (!ndmas) {
"<== hxge_rxdma_mode: no channel"));
return (HXGE_ERROR);
}
"==> hxge_rxdma_mode (ndmas %d)", ndmas));
for (i = 0; i < ndmas; i++) {
continue;
}
if (enable) {
"==> hxge_rxdma_hw_mode: channel %d (enable)",
channel));
} else {
"==> hxge_rxdma_hw_mode: channel %d (disable)",
channel));
}
}
"<== hxge_rxdma_hw_mode: status 0x%x", status));
return (status);
}
int
{
int i, ndmas;
"==> hxge_rxdma_get_ring_index: channel %d", channel));
if (rx_rbr_rings == NULL) {
"<== hxge_rxdma_get_ring_index: NULL ring pointer"));
return (-1);
}
if (!ndmas) {
"<== hxge_rxdma_get_ring_index: no channel"));
return (-1);
}
"==> hxge_rxdma_get_ring_index (ndmas %d)", ndmas));
for (i = 0; i < ndmas; i++) {
"==> hxge_rxdma_get_rbr_ring: "
"channel %d (index %d) "
return (i);
}
}
"<== hxge_rxdma_get_rbr_ring_index: not found"));
return (-1);
}
/*
* Static functions start here.
*/
static p_rx_msg_t
{
"Allocation of a rx msg failed."));
goto hxge_allocb_exit;
}
if (dmabuf_p) {
} else {
"Allocation of a receive page failed."));
goto hxge_allocb_fail1;
}
}
goto hxge_allocb_fail2;
}
goto hxge_allocb_exit;
if (!hxge_mp->use_buf_pool) {
}
return (hxge_mp);
}
{
goto hxge_dupb_exit;
}
return (mp);
}
{
goto hxge_dupb_bcopy_exit;
}
return (mp);
}
void
{
int i;
/* Reuse this buffer */
rx_msg_p->cur_usage_cnt = 0;
rx_msg_p->max_usage_cnt = 0;
rx_msg_p->pkt_buf_size = 0;
if (rx_rbr_p->rbr_use_bcopy) {
}
/*
* Get the rbr header pointer and its offset index.
*/
/*
* Accumulate some buffers in the ring before re-enabling the
* DMA channel, if rbr empty was signaled.
*/
if (!rx_rbr_p->rbr_is_empty) {
} else {
        rx_rbr_p->accumulate++;
        if (rx_rbr_p->accumulate >= HXGE_RBR_EMPTY_THRESHOLD) {
            rx_rbr_p->rbr_is_empty = B_FALSE;
            rx_rbr_p->accumulate = 0;
/*
* Complete the processing for the RBR Empty by:
* 0) kicking back HXGE_RBR_EMPTY_THRESHOLD
* packets.
* 1) Disable the RX vmac.
* 2) Re-enable the affected DMA channel.
* 3) Re-enable the RX vmac.
*/
/*
             * Disable the RX VMAC by setting the frame length
             * to 0, since there is a hardware bug when disabling
             * the vmac.
*/
(void) hpi_vmac_rx_set_framesize(
if (hpi_status != HPI_SUCCESS) {
"hxge(%d): channel(%d) is empty.",
}
}
for (i = 0; i < 1024; i++) {
}
/*
* Re-enable the RX VMAC.
*/
(void) hpi_vmac_rx_set_framesize(
}
}
"<== hxge_post_page (channel %d post_next_index %d)",
}
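/*
 * Illustrative sketch (not driver code) of the RBR-empty recovery
 * sequence described in the comment inside hxge_post_page() above.  The
 * step ordering is taken from that comment; the function-pointer
 * parameters are hypothetical stand-ins for the hpi_* routines the
 * driver actually calls (their real signatures are not shown here) and
 * exist only to keep the sketch self-contained.
 */
static void
hxge_rbr_empty_recover_sketch(int channel, uint32_t max_frame_size,
    void (*rdc_kick)(int, int), void (*rdc_enable)(int),
    void (*vmac_set_framesize)(uint32_t))
{
    /* 0) kick back the HXGE_RBR_EMPTY_THRESHOLD accumulated buffers */
    rdc_kick(channel, HXGE_RBR_EMPTY_THRESHOLD);

    /* 1) "disable" the RX VMAC by forcing the frame size to 0 (hw bug) */
    vmac_set_framesize(0);

    /* 2) re-enable the affected DMA channel */
    rdc_enable(channel);

    /* 3) re-enable the RX VMAC by restoring the frame size */
    vmac_set_framesize(max_frame_size);
}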
void
{
int ref_cnt;
"hxge_freeb:rx_msg_p = $%p (block pending %d)",
return;
/*
* This is to prevent posting activities while we are recovering
* from fatal errors. This should not be a performance drag since
     * ref_cnt != 0 most of the time.
*/
/*
     * First we need to get the free state, then
     * atomically decrement the reference count to prevent
     * a race condition with the interrupt thread that
     * is processing a loaned-up buffer block.
*/
if (!ref_cnt) {
"will free: rx_msg_p = $%p (block pending %d)",
if (!rx_msg_p->use_buf_pool) {
}
/*
* Decrement the receive buffer ring's reference
* count, too.
*/
/*
* Free the receive buffer ring, iff
* 1. all the receive buffers have been freed
* 2. and we are in the proper state (that is,
* we are not UNMAPPING).
*/
if (ring->rbr_ref_cnt == 0 &&
/* post_lock has been destroyed already */
return;
}
}
/*
* Repost buffer.
*/
"hxge_freeb: post page $%p:", rx_msg_p));
}
}
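/*
 * Illustrative sketch (not driver code) of the ordering hxge_freeb()
 * relies on above: capture the buffer's free state first, then
 * atomically drop the reference count, and only the caller that drops
 * the count to zero may free the block.  The structure below is a
 * hypothetical stand-in for rx_msg_t; atomic_add_32_nv() is the usual
 * Solaris atomic routine for this (assumed available via the driver
 * headers).
 */
typedef struct {
    volatile uint32_t   ref_cnt;    /* references outstanding */
    boolean_t           free_state; /* marked free for release */
} rx_msg_sketch_t;

static boolean_t
hxge_rx_msg_release_sketch(rx_msg_sketch_t *msgp)
{
    boolean_t   do_free = msgp->free_state;    /* read the state first */
    uint32_t    ref_cnt;

    ref_cnt = atomic_add_32_nv(&msgp->ref_cnt, -1);
    return (do_free && ref_cnt == 0);          /* last reference released */
}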
{
return (DDI_INTR_UNCLAIMED);
}
}
/*
* If the interface is not started, just swallow the interrupt
* for the logical device and don't rearm it.
*/
return (DDI_INTR_CLAIMED);
/*
* This interrupt handler is for a specific receive dma channel.
*/
/*
* Get the control and status for this channel.
*/
"cs 0x%016llx rcrto 0x%x rcrthres %x",
/* error events. */
}
/*
     * Enable the mailbox update interrupt if we want to use the mailbox.
     * We probably don't need it, as it only saves us one PIO read.
     * Also write 1 to rcrthres and rcrto to clear these two edge-triggered
* bits.
*/
/*
* Rearm this logical group if this is a single device group.
*/
}
"<== hxge_rx_intr: serviced %d", serviced));
return (serviced);
}
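/*
 * Illustrative sketch (not driver code) of the write-1-to-clear handling
 * the comment in hxge_rx_intr() above refers to: the RCRTO and RCRTHRES
 * events are edge triggered, so the handler echoes those bits back as 1
 * to acknowledge them without disturbing the rest of the control/status
 * register.  The bit positions below are hypothetical, not the real
 * rdc_stat layout.
 */
#define	HXGE_SKETCH_RCRTO	(1ULL << 0)	/* hypothetical bit position */
#define	HXGE_SKETCH_RCRTHRES	(1ULL << 1)	/* hypothetical bit position */

static uint64_t
hxge_rdc_stat_ack_sketch(uint64_t cs_value)
{
    /* write back only the bits being acknowledged (W1C semantics) */
    return (cs_value & (HXGE_SKETCH_RCRTO | HXGE_SKETCH_RCRTHRES));
}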
static void
{
"<== hxge_rx_pkts_vring: no mp"));
return;
}
#ifdef HXGE_DEBUG
"==> hxge_rx_pkts_vring:calling mac_rx (NEMO) "
"LEN %d mp $%p mp->b_next $%p rcrp $%p "
"mac_handle $%p",
"==> hxge_rx_pkts_vring: dump packets "
"(mp $%p b_rptr $%p b_wptr $%p):\n %s",
"==> hxge_rx_pkts_vring: dump b_cont packets "
"(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
}
"==> hxge_rx_pkts_vring: dump next packets "
"(b_rptr $%p): %s",
}
#endif
"==> hxge_rx_pkts_vring: send packet to stack"));
}
/*ARGSUSED*/
mblk_t *
{
return (NULL);
}
"channel %d, and rcr channel %d not matched.",
return (NULL);
}
"==> hxge_rx_pkts: START: rcr channel %d "
"head_p $%p head_pp $%p index %d ",
if (!qlen) {
"<== hxge_rx_pkts:rcr channel %d qlen %d (no pkts)",
return (NULL);
}
/*
     * Number of packets queued (a jumbo or multi-fragment packet is
     * counted as only one packet, even though it may take up more than
     * one completion entry).
*/
if (rcr_tail_index >= rcr_head_index) {
} else {
/* rcr_tail has wrapped around */
}
"Channel %d, rcr_qlen from reg %d and from rcr_tail %d\n",
}
while (qlen_hw) {
#ifdef HXGE_DEBUG
#endif
/*
* Process one completion ring entry.
*/
invalid_rcr_entry = 0;
if (invalid_rcr_entry != 0) {
"Channel %d could only read 0x%x packets, "
break;
}
/*
* message chaining modes (nemo msg chaining)
*/
if (nmp) {
}
}
"==> hxge_rx_pkts: loop: rcr channel %d "
"before updating: multi %d "
"nrcr_read %d "
"npk read %d "
"head_pp $%p index %d ",
if (!multi) {
qlen_hw--;
npkt_read++;
}
/*
* Update the next read entry.
*/
nrcr_read++;
"<== hxge_rx_pkts: (SAM, process one packet) "
"nrcr_read %d", nrcr_read));
"==> hxge_rx_pkts: loop: rcr channel %d "
"multi %d nrcr_read %d npk read %d head_pp $%p index %d ",
}
/* Adjust the mailbox queue length for a hardware bug workaround */
if (rcr_p->intr_timeout)
}
"==> hxge_rx_pkts: EXIT: rcr channel %d "
"head_pp $%p index %016llx ",
/*
* Update RCR buffer pointer read and number of packets read.
*/
return (head_mp);
}
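/*
 * Illustrative sketch (not driver code) of the head/tail arithmetic
 * hxge_rx_pkts() uses above to estimate RCR occupancy: when the tail
 * index has wrapped around the ring, the distance is measured through
 * the end of the ring.  The parameter names are hypothetical.
 */
static uint32_t
hxge_rcr_qlen_sketch(uint32_t head_index, uint32_t tail_index,
    uint32_t ring_size)
{
    if (tail_index >= head_index)
        return (tail_index - head_index);

    /* rcr_tail has wrapped around */
    return (ring_size - head_index + tail_index);
}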
#define RCR_ENTRY_PATTERN 0x5a5a6b6b7c7c8d8dULL
#define NO_PORT_BIT 0x20
/*ARGSUSED*/
void
{
/* Verify the content of the rcr_entry for a hardware bug workaround */
*invalid_rcr_entry = 1;
"Channel %d invalid RCR entry 0x%llx found, returning\n",
return;
}
/*
     * Hardware does not strip the CRC due to bug ID 11451, where
     * the hardware mishandles minimum-size packets.
*/
#if defined(__i386)
#else
#endif
"==> hxge_receive_packet: entryp $%p entry 0x%0llx "
"pkt_buf_addr_pp $%p l2_len %d multi %d "
"error_type 0x%x pktbufsz_type %d ",
"==> hxge_receive_packet: entryp $%p entry 0x%0llx "
"pkt_buf_addr_pp $%p l2_len %d multi %d "
"error_type 0x%x ", rcr_desc_rd_head_p,
"==> (rbr) hxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
/* get the stats ptr */
if (!l2_len) {
"<== hxge_receive_packet: failed: l2 length is 0."));
return;
}
/* shift 6 bits to get the full io address */
#if defined(__i386)
#else
#endif
"==> (rbr) hxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
if (first_entry) {
"==> hxge_receive_packet: first entry 0x%016llx "
"pkt_buf_addr_pp $%p l2_len %d hdr %d",
}
"==> (rbr 1) hxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
/*
* Packet buffer address in the completion entry points to the starting
* buffer address (offset 0). Use the starting buffer address to locate
* the corresponding kernel address.
*/
&buf_offset, &msg_index);
"==> (rbr 2) hxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
"<== hxge_receive_packet: found vaddr failed %d", status));
return;
}
"==> (rbr 3) hxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
"==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
"==> hxge_receive_packet: FATAL msg_index (%d) "
"should be smaller than tnblocks (%d)\n",
return;
}
"==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
"full pkt_buf_addr_pp $%p l2_len %d",
switch (pktbufsz_type) {
case RCR_PKTBUFSZ_0:
"==> hxge_receive_packet: 0 buf %d", bsize));
break;
case RCR_PKTBUFSZ_1:
"==> hxge_receive_packet: 1 buf %d", bsize));
break;
case RCR_PKTBUFSZ_2:
"==> hxge_receive_packet: 2 buf %d", bsize));
break;
case RCR_SINGLE_BLOCK:
"==> hxge_receive_packet: single %d", bsize));
break;
default:
return;
}
"==> hxge_receive_packet: after first dump:usage count"));
if (rx_msg_p->cur_usage_cnt == 0) {
if (rx_rbr_p->rbr_use_bcopy) {
if (rx_rbr_p->rbr_consumed >
}
}
"==> hxge_receive_packet: buf %d (new block) ", bsize));
if (pktbufsz_type == RCR_SINGLE_BLOCK) {
"==> hxge_receive_packet: buf %d (single block) ",
bsize));
/*
* Buffer can be reused once the free function is
* called.
*/
} else {
}
}
} else {
}
}
if (rx_msg_p->rx_use_bcopy) {
if (buffer_free == B_TRUE) {
}
return;
}
"msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
if (error_type) {
/* Update error stats */
if (error_type & RCR_CTRL_FIFO_DED) {
" hxge_receive_packet: "
" channel %d RCR ctrl_fifo_ded error", channel));
} else if (error_type & RCR_DATA_FIFO_DED) {
" hxge_receive_packet: channel %d"
" RCR data_fifo_ded error", channel));
}
/*
* Update and repost buffer block if max usage count is
* reached.
*/
if (error_send_up == B_FALSE) {
if (buffer_free == B_TRUE) {
}
return;
}
}
"==> hxge_receive_packet: DMA sync second "));
if (first_entry) {
}
if (!rx_msg_p->rx_use_bcopy) {
/*
         * For loaned-up buffers, the driver reference count
         * is incremented first and then the free state is set.
*/
if (first_entry) {
} else {
- skip_len];
}
} else {
} else {
}
}
}
} else {
if (first_entry) {
} else {
}
}
if (first_entry)
else
"==> hxge_receive_packet after dupb: "
"rbr consumed %d "
"pktbufsz_type %d "
"nmp $%p rptr $%p wptr $%p "
"buf_offset %d bzise %d l2_len %d skip_len %d",
} else {
if (buffer_free == B_TRUE) {
}
return;
}
if (buffer_free == B_TRUE) {
}
/*
* ERROR, FRAG and PKT_TYPE are only reported in the first entry. If a
* packet is not fragmented and no error bit is set, then L4 checksum
* is OK.
*/
if (first_entry) {
rdc_stats->jumbo_pkts++;
} else {
/*
* Add the current portion of the packet to the kstats.
         * The current portion of the packet is calculated using the
         * length of the packet and the previously received portion.
*/
}
} else {
}
if (is_valid) {
if (first_entry) {
} else {
}
}
/*
         * Update stats and hardware checksumming.
*/
HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0);
"is_valid 0x%x multi %d error %d",
}
}
"==> hxge_receive_packet: *mp 0x%016llx", *mp));
"multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
}
static void
{
/*
* Wait for the channel to be quiet
*/
/*
* Post page will accumulate some buffers before re-enabling
* the DMA channel.
*/
}
/*ARGSUSED*/
static hxge_status_t
{
/* Clear the interrupts */
"==> hxge_rx_err_evnts(channel %d): "
"fatal error: rx_rbr_timeout", channel));
}
(void) hpi_rxdma_ring_perr_stat_get(handle,
}
rdc_stats->rcr_sha_par++;
"==> hxge_rx_err_evnts(channel %d): "
"fatal error: rcr_shadow_par_err", channel));
}
rdc_stats->rbr_pre_par++;
"==> hxge_rx_err_evnts(channel %d): "
"fatal error: rbr_prefetch_par_err", channel));
}
"==> hxge_rx_err_evnts(channel %d): "
"fatal error: rbr_pre_empty", channel));
}
"==> hxge_rx_err_evnts(channel %d): "
"fatal error: peu_resp_err", channel));
}
}
}
"==> hxge_rx_err_evnts(channel %d): "
"fatal error: rcr_shadow_full", channel));
}
"==> hxge_rx_err_evnts(channel %d): "
"fatal error: rcrfull error", channel));
}
}
"==> hxge_rx_err_evnts(channel %d): "
"fatal error: rbr_full error", channel));
}
if (rxchan_fatal) {
" hxge_rx_err_evnts: fatal error on Channel #%d\n",
channel));
}
}
return (status);
}
static hxge_status_t
{
int i, ndmas;
if (!dma_buf_poolp->buf_allocated ||
"<== hxge_map_rxdma: buf not allocated"));
return (HXGE_ERROR);
}
if (!ndmas) {
"<== hxge_map_rxdma: no dma allocated"));
return (HXGE_ERROR);
}
/*
* Timeout should be set based on the system clock divider.
* The following timeout value of 1 assumes that the
* granularity (1000) is 3 microseconds running at 300MHz.
*/
/*
     * Map descriptors from the buffer pools for each DMA channel.
*/
for (i = 0; i < ndmas; i++) {
/*
* Set up and prepare buffer blocks, descriptors and mailbox.
*/
(p_hxge_dma_common_t *)&dma_buf_p[i],
(p_rx_rbr_ring_t *)&rbr_rings[i],
num_chunks[i],
(p_hxge_dma_common_t *)&dma_rbr_cntl_p[i],
(p_hxge_dma_common_t *)&dma_rcr_cntl_p[i],
(p_hxge_dma_common_t *)&dma_mbox_cntl_p[i],
(p_rx_rcr_ring_t *)&rcr_rings[i],
(p_rx_mbox_t *)&rx_mbox_p[i]);
goto hxge_map_rxdma_fail1;
}
}
goto hxge_map_rxdma_exit;
"==> hxge_map_rxdma: unmap rbr,rcr (status 0x%x channel %d i %d)",
i--;
for (; i >= 0; i--) {
}
return (status);
}
static void
{
int i, ndmas;
if (!dma_buf_poolp->buf_allocated ||
"<== hxge_unmap_rxdma: NULL buf pointers"));
return;
}
"<== hxge_unmap_rxdma: NULL pointers"));
return;
}
if (!ndmas) {
"<== hxge_unmap_rxdma: no channel"));
return;
}
"==> hxge_unmap_rxdma (ndmas %d)", ndmas));
for (i = 0; i < ndmas; i++) {
"==> hxge_unmap_rxdma (ndmas %d) channel %d",
(p_rx_rbr_ring_t)rbr_rings[i],
(p_rx_rcr_ring_t)rcr_rings[i],
(p_rx_mbox_t)rx_mbox_p[i]);
}
}
{
/*
* Set up and prepare buffer blocks, descriptors and mailbox.
*/
"==> hxge_map_rxdma_channel (channel %d)", channel));
/*
* Receive buffer blocks
*/
"==> hxge_map_rxdma_channel (channel %d): "
}
/*
* Receive block ring, completion ring and mailbox.
*/
"==> hxge_map_rxdma_channel (channel %d): "
}
/* Free rbr, rcr */
/* Free buffer blocks */
"==> hxge_map_rxdma_channel: free rx buffers"
"(hxgep 0x%x status 0x%x channel %d)",
status = HXGE_ERROR;
"<== hxge_map_rxdma_channel: (hxgep 0x%x status 0x%x channel %d)",
return (status);
}
/*ARGSUSED*/
static void
{
"==> hxge_unmap_rxdma_channel (channel %d)", channel));
/*
* unmap receive block ring, completion ring and mailbox.
*/
/* unmap buffer blocks */
}
/*ARGSUSED*/
static hxge_status_t
{
int i;
"==> hxge_map_rxdma_channel_cfg_ring"));
/*
* Map in the receive block ring
*/
/*
* Zero out buffer block ring descriptors.
*/
/* XXXX: how to choose packet buffer sizes */
/*
     * For each buffer block, enter the receive block address into the ring.
*/
"==> hxge_map_rxdma_channel_cfg_ring: channel %d "
rx_msg_p = rx_msg_ring[i];
*rbr_vaddrp++ = bkaddr;
}
rbrp->rbr_rd_index = 0;
rbrp->rbr_consumed = 0;
/*
* Do bcopy on packets greater than bcopy size once the lo threshold is
* reached. This lo threshold should be less than the hi threshold.
*
* Do bcopy on every packet once the hi threshold is reached.
*/
if (hxge_rx_threshold_lo >= hxge_rx_threshold_hi) {
/* default it to use hi */
}
if (hxge_rx_buf_size_type > HXGE_RBR_TYPE2) {
}
switch (hxge_rx_threshold_hi) {
default:
case HXGE_RX_COPY_NONE:
/* Do not do bcopy at all */
break;
case HXGE_RX_COPY_1:
case HXGE_RX_COPY_2:
case HXGE_RX_COPY_3:
case HXGE_RX_COPY_4:
case HXGE_RX_COPY_5:
case HXGE_RX_COPY_6:
case HXGE_RX_COPY_7:
break;
case HXGE_RX_COPY_ALL:
rbrp->rbr_threshold_hi = 0;
break;
}
switch (hxge_rx_threshold_lo) {
default:
case HXGE_RX_COPY_NONE:
/* Do not do bcopy at all */
if (rbrp->rbr_use_bcopy) {
}
break;
case HXGE_RX_COPY_1:
case HXGE_RX_COPY_2:
case HXGE_RX_COPY_3:
case HXGE_RX_COPY_4:
case HXGE_RX_COPY_5:
case HXGE_RX_COPY_6:
case HXGE_RX_COPY_7:
break;
case HXGE_RX_COPY_ALL:
rbrp->rbr_threshold_lo = 0;
break;
}
"hxge_map_rxdma_channel_cfg_ring: channel %d rbb_max %d "
"rbrp->rbr_bufsize_type %d rbb_threshold_hi %d "
"rbb_threshold_lo %d",
/* Map in the receive completion ring */
sizeof (rcr_entry_t));
rcrp->comp_rd_index = 0;
rcrp->comp_wt_index = 0;
#if defined(__i386)
#else
#endif
(hxge_port_rcr_size - 1);
(hxge_port_rcr_size - 1);
"==> hxge_map_rxdma_channel_cfg_ring: channel %d "
"rbr_vaddrp $%p rcr_desc_rd_head_p $%p "
"rcr_desc_rd_head_pp $%p rcr_desc_rd_last_p $%p "
"rcr_desc_rd_last_pp $%p ",
rcrp->rcr_desc_last_pp));
/*
* Zero out buffer block ring descriptors.
*/
rcrp->sw_priv_hdr_len = 0;
/*
* Timeout should be set based on the system clock divider. The
* following timeout value of 1 assumes that the granularity (1000) is
* 3 microseconds running at 300MHz.
*/
/* Map in the mailbox */
"==> hxge_map_rxdma_channel_cfg_ring: "
"channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
"==> hxge_map_rxdma_channel_cfg_ring: channel %d damaddrp $%p "
"cfg1 0x%016llx cfig2 0x%016llx",
"<== hxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
return (status);
}
/*ARGSUSED*/
static void
{
"<== hxge_unmap_rxdma_channel_cfg_ring"));
}
static hxge_status_t
{
int i, j, index;
"==> hxge_map_rxdma_channel_buf_ring: channel %d", channel));
" hxge_map_rxdma_channel_buf_ring: channel %d to map %d "
nmsgs = 0;
for (i = 0; i < num_chunks; i++, tmp_bufp++) {
"==> hxge_map_rxdma_channel_buf_ring: channel %d "
"bufp 0x%016llx nblocks %d nmsgs %d",
}
if (!nmsgs) {
"<== hxge_map_rxdma_channel_buf_ring: channel %d "
"no msg blocks", channel));
status = HXGE_ERROR;
}
KM_SLEEP);
(void *) hxgep->interrupt_cookie);
(void *) hxgep->interrupt_cookie);
/*
     * Buffer sizes suggested by the NIU architect: 256, 512 and 2K.
*/
} else {
}
"==> hxge_map_rxdma_channel_buf_ring: channel %d "
"actual rbr max %d rbb_max %d nmsgs %d "
"rbrp->block_size %d default_block_size %d "
"(config hxge_rbr_size %d hxge_rbr_spare_size %d)",
/*
* Map in buffers from the buffer pool.
     * Note that num_blocks is the number of chunks. For SPARC, there is
     * likely only one chunk; for x86, there will be many chunks.
     * Loop over the chunks.
*/
index = 0;
#if defined(__i386)
#else
#endif
#if defined(__i386)
#else
#endif
" hxge_map_rxdma_channel_buf_ring: map channel %d "
"chunk %d nblocks %d chunk_size %x block_size 0x%x "
"dma_bufp $%p dvma_addr $%p", channel, i,
/* loop over blocks within a chunk */
for (j = 0; j < nblocks; j++) {
"allocb failed (index %d i %d j %d)",
index, i, j));
}
/*
* Too much output
* HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
* "index %d j %d rx_msg_p $%p mblk %p",
* index, j, rx_msg_p, rx_msg_p->rx_mblk_p));
*/
rbrp->rbr_ref_cnt++;
index++;
}
}
if (i < rbrp->num_blocks) {
}
"hxge_map_rxdma_channel_buf_ring: done buf init "
"channel %d done buf info init", channel));
/*
* Finally, permit hxge_freeb() to call hxge_post_page().
*/
" hxge_map_rxdma_channel_buf_ring: failed channel (0x%x)",
index--;
}
}
status = HXGE_ERROR;
"<== hxge_map_rxdma_channel_buf_ring status 0x%08x", status));
return (status);
}
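/*
 * Illustrative sketch (not driver code) of the chunk/block arithmetic
 * hxge_map_rxdma_channel_buf_ring() uses above: each DMA chunk is carved
 * into fixed-size blocks, and block j of a chunk starts at the chunk
 * base plus j times the block size (the same offset applies to both the
 * kernel virtual and the DVMA view of the chunk).  The structure and
 * names below are hypothetical.
 */
typedef struct {
    uint64_t    ioaddr;     /* chunk DVMA (IO) base address */
    uchar_t     *kaddr;     /* chunk kernel virtual base address */
    uint32_t    nblocks;    /* number of blocks in this chunk */
} buf_chunk_sketch_t;

static uchar_t *
hxge_chunk_block_kaddr_sketch(buf_chunk_sketch_t *chunk, uint32_t j,
    uint32_t block_size)
{
    if (j >= chunk->nblocks)
        return (NULL);      /* block index out of range */

    return (chunk->kaddr + (uint64_t)j * block_size);
}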
/*ARGSUSED*/
static void
{
int i;
"==> hxge_unmap_rxdma_channel_buf_ring"));
"<== hxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
return;
}
"<== hxge_unmap_rxdma_channel_buf_ring: "
return;
}
" hxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
rx_msg_p = rx_msg_ring[i];
" hxge_unmap_rxdma_channel_buf_ring: "
"rx_msg_p $%p", rx_msg_p));
rx_msg_ring[i] = NULL;
}
}
/*
     * We may no longer use the mutex <post_lock>. By setting
* <rbr_state> to anything but POSTING, we prevent
* hxge_post_page() from accessing a dead mutex.
*/
if (rbr_p->rbr_ref_cnt == 0) {
/* This is the normal state of affairs. */
} else {
/*
* Some of our buffers are still being used.
* Therefore, tell hxge_freeb() this ring is
* unmapped, so it may free <rbr_p> for us.
*/
"unmap_rxdma_buf_ring: %d %s outstanding.",
}
"<== hxge_unmap_rxdma_channel_buf_ring"));
}
static hxge_status_t
{
/*
* Load the sharable parameters by writing to the function zero control
* registers. These FZC registers should be initialized only once for
* the entire chip.
*/
(void) hxge_init_fzc_rx_common(hxgep);
return (status);
}
static hxge_status_t
{
int i, ndmas;
"<== hxge_rxdma_hw_start: NULL ring pointers"));
return (HXGE_ERROR);
}
if (ndmas == 0) {
"<== hxge_rxdma_hw_start: no dma channel allocated"));
return (HXGE_ERROR);
}
"==> hxge_rxdma_hw_start (ndmas %d)", ndmas));
/*
* Scrub the RDC Rx DMA Prefetch Buffer Command.
*/
for (i = 0; i < 128; i++) {
}
/*
* Scrub Rx DMA Shadow Tail Command.
*/
for (i = 0; i < 64; i++) {
}
/*
* Scrub Rx DMA Control Fifo Command.
*/
for (i = 0; i < 512; i++) {
}
/*
* Scrub Rx DMA Data Fifo Command.
*/
for (i = 0; i < 1536; i++) {
}
/*
* Reset the FIFO Error Stat.
*/
/* Set the error mask to receive interrupts */
if (rx_mbox_areas_p) {
}
for (i = 0; i < ndmas; i++) {
"==> hxge_rxdma_hw_start (ndmas %d) channel %d",
(p_rx_rbr_ring_t)rbr_rings[i],
(p_rx_rcr_ring_t)rcr_rings[i],
}
}
"rx_rbr_rings 0x%016llx rings 0x%016llx",
goto hxge_rxdma_hw_start_exit;
"==> hxge_rxdma_hw_start: disable "
for (; i >= 0; i--) {
}
"==> hxge_rxdma_hw_start: (status 0x%x)", status));
return (status);
}
static void
{
int i, ndmas;
"<== hxge_rxdma_hw_stop: NULL ring pointers"));
return;
}
if (!ndmas) {
"<== hxge_rxdma_hw_stop: no dma channel allocated"));
return;
}
"==> hxge_rxdma_hw_stop (ndmas %d)", ndmas));
for (i = 0; i < ndmas; i++) {
"==> hxge_rxdma_hw_stop (ndmas %d) channel %d",
}
"rx_rbr_rings 0x%016llx rings 0x%016llx",
}
static hxge_status_t
int n_init_kick)
{
"hpi handle addr $%p acc $%p",
/* Reset RXDMA channel */
if (rs != HPI_SUCCESS) {
"==> hxge_rxdma_start_channel: "
"reset rxdma failed (0x%08x channel %d)",
return (HXGE_ERROR | rs);
}
"==> hxge_rxdma_start_channel: reset done: channel %d", channel));
/*
     * Initialize the RXDMA channel-specific FZC control configurations.
     * These FZC registers pertain to each RX channel (logical
     * pages).
*/
"==> hxge_rxdma_start_channel: "
"init fzc rxdma failed (0x%08x channel %d)",
return (status);
}
"==> hxge_rxdma_start_channel: fzc done"));
/*
* Zero out the shadow and prefetch ram.
*/
"==> hxge_rxdma_start_channel: ram done"));
/* Set up the interrupt event masks. */
if (rs != HPI_SUCCESS) {
"==> hxge_rxdma_start_channel: "
"init rxdma event masks failed (0x%08x channel %d)",
return (HXGE_ERROR | rs);
}
"event done: channel %d (mask 0x%016llx)",
/*
     * Load RXDMA descriptors, buffers, mailbox, initialize the receive DMA
* channels and enable each DMA channel.
*/
" hxge_rxdma_start_channel: "
" init enable rxdma failed (0x%08x channel %d)",
return (status);
}
/*
     * Initialize the receive DMA control and status register.
     * Note that rdc_stat HAS to be set after the RBR and RCR rings are set.
*/
"==> hxge_rxdma_start_channel: "
"init rxdma control register failed (0x%08x channel %d",
return (status);
}
"==> hxge_rxdma_start_channel: enable done"));
return (HXGE_OK);
}
static hxge_status_t
{
"hpi handle addr $%p acc $%p",
/* Reset RXDMA channel */
if (rs != HPI_SUCCESS) {
" hxge_rxdma_stop_channel: "
" reset rxdma failed (0x%08x channel %d)",
return (HXGE_ERROR | rs);
}
"==> hxge_rxdma_stop_channel: reset done"));
/* Set up the interrupt event masks. */
if (rs != HPI_SUCCESS) {
"==> hxge_rxdma_stop_channel: "
"set rxdma event masks failed (0x%08x channel %d)",
return (HXGE_ERROR | rs);
}
"==> hxge_rxdma_stop_channel: event done"));
/* Initialize the receive DMA control and status register */
" hxge_rxdma_stop_channel: init rxdma"
" control register failed (0x%08x channel %d",
return (status);
}
"==> hxge_rxdma_stop_channel: control done"));
/* disable dma channel */
" hxge_rxdma_stop_channel: "
" init enable rxdma failed (0x%08x channel %d)",
return (status);
}
"==> hxge_rxdma_stop_channel: disable done"));
return (HXGE_OK);
}
{
/* Clear the int_dbg register in case it is an injected err */
/* Get the error status and clear the register */
statsp->ctrl_fifo_sec++;
"==> hxge_rxdma_handle_sys_errors: "
"rx_ctrl_fifo_sec"));
}
/* Global fatal error encountered */
statsp->ctrl_fifo_ded++;
"==> hxge_rxdma_handle_sys_errors: "
"fatal error: rx_ctrl_fifo_ded error"));
}
statsp->data_fifo_sec++;
"==> hxge_rxdma_handle_sys_errors: "
"rx_data_fifo_sec"));
}
/* Global fatal error encountered */
statsp->data_fifo_ded++;
"==> hxge_rxdma_handle_sys_errors: "
"fatal error: rx_data_fifo_ded error"));
}
" hxge_rxdma_handle_sys_errors: fatal error\n"));
}
}
return (HXGE_OK);
}
static hxge_status_t
{
int ring_idx;
int i;
int n_init_kick = 0;
"Recovering from RxDMAChannel#%d error...", channel));
/*
     * Stop the DMA channel and wait for the stop-done bit. If the stop-done
     * bit is not set, then flag an error.
*/
if (rs != HPI_SUCCESS) {
"hxge_disable_rxdma_channel:failed"));
goto fail;
}
/* Disable interrupt */
if (rs != HPI_SUCCESS) {
"Set rxdma event masks failed (channel %d)", channel));
}
/* Reset RXDMA channel */
if (rs != HPI_SUCCESS) {
"Reset rxdma failed (channel %d)", channel));
goto fail;
}
rbrp->rbr_rd_index = 0;
rcrp->comp_rd_index = 0;
rcrp->comp_wt_index = 0;
#if defined(__i386)
#else
#endif
(hxge_port_rcr_size - 1);
(hxge_port_rcr_size - 1);
rbrp->rbr_max_size));
/* Count the number of buffers owned by the hardware at this moment */
for (i = 0; i < rbrp->rbr_max_size; i++) {
n_init_kick++;
}
}
/*
     * This is error recovery! Some buffers are owned by the hardware and
* the rest are owned by the apps. We should only kick in those
* owned by the hardware initially. The apps will post theirs
* eventually.
*/
goto fail;
}
/*
* The DMA channel may disable itself automatically.
* The following is a work-around.
*/
if (rs != HPI_SUCCESS) {
"hpi_rxdma_cfg_rdc_enable (channel %d)", channel));
}
"Recovery Successful, RxDMAChannel#%d Restored", channel));
return (HXGE_OK);
fail:
return (HXGE_ERROR | rs);
}
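/*
 * Illustrative sketch (not driver code) of the n_init_kick computation
 * in hxge_rxdma_fatal_err_recover() above: only buffers currently owned
 * by the hardware are kicked back when the channel is re-initialized;
 * buffers loaned up to the stack are reposted later as they are freed.
 * The structure and the ref_cnt == 1 ownership test are hypothetical
 * stand-ins for the driver's own bookkeeping.
 */
typedef struct {
    uint32_t    ref_cnt;    /* 1 == posted to the hardware only */
} rx_buf_owner_sketch_t;

static int
hxge_count_hw_owned_sketch(rx_buf_owner_sketch_t *bufs, uint32_t nbufs)
{
    uint32_t    i;
    int         n_init_kick = 0;

    for (i = 0; i < nbufs; i++) {
        if (bufs[i].ref_cnt == 1)   /* not loaned up to the stack */
            n_init_kick++;
    }
    return (n_init_kick);
}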
static hxge_status_t
{
int ndmas;
int i;
/* Reset RDC block from PEU for this fatal error */
/* Disable RxMAC */
goto fail;
HXGE_DELAY(1000);
/* Restore any common settings after PEU reset */
goto fail;
for (i = 0; i < ndmas; i++) {
"Could not recover channel %d", channel));
}
}
/* Reset RxMAC */
"hxge_rx_port_fatal_err_recover: Failed to reset RxMAC"));
goto fail;
}
/* Re-Initialize RxMAC */
"hxge_rx_port_fatal_err_recover: Failed to reset RxMAC"));
goto fail;
}
/* Re-enable RxMAC */
"hxge_rx_port_fatal_err_recover: Failed to enable RxMAC"));
goto fail;
}
/* Reset the error mask since PEU reset cleared it */
"Recovery Successful, RxPort Restored"));
return (HXGE_OK);
fail:
return (status);
}