/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <hxge_impl.h>
#include <hxge_txdma.h>
extern uint32_t hxge_tx_ring_size;
extern uint32_t hxge_bcopy_thresh;
extern uint32_t hxge_dvma_thresh;
extern uint32_t hxge_dma_stream_thresh;
extern dma_method_t hxge_force_dma;
/* Device register access attributes for PIO. */
extern ddi_device_acc_attr_t hxge_dev_reg_acc_attr;
/* Device descriptor access attributes for DMA. */
extern ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr;
/* Device buffer access attributes for DMA. */
extern ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr;
extern ddi_dma_attr_t hxge_desc_dma_attr;
extern ddi_dma_attr_t hxge_tx_dma_attr;
hxge_status_t
hxge_init_txdma_channels(p_hxge_t hxgep)
{
/*
 * Reset the TDC block from the PEU to clean up any unknown
 * configuration, which may be left over from a previous reboot.
*/
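	/*
	 * A minimal sketch of the reset write, assuming the block_reset_t
	 * overlay and the BLOCK_RESET offset from the Hydra PEU headers:
	 */
	{
		block_reset_t reset_reg;

		reset_reg.value = 0;
		reset_reg.bits.tdc_rst = 1;	/* reset only the TDC block */
		HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
	}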
HXGE_DELAY(1000);
"<== hxge_init_txdma_channels: status 0x%x", status));
return (status);
}
return (status);
}
"<== hxge_init_txdma_channels: status 0x%x", status));
return (HXGE_OK);
}
void
{
}
void
{
}
{
} else {
}
if (rs != HPI_SUCCESS) {
}
/*
 * Reset the tail (kick) register to 0. (Hardware will not reset it;
 * a Tx overflow fatal error results if the tail is not 0 after reset.)
*/
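	/*
	 * A minimal sketch of the kick clear, assuming this function's usual
	 * handle/channel locals and the TDC_TDR_KICK offset from the TDC
	 * hardware headers:
	 */
	TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0);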
return (status);
}
{
"<== hxge_init_txdma_channel_event_mask"));
/*
 * Mask off tx_rng_oflow since it is a false alarm: the driver ensures
 * that it never overflows the hardware ring and checks the hardware
 * status itself.
*/
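	/*
	 * A minimal sketch of the mask write, assuming a caller-supplied
	 * mask_p and the hpi_txdma_event_mask() helper from hpi_txdma.h:
	 */
	mask_p->bits.tx_rng_oflow = 1;
	rs = hpi_txdma_event_mask(handle, OP_SET, channel, mask_p);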
if (rs != HPI_SUCCESS) {
}
"==> hxge_init_txdma_channel_event_mask"));
return (status);
}
{
/*
* Use configuration data composed at init time. Write to hardware the
* transmit ring configurations.
*/
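	/*
	 * A minimal sketch of the config write, assuming the
	 * hpi_txdma_ring_config() helper and the ring's tx_ring_cfig value
	 * composed at init time:
	 */
	rs = hpi_txdma_ring_config(handle, OP_SET, channel,
	    (uint64_t *)&(tx_ring_p->tx_ring_cfig.value));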
if (rs != HPI_SUCCESS) {
return (HXGE_ERROR | rs);
}
/* Write to hardware the mailbox */
if (rs != HPI_SUCCESS) {
return (HXGE_ERROR | rs);
}
/* Start the DMA engine. */
if (rs != HPI_SUCCESS) {
return (HXGE_ERROR | rs);
}
return (status);
}
void
{
/*
* Caller should zero out the headers first.
*/
if (fill_len) {
"==> hxge_fill_tx_hdr: pkt_len %d npads %d",
goto fill_tx_header_done;
}
/*
 * mp is the original data packet (it does not include the Hydra
 * transmit header).
*/
"==> hxge_fill_tx_hdr: mp $%p b_rptr $%p len %d",
"==> : hxge_fill_tx_hdr: (value 0x%llx) ether type 0x%x",
tmp = 1ull;
if (*(hdrs_buf + sizeof (struct ether_header)) ==
LLC_SNAP_SAP) {
sizeof (struct ether_header) + 6)));
"==> hxge_tx_pkt_hdr_init: LLC ether type 0x%x",
eth_type));
} else {
goto fill_tx_header_done;
}
} else if (eth_type == VLAN_ETHERTYPE) {
tmp = 1ull;
hdrs_buf)->ether_type);
"==> hxge_tx_pkt_hdr_init: VLAN value 0x%llx",
}
if (!is_vlan) {
eth_hdr_size = sizeof (struct ether_header);
} else {
eth_hdr_size = sizeof (struct ether_vlan_header);
}
switch (eth_type) {
case ETHERTYPE_IP:
mblk_len -= eth_hdr_size;
ip_buf += eth_hdr_size;
} else {
}
}
hdrs_size = 0;
if (mblk_len >=
}
ip_buf += eth_hdr_size;
}
" iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
break;
case ETHERTYPE_IPV6:
hdrs_size = 0;
}
ip_buf += eth_hdr_size;
tmp = 1ull;
/* byte 6 is the next header protocol */
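		/* Sketch, assuming ip_buf points at the IPv6 header: */
		ipproto = ip_buf[6];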
" iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
break;
default:
goto fill_tx_header_done;
}
switch (ipproto) {
case IPPROTO_TCP:
"==> hxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
if (l4_cksum) {
tmp = 1ull;
"==> hxge_tx_pkt_hdr_init: TCP CKSUM"
}
break;
case IPPROTO_UDP:
if (l4_cksum) {
tmp = 0x2ull;
}
"==> hxge_tx_pkt_hdr_init: UDP value 0x%llx",
break;
default:
goto fill_tx_header_done;
}
"==> hxge_fill_tx_hdr: pkt_len %d npads %d value 0x%llx",
}
/*ARGSUSED*/
{
"<== hxge_tx_pkt_header_reserve: allocb failed"));
return (NULL);
}
"==> hxge_tx_pkt_header_reserve: get new mp"));
"==>hxge_tx_pkt_header_reserve: b_rptr $%p b_wptr $%p",
"<== hxge_tx_pkt_header_reserve: use new mp"));
return (newmp);
}
int
{
"==> hxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p len %d",
nmblks = 0;
pkt_len = 0;
*tot_xfer_len_p = 0;
while (nmp) {
"len %d pkt_len %d nmblks %d tot_xfer_len %d",
if (len <= 0) {
"==> hxge_tx_pkt_nmblocks:"
continue;
}
*tot_xfer_len_p += len;
"len %d pkt_len %d nmblks %d tot_xfer_len %d",
if (len < hxge_bcopy_thresh) {
"==> hxge_tx_pkt_nmblocks: "
"len %d (< thresh) pkt_len %d nmblks %d",
if (pkt_len == 0)
nmblks++;
if (pkt_len >= hxge_bcopy_thresh) {
pkt_len = 0;
len = 0;
}
} else {
"==> hxge_tx_pkt_nmblocks: "
"len %d (> thresh) pkt_len %d nmblks %d",
pkt_len = 0;
nmblks++;
/*
 * Hardware limits the transfer length to 4K. If len is
 * more than 4K, we need to break it up into at most 2
 * more blocks.
*/
if (len > TX_MAX_TRANSFER_LENGTH) {
"==> hxge_tx_pkt_nmblocks: "
"len %d pkt_len %d nmblks %d nsegs %d",
nsegs = 1;
++nsegs;
}
do {
return (0);
}
nmblks++;
if (--nsegs) {
}
} while (nsegs);
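			/*
			 * Sketch of the segment math used above: nsegs is
			 * effectively howmany(len, TX_MAX_TRANSFER_LENGTH),
			 * i.e.
			 *
			 *	nsegs = 1;
			 *	if (len > TX_MAX_TRANSFER_LENGTH)
			 *		nsegs += (len - 1) /
			 *		    TX_MAX_TRANSFER_LENGTH;
			 */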
}
}
/*
* Hardware limits the transmit gather pointers to 15.
*/
"==> hxge_tx_pkt_nmblocks: pull msg - "
"len %d pkt_len %d nmblks %d",
/* Pull all message blocks from b_cont */
return (0);
}
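		/*
		 * Sketch of the pull-up path, using the standard STREAMS
		 * msgpullup(9F) routine (the new_mp local is assumed):
		 *
		 *	if ((new_mp = msgpullup(mp, -1)) == NULL)
		 *		return (0);
		 */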
pkt_len = 0;
}
}
"<== hxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
"nmblks %d len %d tot_xfer_len %d",
return (nmblks);
}
{
int rc;
(nmblks != 0));
"==> hxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
if (!status) {
}
/*
 * The tdc_byte_cnt register can be used to get the number of bytes
 * transmitted; it also includes padding in the case of runt packets.
*/
"==> hxge_txdma_reclaim: tdc %d tx_rd_index %d "
"tail_index %d tail_wrap %d tx_desc_p $%p ($%p) ",
/*
* Read the hardware maintained transmit head and wrap around
* bit.
*/
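	/*
	 * A minimal sketch of the head read, assuming a tdc_tdr_head_t
	 * overlay with head/wrap fields and the TDC_TDR_HEAD offset:
	 */
	{
		tdc_tdr_head_t tx_head;

		tx_head.value = TXDMA_REG_READ64(handle, TDC_TDR_HEAD, tdc);
		head_index = tx_head.bits.head;
		head_wrap = tx_head.bits.wrap;
	}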
"==> hxge_txdma_reclaim: "
"tx_rd_index %d tail %d tail_wrap %d head %d wrap %d",
/*
* For debug only. This can be used to verify the qlen and make
 * sure the hardware is wrapping the TDR correctly.
*/
"==> hxge_txdma_reclaim: tdr_qlen %d tdr_pref_qlen %d",
if (head_index == tail_index) {
"==> hxge_txdma_reclaim: EMPTY"));
return (B_TRUE);
}
"==> hxge_txdma_reclaim: Checking if ring full"));
tail_wrap)) {
"==> hxge_txdma_reclaim: full"));
return (B_FALSE);
}
}
"==> hxge_txdma_reclaim: tx_rd_index and head_index"));
/* XXXX: limit the # of reclaims */
tx_desc_pp = &r_tx_desc;
while ((tx_rd_index != head_index) &&
(tx_ring_p->descs_pending != 0)) {
"==> hxge_txdma_reclaim: Checking if pending"));
"==> hxge_txdma_reclaim: descs_pending %d ",
"==> hxge_txdma_reclaim: "
"(tx_rd_index %d head_index %d (tx_desc_p $%p)",
"==> hxge_txdma_reclaim: "
"(tx_rd_index %d head_index %d "
"tx_desc_p $%p (desc value 0x%llx) ",
"==> hxge_txdma_reclaim: dump desc:"));
/*
* tdc_byte_cnt reg can be used to get bytes
* transmitted
*/
"==> hxge_txdma_reclaim: pkt_len %d "
"tdc channel %d opackets %d",
"tx_desc_p = $%p tx_desc_pp = $%p "
"index = %d",
0, -1);
if (tx_ring_p->dvma_wr_index ==
tx_ring_p->dvma_wr_index = 0;
} else {
}
"==> hxge_txdma_reclaim: USE DMA"));
if (rc = ddi_dma_unbind_handle
(tx_msg_p->dma_handle)) {
"ddi_dma_unbind_handle "
"failed. status %d", rc);
}
}
"==> hxge_txdma_reclaim: count packets"));
/*
* count a chained packet only once.
*/
}
}
if (status) {
1, 0);
}
} else {
}
"<== hxge_txdma_reclaim status = 0x%08x", status));
return (status);
}
{
return (DDI_INTR_UNCLAIMED);
}
}
/*
* If the interface is not started, just swallow the interrupt
* and don't rearm the logical device.
*/
return (DDI_INTR_CLAIMED);
/*
* This interrupt handler is for a specific transmit dma channel.
*/
/* Get the control and status for this channel. */
"==> hxge_tx_intr: hxgep $%p ldvp (ldvp) $%p channel %d",
"==> hxge_tx_intr:channel %d ring index %d status 0x%08x",
"==> hxge_tx_intr:channel %d ring index %d "
"==> hxge_tx_intr:channel %d ring index %d "
"status 0x%08x (marked bit set, calling reclaim)",
}
/*
* Process other transmit control and status. Check the ldv state.
*/
/* Clear the error bits */
/*
* Rearm this logical group if this is a single device group.
*/
}
}
return (serviced);
}
void
{
(void) hxge_tx_vmac_disable(hxgep);
}
{
int i, ndmas;
"==> hxge_txdma_hw_mode: enable mode %d", enable));
"<== hxge_txdma_mode: not initialized"));
return (HXGE_ERROR);
}
"<== hxge_txdma_hw_mode: NULL global ring pointer"));
return (HXGE_ERROR);
}
if (tx_desc_rings == NULL) {
"<== hxge_txdma_hw_mode: NULL rings pointer"));
return (HXGE_ERROR);
}
if (!ndmas) {
"<== hxge_txdma_hw_mode: no dma channel allocated"));
return (HXGE_ERROR);
}
"tx_rings $%p tx_desc_rings $%p ndmas %d",
for (i = 0; i < ndmas; i++) {
if (tx_desc_rings[i] == NULL) {
continue;
}
"==> hxge_txdma_hw_mode: channel %d", channel));
if (enable) {
"==> hxge_txdma_hw_mode: channel %d (enable) "
} else {
/*
 * Stop the DMA channel and wait for the stop-done bit. If
 * the stop-done bit is not set, force an error so the TXC
 * will stop. All channels bound to this port need to be
 * stopped and reset after injecting an interrupt error.
*/
"==> hxge_txdma_hw_mode: channel %d (disable) "
}
}
"<== hxge_txdma_hw_mode: status 0x%x", status));
return (status);
}
void
{
"==> hxge_txdma_enable_channel: channel %d", channel));
/* enable the transmit dma channels */
}
void
{
"==> hxge_txdma_disable_channel: channel %d", channel));
/* stop the transmit dma channels */
}
int
{
int status;
/*
 * Stop the DMA channel and wait for the stop-done bit. If the
 * stop-done bit is not set, then inject an error.
*/
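	/*
	 * A minimal sketch of the stop attempt, assuming handle was obtained
	 * via HXGE_DEV_HPI_HANDLE(hxgep) and that hpi_txdma_channel_disable()
	 * polls the stop-done bit internally:
	 */
	status = hpi_txdma_channel_disable(handle, channel);
	if (status == HPI_SUCCESS) {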
"<== hxge_txdma_stop_inj_err (channel %d): "
"stopped OK", channel));
return (status);
}
"==> hxge_txdma_stop_inj_err (channel): stop failed (0x%x) "
return (status);
}
/*ARGSUSED*/
void
{
/*
* For each transmit channel, reclaim each descriptor and free buffers.
*/
"<== hxge_fixup_txdma_rings: NULL ring pointer"));
return;
}
if (!ndmas) {
"<== hxge_fixup_txdma_rings: no channel allocated"));
return;
}
"<== hxge_fixup_txdma_rings: NULL rings pointer"));
return;
}
"tx_rings $%p tx_desc_rings $%p ndmas %d",
"==> hxge_fixup_txdma_rings: channel %d", channel));
channel);
}
}
/*ARGSUSED*/
void
{
return;
}
"<== hxge_txdma_fix_channel: channel not matched "
return;
}
}
/*ARGSUSED*/
void
{
"<== hxge_txdma_fixup_channel: NULL ring pointer"));
return;
}
"<== hxge_txdma_fixup_channel: channel not matched "
return;
}
ring_p->descs_pending = 0;
}
/*ARGSUSED*/
void
{
"<== hxge_txdma_hw_kick: NULL ring pointer"));
return;
}
if (!ndmas) {
"<== hxge_txdma_hw_kick: no channel allocated"));
return;
}
"<== hxge_txdma_hw_kick: NULL rings pointer"));
return;
}
"tx_rings $%p tx_desc_rings $%p ndmas %d",
"==> hxge_txdma_hw_kick: channel %d", channel));
channel);
}
}
/*ARGSUSED*/
void
{
return;
}
"<== hxge_txdma_kick_channel: channel not matched "
return;
}
}
/*ARGSUSED*/
void
{
"<== hxge_txdma_hw_kick_channel: NULL ring pointer"));
return;
}
}
/*ARGSUSED*/
void
{
/*
 * This needs input from the hardware registers: the head index has not
 * moved since the last timeout, and no packets were transmitted (the
 * transmitted/stuffed counters did not advance).
*/
if (hxge_txdma_hung(hxgep)) {
}
}
int
{
"<== hxge_txdma_hung: NULL ring pointer"));
return (B_FALSE);
}
if (!ndmas) {
"<== hxge_txdma_hung: no channel allocated"));
return (B_FALSE);
}
"<== hxge_txdma_hung: NULL rings pointer"));
return (B_FALSE);
}
"==> hxge_txdma_hung: channel %d", channel));
return (B_TRUE);
}
}
return (B_FALSE);
}
int
{
"==> hxge_txdma_channel_hung: channel %d", channel));
"==> hxge_txdma_channel_hung: tdc %d tx_rd_index %d "
"tail_index %d tail_wrap %d ",
/*
* Read the hardware maintained transmit head and wrap around bit.
*/
"tx_rd_index %d tail %d tail_wrap %d head %d wrap %d",
(head_index == tx_rd_index)) {
"==> hxge_txdma_channel_hung: EMPTY"));
return (B_FALSE);
}
"==> hxge_txdma_channel_hung: Checking if ring full"));
"==> hxge_txdma_channel_hung: full"));
return (B_TRUE);
}
/* If not full, check with hardware to see if it is hung */
return (B_FALSE);
}
/*ARGSUSED*/
void
{
"<== hxge_fixup_hung_txdma_rings: NULL ring pointer"));
return;
}
if (!ndmas) {
"<== hxge_fixup_hung_txdma_rings: no channel allocated"));
return;
}
"<== hxge_fixup_hung_txdma_rings: NULL rings pointer"));
return;
}
"tx_rings $%p tx_desc_rings $%p ndmas %d",
"==> hxge_fixup_hung_txdma_rings: channel %d", channel));
channel);
}
}
/*ARGSUSED*/
void
{
"<== hxge_txdma_fix_hung_channel"));
return;
}
"<== hxge_txdma_fix_hung_channel: channel not matched "
return;
}
}
/*ARGSUSED*/
void
{
"<== hxge_txdma_fixup_hung_channel: NULL ring pointer"));
return;
}
"<== hxge_txdma_fixup_hung_channel: channel "
"not matched ring tdc %d passed channel",
return;
}
/* Reclaim descriptors */
/*
 * Stop the DMA channel and wait for the stop-done bit. If the
 * stop-done bit is not set, then force an error.
*/
if (!(status & HPI_TXDMA_STOP_FAILED)) {
"<== hxge_txdma_fixup_hung_channel: stopped OK "
return;
}
/* Stop done bit will be set as a result of error injection */
if (!(status & HPI_TXDMA_STOP_FAILED)) {
"<== hxge_txdma_fixup_hung_channel: stopped again"
return;
}
"<== hxge_txdma_fixup_hung_channel: stop done still not set!! "
}
/*ARGSUSED*/
void
{
"<== hxge_reclain_rimgs: NULL ring pointer"));
return;
}
if (!ndmas) {
"<== hxge_reclain_rimgs: no channel allocated"));
return;
}
"<== hxge_reclain_rimgs: NULL rings pointer"));
return;
}
"tx_rings $%p tx_desc_rings $%p ndmas %d",
channel));
}
}
/*
* Static functions start here.
*/
static hxge_status_t
{
int i, ndmas;
"==> hxge_map_txdma: buf not allocated"));
return (HXGE_ERROR);
}
if (!ndmas) {
"<== hxge_map_txdma: no dma allocated"));
return (HXGE_ERROR);
}
/*
* Map descriptors from the buffer pools for each dma channel.
*/
for (i = 0; i < ndmas; i++) {
/*
* Set up and prepare buffer blocks, descriptors and mailbox.
*/
(p_hxge_dma_common_t *)&dma_buf_p[i],
(p_tx_ring_t *)&tx_desc_rings[i],
dma_buf_poolp->num_chunks[i],
(p_hxge_dma_common_t *)&dma_cntl_p[i],
(p_tx_mbox_t *)&tx_mbox_p[i]);
goto hxge_map_txdma_fail1;
}
}
"tx_rings $%p tx_desc_rings $%p",
goto hxge_map_txdma_exit;
"==> hxge_map_txdma: uninit tx desc "
i--;
for (; i >= 0; i--) {
tx_mbox_p[i]);
}
return (status);
}
static void
{
int i, ndmas;
if (!dma_buf_poolp->buf_allocated) {
"==> hxge_unmap_txdma: buf not allocated"));
return;
}
if (!ndmas) {
"<== hxge_unmap_txdma: no dma allocated"));
return;
}
"<== hxge_unmap_txdma: NULL ring pointer"));
return;
}
if (tx_desc_rings == NULL) {
"<== hxge_unmap_txdma: NULL ring pointers"));
return;
}
"tx_rings $%p tx_desc_rings $%p ndmas %d",
for (i = 0; i < ndmas; i++) {
(p_tx_ring_t)tx_desc_rings[i],
(p_tx_mbox_t)tx_mbox_p[i]);
}
}
static hxge_status_t
{
/*
* Set up and prepare buffer blocks, descriptors and mailbox.
*/
"==> hxge_map_txdma_channel (channel %d)", channel));
/*
* Transmit buffer blocks
*/
"==> hxge_map_txdma_channel (channel %d): "
}
/*
* Transmit block ring, and mailbox.
*/
"==> hxge_map_txdma_channel: unmap buf"
"<== hxge_map_txdma_channel: (status 0x%x channel %d)",
return (status);
}
/*ARGSUSED*/
static void
{
"==> hxge_unmap_txdma_channel (channel %d)", channel));
/* unmap tx block ring, and mailbox. */
/* unmap buffer blocks */
}
/*ARGSUSED*/
static void
{
"==> hxge_map_txdma_channel_cfg_ring"));
cntl_dmap = *dma_cntl_p;
sizeof (tx_desc_t));
/*
* Zero out transmit ring descriptors.
*/
tx_ring_cfig_p->value = 0;
tx_ring_kick_p->value = 0;
tx_evmask_p->value = 0;
"==> hxge_map_txdma_channel_cfg_ring: channel %d des $%p",
tx_ring_cfig_p->value = 0;
/* The Hydra ring length field is 11 bits wide; the lower 5 bits are 0s. */
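	/*
	 * Sketch: the ring size is programmed in units of 32 descriptors,
	 * hence the 5-bit shift (the cfig 'len' field name is assumed):
	 *
	 *	tx_ring_cfig_p->bits.len = tx_ring_p->tx_ring_size >> 5;
	 */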
"==> hxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
/* Map in mailbox */
"==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
TDC_MBL_MASK) >> TDC_MBL_SHIFT);
"==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
"==> hxge_map_txdma_channel_cfg_ring: hmbox $%p mbox $%p",
/*
* Set page valid and no mask
*/
"<== hxge_map_txdma_channel_cfg_ring"));
}
/*ARGSUSED*/
static void
{
"==> hxge_unmap_txdma_channel_cfg_ring: channel %d",
"<== hxge_unmap_txdma_channel_cfg_ring"));
}
static hxge_status_t
{
int i, j, index;
"==> hxge_map_txdma_channel_buf_ring"));
" hxge_map_txdma_channel_buf_ring: channel %d to map %d "
nmsgs = 0;
for (i = 0; i < num_chunks; i++, tmp_bufp++) {
"==> hxge_map_txdma_channel_buf_ring: channel %d "
"bufp $%p nblocks %d nmsgs %d",
}
if (!nmsgs) {
"<== hxge_map_txdma_channel_buf_ring: channel %d "
"no msg blocks", channel));
status = HXGE_ERROR;
}
TASKQ_DEFAULTPRI, 0);
}
(void *) hxgep->interrupt_cookie);
/*
 * Allocate transmit message rings and DMA handles for packets that
 * will not be copied into the premapped buffers.
*/
for (i = 0; i < nmsgs; i++) {
if (ddi_status != DDI_SUCCESS) {
break;
}
}
if (i < nmsgs) {
"Allocate handles failed."));
}
if (!hxge_tx_intr_thres) {
}
tx_ring_p->descs_pending = 0;
"==> hxge_map_txdma_channel_buf_ring: channel %d "
"actual tx desc max %d nmsgs %d (config hxge_tx_ring_size %d)",
/*
* Map in buffers from the buffer pool.
*/
index = 0;
"dma_bufp $%p tx_rng_p $%p tx_msg_rng_p $%p bsize %d",
for (i = 0; i < num_chunks; i++, dma_bufp++) {
"==> hxge_map_txdma_channel_buf_ring: dma chunk %d "
"size %d dma_bufp $%p",
i, sizeof (hxge_dma_common_t), dma_bufp));
for (j = 0; j < nblocks; j++) {
"==> hxge_map_txdma_channel_buf_ring: j %d"
"dmap $%p", i, dmap));
}
}
if (i < num_chunks) {
status = HXGE_ERROR;
}
}
index--;
}
}
status = HXGE_ERROR;
"<== hxge_map_txdma_channel_buf_ring status 0x%x", status));
return (status);
}
/*ARGSUSED*/
static void
{
int i;
"==> hxge_unmap_txdma_channel_buf_ring"));
"<== hxge_unmap_txdma_channel_buf_ring: NULL ringp"));
return;
}
"==> hxge_unmap_txdma_channel_buf_ring: channel %d",
for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
tx_msg_p = &tx_msg_ring[i];
if (tx_ring_p->dvma_wr_index ==
tx_ring_p->dvma_wr_index = 0;
} else {
}
"ddi_dma_unbind_handle failed.");
}
}
}
}
for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
}
}
}
"<== hxge_unmap_txdma_channel_buf_ring"));
}
static hxge_status_t
{
int i, ndmas;
/*
 * Initialize the REORD table:
 * 1. Disable the VMAC.
 * 2. Reset the FIFO error status.
 * 3. Scrub the memory and check for errors.
*/
(void) hxge_tx_vmac_disable(hxgep);
/*
* Clear the error status
*/
/*
* Scrub the rtab memory for the TDC and reset the TDC.
*/
for (i = 0; i < 256; i++) {
(uint64_t)i);
/*
* Write the command register with an indirect read instruction
*/
/*
* Wait for status done
*/
tmp = 0;
do {
&tmp);
}
for (i = 0; i < 256; i++) {
/*
* Write the command register with an indirect read instruction
*/
/*
* Wait for status done
*/
tmp = 0;
do {
&tmp);
"unexpected data (hi), entry: %x, value: 0x%0llx\n",
i, (unsigned long long)tmp));
status = HXGE_ERROR;
}
if (tmp != 0) {
"unexpected data (lo), entry: %x\n", i));
status = HXGE_ERROR;
}
if (tmp != 0) {
"parity error, entry: %x, val 0x%llx\n",
i, (unsigned long long)tmp));
status = HXGE_ERROR;
}
if (tmp != 0) {
"parity error, entry: %x\n", i));
status = HXGE_ERROR;
}
}
goto hxge_txdma_hw_start_exit;
/*
* Reset FIFO Error Status for the TDC and enable FIFO error events.
*/
/*
* Initialize the Transmit DMAs.
*/
"<== hxge_txdma_hw_start: NULL ring pointer"));
return (HXGE_ERROR);
}
if (tx_desc_rings == NULL) {
"<== hxge_txdma_hw_start: NULL ring pointers"));
return (HXGE_ERROR);
}
if (!ndmas) {
"<== hxge_txdma_hw_start: no dma channel allocated"));
return (HXGE_ERROR);
}
"tx_rings $%p tx_desc_rings $%p ndmas %d",
/*
* Init the DMAs.
*/
for (i = 0; i < ndmas; i++) {
(p_tx_ring_t)tx_desc_rings[i],
(p_tx_mbox_t)tx_mbox_p[i]);
}
}
(void) hxge_tx_vmac_enable(hxgep);
"==> hxge_txdma_hw_start: tx_rings $%p rings $%p",
"==> hxge_txdma_hw_start: tx_rings $%p tx_desc_rings $%p",
goto hxge_txdma_hw_start_exit;
"==> hxge_txdma_hw_start: disable (status 0x%x channel %d i %d)",
for (; i >= 0; i--) {
(p_tx_ring_t)tx_desc_rings[i],
(p_tx_mbox_t)tx_mbox_p[i]);
}
"==> hxge_txdma_hw_start: (status 0x%x)", status));
return (status);
}
static void
{
int i, ndmas;
"<== hxge_txdma_hw_stop: NULL ring pointer"));
return;
}
if (tx_desc_rings == NULL) {
"<== hxge_txdma_hw_stop: NULL ring pointers"));
return;
}
if (!ndmas) {
"<== hxge_txdma_hw_stop: no dma channel allocated"));
return;
}
for (i = 0; i < ndmas; i++) {
(p_tx_ring_t)tx_desc_rings[i],
(p_tx_mbox_t)tx_mbox_p[i]);
}
}
static hxge_status_t
{
"==> hxge_txdma_start_channel (channel %d)", channel));
/*
*/
/*
* Reset TXDMA channel
*/
"==> hxge_txdma_start_channel (channel %d)"
}
/*
 * Initialize the TXDMA channel-specific FZC control configurations.
 * These FZC registers pertain to each TX channel (i.e. its logical
 * pages).
*/
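	/*
	 * A minimal sketch, assuming the driver's
	 * hxge_init_fzc_txdma_channel() helper and this function's ring and
	 * mailbox arguments:
	 */
	status = hxge_init_fzc_txdma_channel(hxgep, channel, tx_ring_p,
	    tx_mbox_p);
	if (status != HXGE_OK) {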
}
/*
* Initialize the event masks.
*/
}
/*
 * Load the TXDMA descriptors, buffers, and mailbox, then initialize
 * and enable the DMA channel.
*/
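	/*
	 * A minimal sketch, assuming hxge_enable_txdma_channel() (defined
	 * earlier in this file) performs the load and enable:
	 */
	status = hxge_enable_txdma_channel(hxgep, channel, tx_ring_p,
	    tx_mbox_p);
	if (status != HXGE_OK) {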
}
return (status);
}
/*ARGSUSED*/
static hxge_status_t
{
"==> hxge_txdma_stop_channel: channel %d", channel));
/*
 * Stop (disable) the TXDMA and TXC. (If the stop bit is set and the
 * STOP_N_GO bit is not set, the TXDMA reset state will not be set
 * when the TXDMA channel is reset.)
*/
/*
* Reset TXDMA channel
*/
}
return (status);
}
static p_tx_ring_t
{
"<== hxge_txdma_get_ring: NULL ring pointer"));
return (NULL);
}
if (!ndmas) {
"<== hxge_txdma_get_ring: no channel allocated"));
return (NULL);
}
"<== hxge_txdma_get_ring: NULL rings pointer"));
return (NULL);
}
"tx_rings $%p tx_desc_rings $%p ndmas %d",
"==> hxge_fixup_txdma_rings: channel %d", tdc));
"<== hxge_txdma_get_ring: tdc %d ring $%p",
}
}
return (NULL);
}
static p_tx_mbox_t
{
"<== hxge_txdma_get_mbox: NULL ring pointer"));
return (NULL);
}
if (tx_mbox_areas_p == NULL) {
"<== hxge_txdma_get_mbox: NULL mbox pointer"));
return (NULL);
}
if (!ndmas) {
"<== hxge_txdma_get_mbox: no channel allocated"));
return (NULL);
}
"<== hxge_txdma_get_mbox: NULL rings pointer"));
return (NULL);
}
"tx_rings $%p tx_desc_rings $%p ndmas %d",
"==> hxge_txdma_get_mbox: channel %d", tdc));
"<== hxge_txdma_get_mbox: tdc %d ring $%p",
}
}
return (NULL);
}
/*ARGSUSED*/
static hxge_status_t
{
/* Get the error counts if any */
"==> hxge_tx_err_evnts(channel %d): "
"fatal error: peu_resp_err", channel));
}
"==> hxge_tx_err_evnts(channel %d): "
"fatal error: pkt_size_hdr_err", channel));
}
"==> hxge_tx_err_evnts(channel %d): "
"fatal error: runt_pkt_drop_err", channel));
}
"==> hxge_tx_err_evnts(channel %d): "
"fatal error: pkt_size_err", channel));
}
if (tdc_stats->tx_rng_oflow) {
"==> hxge_tx_err_evnts(channel %d): "
"fatal error: tx_rng_oflow", channel));
}
/* Get the address of parity error read data */
"==> hxge_tx_err_evnts(channel %d): "
"fatal error: pref_par_err", channel));
}
"==> hxge_tx_err_evnts(channel %d): "
"fatal error: tdr_pref_cpl_to", channel));
}
tdc_stats->pkt_cpl_to++;
"==> hxge_tx_err_evnts(channel %d): "
"fatal error: pkt_cpl_to", channel));
}
tdc_stats->invalid_sop++;
"==> hxge_tx_err_evnts(channel %d): "
"fatal error: invalid_sop", channel));
}
"==> hxge_tx_err_evnts(channel %d): "
"fatal error: unexpected_sop", channel));
}
/* Clear error injection source in case this is an injected error */
if (txchan_fatal) {
" hxge_tx_err_evnts: "
" fatal error on channel %d cs 0x%llx\n",
}
}
return (status);
}
{
/*
* The FIFO is shared by all channels.
 * Get the status of the Reorder Buffer and Reorder Table Buffer errors.
*/
/*
* Clear the error bits. Note that writing a 1 clears the bit. Writing
* a 0 does nothing.
*/
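	/*
	 * W1C sketch: writing back the value just read acknowledges exactly
	 * the error bits that were set (register and local names assumed):
	 *
	 *	HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT,
	 *	    fifo_status.value);
	 */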
"==> hxge_txdma_handle_sys_errors: fatal error: "
"reord_tbl_par_err"));
}
"==> hxge_txdma_handle_sys_errors: "
"fatal error: reord_buf_ded_err"));
}
"==> hxge_txdma_handle_sys_errors: "
"reord_buf_sec_err"));
}
}
}
return (status);
}
static hxge_status_t
{
"Recovering from TxDMAChannel#%d error...", channel));
/*
 * Stop the DMA channel and wait for the stop-done bit. If the
 * stop-done bit is not set, then force an error.
*/
channel));
if (rs != HPI_SUCCESS) {
"==> hxge_txdma_fatal_err_recover (channel %d): "
"stop failed ", channel));
goto fail;
}
channel));
/*
* Reset TXDMA channel
*/
channel));
HPI_SUCCESS) {
"==> hxge_txdma_fatal_err_recover (channel %d)"
goto fail;
}
/*
 * Reset the tail (kick) register to 0. (Hardware will not reset it;
 * a Tx overflow fatal error results if the tail is not 0 after reset.)
*/
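	/*
	 * Sketch, as in hxge_reset_txdma_channel() above (handle and channel
	 * are this function's locals):
	 */
	TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0);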
/*
* Restart TXDMA channel
*
 * Initialize the TXDMA channel-specific FZC control configurations.
 * These FZC registers pertain to each TX channel (i.e. its logical
 * pages).
*/
channel));
goto fail;
/*
* Initialize the event masks.
*/
goto fail;
/*
 * Load the TXDMA descriptors, buffers, and mailbox, then initialize
 * and enable the DMA channel.
*/
channel));
goto fail;
"Recovery Successful, TxDMAChannel#%d Restored", channel));
return (HXGE_OK);
fail:
"hxge_txdma_fatal_err_recover (channel %d): "
"failed to recover this txdma channel", channel));
return (status);
}
static hxge_status_t
{
int i, ndmas;
"==> hxge_tx_port_fatal_err_recover"));
"Recovering from TxPort error..."));
/* Reset TDC block from PEU for this fatal error */
HXGE_DELAY(1000);
/*
 * Stop each DMA channel and wait for the stop-done bit. If the
 * stop-done bit is not set, then force an error.
*/
for (i = 0; i < ndmas; i++) {
if (tx_desc_rings[i] == NULL) {
continue;
}
}
for (i = 0; i < ndmas; i++) {
if (tx_desc_rings[i] == NULL) {
continue;
}
if (rs != HPI_SUCCESS) {
"==> hxge_txdma_fatal_err_recover (channel %d): "
"stop failed ", channel));
goto fail;
}
}
/*
 * Do reclaim on all of the DMAs.
*/
for (i = 0; i < ndmas; i++) {
if (tx_desc_rings[i] == NULL) {
continue;
}
}
/* Restart the TDC */
goto fail;
for (i = 0; i < ndmas; i++) {
if (tx_desc_rings[i] == NULL) {
continue;
}
}
"Recovery Successful, TxPort Restored"));
"<== hxge_tx_port_fatal_err_recover"));
return (HXGE_OK);
fail:
for (i = 0; i < ndmas; i++) {
if (tx_desc_rings[i] == NULL) {
continue;
}
}
"hxge_txdma_fatal_err_recover (channel %d): "
"failed to recover this txdma channel"));
return (status);
}