/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <npi_tx_rd64.h>
#include <npi_tx_wr64.h>
extern uint32_t nxge_tx_ring_size;
extern uint32_t nxge_bcopy_thresh;
extern uint32_t nxge_dvma_thresh;
extern uint32_t nxge_dma_stream_thresh;
extern dma_method_t nxge_force_dma;
extern uint32_t nxge_cksum_offload;
/* Device register access attributes for PIO. */
/* Device descriptor access attributes for DMA. */
/* Device buffer access attributes for DMA. */
extern ddi_dma_attr_t nxge_desc_dma_attr;
extern ddi_dma_attr_t nxge_tx_dma_attr;
extern void nxge_tx_ring_task(void *arg);
p_tx_mbox_t *);
p_tx_mbox_t *);
static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
{
int dev_gindex;
for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
if ((nxge_grp_dc_add(nxgep,
goto init_txdma_channels_exit;
}
}
}
break;
}
return (NXGE_OK);
for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
VP_BOUND_TX, tdc);
}
}
}
break;
}
return (NXGE_ERROR);
}
int channel)
{
"<== nxge_init_txdma_channel: status 0x%x", status));
return (status);
}
return (status);
}
return (status);
}
void
{
int tdc;
"nxge_uninit_txdma_channels: no channels"));
return;
}
}
}
}
void
{
}
}
void
{
}
/*
* nxge_reset_txdma_channel
*
* Reset a TDC.
*
* Arguments:
* nxgep
* channel The channel to reset.
* reg_data The current TX_CS.
*
* Notes:
*
* npi_txdma_channel_reset()
* npi_txdma_channel_control()
*
* Registers accessed:
* TX_CS DMC+0x40028 Transmit Control And Status
* TX_RING_KICK DMC+0x40018 Transmit Ring Kick
*
* Context:
* Any domain
*/
{
} else {
channel);
}
if (rs != NPI_SUCCESS) {
}
/*
* Reset the tail (kick) register to 0.
* (Hardware will not reset it. Tx overflow fatal
* error if tail is not set to 0 after reset!
*/
return (status);
}
/*
* nxge_init_txdma_channel_event_mask
*
* Enable interrupts for a set of events.
*
* Arguments:
* nxgep
* channel The channel to map.
* mask_p The events to enable.
*
* Notes:
*
* npi_txdma_event_mask()
*
* Registers accessed:
* TX_ENT_MSK DMC+0x40020 Transmit Event Mask
*
* Context:
* Any domain
*/
{
"<== nxge_init_txdma_channel_event_mask"));
if (rs != NPI_SUCCESS) {
}
return (status);
}
/*
* nxge_init_txdma_channel_cntl_stat
*
* Initialize the control and status (TX_CS) register of a TDC.
*
* Arguments:
* nxgep
* channel The channel whose TX_CS register to initialize.
*
* Notes:
*
* npi_txdma_control_status()
*
* Registers accessed:
* TX_CS DMC+0x40028 Transmit Control And Status
*
* Context:
* Any domain
*/
{
"<== nxge_init_txdma_channel_cntl_stat"));
if (rs != NPI_SUCCESS) {
}
return (status);
}
/*
* nxge_enable_txdma_channel
*
* Enable a TDC.
*
* Arguments:
* nxgep
* channel The channel to enable.
* tx_desc_p channel's transmit descriptor ring.
* mbox_p channel's mailbox.
*
* Notes:
*
* npi_txdma_ring_config()
* npi_txdma_mbox_config()
* npi_txdma_channel_init_enable()
*
* Registers accessed:
* TX_RNG_CFIG DMC+0x40000 Transmit Ring Configuration
* TXDMA_MBH DMC+0x40030 TXDMA Mailbox High
* TXDMA_MBL DMC+0x40038 TXDMA Mailbox Low
* TX_CS DMC+0x40028 Transmit Control And Status
*
* Context:
* Any domain
*/
{
/*
* Use configuration data composed at init time.
* Write to hardware the transmit ring configurations.
*/
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
if (isLDOMguest(nxgep)) {
/* Add interrupt handler for this channel. */
return (NXGE_ERROR);
}
/* Write to hardware the mailbox */
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
/* Start the DMA engine. */
if (rs != NPI_SUCCESS) {
return (NXGE_ERROR | rs);
}
return (status);
}
void
{
64 + sizeof (uint32_t)];
/*
* Caller should zero out the headers first.
*/
if (fill_len) {
"==> nxge_fill_tx_hdr: pkt_len %d "
goto fill_tx_header_done;
}
/*
* mp is the original data packet (does not include the
* Neptune transmit header).
*/
"mp $%p b_rptr $%p len %d",
/* copy ether_header from mblk to hdrs_buf */
tmp = sizeof (struct ether_vlan_header);
}
tmp = 1ull;
if (*(hdrs_buf + sizeof (struct ether_header))
== LLC_SNAP_SAP) {
sizeof (struct ether_header) + 6)));
"==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
eth_type));
} else {
goto fill_tx_header_done;
}
} else if (eth_type == VLAN_ETHERTYPE) {
tmp = 1ull;
hdrs_buf)->ether_type);
}
if (!is_vlan) {
eth_hdr_size = sizeof (struct ether_header);
} else {
eth_hdr_size = sizeof (struct ether_vlan_header);
}
switch (eth_type) {
case ETHERTYPE_IP:
mblk_len -= eth_hdr_size;
ip_buf += eth_hdr_size;
} else {
}
}
hdrs_size = 0;
sizeof (hdrs_buf))) {
if (mblk_len >=
}
ip_buf += eth_hdr_size;
}
" iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
"tmp 0x%x",
break;
case ETHERTYPE_IPV6:
hdrs_size = 0;
sizeof (hdrs_buf))) {
if (mblk_len >=
}
ip_buf += eth_hdr_size;
tmp = 1ull;
/* byte 6 is the next header protocol */
" iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
ipproto));
break;
default:
goto fill_tx_header_done;
}
switch (ipproto) {
case IPPROTO_TCP:
"==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
if (l4_cksum) {
"==> nxge_tx_pkt_hdr_init: TCP CKSUM "
}
break;
case IPPROTO_UDP:
if (l4_cksum) {
if (!nxge_cksum_offload) {
/*
* The checksum field has the
* partial checksum.
* IP_CSUM() macro calls ip_cksum() which
* can add in the partial checksum.
*/
if (nmp)
}
"==> nxge_tx_pkt_hdr_init: UDP offset %d "
"use sw cksum "
"write to $%p cksum 0x%x content up 0x%x",
up,
*up));
} else {
/* Hardware will compute the full checksum */
"==> nxge_tx_pkt_hdr_init: UDP offset %d "
" use partial checksum "
"cksum 0x%x ",
"value 0x%llx",
}
}
"==> nxge_tx_pkt_hdr_init: UDP"
break;
default:
goto fill_tx_header_done;
}
"==> nxge_fill_tx_hdr: pkt_len %d "
}
/*ARGSUSED*/
{
"<== nxge_tx_pkt_header_reserve: allocb failed"));
return (NULL);
}
"==> nxge_tx_pkt_header_reserve: get new mp"));
"b_rptr $%p b_wptr $%p",
"<== nxge_tx_pkt_header_reserve: use new mp"));
return (newmp);
}
int
{
"==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
nmblks = 0;
pkt_len = 0;
*tot_xfer_len_p = 0;
while (nmp) {
"len %d pkt_len %d nmblks %d tot_xfer_len %d",
*tot_xfer_len_p));
if (len <= 0) {
"==> nxge_tx_pkt_nmblocks: "
"len (0) pkt_len %d nmblks %d",
continue;
}
*tot_xfer_len_p += len;
"len %d pkt_len %d nmblks %d tot_xfer_len %d",
*tot_xfer_len_p));
if (len < nxge_bcopy_thresh) {
"==> nxge_tx_pkt_nmblocks: "
"len %d (< thresh) pkt_len %d nmblks %d",
if (pkt_len == 0)
nmblks++;
if (pkt_len >= nxge_bcopy_thresh) {
pkt_len = 0;
len = 0;
}
} else {
"==> nxge_tx_pkt_nmblocks: "
"len %d (> thresh) pkt_len %d nmblks %d",
pkt_len = 0;
nmblks++;
/*
* Hardware limits the transfer length to 4K.
* If len is more than 4K, we need to break
* it up to at most 2 more blocks.
*/
if (len > TX_MAX_TRANSFER_LENGTH) {
nsegs = 1;
"==> nxge_tx_pkt_nmblocks: "
"len %d pkt_len %d nmblks %d nsegs %d",
++nsegs;
}
do {
return (0);
}
nmblks++;
if (--nsegs) {
}
} while (nsegs);
}
}
/*
* Hardware limits the transmit gather pointers to 15.
*/
"==> nxge_tx_pkt_nmblocks: pull msg - "
"len %d pkt_len %d nmblks %d",
/* Pull all message blocks from b_cont */
return (0);
}
pkt_len = 0;
}
}
"<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
"nmblks %d len %d tot_xfer_len %d",
return (nmblks);
}
{
int rc;
(nmblks != 0));
"==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
nmblks));
if (!status) {
}
"==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
"tail_index %d tail_wrap %d "
"tx_desc_p $%p ($%p) ",
/*
* Read the hardware maintained transmit head
* and wrap around bit.
*/
"==> nxge_txdma_reclaim: "
"tx_rd_index %d tail %d tail_wrap %d "
"head %d wrap %d",
head_index, head_wrap));
if (head_index == tail_index) {
tail_index, tail_wrap) &&
(head_index == tx_rd_index)) {
"==> nxge_txdma_reclaim: EMPTY"));
return (B_TRUE);
}
"==> nxge_txdma_reclaim: Checking "
"if ring full"));
tail_wrap)) {
"==> nxge_txdma_reclaim: full"));
return (B_FALSE);
}
}
"==> nxge_txdma_reclaim: tx_rd_index and head_index"));
tx_desc_pp = &r_tx_desc;
while ((tx_rd_index != head_index) &&
(tx_ring_p->descs_pending != 0)) {
"==> nxge_txdma_reclaim: Checking if pending"));
"==> nxge_txdma_reclaim: "
"descs_pending %d ",
"==> nxge_txdma_reclaim: "
"(tx_rd_index %d head_index %d "
"(tx_desc_p $%p)",
tx_desc_p));
"==> nxge_txdma_reclaim: "
"(tx_rd_index %d head_index %d "
"tx_desc_p $%p (desc value 0x%llx) ",
"==> nxge_txdma_reclaim: dump desc:"));
"==> nxge_txdma_reclaim: pkt_len %d "
"tdc channel %d opackets %d",
tdc,
"tx_desc_p = $%p "
"tx_desc_pp = $%p "
"index = %d",
0, -1);
if (tx_ring_p->dvma_wr_index ==
tx_ring_p->dvma_wr_index = 0;
} else {
}
USE_DMA) {
"==> nxge_txdma_reclaim: "
"USE DMA"));
if (rc = ddi_dma_unbind_handle
(tx_msg_p->dma_handle)) {
"ddi_dma_unbind_handle "
"failed. status %d", rc);
}
}
"==> nxge_txdma_reclaim: count packets"));
/*
* count a chained packet only once.
*/
}
}
if (status) {
1, 0);
}
} else {
}
"<== nxge_txdma_reclaim status = 0x%08x", status));
return (status);
}
/*
* nxge_tx_intr
*
* Process a TDC interrupt
*
* Arguments:
* arg1 A Logical Device state Vector (LSV) data structure.
* arg2 nxge_t *
*
* Notes:
*
* npi_txdma_control_status()
* npi_intr_ldg_mgmt_set()
*
* nxge_tx_err_evnts()
* nxge_txdma_reclaim()
*
* Registers accessed:
* TX_CS DMC+0x40028 Transmit Control And Status
* PIO_LDSV
*
* Context:
* Any domain
*/
{
"<== nxge_tx_intr: nxgep $%p ldvp $%p",
return (DDI_INTR_UNCLAIMED);
}
}
"==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
"<== nxge_tx_intr: interface not started or intialized"));
return (DDI_INTR_CLAIMED);
}
/*
* This interrupt handler is for a specific
* transmit dma channel.
*/
/* Get the control and status for this channel. */
"==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
"channel %d",
"==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
"==> nxge_tx_intr:channel %d ring index %d "
"status 0x%08x (mk bit set)",
"==> nxge_tx_intr:channel %d ring index %d "
"status 0x%08x (mk bit set, calling reclaim)",
nxge_tx_ring_task((void *)tx_ring_p);
}
/*
* Process other transmit control and status.
* Check the ldv state.
*/
/*
* Rearm this logical group if this is a single device
* group.
*/
"==> nxge_tx_intr: rearm"));
if (isLDOMguest(nxgep)) {
} else {
}
}
}
return (serviced);
}
void
{
}
void
{
(void) nxge_txdma_stop(nxgep);
(void) nxge_fixup_txdma_rings(nxgep);
(void) nxge_tx_mac_enable(nxgep);
(void) nxge_txdma_hw_kick(nxgep);
}
int channel)
{
/*
* Stop the dma channel and wait for the stop-done.
* If the stop-done bit is not present, then force
* an error so TXC will stop.
* All channels bound to this port need to be stopped
* and reset after injecting an interrupt error.
*/
"==> nxge_txdma_channel_disable(%d) "
if (rs != NPI_SUCCESS) {
/* Inject any error */
"==> nxge_txdma_hw_mode: "
"channel %d (stop failed 0x%x) "
(void) npi_txdma_inj_int_error_set(
"==> nxge_txdma_hw_mode: "
"channel %d (stop again 0x%x) "
"(after inject err)",
}
return (rs);
}
/*
* nxge_txdma_hw_mode
*
* Toggle all TDCs on (enable) or off (disable).
*
* Arguments:
* nxgep
* enable Enable or disable a TDC.
*
* Notes:
*
* npi_txdma_channel_enable(TX_CS)
* npi_txdma_channel_disable(TX_CS)
* npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
*
* Registers accessed:
* TX_CS DMC+0x40028 Transmit Control And Status
* TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
*
* Context:
* Any domain
*/
{
int tdc;
"==> nxge_txdma_hw_mode: enable mode %d", enable));
"<== nxge_txdma_mode: not initialized"));
return (NXGE_ERROR);
}
"<== nxge_txdma_hw_mode: NULL ring pointer(s)"));
return (NXGE_ERROR);
}
/* Enable or disable all of the TDCs owned by us. */
if (ring) {
"==> nxge_txdma_hw_mode: channel %d", tdc));
if (enable) {
"==> nxge_txdma_hw_mode: "
"channel %d (enable) rs 0x%x",
} else {
}
}
}
}
"<== nxge_txdma_hw_mode: status 0x%x", status));
return (status);
}
void
{
"==> nxge_txdma_enable_channel: channel %d", channel));
/* enable the transmit dma channels */
}
void
{
"==> nxge_txdma_disable_channel: channel %d", channel));
/* stop the transmit dma channels */
}
/*
* nxge_txdma_stop_inj_err
*
* Stop a TDC. If at first we don't succeed, inject an error.
*
* Arguments:
* nxgep
* channel The channel to stop.
*
* Notes:
*
* npi_txdma_channel_disable()
* npi_txdma_inj_int_error_set()
* #if defined(NXGE_DEBUG)
* nxge_txdma_regs_dump_channels(nxgep);
* #endif
*
* Registers accessed:
* TX_CS DMC+0x40028 Transmit Control And Status
* TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
*
* Context:
* Any domain
*/
int
{
int status;
/*
* Stop the dma channel waits for the stop done.
* If the stop done bit is not set, then create
* an error.
*/
"<== nxge_txdma_stop_inj_err (channel %d): "
"stopped OK", channel));
return (status);
}
"==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
/* Inject any error */
/* Stop done bit will be set as a result of error injection */
if (!(rs & NPI_TXDMA_STOP_FAILED)) {
"<== nxge_txdma_stop_inj_err (channel %d): "
"stopped OK ", channel));
return (status);
}
#if defined(NXGE_DEBUG)
#endif
"==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) "
return (status);
}
/*ARGSUSED*/
void
{
int tdc;
"<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
return;
}
if (ring) {
"==> nxge_fixup_txdma_rings: channel %d",
tdc));
}
}
}
}
/*ARGSUSED*/
void
{
return;
}
"<== nxge_txdma_fix_channel: channel not matched "
"ring tdc %d passed channel",
return;
}
}
/*ARGSUSED*/
void
{
"<== nxge_txdma_fixup_channel: NULL ring pointer"));
return;
}
"<== nxge_txdma_fixup_channel: channel not matched "
"ring tdc %d passed channel",
return;
}
ring_p->descs_pending = 0;
}
/*ARGSUSED*/
void
{
int tdc;
"<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
return;
}
if (ring) {
"==> nxge_txdma_hw_kick: channel %d", tdc));
}
}
}
}
/*ARGSUSED*/
void
{
" nxge_txdma_kick_channel"));
return;
}
"<== nxge_txdma_kick_channel: channel not matched "
"ring tdc %d passed channel",
return;
}
}
/*ARGSUSED*/
void
{
"<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
return;
}
}
/*
* nxge_check_tx_hang
*
* Check the state of all TDCs belonging to nxgep.
*
* Arguments:
* nxgep
*
* Notes:
* Called by nxge_hw.c:nxge_check_hw_state().
*
*
* Registers accessed:
*
* Context:
* Any domain
*/
/*ARGSUSED*/
void
{
goto nxge_check_tx_hang_exit;
}
/*
* Needs inputs from hardware for regs:
* head index had not moved since last timeout.
* packets not transmitted or stuffed registers.
*/
if (nxge_txdma_hung(nxgep)) {
}
}
/*
* nxge_txdma_hung
*
* Determine whether any TDC owned by this nxge instance appears
* to be hung. Returns B_TRUE if a hung channel is found,
* B_FALSE otherwise.
*
* Arguments:
* nxgep
*
* Notes:
* Called by nxge_check_tx_hang()
*
* nxge_txdma_channel_hung()
*
* Registers accessed:
*
* Context:
* Any domain
*/
int
{
int tdc;
"<== nxge_txdma_hung: NULL ring pointer(s)"));
return (B_FALSE);
}
/*
* Grab the shared state of the TDC.
*/
if (isLDOMservice(nxgep)) {
} else {
}
/*
* Now, process continue to process.
*/
if (ring) {
"==> nxge_txdma_hung: TDC %d hung",
tdc));
return (B_TRUE);
}
}
}
}
return (B_FALSE);
}
/*
* nxge_txdma_channel_hung
*
* Determine whether a single TDC appears hung: the ring is full
* but the hardware head has not advanced. Returns B_TRUE if hung,
* B_FALSE if the ring is empty or still draining.
*
* Arguments:
* nxgep
* ring <channel>'s ring.
* channel The channel to check.
*
* Notes:
* Called by nxge_txdma.c:nxge_txdma_hung()
*
* npi_txdma_ring_head_get()
*
* Registers accessed:
* TX_RING_HDL DMC+0x40010 Transmit Ring Head Low
*
* Context:
* Any domain
*/
int
{
"==> nxge_txdma_channel_hung: channel %d", channel));
"==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
"tail_index %d tail_wrap %d ",
/*
* Read the hardware maintained transmit head
* and wrap around bit.
*/
"==> nxge_txdma_channel_hung: "
"tx_rd_index %d tail %d tail_wrap %d "
"head %d wrap %d",
head_index, head_wrap));
tail_index, tail_wrap) &&
(head_index == tx_rd_index)) {
"==> nxge_txdma_channel_hung: EMPTY"));
return (B_FALSE);
}
"==> nxge_txdma_channel_hung: Checking if ring full"));
tail_wrap)) {
"==> nxge_txdma_channel_hung: full"));
return (B_TRUE);
}
return (B_FALSE);
}
/*
* nxge_fixup_hung_txdma_rings
*
* Walk all TDCs owned by this nxge instance and attempt to fix
* each hung channel.
*
* Arguments:
* nxgep
*
* Notes:
* Called by nxge_check_tx_hang()
*
* npi_txdma_ring_head_get()
*
* Registers accessed:
* TX_RING_HDL DMC+0x40010 Transmit Ring Head Low
*
* Context:
* Any domain
*/
/*ARGSUSED*/
void
{
int tdc;
"<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
return;
}
if (ring) {
"==> nxge_fixup_hung_txdma_rings: TDC %d",
tdc));
}
}
}
}
/*
* nxge_txdma_fixup_hung_channel
*
* 'Fix' a hung TDC.
*
* Arguments:
* nxgep
* channel The channel to fix.
*
* Notes:
* Called by nxge_fixup_hung_txdma_rings()
*
* 1. Reclaim the TDC.
* 2. Disable the TDC.
*
* nxge_txdma_reclaim()
* npi_txdma_channel_disable(TX_CS)
* npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
*
* Registers accessed:
* TX_CS DMC+0x40028 Transmit Control And Status
* TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
*
* Context:
* Any domain
*/
/*ARGSUSED*/
void
{
"<== nxge_txdma_fix_hung_channel"));
return;
}
"<== nxge_txdma_fix_hung_channel: channel not matched "
"ring tdc %d passed channel",
return;
}
}
/*ARGSUSED*/
void
{
"<== nxge_txdma_fixup_channel: NULL ring pointer"));
return;
}
"<== nxge_txdma_fixup_hung_channel: channel "
"not matched "
"ring tdc %d passed channel",
return;
}
/* Reclaim descriptors */
/*
* Stop the dma channel waits for the stop done.
* If the stop done bit is not set, then force
* an error.
*/
if (!(status & NPI_TXDMA_STOP_FAILED)) {
"<== nxge_txdma_fixup_hung_channel: stopped OK "
"ring tdc %d passed channel %d",
return;
}
/* Inject any error */
/* Stop done bit will be set as a result of error injection */
if (!(status & NPI_TXDMA_STOP_FAILED)) {
"<== nxge_txdma_fixup_hung_channel: stopped again"
"ring tdc %d passed channel",
return;
}
"<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
"ring tdc %d passed channel",
}
/*ARGSUSED*/
void
{
int tdc;
"<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
return;
}
if (ring) {
"==> nxge_reclaim_rings: TDC %d", tdc));
}
}
}
}
void
{
int tdc;
if (!isLDOMguest(nxgep)) {
(void) npi_txdma_dump_fzc_regs(handle);
/* Dump TXC registers. */
(void) npi_txc_dump_fzc_regs(handle);
}
"<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
return;
}
if (ring) {
"==> nxge_txdma_regs_dump_channels: "
"TDC %d", tdc));
/* Dump TXC registers, if able to. */
if (!isLDOMguest(nxgep)) {
"==> nxge_txdma_regs_dump_channels:"
" FZC TDC %d", tdc));
(void) npi_txc_dump_tdc_fzc_regs
}
}
}
}
}
void
{
printf("\n\tfunc # %d tdc %d ",
printf("\n\tlog page func %d valid page 0 %d",
printf("\n\tlog page func %d valid page 1 %d",
printf("\n\thead value is 0x%0llx",
printf("\n\tkick value is 0x%0llx",
printf("\n\tTXC port control 0x%0llx",
{
#if defined(__i386)
#else
#endif
}
}
/*
* nxge_tdc_hvio_setup
*
* I'm not exactly sure what this code does.
*
* Arguments:
* nxgep
* channel The channel to map.
*
* Notes:
*
* na
*
* Context:
* Service domain?
*/
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
static void
{
"hv data buf base io $%p size 0x%llx (%d) buf base io $%p "
"orig vatopa base io $%p orig_len 0x%llx (%d)",
"hv cntl base io $%p orig ioaddr_pp ($%p) "
"orig vatopa ($%p) size 0x%llx (%d 0x%x)",
}
#endif
static nxge_status_t
{
"<== nxge_map_txdma: buf not allocated"));
return (NXGE_ERROR);
}
}
return (NXGE_ERROR);
"tx_rings $%p tx_desc_rings $%p",
/*
* Map descriptors from the buffer pools for <channel>.
*/
/*
* Set up and prepare buffer blocks, descriptors
* and mailbox.
*/
"==> nxge_map_txdma(%d): nxge_map_txdma_channel() "
"returned 0x%x",
return (status);
}
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
if (isLDOMguest(nxgep)) {
} else {
}
#endif
return (status);
}
static nxge_status_t
{
/*
* Set up and prepare buffer blocks, descriptors
* and mailbox.
*/
"==> nxge_map_txdma_channel (channel %d)", channel));
/*
* Transmit buffer blocks
*/
"==> nxge_map_txdma_channel (channel %d): "
}
/*
* Transmit block ring, and mailbox.
*/
"==> nxge_map_txdma_channel: unmap buf"
"(status 0x%x channel %d)",
"<== nxge_map_txdma_channel: "
"(status 0x%x channel %d)",
return (status);
}
/*ARGSUSED*/
static void
{
"==> nxge_unmap_txdma_channel (channel %d)", channel));
/*
* unmap tx block ring, and mailbox.
*/
/* unmap buffer blocks */
/*
* Cleanup the reference to the ring now that it does not exist.
*/
}
/*
* nxge_map_txdma_channel_cfg_ring
*
* Map a TDC into our kernel space.
* This function allocates all of the per-channel data structures.
*
* Arguments:
* nxgep
* dma_channel The channel to map.
* dma_cntl_p
* tx_ring_p dma_channel's transmit ring
* tx_mbox_p dma_channel's mailbox
*
* Notes:
*
* nxge_setup_dma_common()
*
* Registers accessed:
* none.
*
* Context:
* Any domain
*/
/*ARGSUSED*/
static void
{
"==> nxge_map_txdma_channel_cfg_ring"));
cntl_dmap = *dma_cntl_p;
sizeof (tx_desc_t));
/*
* Zero out transmit ring descriptors.
*/
tx_ring_cfig_p->value = 0;
tx_ring_kick_p->value = 0;
tx_evmask_p->value = 0;
"==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
tx_ring_cfig_p->value = 0;
"==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
tx_ring_cfig_p->value));
/* Map in mailbox */
mboxp = (p_tx_mbox_t)
"==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
"==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
"==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
"mbox $%p",
"<== nxge_map_txdma_channel_cfg_ring"));
}
/*ARGSUSED*/
static void
{
"==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
"<== nxge_unmap_txdma_channel_cfg_ring"));
}
/*
* nxge_map_txdma_channel_buf_ring
*
*
* Arguments:
* nxgep
* channel The channel to map.
* dma_buf_p
* tx_desc_p channel's descriptor ring
* num_chunks
*
* Notes:
*
* nxge_setup_dma_common()
*
* Registers accessed:
* none.
*
* Context:
* Any domain
*/
static nxge_status_t
{
int i, j, index;
"==> nxge_map_txdma_channel_buf_ring"));
" nxge_map_txdma_channel_buf_ring: channel %d to map %d "
"chunks bufp $%p",
nmsgs = 0;
for (i = 0; i < num_chunks; i++, tmp_bufp++) {
"==> nxge_map_txdma_channel_buf_ring: channel %d "
"bufp $%p nblocks %d nmsgs %d",
}
if (!nmsgs) {
"<== nxge_map_txdma_channel_buf_ring: channel %d "
"no msg blocks",
channel));
status = NXGE_ERROR;
}
tx_ring_p = (p_tx_ring_t)
(void *)nxgep->interrupt_cookie);
TASKQ_DEFAULTPRI, 0);
}
/*
* Allocate transmit message rings and handles for packets
* not to be copied to premapped buffers.
*/
for (i = 0; i < nmsgs; i++) {
DDI_DMA_DONTWAIT, 0,
&tx_msg_ring[i].dma_handle);
if (ddi_status != DDI_SUCCESS) {
break;
}
}
if (i < nmsgs) {
"Allocate handles failed."));
}
if (!nxge_tx_intr_thres) {
}
tx_ring_p->descs_pending = 0;
"==> nxge_map_txdma_channel_buf_ring: channel %d "
"actual tx desc max %d nmsgs %d "
"(config nxge_tx_ring_size %d)",
/*
* Map in buffers from the buffer pool.
*/
index = 0;
"dma_bufp $%p tx_rng_p $%p "
"tx_msg_rng_p $%p bsize %d",
for (i = 0; i < num_chunks; i++, dma_bufp++) {
"==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
"size %d dma_bufp $%p",
i, sizeof (nxge_dma_common_t), dma_bufp));
for (j = 0; j < nblocks; j++) {
#ifdef TX_MEM_DEBUG
"==> nxge_map_txdma_channel_buf_ring: j %d"
"dmap $%p", i, dmap));
#endif
bsize);
}
}
if (i < num_chunks) {
status = NXGE_ERROR;
}
}
index--;
}
}
status = NXGE_ERROR;
"<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
return (status);
}
/*ARGSUSED*/
static void
{
int i;
"==> nxge_unmap_txdma_channel_buf_ring"));
"<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
return;
}
"==> nxge_unmap_txdma_channel_buf_ring: channel %d",
/*
* Since the serialization thread, timer thread and
* interrupt thread can all call the transmit reclaim,
* the unmapping function needs to acquire the lock
* to free those buffers which were transmitted
* by the hardware already.
*/
"==> nxge_unmap_txdma_channel_buf_ring (reclaim): "
"channel %d",
for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
tx_msg_p = &tx_msg_ring[i];
}
}
for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
}
}
}
"<== nxge_unmap_txdma_channel_buf_ring"));
}
static nxge_status_t
{
"<== nxge_txdma_hw_start: NULL ring pointer"));
return (NXGE_ERROR);
}
if (tx_desc_rings == NULL) {
"<== nxge_txdma_hw_start: NULL ring pointers"));
return (NXGE_ERROR);
}
}
"tx_rings $%p rings $%p",
"tx_rings $%p tx_desc_rings $%p",
goto nxge_txdma_hw_start_exit;
"==> nxge_txdma_hw_start: disable "
"==> nxge_txdma_hw_start: (status 0x%x)", status));
return (status);
}
/*
* nxge_txdma_start_channel
*
* Start a TDC.
*
* Arguments:
* nxgep
* channel The channel to start.
* tx_ring_p channel's transmit descriptor ring.
* tx_mbox_p channel's mailbox.
*
* Notes:
*
* nxge_reset_txdma_channel()
* nxge_init_txdma_channel_event_mask()
* nxge_enable_txdma_channel()
*
* Registers accessed:
* none directly (see functions above).
*
* Context:
* Any domain
*/
static nxge_status_t
{
"==> nxge_txdma_start_channel (channel %d)", channel));
/*
*/
/*
* Reset TXDMA channel
*/
"==> nxge_txdma_start_channel (channel %d)"
}
/*
* Initialize the TXDMA channel specific FZC control
* configurations. These FZC registers are pertaining
* to each TX channel (i.e. logical pages).
*/
if (!isLDOMguest(nxgep)) {
}
}
/*
* Initialize the event masks.
*/
}
/*
* Load TXDMA descriptors, buffers, mailbox,
* initialise the DMA channels and
* enable each DMA channel.
*/
}
return (status);
}
/*
* nxge_txdma_stop_channel
*
* Stop a TDC.
*
* Arguments:
* nxgep
* channel The channel to stop.
* tx_ring_p channel's transmit descriptor ring.
* tx_mbox_p channel's mailbox.
*
* Notes:
*
* nxge_txdma_stop_inj_err()
* nxge_reset_txdma_channel()
* nxge_init_txdma_channel_event_mask()
* nxge_init_txdma_channel_cntl_stat()
* nxge_disable_txdma_channel()
*
* Registers accessed:
* none directly (see functions above).
*
* Context:
* Any domain
*/
/*ARGSUSED*/
static nxge_status_t
{
"==> nxge_txdma_stop_channel: channel %d", channel));
/*
* Stop (disable) TXDMA and TXC (if stop bit is set
* and STOP_N_GO bit not set, the TXDMA reset state will
* not be set if reset TXDMA.
*/
status = NXGE_ERROR;
}
status = NXGE_ERROR;
}
/*
* Reset TXDMA channel
*/
}
#ifdef HARDWARE_REQUIRED
/* Set up the interrupt event masks. */
}
/* Initialize the DMA control and status register */
}
/* Disable channel */
}
"==> nxge_txdma_stop_channel: event done"));
#endif
return (status);
}
/*
* nxge_txdma_get_ring
*
* Get the ring for a TDC.
*
* Arguments:
* nxgep
* channel
*
* Notes:
*
*
* Registers accessed:
*
* Context:
* Any domain
*/
static p_tx_ring_t
{
int tdc;
"<== nxge_txdma_get_ring: NULL ring pointer(s)"));
goto return_null;
}
if (ring) {
"<== nxge_txdma_get_ring: "
return (ring);
}
}
}
}
"ring not found"));
return (NULL);
}
/*
* nxge_txdma_get_mbox
*
* Get the mailbox for a TDC.
*
* Arguments:
* nxgep
* channel
*
* Notes:
*
*
* Registers accessed:
*
* Context:
* Any domain
*/
static p_tx_mbox_t
{
int tdc;
if (nxgep->tx_mbox_areas_p == 0 ||
"<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
goto return_null;
}
"<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
goto return_null;
}
if (ring) {
"<== nxge_txdma_get_mbox: tdc %d "
return (mailbox);
}
}
}
}
"mailbox not found"));
return (NULL);
}
/*
* nxge_tx_err_evnts
*
* Process the error events indicated in TX_CS for a TDC and, on a
* fatal error, initiate channel recovery.
*
* Arguments:
* nxgep
* index The index to the TDC ring.
* ldvp Used to get the channel number ONLY.
* cs A copy of the bits from TX_CS.
*
* Notes:
* Calling tree:
* nxge_tx_intr()
*
* npi_txdma_ring_error_get()
* npi_txdma_inj_par_error_get()
* nxge_txdma_fatal_err_recover()
*
* Registers accessed:
* TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High
* TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
* TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
*
* Context:
* Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR.
*/
/*ARGSUSED*/
static nxge_status_t
{
return (NXGE_ERROR | rs);
}
"==> nxge_tx_err_evnts(channel %d): "
"fatal error: mailbox", channel));
}
"==> nxge_tx_err_evnts(channel %d): "
"fatal error: pkt_size_err", channel));
}
"==> nxge_tx_err_evnts(channel %d): "
"fatal error: tx_ring_oflow", channel));
}
"==> nxge_tx_err_evnts(channel %d): "
"fatal error: pre_buf_par_err", channel));
/* Clear error injection source for parity error */
}
"==> nxge_tx_err_evnts(channel %d): "
"fatal error: nack_pref", channel));
}
tdc_stats->nack_pkt_rd++;
"==> nxge_tx_err_evnts(channel %d): "
"fatal error: nack_pkt_rd", channel));
}
"==> nxge_tx_err_evnts(channel %d): "
"fatal error: config_partition_err", channel));
}
"==> nxge_tx_err_evnts(channel %d): "
"fatal error: pkt_prt_err", channel));
}
/* Clear error injection source in case this is an injected error */
if (txchan_fatal) {
" nxge_tx_err_evnts: "
" fatal error on channel %d cs 0x%llx\n",
}
}
return (status);
}
static nxge_status_t
{
"Recovering from TxDMAChannel#%d error...", channel));
/*
* Stop the dma channel waits for the stop done.
* If the stop done bit is not set, then create
* an error.
*/
if (rs != NPI_SUCCESS) {
"==> nxge_txdma_fatal_err_recover (channel %d): "
"stop failed ", channel));
goto fail;
}
/*
* Reset TXDMA channel
*/
NPI_SUCCESS) {
"==> nxge_txdma_fatal_err_recover (channel %d)"
goto fail;
}
/*
* Reset the tail (kick) register to 0.
* (Hardware will not reset it. Tx overflow fatal
* error if tail is not set to 0 after reset!
*/
/* Restart TXDMA channel */
if (!isLDOMguest(nxgep)) {
// XXX This is a problem in HIO!
/*
* Initialize the TXDMA channel specific FZC control
* configurations. These FZC registers are pertaining
* to each TX channel (i.e. logical pages).
*/
goto fail;
}
/*
* Initialize the event masks.
*/
goto fail;
/*
* Load TXDMA descriptors, buffers, mailbox,
* initialise the DMA channels and
* enable each DMA channel.
*/
goto fail;
"Recovery Successful, TxDMAChannel#%d Restored",
channel));
return (NXGE_OK);
fail:
"nxge_txdma_fatal_err_recover (channel %d): "
"failed to recover this txdma channel", channel));
return (status);
}
/*
* nxge_tx_port_fatal_err_recover
*
* Attempt to recover from a fatal port error.
*
* Arguments:
* nxgep
*
* Notes:
* How would a guest do this?
*
*
* Registers accessed:
*
* Context:
* Service domain
*/
{
"Recovering from TxPort error..."));
if (isLDOMguest(nxgep)) {
return (NXGE_OK);
}
"<== nxge_tx_port_fatal_err_recover: not initialized"));
return (NXGE_ERROR);
}
"<== nxge_tx_port_fatal_err_recover: "
"NULL ring pointer(s)"));
return (NXGE_ERROR);
}
if (ring)
}
}
/*
* Stop all the TDCs owned by us.
* (The shared TDCs will have been stopped by their owners.)
*/
if (ring) {
if (rs != NPI_SUCCESS) {
"nxge_tx_port_fatal_err_recover "
"(channel %d): stop failed ", tdc));
goto fail;
}
}
}
}
if (ring) {
}
}
}
/*
* Reset all the TDCs.
*/
if (ring) {
if ((rs = npi_txdma_channel_control
!= NPI_SUCCESS) {
"nxge_tx_port_fatal_err_recover "
"(channel %d) reset channel "
goto fail;
}
}
/*
* Reset the tail (kick) register to 0.
* (Hardware will not reset it. Tx overflow fatal
* error if tail is not set to 0 after reset!
*/
}
}
/* Restart all the TDCs */
if (ring) {
/*
* Initialize the event masks.
*/
goto fail;
goto fail;
}
}
}
/* Re-enable all the TDCs */
if (ring) {
goto fail;
}
}
}
/*
* Unlock all the TDCs.
*/
if (ring)
}
}
return (NXGE_OK);
fail:
if (ring)
}
}
return (status);
}
/*
* nxge_txdma_inject_err
*
* Inject an error into a TDC.
*
* Arguments:
* nxgep
* err_id The error to inject.
* chan The channel to inject into.
*
* Notes:
* This is called from nxge_main.c:nxge_err_inject()
* Has this ioctl ever been used?
*
* npi_txdma_inj_par_error_get()
* npi_txdma_inj_par_error_set()
*
* Registers accessed:
* TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
* TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
* TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
*
* Context:
* Service domain
*/
void
{
switch (err_id) {
/* Clear error injection source for parity error */
break;
else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
#if defined(__i386)
#else
#endif
break;
}
}