t4_sge.c revision 56b2bdd1f04d465cfe4a95b88ae5cba5884154e4
/*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
*/
/*
* This file is part of the Chelsio T4 support code.
*
* Copyright (C) 2010-2013 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#include "version.h"
#include "common/t4_regs_values.h"
/* TODO: Tune. */
int rx_buf_size = 8192;
int tx_copy_threshold = 256;
enum {
};
/* Used to track coalesced tx work request */
struct txpkts {
};
/* All information needed to tx a frame */
struct txinfo {
};
int i);
#ifndef TCP_OFFLOAD_DISABLE
int intr_idx);
#endif
#ifndef TCP_OFFLOAD_DISABLE
#endif
static void refill_sfl(void *arg);
int *fl_bufs_used);
int len);
mblk_t *m);
int idx);
int idx);
mblk_t *m);
static inline int
{
unsigned int cidx;
else
}
void
{
uint32_t v;
/*
* Device access and DMA attributes for descriptor rings
*/
dma_attr->dma_attr_addr_lo = 0;
dma_attr->dma_attr_flags = 0;
/*
* Device access and DMA attributes for tx buffers
*/
dma_attr->dma_attr_addr_lo = 0;
dma_attr->dma_attr_flags = 0;
/*
* Device access and DMA attributes for rx buffers
*/
dma_attr->dma_attr_addr_lo = 0;
/*
* Low 4 bits of an rx buffer address have a special meaning to the SGE
* and an rx buf cannot have an address with any of these bits set.
* FL_ALIGN is >= 32 so we're sure things are ok.
*/
dma_attr->dma_attr_flags = 0;
V_THRESHOLD_0(p->counter_val[0]) |
}
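/*
 * Illustrative sketch of the constraint described above; not part of the
 * original driver, and check_rx_buf_alignment() is a hypothetical name.
 * Any bus address that is a multiple of FL_ALIGN (>= 32) necessarily has
 * its low 4 bits clear, which is what the SGE requires of rx buffers.
 */
static inline boolean_t
check_rx_buf_alignment(uint64_t bus_addr, uint64_t fl_align)
{
	/* fl_align is assumed to be a power of two that is at least 32. */
	ASSERT(fl_align >= 32 && (fl_align & (fl_align - 1)) == 0);

	return ((bus_addr & (fl_align - 1)) == 0 &&
	    (bus_addr & 0xf) == 0 ? B_TRUE : B_FALSE);
}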
/*
* Allocate and initialize the firmware event queue and the forwarded interrupt
* queues, if any. The adapter owns all these queues as they are not associated
* with any particular port.
*
* Returns errno on failure. Resources allocated up to that point may still be
* allocated. Caller is responsible for cleanup in case this function fails.
*/
int
{
int rc;
/*
* Firmware event queue
*/
if (rc != 0)
return (rc);
/*
* Management queue. This is just a control queue that uses the fwq as
* its associated iq.
*/
return (rc);
}
/*
* Idempotent
*/
int
{
return (0);
}
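/*
 * Hedged usage sketch (hypothetical caller, not from this file): because a
 * failed setup may leave partially allocated queues behind, a caller is
 * expected to follow any failure with the matching (idempotent) teardown
 * routine. The names setup_adapter_queues_example, t4_setup_adapter_queues
 * and t4_teardown_adapter_queues are assumptions about the surrounding code.
 */
static int
setup_adapter_queues_example(struct adapter *sc)
{
	int rc;

	rc = t4_setup_adapter_queues(sc);
	if (rc != 0)
		(void) t4_teardown_adapter_queues(sc);

	return (rc);
}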
static inline int
{
int rc = T4_EXTRA_INTR, i;
return (0);
for_each_port(sc, i) {
break;
#ifndef TCP_OFFLOAD_DISABLE
else
#else
/*
* Not compiled with offload support and intr_count > 1. Only
* NIC queues exist and they'd better be taking direct
* interrupts.
*/
#endif
}
return (rc);
}
/*
* Given an arbitrary "index," come up with an iq that can be used by other
* queues (of this port) for interrupt forwarding, SGE egress updates, etc.
* The iq returned is guaranteed to be something that takes direct interrupts.
*/
static struct sge_iq *
{
#ifndef TCP_OFFLOAD_DISABLE
} else
} else {
else
}
#else
/*
* Not compiled with offload support and intr_count > 1. Only NIC
* queues exist and they'd better be taking direct interrupts.
*/
#endif
return (iq);
}
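/*
 * Minimal sketch of the selection idea in the comment above (hypothetical
 * helper, not driver code): reduce an arbitrary index modulo the number of
 * queues that take direct interrupts, so every index maps to some queue
 * that can service forwarded interrupts and egress updates.
 */
static inline int
pick_direct_intr_queue(int idx, int nqueues)
{
	ASSERT(nqueues > 0);

	return (idx % nqueues);
}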
int
{
#ifndef TCP_OFFLOAD_DISABLE
struct sge_ofld_rxq *ofld_rxq;
#endif
/* Interrupt vector to start from (when using multiple vectors) */
/*
* First pass over all rx queues (NIC and TOE):
* a) initialize iq and fl
* b) allocate queue iff it will take direct interrupts.
*/
#ifndef TCP_OFFLOAD_DISABLE
#else
#endif
{
if (rc != 0)
goto done;
intr_idx++;
}
}
#ifndef TCP_OFFLOAD_DISABLE
p->qsize_rxq, RX_IQ_ESIZE);
if (rc != 0)
goto done;
intr_idx++;
}
}
#endif
/*
* Second pass over all rx queues (NIC and TOE). The queues forwarding
* their interrupts are allocated now.
*/
j = 0;
continue;
if (rc != 0)
goto done;
j++;
}
#ifndef TCP_OFFLOAD_DISABLE
continue;
if (rc != 0)
goto done;
j++;
}
#endif
/*
* Now the tx queues. Only one pass needed.
*/
j = 0;
if (rc != 0)
goto done;
}
#ifndef TCP_OFFLOAD_DISABLE
iqid);
if (rc != 0)
goto done;
}
#endif
/*
* Finally, the control queue.
*/
done:
if (rc != 0)
(void) t4_teardown_port_queues(pi);
return (rc);
}
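/*
 * Hedged sketch of the two-pass strategy used above (all names and the
 * fixed queue count are hypothetical): pass 1 creates the queues that take
 * direct interrupts and hands each one a vector; pass 2 points every
 * remaining queue at one of the pass-1 queues for interrupt forwarding.
 */
#define	EXAMPLE_NQ	8

static void
two_pass_queue_setup_example(int nvec, int takes_direct[EXAMPLE_NQ],
    int fwd_target[EXAMPLE_NQ])
{
	int i, vec = 0;

	ASSERT(nvec > 0);

	/* Pass 1: the first nvec queues take direct interrupts. */
	for (i = 0; i < EXAMPLE_NQ; i++) {
		takes_direct[i] = (vec < nvec);
		if (takes_direct[i])
			vec++;
	}

	/* Pass 2: everyone else forwards to one of the pass-1 queues. */
	for (i = 0; i < EXAMPLE_NQ; i++) {
		if (takes_direct[i])
			fwd_target[i] = -1;	/* services its own interrupt */
		else
			fwd_target[i] = i % vec;
	}
}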
/*
* Idempotent
*/
int
{
int i;
#ifndef TCP_OFFLOAD_DISABLE
struct sge_ofld_rxq *ofld_rxq;
#endif
}
}
}
#ifndef TCP_OFFLOAD_DISABLE
}
}
#endif
}
/*
* Then take down the rx queues that take direct interrupts.
*/
}
#ifndef TCP_OFFLOAD_DISABLE
}
#endif
return (0);
}
/* Deals with errors and forwarded interrupts */
{
return (DDI_INTR_CLAIMED);
}
/* Deals with interrupts on the given ingress queue */
/* ARGSUSED */
{
/* LINTED: E_BAD_PTR_CAST_ALIGN */
(void) service_iq(iq, 0);
}
return (DDI_INTR_CLAIMED);
}
/* Deals with error interrupts */
/* ARGSUSED */
{
/* LINTED: E_BAD_PTR_CAST_ALIGN */
(void) t4_slow_intr_handler(sc);
return (DDI_INTR_CLAIMED);
}
/*
* Deals with anything and everything on the given ingress queue.
*/
static int
{
struct sge_iq *q;
const struct rss_header *rss;
int rsp_type;
mblk_t *m;
/*
* We always come back and check the descriptor ring for new indirect
* interrupts and other responses after running a single handler.
*/
for (;;) {
m = NULL;
switch (rsp_type) {
case X_RSPD_TYPE_FLBUF:
if (m == NULL) {
__LINE__);
}
/* FALLTHRU */
case X_RSPD_TYPE_CPL:
break;
case X_RSPD_TYPE_INTR:
/*
* Interrupts should be forwarded only to queues
* that are not forwarding their interrupts.
* This means service_iq can recurse but only 1
* level deep.
*/
(void) atomic_cas_uint(
IQS_IDLE);
} else {
STAILQ_INSERT_TAIL(&iql, q,
link);
}
}
break;
default:
break;
}
ndescs = 0;
if (fl_bufs_used > 0) {
fl_bufs_used = 0;
}
if (budget != 0)
return (EINPROGRESS);
}
}
if (STAILQ_EMPTY(&iql) != 0)
break;
/*
* Process the head only, and send it to the back of the list if
* it's still not done.
*/
q = STAILQ_FIRST(&iql);
else
}
int starved;
if (starved != 0)
}
return (0);
}
int
{
}
/*
* Doesn't fail. Holds on to work requests it can't send right away.
*/
int
{
int can_reclaim;
#ifndef TCP_OFFLOAD_DISABLE
#else
#endif
else
}
mblk_t *m;
break;
}
}
}
else {
}
return (0);
}
/* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */
#define TXPKTS_PKT_HDR ((\
sizeof (struct ulp_txpkt) + \
sizeof (struct ulptx_idata) + \
sizeof (struct cpl_tx_pkt_core)) / 8)
/* Header of a coalesced tx WR, before SGL of first packet (in flits) */
#define TXPKTS_WR_HDR (\
sizeof (struct fw_eth_tx_pkts_wr) / 8 + \
TXPKTS_PKT_HDR)
/* Header of a tx WR, before SGL of first packet (in flits) */
#define TXPKT_WR_HDR ((\
sizeof (struct fw_eth_tx_pkt_wr) + \
sizeof (struct cpl_tx_pkt_core)) / 8)
/* Header of a tx LSO WR, before SGL of first packet (in flits) */
#define TXPKT_LSO_WR_HDR ((\
sizeof (struct fw_eth_tx_pkt_wr) + \
sizeof (struct cpl_tx_pkt_lso) + \
sizeof (struct cpl_tx_pkt_core)) / 8)
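/*
 * A "flit" is 8 bytes; the "/ 8" in the macros above converts header sizes
 * in bytes to flits. Hedged helper (not in the original file) that rounds
 * an arbitrary byte count up to whole flits.
 */
static inline uint_t
bytes_to_flits(uint_t len)
{
	return ((len + 7) / 8);
}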
mblk_t *
{
int rc, coalescing;
coalescing = 0;
break;
if (next_frame != NULL)
coalescing = 1;
if (rc != 0) {
/* Short of resources, suspend tx */
break;
}
/*
* Unrecoverable error for this frame, throw it
* away and move on to the next.
*/
continue;
}
if (coalescing != 0 &&
/* Successfully absorbed into txpkts */
goto doorbell;
}
/*
* We weren't coalescing to begin with, or current frame could
* not be coalesced (add_to_txpkts flushes txpkts if a frame
* given to it can't be coalesced). Either way there should be
* nothing in txpkts.
*/
/* We're sending out individual frames now */
coalescing = 0;
if (rc != 0) {
/* Short of hardware descriptors, suspend tx */
/*
* This is an unlikely but expensive failure. We've
* done all the hard work (DMA bindings etc.) and now we
* can't send out the frame. What's worse, we have to
* spend even more time freeing up everything in txinfo.
*/
break;
}
/* Fewer and fewer doorbells as the queue fills up */
}
/*
* frame not NULL means there was an error but we haven't thrown it
* away. This can happen when we're short of tx descriptors (qfull) or
* maybe even DMA handles (dma_hdl_failed). Either way, a credit flush
* and reclaim will get things going again.
*
* If eq->avail is already 0 we know a credit flush was requested in the
* WR that reduced it to 0 so we don't need another flush (we don't have
* any descriptor for a flush WR anyway, duh).
*/
return (frame);
}
static inline void
{
if (pktc_idx >= 0) {
}
}
static inline void
{
}
static inline void
{
}
/*
* Allocates the ring for an ingress queue and an optional freelist. If the
* freelist is specified it will be allocated and then associated with the
* ingress queue.
*
* Returns errno on failure. Resources allocated up to that point may still be
* allocated. Caller is responsible for cleanup in case this function fails.
*
* If the ingress queue will take interrupts directly (iq->flags & IQ_INTR) then
* the intr_idx specifies the vector, starting from 0. Otherwise it specifies
* the index of the queue to which its interrupts will be forwarded.
*/
static int
{
struct fw_iq_cmd c;
uint32_t v = 0;
if (rc != 0)
return (rc);
bzero(&c, sizeof (c));
V_FW_IQ_CMD_VFN(0));
FW_LEN16(c));
/* Special handling for firmware event queue */
v |= F_FW_IQ_CMD_IQASYNCH;
else
v |= F_FW_IQ_CMD_IQANDST;
v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
c.type_to_iqandstindex = cpu_to_be32(v |
if (cong >= 0)
if (rc != 0)
return (rc);
/* Allocate space for one software descriptor per buffer. */
KM_SLEEP);
c.iqns_to_fl0congen |=
if (cong >= 0) {
c.iqns_to_fl0congen |=
}
}
if (rc != 0) {
"failed to create ingress queue: %d", rc);
return (rc);
}
}
panic("%s: fl->cntxt_id (%d) more than the max (%d)",
}
}
/* Enable IQ interrupts */
return (0);
}
static int
{
int rc;
if (rc != 0) {
return (rc);
}
}
}
}
}
}
}
}
return (0);
}
static int
{
if (rc != 0) {
"failed to create firmware event queue: %d.", rc);
return (rc);
}
return (0);
}
static int
{
}
static int
{
int rc;
if (rc != 0) {
"failed to create management queue: %d\n", rc);
return (rc);
}
return (0);
}
static int
{
int rc;
if (rc != 0)
return (rc);
return (rc);
}
static int
{
int rc;
}
if (rc == 0)
return (rc);
}
#ifndef TCP_OFFLOAD_DISABLE
static int
int intr_idx)
{
int rc;
if (rc != 0)
return (rc);
return (rc);
}
static int
{
int rc;
if (rc == 0)
return (rc);
}
#endif
static int
{
struct fw_eq_ctrl_cmd c;
bzero(&c, sizeof (c));
V_FW_EQ_CTRL_CMD_VFN(0));
F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
c.physeqid_pkd = BE_32(0);
c.fetchszm_to_iqid =
c.dcaen_to_eqsize =
if (rc != 0) {
return (rc);
}
return (rc);
}
static int
{
struct fw_eq_eth_cmd c;
bzero(&c, sizeof (c));
V_FW_EQ_ETH_CMD_VFN(0));
F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
c.fetchszm_to_iqid =
if (rc != 0) {
"failed to create Ethernet egress queue: %d", rc);
return (rc);
}
return (rc);
}
#ifndef TCP_OFFLOAD_DISABLE
static int
{
struct fw_eq_ofld_cmd c;
bzero(&c, sizeof (c));
V_FW_EQ_OFLD_CMD_VFN(0));
F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
c.fetchszm_to_iqid =
c.dcaen_to_eqsize =
if (rc != 0) {
"failed to create egress queue for TCP offload: %d", rc);
return (rc);
}
return (rc);
}
#endif
static int
{
int rc;
if (rc != 0)
return (rc);
case EQ_CTRL:
break;
case EQ_ETH:
break;
#ifndef TCP_OFFLOAD_DISABLE
case EQ_OFLD:
break;
#endif
default:
}
if (rc != 0) {
"failed to allocate egress queue(%d): %d",
}
return (rc);
}
static int
{
int rc;
case EQ_CTRL:
break;
case EQ_ETH:
break;
#ifndef TCP_OFFLOAD_DISABLE
case EQ_OFLD:
break;
#endif
default:
}
if (rc != 0) {
"failed to free egress queue (%d): %d",
return (rc);
}
}
}
return (0);
}
/* ARGSUSED */
static int
int idx)
{
int rc;
if (rc != 0)
return (rc);
/*
* TODO: use idx to figure out what kind of wrq this is and install
* useful kstats for it.
*/
return (rc);
}
static int
{
int rc;
if (rc != 0)
return (rc);
return (0);
}
static int
{
int rc, i;
if (rc != 0)
return (rc);
if (rc == 0)
else
/*
* TODO: is this too low? Worst case would need around 4 times qsize
* (all tx descriptors filled to the brim with SGLs, with each entry in
* the SGL coming from a distinct DMA handle). Increase tx_dhdl_total
* if you see too many dma_hdl_failed.
*/
for (i = 0; i < txq->tx_dhdl_total; i++) {
if (rc != DDI_SUCCESS) {
"%s: failed to allocate DMA handle (%d)",
}
txq->tx_dhdl_avail++;
}
return (rc);
}
static int
{
int i;
}
}
(void) ddi_dma_unbind_handle(hdl);
txq->tx_dhdl_cidx = 0;
}
freemsgchain(sd->m);
}
}
for (i = 0; i < txq->tx_dhdl_total; i++) {
}
}
return (0);
}
/*
* Allocates a block of contiguous memory for DMA. Can be used to allocate
* descriptor rings, tx copy buffers, and other DMA memory for the driver.
*
* Caller does not have to clean up anything if this function fails, it cleans
* up after itself.
*
* Caller provides the following:
* len length of the block of memory to allocate.
* flags DDI_DMA_* flags to use (CONSISTENT/STREAMING, READ/WRITE/RDWR)
* acc_attr device access attributes for the allocation.
* dma_attr DMA attributes for the allocation
*
* If the function is successful it fills up this information:
* dma_hdl DMA handle for the allocated memory
* acc_hdl access handle for the allocated memory
* ba bus address of the allocated memory
* va KVA of the allocated memory.
*/
static int
{
int rc;
/*
* DMA handle.
*/
if (rc != DDI_SUCCESS) {
"failed to allocate DMA handle: %d", rc);
}
/*
* Memory suitable for DMA.
*/
if (rc != DDI_SUCCESS) {
"failed to allocate DMA memory: %d", rc);
return (ENOMEM);
}
}
/*
* DMA bindings.
*/
if (rc != DDI_DMA_MAPPED) {
"failed to map DMA memory: %d", rc);
return (ENOMEM);
}
if (ccount != 1) {
"unusable DMA mapping (%d segments)", ccount);
}
return (0);
}
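/*
 * Hedged sketch of the standard DDI sequence the comment above describes:
 * allocate a DMA handle, allocate memory suitable for DMA, then bind the
 * two, unwinding on every failure path. example_dma_alloc() is not driver
 * code; only documented ddi_dma_* calls are used, and a single cookie is
 * required just as in the real routine.
 */
static int
example_dma_alloc(dev_info_t *dip, size_t len, uint_t flags,
    ddi_device_acc_attr_t *acc_attr, ddi_dma_attr_t *dma_attr,
    ddi_dma_handle_t *dma_hdl, ddi_acc_handle_t *acc_hdl,
    uint64_t *ba, caddr_t *va)
{
	size_t real_len;
	ddi_dma_cookie_t cookie;
	uint_t ccount;
	int rc;

	rc = ddi_dma_alloc_handle(dip, dma_attr, DDI_DMA_SLEEP, NULL, dma_hdl);
	if (rc != DDI_SUCCESS)
		return (ENOMEM);

	rc = ddi_dma_mem_alloc(*dma_hdl, len, acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, va, &real_len, acc_hdl);
	if (rc != DDI_SUCCESS) {
		ddi_dma_free_handle(dma_hdl);
		return (ENOMEM);
	}

	rc = ddi_dma_addr_bind_handle(*dma_hdl, NULL, *va, real_len, flags,
	    DDI_DMA_SLEEP, NULL, &cookie, &ccount);
	if (rc != DDI_DMA_MAPPED || ccount != 1) {
		if (rc == DDI_DMA_MAPPED)
			(void) ddi_dma_unbind_handle(*dma_hdl);
		ddi_dma_mem_free(acc_hdl);
		ddi_dma_free_handle(dma_hdl);
		return (ENOMEM);
	}

	*ba = cookie.dmac_laddress;

	return (0);
}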
static int
{
(void) ddi_dma_unbind_handle(*dhdl);
return (0);
}
static int
{
}
static int
{
}
static int
{
}
static inline bool
{
}
static inline void
{
}
}
/*
* Fill the freelist with up to nbufs buffers and maybe ring its doorbell.
*
* Returns non-zero to indicate that the freelist should be added to the list
* of starving freelists.
*/
static int
{
while (nbufs--) {
/*
* Buffer is available for recycling. Two ways
* this can happen:
*
* a) All the packets DMA'd into it last time
* around were within the rx_copy_threshold
* and no part of the buffer was ever passed
* up (ref_cnt never went over 1).
*
* b) Packets DMA'd into the buffer were passed
* up but have all been freed by the upper
* layers by now (ref_cnt went over 1 but is
* now back to 1).
*
* Either way the bus address in the descriptor
* ring is already valid.
*/
d++;
goto recycled;
} else {
/*
* Buffer still in use and we need a
* replacement. But first release our reference
* on the existing buffer.
*/
}
}
break;
sd++;
}
}
}
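/*
 * Hedged helper (hypothetical, not driver code) capturing the recycling
 * test from the comments above: a buffer can be reused in place only when
 * the driver holds the sole remaining reference, i.e. nothing handed up to
 * the stack still points at it.
 */
static inline boolean_t
rx_buf_can_recycle(uint_t ref_cnt)
{
	return (ref_cnt == 1 ? B_TRUE : B_FALSE);
}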
#ifndef TAILQ_FOREACH_SAFE
#define	TAILQ_FOREACH_SAFE(var, head, field, tvar) \
	for ((var) = TAILQ_FIRST((head)); \
	    (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
	    (var) = (tvar))
#endif
/*
* Attempt to refill all starving freelists.
*/
static void
refill_sfl(void *arg)
{
}
}
}
static void
{
drv_usectohz(100000));
}
}
}
static void
{
unsigned int i;
}
}
}
/*
* Note that fl->cidx and fl->offset are left unchanged in case of failure.
*/
static mblk_t *
{
/*
* The SGE won't pack a new frame into the current buffer if the entire
* payload doesn't fit in the remaining space. Move on to the next buf
* in that case.
*/
nbuf++;
}
if (copy != 0) {
if (m == NULL)
return (NULL);
}
while (len) {
if (copy != 0)
else {
if (m == NULL) {
return (NULL);
}
else
}
m->b_wptr += n;
len -= n;
offset = 0;
cidx = 0;
nbuf++;
}
}
(*fl_bufs_used) += nbuf;
}
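/*
 * Hedged helper (not in the original) for the placement rule noted at the
 * top of the function above: the SGE starts a new rx buffer whenever the
 * entire payload does not fit in the space left in the current one.
 */
static inline boolean_t
fl_payload_needs_new_buf(uint_t offset, uint_t buf_size, uint_t payload_len)
{
	ASSERT(offset <= buf_size);

	return (buf_size - offset < payload_len ? B_TRUE : B_FALSE);
}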
/*
* We'll do immediate data tx for non-LSO, but only when not coalescing. We're
* willing to use up to 2 hardware descriptors, which means a maximum of 96 bytes
* of immediate data.
*/
#define IMM_LEN ( \
2 * EQ_ESIZE \
- sizeof (struct fw_eth_tx_pkt_wr) \
- sizeof (struct cpl_tx_pkt_core))
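/*
 * Hedged sanity check (not in the original): the comment above puts the
 * limit at 96 bytes, which matches IMM_LEN under the assumption that a
 * hardware descriptor (EQ_ESIZE) is 64 bytes and that the WR and CPL
 * headers are 16 bytes each: 2 * 64 - 16 - 16 = 96. CTASSERT is assumed to
 * be available from the usual illumos headers.
 */
#ifdef DEBUG
CTASSERT(IMM_LEN == 96);
#endif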
/*
* Returns non-zero on failure, no need to cleanup anything in that case.
*
* Note 1: We always try to pull up the mblk if required and return E2BIG only
* if this fails.
*
* Note 2: We'll also pullup incoming mblk if HW_LSO is set and the first mblk
* does not have the TCP header in it.
*/
static int
int sgl_only)
{
int rc;
/* total length and a rough estimate of # of segments */
n = 0;
for (; m; m = m->b_cont) {
}
m = *fp;
txq->pullup_early++;
if (m == NULL) {
txq->pullup_failed++;
return (E2BIG); /* (*fp) left as it was */
}
*fp = m;
}
return (0); /* nsegs = 0 tells caller to use imm. tx */
goto done;
for (; m; m = m->b_cont) {
/* Use tx copy buffer if this mblk is small enough */
continue;
/* Add DMA bindings for this mblk to the SGL */
txq->pullup_late++;
if (m != NULL) {
*fp = m;
goto start;
}
txq->pullup_failed++;
}
if (rc != 0) {
return (rc);
}
}
done:
/*
* Store the # of flits required to hold this frame's SGL in nflits. An
* SGL has a (ULPTX header + len0, addr0) tuple optionally followed by
* multiple (len0 + len1, addr0, addr1) tuples. If addr1 is not used
* then len1 must be set to 0.
*/
if (n & 1)
return (0);
}
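/*
 * Hedged helper (not from the original) that computes the flit count the
 * comment above describes: 2 flits for the leading (ULPTX header + len0,
 * addr0) tuple, then 3 flits for every following pair of segments, with an
 * odd trailing segment using only 2 of those 3 flits (its unused length is
 * written as 0).
 */
static inline uint_t
example_sgl_nflits(uint_t nsegs)
{
	uint_t n = nsegs - 1;	/* segments after the first */

	ASSERT(nsegs >= 1);

	return (2 + (3 * n) / 2 + (n & 1));
}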
static inline int
{
return (0);
*waste = 0;
return (1);
}
}
#define TXB_CHUNK 64
/*
* Copies the specified # of bytes into txq's tx copy buffer and updates txinfo
* and txq to indicate resources used. Caller must ensure that at least that
* many bytes are available in the mblk chain (b_cont linked).
*/
static inline int
{
int waste, n;
return (ENOMEM);
}
if (waste != 0) {
}
n += MBLKL(m);
}
return (0);
}
static inline void
{
} else {
}
}
/*
* This function cleans up any partially allocated resources when it fails so
* there's nothing for the caller to clean up in that case.
*
* EIO indicates permanent failure. Caller should drop the frame containing
* this mblk and continue.
*
* E2BIG indicates that the SGL length for this mblk exceeds the hardware
* limit. Caller should pull up the frame before trying to send it out.
* (This error means our pullup_early heuristic did not work for this frame)
*
* ENOMEM indicates a temporary shortage of resources (DMA handles, other DMA
* resources, etc.). Caller should suspend the tx queue and wait for reclaim to
* free up resources.
*/
static inline int
{
int rc;
if (txq->tx_dhdl_avail == 0) {
txq->dma_hdl_failed++;
return (ENOMEM);
}
&ccount);
if (rc != DDI_DMA_MAPPED) {
txq->dma_map_failed++;
}
(void) ddi_dma_unbind_handle(dhdl);
return (E2BIG);
}
while (--ccount) {
}
txq->tx_dhdl_pidx = 0;
txq->tx_dhdl_avail--;
return (0);
}
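/*
 * Hedged sketch of how a caller reacts to the error codes documented above
 * (hypothetical helper and enum; the real dispatch is inlined in the tx
 * path): EIO drops the frame, E2BIG pulls the frame up and retries, and
 * anything else (ENOMEM) suspends the queue until reclaim frees resources.
 */
enum tx_err_action {
	TX_DROP_FRAME,
	TX_PULLUP_AND_RETRY,
	TX_SUSPEND_QUEUE
};

static inline enum tx_err_action
tx_error_action(int rc)
{
	switch (rc) {
	case EIO:
		return (TX_DROP_FRAME);
	case E2BIG:
		return (TX_PULLUP_AND_RETRY);
	default:
		return (TX_SUSPEND_QUEUE);
	}
}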
/*
* Releases all the txq resources used up in the specified txinfo.
*/
static void
{
int n;
if (n > 0) {
else {
}
}
if (txq->tx_dhdl_pidx > 0)
txq->tx_dhdl_pidx--;
else
txq->tx_dhdl_avail++;
}
}
/*
* Returns 0 to indicate that m has been accepted into a coalesced tx work
* request. It has either been folded into txpkts or txpkts was flushed and m
* has started a new coalesced work request (as the first frame in a fresh
* txpkts).
*
* Returns non-zero to indicate a failure - caller is responsible for
* transmitting m, if there was anything in txpkts it has been flushed.
*/
static int
{
int can_coalesce;
if (can_coalesce != 0) {
return (0);
}
/*
* Couldn't coalesce m into txpkts. The first order of business
* is to send txpkts on its way. Then we'll revisit m.
*/
}
/*
* Check if we can start a new coalesced tx work request with m as
* the first packet in it.
*/
if (can_coalesce == 0)
return (EINVAL);
/*
* Start a fresh coalesced tx WR with m as the first frame in it.
*/
txsd->m = m;
return (0);
}
/*
* Note that write_txpkts_wr can never run out of hardware descriptors (but
* write_txpkt_wr can). add_to_txpkts ensures that a frame is accepted for
* coalescing only if sufficient hardware descriptors are available.
*/
static void
{
struct fw_eth_tx_pkts_wr *wr;
V_FW_WR_IMMDLEN(0)); /* immdlen does not matter in this WR */
/* Everything else already written */
txq->txpkts_wrs++;
}
static int
{
struct fw_eth_tx_pkt_wr *wr;
struct cpl_tx_pkt_core *cpl;
/*
* Do we have enough flits to send this frame out?
*/
ctrl = sizeof (struct cpl_tx_pkt_core);
ctrl += sizeof (struct cpl_tx_pkt_lso);
} else
else {
}
return (ENOMEM);
/* Firmware work request header */
char *p = (void *)m->b_rptr;
/* LINTED: E_BAD_PTR_CAST_ALIGN */
if (((struct ether_header *)p)->ether_type ==
htons(ETHERTYPE_VLAN)) {
p += sizeof (struct ether_vlan_header);
} else
p += sizeof (struct ether_header);
/* LINTED: E_BAD_PTR_CAST_ALIGN for IPH_HDR_LENGTH() */
/* LINTED: E_BAD_PTR_CAST_ALIGN for IPH_HDR_LENGTH() */
p += IPH_HDR_LENGTH(p);
} else
/* Checksum offload */
ctrl1 = 0;
if (ctrl1 == 0)
/* CPL header */
/* Software descriptor */
txsd->m = m;
/* LINTED: E_ASSIGN_NARROW_CONV */
/* SGL */
/* Need to zero-pad to a 16 byte boundary if not on one */
/* LINTED: E_BAD_PTR_CAST_ALIGN */
} else {
#ifdef DEBUG
#endif
for (; m; m = m->b_cont) {
#ifdef DEBUG
#endif
}
}
return (0);
}
static inline void
{
struct ulptx_idata *ulpsc;
struct cpl_tx_pkt_core *cpl;
/* Checksum offload */
ctrl = 0;
if (ctrl == 0)
/*
* The previous packet's SGL must have ended at a 16 byte boundary (this
* guarantees that the descriptor ring can not wrap around between the ULPTX
* master command and ULPTX subcommand (8 bytes each), and that it can not
* wrap around in the middle of the cpl_tx_pkt_core either).
*/
/* ULP master command */
/* ULP subcommand */
/* CPL_TX_PKT */
/* SGL for this frame */
/* Zero pad and advance to a 16 byte boundary if not already at one. */
if (flitp & 0xf) {
/* no matter what, flitp should be on an 8 byte boundary */
}
}
static inline void
{
} else {
}
}
static inline void
{
/* pidx has wrapped around since last doorbell */
}
}
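/*
 * Hedged helper (not driver code) for the wrap case noted above: how many
 * descriptors have been written since the last doorbell when the producer
 * index may have wrapped past the end of the ring.
 */
static inline uint_t
descs_since_doorbell(uint_t pidx, uint_t db_pidx, uint_t ring_size)
{
	return (pidx >= db_pidx ? pidx - db_pidx :
	    pidx + ring_size - db_pidx);
}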
static int
{
else
if (can_reclaim == 0)
return (0);
do {
int ndesc;
/* Firmware doesn't return "partial" credits. */
/*
* We always keep mblk around, even for immediate data. If mblk
* is NULL, this has to be the software descriptor for a credit
* flush work request.
*/
freemsgchain(txsd->m);
#ifdef DEBUG
else {
}
#endif
can_reclaim -= ndesc;
for (; hdls_freed; hdls_freed--) {
txq->tx_dhdl_cidx = 0;
}
return (reclaimed);
}
static void
{
struct fw_eq_flush_wr *wr;
}
static int
{
m->b_rptr += FL_PKTSHIFT;
/* TODO: what about cpl->ip_frag? */
mac_hcksum_set(m, 0, 0, 0, 0xffff,
}
/* Add to the chain that we'll send up */
else
return (0);
}
static inline void
{
/* Hold back one credit if pidx = cidx */
ndesc--;
/*
* ndesc may have been reduced above (to avoid pidx == cidx). If there is
* nothing left to post, return.
*/
if (ndesc <= 0)
return;
/* There was a wrap */
/* From desc_start to the end of list */
/* From start of list to the desc_last */
if (desc_last != 0)
} else {
/* There was no wrap, sync from start_desc to last_desc */
}
/*
* Update pending count:
* Deduct the number of descriptors posted
*/
}
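/*
 * Hedged sketch of the split sync described in the comments above
 * (hypothetical helper; the handle, descriptor size and ring size are
 * supplied by the caller): when the dirty range wraps past the end of the
 * ring it is synced in two pieces, first from the first dirty descriptor to
 * the end of the ring and then from the start of the ring to the last dirty
 * descriptor.
 */
static void
example_sync_desc_range(ddi_dma_handle_t hdl, uint_t first, uint_t last,
    uint_t ring_size, size_t desc_size)
{
	if (first > last) {
		/* Wrapped: sync [first, ring_size) and then [0, last]. */
		(void) ddi_dma_sync(hdl, first * desc_size,
		    (ring_size - first) * desc_size, DDI_DMA_SYNC_FORDEV);
		(void) ddi_dma_sync(hdl, 0, (last + 1) * desc_size,
		    DDI_DMA_SYNC_FORDEV);
	} else {
		/* No wrap: one contiguous sync from first to last. */
		(void) ddi_dma_sync(hdl, first * desc_size,
		    (last - first + 1) * desc_size, DDI_DMA_SYNC_FORDEV);
	}
}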
/* ARGSUSED */
static int
{
return (0);
}
int
int flags)
{
int i, rc;
for (i = 0; i < count; i++) {
if (rc != DDI_SUCCESS) {
"%s: failed to allocate DMA handle (%d)",
}
}
return (0);
}
#define KS_C_SET(x, ...) \
/*
* cxgbe:X:config
*/
struct cxgbe_port_config_kstats {
};
/*
* cxgbe:X:info
*/
struct cxgbe_port_info_kstats {
};
static kstat_t *
{
struct cxgbe_port_config_kstats *kstatp;
int ndata;
ndata = sizeof (struct cxgbe_port_config_kstats) /
sizeof (kstat_named_t);
return (NULL);
}
/* Do NOT set ksp->ks_update. These kstats do not change. */
/* Install the kstat */
return (ksp);
}
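/*
 * Hedged sketch of the kstat pattern used throughout this section
 * (hypothetical module/statistic names): create a KSTAT_TYPE_NAMED kstat,
 * initialize its entries, and install it. Only standard kstat(9F) calls
 * are used here.
 */
static kstat_t *
example_named_kstat(int instance)
{
	kstat_t *ksp;
	kstat_named_t *kn;

	ksp = kstat_create("cxgbe", instance, "example", "net",
	    KSTAT_TYPE_NAMED, 1, 0);
	if (ksp == NULL)
		return (NULL);

	kn = (kstat_named_t *)ksp->ks_data;
	kstat_named_init(&kn[0], "example_counter", KSTAT_DATA_UINT64);
	kn[0].value.ui64 = 0;

	/* Do NOT set ks_update if the data never changes (see above). */
	kstat_install(ksp);

	return (ksp);
}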
static kstat_t *
{
struct cxgbe_port_info_kstats *kstatp;
int ndata;
return (NULL);
}
/* Install the kstat */
return (ksp);
}
static int
{
struct cxgbe_port_info_kstats *kstatp =
"active TWINAX", "LRM" };
if (rw == KSTAT_WRITE)
return (0);
else
if (bgmap == 0)
else if (bgmap == 1)
else
bgmap = 1;
return (0);
}
/*
* cxgbe:X:rxqY
*/
struct rxq_kstats {
};
static kstat_t *
{
struct rxq_kstats *kstatp;
int ndata;
char str[16];
KSTAT_TYPE_NAMED, ndata, 0);
"%s: failed to initialize rxq kstats for queue %d.",
return (NULL);
}
return (ksp);
}
static int
{
if (rw == KSTAT_WRITE)
return (0);
return (0);
}
/*
* cxgbe:X:txqY
*/
struct txq_kstats {
};
static kstat_t *
{
struct txq_kstats *kstatp;
int ndata;
char str[16];
KSTAT_TYPE_NAMED, ndata, 0);
"%s: failed to initialize txq kstats for queue %d.",
return (NULL);
}
return (ksp);
}
static int
{
if (rw == KSTAT_WRITE)
return (0);
return (0);
}