Searched refs:rxq (Results 1 - 25 of 33) sorted by relevance

/illumos-gate/usr/src/uts/intel/io/vmxnet3s/
vmxnet3_rx.c
185 vmxnet3_rx_populate(vmxnet3_softc_t *dp, vmxnet3_rxqueue_t *rxq, uint16_t idx, argument
216 vmxnet3_cmdring_t *cmdRing = &rxq->cmdRing;
219 rxq->bufRing[idx].rxBuf = rxBuf;
239 vmxnet3_rxqueue_init(vmxnet3_softc_t *dp, vmxnet3_rxqueue_t *rxq) argument
241 vmxnet3_cmdring_t *cmdRing = &rxq->cmdRing;
248 if ((err = vmxnet3_rx_populate(dp, rxq, cmdRing->next2fill,
269 vmxnet3_free_rxbuf(dp, rxq->bufRing[cmdRing->next2fill].rxBuf);
279 vmxnet3_rxqueue_fini(vmxnet3_softc_t *dp, vmxnet3_rxqueue_t *rxq) argument
291 for (i = 0; i < rxq->cmdRing.size; i++) {
292 rxBuf = rxq
337 vmxnet3_rx_intr(vmxnet3_softc_t *dp, vmxnet3_rxqueue_t *rxq) argument
[all...]
vmxnet3_main.c
387 vmxnet3_rxqueue_t *rxq = &dp->rxQueue; local
390 ASSERT(!(rxq->cmdRing.size & VMXNET3_RING_SIZE_MASK));
391 ASSERT(!(rxq->compRing.size & VMXNET3_RING_SIZE_MASK));
392 ASSERT(!rxq->cmdRing.dma.buf && !rxq->compRing.dma.buf);
394 if ((err = vmxnet3_alloc_cmdring(dp, &rxq->cmdRing)) != 0) {
397 rqdesc->conf.rxRingBasePA[0] = rxq->cmdRing.dma.bufPA;
398 rqdesc->conf.rxRingSize[0] = rxq->cmdRing.size;
402 if ((err = vmxnet3_alloc_compring(dp, &rxq->compRing)) != 0) {
405 rqdesc->conf.compRingBasePA = rxq
452 vmxnet3_rxqueue_t *rxq = &dp->rxQueue; local
[all...]
vmxnet3.h
192 int vmxnet3_rxqueue_init(vmxnet3_softc_t *dp, vmxnet3_rxqueue_t *rxq);
193 mblk_t *vmxnet3_rx_intr(vmxnet3_softc_t *dp, vmxnet3_rxqueue_t *rxq);
194 void vmxnet3_rxqueue_fini(vmxnet3_softc_t *dp, vmxnet3_rxqueue_t *rxq);
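Read together, the three vmxnet3.h prototypes above describe the receive-queue lifecycle: populate the command ring at start, drain completions from the interrupt path as an mblk_t chain, and free the buffers at stop. The sketch below only illustrates that calling order; the example_* wrappers, the mac handle field name, and the mac_rx() hand-off are assumptions modeled on typical illumos GLDv3 drivers, not lines from this search.

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/mac_provider.h>
#include "vmxnet3.h"		/* prototypes shown above */

/* Hypothetical wrappers showing how init/intr/fini are typically sequenced. */
static int
example_rx_start(vmxnet3_softc_t *dp)
{
	/* Fill the command ring with receive buffers. */
	return (vmxnet3_rxqueue_init(dp, &dp->rxQueue));
}

static uint_t
example_rx_intr(caddr_t arg1, caddr_t arg2)
{
	vmxnet3_softc_t *dp = (vmxnet3_softc_t *)arg1;
	mblk_t *mp;

	/* Harvest completed descriptors as a chain of mblk_t. */
	mp = vmxnet3_rx_intr(dp, &dp->rxQueue);
	if (mp != NULL)
		mac_rx(dp->mac_hdl, NULL, mp);	/* mac_hdl: assumed field name */
	return (DDI_INTR_CLAIMED);
}

static void
example_rx_stop(vmxnet3_softc_t *dp)
{
	/* Return every buffer still owned by the ring. */
	vmxnet3_rxqueue_fini(dp, &dp->rxQueue);
}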
/illumos-gate/usr/src/uts/common/io/bnxe/
bnxe_rx.c
77 if (pUM->rxq[idx].rxLowWater > s_list_entry_cnt(&pLmRxChain->active_descq))
79 pUM->rxq[idx].rxLowWater = s_list_entry_cnt(&pLmRxChain->active_descq);
182 s_list_push_tail(&pUM->rxq[idx].doneRxQ,
186 if (s_list_entry_cnt(&pUM->rxq[idx].doneRxQ) >= pUM->devParams.maxRxFree)
188 doneRxQ = pUM->rxq[idx].doneRxQ;
189 s_list_clear(&pUM->rxq[idx].doneRxQ);
200 atomic_dec_32(&pUM->rxq[idx].rxBufUpInStack);
215 if ((cnt = pUM->rxq[FCOE_CID(&pUM->lm_dev)].rxBufUpInStack) == 0)
241 if ((cnt = pUM->rxq[idx].rxBufUpInStack) == 0)
298 pRxQ = &pUM->rxq[id
[all...]
bnxe_lock.c
47 void BNXE_LOCK_ENTER_RX (um_device_t * pUM, int idx) { mutex_enter(&pUM->rxq[idx].rxMutex); }
48 void BNXE_LOCK_EXIT_RX (um_device_t * pUM, int idx) { mutex_exit(&pUM->rxq[idx].rxMutex); }
49 void BNXE_LOCK_ENTER_DONERX (um_device_t * pUM, int idx) { mutex_enter(&pUM->rxq[idx].doneRxMutex); }
50 void BNXE_LOCK_EXIT_DONERX (um_device_t * pUM, int idx) { mutex_exit(&pUM->rxq[idx].doneRxMutex); }
bnxe_main.c
240 mutex_init(&pUM->rxq[idx].rxMutex, NULL,
242 mutex_init(&pUM->rxq[idx].doneRxMutex, NULL,
244 pUM->rxq[idx].pUM = pUM;
245 pUM->rxq[idx].idx = idx;
310 mutex_destroy(&pUM->rxq[idx].rxMutex);
311 mutex_destroy(&pUM->rxq[idx].doneRxMutex);
bnxe.h
660 RxQueue rxq[MAX_ETH_CONS]; member in struct:_um_device
937 #define BNXE_LOCK_ENTER_RX(pUM, idx) mutex_enter(&(pUM)->rxq[(idx)].rxMutex)
938 #define BNXE_LOCK_EXIT_RX(pUM, idx) mutex_exit(&(pUM)->rxq[(idx)].rxMutex)
939 #define BNXE_LOCK_ENTER_DONERX(pUM, idx) mutex_enter(&(pUM)->rxq[(idx)].doneRxMutex)
940 #define BNXE_LOCK_EXIT_DONERX(pUM, idx) mutex_exit(&(pUM)->rxq[(idx)].doneRxMutex)
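The bnxe_lock.c wrappers, the bnxe_main.c mutex_init/mutex_destroy calls, and the bnxe.h macros above all manage one pair of mutexes per entry in the rxq[] array. The sketch below shows that lifecycle in one place; it is an illustration rather than driver code, the helper names are hypothetical, and the MUTEX_DRIVER/NULL arguments are assumptions (the real driver may pass an interrupt priority to mutex_init()).

#include <sys/ksynch.h>
#include "bnxe.h"		/* um_device_t, RxQueue, BNXE_LOCK_* macros above */

/* Hypothetical helpers illustrating the per-queue mutex lifecycle. */
static void
example_rxq_lock_init(um_device_t *pUM, int idx)
{
	mutex_init(&pUM->rxq[idx].rxMutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&pUM->rxq[idx].doneRxMutex, NULL, MUTEX_DRIVER, NULL);
}

static void
example_rxq_refill(um_device_t *pUM, int idx)
{
	BNXE_LOCK_ENTER_RX(pUM, idx);	/* mutex_enter(&pUM->rxq[idx].rxMutex) */
	/* ... post fresh receive buffers to pUM->rxq[idx] ... */
	BNXE_LOCK_EXIT_RX(pUM, idx);	/* mutex_exit(&pUM->rxq[idx].rxMutex) */
}

static void
example_rxq_lock_fini(um_device_t *pUM, int idx)
{
	mutex_destroy(&pUM->rxq[idx].rxMutex);
	mutex_destroy(&pUM->rxq[idx].doneRxMutex);
}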
bnxe_intr.c
151 RxQueue * pRxQ = &pUM->rxq[idx];
193 RxQueue * pRxQ = &pUM->rxq[idx];
758 if (pUM->rxq[idx].inPollMode)
bnxe_kstat.c
1614 pStats->rxDoneDescs.value.ui64 = s_list_entry_cnt(&pUM->rxq[idx].doneRxQ);
1615 pStats->rxWaitingDescs.value.ui64 = s_list_entry_cnt(&pUM->rxq[idx].waitRxQ);
1616 pStats->rxCopied.value.ui64 = pUM->rxq[idx].rxCopied;
1617 pStats->rxDiscards.value.ui64 = pUM->rxq[idx].rxDiscards;
1618 pStats->rxBufUpInStack.value.ui64 = pUM->rxq[idx].rxBufUpInStack;
1619 pStats->rxLowWater.value.ui64 = pUM->rxq[idx].rxLowWater;
1620 pStats->inPollMode.value.ui64 = pUM->rxq[idx].inPollMode;
1621 pStats->pollCnt.value.ui64 = pUM->rxq[idx].pollCnt;
1622 pStats->intrDisableCnt.value.ui64 = pUM->rxq[idx].intrDisableCnt;
1623 pStats->intrEnableCnt.value.ui64 = pUM->rxq[id
[all...]
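The bnxe_kstat.c lines above copy per-queue soft counters into kstat_named_t slots, evidently from inside a kstat update routine. Below is a minimal sketch of that shape under the usual illumos kstat conventions; the statistics structure, the field subset, and the use of ks_private to reach the queue are illustrative assumptions, not the driver's actual layout.

#include <sys/kstat.h>
#include <sys/errno.h>
#include "bnxe.h"		/* RxQueue, as declared in bnxe.h above */

/* Hypothetical named-kstat block covering a subset of the counters shown. */
typedef struct example_rxq_stats {
	kstat_named_t	rxCopied;
	kstat_named_t	rxDiscards;
	kstat_named_t	rxLowWater;
} example_rxq_stats_t;

static int
example_rxq_kstat_update(kstat_t *ksp, int rw)
{
	example_rxq_stats_t *pStats = ksp->ks_data;
	RxQueue *pRxQ = ksp->ks_private;	/* assumed: one kstat per queue */

	if (rw == KSTAT_WRITE)
		return (EACCES);

	/* Snapshot the driver's counters into the named kstats. */
	pStats->rxCopied.value.ui64 = pRxQ->rxCopied;
	pStats->rxDiscards.value.ui64 = pRxQ->rxDiscards;
	pStats->rxLowWater.value.ui64 = pRxQ->rxLowWater;
	return (0);
}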
/illumos-gate/usr/src/uts/common/io/cxgbe/t4nex/
adapter.h
318 /* rxq: SGE ingress queue + SGE free list + miscellaneous items */
323 struct port_info *port; /* the port this rxq belongs to */
379 struct sge_rxq *rxq; /* NIC rx queues */ member in struct:sge
529 #define RXQ_LOCK(rxq) IQ_LOCK(&(rxq)->iq)
530 #define RXQ_UNLOCK(rxq) IQ_UNLOCK(&(rxq)->iq)
531 #define RXQ_LOCK_ASSERT_OWNED(rxq) IQ_LOCK_ASSERT_OWNED(&(rxq)->iq)
532 #define RXQ_LOCK_ASSERT_NOTOWNED(rxq) IQ_LOCK_ASSERT_NOTOWNE
[all...]
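The adapter.h excerpts above show that a cxgbe receive queue bundles an SGE ingress queue with a free list, and that the RXQ_LOCK macros simply forward to the lock of the embedded ingress queue. The fragment below restates that relationship; it is a stand-in, not the real structure definition, and the type and field names other than iq and port are guesses by analogy with the rest of the header.

/*
 * Stand-in illustration of the relationship described by the adapter.h
 * comment and macros above; see the real header for the full definition.
 */
struct example_rxq {
	struct sge_iq	iq;	/* SGE ingress queue; owns the queue lock */
	struct sge_fl	fl;	/* SGE free list feeding this queue */
	struct port_info *port;	/* the port this rxq belongs to */
	/* ... miscellaneous items ... */
};

/* RXQ_LOCK(rxq) therefore reduces to IQ_LOCK(&(rxq)->iq). */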
t4_sge.c
87 static int alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx,
89 static int free_rxq(struct port_info *pi, struct sge_rxq *rxq);
157 static kstat_t *setup_rxq_kstats(struct port_info *pi, struct sge_rxq *rxq,
401 iq = &s->rxq[pi->first_rxq + idx].iq;
407 iq = &s->rxq[pi->first_rxq + idx].iq;
419 iq = &s->rxq[pi->first_rxq + idx].iq;
429 struct sge_rxq *rxq; local
451 for_each_rxq(pi, i, rxq) {
453 init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, p->qsize_rxq,
456 init_fl(&rxq
572 struct sge_rxq *rxq; local
677 struct sge_rxq *rxq = iq_to_rxq(iq); /* Use iff iq is part of rxq */ local
1330 alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int i) argument
1345 free_rxq(struct port_info *pi, struct sge_rxq *rxq) argument
2933 struct sge_rxq *rxq = (void *)iq; local
3248 setup_rxq_kstats(struct port_info *pi, struct sge_rxq *rxq, int idx) argument
3283 struct sge_rxq *rxq = ksp->ks_private; local
[all...]
t4_nexus.c
122 int nrxq10g; /* # of NIC rxq's for each 10G port */
124 int nrxq1g; /* # of NIC rxq's for each 1G port */
127 int nofldrxq10g; /* # of TOE rxq's for each 10G port */
129 int nofldrxq1g; /* # of TOE rxq's for each 1G port */
481 s->neq = s->ntxq + s->nrxq; /* the fl in an rxq is an eq */
506 s->rxq = kmem_zalloc(s->nrxq * sizeof (struct sge_rxq), KM_SLEEP);
607 struct sge_rxq *rxq; local
619 rxq = &s->rxq[pi->first_rxq];
620 for (q = 0; q < pi->nrxq; q++, rxq
2026 struct sge_rxq *rxq; local
2083 struct sge_rxq *rxq; local
2124 struct sge_rxq *rxq; local
[all...]
t4_mac.c
132 *val = 0; /* TODO should come from rxq->nomem */
1002 struct sge_rxq *rxq; local
1021 for_each_rxq(pi, i, rxq) {
1022 rxq->iq.intr_params = V_QINTR_TIMER_IDX(v) |
1034 for_each_rxq(pi, i, rxq) {
1035 rxq->iq.intr_params = V_QINTR_TIMER_IDX(pi->tmr_idx) |
1039 rxq->iq.intr_pktc_idx = v; /* this needs fresh plumb */
1144 t4_mac_rx(struct port_info *pi, struct sge_rxq *rxq, mblk_t *m) argument
/illumos-gate/usr/src/uts/common/io/bnxe/577xx/drivers/common/lm/device/
lm_recv.c
1247 rxq lock is taken by caller */
1253 lm_rx_chain_t *rxq = &LM_RXQ(pdev, qidx); local
1255 rxq->ret_bytes += returned_bytes;
1265 if(S32_SUB(rxq->ret_bytes, rxq->ret_bytes_last_fw_update + HC_RET_BYTES_TH(pdev)) >= 0)
1275 LM_INTMEM_WRITE32(PFDEV(pdev), rxq->hc_sb_info.iro_dhc_offset, rxq->ret_bytes, BAR_CSTRORM_INTMEM);
1276 rxq->ret_bytes_last_fw_update = rxq->ret_bytes;
1278 VF_REG_WR(pdev, VF_BAR0_CSDM_QUEUES_OFFSET + rxq
[all...]
/illumos-gate/usr/src/uts/common/io/bnxe/577xx/drivers/common/lm/vf/channel_vf/
lm_vf.c
280 rxq_params = &request->rxq;
1895 mess->rxq.rcq_addr = lm_bd_chain_phys_addr(&(LM_RCQ(pdev,vf_qid).bd_chain), 0).as_u64;
1896 mess->rxq.rcq_np_addr = lm_bd_chain_phys_addr(&(LM_RCQ(pdev,vf_qid).bd_chain), 1).as_u64;
1897 mess->rxq.rxq_addr = lm_bd_chain_phys_addr(&(LM_RXQ_CHAIN(pdev,vf_qid,0)), 0).as_u64;
1899 mess->rxq.sge_addr = LM_TPA_CHAIN_BD(pdev, vf_qid).bd_chain_phy.as_u64;
1900 if (mess->rxq.sge_addr) {
1901 mess->rxq.flags |= SW_VFPF_QUEUE_FLG_TPA;
1904 mess->rxq.sge_addr = 0;
1908 mess->rxq.vf_sb = vf_qid; /* relative to vf */
1909 mess->rxq
[all...]
/illumos-gate/usr/src/uts/common/io/i40e/core/
i40e_virtchnl.h
222 struct i40e_virtchnl_rxq_info rxq; member in struct:i40e_virtchnl_queue_pair_info
/illumos-gate/usr/src/uts/common/io/ral/
rt2560.c
691 RAL_WRITE(sc, RT2560_RXCSR2, sc->rxq.physaddr);
1189 dr = &sc->rxq.dr_desc;
1190 count = sc->rxq.count;
1192 mutex_enter(&sc->rxq.rx_lock);
1198 desc = &sc->rxq.desc[sc->rxq.cur];
1199 data = &sc->rxq.data[sc->rxq.cur];
1245 dr_bf = &sc->rxq.dr_rxbuf[sc->rxq
[all...]
rt2560_var.h
164 struct rt2560_rx_ring rxq; member in struct:rt2560_softc
/illumos-gate/usr/src/uts/common/io/yge/
yge.c
2417 int32_t rxq; local
2423 rxq = port->p_rxq;
2584 CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_CLR_RESET);
2585 CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_OPER_INIT);
2586 CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_FIFO_OP_ON);
2588 CSR_WRITE_2(dev, Q_ADDR(rxq, Q_WM), 0x80);
2590 CSR_WRITE_2(dev, Q_ADDR(rxq, Q_WM), MSK_BMU_RX_WM);
2595 CSR_WRITE_4(dev, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
2601 CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR),
2636 uint32_t rxq; local
2710 uint32_t rxq = port->p_rxq; local
[all...]
/illumos-gate/usr/src/uts/common/io/rwd/
rt2661_var.h
116 struct rt2661_rx_ring rxq; member in struct:rt2661_softc
/illumos-gate/usr/src/uts/common/io/rwn/
rt2860_var.h
157 struct rt2860_rx_ring rxq; member in struct:rt2860_softc
rt2860.c
1717 struct rt2860_rx_data *data = &sc->rxq.data[sc->rxq.cur];
1718 struct rt2860_rxd *rxd = &sc->rxq.rxd[sc->rxq.cur];
1721 (void) ddi_dma_sync(sc->rxq.rxdesc_dma.dma_hdl,
1722 sc->rxq.cur * sizeof (struct rt2860_rxd),
1788 (void) ddi_dma_sync(sc->rxq.rxdesc_dma.dma_hdl,
1789 sc->rxq.cur * sizeof (struct rt2860_rxd),
1793 sc->rxq.cur = (sc->rxq
[all...]
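The rt2860.c excerpts above follow the usual receive-ring idiom: sync the descriptor at rxq.cur for the kernel, process the matching data slot, sync it back for the device, and advance cur around the ring. A generic sketch of that idiom follows; the ring structure and field names below are placeholders, not the rt2860_var.h definitions.

#include <sys/ddi.h>
#include <sys/sunddi.h>

/* Placeholder descriptor and ring types standing in for the driver's own. */
struct example_rxd {
	uint32_t	paddr;		/* buffer physical address */
	uint32_t	flags;		/* ownership and length bits */
};

struct example_rx_ring {
	ddi_dma_handle_t dma_hdl;	/* handle covering the descriptor array */
	struct example_rxd *rxd;	/* descriptor array */
	int		cur;		/* next slot to consume */
	int		count;		/* number of slots in the ring */
};

static void
example_rx_ring_consume(struct example_rx_ring *ring)
{
	off_t off = ring->cur * sizeof (struct example_rxd);

	/* See what the hardware wrote into this descriptor. */
	(void) ddi_dma_sync(ring->dma_hdl, off,
	    sizeof (struct example_rxd), DDI_DMA_SYNC_FORKERNEL);

	/* ... process the packet data associated with ring->cur ... */

	/* Give the descriptor back to the hardware and step forward. */
	(void) ddi_dma_sync(ring->dma_hdl, off,
	    sizeof (struct example_rxd), DDI_DMA_SYNC_FORDEV);
	ring->cur = (ring->cur + 1) % ring->count;
}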
/illumos-gate/usr/src/uts/common/io/bnxe/577xx/drivers/common/include/vm/
hw_channel.h
300 } rxq; member in struct:vfpf_setup_q_tlv
vfpf_if.h
219 } rxq; member in struct:vf_pf_msg_setup_q
/illumos-gate/usr/src/uts/common/io/iwn/
if_iwnvar.h
244 struct iwn_rx_ring rxq; member in struct:iwn_softc

Completed in 145 milliseconds
