Lines Matching refs:dp (vmxnet3 Rx path)

31 vmxnet3_alloc_rxbuf(vmxnet3_softc_t *dp, boolean_t canSleep)
39 atomic_inc_32(&dp->rx_alloc_failed);
43 if ((err = vmxnet3_alloc_dma_mem_1(dp, &rxBuf->dma, (dp->cur_mtu + 18),
45 VMXNET3_DEBUG(dp, 0, "Failed to allocate %d bytes for rx buf, "
46 "err:%d\n", (dp->cur_mtu + 18), err);
48 atomic_inc_32(&dp->rx_alloc_failed);
54 rxBuf->dp = dp;
56 atomic_inc_32(&dp->rx_num_bufs);
57 atomic_inc_32(&dp->rx_alloc_buf);
62 vmxnet3_free_rxbuf(vmxnet3_softc_t *dp, vmxnet3_rxbuf_t *rxBuf)
68 atomic_dec_32(&dp->rx_num_bufs);
71 uint32_t nv = atomic_dec_32_nv(&dp->rx_num_bufs);
88 vmxnet3_put_rxpool_buf(vmxnet3_softc_t *dp, vmxnet3_rxbuf_t *rxBuf,
91 vmxnet3_rxpool_t *rxPool = &dp->rxPool;
94 mutex_enter(&dp->rxPoolLock);
96 if ((dp->devEnabled || init) && rxPool->nBufs < rxPool->nBufsLimit) {
104 mutex_exit(&dp->rxPoolLock);
114 vmxnet3_softc_t *dp = rxBuf->dp;
116 if (!vmxnet3_put_rxpool_buf(dp, rxBuf, B_FALSE))
117 vmxnet3_free_rxbuf(dp, rxBuf);
127 vmxnet3_get_rxpool_buf(vmxnet3_softc_t *dp)
129 vmxnet3_rxpool_t *rxPool = &dp->rxPool;
132 mutex_enter(&dp->rxPoolLock);
140 mutex_exit(&dp->rxPoolLock);
151 vmxnet3_rxpool_init(vmxnet3_softc_t *dp)
156 ASSERT(dp->rxPool.nBufsLimit > 0);
157 while (dp->rxPool.nBufs < dp->rxPool.nBufsLimit) {
158 if ((rxBuf = vmxnet3_alloc_rxbuf(dp, B_FALSE)) == NULL) {
162 VERIFY(vmxnet3_put_rxpool_buf(dp, rxBuf, B_TRUE));
166 while ((rxBuf = vmxnet3_get_rxpool_buf(dp)) != NULL) {
167 vmxnet3_free_rxbuf(dp, rxBuf);
177 * dp->alloc_ok is true, then fall back to dynamic allocation. If pool is
185 vmxnet3_rx_populate(vmxnet3_softc_t *dp, vmxnet3_rxqueue_t *rxq, uint16_t idx,
190 if (pool && (rxBuf = vmxnet3_get_rxpool_buf(dp)) == NULL) {
192 atomic_inc_32(&dp->rx_pool_empty);
193 if (!dp->alloc_ok) {
194 atomic_inc_32(&dp->rx_alloc_failed);
198 if (rxBuf == NULL && (!pool || dp->alloc_ok)) {
199 rxBuf = vmxnet3_alloc_rxbuf(dp, canSleep);
207 VERIFY(vmxnet3_put_rxpool_buf(dp, rxBuf,
210 vmxnet3_free_rxbuf(dp, rxBuf);
212 atomic_inc_32(&dp->rx_alloc_failed);
239 vmxnet3_rxqueue_init(vmxnet3_softc_t *dp, vmxnet3_rxqueue_t *rxq)
244 dp->rxPool.nBufsLimit = vmxnet3_getprop(dp, "RxBufPoolLimit", 0,
248 if ((err = vmxnet3_rx_populate(dp, rxq, cmdRing->next2fill,
260 if ((err = vmxnet3_rxpool_init(dp)) != 0) {
269 vmxnet3_free_rxbuf(dp, rxq->bufRing[cmdRing->next2fill].rxBuf);
279 vmxnet3_rxqueue_fini(vmxnet3_softc_t *dp, vmxnet3_rxqueue_t *rxq)
284 ASSERT(!dp->devEnabled);
287 while ((rxBuf = vmxnet3_get_rxpool_buf(dp)))
288 vmxnet3_free_rxbuf(dp, rxBuf);
309 vmxnet3_rx_hwcksum(vmxnet3_softc_t *dp, mblk_t *mp,
323 VMXNET3_DEBUG(dp, 3, "rx cksum flags = 0x%x\n", flags);
337 vmxnet3_rx_intr(vmxnet3_softc_t *dp, vmxnet3_rxqueue_t *rxq)
345 ASSERT(mutex_owned(&dp->intrLock));
389 if (vmxnet3_rx_populate(dp, rxq, rxdIdx, B_FALSE,
397 VMXNET3_DEBUG(dp, 3, "rx 0x%p on [%u]\n", mblk,
406 vmxnet3_rx_hwcksum(dp, mp,
453 VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_RXPROD, rxprod);
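
Taken together, the matches above trace the Rx buffer pool life cycle: vmxnet3_put_rxpool_buf() pushes a buffer onto a mutex-protected free list capped at rxPool->nBufsLimit, vmxnet3_get_rxpool_buf() pops one off, and vmxnet3_rx_populate() falls back to vmxnet3_alloc_rxbuf() when the pool is empty and dp->alloc_ok still permits dynamic allocation. The following is a minimal userland sketch of that pattern, not the driver's code: it assumes pthreads and calloc() in place of the kernel's mutex, atomic, and kmem/DMA interfaces, and the rxpool_* names are invented for illustration.

/*
 * Userland sketch of the capped Rx-buffer pool pattern traced above: a
 * mutex-protected free list with an upper bound, plus a fallback to plain
 * allocation when the pool is empty and allocation is still permitted.
 * Illustrative only; the real driver uses kmem_zalloc(), kernel mutexes,
 * atomics, and DMA memory rather than pthreads and calloc().
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define	RXBUF_SIZE	1518		/* stand-in for cur_mtu + 18 */

typedef struct rxbuf {
	struct rxbuf	*next;
	char		data[RXBUF_SIZE];
} rxbuf_t;

typedef struct rxpool {
	pthread_mutex_t	lock;
	rxbuf_t		*head;
	unsigned	nbufs;		/* buffers currently pooled */
	unsigned	nbufs_limit;	/* cap, like rxPool->nBufsLimit */
	bool		alloc_ok;	/* permit fallback allocation */
} rxpool_t;

/* Pop a buffer from the pool, or return NULL if it is empty. */
static rxbuf_t *
rxpool_get(rxpool_t *pool)
{
	rxbuf_t *buf;

	pthread_mutex_lock(&pool->lock);
	if ((buf = pool->head) != NULL) {
		pool->head = buf->next;
		pool->nbufs--;
	}
	pthread_mutex_unlock(&pool->lock);
	return (buf);
}

/* Return a buffer to the pool; false means the caller must free it. */
static bool
rxpool_put(rxpool_t *pool, rxbuf_t *buf)
{
	bool taken = false;

	pthread_mutex_lock(&pool->lock);
	if (pool->nbufs < pool->nbufs_limit) {
		buf->next = pool->head;
		pool->head = buf;
		pool->nbufs++;
		taken = true;
	}
	pthread_mutex_unlock(&pool->lock);
	return (taken);
}

/* Fill path: prefer the pool, fall back to allocation when permitted. */
static rxbuf_t *
rx_populate(rxpool_t *pool)
{
	rxbuf_t *buf = rxpool_get(pool);

	if (buf == NULL && pool->alloc_ok)
		buf = calloc(1, sizeof (rxbuf_t));
	return (buf);
}

int
main(void)
{
	static rxpool_t pool = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.nbufs_limit = 4,
		.alloc_ok = true,
	};
	rxbuf_t *buf = rx_populate(&pool);	/* empty pool: falls back */

	if (buf != NULL && !rxpool_put(&pool, buf))
		free(buf);
	(void) printf("buffers pooled: %u\n", pool.nbufs);
	return (0);
}

The limit check in rxpool_put() mirrors the rxPool->nBufsLimit test in vmxnet3_put_rxpool_buf(): once the free list is full, the caller simply frees the buffer (as the matched lines 116-117 do), so an idle pool does not grow without bound.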