Lines Matching defs:rgep

83 rge_t *rgep;
89 rgep = (rge_t *)rx_buf->private;
97 if (rgep->rge_mac_state == RGE_MAC_UNATTACH ||
98 rgep->rge_mac_state == RGE_MAC_ATTACH)
106 rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
108 rge_problem(rgep, "rge_rx_recycle: desballoc() failed");
111 mutex_enter(rgep->rc_lock);
112 slot_recy = rgep->rc_next;
113 free_srbdp = &rgep->free_srbds[slot_recy];
117 rgep->rc_next = NEXT(slot_recy, RGE_BUF_SLOTS);
118 rge_atomic_renounce(&rgep->rx_free, 1);
119 if (rgep->rx_bcopy && rgep->rx_free == RGE_BUF_SLOTS)
120 rgep->rx_bcopy = B_FALSE;
121 ASSERT(rgep->rx_free <= RGE_BUF_SLOTS);
123 mutex_exit(rgep->rc_lock);
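
The matches above all fall inside rge_rx_recycle(), the frtn_t free routine invoked when a loaned-up receive mblk is freed upstream. A sketch reconstructing it from those lines; the locals, the dma_buf_t shape, the desballoc() wiring, and the free_srbdp->rx_buf store are assumptions from context, not matched lines:

static void
rge_rx_recycle(caddr_t arg)
{
	rge_t *rgep;
	dma_buf_t *rx_buf;
	sw_rbd_t *free_srbdp;
	uint32_t slot_recy;

	rx_buf = (dma_buf_t *)arg;
	rgep = (rge_t *)rx_buf->private;

	/*
	 * During attach/detach the rings are being built or torn
	 * down; re-arming the buffer here would leak the mblk.
	 */
	if (rgep->rge_mac_state == RGE_MAC_UNATTACH ||
	    rgep->rge_mac_state == RGE_MAC_ATTACH)
		return;

	/* Re-wrap the DMA buffer in a fresh mblk, same callback */
	rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
	    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
	if (rx_buf->mp == NULL) {
		rge_problem(rgep, "rge_rx_recycle: desballoc() failed");
		return;
	}

	mutex_enter(rgep->rc_lock);
	slot_recy = rgep->rc_next;
	free_srbdp = &rgep->free_srbds[slot_recy];
	free_srbdp->rx_buf = rx_buf;
	rgep->rc_next = NEXT(slot_recy, RGE_BUF_SLOTS);
	rge_atomic_renounce(&rgep->rx_free, 1);	/* one more loanable buffer */
	if (rgep->rx_bcopy && rgep->rx_free == RGE_BUF_SLOTS)
		rgep->rx_bcopy = B_FALSE;	/* every buffer is home again */
	ASSERT(rgep->rx_free <= RGE_BUF_SLOTS);
	mutex_exit(rgep->rc_lock);
}
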
126 static int rge_rx_refill(rge_t *rgep, uint32_t slot);
130 rge_rx_refill(rge_t *rgep, uint32_t slot)
137 srbdp = &rgep->sw_rbds[slot];
138 hw_rbd_p = &rgep->rx_ring[slot];
139 free_slot = rgep->rf_next;
140 free_buf = rgep->free_srbds[free_slot].rx_buf;
143 rgep->free_srbds[free_slot].rx_buf = NULL;
144 hw_rbd_p->host_buf_addr = RGE_BSWAP_32(rgep->head_room +
148 rgep->rf_next = NEXT(free_slot, RGE_BUF_SLOTS);
154 rge_problem(rgep, "rge_rx_refill: free buffer %d is NULL",
156 rgep->rx_bcopy = B_TRUE;
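
rge_rx_refill() swaps a buffer from the free ring (rf_next) into receive slot 'slot'; when the free ring has run dry it flips the driver into bcopy mode. A sketch; the if/else shape, the srbdp->rx_buf swap, and the DMA-cookie expression are assumptions from context:

static int
rge_rx_refill(rge_t *rgep, uint32_t slot)
{
	dma_buf_t *free_buf;
	rge_bd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uint32_t free_slot;

	srbdp = &rgep->sw_rbds[slot];
	hw_rbd_p = &rgep->rx_ring[slot];
	free_slot = rgep->rf_next;
	free_buf = rgep->free_srbds[free_slot].rx_buf;
	if (free_buf != NULL) {
		srbdp->rx_buf = free_buf;
		rgep->free_srbds[free_slot].rx_buf = NULL;
		/* point the hardware BD past the software headroom */
		hw_rbd_p->host_buf_addr = RGE_BSWAP_32(rgep->head_room +
		    (uint32_t)free_buf->pbuf.cookie.dmac_laddress);
		rgep->rf_next = NEXT(free_slot, RGE_BUF_SLOTS);
		return (B_TRUE);
	}

	/*
	 * The free ring is empty: complain, then fall back to
	 * copying receives until recycled buffers come home.
	 */
	rge_problem(rgep, "rge_rx_refill: free buffer %d is NULL",
	    free_slot);
	rgep->rx_bcopy = B_TRUE;
	return (B_FALSE);
}
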
161 static mblk_t *rge_receive_packet(rge_t *rgep, uint32_t slot);
165 rge_receive_packet(rge_t *rgep, uint32_t slot)
181 hw_rbd_p = &rgep->rx_ring[slot];
182 srbdp = &rgep->sw_rbds[slot];
198 rgep->stats.crc_err++;
200 rgep->stats.in_short++;
207 mutex_enter(rgep->genlock);
208 rgep->rge_chip_state = RGE_CHIP_ERROR;
209 mutex_exit(rgep->genlock);
222 maxsize = rgep->ethmax_size;
229 if (rgep->rx_bcopy || packet_len <= RGE_RECV_COPY_SIZE ||
230 !rge_atomic_reserve(&rgep->rx_free, 1)) {
237 rgep->stats.no_rcvbuf++;
246 bcopy(rx_ptr + rgep->head_room, dp, packet_len);
250 mp->b_rptr += rgep->head_room;
257 if (!rge_rx_refill(rgep, slot))
260 rgep->stats.rbytes += packet_len;
261 rgep->stats.rpackets++;
280 rgep->stats.rbytes += VLAN_TAGSZ;
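
rge_receive_packet() turns one completed descriptor into an mblk, and the matches show its three outcomes: error frames only feed crc_err/in_short, an impossible length marks the chip for reset under genlock, and good frames go up either by bcopy or by loaning the DMA buffer itself. A condensed sketch; the RBD_FLAG_*/RBD_LEN_MASK names, the oversize handling, and the VLAN/checksum tail are assumptions:

static mblk_t *
rge_receive_packet(rge_t *rgep, uint32_t slot)
{
	rge_bd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uint32_t rx_status;
	uint_t packet_len;
	uint_t maxsize;
	uint8_t *rx_ptr;
	uchar_t *dp;
	mblk_t *mp;

	hw_rbd_p = &rgep->rx_ring[slot];
	srbdp = &rgep->sw_rbds[slot];

	/* Bad frames are only counted, never passed up */
	rx_status = RGE_BSWAP_32(hw_rbd_p->flags_len);
	if (rx_status & RBD_FLAG_ERROR) {
		if (rx_status & RBD_FLAG_CRC_ERR)
			rgep->stats.crc_err++;
		if (rx_status & RBD_FLAG_RUNT)
			rgep->stats.in_short++;
		return (NULL);
	}

	/* A frame that overflows its buffer means a sick chip */
	packet_len = rx_status & RBD_LEN_MASK;
	if (packet_len > rgep->rxbuf_size - rgep->head_room) {
		mutex_enter(rgep->genlock);
		rgep->rge_chip_state = RGE_CHIP_ERROR;	/* force a reset */
		mutex_exit(rgep->genlock);
		return (NULL);
	}

	maxsize = rgep->ethmax_size;
	if (packet_len > maxsize) {
		/* oversize: dropped (the original's handling is not
		 * visible in the matches) */
		return (NULL);
	}

	if (rgep->rx_bcopy || packet_len <= RGE_RECV_COPY_SIZE ||
	    !rge_atomic_reserve(&rgep->rx_free, 1)) {
		/* copy path: small frame, bcopy mode, or no spare buffer */
		if ((mp = allocb(packet_len, 0)) == NULL) {
			rgep->stats.no_rcvbuf++;
			return (NULL);
		}
		rx_ptr = DMA_VPTR(srbdp->rx_buf->pbuf);
		dp = mp->b_rptr;
		bcopy(rx_ptr + rgep->head_room, dp, packet_len);
		mp->b_wptr = dp + packet_len;
	} else {
		/* loan-up path: send the DMA buffer itself upstream */
		mp = srbdp->rx_buf->mp;
		mp->b_rptr += rgep->head_room;
		mp->b_wptr = mp->b_rptr + packet_len;
		if (!rge_rx_refill(rgep, slot))
			return (NULL);
	}
	rgep->stats.rbytes += packet_len;
	rgep->stats.rpackets++;

	/* VLAN stripping (which adds VLAN_TAGSZ back to rbytes, per
	 * line 280) and checksum-offload flagging follow, elided */
	return (mp);
}
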
310 static mblk_t *rge_receive_ring(rge_t *rgep);
314 rge_receive_ring(rge_t *rgep)
322 ASSERT(mutex_owned(rgep->rx_lock));
328 DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORKERNEL);
329 slot = rgep->rx_next;
330 hw_rbd_p = &rgep->rx_ring[slot];
335 if ((mp = rge_receive_packet(rgep, slot)) != NULL) {
344 RGE_BSWAP_32(rgep->rxbuf_size - rgep->head_room);
347 hw_rbd_p = &rgep->rx_ring[slot];
350 rgep->rx_next = slot;
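
rge_receive_ring() walks descriptors from rx_next until it reaches one the hardware still owns, chaining completed packets through b_next and re-arming each consumed slot. A sketch; the ownership test (BD_FLAG_HW_OWN), the chain bookkeeping, the RGE_RECV_SLOTS bound, and the OWN-bit hand-back are assumptions:

static mblk_t *
rge_receive_ring(rge_t *rgep)
{
	rge_bd_t *hw_rbd_p;
	mblk_t *head;
	mblk_t **tail;
	mblk_t *mp;
	uint32_t slot;

	ASSERT(mutex_owned(rgep->rx_lock));

	/* pick up the descriptors the chip has written back */
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORKERNEL);
	slot = rgep->rx_next;
	hw_rbd_p = &rgep->rx_ring[slot];
	head = NULL;
	tail = &head;
	while (!(hw_rbd_p->flags_len & RGE_BSWAP_32(BD_FLAG_HW_OWN))) {
		if ((mp = rge_receive_packet(rgep, slot)) != NULL) {
			*tail = mp;
			tail = &mp->b_next;
		}

		/* reset the buffer length and return the slot to the
		 * chip (the OWN-bit hand-back itself is elided) */
		hw_rbd_p->flags_len =
		    RGE_BSWAP_32(rgep->rxbuf_size - rgep->head_room);
		slot = NEXT(slot, RGE_RECV_SLOTS);
		hw_rbd_p = &rgep->rx_ring[slot];
	}

	rgep->rx_next = slot;
	return (head);
}
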
357 void rge_receive(rge_t *rgep);
361 rge_receive(rge_t *rgep)
365 mutex_enter(rgep->rx_lock);
366 mp = rge_receive_ring(rgep);
367 mutex_exit(rgep->rx_lock);
370 mac_rx(rgep->mh, NULL, mp);
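
rge_receive() is the thin wrapper the interrupt path calls: it drains the ring under rx_lock, then drops the lock before handing the chain to GLDv3, so no driver lock is held across the mac_rx() upcall. It is essentially complete in the matches; only the mp != NULL guard is assumed:

void
rge_receive(rge_t *rgep)
{
	mblk_t *mp;

	mutex_enter(rgep->rx_lock);
	mp = rge_receive_ring(rgep);
	mutex_exit(rgep->rx_lock);

	if (mp != NULL)
		mac_rx(rgep->mh, NULL, mp);	/* upcall without locks held */
}
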
381 static uint32_t rge_send_claim(rge_t *rgep);
385 rge_send_claim(rge_t *rgep)
390 mutex_enter(rgep->tx_lock);
391 slot = rgep->tx_next;
393 rgep->tx_next = next;
394 rgep->tx_flow++;
395 mutex_exit(rgep->tx_lock);
406 ASSERT(next != rgep->tc_next);
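
rge_send_claim() hands out tx descriptors: under tx_lock it advances tx_next, counts the in-flight sender in tx_flow, and asserts that the claimed region never catches up with the recycle pointer. Nearly complete in the matches; only the NEXT() computation of next is assumed:

static uint32_t
rge_send_claim(rge_t *rgep)
{
	uint32_t slot;
	uint32_t next;

	mutex_enter(rgep->tx_lock);
	slot = rgep->tx_next;
	next = NEXT(slot, RGE_SEND_SLOTS);
	rgep->tx_next = next;
	rgep->tx_flow++;		/* one more sender filling a slot */
	mutex_exit(rgep->tx_lock);

	/*
	 * Claimed slots must stay strictly ahead of those still owned
	 * by rge_send_recycle(); the tx_free reservation taken in
	 * rge_send() is what guarantees this.
	 */
	ASSERT(next != rgep->tc_next);

	return (slot);
}
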
416 void rge_send_recycle(rge_t *rgep);
420 rge_send_recycle(rge_t *rgep)
427 mutex_enter(rgep->tc_lock);
428 tc_head = rgep->tc_next;
429 tc_tail = rgep->tc_tail;
435 hw_sbd_p = &rgep->tx_ring[tc_tail];
444 if (rgep->watchdog == 0)
445 rgep->watchdog = 1;
446 mutex_exit(rgep->tc_lock);
456 rgep->tc_next = NEXT(tc_tail, RGE_SEND_SLOTS);
457 n = rgep->tc_next - tc_head;
458 if (rgep->tc_next < tc_head)
460 rge_atomic_renounce(&rgep->tx_free, n);
461 rgep->watchdog = 0;
462 ASSERT(rgep->tx_free <= RGE_SEND_SLOTS);
465 mutex_exit(rgep->tc_lock);
466 if (rgep->resched_needed &&
467 rgep->rge_mac_state == RGE_MAC_STARTED) {
468 rgep->resched_needed = B_FALSE;
469 mac_tx_update(rgep->mh);
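
rge_send_recycle() reclaims completed tx descriptors. From the matches: under tc_lock it scans back from tc_tail for a descriptor the hardware has released; if there is none it arms the watchdog and leaves, otherwise it returns the freed slots to tx_free (handling index wrap), clears the watchdog, and finally wakes any blocked transmitter via mac_tx_update(). A sketch; the backward scan, the SBD_FLAG_HW_OWN test, and the goto shape are paraphrased assumptions:

void
rge_send_recycle(rge_t *rgep)
{
	rge_bd_t *hw_sbd_p;
	uint32_t tc_tail;
	uint32_t tc_head;
	uint32_t n;

	mutex_enter(rgep->tc_lock);
	tc_head = rgep->tc_next;
	tc_tail = rgep->tc_tail;
	if (tc_head == tc_tail)
		goto done;		/* nothing outstanding */

	/* scan backwards for the newest descriptor the chip released */
	do {
		tc_tail = (tc_tail == 0) ? RGE_SEND_SLOTS - 1 : tc_tail - 1;
		hw_sbd_p = &rgep->tx_ring[tc_tail];
		if (tc_tail == tc_head)
			break;
	} while (hw_sbd_p->flags_len & RGE_BSWAP_32(SBD_FLAG_HW_OWN));

	if (hw_sbd_p->flags_len & RGE_BSWAP_32(SBD_FLAG_HW_OWN)) {
		/* nothing completed: arm the stall watchdog and leave */
		if (rgep->watchdog == 0)
			rgep->watchdog = 1;
		mutex_exit(rgep->tc_lock);
		return;
	}

	/* give every slot up to and including tc_tail back to tx_free */
	rgep->tc_next = NEXT(tc_tail, RGE_SEND_SLOTS);
	n = rgep->tc_next - tc_head;
	if (rgep->tc_next < tc_head)
		n += RGE_SEND_SLOTS;	/* index wrapped around the ring */
	rge_atomic_renounce(&rgep->tx_free, n);
	rgep->watchdog = 0;
	ASSERT(rgep->tx_free <= RGE_SEND_SLOTS);

done:
	mutex_exit(rgep->tc_lock);

	/* a transmitter gave up earlier for want of slots: wake it */
	if (rgep->resched_needed &&
	    rgep->rge_mac_state == RGE_MAC_STARTED) {
		rgep->resched_needed = B_FALSE;
		mac_tx_update(rgep->mh);
	}
}
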
476 static void rge_send_copy(rge_t *rgep, mblk_t *mp, uint16_t tci);
480 rge_send_copy(rge_t *rgep, mblk_t *mp, uint16_t tci)
505 slot = rge_send_claim(rgep);
506 ssbdp = &rgep->sw_sbds[slot];
530 if ((totlen += mblen) <= rgep->ethmax_size) {
536 rgep->stats.obytes += VLAN_TAGSZ;
540 if ((totlen += mblen) <= rgep->ethmax_size) {
545 rgep->stats.obytes += totlen;
546 rgep->stats.tx_pre_ismax = rgep->stats.tx_cur_ismax;
547 if (totlen == rgep->ethmax_size)
548 rgep->stats.tx_cur_ismax = B_TRUE;
550 rgep->stats.tx_cur_ismax = B_FALSE;
557 ASSERT(totlen <= rgep->ethmax_size);
563 hw_sbd_p = &rgep->tx_ring[slot];
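
rge_send_copy() claims a slot and copies the whole (possibly fragmented) mblk chain into that slot's pre-bound DMA buffer; the duplicated length-guarded loops at 530 and 540 are the tagged and untagged variants of the same copy. A condensed single-loop sketch; the VLAN re-insertion, the DMA sync, and the descriptor write-back are assumptions:

static void
rge_send_copy(rge_t *rgep, mblk_t *mp, uint16_t tci)
{
	rge_bd_t *hw_sbd_p;
	sw_sbd_t *ssbdp;
	mblk_t *bp;
	char *txb;
	uint32_t slot;
	size_t totlen;
	size_t mblen;

	slot = rge_send_claim(rgep);
	ssbdp = &rgep->sw_sbds[slot];
	txb = DMA_VPTR(ssbdp->pbuf);

	/*
	 * When tci != 0 the 4-byte VLAN tag is re-inserted between
	 * the MAC addresses and the type field during the copy
	 * (elided); otherwise the chain is copied straight through.
	 */
	totlen = 0;
	for (bp = mp; bp != NULL; bp = bp->b_cont) {
		mblen = MBLKL(bp);
		/* guard: never copy past the MTU-sized tx buffer */
		if ((totlen += mblen) <= rgep->ethmax_size) {
			bcopy(bp->b_rptr, txb, mblen);
			txb += mblen;
		}
	}
	if (tci != 0)
		rgep->stats.obytes += VLAN_TAGSZ;
	rgep->stats.obytes += totlen;

	/* track whether this and the previous frame were max-sized */
	rgep->stats.tx_pre_ismax = rgep->stats.tx_cur_ismax;
	if (totlen == rgep->ethmax_size)
		rgep->stats.tx_cur_ismax = B_TRUE;
	else
		rgep->stats.tx_cur_ismax = B_FALSE;
	ASSERT(totlen <= rgep->ethmax_size);

	DMA_SYNC(ssbdp->pbuf, DDI_DMA_SYNC_FORDEV);

	/* fill in the descriptor and hand it to the chip */
	hw_sbd_p = &rgep->tx_ring[slot];
	hw_sbd_p->flags_len = RGE_BSWAP_32(totlen);	/* plus VLAN,
	    checksum-offload, and OWN flag bits, all elided */

	freemsg(mp);	/* the chain has been fully copied out */
}
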
611 rge_send(rge_t *rgep, mblk_t *mp)
621 if (!rge_atomic_reserve(&rgep->tx_free, 1)) {
623 rgep->stats.defer++;
624 rgep->resched_needed = B_TRUE;
643 ASSERT(rgep->tx_free < RGE_SEND_SLOTS);
644 rge_send_copy(rgep, mp, tci);
649 mutex_enter(rgep->tx_lock);
650 if (--rgep->tx_flow == 0) {
651 DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
652 rgep->tc_tail = rgep->tx_next;
654 rgep->stats.opackets++;
655 mutex_exit(rgep->tx_lock);
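
rge_send() ties the pieces together: reserve a descriptor from tx_free (deferring and requesting a reschedule if none is left), copy the packet in, and let the last concurrent sender publish the new tail. The boolean_t return is inferred from the !rge_send() test further down; the tci extraction is elided:

static boolean_t
rge_send(rge_t *rgep, mblk_t *mp)
{
	uint16_t tci;

	/* reserve one descriptor; if none, ask MAC to retry later */
	if (!rge_atomic_reserve(&rgep->tx_free, 1)) {
		rgep->stats.defer++;
		rgep->resched_needed = B_TRUE;
		return (B_FALSE);
	}

	tci = 0;	/* any VLAN tag is stripped into tci here (elided) */

	ASSERT(rgep->tx_free < RGE_SEND_SLOTS);
	rge_send_copy(rgep, mp, tci);

	/*
	 * When the last concurrent sender finishes (tx_flow drops to
	 * zero), flush the descriptors and publish the new tail for
	 * rge_send_recycle().
	 */
	mutex_enter(rgep->tx_lock);
	if (--rgep->tx_flow == 0) {
		DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
		rgep->tc_tail = rgep->tx_next;
	}
	rgep->stats.opackets++;
	mutex_exit(rgep->tx_lock);

	return (B_TRUE);
}
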
663 rge_t *rgep;
665 rgep = (rge_t *)arg1;
668 rge_send_recycle(rgep);
670 if (rgep->chipid.is_pcie && rgep->tx_free != RGE_SEND_SLOTS) {
680 rge_tx_trigger(rgep);
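
These matches sit in the driver's tx reschedule soft-interrupt handler (the rge_reschedule() name and the uint_t softint shape are assumptions; the listing only shows its rgep uses). It recycles finished descriptors, then re-pokes PCI-E chips, which can ignore a tx request that arrives while the DMA engine is busy:

uint_t
rge_reschedule(caddr_t arg1, caddr_t arg2)
{
	rge_t *rgep;

	rgep = (rge_t *)arg1;
	_NOTE(ARGUNUSED(arg2))

	rge_send_recycle(rgep);

	/*
	 * If any slots are still outstanding on a PCI-E chip,
	 * re-trigger transmission in case the earlier request
	 * was swallowed while the engine was busy.
	 */
	if (rgep->chipid.is_pcie && rgep->tx_free != RGE_SEND_SLOTS)
		rge_tx_trigger(rgep);

	return (DDI_INTR_CLAIMED);
}
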
692 rge_t *rgep = arg; /* private device info */
698 rw_enter(rgep->errlock, RW_READER);
699 if ((rgep->rge_mac_state != RGE_MAC_STARTED) ||
700 (rgep->rge_chip_state != RGE_CHIP_RUNNING) ||
701 (rgep->param_link_up != LINK_STATE_UP)) {
702 rw_exit(rgep->errlock);
712 if (!rge_send(rgep, mp)) {
720 rge_tx_trigger(rgep);
722 rw_exit(rgep->errlock);
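
Finally, the GLDv3 transmit entry point (presumably rge_m_tx(); the name and the b_next walk are assumptions, and whether the trigger is guarded by a sent-anything check is not visible in the matches, so it is shown unconditional). Holding errlock as reader keeps error recovery from resetting the chip mid-send; if the chip or link is down the whole chain is dropped, and on a descriptor shortage the unsent remainder is handed back to MAC:

mblk_t *
rge_m_tx(void *arg, mblk_t *mp)
{
	rge_t *rgep = arg;		/* private device info	*/
	mblk_t *next;

	ASSERT(mp != NULL);

	rw_enter(rgep->errlock, RW_READER);
	if ((rgep->rge_mac_state != RGE_MAC_STARTED) ||
	    (rgep->rge_chip_state != RGE_CHIP_RUNNING) ||
	    (rgep->param_link_up != LINK_STATE_UP)) {
		/* not ready to transmit: drop the whole chain */
		rw_exit(rgep->errlock);
		freemsgchain(mp);
		return (NULL);
	}

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		if (!rge_send(rgep, mp)) {
			/* ring full: give the remainder back to MAC */
			mp->b_next = next;
			break;
		}

		mp = next;
	}
	rge_tx_trigger(rgep);	/* start the hardware on what was queued */
	rw_exit(rgep->errlock);

	return (mp);		/* NULL means everything was sent */
}
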