Lines Matching defs:xnbp
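These hits appear to be from xnb.c, the illumos/OpenSolaris Xen netback driver: xnbp is the per-instance soft state pointer (an xnb_t *) threaded through the kstat machinery, the checksum plumbing, both receive strategies (page transfer and hypervisor copy), the transmit path, and the XenBus connect/disconnect state machine that follow. The short notes and sketches between groups of hits are reconstructions for orientation, hedged where they go beyond the matched lines.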

159 xnb_t *xnbp;
165 xnbp = ksp->ks_private;
172 (knp++)->value.ui64 = xnbp->xnb_stat_rx_cksum_deferred;
173 (knp++)->value.ui64 = xnbp->xnb_stat_tx_cksum_no_need;
174 (knp++)->value.ui64 = xnbp->xnb_stat_rx_rsp_notok;
175 (knp++)->value.ui64 = xnbp->xnb_stat_tx_notify_deferred;
176 (knp++)->value.ui64 = xnbp->xnb_stat_tx_notify_sent;
177 (knp++)->value.ui64 = xnbp->xnb_stat_rx_notify_deferred;
178 (knp++)->value.ui64 = xnbp->xnb_stat_rx_notify_sent;
179 (knp++)->value.ui64 = xnbp->xnb_stat_tx_too_early;
180 (knp++)->value.ui64 = xnbp->xnb_stat_rx_too_early;
181 (knp++)->value.ui64 = xnbp->xnb_stat_rx_allocb_failed;
182 (knp++)->value.ui64 = xnbp->xnb_stat_tx_allocb_failed;
183 (knp++)->value.ui64 = xnbp->xnb_stat_rx_foreign_page;
184 (knp++)->value.ui64 = xnbp->xnb_stat_mac_full;
185 (knp++)->value.ui64 = xnbp->xnb_stat_spurious_intr;
186 (knp++)->value.ui64 = xnbp->xnb_stat_allocation_success;
187 (knp++)->value.ui64 = xnbp->xnb_stat_allocation_failure;
188 (knp++)->value.ui64 = xnbp->xnb_stat_small_allocation_success;
189 (knp++)->value.ui64 = xnbp->xnb_stat_small_allocation_failure;
190 (knp++)->value.ui64 = xnbp->xnb_stat_other_allocation_failure;
191 (knp++)->value.ui64 = xnbp->xnb_stat_rx_pagebndry_crossed;
192 (knp++)->value.ui64 = xnbp->xnb_stat_rx_cpoparea_grown;
193 (knp++)->value.ui64 = xnbp->xnb_stat_csum_hardware;
194 (knp++)->value.ui64 = xnbp->xnb_stat_csum_software;
195 (knp++)->value.ui64 = xnbp->xnb_stat_tx_overflow_page;
196 (knp++)->value.ui64 = xnbp->xnb_stat_tx_unexpected_flags;
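
Lines 159-196 are the body of the ks_update callback, xnb_ks_aux_update(): it refreshes an array of kstat_named_t entries from the soft state's counters, relying on the array order matching the order in which the names were initialized. A minimal sketch of the same pattern, with hypothetical names (xxx_t, xxx_stat_*) standing in for the real ones:

    #include <sys/types.h>
    #include <sys/errno.h>
    #include <sys/kstat.h>

    typedef struct xxx {                /* stand-in for xnb_t */
        uint64_t xxx_stat_first;
        uint64_t xxx_stat_second;
    } xxx_t;

    static int
    xxx_ks_update(kstat_t *ksp, int flag)
    {
        xxx_t *xp;
        kstat_named_t *knp;

        if (flag != KSTAT_READ)
            return (EACCES);

        xp = ksp->ks_private;           /* stashed at create time */
        knp = ksp->ks_data;             /* kstat_named_t[nstat] */

        /* Order must match the name table used at init time. */
        (knp++)->value.ui64 = xp->xxx_stat_first;
        (knp++)->value.ui64 = xp->xxx_stat_second;

        return (0);
    }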
202 xnb_ks_init(xnb_t *xnbp)
212 xnbp->xnb_kstat_aux = kstat_create(ddi_driver_name(xnbp->xnb_devinfo),
213 ddi_get_instance(xnbp->xnb_devinfo), "aux_statistics", "net",
215 if (xnbp->xnb_kstat_aux == NULL)
218 xnbp->xnb_kstat_aux->ks_private = xnbp;
219 xnbp->xnb_kstat_aux->ks_update = xnb_ks_aux_update;
221 knp = xnbp->xnb_kstat_aux->ks_data;
230 kstat_install(xnbp->xnb_kstat_aux);
236 xnb_ks_free(xnb_t *xnbp)
238 kstat_delete(xnbp->xnb_kstat_aux);
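
Lines 202-238 are the lifecycle around that callback: xnb_ks_init() creates a KSTAT_TYPE_NAMED kstat called "aux_statistics" in class "net", points ks_private at the soft state, hooks up ks_update, names each entry, and installs it; xnb_ks_free() is a single kstat_delete(). A hedged reconstruction, with a hypothetical name table and an assumed xxx_ksp field on the sketch type above:

    static const char *aux_names[] = {  /* illustrative subset */
        "rx_cksum_deferred", "tx_cksum_no_need",
    };

    static boolean_t
    xxx_ks_init(xxx_t *xp, dev_info_t *dip)
    {
        uint_t nstat = sizeof (aux_names) / sizeof (aux_names[0]);
        const char **cp = aux_names;
        kstat_named_t *knp;

        xp->xxx_ksp = kstat_create(ddi_driver_name(dip),
            ddi_get_instance(dip), "aux_statistics", "net",
            KSTAT_TYPE_NAMED, nstat, 0);
        if (xp->xxx_ksp == NULL)
            return (B_FALSE);

        xp->xxx_ksp->ks_private = xp;
        xp->xxx_ksp->ks_update = xxx_ks_update;

        for (knp = xp->xxx_ksp->ks_data; nstat-- > 0; knp++, cp++)
            kstat_named_init(knp, *cp, KSTAT_DATA_UINT64);

        kstat_install(xp->xxx_ksp);
        return (B_TRUE);
    }

    /* Teardown is the single call kstat_delete(xp->xxx_ksp). */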
245 xnb_software_csum(xnb_t *xnbp, mblk_t *mp)
247 _NOTE(ARGUNUSED(xnbp));
259 xnb_process_cksum_flags(xnb_t *xnbp, mblk_t *mp, uint32_t capab)
346 xnbp->xnb_stat_csum_hardware++;
378 xnbp->xnb_stat_csum_hardware++;
397 xnbp->xnb_stat_csum_software++;
399 return (xnb_software_csum(xnbp, mp));
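
Lines 245-399 are the checksum plumbing for packets headed up the stack: xnb_process_cksum_flags() inspects the packet and the MAC capability bits (capab) and either tags the mblk for hardware checksum offload (csum_hardware, lines 346 and 378) or computes the checksum itself via xnb_software_csum() (csum_software, line 397). The decision reduces to a shape like the sketch below; the capability test and the mac_hcksum_set() tagging are assumptions about the offload interface, not verbatim source:

    /*
     * Sketch only: the real routine also parses headers and handles
     * partially-checksummed packets before choosing a strategy.
     */
    if ((capab & HCKSUM_INET_FULL_V4) != 0) {
        /* Hardware can do it: just mark the packet. */
        mac_hcksum_set(mp, 0, 0, 0, 0, HCK_FULLCKSUM);
        xnbp->xnb_stat_csum_hardware++;
        return (mp);
    }

    /* No usable offload: fall back to software. */
    xnbp->xnb_stat_csum_software++;
    return (xnb_software_csum(xnbp, mp));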
405 xnb_t *xnbp;
409 xnbp = kmem_zalloc(sizeof (*xnbp), KM_SLEEP);
411 xnbp->xnb_flavour = flavour;
412 xnbp->xnb_flavour_data = flavour_data;
413 xnbp->xnb_devinfo = dip;
414 xnbp->xnb_evtchn = INVALID_EVTCHN;
415 xnbp->xnb_irq = B_FALSE;
416 xnbp->xnb_tx_ring_handle = INVALID_GRANT_HANDLE;
417 xnbp->xnb_rx_ring_handle = INVALID_GRANT_HANDLE;
418 xnbp->xnb_connected = B_FALSE;
419 xnbp->xnb_hotplugged = B_FALSE;
420 xnbp->xnb_detachable = B_FALSE;
421 xnbp->xnb_peer = xvdi_get_oeid(dip);
422 xnbp->xnb_be_status = XNB_STATE_INIT;
423 xnbp->xnb_fe_status = XNB_STATE_INIT;
425 xnbp->xnb_tx_buf_count = 0;
427 xnbp->xnb_rx_hv_copy = B_FALSE;
428 xnbp->xnb_multicast_control = B_FALSE;
430 xnbp->xnb_rx_va = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);
431 ASSERT(xnbp->xnb_rx_va != NULL);
433 if (ddi_get_iblock_cookie(dip, 0, &xnbp->xnb_icookie)
438 xnbp->xnb_rx_cpop = NULL;
439 xnbp->xnb_rx_cpop_count = 0;
441 mutex_init(&xnbp->xnb_tx_lock, NULL, MUTEX_DRIVER,
442 xnbp->xnb_icookie);
443 mutex_init(&xnbp->xnb_rx_lock, NULL, MUTEX_DRIVER,
444 xnbp->xnb_icookie);
445 mutex_init(&xnbp->xnb_state_lock, NULL, MUTEX_DRIVER,
446 xnbp->xnb_icookie);
449 ddi_set_driver_private(dip, xnbp);
452 xnbp->xnb_tx_buf_cache = kmem_cache_create(cachename,
455 NULL, xnbp, NULL, 0);
456 if (xnbp->xnb_tx_buf_cache == NULL)
459 if (!xnb_ks_init(xnbp))
506 xnb_ks_free(xnbp);
509 kmem_cache_destroy(xnbp->xnb_tx_buf_cache);
512 mutex_destroy(&xnbp->xnb_state_lock);
513 mutex_destroy(&xnbp->xnb_rx_lock);
514 mutex_destroy(&xnbp->xnb_tx_lock);
517 vmem_free(heap_arena, xnbp->xnb_rx_va, PAGESIZE);
518 kmem_free(xnbp, sizeof (*xnbp));
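
Lines 405-459 are xnb_attach()'s setup sequence: zero-fill the soft state, record the flavour hooks and the peer domain (xvdi_get_oeid()), reserve a page of kernel VA for the transfer path (xnb_rx_va), fetch the iblock cookie so the three mutexes can be taken at interrupt level, create the tx buffer kmem cache, and bring up the kstats. Lines 506-518 are the matching failure path, releasing everything in exactly the reverse order; the usual goto-unwind shape, with illustrative label names:

    if (!xnb_ks_init(xnbp))
        goto failure_3;
    /* ... any later step would goto failure_4 ... */
    return (DDI_SUCCESS);

    failure_4:
        xnb_ks_free(xnbp);
    failure_3:
        kmem_cache_destroy(xnbp->xnb_tx_buf_cache);
    failure_2:
        mutex_destroy(&xnbp->xnb_state_lock);
        mutex_destroy(&xnbp->xnb_rx_lock);
        mutex_destroy(&xnbp->xnb_tx_lock);
    failure_1:
        vmem_free(heap_arena, xnbp->xnb_rx_va, PAGESIZE);
        kmem_free(xnbp, sizeof (*xnbp));
        return (DDI_FAILURE);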
525 xnb_t *xnbp = ddi_get_driver_private(dip);
527 ASSERT(xnbp != NULL);
528 ASSERT(!xnbp->xnb_connected);
529 ASSERT(xnbp->xnb_tx_buf_count == 0);
535 xnb_ks_free(xnbp);
537 kmem_cache_destroy(xnbp->xnb_tx_buf_cache);
541 mutex_destroy(&xnbp->xnb_state_lock);
542 mutex_destroy(&xnbp->xnb_rx_lock);
543 mutex_destroy(&xnbp->xnb_tx_lock);
545 if (xnbp->xnb_rx_cpop_count > 0)
546 kmem_free(xnbp->xnb_rx_cpop, sizeof (xnbp->xnb_rx_cpop[0])
547 * xnbp->xnb_rx_cpop_count);
549 ASSERT(xnbp->xnb_rx_va != NULL);
550 vmem_free(heap_arena, xnbp->xnb_rx_va, PAGESIZE);
552 kmem_free(xnbp, sizeof (*xnbp));
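
Lines 525-552 are the mirror-image teardown in xnb_detach(): it asserts that the instance is disconnected and that no tx buffers are still outstanding (xnb_tx_buf_count == 0), then destroys the kstats, the cache, and the mutexes, frees the copy-operation scratch array if it was ever grown (lines 545-547), and finally releases xnb_rx_va and the soft state itself.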
562 xnb_alloc_page(xnb_t *xnbp)
573 xnbp->xnb_stat_allocation_failure++;
580 if ((xnbp->xnb_stat_small_allocation_failure++
587 xnbp->xnb_stat_small_allocation_success++;
593 xnbp->xnb_stat_allocation_success++;
613 xnb_free_page(xnb_t *xnbp, mfn_t mfn)
615 _NOTE(ARGUNUSED(xnbp));
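
Lines 562-615 are the page allocator pair behind the transfer receive path: xnb_alloc_page() hands out machine frames and accounts every outcome (allocation_success/failure, plus the small_allocation_* pair for its fallback pool), while xnb_free_page() returns them. Line 580 shows the rate-limiting idiom: the post-incremented failure counter gates the warning so only every Nth failure logs, roughly:

    /*
     * Warn on the first failure and then once per WARNING_RATE_LIMIT
     * failures thereafter (the constant's exact value is an
     * assumption here).
     */
    if ((xnbp->xnb_stat_small_allocation_failure++
        % WARNING_RATE_LIMIT) == 0)
        cmn_err(CE_WARN, "xnb_alloc_page: out of memory");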
631 * Similar to RING_HAS_UNCONSUMED_REQUESTS(&xnbp->rx_ring) but using
644 xnb_to_peer(xnb_t *xnbp, mblk_t *mp)
668 mutex_enter(&xnbp->xnb_rx_lock);
675 if (!(xnbp->xnb_connected && xnbp->xnb_hotplugged)) {
676 mutex_exit(&xnbp->xnb_rx_lock);
678 xnbp->xnb_stat_rx_too_early++;
682 loop = xnbp->xnb_rx_ring.req_cons;
683 prod = xnbp->xnb_rx_ring.rsp_prod_pvt;
684 gop = xnbp->xnb_rx_top;
687 XNB_RING_HAS_UNCONSUMED_REQUESTS(&xnbp->xnb_rx_ring)) {
698 if ((mfn = xnb_alloc_page(xnbp)) == 0) {
699 xnbp->xnb_stat_rx_defer++;
704 rxreq = RING_GET_REQUEST(&xnbp->xnb_rx_ring, loop);
715 hat_devload(kas.a_hat, xnbp->xnb_rx_va, PAGESIZE,
720 valoop = xnbp->xnb_rx_va;
732 hat_unload(kas.a_hat, xnbp->xnb_rx_va, PAGESIZE,
738 gop->domid = xnbp->xnb_peer;
742 rxresp = RING_GET_RESPONSE(&xnbp->xnb_rx_ring, prod);
746 cksum_flags = xnbp->xnb_flavour->xf_cksum_to_peer(xnbp, mp);
748 xnbp->xnb_stat_rx_cksum_deferred++;
751 rxresp->id = RING_GET_REQUEST(&xnbp->xnb_rx_ring, prod)->id;
764 if (loop == xnbp->xnb_rx_ring.req_cons) {
765 mutex_exit(&xnbp->xnb_rx_lock);
777 if (HYPERVISOR_grant_table_op(GNTTABOP_transfer, xnbp->xnb_rx_top,
778 loop - xnbp->xnb_rx_ring.req_cons) != 0) {
782 loop = xnbp->xnb_rx_ring.req_cons;
783 prod = xnbp->xnb_rx_ring.rsp_prod_pvt;
784 gop = xnbp->xnb_rx_top;
809 xnb_free_page(xnbp, gop->mfn);
819 RING_GET_RESPONSE(&xnbp->xnb_rx_ring, prod)->status =
822 xnbp->xnb_stat_ipackets++;
823 xnbp->xnb_stat_rbytes += len;
831 xnbp->xnb_rx_ring.req_cons = loop;
832 xnbp->xnb_rx_ring.rsp_prod_pvt = prod;
836 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xnbp->xnb_rx_ring, notify);
838 ec_notify_via_evtchn(xnbp->xnb_evtchn);
839 xnbp->xnb_stat_rx_notify_sent++;
841 xnbp->xnb_stat_rx_notify_deferred++;
845 xnbp->xnb_stat_rx_defer++;
847 mutex_exit(&xnbp->xnb_rx_lock);
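
Lines 631-847 are xnb_to_peer(), the GNTTABOP_transfer receive strategy: under xnb_rx_lock it snapshots req_cons and rsp_prod_pvt into locals (loop/prod), consumes rx requests and produces responses in lock-step, issues one transfer hypercall for the batch (line 777), writes the locals back, and publishes. The control skeleton, simplified to a direct req_prod comparison in place of the driver's XNB_RING_HAS_UNCONSUMED_REQUESTS macro:

    RING_IDX loop, prod;
    boolean_t notify;

    mutex_enter(&xnbp->xnb_rx_lock);

    loop = xnbp->xnb_rx_ring.req_cons;      /* next request */
    prod = xnbp->xnb_rx_ring.rsp_prod_pvt;  /* next response slot */

    while (loop != xnbp->xnb_rx_ring.sring->req_prod) {
        netif_rx_request_t *rxreq =
            RING_GET_REQUEST(&xnbp->xnb_rx_ring, loop);
        netif_rx_response_t *rxresp =
            RING_GET_RESPONSE(&xnbp->xnb_rx_ring, prod);

        /* ... move one packet, fill in the response ... */
        rxresp->id = rxreq->id;

        loop++;
        prod++;
    }

    xnbp->xnb_rx_ring.req_cons = loop;
    xnbp->xnb_rx_ring.rsp_prod_pvt = prod;

    /* Publish responses; ring the peer only if it asked us to. */
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xnbp->xnb_rx_ring, notify);
    if (notify) {
        ec_notify_via_evtchn(xnbp->xnb_evtchn);
        xnbp->xnb_stat_rx_notify_sent++;
    } else {
        xnbp->xnb_stat_rx_notify_deferred++;
    }

    mutex_exit(&xnbp->xnb_rx_lock);

The real loop also defers the remainder of the chain (rx_defer) when xnb_alloc_page() fails, and bails out early if nothing was consumed (line 764).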
861 grow_cpop_area(xnb_t *xnbp)
866 ASSERT(MUTEX_HELD(&xnbp->xnb_rx_lock));
868 count = xnbp->xnb_rx_cpop_count + CPOP_DEFCNT;
871 xnbp->xnb_stat_other_allocation_failure++;
875 bcopy(xnbp->xnb_rx_cpop, new,
876 sizeof (xnbp->xnb_rx_cpop[0]) * xnbp->xnb_rx_cpop_count);
878 kmem_free(xnbp->xnb_rx_cpop,
879 sizeof (xnbp->xnb_rx_cpop[0]) * xnbp->xnb_rx_cpop_count);
881 xnbp->xnb_rx_cpop = new;
882 xnbp->xnb_rx_cpop_count = count;
884 xnbp->xnb_stat_rx_cpoparea_grown++;
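
Lines 861-884 are grow_cpop_area(), which enlarges the preallocated gnttab_copy_t scratch array (xnb_rx_cpop) by CPOP_DEFCNT slots at a time, under xnb_rx_lock. With no realloc in the kernel allocator, it is the allocate/copy/free idiom, reconstructed:

    count = xnbp->xnb_rx_cpop_count + CPOP_DEFCNT;
    new = kmem_alloc(sizeof (new[0]) * count, KM_NOSLEEP);
    if (new == NULL) {
        xnbp->xnb_stat_other_allocation_failure++;
        return (B_FALSE);
    }

    /* Preserve the existing descriptors, then swap the arrays. */
    if (xnbp->xnb_rx_cpop_count > 0) {
        bcopy(xnbp->xnb_rx_cpop, new,
            sizeof (new[0]) * xnbp->xnb_rx_cpop_count);
        kmem_free(xnbp->xnb_rx_cpop,
            sizeof (new[0]) * xnbp->xnb_rx_cpop_count);
    }

    xnbp->xnb_rx_cpop = new;
    xnbp->xnb_rx_cpop_count = count;
    xnbp->xnb_stat_rx_cpoparea_grown++;
    return (B_TRUE);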
939 setup_gop(xnb_t *xnbp, gnttab_copy_t *gp, uchar_t *rptr,
942 ASSERT(xnbp != NULL && gp != NULL);
954 gp->dest.domid = xnbp->xnb_peer;
961 xnb_copy_to_peer(xnb_t *xnbp, mblk_t *mp)
973 if (!xnbp->xnb_rx_hv_copy)
974 return (xnb_to_peer(xnbp, mp));
995 mutex_enter(&xnbp->xnb_rx_lock);
997 if (!(xnbp->xnb_connected && xnbp->xnb_hotplugged)) {
998 mutex_exit(&xnbp->xnb_rx_lock);
1000 xnbp->xnb_stat_rx_too_early++;
1004 loop = xnbp->xnb_rx_ring.req_cons;
1005 prod = xnbp->xnb_rx_ring.rsp_prod_pvt;
1008 XNB_RING_HAS_UNCONSUMED_REQUESTS(&xnbp->xnb_rx_ring)) {
1018 rxreq = RING_GET_REQUEST(&xnbp->xnb_rx_ring, loop);
1032 gop_cp = xnbp->xnb_rx_cpop;
1065 xnbp->xnb_stat_rx_foreign_page++;
1083 if (item_count == xnbp->xnb_rx_cpop_count) {
1084 if (!grow_cpop_area(xnbp))
1086 gop_cp = &xnbp->xnb_rx_cpop[item_count];
1099 xnbp->xnb_stat_rx_pagebndry_crossed++;
1104 setup_gop(xnbp, gop_cp, r_tmp, r_offset,
1126 if (HYPERVISOR_grant_table_op(GNTTABOP_copy, xnbp->xnb_rx_cpop,
1133 rxresp = RING_GET_RESPONSE(&xnbp->xnb_rx_ring, prod);
1142 cksum_flags = xnbp->xnb_flavour->xf_cksum_to_peer(xnbp, mp);
1144 xnbp->xnb_stat_rx_cksum_deferred++;
1147 rxresp->id = RING_GET_REQUEST(&xnbp->xnb_rx_ring, prod)->id;
1155 if (xnbp->xnb_rx_cpop[i].status != 0) {
1157 (int)xnbp->xnb_rx_cpop[i].status,
1165 RING_GET_RESPONSE(&xnbp->xnb_rx_ring, prod)->status =
1167 xnbp->xnb_stat_rx_rsp_notok++;
1169 xnbp->xnb_stat_ipackets++;
1170 xnbp->xnb_stat_rbytes += len;
1182 if (loop == xnbp->xnb_rx_ring.req_cons) {
1183 mutex_exit(&xnbp->xnb_rx_lock);
1193 xnbp->xnb_rx_ring.req_cons = loop;
1194 xnbp->xnb_rx_ring.rsp_prod_pvt = prod;
1198 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xnbp->xnb_rx_ring, notify);
1200 ec_notify_via_evtchn(xnbp->xnb_evtchn);
1201 xnbp->xnb_stat_rx_notify_sent++;
1203 xnbp->xnb_stat_rx_notify_deferred++;
1207 xnbp->xnb_stat_rx_defer++;
1209 mutex_exit(&xnbp->xnb_rx_lock);
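
Lines 939-1209 are the hypervisor-copy receive strategy: xnb_copy_to_peer() runs when the peer negotiated request-rx-copy (line 973 falls back to the transfer-based xnb_to_peer() otherwise), and setup_gop() fills one gnttab_copy_t per contiguous run of bytes, starting a fresh descriptor at every page boundary (rx_pagebndry_crossed, line 1099) and growing the scratch array when it fills (line 1083). Each descriptor looks roughly like this fragment (mfn, s_off, d_off, and len are free variables here):

    gp->flags = GNTCOPY_dest_gref;      /* destination is a grant ref */
    gp->source.domid = DOMID_SELF;
    gp->source.u.gmfn = mfn;            /* frame backing the mblk data */
    gp->source.offset = s_off;          /* offset of rptr in its page */
    gp->dest.domid = xnbp->xnb_peer;
    gp->dest.u.ref = rxreq->gref;       /* grant ref from the request */
    gp->dest.offset = d_off;
    gp->len = len;                      /* never crosses a page */

A single GNTTABOP_copy hypercall then moves the whole batch (line 1126); each descriptor's status is checked afterwards (line 1155), and any non-zero status turns the response into an error (rx_rsp_notok, line 1167).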
1219 xnb_tx_notify_peer(xnb_t *xnbp, boolean_t force)
1223 ASSERT(MUTEX_HELD(&xnbp->xnb_tx_lock));
1226 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xnbp->xnb_tx_ring, notify);
1228 ec_notify_via_evtchn(xnbp->xnb_evtchn);
1229 xnbp->xnb_stat_tx_notify_sent++;
1231 xnbp->xnb_stat_tx_notify_deferred++;
1236 xnb_tx_mark_complete(xnb_t *xnbp, RING_IDX id, int16_t status)
1241 ASSERT(MUTEX_HELD(&xnbp->xnb_tx_lock));
1243 i = xnbp->xnb_tx_ring.rsp_prod_pvt;
1245 txresp = RING_GET_RESPONSE(&xnbp->xnb_tx_ring, i);
1249 xnbp->xnb_tx_ring.rsp_prod_pvt = i + 1;
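
Lines 1219-1249: xnb_tx_notify_peer() is the tx-side twin of the notify-or-defer logic above, and xnb_tx_mark_complete() posts one response; both run under xnb_tx_lock. Posting a response, reconstructed from the matched lines:

    netif_tx_response_t *txresp;
    RING_IDX i = xnbp->xnb_tx_ring.rsp_prod_pvt;

    txresp = RING_GET_RESPONSE(&xnbp->xnb_tx_ring, i);
    txresp->id = id;            /* echo the request id */
    txresp->status = status;    /* NETIF_RSP_OKAY or NETIF_RSP_ERROR */

    xnbp->xnb_tx_ring.rsp_prod_pvt = i + 1;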
1260 xnb_t *xnbp = txp->xt_xnbp;
1262 kmem_cache_free(xnbp->xnb_tx_buf_cache, txp);
1264 xnbp->xnb_tx_buf_outstanding--;
1272 xnb_t *xnbp = arg;
1279 txp->xt_xnbp = xnbp;
1282 if (ddi_dma_alloc_handle(xnbp->xnb_devinfo, &buf_dma_attr,
1303 atomic_inc_32(&xnbp->xnb_tx_buf_count);
1304 xnbp->xnb_tx_buf_outstanding++;
1323 xnb_t *xnbp = arg;
1329 atomic_dec_32(&xnbp->xnb_tx_buf_count);
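
Lines 1260-1329 are the kmem cache constructor and destructor for tx buffers: the constructor records the back-pointer (xt_xnbp), allocates a DMA handle against xnb_devinfo, and bumps xnb_tx_buf_count; the destructor drops the count again, which is what lets xnb_detach() assert it is zero (line 529). The cache itself came from lines 452-455 at attach time, plausibly the standard form (the middle arguments are assumptions):

    xnbp->xnb_tx_buf_cache = kmem_cache_create(cachename,
        sizeof (xnb_txbuf_t), 0,        /* bufsize, alignment */
        xnb_txbuf_constructor,          /* runs once per new buffer */
        xnb_txbuf_destructor,           /* runs before a buffer is freed */
        NULL,                           /* no reclaim callback */
        xnbp,                           /* private arg passed to ctor/dtor */
        NULL, 0);                       /* default vmem source, no flags */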
1336 xnb_from_peer(xnb_t *xnbp)
1346 ASSERT(MUTEX_HELD(&xnbp->xnb_tx_lock));
1352 RING_FINAL_CHECK_FOR_REQUESTS(&xnbp->xnb_tx_ring, work_to_do);
1355 xnb_tx_notify_peer(xnbp, need_notify);
1360 start = xnbp->xnb_tx_ring.req_cons;
1361 end = xnbp->xnb_tx_ring.sring->req_prod;
1375 xnbp->xnb_peer, (end - start));
1378 BACK_RING_ATTACH(&xnbp->xnb_tx_ring,
1379 (netif_tx_sring_t *)xnbp->xnb_tx_ring_addr, PAGESIZE);
1385 cop = xnbp->xnb_tx_cop;
1386 txpp = xnbp->xnb_tx_bufp;
1396 txreq = RING_GET_REQUEST(&xnbp->xnb_tx_ring, loop);
1408 xnbp->xnb_stat_tx_unexpected_flags++;
1411 xnb_tx_mark_complete(xnbp, txreq->id, NETIF_RSP_ERROR);
1422 RING_GET_REQUEST(&xnbp->xnb_tx_ring, loop);
1426 ASSERT(xnbp->xnb_multicast_control);
1427 status = xnbp->xnb_flavour->xf_mcast_add(xnbp,
1431 ASSERT(xnbp->xnb_multicast_control);
1432 status = xnbp->xnb_flavour->xf_mcast_del(xnbp,
1442 xnb_tx_mark_complete(xnbp, txreq->id,
1456 xnbp->xnb_stat_tx_overflow_page++;
1459 xnb_tx_mark_complete(xnbp, txreq->id, NETIF_RSP_ERROR);
1465 txp = kmem_cache_alloc(xnbp->xnb_tx_buf_cache,
1473 kmem_cache_free(xnbp->xnb_tx_buf_cache, txp);
1481 cop->source.domid = xnbp->xnb_peer;
1504 xnbp->xnb_tx_ring.req_cons = loop;
1510 xnbp->xnb_tx_cop, n_data_req) != 0) {
1514 txpp = xnbp->xnb_tx_bufp;
1517 kmem_cache_free(xnbp->xnb_tx_buf_cache, *txpp);
1525 txpp = xnbp->xnb_tx_bufp;
1526 cop = xnbp->xnb_tx_cop;
1532 txreq = RING_GET_REQUEST(&xnbp->xnb_tx_ring, txp->xt_idx);
1540 xnb_tx_mark_complete(xnbp, txp->xt_id, NETIF_RSP_ERROR);
1557 mp = xnbp->xnb_flavour->xf_cksum_from_peer(xnbp,
1559 xnbp->xnb_stat_tx_cksum_no_need++;
1573 xnbp->xnb_stat_opackets++;
1574 xnbp->xnb_stat_obytes += txreq->size;
1576 xnb_tx_mark_complete(xnbp, txp->xt_id, NETIF_RSP_OKAY);
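
Lines 1336-1576 are xnb_from_peer(), the transmit path, entered with xnb_tx_lock held. It is a two-pass batch. Pass one walks the unconsumed requests: control requests (extra-info flags, multicast add/del for flavours with xnb_multicast_control) are answered immediately, oversized or strangely-flagged requests are failed (tx_overflow_page, tx_unexpected_flags), and each data request gets a tx buffer and one GNTCOPY_source_gref descriptor. A single hypercall (line 1510) then copies every payload, and pass two walks the results, builds the mblk chain, lets the flavour fix up checksums (xf_cksum_from_peer, tx_cksum_no_need), and marks each request complete. In outline, with the bookkeeping elided:

    /* Pass 1: one grant-copy descriptor per data request. */
    for (loop = start; loop != end; loop++) {
        txreq = RING_GET_REQUEST(&xnbp->xnb_tx_ring, loop);
        /* ... validate, allocate a txp from the buf cache ... */
        cop->flags = GNTCOPY_source_gref;
        cop->source.u.ref = txreq->gref;
        cop->source.domid = xnbp->xnb_peer;
        cop->source.offset = txreq->offset;
        /* ... destination: the tx buffer's own page ... */
        cop++;
        n_data_req++;
    }
    xnbp->xnb_tx_ring.req_cons = loop;

    /* One hypercall moves the whole batch. */
    if (HYPERVISOR_grant_table_op(GNTTABOP_copy,
        xnbp->xnb_tx_cop, n_data_req) != 0) {
        /* free every txp and bail (lines 1514-1517) */
    }

    /* Pass 2: per-buffer status check, mblk build, response. */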
1591 xnb_t *xnbp = (xnb_t *)arg;
1594 xnbp->xnb_stat_intr++;
1596 mutex_enter(&xnbp->xnb_tx_lock);
1598 ASSERT(xnbp->xnb_connected);
1600 mp = xnb_from_peer(xnbp);
1602 mutex_exit(&xnbp->xnb_tx_lock);
1604 if (!xnbp->xnb_hotplugged) {
1605 xnbp->xnb_stat_tx_too_early++;
1609 xnbp->xnb_stat_spurious_intr++;
1613 xnbp->xnb_flavour->xf_from_peer(xnbp, mp);
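
Lines 1591-1613 are the interrupt handler: count the interrupt, drain the tx ring under xnb_tx_lock, and hand the resulting chain up through the flavour hook. A chain that arrives before the hotplug script has run is dropped (tx_too_early), and an empty drain is counted as spurious. A reconstructed outline (the error-path details are an assumption):

    static uint_t
    xnb_intr(caddr_t arg)
    {
        xnb_t *xnbp = (xnb_t *)arg;
        mblk_t *mp;

        xnbp->xnb_stat_intr++;

        mutex_enter(&xnbp->xnb_tx_lock);
        ASSERT(xnbp->xnb_connected);
        mp = xnb_from_peer(xnbp);
        mutex_exit(&xnbp->xnb_tx_lock);

        if (!xnbp->xnb_hotplugged) {
            xnbp->xnb_stat_tx_too_early++;
            goto fail;
        }
        if (mp == NULL) {
            xnbp->xnb_stat_spurious_intr++;
            goto fail;
        }

        xnbp->xnb_flavour->xf_from_peer(xnbp, mp);
        return (DDI_INTR_CLAIMED);

    fail:
        freemsgchain(mp);   /* safe on NULL */
        return (DDI_INTR_CLAIMED);
    }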
1626 xnb_read_xs_config(xnb_t *xnbp)
1631 xsname = xvdi_get_xsname(xnbp->xnb_devinfo);
1641 if (ether_aton(mac, xnbp->xnb_mac_addr) != ETHERADDRL) {
1655 xnb_read_oe_config(xnb_t *xnbp)
1660 oename = xvdi_get_oename(xnbp->xnb_devinfo);
1663 "event-channel", "%u", &xnbp->xnb_fe_evtchn,
1664 "tx-ring-ref", "%lu", &xnbp->xnb_tx_ring_ref,
1665 "rx-ring-ref", "%lu", &xnbp->xnb_rx_ring_ref,
1681 xnbp->xnb_rx_hv_copy = B_TRUE;
1690 xnbp->xnb_multicast_control = B_TRUE;
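
Lines 1626-1690 read configuration from XenStore: xnb_read_xs_config() pulls the "mac" property from the backend's own node and parses it with ether_aton() (line 1641 rejects anything that is not ETHERADDRL bytes), while xnb_read_oe_config() gathers the event channel and both ring references from the other end's node and then probes optional features (request-rx-copy sets xnb_rx_hv_copy, a multicast-control feature sets xnb_multicast_control). The gather step, as the matched format strings suggest:

    /* Read the three mandatory frontend properties in one call. */
    if (xenbus_gather(XBT_NULL, oename,
        "event-channel", "%u", &xnbp->xnb_fe_evtchn,
        "tx-ring-ref", "%lu", &xnbp->xnb_tx_ring_ref,
        "rx-ring-ref", "%lu", &xnbp->xnb_rx_ring_ref,
        NULL) != 0)
        return (B_FALSE);

    /* Optional feature flags default to absent. */
    if (xenbus_scanf(XBT_NULL, oename,
        "request-rx-copy", "%d", &i) != 0)
        i = 0;
    if (i != 0)
        xnbp->xnb_rx_hv_copy = B_TRUE;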
1720 xnb_start_connect(xnb_t *xnbp)
1722 dev_info_t *dip = xnbp->xnb_devinfo;
1730 if (!xnbp->xnb_flavour->xf_start_connect(xnbp)) {
1740 xnbp->xnb_flavour->xf_peer_disconnected(xnbp);
1750 xnb_t *xnbp = ddi_get_driver_private(dip);
1756 ASSERT(!xnbp->xnb_connected);
1769 xnbp->xnb_tx_ring_addr = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE,
1771 ASSERT(xnbp->xnb_tx_ring_addr != NULL);
1774 map_op.host_addr = (uint64_t)((long)xnbp->xnb_tx_ring_addr);
1776 map_op.ref = xnbp->xnb_tx_ring_ref;
1777 map_op.dom = xnbp->xnb_peer;
1778 hat_prepare_mapping(kas.a_hat, xnbp->xnb_tx_ring_addr, NULL);
1784 xnbp->xnb_tx_ring_handle = map_op.handle;
1787 BACK_RING_INIT(&xnbp->xnb_tx_ring,
1788 (netif_tx_sring_t *)xnbp->xnb_tx_ring_addr, PAGESIZE);
1791 xnbp->xnb_rx_ring_addr = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE,
1793 ASSERT(xnbp->xnb_rx_ring_addr != NULL);
1796 map_op.host_addr = (uint64_t)((long)xnbp->xnb_rx_ring_addr);
1798 map_op.ref = xnbp->xnb_rx_ring_ref;
1799 map_op.dom = xnbp->xnb_peer;
1800 hat_prepare_mapping(kas.a_hat, xnbp->xnb_rx_ring_addr, NULL);
1806 xnbp->xnb_rx_ring_handle = map_op.handle;
1809 BACK_RING_INIT(&xnbp->xnb_rx_ring,
1810 (netif_rx_sring_t *)xnbp->xnb_rx_ring_addr, PAGESIZE);
1813 if (xvdi_bind_evtchn(dip, xnbp->xnb_fe_evtchn) != DDI_SUCCESS) {
1815 "cannot bind event channel %d", xnbp->xnb_evtchn);
1816 xnbp->xnb_evtchn = INVALID_EVTCHN;
1819 xnbp->xnb_evtchn = xvdi_get_evtchn(dip);
1827 mutex_enter(&xnbp->xnb_tx_lock);
1828 mutex_enter(&xnbp->xnb_rx_lock);
1830 xnbp->xnb_connected = B_TRUE;
1832 mutex_exit(&xnbp->xnb_rx_lock);
1833 mutex_exit(&xnbp->xnb_tx_lock);
1836 if (ddi_add_intr(dip, 0, NULL, NULL, xnb_intr, (caddr_t)xnbp)
1841 xnbp->xnb_irq = B_TRUE;
1846 mutex_enter(&xnbp->xnb_tx_lock);
1847 mutex_enter(&xnbp->xnb_rx_lock);
1849 xnbp->xnb_connected = B_FALSE;
1851 mutex_exit(&xnbp->xnb_rx_lock);
1852 mutex_exit(&xnbp->xnb_tx_lock);
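
Lines 1720-1852 are the connect path. xnb_start_connect() runs once both ends are ready, delegating to the flavour's xf_start_connect() and unwinding through xf_peer_disconnected() on failure. xnb_connect_rings() then maps each ring page the peer granted, wraps it in a back ring, binds the frontend's event channel, flips xnb_connected under both data locks, and only then registers the interrupt handler, which is why xnb_intr() can assert xnb_connected. Mapping one ring, reconstructed around the matched lines (the direct hypercall here is an assumption about the wrapper used):

    gnttab_map_grant_ref_t map_op;

    /* A page of kernel VA to receive the peer's ring page. */
    xnbp->xnb_tx_ring_addr = vmem_xalloc(heap_arena, PAGESIZE,
        PAGESIZE, 0, 0, 0, 0, VM_SLEEP);

    map_op.host_addr = (uint64_t)((long)xnbp->xnb_tx_ring_addr);
    map_op.flags = GNTMAP_host_map;
    map_op.ref = xnbp->xnb_tx_ring_ref;     /* from xnb_read_oe_config() */
    map_op.dom = xnbp->xnb_peer;

    hat_prepare_mapping(kas.a_hat, xnbp->xnb_tx_ring_addr, NULL);
    if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
        &map_op, 1) != 0 || map_op.status != GNTST_okay)
        goto fail;
    xnbp->xnb_tx_ring_handle = map_op.handle;

    BACK_RING_INIT(&xnbp->xnb_tx_ring,
        (netif_tx_sring_t *)xnbp->xnb_tx_ring_addr, PAGESIZE);

xnb_disconnect_rings() (lines 1860-1913) undoes each step: remove the interrupt, unbind the event channel, unmap each ring with GNTTABOP_unmap_grant_ref, hat_release_mapping(), vmem_free(), and reset the handles to INVALID_GRANT_HANDLE.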
1860 xnb_t *xnbp = ddi_get_driver_private(dip);
1862 if (xnbp->xnb_irq) {
1864 xnbp->xnb_irq = B_FALSE;
1867 if (xnbp->xnb_evtchn != INVALID_EVTCHN) {
1869 xnbp->xnb_evtchn = INVALID_EVTCHN;
1872 if (xnbp->xnb_rx_ring_handle != INVALID_GRANT_HANDLE) {
1876 xnbp->xnb_rx_ring_addr;
1878 unmap_op.handle = xnbp->xnb_rx_ring_handle;
1885 xnbp->xnb_rx_ring_handle = INVALID_GRANT_HANDLE;
1888 if (xnbp->xnb_rx_ring_addr != NULL) {
1889 hat_release_mapping(kas.a_hat, xnbp->xnb_rx_ring_addr);
1890 vmem_free(heap_arena, xnbp->xnb_rx_ring_addr, PAGESIZE);
1891 xnbp->xnb_rx_ring_addr = NULL;
1894 if (xnbp->xnb_tx_ring_handle != INVALID_GRANT_HANDLE) {
1898 xnbp->xnb_tx_ring_addr;
1900 unmap_op.handle = xnbp->xnb_tx_ring_handle;
1907 xnbp->xnb_tx_ring_handle = INVALID_GRANT_HANDLE;
1910 if (xnbp->xnb_tx_ring_addr != NULL) {
1911 hat_release_mapping(kas.a_hat, xnbp->xnb_tx_ring_addr);
1912 vmem_free(heap_arena, xnbp->xnb_tx_ring_addr, PAGESIZE);
1913 xnbp->xnb_tx_ring_addr = NULL;
1922 xnb_t *xnbp = ddi_get_driver_private(dip);
1925 ASSERT(xnbp != NULL);
1930 if (xnbp->xnb_connected)
1933 if (!xnb_read_oe_config(xnbp) ||
1934 !xnbp->xnb_flavour->xf_peer_connected(xnbp)) {
1945 mutex_enter(&xnbp->xnb_state_lock);
1946 xnbp->xnb_fe_status = XNB_STATE_READY;
1947 if (xnbp->xnb_be_status == XNB_STATE_READY)
1948 xnb_start_connect(xnbp);
1949 mutex_exit(&xnbp->xnb_state_lock);
1955 xnbp->xnb_detachable = B_TRUE;
1965 xnbp->xnb_flavour->xf_peer_disconnected(xnbp);
1967 mutex_enter(&xnbp->xnb_tx_lock);
1968 mutex_enter(&xnbp->xnb_rx_lock);
1971 xnbp->xnb_connected = B_FALSE;
1973 mutex_exit(&xnbp->xnb_rx_lock);
1974 mutex_exit(&xnbp->xnb_tx_lock);
1985 xnbp->xnb_detachable = B_TRUE;
1999 xnb_t *xnbp = ddi_get_driver_private(dip);
2002 ASSERT(xnbp != NULL);
2007 if (xnbp->xnb_hotplugged)
2010 if (!xnb_read_xs_config(xnbp))
2013 if (!xnbp->xnb_flavour->xf_hotplug_connected(xnbp))
2016 mutex_enter(&xnbp->xnb_tx_lock);
2017 mutex_enter(&xnbp->xnb_rx_lock);
2019 xnbp->xnb_hotplugged = B_TRUE;
2021 mutex_exit(&xnbp->xnb_rx_lock);
2022 mutex_exit(&xnbp->xnb_tx_lock);
2024 mutex_enter(&xnbp->xnb_state_lock);
2025 xnbp->xnb_be_status = XNB_STATE_READY;
2026 if (xnbp->xnb_fe_status == XNB_STATE_READY)
2027 xnb_start_connect(xnbp);
2028 mutex_exit(&xnbp->xnb_state_lock);
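
Lines 1922-2028 are the two XenBus callbacks that implement the connect rendezvous. The frontend state handler re-reads the peer's configuration and lets the flavour veto (xf_peer_connected) before setting xnb_fe_status = XNB_STATE_READY; the hotplug handler reads the local configuration (xnb_read_xs_config) and confirms via xf_hotplug_connected before setting xnb_be_status = XNB_STATE_READY. Both do so under xnb_state_lock and call xnb_start_connect() only if the other side is already READY (lines 1945-1948 and 2024-2027 are mirror images), so whichever event arrives second completes the connection. On peer disconnect, the handler takes both data locks to clear xnb_connected and sets xnb_detachable so a pending detach can proceed.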