Lines Matching refs:dp

108 #define	GET_TXBUF(dp, sn)	\
109 &(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)]
114 #define MAXPKTBUF(dp) \
115 ((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL)
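
These two macros carry most of the buffer bookkeeping below: GET_TXBUF maps an unbounded transmit sequence number onto a physical tx_buf[] slot, and MAXPKTBUF is the largest frame the driver must buffer (the MTU plus the 14-byte Ethernet header, the 4-byte 802.1Q tag and the 4-byte FCS). A minimal standalone sketch of the slot arithmetic, assuming SLOT() is a plain modulo (its definition is not among the matched lines):

    #include <stdio.h>

    typedef long long seqnum_t;             /* unbounded, as in the driver */

    #define SLOT(sn, size)  ((int)((sn) % (size)))   /* assumed definition */

    int
    main(void)
    {
        int tx_buf_size = 256;              /* hypothetical ring size */
        seqnum_t tx_slots_base = 1000;      /* survives ring re-inits */
        seqnum_t sn = 300;                  /* logical buffer number */

        /* the index GET_TXBUF() would use into tx_buf[] */
        printf("tx_buf slot = %d\n", SLOT(tx_slots_base + sn, tx_buf_size));
        return (0);
    }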
131 static void gem_nd_setup(struct gem_dev *dp);
132 static void gem_nd_cleanup(struct gem_dev *dp);
140 static void gem_mii_link_watcher(struct gem_dev *dp);
141 static int gem_mac_init(struct gem_dev *dp);
142 static int gem_mac_start(struct gem_dev *dp);
143 static int gem_mac_stop(struct gem_dev *dp, uint_t flags);
144 static void gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp);
190 gem_prop_get_int(struct gem_dev *dp, char *prop_template, int def_val)
194 (void) sprintf(propname, prop_template, dp->name);
196 return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip,
218 gem_dump_packet(struct gem_dev *dp, char *title, mblk_t *mp,
352 cmn_err(CE_CONT, "!%s: %s: %s", dp->name, title, msg);
363 gem_rx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
367 int rx_desc_unit_shift = dp->gc.gc_rx_desc_unit_shift;
375 n = dp->gc.gc_rx_ring_size - head;
377 (void) ddi_dma_sync(dp->desc_dma_handle,
384 (void) ddi_dma_sync(dp->desc_dma_handle,
391 gem_tx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
395 int tx_desc_unit_shift = dp->gc.gc_tx_desc_unit_shift;
403 n = dp->gc.gc_tx_ring_size - head;
405 (void) ddi_dma_sync(dp->desc_dma_handle,
406 (off_t)(dp->tx_ring_dma - dp->rx_ring_dma),
412 (void) ddi_dma_sync(dp->desc_dma_handle,
414 + (dp->tx_ring_dma - dp->rx_ring_dma)),
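
Both sync helpers cope with a descriptor range that wraps past the end of the ring by issuing at most two ddi_dma_sync() calls: one for the tail segment [head, ring_size) and one for the wrapped remainder starting at slot 0 (the tx variant additionally offsets by tx_ring_dma - rx_ring_dma, since both rings live in one allocation). A standalone sketch of just the range splitting, with a stand-in for the DDI call:

    #include <stdio.h>

    /* stand-in for ddi_dma_sync() over descriptor slots [off, off+len) */
    static void
    sync_range(int off, int len)
    {
        printf("sync slots [%d, %d)\n", off, off + len);
    }

    /* mirrors the wrap logic of gem_rx/tx_desc_dma_sync() */
    static void
    desc_sync(int head, int nslot, int ring_size)
    {
        int n = ring_size - head;

        if (nslot > n) {
            sync_range(head, n);            /* tail segment up to the wrap */
            sync_range(0, nslot - n);       /* wrapped remainder */
        } else {
            sync_range(head, nslot);
        }
    }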
420 gem_rx_start_default(struct gem_dev *dp, int head, int nslot)
422 gem_rx_desc_dma_sync(dp,
423 SLOT(head, dp->gc.gc_rx_ring_size), nslot,
433 gem_dump_txbuf(struct gem_dev *dp, int level, const char *title)
441 dp->name, title,
442 dp->tx_active_head,
443 SLOT(dp->tx_active_head, dp->gc.gc_tx_buf_size),
444 dp->tx_active_tail,
445 SLOT(dp->tx_active_tail, dp->gc.gc_tx_buf_size),
446 dp->tx_active_tail - dp->tx_active_head,
447 dp->tx_softq_head,
448 SLOT(dp->tx_softq_head, dp->gc.gc_tx_buf_size),
449 dp->tx_softq_tail,
450 SLOT(dp->tx_softq_tail, dp->gc.gc_tx_buf_size),
451 dp->tx_softq_tail - dp->tx_softq_head,
452 dp->tx_free_head,
453 SLOT(dp->tx_free_head, dp->gc.gc_tx_buf_size),
454 dp->tx_free_tail,
455 SLOT(dp->tx_free_tail, dp->gc.gc_tx_buf_size),
456 dp->tx_free_tail - dp->tx_free_head,
457 dp->tx_desc_head,
458 SLOT(dp->tx_desc_head, dp->gc.gc_tx_ring_size),
459 dp->tx_desc_tail,
460 SLOT(dp->tx_desc_tail, dp->gc.gc_tx_ring_size),
461 dp->tx_desc_tail - dp->tx_desc_head,
462 dp->tx_desc_intr,
463 SLOT(dp->tx_desc_intr, dp->gc.gc_tx_ring_size),
464 dp->tx_desc_intr - dp->tx_desc_head);
470 struct gem_dev *dp;
472 dp = rbp->rxb_devp;
473 ASSERT(mutex_owned(&dp->intrlock));
474 rbp->rxb_next = dp->rx_buf_freelist;
475 dp->rx_buf_freelist = rbp;
476 dp->rx_buf_freecnt++;
484 gem_get_rxbuf(struct gem_dev *dp, int cansleep)
491 ASSERT(mutex_owned(&dp->intrlock));
494 dp->rx_buf_freecnt));
498 rbp = dp->rx_buf_freelist;
501 ASSERT(dp->rx_buf_freecnt > 0);
503 dp->rx_buf_freelist = rbp->rxb_next;
504 dp->rx_buf_freecnt--;
522 rbp->rxb_devp = dp;
525 if ((err = ddi_dma_alloc_handle(dp->dip,
526 &dp->gc.gc_dma_attr_rxbuf,
532 dp->name, __func__, err);
540 ROUNDUP(dp->rx_buf_len, IOC_LINESIZE),
541 &dp->gc.gc_buf_attr,
546 (dp->gc.gc_rx_header_len > 0)
555 dp->name, __func__, err);
564 NULL, rbp->rxb_buf, dp->rx_buf_len,
565 ((dp->gc.gc_rx_header_len > 0)
576 dp->name, __func__, err));
595 dp->rx_buf_allocated++;
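
gem_free_rxbuf() and gem_get_rxbuf() recycle receive buffers through an intrusive singly-linked freelist guarded by intrlock, allocating fresh DMA handles only on a miss. The list discipline itself is a plain LIFO push/pop; a minimal sketch, with the DMA-handle setup on the miss path omitted:

    #include <stddef.h>

    struct rxbuf {
        struct rxbuf *rxb_next;
        /* ... DMA handles, buffer pointers ... */
    };

    static struct rxbuf *rx_buf_freelist;
    static int rx_buf_freecnt;

    static void
    free_rxbuf(struct rxbuf *rbp)           /* caller holds intrlock */
    {
        rbp->rxb_next = rx_buf_freelist;
        rx_buf_freelist = rbp;
        rx_buf_freecnt++;
    }

    static struct rxbuf *
    get_rxbuf(void)                         /* caller holds intrlock */
    {
        struct rxbuf *rbp = rx_buf_freelist;

        if (rbp != NULL) {
            rx_buf_freelist = rbp->rxb_next;
            rx_buf_freecnt--;
        }
        return (rbp);       /* NULL: caller allocates a new buffer */
    }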
606 gem_alloc_memory(struct gem_dev *dp)
622 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
624 dp->desc_dma_handle = NULL;
625 req_size = dp->rx_desc_size + dp->tx_desc_size + dp->gc.gc_io_area_size;
631 if ((err = ddi_dma_alloc_handle(dp->dip,
632 &dp->gc.gc_dma_attr_desc,
634 &dp->desc_dma_handle)) != DDI_SUCCESS) {
637 dp->name, __func__, err);
641 if ((err = ddi_dma_mem_alloc(dp->desc_dma_handle,
642 req_size, &dp->gc.gc_desc_attr,
645 &dp->desc_acc_handle)) != DDI_SUCCESS) {
649 dp->name, __func__, err, (int)req_size);
650 ddi_dma_free_handle(&dp->desc_dma_handle);
654 if ((err = ddi_dma_addr_bind_handle(dp->desc_dma_handle,
662 dp->name, __func__, err);
663 ddi_dma_mem_free(&dp->desc_acc_handle);
664 ddi_dma_free_handle(&dp->desc_dma_handle);
670 dp->rx_ring = ring;
671 dp->rx_ring_dma = ring_cookie.dmac_laddress;
674 dp->tx_ring = dp->rx_ring + dp->rx_desc_size;
675 dp->tx_ring_dma = dp->rx_ring_dma + dp->rx_desc_size;
678 dp->io_area = dp->tx_ring + dp->tx_desc_size;
679 dp->io_area_dma = dp->tx_ring_dma + dp->tx_desc_size;
685 ASSERT(dp->gc.gc_tx_buf_size > 0);
688 dma_attr_txbounce = dp->gc.gc_dma_attr_txbuf;
694 tx_buf_len = MAXPKTBUF(dp);
699 for (i = 0, tbp = dp->tx_buf;
700 i < dp->gc.gc_tx_buf_size; i++, tbp++) {
703 if ((err = ddi_dma_alloc_handle(dp->dip,
711 dp->name, __func__, err, i);
717 &dp->gc.gc_buf_attr,
724 dp->name, __func__, err, tx_buf_len);
737 dp->name, __func__, err);
750 if (dp->gc.gc_tx_buf_size > 0) {
752 (void) ddi_dma_unbind_handle(dp->tx_buf[i].txb_bdh);
753 ddi_dma_mem_free(&dp->tx_buf[i].txb_bah);
754 ddi_dma_free_handle(&dp->tx_buf[i].txb_bdh);
758 if (dp->desc_dma_handle) {
759 (void) ddi_dma_unbind_handle(dp->desc_dma_handle);
760 ddi_dma_mem_free(&dp->desc_acc_handle);
761 ddi_dma_free_handle(&dp->desc_dma_handle);
762 dp->desc_dma_handle = NULL;
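
gem_alloc_memory() performs one ddi_dma_mem_alloc() of rx_desc_size + tx_desc_size + gc_io_area_size bytes and carves it into three regions at fixed offsets, valid for both the CPU mapping and the bound DMA address; this is why the tx sync above can express its ring offset as tx_ring_dma - rx_ring_dma. A sketch of the carve-up:

    #include <stddef.h>
    #include <stdint.h>

    struct layout {
        char     *rx_ring, *tx_ring, *io_area;  /* CPU addresses */
        uint64_t  rx_dma, tx_dma, io_dma;       /* device addresses */
    };

    /* one contiguous allocation, three regions at fixed offsets */
    static void
    carve(struct layout *l, char *base, uint64_t base_dma,
        size_t rx_desc_size, size_t tx_desc_size)
    {
        l->rx_ring = base;
        l->rx_dma  = base_dma;
        l->tx_ring = l->rx_ring + rx_desc_size;
        l->tx_dma  = l->rx_dma  + rx_desc_size;
        l->io_area = l->tx_ring + tx_desc_size;
        l->io_dma  = l->tx_dma  + tx_desc_size;
    }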
769 gem_free_memory(struct gem_dev *dp)
775 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
778 if (dp->desc_dma_handle) {
779 (void) ddi_dma_unbind_handle(dp->desc_dma_handle);
780 ddi_dma_mem_free(&dp->desc_acc_handle);
781 ddi_dma_free_handle(&dp->desc_dma_handle);
782 dp->desc_dma_handle = NULL;
786 for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) {
794 while ((rbp = dp->rx_buf_freelist) != NULL) {
796 ASSERT(dp->rx_buf_freecnt > 0);
798 dp->rx_buf_freelist = rbp->rxb_next;
799 dp->rx_buf_freecnt--;
829 gem_init_rx_ring(struct gem_dev *dp)
832 int rx_ring_size = dp->gc.gc_rx_ring_size;
835 dp->name, __func__,
836 rx_ring_size, dp->gc.gc_rx_buf_max));
840 (*dp->gc.gc_rx_desc_init)(dp, i);
842 gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
844 dp->rx_active_head = (seqnum_t)0;
845 dp->rx_active_tail = (seqnum_t)0;
847 ASSERT(dp->rx_buf_head == (struct rxbuf *)NULL);
848 ASSERT(dp->rx_buf_tail == (struct rxbuf *)NULL);
855 gem_prepare_rx_buf(struct gem_dev *dp)
861 ASSERT(mutex_owned(&dp->intrlock));
865 nrbuf = min(dp->gc.gc_rx_ring_size, dp->gc.gc_rx_buf_max);
867 if ((rbp = gem_get_rxbuf(dp, B_TRUE)) == NULL) {
870 gem_append_rxbuf(dp, rbp);
873 gem_rx_desc_dma_sync(dp,
874 0, dp->gc.gc_rx_ring_size, DDI_DMA_SYNC_FORDEV);
881 gem_clean_rx_buf(struct gem_dev *dp)
885 int rx_ring_size = dp->gc.gc_rx_ring_size;
889 ASSERT(mutex_owned(&dp->intrlock));
892 dp->name, __func__, dp->rx_buf_freecnt));
897 (*dp->gc.gc_rx_desc_clean)(dp, i);
899 gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
907 while ((rbp = dp->rx_buf_head) != NULL) {
912 dp->rx_buf_head = rbp->rxb_next;
917 dp->rx_buf_tail = (struct rxbuf *)NULL;
921 dp->name, __func__, total, dp->rx_buf_freecnt));
928 gem_init_tx_ring(struct gem_dev *dp)
931 int tx_buf_size = dp->gc.gc_tx_buf_size;
932 int tx_ring_size = dp->gc.gc_tx_ring_size;
935 dp->name, __func__,
936 dp->gc.gc_tx_ring_size, dp->gc.gc_tx_buf_size));
938 ASSERT(!dp->mac_active);
941 dp->tx_slots_base =
942 SLOT(dp->tx_slots_base + dp->tx_softq_head, tx_buf_size);
943 dp->tx_softq_tail -= dp->tx_softq_head;
944 dp->tx_softq_head = (seqnum_t)0;
946 dp->tx_active_head = dp->tx_softq_head;
947 dp->tx_active_tail = dp->tx_softq_head;
949 dp->tx_free_head = dp->tx_softq_tail;
950 dp->tx_free_tail = dp->gc.gc_tx_buf_limit;
952 dp->tx_desc_head = (seqnum_t)0;
953 dp->tx_desc_tail = (seqnum_t)0;
954 dp->tx_desc_intr = (seqnum_t)0;
957 (*dp->gc.gc_tx_desc_init)(dp, i);
959 gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
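
The re-init is careful not to move buffers still sitting on the soft queue: tx_slots_base absorbs the old tx_softq_head, so the logical sequence numbers restart at zero while GET_TXBUF() keeps resolving every queued buffer to the same physical slot. A standalone check of that invariant, again assuming SLOT() is a modulo:

    #include <assert.h>

    #define SLOT(sn, size)  ((int)((sn) % (size)))   /* assumed definition */

    int
    main(void)
    {
        int  size = 256;                    /* hypothetical tx_buf_size */
        long base = 1000, head = 37;        /* pre-reset state */
        long nbase = SLOT(base + head, size);   /* the rebase */
        long k;

        /* every still-queued buffer k maps to the same slot as before */
        for (k = 0; k < size; k++)
            assert(SLOT(base + head + k, size) == SLOT(nbase + k, size));
        return (0);
    }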
979 gem_clean_tx_buf(struct gem_dev *dp)
986 int tx_ring_size = dp->gc.gc_tx_ring_size;
991 ASSERT(!dp->mac_active);
992 ASSERT(dp->tx_busy == 0);
993 ASSERT(dp->tx_softq_tail == dp->tx_free_head);
999 (*dp->gc.gc_tx_desc_clean)(dp, i);
1001 gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
1004 head = dp->tx_active_head;
1005 tail = dp->tx_softq_tail;
1007 ASSERT(dp->tx_free_head - head >= 0);
1008 tbp = GET_TXBUF(dp, head);
1012 dp->stats.errxmt++;
1019 while (sn != head + dp->gc.gc_tx_buf_size) {
1023 dp->name, __func__,
1024 sn, SLOT(sn, dp->gc.gc_tx_buf_size),
1033 gem_dump_txbuf(dp, CE_WARN,
1038 dp->tx_free_tail += tail - head;
1039 ASSERT(dp->tx_free_tail == dp->tx_free_head + dp->gc.gc_tx_buf_limit);
1042 dp->tx_active_head = dp->tx_free_head;
1043 dp->tx_active_tail = dp->tx_free_head;
1044 dp->tx_softq_head = dp->tx_free_head;
1045 dp->tx_softq_tail = dp->tx_free_head;
1052 gem_reclaim_txbuf(struct gem_dev *dp)
1061 int tx_ring_size = dp->gc.gc_tx_ring_size;
1062 uint_t (*tx_desc_stat)(struct gem_dev *dp,
1063 int slot, int ndesc) = dp->gc.gc_tx_desc_stat;
1072 mutex_enter(&dp->xmitlock);
1074 head = dp->tx_active_head;
1075 tail = dp->tx_active_tail;
1081 dp->name, __func__,
1082 head, SLOT(head, dp->gc.gc_tx_buf_size),
1083 tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1087 if (dp->tx_reclaim_busy == 0) {
1089 ASSERT(dp->tx_free_tail - dp->tx_active_head
1090 == dp->gc.gc_tx_buf_limit);
1094 dp->tx_reclaim_busy++;
1097 gem_tx_desc_dma_sync(dp,
1098 SLOT(dp->tx_desc_head, tx_ring_size),
1099 dp->tx_desc_tail - dp->tx_desc_head,
1102 tbp = GET_TXBUF(dp, head);
1103 desc_head = dp->tx_desc_head;
1105 dp->tx_active_head = (++sn), tbp = tbp->txb_next) {
1115 txstat = (*tx_desc_stat)(dp,
1123 if (!dp->tx_blocked && (tbp->txb_flag & GEM_TXFLAG_INTR)) {
1124 dp->tx_blocked = now;
1132 dp->name, sn, SLOT(sn, tx_ring_size));
1137 dp->name, (now - tbp->txb_stime)*10);
1144 if (dp->tx_desc_head != desc_head) {
1146 dp->tx_desc_head = desc_head;
1149 if (desc_head - dp->tx_desc_intr > 0) {
1150 dp->tx_desc_intr = desc_head;
1153 mutex_exit(&dp->xmitlock);
1156 tbp = GET_TXBUF(dp, head);
1162 head, SLOT(head, dp->gc.gc_tx_buf_size),
1163 tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1171 mutex_enter(&dp->xmitlock);
1172 if (--dp->tx_reclaim_busy == 0) {
1176 sn = dp->tx_free_tail;
1177 tbp = GET_TXBUF(dp, new_tail);
1178 while (sn != dp->tx_active_head + dp->gc.gc_tx_buf_limit) {
1187 ASSERT(dp->tx_active_head + dp->gc.gc_tx_buf_limit == sn);
1189 dp->tx_free_tail =
1190 dp->tx_active_head + dp->gc.gc_tx_buf_limit;
1192 if (!dp->mac_active) {
1194 cv_broadcast(&dp->tx_drain_cv);
1199 dp->name, __func__,
1200 dp->tx_free_head, dp->tx_free_tail,
1201 dp->tx_free_tail - dp->tx_free_head, tail - head);
1203 mutex_exit(&dp->xmitlock);
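
The reclaim path splits its work to keep xmitlock hold times short: descriptor status is read and tx_active_head advanced under the lock, the per-buffer DMA unbinds run with the lock dropped, and only the last concurrent reclaimer (tx_reclaim_busy reaching zero) advances tx_free_tail to restore the invariant tx_free_tail == tx_active_head + gc_tx_buf_limit. A hedged sketch of that last-one-out discipline:

    mutex_enter(&dp->xmitlock);
    dp->tx_reclaim_busy++;              /* announce a reclaimer */
    /* ... read tx status, advance dp->tx_active_head ... */
    mutex_exit(&dp->xmitlock);

    /* ... unbind per-buffer DMA handles, lock not held ... */

    mutex_enter(&dp->xmitlock);
    if (--dp->tx_reclaim_busy == 0) {
        /* last reclaimer out restores the free-tail invariant */
        dp->tx_free_tail =
            dp->tx_active_head + dp->gc.gc_tx_buf_limit;
    }
    mutex_exit(&dp->xmitlock);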
1214 gem_tx_load_descs_oo(struct gem_dev *dp,
1219 int tx_ring_size = dp->gc.gc_tx_ring_size;
1221 (struct gem_dev *dp, int slot,
1223 int frags, uint64_t flag) = dp->gc.gc_tx_desc_write;
1227 tbp = GET_TXBUF(dp, sn);
1230 if (dp->tx_cnt < 100) {
1231 dp->tx_cnt++;
1237 tbp->txb_ndescs = (*tx_desc_write)(dp,
1252 gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp)
1285 (min_pkt > ETHERMIN || !dp->gc.gc_tx_auto_pad)) {
1303 dp->name, __func__,
1313 if (dp->gc.gc_tx_max_frags >= 3 &&
1333 gem_tx_start_unit(struct gem_dev *dp)
1341 ASSERT(mutex_owned(&dp->xmitlock));
1342 ASSERT(dp->tx_softq_head == dp->tx_active_tail);
1344 head = dp->tx_softq_head;
1345 tail = dp->tx_softq_tail;
1349 dp->name, __func__, head, tail, tail - head,
1350 dp->tx_desc_head, dp->tx_desc_tail,
1351 dp->tx_desc_tail - dp->tx_desc_head));
1355 dp->tx_desc_tail = tail;
1357 tbp_head = GET_TXBUF(dp, head);
1358 tbp_tail = GET_TXBUF(dp, tail - 1);
1360 ASSERT(tbp_tail->txb_desc + tbp_tail->txb_ndescs == dp->tx_desc_tail);
1362 dp->gc.gc_tx_start(dp,
1363 SLOT(tbp_head->txb_desc, dp->gc.gc_tx_ring_size),
1367 dp->tx_softq_head = dp->tx_active_tail = tail;
1380 gem_txbuf_options(struct gem_dev *dp, mblk_t *mp, uint8_t *bp)
1409 if (dp->misc_flag & GEM_VLAN_HARD) {
1426 gem_send_common(struct gem_dev *dp, mblk_t *mp_head, uint32_t flags)
1454 mutex_enter(&dp->xmitlock);
1455 if (dp->mac_suspended) {
1456 mutex_exit(&dp->xmitlock);
1466 if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1469 mutex_exit(&dp->xmitlock);
1474 head = dp->tx_free_head;
1475 avail = dp->tx_free_tail - head;
1479 dp->name, __func__,
1480 dp->tx_free_head, dp->tx_free_tail, avail, nmblk));
1482 avail = min(avail, dp->tx_max_packets);
1488 dp->tx_max_packets = max(dp->tx_max_packets - 1, 1);
1494 dp->tx_free_head = head + nmblk;
1495 load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0;
1499 tbp = GET_TXBUF(dp, head + avail - 1);
1501 dp->tx_desc_intr = head + avail;
1503 mutex_exit(&dp->xmitlock);
1505 tbp = GET_TXBUF(dp, head);
1531 txflag |= gem_txbuf_options(dp, mp, (uint8_t *)tbp->txb_buf);
1534 len_total += gem_setup_txbuf_copy(dp, mp, tbp);
1537 (void) gem_tx_load_descs_oo(dp, head, head + nmblk, load_flags);
1540 mutex_enter(&dp->xmitlock);
1542 if ((--dp->tx_busy) == 0) {
1544 dp->tx_softq_tail = dp->tx_free_head;
1546 if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1553 cv_broadcast(&dp->tx_drain_cv);
1555 ASSERT(dp->tx_softq_tail - dp->tx_softq_head > 0);
1556 gem_tx_start_unit(dp);
1559 dp->stats.obytes += len_total;
1560 dp->stats.opackets += nmblk;
1561 dp->stats.obcast += bcast;
1562 dp->stats.omcast += mcast;
1564 mutex_exit(&dp->xmitlock);
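
gem_send_common() amortizes locking over a whole mblk chain in three phases: reserve free slots under xmitlock, copy packets and write descriptors with the lock dropped, then commit; the tx_busy counter makes whichever sender finishes last kick the hardware exactly once. A hedged sketch of the shape:

    mutex_enter(&dp->xmitlock);
    head = dp->tx_free_head;                /* reserve nmblk slots */
    dp->tx_free_head = head + nmblk;
    load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0;
    mutex_exit(&dp->xmitlock);

    /* ... copy packets into txbufs, write descriptors, unlocked ... */

    mutex_enter(&dp->xmitlock);
    if ((--dp->tx_busy) == 0) {
        dp->tx_softq_tail = dp->tx_free_head;
        gem_tx_start_unit(dp);              /* one doorbell for the batch */
    }
    mutex_exit(&dp->xmitlock);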
1575 gem_restart_nic(struct gem_dev *dp, uint_t flags)
1577 ASSERT(mutex_owned(&dp->intrlock));
1579 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
1582 gem_dump_txbuf(dp, CE_CONT, "gem_restart_nic");
1586 if (dp->mac_suspended) {
1595 if (dp->mac_active) {
1598 dp->rxmode &= ~RXMODE_ENABLE;
1599 (void) (*dp->gc.gc_set_rx_filter)(dp);
1601 (void) gem_mac_stop(dp, flags);
1605 if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
1607 dp->name, __func__);
1611 if (gem_mac_init(dp) != GEM_SUCCESS) {
1616 if (dp->mii_state == MII_STATE_LINKUP) {
1617 if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
1623 dp->rxmode |= RXMODE_ENABLE;
1624 if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
1635 if (dp->mii_state == MII_STATE_LINKUP) {
1637 ASSERT(!dp->mac_active);
1638 (void) gem_mac_start(dp);
1647 gem_tx_timeout(struct gem_dev *dp)
1653 mutex_enter(&dp->intrlock);
1658 mutex_enter(&dp->xmitlock);
1659 if (!dp->mac_active || dp->mii_state != MII_STATE_LINKUP) {
1660 mutex_exit(&dp->xmitlock);
1663 mutex_exit(&dp->xmitlock);
1666 if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
1668 (void) gem_restart_nic(dp, 0);
1670 dp->tx_blocked = (clock_t)0;
1675 mutex_enter(&dp->xmitlock);
1677 if (dp->tx_active_head == dp->tx_active_tail) {
1679 if (dp->tx_blocked &&
1680 now - dp->tx_blocked > dp->gc.gc_tx_timeout_interval) {
1681 gem_dump_txbuf(dp, CE_WARN,
1684 dp->tx_blocked = (clock_t)0;
1686 mutex_exit(&dp->xmitlock);
1690 tbp = GET_TXBUF(dp, dp->tx_active_head);
1691 if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) {
1692 mutex_exit(&dp->xmitlock);
1695 mutex_exit(&dp->xmitlock);
1697 gem_dump_txbuf(dp, CE_WARN, "gem_tx_timeout: tx timeout");
1700 (void) gem_restart_nic(dp, GEM_RESTART_NOWAIT);
1702 dp->tx_blocked = (clock_t)0;
1705 mutex_exit(&dp->intrlock);
1709 mac_tx_update(dp->mh);
1714 dp->name, BOOLEAN(dp->tx_blocked),
1715 dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr));
1716 dp->timeout_id =
1718 (void *)dp, dp->gc.gc_tx_timeout_interval);
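
The handler ends by re-arming itself, so the watchdog runs for the life of the interface and only the untimeout() loop at stop/detach time ends it. The self-rearming shape, as a hedged sketch:

    static void
    watchdog(void *arg)                 /* sketch of gem_tx_timeout() */
    {
        struct gem_dev *dp = arg;

        /* ... age-check the oldest in-flight txbuf, restart on timeout ... */

        dp->timeout_id = timeout(watchdog, dp,
            dp->gc.gc_tx_timeout_interval);
    }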
1728 gem_append_rxbuf(struct gem_dev *dp, struct rxbuf *rbp_head)
1732 int rx_ring_size = dp->gc.gc_rx_ring_size;
1735 ASSERT(mutex_owned(&dp->intrlock));
1738 dp->name, __func__, dp->rx_active_head, dp->rx_active_tail));
1743 if (dp->rx_buf_head == NULL) {
1744 dp->rx_buf_head = rbp_head;
1745 ASSERT(dp->rx_buf_tail == NULL);
1747 dp->rx_buf_tail->rxb_next = rbp_head;
1750 tail = dp->rx_active_tail;
1753 dp->rx_buf_tail = rbp;
1755 dp->gc.gc_rx_desc_write(dp,
1760 dp->rx_active_tail = tail = tail + 1;
1766 gem_get_packet_default(struct gem_dev *dp, struct rxbuf *rbp, size_t len)
1768 int rx_header_len = dp->gc.gc_rx_header_len;
1799 gem_receive(struct gem_dev *dp)
1810 int rx_ring_size = dp->gc.gc_rx_ring_size;
1812 uint64_t (*rx_desc_stat)(struct gem_dev *dp,
1815 int ethermax = dp->mtu + sizeof (struct ether_header);
1816 int rx_header_len = dp->gc.gc_rx_header_len;
1818 ASSERT(mutex_owned(&dp->intrlock));
1821 dp->name, dp->rx_buf_head));
1823 rx_desc_stat = dp->gc.gc_rx_desc_stat;
1826 for (active_head = dp->rx_active_head;
1827 (rbp = dp->rx_buf_head) != NULL; active_head++) {
1830 cnt = max(dp->poll_pkt_delay*2, 10);
1832 dp->rx_active_tail - active_head);
1833 gem_rx_desc_dma_sync(dp,
1844 if (((rxstat = (*rx_desc_stat)(dp,
1853 dp->rx_buf_head = rbp->rxb_next;
1863 dp->name, __func__, rxstat, len));
1868 if ((mp = dp->gc.gc_get_packet(dp, rbp, len)) == NULL) {
1870 dp->stats.norcvbuf++;
1878 ethermax = dp->mtu + sizeof (struct ether_header);
1885 dp->stats.errrcv++;
1886 dp->stats.runt++;
1892 dp->stats.errrcv++;
1893 dp->stats.frame_too_long++;
1902 gem_dump_packet(dp, (char *)__func__, mp, B_TRUE);
1913 dp->stats.rbcast++;
1915 dp->stats.rmcast++;
1927 if ((cnt = active_head - dp->rx_active_head) > 0) {
1928 dp->stats.rbytes += len_total;
1929 dp->stats.rpackets += cnt;
1931 dp->rx_active_head = active_head;
1937 if (dp->rx_buf_head == NULL) {
1938 dp->rx_buf_tail = NULL;
1942 dp->name, __func__, cnt, rx_head));
1951 head = dp->rx_active_tail;
1952 gem_append_rxbuf(dp, newbufs);
1955 dp->gc.gc_rx_start(dp,
1956 SLOT(head, rx_ring_size), dp->rx_active_tail - head);
1963 mutex_exit(&dp->intrlock);
1964 mac_rx(dp->mh, NULL, rx_head);
1965 mutex_enter(&dp->intrlock);
1975 gem_tx_done(struct gem_dev *dp)
1979 if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
1980 (void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
1982 dp->name, dp->tx_active_head, dp->tx_active_tail));
1987 mutex_enter(&dp->xmitlock);
1990 ASSERT(dp->tx_softq_head == dp->tx_softq_tail);
1995 ASSERT(dp->tx_desc_intr - dp->tx_desc_head >= 0);
1996 if (dp->tx_blocked && dp->tx_desc_intr == dp->tx_desc_head) {
2003 dp->tx_blocked = (clock_t)0;
2004 dp->tx_max_packets =
2005 min(dp->tx_max_packets + 2, dp->gc.gc_tx_buf_limit);
2008 mutex_exit(&dp->xmitlock);
2011 dp->name, __func__, BOOLEAN(dp->tx_blocked)));
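
Taken together with gem_send_common() above, this implements a small adaptive send window: the sender shrinks tx_max_packets by one whenever callers overrun the free list, and the reclaim side grows it by two (capped at gc_tx_buf_limit) once the blocked point drains. Both halves, as they appear in the matches:

    /* sender side, on overrun: */
    dp->tx_max_packets = max(dp->tx_max_packets - 1, 1);

    /* reclaim side, on recovery: */
    dp->tx_max_packets =
        min(dp->tx_max_packets + 2, dp->gc.gc_tx_buf_limit);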
2017 gem_intr(struct gem_dev *dp)
2021 mutex_enter(&dp->intrlock);
2022 if (dp->mac_suspended) {
2023 mutex_exit(&dp->intrlock);
2026 dp->intr_busy = B_TRUE;
2028 ret = (*dp->gc.gc_interrupt)(dp);
2031 dp->intr_busy = B_FALSE;
2032 mutex_exit(&dp->intrlock);
2036 if (!dp->mac_active) {
2037 cv_broadcast(&dp->tx_drain_cv);
2041 dp->stats.intr++;
2042 dp->intr_busy = B_FALSE;
2044 mutex_exit(&dp->intrlock);
2047 DPRINTF(4, (CE_CONT, "!%s: calling mac_tx_update", dp->name));
2048 mac_tx_update(dp->mh);
2055 gem_intr_watcher(struct gem_dev *dp)
2057 (void) gem_intr(dp);
2060 dp->intr_watcher_id =
2061 timeout((void (*)(void *))gem_intr_watcher, (void *)dp, 1);
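
This is the GEM_NOINTR polling mode: with no usable interrupt line, the "interrupt watcher" calls the real handler from a callout that re-arms itself every clock tick. Hedged sketch:

    static void
    intr_watcher(void *arg)             /* sketch of gem_intr_watcher() */
    {
        struct gem_dev *dp = arg;

        (void) gem_intr(dp);            /* poll as if the chip interrupted */
        dp->intr_watcher_id = timeout(intr_watcher, dp, 1);
    }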
2070 gem_choose_forcedmode(struct gem_dev *dp)
2073 if (dp->anadv_1000fdx || dp->anadv_1000hdx) {
2074 dp->speed = GEM_SPD_1000;
2075 dp->full_duplex = dp->anadv_1000fdx;
2076 } else if (dp->anadv_100fdx || dp->anadv_100t4) {
2077 dp->speed = GEM_SPD_100;
2078 dp->full_duplex = B_TRUE;
2079 } else if (dp->anadv_100hdx) {
2080 dp->speed = GEM_SPD_100;
2081 dp->full_duplex = B_FALSE;
2083 dp->speed = GEM_SPD_10;
2084 dp->full_duplex = dp->anadv_10fdx;
2089 gem_mii_read(struct gem_dev *dp, uint_t reg)
2091 if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2092 (*dp->gc.gc_mii_sync)(dp);
2094 return ((*dp->gc.gc_mii_read)(dp, reg));
2098 gem_mii_write(struct gem_dev *dp, uint_t reg, uint16_t val)
2100 if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2101 (*dp->gc.gc_mii_sync)(dp);
2103 (*dp->gc.gc_mii_write)(dp, reg, val);
2111 gem_mii_config_default(struct gem_dev *dp)
2122 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2127 mii_stat = dp->mii_status;
2130 dp->name, __func__, mii_stat, MII_STATUS_BITS));
2135 dp->name, mii_stat, MII_STATUS_BITS);
2140 val = gem_mii_read(dp, MII_AN_ADVERT) & ~MII_ABILITY_ALL;
2144 dp->name, __func__,
2145 dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx,
2146 dp->anadv_10fdx, dp->anadv_10hdx));
2148 if (dp->anadv_100t4) {
2151 if (dp->anadv_100fdx) {
2154 if (dp->anadv_100hdx) {
2157 if (dp->anadv_10fdx) {
2160 if (dp->anadv_10hdx) {
2165 val |= fc_cap_encode[dp->anadv_flow_control];
2169 dp->name, __func__, val, MII_ABILITY_BITS, dp->gc.gc_mii_mode,
2170 dp->anadv_flow_control));
2172 gem_mii_write(dp, MII_AN_ADVERT, val);
2178 if (!dp->anadv_autoneg) {
2183 if (dp->anadv_1000fdx) {
2186 if (dp->anadv_1000hdx) {
2192 dp->name, __func__, val, MII_1000TC_BITS));
2194 gem_mii_write(dp, MII_1000TC, val);
2200 #define GEM_LINKUP(dp) mac_link_update((dp)->mh, LINK_STATE_UP)
2201 #define GEM_LINKDOWN(dp) mac_link_update((dp)->mh, LINK_STATE_DOWN)
2235 gem_mii_link_check(struct gem_dev *dp)
2252 old_mii_state = dp->mii_state;
2255 dp->name, __func__, now, dp->mii_state));
2257 diff = now - dp->mii_last_check;
2258 dp->mii_last_check = now;
2264 if (dp->linkup_delay > 0) {
2265 if (dp->linkup_delay > diff) {
2266 dp->linkup_delay -= diff;
2269 dp->linkup_delay = -1;
2274 switch (dp->mii_state) {
2277 (*dp->gc.gc_mii_sync)(dp);
2281 dp->mii_timer -= diff;
2282 if (dp->mii_timer > 0) {
2284 dp->mii_interval = WATCH_INTERVAL_FAST;
2290 if (dp->mii_status & MII_STATUS_MFPRMBLSUPR) {
2292 (*dp->gc.gc_mii_sync)(dp);
2294 val = gem_mii_read(dp, MII_CONTROL);
2299 dp->name, ddi_get_lbolt(),
2305 gem_mii_write(dp, MII_CONTROL, 0);
2308 if ((*dp->gc.gc_mii_config)(dp) != GEM_SUCCESS) {
2314 gem_choose_forcedmode(dp);
2316 dp->mii_lpable = 0;
2317 dp->mii_advert = 0;
2318 dp->mii_exp = 0;
2319 dp->mii_ctl1000 = 0;
2320 dp->mii_stat1000 = 0;
2321 dp->flow_control = FLOW_CONTROL_NONE;
2323 if (!dp->anadv_autoneg) {
2325 dp->mii_state = MII_STATE_MEDIA_SETUP;
2326 dp->mii_timer = 0;
2327 dp->mii_interval = 0;
2338 dp->mii_timer -= diff;
2339 if (dp->mii_timer -
2340 (dp->gc.gc_mii_an_timeout
2341 - dp->gc.gc_mii_an_wait) > 0) {
2346 dp->mii_interval = WATCH_INTERVAL_FAST;
2351 status = gem_mii_read(dp, MII_STATUS);
2354 dp->name, __func__, dp->mii_state,
2364 dp->name);
2369 if (dp->mii_timer <= 0) {
2374 if (!dp->mii_supress_msg) {
2377 dp->name);
2378 dp->mii_supress_msg = B_TRUE;
2385 dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2393 dp->mii_supress_msg = B_FALSE;
2394 dp->mii_state = MII_STATE_AN_DONE;
2397 dp->name, status, MII_STATUS_BITS));
2399 if (dp->gc.gc_mii_an_delay > 0) {
2400 dp->mii_timer = dp->gc.gc_mii_an_delay;
2401 dp->mii_interval = drv_usectohz(20*1000);
2405 dp->mii_timer = 0;
2413 dp->mii_timer -= diff;
2414 if (dp->mii_timer > 0) {
2416 dp->mii_interval = WATCH_INTERVAL_FAST;
2428 if (dp->gc.gc_mii_an_delay > 0) {
2433 status = gem_mii_read(dp, MII_STATUS);
2435 advert = gem_mii_read(dp, MII_AN_ADVERT);
2436 lpable = gem_mii_read(dp, MII_AN_LPABLE);
2437 exp = gem_mii_read(dp, MII_AN_EXPANSION);
2444 if (dp->mii_status & MII_STATUS_XSTATUS) {
2445 ctl1000 = gem_mii_read(dp, MII_1000TC);
2446 stat1000 = gem_mii_read(dp, MII_1000TS);
2448 dp->mii_lpable = lpable;
2449 dp->mii_advert = advert;
2450 dp->mii_exp = exp;
2451 dp->mii_ctl1000 = ctl1000;
2452 dp->mii_stat1000 = stat1000;
2456 dp->name,
2461 if (dp->mii_status & MII_STATUS_XSTATUS) {
2475 dp->name);
2499 cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name);
2509 dp->speed = GEM_SPD_1000;
2510 dp->full_duplex = B_TRUE;
2514 dp->speed = GEM_SPD_1000;
2515 dp->full_duplex = B_FALSE;
2518 dp->speed = GEM_SPD_100;
2519 dp->full_duplex = B_TRUE;
2522 dp->speed = GEM_SPD_100;
2523 dp->full_duplex = B_TRUE;
2526 dp->speed = GEM_SPD_100;
2527 dp->full_duplex = B_FALSE;
2530 dp->speed = GEM_SPD_10;
2531 dp->full_duplex = B_TRUE;
2534 dp->speed = GEM_SPD_10;
2535 dp->full_duplex = B_FALSE;
2543 val = gem_mii_read(dp, MII_CONTROL);
2546 dp->speed = (val & MII_CONTROL_100MB) ?
2548 dp->full_duplex = dp->speed != GEM_SPD_10;
2556 dp->name,
2560 gem_speed_value[dp->speed],
2561 dp->full_duplex ? "full" : "half");
2564 if (dp->full_duplex) {
2565 dp->flow_control =
2569 dp->flow_control = FLOW_CONTROL_NONE;
2571 dp->mii_state = MII_STATE_MEDIA_SETUP;
2575 dp->mii_state = MII_STATE_LINKDOWN;
2576 dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2577 DPRINTF(2, (CE_CONT, "!%s: setup media mode done", dp->name));
2578 dp->mii_supress_msg = B_FALSE;
2581 dp->mii_interval = WATCH_INTERVAL_FAST;
2583 if ((!dp->anadv_autoneg) ||
2584 dp->gc.gc_mii_an_oneshot || fix_phy) {
2589 val = gem_mii_read(dp, MII_CONTROL);
2593 if (dp->full_duplex) {
2597 switch (dp->speed) {
2608 dp->name, dp->speed);
2615 if (dp->mii_status & MII_STATUS_XSTATUS) {
2616 gem_mii_write(dp,
2619 gem_mii_write(dp, MII_CONTROL, val);
2622 if (dp->nic_state >= NIC_STATE_INITIALIZED) {
2624 (*dp->gc.gc_set_media)(dp);
2627 if ((void *)dp->gc.gc_mii_tune_phy) {
2630 (*dp->gc.gc_mii_tune_phy)(dp);
2636 status = gem_mii_read(dp, MII_STATUS);
2641 dp->mii_state = MII_STATE_LINKUP;
2642 dp->mii_supress_msg = B_FALSE;
2646 dp->name, status, MII_STATUS_BITS));
2654 dp->name,
2655 gem_speed_value[dp->speed],
2656 dp->full_duplex ? "full" : "half",
2657 gem_fc_type[dp->flow_control]);
2659 dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2662 if (dp->gc.gc_mii_hw_link_detection &&
2663 dp->nic_state == NIC_STATE_ONLINE) {
2664 dp->mii_interval = 0;
2667 if (dp->nic_state == NIC_STATE_ONLINE) {
2668 if (!dp->mac_active) {
2669 (void) gem_mac_start(dp);
2676 dp->mii_supress_msg = B_TRUE;
2677 if (dp->anadv_autoneg) {
2678 dp->mii_timer -= diff;
2679 if (dp->mii_timer <= 0) {
2685 dp->gc.gc_mii_linkdown_timeout_action;
2693 status = gem_mii_read(dp, MII_STATUS);
2700 dp->name, status, MII_STATUS_BITS);
2702 if (dp->nic_state == NIC_STATE_ONLINE &&
2703 dp->mac_active &&
2704 dp->gc.gc_mii_stop_mac_on_linkdown) {
2705 (void) gem_mac_stop(dp, 0);
2707 if (dp->tx_blocked) {
2713 if (dp->anadv_autoneg) {
2715 linkdown_action = dp->gc.gc_mii_linkdown_action;
2719 dp->mii_state = MII_STATE_LINKDOWN;
2720 dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2722 if ((void *)dp->gc.gc_mii_tune_phy) {
2724 (*dp->gc.gc_mii_tune_phy)(dp);
2726 dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2731 if (dp->gc.gc_mii_hw_link_detection &&
2732 dp->nic_state == NIC_STATE_ONLINE) {
2733 dp->mii_interval = 0;
2738 dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2746 if (!dp->mii_supress_msg) {
2747 cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2749 dp->mii_supress_msg = B_TRUE;
2753 dp->mii_supress_msg = B_TRUE;
2754 if (dp->gc.gc_mii_an_oneshot) {
2758 dp->mii_state = MII_STATE_AUTONEGOTIATING;
2759 dp->mii_timer = dp->gc.gc_mii_an_timeout;
2760 dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2764 if (!dp->mii_supress_msg) {
2766 dp->name);
2768 dp->mii_supress_msg = B_TRUE;
2773 dp->name, dp->gc.gc_mii_linkdown_action);
2774 dp->mii_supress_msg = B_TRUE;
2779 if (!dp->mii_supress_msg) {
2780 cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2782 dp->mii_state = MII_STATE_RESETTING;
2783 dp->mii_timer = dp->gc.gc_mii_reset_timeout;
2784 if (!dp->gc.gc_mii_dont_reset) {
2785 gem_mii_write(dp, MII_CONTROL, MII_CONTROL_RESET);
2787 dp->mii_interval = WATCH_INTERVAL_FAST;
2791 if (!dp->mii_supress_msg) {
2792 cmn_err(CE_CONT, "!%s: auto-negotiation started", dp->name);
2794 dp->mii_state = MII_STATE_AUTONEGOTIATING;
2795 dp->mii_timer = dp->gc.gc_mii_an_timeout;
2798 val = gem_mii_read(dp, MII_CONTROL) &
2801 gem_mii_write(dp, MII_CONTROL,
2804 dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2807 if (dp->link_watcher_id == 0 && dp->mii_interval) {
2809 dp->link_watcher_id =
2811 (void *)dp, dp->mii_interval);
2814 if (old_mii_state != dp->mii_state) {
2816 if (dp->mii_state == MII_STATE_LINKUP) {
2817 dp->linkup_delay = 0;
2818 GEM_LINKUP(dp);
2819 } else if (dp->linkup_delay <= 0) {
2820 GEM_LINKDOWN(dp);
2822 } else if (dp->linkup_delay < 0) {
2824 dp->linkup_delay = 0;
2825 GEM_LINKDOWN(dp);
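
gem_mii_link_check() is a timer-driven state machine: each pass subtracts the elapsed ticks from mii_timer, acts on mii_state, and picks the next poll delay in mii_interval (0 meaning stop polling and rely on hardware link-change detection). The states visible in the matches, with their rough roles (a hedged summary, since non-matching lines are elided):

    /*
     * MII_STATE_UNKNOWN          sync/reset the PHY, (re)configure
     * MII_STATE_RESETTING        wait out mii_timer, then configure
     * MII_STATE_AUTONEGOTIATING  poll MII_STATUS for AN complete
     * MII_STATE_AN_DONE          read lpable, resolve speed/duplex
     * MII_STATE_MEDIA_SETUP      apply forced/negotiated media
     * MII_STATE_LINKDOWN         poll for link, reset PHY on timeout
     * MII_STATE_LINKUP           poll for link loss
     */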
2832 gem_mii_link_watcher(struct gem_dev *dp)
2836 mutex_enter(&dp->intrlock);
2838 dp->link_watcher_id = 0;
2839 tx_sched = gem_mii_link_check(dp);
2841 if (dp->link_watcher_id == 0) {
2842 cmn_err(CE_CONT, "%s: link watcher stopped", dp->name);
2845 mutex_exit(&dp->intrlock);
2849 mac_tx_update(dp->mh);
2854 gem_mii_probe_default(struct gem_dev *dp)
2861 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2867 dp->mii_status = 0;
2870 if (dp->mii_phy_addr) {
2871 status = gem_mii_read(dp, MII_STATUS);
2873 gem_mii_write(dp, MII_CONTROL, 0);
2877 if (dp->mii_phy_addr < 0) {
2880 dp->name);
2886 dp->name, dp->mii_phy_addr);
2890 for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2891 dp->mii_phy_addr = phy;
2892 status = gem_mii_read(dp, MII_STATUS);
2895 gem_mii_write(dp, MII_CONTROL, 0);
2900 for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2901 dp->mii_phy_addr = phy;
2902 gem_mii_write(dp, MII_CONTROL, 0);
2903 status = gem_mii_read(dp, MII_STATUS);
2910 cmn_err(CE_NOTE, "!%s: no MII PHY found", dp->name);
2911 dp->mii_phy_addr = -1;
2916 dp->mii_status = status;
2917 dp->mii_phy_id = (gem_mii_read(dp, MII_PHYIDH) << 16) |
2918 gem_mii_read(dp, MII_PHYIDL);
2920 if (dp->mii_phy_addr < 0) {
2922 dp->name, dp->mii_phy_id);
2925 dp->name, dp->mii_phy_id, dp->mii_phy_addr);
2929 dp->name,
2930 gem_mii_read(dp, MII_CONTROL), MII_CONTROL_BITS,
2932 gem_mii_read(dp, MII_AN_ADVERT), MII_ABILITY_BITS,
2933 gem_mii_read(dp, MII_AN_LPABLE), MII_ABILITY_BITS);
2935 dp->mii_xstatus = 0;
2937 dp->mii_xstatus = gem_mii_read(dp, MII_XSTATUS);
2940 dp->name, dp->mii_xstatus, MII_XSTATUS_BITS);
2944 adv_org = gem_mii_read(dp, MII_AN_ADVERT);
2946 gem_mii_write(dp, MII_AN_ADVERT,
2949 adv = gem_mii_read(dp, MII_AN_ADVERT);
2952 dp->gc.gc_flow_control &= ~1;
2956 dp->gc.gc_flow_control &= ~2;
2959 gem_mii_write(dp, MII_AN_ADVERT, adv_org);
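
The probe scans MII addresses gc_mii_addr_min..31 (twice, with the MII_CONTROL write placed after or before the status read) and accepts the first PHY whose status register responds. A hedged sketch of one scan pass; the accept test shown here is an assumption, since the matched lines elide it:

    for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
        dp->mii_phy_addr = phy;
        status = gem_mii_read(dp, MII_STATUS);
        if (status != 0xffff && status != 0)
            break;              /* a PHY answered at this address */
    }
    if (phy == 32) {
        dp->mii_phy_addr = -1;  /* no MII PHY found */
    }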
2965 gem_mii_start(struct gem_dev *dp)
2967 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2970 dp->mii_state = MII_STATE_UNKNOWN;
2971 dp->mii_last_check = ddi_get_lbolt();
2972 dp->linkup_delay = dp->gc.gc_mii_linkdown_timeout;
2973 (void) gem_mii_link_watcher(dp);
2977 gem_mii_stop(struct gem_dev *dp)
2979 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2982 mutex_enter(&dp->intrlock);
2983 if (dp->link_watcher_id) {
2984 while (untimeout(dp->link_watcher_id) == -1)
2986 dp->link_watcher_id = 0;
2988 mutex_exit(&dp->intrlock);
2992 gem_get_mac_addr_conf(struct gem_dev *dp)
3005 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3010 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dp->dip,
3057 dp->dev_addr.ether_addr_octet[i] = mac[i];
3065 dp->name, valstr);
3079 gem_mac_set_rx_filter(struct gem_dev *dp)
3081 return ((*dp->gc.gc_set_rx_filter)(dp));
3088 gem_mac_init(struct gem_dev *dp)
3090 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3092 if (dp->mac_suspended) {
3096 dp->mac_active = B_FALSE;
3098 gem_init_rx_ring(dp);
3099 gem_init_tx_ring(dp);
3102 dp->tx_blocked = (clock_t)0;
3103 dp->tx_busy = 0;
3104 dp->tx_reclaim_busy = 0;
3105 dp->tx_max_packets = dp->gc.gc_tx_buf_limit;
3107 if ((*dp->gc.gc_init_chip)(dp) != GEM_SUCCESS) {
3111 gem_prepare_rx_buf(dp);
3119 gem_mac_start(struct gem_dev *dp)
3121 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3123 ASSERT(mutex_owned(&dp->intrlock));
3124 ASSERT(dp->nic_state == NIC_STATE_ONLINE);
3125 ASSERT(dp->mii_state == MII_STATE_LINKUP);
3128 mutex_enter(&dp->xmitlock);
3129 if (dp->mac_suspended) {
3130 mutex_exit(&dp->xmitlock);
3133 dp->mac_active = B_TRUE;
3134 mutex_exit(&dp->xmitlock);
3137 (*dp->gc.gc_rx_start)(dp,
3138 SLOT(dp->rx_active_head, dp->gc.gc_rx_ring_size),
3139 dp->rx_active_tail - dp->rx_active_head);
3141 if ((*dp->gc.gc_start_chip)(dp) != GEM_SUCCESS) {
3143 dp->name, __func__);
3147 mutex_enter(&dp->xmitlock);
3150 ASSERT(dp->tx_softq_tail - dp->tx_softq_head >= 0);
3151 if (dp->tx_softq_tail - dp->tx_softq_head > 0) {
3152 gem_tx_load_descs_oo(dp,
3153 dp->tx_softq_head, dp->tx_softq_tail,
3156 gem_tx_start_unit(dp);
3159 mutex_exit(&dp->xmitlock);
3165 gem_mac_stop(struct gem_dev *dp, uint_t flags)
3175 dp->name, __func__, dp->rx_buf_freecnt));
3177 ASSERT(mutex_owned(&dp->intrlock));
3178 ASSERT(!mutex_owned(&dp->xmitlock));
3183 mutex_enter(&dp->xmitlock);
3184 if (dp->mac_suspended) {
3185 mutex_exit(&dp->xmitlock);
3188 dp->mac_active = B_FALSE;
3190 while (dp->tx_busy > 0) {
3191 cv_wait(&dp->tx_drain_cv, &dp->xmitlock);
3193 mutex_exit(&dp->xmitlock);
3200 2 * (8 * MAXPKTBUF(dp) / gem_speed_value[dp->speed]) *
3201 (dp->tx_active_tail - dp->tx_active_head);
3204 dp->name, __func__, wait_time));
3209 while (dp->tx_active_tail != dp->tx_active_head) {
3213 dp->name, __func__);
3216 (void) gem_reclaim_txbuf(dp);
3222 dp->name, __func__, i,
3229 if ((*dp->gc.gc_stop_chip)(dp) != GEM_SUCCESS) {
3231 dp->name, __func__);
3232 if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
3234 dp->name, __func__);
3242 (void) gem_receive(dp);
3244 gem_clean_rx_buf(dp);
3249 (*dp->gc.gc_get_stats)(dp);
3254 ASSERT(dp->tx_active_tail == dp->tx_softq_head);
3255 ASSERT(dp->tx_softq_tail == dp->tx_free_head);
3258 dp->tx_active_tail = dp->tx_active_head;
3259 dp->tx_softq_head = dp->tx_active_head;
3261 gem_clean_tx_buf(dp);
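
The wait_time bound above is wire math: 2 * (8 * MAXPKTBUF / speed) * pending, i.e. twice the worst-case serialization time of every frame still queued. A worked example, assuming gem_speed_value[] is in Mbit/s so the quotient comes out in microseconds (the matched lines do not show the unit):

    /*
     * mtu 1500  ->  MAXPKTBUF = 1500 + 14 + 4 + 4 = 1522 bytes
     * at 100 Mbit/s:  8 * 1522 / 100 = 121 us per frame on the wire
     * 16 frames pending:  2 * 121 * 16 = 3872 us, so wait ~3.9 ms
     */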
3268 gem_add_multicast(struct gem_dev *dp, const uint8_t *ep)
3273 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3275 mutex_enter(&dp->intrlock);
3276 if (dp->mac_suspended) {
3277 mutex_exit(&dp->intrlock);
3281 if (dp->mc_count_req++ < GEM_MAXMC) {
3283 cnt = dp->mc_count;
3284 bcopy(ep, dp->mc_list[cnt].addr.ether_addr_octet,
3286 if (dp->gc.gc_multicast_hash) {
3287 dp->mc_list[cnt].hash =
3288 (*dp->gc.gc_multicast_hash)(dp, (uint8_t *)ep);
3290 dp->mc_count = cnt + 1;
3293 if (dp->mc_count_req != dp->mc_count) {
3295 dp->rxmode |= RXMODE_MULTI_OVF;
3297 dp->rxmode &= ~RXMODE_MULTI_OVF;
3301 err = gem_mac_set_rx_filter(dp);
3303 mutex_exit(&dp->intrlock);
3309 gem_remove_multicast(struct gem_dev *dp, const uint8_t *ep)
3316 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3318 mutex_enter(&dp->intrlock);
3319 if (dp->mac_suspended) {
3320 mutex_exit(&dp->intrlock);
3324 dp->mc_count_req--;
3325 cnt = dp->mc_count;
3327 if (bcmp(ep, &dp->mc_list[i].addr, ETHERADDRL)) {
3331 len = (cnt - (i + 1)) * sizeof (*dp->mc_list);
3333 bcopy(&dp->mc_list[i+1], &dp->mc_list[i], len);
3335 dp->mc_count--;
3339 if (dp->mc_count_req != dp->mc_count) {
3341 dp->rxmode |= RXMODE_MULTI_OVF;
3343 dp->rxmode &= ~RXMODE_MULTI_OVF;
3346 err = gem_mac_set_rx_filter(dp);
3348 mutex_exit(&dp->intrlock);
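
Removal compacts the mc_list array in place: find the matching entry, then slide the tail of the array down one slot with bcopy(). A hedged sketch of the elided middle of the loop:

    for (i = 0; i < cnt; i++) {
        if (bcmp(ep, &dp->mc_list[i].addr, ETHERADDRL) == 0) {
            len = (cnt - (i + 1)) * sizeof (*dp->mc_list);
            if (len > 0)
                bcopy(&dp->mc_list[i + 1], &dp->mc_list[i], len);
            dp->mc_count--;
            break;
        }
    }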
3419 struct gem_dev *dp;
3426 struct gem_dev *dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3431 dp->name, __func__, item));
3435 val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
3440 val = BOOLEAN(dp->gc.gc_flow_control & 1);
3444 val = BOOLEAN(dp->gc.gc_flow_control & 2);
3448 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
3449 (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
3453 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
3454 (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
3458 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
3462 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
3466 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
3470 val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
3474 val = BOOLEAN(dp->mii_status & MII_STATUS_10);
3478 val = dp->anadv_autoneg;
3482 val = BOOLEAN(dp->anadv_flow_control & 1);
3486 val = BOOLEAN(dp->anadv_flow_control & 2);
3490 val = dp->anadv_1000fdx;
3494 val = dp->anadv_1000hdx;
3498 val = dp->anadv_100t4;
3502 val = dp->anadv_100fdx;
3506 val = dp->anadv_100hdx;
3510 val = dp->anadv_10fdx;
3514 val = dp->anadv_10hdx;
3518 val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3522 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
3526 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
3530 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
3534 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
3538 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
3542 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
3546 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
3550 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
3554 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
3558 val = (dp->mii_state == MII_STATE_LINKUP);
3562 val = gem_speed_value[dp->speed];
3567 if (dp->mii_state == MII_STATE_LINKUP) {
3568 val = dp->full_duplex ? 2 : 1;
3573 val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3577 val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3578 (dp->flow_control == FLOW_CONTROL_RX_PAUSE);
3582 val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3583 (dp->flow_control == FLOW_CONTROL_TX_PAUSE);
3593 dp->name, item);
3605 struct gem_dev *dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3610 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3623 if (val && (dp->mii_status & MII_STATUS_CANAUTONEG) == 0) {
3626 dp->anadv_autoneg = (int)val;
3634 dp->anadv_flow_control |= 1;
3636 dp->anadv_flow_control &= ~1;
3645 dp->anadv_flow_control |= 2;
3647 dp->anadv_flow_control &= ~2;
3655 if (val && (dp->mii_xstatus &
3660 dp->anadv_1000fdx = (int)val;
3667 if (val && (dp->mii_xstatus &
3671 dp->anadv_1000hdx = (int)val;
3678 if (val && (dp->mii_status & MII_STATUS_100_BASE_T4) == 0) {
3681 dp->anadv_100t4 = (int)val;
3688 if (val && (dp->mii_status & MII_STATUS_100_BASEX_FD) == 0) {
3691 dp->anadv_100fdx = (int)val;
3698 if (val && (dp->mii_status & MII_STATUS_100_BASEX) == 0) {
3701 dp->anadv_100hdx = (int)val;
3708 if (val && (dp->mii_status & MII_STATUS_10_FD) == 0) {
3711 dp->anadv_10fdx = (int)val;
3718 if (val && (dp->mii_status & MII_STATUS_10) == 0) {
3721 dp->anadv_10hdx = (int)val;
3726 gem_choose_forcedmode(dp);
3728 dp->mii_state = MII_STATE_UNKNOWN;
3729 if (dp->gc.gc_mii_hw_link_detection && dp->link_watcher_id == 0) {
3731 (void) gem_mii_link_check(dp);
3740 gem_nd_load(struct gem_dev *dp, char *name, ndgetf_t gf, ndsetf_t sf, int item)
3747 arg = &((struct gem_nd_arg *)(void *)dp->nd_arg_p)[item];
3748 arg->dp = dp;
3752 dp->name, __func__, name, item));
3753 (void) nd_load(&dp->nd_data_p, name, gf, sf, (caddr_t)arg);
3757 gem_nd_setup(struct gem_dev *dp)
3760 dp->name, __func__, dp->mii_status, MII_STATUS_BITS));
3762 ASSERT(dp->nd_arg_p == NULL);
3764 dp->nd_arg_p =
3769 gem_nd_load(dp, "autoneg_cap",
3771 gem_nd_load(dp, "pause_cap",
3773 gem_nd_load(dp, "asym_pause_cap",
3775 gem_nd_load(dp, "1000fdx_cap",
3777 gem_nd_load(dp, "1000hdx_cap",
3779 gem_nd_load(dp, "100T4_cap",
3781 gem_nd_load(dp, "100fdx_cap",
3783 gem_nd_load(dp, "100hdx_cap",
3785 gem_nd_load(dp, "10fdx_cap",
3787 gem_nd_load(dp, "10hdx_cap",
3791 gem_nd_load(dp, "adv_autoneg_cap", gem_param_get,
3792 SETFUNC(dp->mii_status & MII_STATUS_CANAUTONEG),
3794 gem_nd_load(dp, "adv_pause_cap", gem_param_get,
3795 SETFUNC(dp->gc.gc_flow_control & 1),
3797 gem_nd_load(dp, "adv_asym_pause_cap", gem_param_get,
3798 SETFUNC(dp->gc.gc_flow_control & 2),
3800 gem_nd_load(dp, "adv_1000fdx_cap", gem_param_get,
3801 SETFUNC(dp->mii_xstatus &
3804 gem_nd_load(dp, "adv_1000hdx_cap", gem_param_get,
3805 SETFUNC(dp->mii_xstatus &
3808 gem_nd_load(dp, "adv_100T4_cap", gem_param_get,
3809 SETFUNC((dp->mii_status & MII_STATUS_100_BASE_T4) &&
3810 !dp->mii_advert_ro),
3812 gem_nd_load(dp, "adv_100fdx_cap", gem_param_get,
3813 SETFUNC((dp->mii_status & MII_STATUS_100_BASEX_FD) &&
3814 !dp->mii_advert_ro),
3816 gem_nd_load(dp, "adv_100hdx_cap", gem_param_get,
3817 SETFUNC((dp->mii_status & MII_STATUS_100_BASEX) &&
3818 !dp->mii_advert_ro),
3820 gem_nd_load(dp, "adv_10fdx_cap", gem_param_get,
3821 SETFUNC((dp->mii_status & MII_STATUS_10_FD) &&
3822 !dp->mii_advert_ro),
3824 gem_nd_load(dp, "adv_10hdx_cap", gem_param_get,
3825 SETFUNC((dp->mii_status & MII_STATUS_10) &&
3826 !dp->mii_advert_ro),
3830 gem_nd_load(dp, "lp_autoneg_cap",
3832 gem_nd_load(dp, "lp_pause_cap",
3834 gem_nd_load(dp, "lp_asym_pause_cap",
3836 gem_nd_load(dp, "lp_1000fdx_cap",
3838 gem_nd_load(dp, "lp_1000hdx_cap",
3840 gem_nd_load(dp, "lp_100T4_cap",
3842 gem_nd_load(dp, "lp_100fdx_cap",
3844 gem_nd_load(dp, "lp_100hdx_cap",
3846 gem_nd_load(dp, "lp_10fdx_cap",
3848 gem_nd_load(dp, "lp_10hdx_cap",
3852 gem_nd_load(dp, "link_status",
3854 gem_nd_load(dp, "link_speed",
3856 gem_nd_load(dp, "link_duplex",
3858 gem_nd_load(dp, "link_autoneg",
3860 gem_nd_load(dp, "link_rx_pause",
3862 gem_nd_load(dp, "link_tx_pause",
3865 gem_nd_load(dp, "resume_test",
3873 gem_nd_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
3877 ASSERT(mutex_owned(&dp->intrlock));
3879 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3883 ok = nd_getset(wq, dp->nd_data_p, mp);
3885 "%s: get %s", dp->name, ok ? "OK" : "FAIL"));
3889 ok = nd_getset(wq, dp->nd_data_p, mp);
3892 dp->name, ok ? "OK" : "FAIL", iocp->ioc_error));
3905 cmn_err(CE_WARN, "%s: invalid cmd 0x%x", dp->name, iocp->ioc_cmd);
3911 gem_nd_cleanup(struct gem_dev *dp)
3913 ASSERT(dp->nd_data_p != NULL);
3914 ASSERT(dp->nd_arg_p != NULL);
3916 nd_free(&dp->nd_data_p);
3918 kmem_free(dp->nd_arg_p, sizeof (struct gem_nd_arg) * PARAM_COUNT);
3919 dp->nd_arg_p = NULL;
3923 gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp)
3929 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3938 DPRINTF(0, (CE_CONT, "%s: %s cmd:0x%x", dp->name, __func__, cmd));
3940 mutex_enter(&dp->intrlock);
3941 mutex_enter(&dp->xmitlock);
3951 status = gem_nd_ioctl(dp, wq, mp, iocp);
3955 mutex_exit(&dp->xmitlock);
3956 mutex_exit(&dp->intrlock);
3960 gem_suspend(dp->dip);
3961 gem_resume(dp->dip);
4014 gem_mac_xcvr_inuse(struct gem_dev *dp)
4018 if ((dp->mii_status & MII_STATUS_XSTATUS) == 0) {
4019 if (dp->mii_status & MII_STATUS_100_BASE_T4) {
4021 } else if (dp->mii_status &
4025 } else if (dp->mii_status &
4029 } else if (dp->mii_status &
4033 } else if (dp->mii_xstatus &
4036 } else if (dp->mii_xstatus &
4079 struct gem_dev *dp = arg;
4081 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4083 mutex_enter(&dp->intrlock);
4084 if (dp->mac_suspended) {
4088 if (gem_mac_init(dp) != GEM_SUCCESS) {
4092 dp->nic_state = NIC_STATE_INITIALIZED;
4095 dp->mc_count = 0;
4096 dp->mc_count_req = 0;
4099 if (dp->mii_state == MII_STATE_LINKUP) {
4100 (dp->gc.gc_set_media)(dp);
4104 bcopy(dp->dev_addr.ether_addr_octet,
4105 dp->cur_addr.ether_addr_octet, ETHERADDRL);
4106 dp->rxmode |= RXMODE_ENABLE;
4108 if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4113 dp->nic_state = NIC_STATE_ONLINE;
4114 if (dp->mii_state == MII_STATE_LINKUP) {
4115 if (gem_mac_start(dp) != GEM_SUCCESS) {
4121 dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
4122 (void *)dp, dp->gc.gc_tx_timeout_interval);
4123 mutex_exit(&dp->intrlock);
4127 dp->nic_state = NIC_STATE_STOPPED;
4128 mutex_exit(&dp->intrlock);
4135 struct gem_dev *dp = arg;
4137 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4140 mutex_enter(&dp->intrlock);
4141 if (dp->mac_suspended) {
4142 mutex_exit(&dp->intrlock);
4145 dp->rxmode &= ~RXMODE_ENABLE;
4146 (void) gem_mac_set_rx_filter(dp);
4147 mutex_exit(&dp->intrlock);
4150 if (dp->timeout_id) {
4151 while (untimeout(dp->timeout_id) == -1)
4153 dp->timeout_id = 0;
4157 mutex_enter(&dp->intrlock);
4158 if (dp->mac_suspended) {
4159 mutex_exit(&dp->intrlock);
4162 dp->nic_state = NIC_STATE_STOPPED;
4165 mutex_enter(&dp->xmitlock);
4166 dp->mac_active = B_FALSE;
4167 mutex_exit(&dp->xmitlock);
4170 while (dp->intr_busy) {
4171 cv_wait(&dp->tx_drain_cv, &dp->intrlock);
4173 (void) gem_mac_stop(dp, 0);
4174 mutex_exit(&dp->intrlock);
4182 struct gem_dev *dp = arg;
4184 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4187 ret = gem_add_multicast(dp, ep);
4189 ret = gem_remove_multicast(dp, ep);
4204 struct gem_dev *dp = arg;
4206 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4208 mutex_enter(&dp->intrlock);
4209 if (dp->mac_suspended) {
4210 mutex_exit(&dp->intrlock);
4214 dp->rxmode |= RXMODE_PROMISC;
4216 dp->rxmode &= ~RXMODE_PROMISC;
4219 if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4222 mutex_exit(&dp->intrlock);
4230 struct gem_dev *dp = arg;
4231 struct gem_stats *gstp = &dp->stats;
4234 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4236 if (mutex_owned(&dp->intrlock)) {
4237 if (dp->mac_suspended) {
4241 mutex_enter(&dp->intrlock);
4242 if (dp->mac_suspended) {
4243 mutex_exit(&dp->intrlock);
4246 mutex_exit(&dp->intrlock);
4249 if ((*dp->gc.gc_get_stats)(dp) != GEM_SUCCESS) {
4255 val = gem_speed_value[dp->speed] *1000000ull;
4367 val = dp->mii_phy_addr;
4371 val = dp->mii_phy_id;
4375 val = gem_mac_xcvr_inuse(dp);
4379 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
4380 (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
4384 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
4385 (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
4389 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4393 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4397 val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4401 val = BOOLEAN(dp->mii_status & MII_STATUS_10);
4405 val = BOOLEAN(dp->gc.gc_flow_control & 2);
4409 val = BOOLEAN(dp->gc.gc_flow_control & 1);
4413 val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4417 val = dp->anadv_1000fdx;
4421 val = dp->anadv_1000hdx;
4425 val = dp->anadv_100fdx;
4429 val = dp->anadv_100hdx;
4433 val = dp->anadv_10fdx;
4437 val = dp->anadv_10hdx;
4441 val = BOOLEAN(dp->anadv_flow_control & 2);
4445 val = BOOLEAN(dp->anadv_flow_control & 1);
4449 val = dp->anadv_autoneg;
4453 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
4457 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
4461 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
4465 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
4469 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
4473 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
4477 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
4481 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
4485 val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4489 val = BOOLEAN(dp->flow_control & 2);
4493 val = BOOLEAN(dp->flow_control & 1);
4497 val = dp->anadv_autoneg &&
4498 BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4502 val = (dp->mii_state == MII_STATE_LINKUP) ?
4503 (dp->full_duplex ? 2 : 1) : 0;
4510 val = BOOLEAN(dp->mii_lpable & MII_AN_ADVERT_REMFAULT);
4518 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4522 val = dp->anadv_100t4;
4526 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
4547 struct gem_dev *dp = arg;
4549 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4551 mutex_enter(&dp->intrlock);
4552 if (dp->mac_suspended) {
4553 mutex_exit(&dp->intrlock);
4556 bcopy(mac, dp->cur_addr.ether_addr_octet, ETHERADDRL);
4557 dp->rxmode |= RXMODE_ENABLE;
4559 if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4562 mutex_exit(&dp->intrlock);
4574 struct gem_dev *dp = arg;
4577 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4579 ASSERT(dp->nic_state == NIC_STATE_ONLINE);
4580 if (dp->mii_state != MII_STATE_LINKUP) {
4591 return (gem_send_common(dp, mp, flags));
4611 gem_gld3_init(struct gem_dev *dp, mac_register_t *macp)
4614 macp->m_driver = dp;
4615 macp->m_dip = dp->dip;
4616 macp->m_src_addr = dp->dev_addr.ether_addr_octet;
4619 macp->m_max_sdu = dp->mtu;
4621 if (dp->misc_flag & GEM_VLAN) {
4632 gem_read_conf(struct gem_dev *dp)
4636 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4641 dp->anadv_autoneg = gem_prop_get_int(dp, "adv_autoneg_cap", 1) != 0;
4642 dp->anadv_1000fdx = gem_prop_get_int(dp, "adv_1000fdx_cap", 1) != 0;
4643 dp->anadv_1000hdx = gem_prop_get_int(dp, "adv_1000hdx_cap", 1) != 0;
4644 dp->anadv_100t4 = gem_prop_get_int(dp, "adv_100T4_cap", 1) != 0;
4645 dp->anadv_100fdx = gem_prop_get_int(dp, "adv_100fdx_cap", 1) != 0;
4646 dp->anadv_100hdx = gem_prop_get_int(dp, "adv_100hdx_cap", 1) != 0;
4647 dp->anadv_10fdx = gem_prop_get_int(dp, "adv_10fdx_cap", 1) != 0;
4648 dp->anadv_10hdx = gem_prop_get_int(dp, "adv_10hdx_cap", 1) != 0;
4650 if ((ddi_prop_exists(DDI_DEV_T_ANY, dp->dip,
4652 dp->full_duplex = gem_prop_get_int(dp, "full-duplex", 1) != 0;
4653 dp->anadv_autoneg = B_FALSE;
4654 if (dp->full_duplex) {
4655 dp->anadv_1000hdx = B_FALSE;
4656 dp->anadv_100hdx = B_FALSE;
4657 dp->anadv_10hdx = B_FALSE;
4659 dp->anadv_1000fdx = B_FALSE;
4660 dp->anadv_100fdx = B_FALSE;
4661 dp->anadv_10fdx = B_FALSE;
4665 if ((val = gem_prop_get_int(dp, "speed", 0)) > 0) {
4666 dp->anadv_autoneg = B_FALSE;
4669 dp->speed = GEM_SPD_1000;
4670 dp->anadv_100t4 = B_FALSE;
4671 dp->anadv_100fdx = B_FALSE;
4672 dp->anadv_100hdx = B_FALSE;
4673 dp->anadv_10fdx = B_FALSE;
4674 dp->anadv_10hdx = B_FALSE;
4677 dp->speed = GEM_SPD_100;
4678 dp->anadv_1000fdx = B_FALSE;
4679 dp->anadv_1000hdx = B_FALSE;
4680 dp->anadv_10fdx = B_FALSE;
4681 dp->anadv_10hdx = B_FALSE;
4684 dp->speed = GEM_SPD_10;
4685 dp->anadv_1000fdx = B_FALSE;
4686 dp->anadv_1000hdx = B_FALSE;
4687 dp->anadv_100t4 = B_FALSE;
4688 dp->anadv_100fdx = B_FALSE;
4689 dp->anadv_100hdx = B_FALSE;
4694 dp->name, "speed", val);
4695 dp->anadv_autoneg = B_TRUE;
4700 val = gem_prop_get_int(dp, "flow-control", dp->gc.gc_flow_control);
4704 dp->name, "flow-control", val);
4706 val = min(val, dp->gc.gc_flow_control);
4708 dp->anadv_flow_control = val;
4710 if (gem_prop_get_int(dp, "nointr", 0)) {
4711 dp->misc_flag |= GEM_NOINTR;
4712 cmn_err(CE_NOTE, "!%s: polling mode enabled", dp->name);
4715 dp->mtu = gem_prop_get_int(dp, "mtu", dp->mtu);
4716 dp->txthr = gem_prop_get_int(dp, "txthr", dp->txthr);
4717 dp->rxthr = gem_prop_get_int(dp, "rxthr", dp->rxthr);
4718 dp->txmaxdma = gem_prop_get_int(dp, "txmaxdma", dp->txmaxdma);
4719 dp->rxmaxdma = gem_prop_get_int(dp, "rxmaxdma", dp->rxmaxdma);
4738 struct gem_dev *dp;
4760 dp = kmem_zalloc(GEM_LOCAL_DATA_SIZE(gc), KM_SLEEP);
4767 /* ddi_set_driver_private(dip, dp); */
4770 dp->private = lp;
4771 dp->priv_size = lmsize;
4772 dp->mc_list = (struct mcast_addr *)&dp[1];
4774 dp->dip = dip;
4775 (void) sprintf(dp->name, gc->gc_name, nports * unit + port);
4783 dp->name);
4786 dp->iblock_cookie = c;
4791 mutex_init(&dp->intrlock, NULL, MUTEX_DRIVER, (void *)c);
4792 mutex_init(&dp->xmitlock, NULL, MUTEX_DRIVER, (void *)c);
4793 cv_init(&dp->tx_drain_cv, NULL, CV_DRIVER, NULL);
4798 dp->base_addr = base;
4799 dp->regs_handle = *regs_handlep;
4800 dp->gc = *gc;
4801 gc = &dp->gc;
4837 dp->rx_desc_size =
4842 dp->tx_desc_size =
4847 dp->mtu = ETHERMTU;
4848 dp->tx_buf = (void *)&dp->mc_list[GEM_MAXMC];
4850 for (i = 0; i < dp->gc.gc_tx_buf_size; i++) {
4851 dp->tx_buf[i].txb_next =
4852 &dp->tx_buf[SLOT(i + 1, dp->gc.gc_tx_buf_size)];
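
This loop closes tx_buf[] into a ring: the last entry's txb_next points back at tx_buf[0], so the send and reclaim paths can walk tbp = tbp->txb_next indefinitely with no bounds checks. Equivalently, assuming SLOT() is a modulo:

    for (i = 0; i < size; i++)
        tx_buf[i].txb_next = &tx_buf[(i + 1) % size];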
4855 dp->rxmode = 0;
4856 dp->speed = GEM_SPD_10; /* default is 10Mbps */
4857 dp->full_duplex = B_FALSE; /* default is half */
4858 dp->flow_control = FLOW_CONTROL_NONE;
4859 dp->poll_pkt_delay = 8; /* typical coalesce delay for rx packets */
4862 dp->txthr = ETHERMAX; /* tx fifo threshold */
4863 dp->txmaxdma = 16*4; /* tx max dma burst size */
4864 dp->rxthr = 128; /* rx fifo threshold */
4865 dp->rxmaxdma = 16*4; /* rx max dma burst size */
4870 gem_read_conf(dp);
4873 dp->rx_buf_len = MAXPKTBUF(dp) + dp->gc.gc_rx_header_len;
4878 mutex_enter(&dp->intrlock);
4879 dp->nic_state = NIC_STATE_STOPPED;
4880 ret = (*dp->gc.gc_reset_chip)(dp);
4881 mutex_exit(&dp->intrlock);
4889 mutex_enter(&dp->intrlock);
4890 ret = (*dp->gc.gc_attach_chip)(dp);
4891 mutex_exit(&dp->intrlock);
4897 dp->gc.gc_tx_copy_thresh = dp->mtu;
4900 if (gem_alloc_memory(dp)) {
4906 dp->name, (long)dp->base_addr,
4907 dp->dev_addr.ether_addr_octet[0],
4908 dp->dev_addr.ether_addr_octet[1],
4909 dp->dev_addr.ether_addr_octet[2],
4910 dp->dev_addr.ether_addr_octet[3],
4911 dp->dev_addr.ether_addr_octet[4],
4912 dp->dev_addr.ether_addr_octet[5]));
4915 dp->cur_addr = dp->dev_addr;
4917 gem_gld3_init(dp, macp);
4920 dp->mii_lpable = 0;
4921 dp->mii_advert = 0;
4922 dp->mii_exp = 0;
4923 dp->mii_ctl1000 = 0;
4924 dp->mii_stat1000 = 0;
4925 if ((*dp->gc.gc_mii_probe)(dp) != GEM_SUCCESS) {
4930 dp->anadv_autoneg &= BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4931 dp->anadv_1000fdx &=
4932 BOOLEAN(dp->mii_xstatus &
4934 dp->anadv_1000hdx &=
4935 BOOLEAN(dp->mii_xstatus &
4937 dp->anadv_100t4 &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4938 dp->anadv_100fdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4939 dp->anadv_100hdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4940 dp->anadv_10fdx &= BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4941 dp->anadv_10hdx &= BOOLEAN(dp->mii_status & MII_STATUS_10);
4943 gem_choose_forcedmode(dp);
4946 if (dp->gc.gc_mii_init) {
4947 if ((*dp->gc.gc_mii_init)(dp) != GEM_SUCCESS) {
4955 gem_nd_setup(dp);
4960 if (ret = mac_register(macp, &dp->mh)) {
4962 dp->name, ret);
4968 if (dp->misc_flag & GEM_SOFTINTR) {
4970 DDI_SOFTINT_LOW, &dp->soft_id,
4973 (caddr_t)dp) != DDI_SUCCESS) {
4975 dp->name);
4978 } else if ((dp->misc_flag & GEM_NOINTR) == 0) {
4981 (caddr_t)dp) != DDI_SUCCESS) {
4982 cmn_err(CE_WARN, "!%s: ddi_add_intr failed", dp->name);
4990 dp->intr_watcher_id =
4992 (void *)dp, drv_usectohz(3*1000000));
4996 dp->next = (struct gem_dev *)ddi_get_driver_private(dip);
4997 dp->port = port;
4998 ddi_set_driver_private(dip, (caddr_t)dp);
5001 gem_mii_start(dp);
5004 return (dp);
5007 (void) mac_unregister(dp->mh);
5010 gem_nd_cleanup(dp);
5013 gem_free_memory(dp);
5015 ddi_regs_map_free(&dp->regs_handle);
5017 mutex_destroy(&dp->xmitlock);
5018 mutex_destroy(&dp->intrlock);
5019 cv_destroy(&dp->tx_drain_cv);
5024 kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(gc));
5032 struct gem_dev *dp;
5038 dp = GEM_GET_DEV(dip);
5039 if (dp == NULL) {
5043 rh = dp->regs_handle;
5044 private = dp->private;
5045 priv_size = dp->priv_size;
5047 while (dp) {
5049 if (mac_unregister(dp->mh) != 0) {
5054 if (dp->rx_buf_allocated != dp->rx_buf_freecnt) {
5058 dp->name, __func__,
5059 dp->rx_buf_allocated, dp->rx_buf_freecnt);
5064 gem_mii_stop(dp);
5067 if (dp->misc_flag & GEM_SOFTINTR) {
5068 ddi_remove_softintr(dp->soft_id);
5069 } else if ((dp->misc_flag & GEM_NOINTR) == 0) {
5070 ddi_remove_intr(dip, 0, dp->iblock_cookie);
5073 if (dp->intr_watcher_id) {
5074 while (untimeout(dp->intr_watcher_id) == -1)
5076 dp->intr_watcher_id = 0;
5081 gem_nd_cleanup(dp);
5083 gem_free_memory(dp);
5086 mutex_destroy(&dp->xmitlock);
5087 mutex_destroy(&dp->intrlock);
5088 cv_destroy(&dp->tx_drain_cv);
5091 tmp = dp->next;
5092 kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(&dp->gc));
5093 dp = tmp;
5111 struct gem_dev *dp;
5116 dp = GEM_GET_DEV(dip);
5117 ASSERT(dp);
5119 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
5121 for (; dp; dp = dp->next) {
5124 gem_mii_stop(dp);
5127 if (dp->misc_flag & GEM_NOINTR) {
5128 if (dp->intr_watcher_id) {
5129 while (untimeout(dp->intr_watcher_id) == -1)
5132 dp->intr_watcher_id = 0;
5136 if (dp->timeout_id) {
5137 while (untimeout(dp->timeout_id) == -1)
5139 dp->timeout_id = 0;
5143 mutex_enter(&dp->intrlock);
5144 (void) gem_mac_stop(dp, 0);
5145 ASSERT(!dp->mac_active);
5148 dp->mac_suspended = B_TRUE;
5149 mutex_exit(&dp->intrlock);
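
This is the suspend gate used throughout the file: gem_suspend() sets mac_suspended under the locks, and every entry point (send, intr, ioctl, multicast, set_rx_filter, ...) re-checks it immediately after taking intrlock and backs out if the hardware may be powered off. A hedged sketch of the entry-point side:

    mutex_enter(&dp->intrlock);
    if (dp->mac_suspended) {
        mutex_exit(&dp->intrlock);
        return;         /* or an error/no-op result, per entry point */
    }
    /* ... normal path: hardware known to be present ... */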
5160 struct gem_dev *dp;
5165 dp = GEM_GET_DEV(dip);
5166 ASSERT(dp);
5168 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
5170 for (; dp; dp = dp->next) {
5177 ASSERT(!dp->mac_active);
5180 mutex_enter(&dp->intrlock);
5182 dp->mac_suspended = B_FALSE;
5183 dp->nic_state = NIC_STATE_STOPPED;
5185 if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
5187 dp->name, __func__);
5188 mutex_exit(&dp->intrlock);
5191 mutex_exit(&dp->intrlock);
5194 if (dp->gc.gc_mii_init) {
5195 (void) (*dp->gc.gc_mii_init)(dp);
5198 if (dp->misc_flag & GEM_NOINTR) {
5203 dp->intr_watcher_id =
5205 (void *)dp, drv_usectohz(3*1000000));
5209 gem_mii_start(dp);
5212 mutex_enter(&dp->intrlock);
5214 if (gem_mac_init(dp) != GEM_SUCCESS) {
5215 mutex_exit(&dp->intrlock);
5218 dp->nic_state = NIC_STATE_INITIALIZED;
5221 if (dp->mii_state == MII_STATE_LINKUP) {
5222 if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
5223 mutex_exit(&dp->intrlock);
5229 dp->rxmode |= RXMODE_ENABLE;
5230 if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
5231 mutex_exit(&dp->intrlock);
5234 dp->nic_state = NIC_STATE_ONLINE;
5237 dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
5238 (void *)dp,
5239 dp->gc.gc_tx_timeout_interval);
5242 if (dp->mii_state == MII_STATE_LINKUP) {
5243 if (gem_mac_start(dp) != GEM_SUCCESS) {
5244 mutex_exit(&dp->intrlock);
5248 mutex_exit(&dp->intrlock);
5254 if (dp->intr_watcher_id) {
5255 while (untimeout(dp->intr_watcher_id) == -1)
5257 dp->intr_watcher_id = 0;
5259 mutex_enter(&dp->intrlock);
5260 (*dp->gc.gc_reset_chip)(dp);
5261 dp->nic_state = NIC_STATE_STOPPED;
5262 mutex_exit(&dp->intrlock);