Lines Matching defs:igb

35 static char igb_version[] = "igb 2.3.8-ish";
426 igb_t *igb;
449 igb = kmem_zalloc(sizeof (igb_t), KM_SLEEP);
451 igb->dip = devinfo;
452 igb->instance = instance;
454 hw = &igb->hw;
455 osdep = &igb->osdep;
457 osdep->igb = igb;
460 ddi_set_driver_private(devinfo, igb);
464 igb->fm_capabilities = igb_get_prop(igb, "fm-capable",
468 igb_fm_init(igb);
469 igb->attach_progress |= ATTACH_PROGRESS_FMINIT;
475 igb_log(igb, IGB_LOG_ERROR, "Failed to map PCI configurations");
478 igb->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
483 if (igb_identify_hardware(igb) != IGB_SUCCESS) {
484 igb_log(igb, IGB_LOG_ERROR, "Failed to identify hardware");
491 if (igb_regs_map(igb) != IGB_SUCCESS) {
492 igb_log(igb, IGB_LOG_ERROR, "Failed to map device registers");
495 igb->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
500 igb_init_properties(igb);
501 igb->attach_progress |= ATTACH_PROGRESS_PROPS;
506 if (igb_alloc_intrs(igb) != IGB_SUCCESS) {
507 igb_log(igb, IGB_LOG_ERROR, "Failed to allocate interrupts");
510 igb->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
518 if (igb_alloc_rings(igb) != IGB_SUCCESS) {
519 igb_log(igb, IGB_LOG_ERROR,
523 igb->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
528 if (igb_add_intr_handlers(igb) != IGB_SUCCESS) {
529 igb_log(igb, IGB_LOG_ERROR, "Failed to add interrupt handlers");
532 igb->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
537 if (igb_init_driver_settings(igb) != IGB_SUCCESS) {
538 igb_log(igb, IGB_LOG_ERROR,
543 if (igb_check_acc_handle(igb->osdep.cfg_handle) != DDI_FM_OK) {
544 ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
554 igb_init_locks(igb);
555 igb->attach_progress |= ATTACH_PROGRESS_LOCKS;
560 if (igb_init(igb) != IGB_SUCCESS) {
561 igb_log(igb, IGB_LOG_ERROR, "Failed to initialize adapter");
564 igb->attach_progress |= ATTACH_PROGRESS_INIT_ADAPTER;
569 if (igb_init_stats(igb) != IGB_SUCCESS) {
570 igb_log(igb, IGB_LOG_ERROR, "Failed to initialize statistics");
573 igb->attach_progress |= ATTACH_PROGRESS_STATS;
578 if (igb_register_mac(igb) != IGB_SUCCESS) {
579 igb_log(igb, IGB_LOG_ERROR, "Failed to register MAC");
582 igb->attach_progress |= ATTACH_PROGRESS_MAC;
588 if (igb_enable_intrs(igb) != IGB_SUCCESS) {
589 igb_log(igb, IGB_LOG_ERROR, "Failed to enable DDI interrupts");
592 igb->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
594 igb_log(igb, IGB_LOG_INFO, "%s", igb_version);
595 atomic_or_32(&igb->igb_state, IGB_INITIALIZED);
601 if (igb->hw.mac.type == e1000_i350)
602 (void) e1000_set_eee_i350(&igb->hw, B_FALSE, B_FALSE);
603 else if (igb->hw.mac.type == e1000_i354)
604 (void) e1000_set_eee_i354(&igb->hw, B_FALSE, B_FALSE);
609 igb_unconfigure(devinfo, igb);
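
The attach flow above records each completed stage in igb->attach_progress (ATTACH_PROGRESS_FMINIT, _PCI_CONFIG, _REGS_MAP, and so on), so the failure path can hand the same bitmask to igb_unconfigure() and undo only what actually succeeded. A standalone sketch of that staged-attach/rollback pattern, using hypothetical stage bits and stub steps rather than the driver's own:

    /* Standalone sketch of the staged-attach / progress-bitmask rollback. */
    #include <stdint.h>
    #include <stdio.h>

    #define PROGRESS_LOCKS  0x01    /* hypothetical stage bits */
    #define PROGRESS_RINGS  0x02
    #define PROGRESS_INTR   0x04

    typedef struct softc { uint32_t progress; } softc_t;

    /* Stubs standing in for the driver's real setup/teardown steps. */
    static int  init_locks(softc_t *sc)    { (void)sc; return (0); }
    static int  alloc_rings(softc_t *sc)   { (void)sc; return (0); }
    static int  add_intrs(softc_t *sc)     { (void)sc; return (-1); } /* simulated failure */
    static void destroy_locks(softc_t *sc) { (void)sc; (void) puts("destroy locks"); }
    static void free_rings(softc_t *sc)    { (void)sc; (void) puts("free rings"); }
    static void rem_intrs(softc_t *sc)     { (void)sc; (void) puts("remove intrs"); }

    /* Undo only the stages recorded as complete, newest first. */
    static void
    demo_unconfigure(softc_t *sc)
    {
        if (sc->progress & PROGRESS_INTR)
            rem_intrs(sc);
        if (sc->progress & PROGRESS_RINGS)
            free_rings(sc);
        if (sc->progress & PROGRESS_LOCKS)
            destroy_locks(sc);
    }

    static int
    demo_attach(softc_t *sc)
    {
        if (init_locks(sc) != 0)
            goto fail;
        sc->progress |= PROGRESS_LOCKS;

        if (alloc_rings(sc) != 0)
            goto fail;
        sc->progress |= PROGRESS_RINGS;

        if (add_intrs(sc) != 0)
            goto fail;
        sc->progress |= PROGRESS_INTR;

        return (0);
    fail:
        demo_unconfigure(sc);
        return (-1);
    }

    int
    main(void)
    {
        softc_t sc = { 0 };
        return (demo_attach(&sc) == 0 ? 0 : 1);
    }

The same bitmask is what igb_detach() and the real igb_unconfigure() (lines 744 onward below) consult when tearing the instance down.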
631 igb_t *igb;
651 igb = (igb_t *)ddi_get_driver_private(devinfo);
652 if (igb == NULL)
658 if (mac_unregister(igb->mac_hdl) != 0) {
659 igb_log(igb, IGB_LOG_ERROR, "Failed to unregister MAC");
662 igb->attach_progress &= ~ATTACH_PROGRESS_MAC;
670 mutex_enter(&igb->gen_lock);
671 if (igb->igb_state & IGB_STARTED) {
672 atomic_and_32(&igb->igb_state, ~IGB_STARTED);
673 igb_stop(igb, B_TRUE);
674 mutex_exit(&igb->gen_lock);
676 igb_disable_watchdog_timer(igb);
678 mutex_exit(&igb->gen_lock);
684 if (!igb_rx_drain(igb))
690 igb_unconfigure(devinfo, igb);
708 igb_t *igb;
711 igb = (igb_t *)ddi_get_driver_private(devinfo);
713 if (igb == NULL)
716 hw = &igb->hw;
721 igb_disable_adapter_interrupts(igb);
744 igb_unconfigure(dev_info_t *devinfo, igb_t *igb)
749 if (igb->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
750 (void) igb_disable_intrs(igb);
756 if (igb->attach_progress & ATTACH_PROGRESS_MAC) {
757 (void) mac_unregister(igb->mac_hdl);
763 if (igb->attach_progress & ATTACH_PROGRESS_STATS) {
764 kstat_delete((kstat_t *)igb->igb_ks);
770 if (igb->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
771 igb_rem_intr_handlers(igb);
777 if (igb->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
778 igb_rem_intrs(igb);
784 if (igb->attach_progress & ATTACH_PROGRESS_PROPS) {
791 if (igb->attach_progress & ATTACH_PROGRESS_INIT_ADAPTER) {
792 mutex_enter(&igb->gen_lock);
793 igb_stop_adapter(igb);
794 mutex_exit(&igb->gen_lock);
795 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK)
796 ddi_fm_service_impact(igb->dip, DDI_SERVICE_UNAFFECTED);
802 igb_release_multicast(igb);
807 if (igb->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
808 if (igb->osdep.reg_handle != NULL)
809 ddi_regs_map_free(&igb->osdep.reg_handle);
815 if (igb->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
816 if (igb->osdep.cfg_handle != NULL)
817 pci_config_teardown(&igb->osdep.cfg_handle);
823 if (igb->attach_progress & ATTACH_PROGRESS_LOCKS) {
824 igb_destroy_locks(igb);
830 if (igb->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
831 igb_free_rings(igb);
837 if (igb->attach_progress & ATTACH_PROGRESS_FMINIT) {
838 igb_fm_fini(igb);
844 kmem_free(igb, sizeof (igb_t));
854 igb_register_mac(igb_t *igb)
856 struct e1000_hw *hw = &igb->hw;
864 mac->m_driver = igb;
865 mac->m_dip = igb->dip;
869 mac->m_max_sdu = igb->max_frame_size -
875 status = mac_register(mac, &igb->mac_hdl);
886 igb_identify_hardware(igb_t *igb)
888 struct e1000_hw *hw = &igb->hw;
889 struct igb_osdep *osdep = &igb->osdep;
917 igb->capab = &igb_82575_cap;
920 igb->capab = &igb_82576_cap;
923 igb->capab = &igb_82580_cap;
926 igb->capab = &igb_i350_cap;
930 igb->capab = &igb_i210_cap;
933 igb->capab = &igb_i354_cap;
946 igb_regs_map(igb_t *igb)
948 dev_info_t *devinfo = igb->dip;
949 struct e1000_hw *hw = &igb->hw;
950 struct igb_osdep *osdep = &igb->osdep;
978 igb_init_properties(igb_t *igb)
984 igb_get_conf(igb);
995 igb_init_driver_settings(igb_t *igb)
997 struct e1000_hw *hw = &igb->hw;
1021 igb->page_size = ddi_ptob(igb->dip, (ulong_t)1);
1029 rx_size = igb->max_frame_size + IPHDR_ALIGN_ROOM;
1030 igb->rx_buf_size = ((rx_size >> 10) +
1036 tx_size = igb->max_frame_size;
1037 igb->tx_buf_size = ((tx_size >> 10) +
1043 for (i = 0; i < igb->num_rx_rings; i++) {
1044 rx_ring = &igb->rx_rings[i];
1046 rx_ring->igb = igb;
1049 for (i = 0; i < igb->num_tx_rings; i++) {
1050 tx_ring = &igb->tx_rings[i];
1052 tx_ring->igb = igb;
1053 if (igb->tx_head_wb_enable)
1058 tx_ring->ring_size = igb->tx_ring_size;
1059 tx_ring->free_list_size = igb->tx_ring_size +
1060 (igb->tx_ring_size >> 1);
1067 igb->intr_throttling[i] = igb->intr_throttling[0];
1072 igb->link_state = LINK_STATE_UNKNOWN;
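
igb_init_driver_settings() sizes the RX and TX buffers by rounding the maximum frame size up to the next 1 KB boundary, which is what the >> 10 / << 10 arithmetic at lines 1029-1037 does. A standalone check of that rounding; the 1522-byte example frame (1500-byte MTU plus Ethernet, VLAN, and FCS overhead) is illustrative only:

    /* Round a buffer size up to the next 1 KB multiple. */
    #include <stdio.h>

    static unsigned int
    roundup_1k(unsigned int size)
    {
        return (((size >> 10) + ((size & 0x3ff) ? 1 : 0)) << 10);
    }

    int
    main(void)
    {
        (void) printf("%u -> %u\n", 1522u, roundup_1k(1522));  /* 2048 */
        (void) printf("%u -> %u\n", 9216u, roundup_1k(9216));  /* already aligned */
        return (0);
    }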
1081 igb_init_locks(igb_t *igb)
1087 for (i = 0; i < igb->num_rx_rings; i++) {
1088 rx_ring = &igb->rx_rings[i];
1090 MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
1093 for (i = 0; i < igb->num_tx_rings; i++) {
1094 tx_ring = &igb->tx_rings[i];
1096 MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
1098 MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
1100 MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
1102 MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
1105 mutex_init(&igb->gen_lock, NULL,
1106 MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
1108 mutex_init(&igb->watchdog_lock, NULL,
1109 MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
1111 mutex_init(&igb->link_lock, NULL,
1112 MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
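
igb_init_locks() initializes every mutex with the interrupt priority obtained earlier via ddi_intr_get_pri(), so the locks can be taken from the driver's interrupt handlers. A minimal kernel-module fragment of that idiom; the demo_* names are stand-ins:

    #include <sys/types.h>
    #include <sys/ksynch.h>
    #include <sys/ddi.h>
    #include <sys/sunddi.h>

    /*
     * pri is the value previously returned by ddi_intr_get_pri(); passing
     * DDI_INTR_PRI(pri) as the fourth argument makes the mutex safe to use
     * from interrupt context at that priority.
     */
    static void
    demo_init_locks(kmutex_t *gen_lock, kmutex_t *watchdog_lock, uint_t pri)
    {
        mutex_init(gen_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
        mutex_init(watchdog_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
    }

    static void
    demo_destroy_locks(kmutex_t *gen_lock, kmutex_t *watchdog_lock)
    {
        mutex_destroy(gen_lock);
        mutex_destroy(watchdog_lock);
    }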
1119 igb_destroy_locks(igb_t *igb)
1125 for (i = 0; i < igb->num_rx_rings; i++) {
1126 rx_ring = &igb->rx_rings[i];
1130 for (i = 0; i < igb->num_tx_rings; i++) {
1131 tx_ring = &igb->tx_rings[i];
1138 mutex_destroy(&igb->gen_lock);
1139 mutex_destroy(&igb->watchdog_lock);
1140 mutex_destroy(&igb->link_lock);
1146 igb_t *igb;
1148 igb = (igb_t *)ddi_get_driver_private(devinfo);
1149 if (igb == NULL)
1152 mutex_enter(&igb->gen_lock);
1157 if (igb->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1158 if (igb_enable_intrs(igb) != IGB_SUCCESS) {
1159 igb_log(igb, IGB_LOG_ERROR,
1161 mutex_exit(&igb->gen_lock);
1166 if (igb->igb_state & IGB_STARTED) {
1167 if (igb_start(igb, B_FALSE) != IGB_SUCCESS) {
1168 mutex_exit(&igb->gen_lock);
1175 igb_enable_watchdog_timer(igb);
1178 atomic_and_32(&igb->igb_state, ~IGB_SUSPENDED);
1180 mutex_exit(&igb->gen_lock);
1188 igb_t *igb;
1190 igb = (igb_t *)ddi_get_driver_private(devinfo);
1191 if (igb == NULL)
1194 mutex_enter(&igb->gen_lock);
1196 atomic_or_32(&igb->igb_state, IGB_SUSPENDED);
1201 if (igb->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1202 (void) igb_disable_intrs(igb);
1205 if (!(igb->igb_state & IGB_STARTED)) {
1206 mutex_exit(&igb->gen_lock);
1210 igb_stop(igb, B_FALSE);
1212 mutex_exit(&igb->gen_lock);
1217 igb_disable_watchdog_timer(igb);
1223 igb_init(igb_t *igb)
1225 mutex_enter(&igb->gen_lock);
1230 if (igb_init_adapter(igb) != IGB_SUCCESS) {
1231 mutex_exit(&igb->gen_lock);
1232 igb_fm_ereport(igb, DDI_FM_DEVICE_INVAL_STATE);
1233 ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
1237 mutex_exit(&igb->gen_lock);
1245 * On success, the MAC address is entered in the igb->hw.mac.addr
1256 igb_init_mac_address(igb_t *igb)
1258 struct e1000_hw *hw = &igb->hw;
1260 ASSERT(mutex_owned(&igb->gen_lock));
1267 igb_log(igb, IGB_LOG_ERROR, "Adapter reset failed.");
1274 if (((igb->hw.mac.type != e1000_i210) &&
1275 (igb->hw.mac.type != e1000_i211)) &&
1283 igb_log(igb, IGB_LOG_ERROR,
1294 if (!igb_find_mac_address(igb)) {
1295 igb_log(igb, IGB_LOG_ERROR, "Failed to get the mac address");
1301 igb_log(igb, IGB_LOG_ERROR, "Invalid mac address");
1315 igb_init_adapter(igb_t *igb)
1317 struct e1000_hw *hw = &igb->hw;
1327 ASSERT(mutex_owned(&igb->gen_lock));
1334 if (igb_init_mac_address(igb) != IGB_SUCCESS) {
1335 igb_log(igb, IGB_LOG_ERROR, "Failed to initialize MAC address");
1366 default_mtu = igb_get_prop(igb, PROP_DEFAULT_MTU,
1373 min_tx = (igb->max_frame_size +
1377 min_rx = igb->max_frame_size;
1405 ((pba << 10) - 2 * igb->max_frame_size));
1425 igb_log(igb, IGB_LOG_ERROR, "Second reset failed");
1446 (void) igb_setup_link(igb, B_FALSE);
1452 igb_log(igb, IGB_LOG_ERROR, "Failed to initialize hardware");
1459 igb_start_link_timer(igb);
1485 if (igb->intr_type == DDI_INTR_TYPE_MSIX)
1486 igb->capab->setup_msix(igb);
1491 igb_init_unicst(igb);
1496 igb_setup_multicst(igb);
1501 for (i = 0; i < igb->intr_cnt; i++)
1502 E1000_WRITE_REG(hw, E1000_EITR(i), igb->intr_throttling[i]);
1508 (void) e1000_read_nvm(&igb->hw, NVM_OEM_OFFSET_0, 1, &nvmword);
1510 (void) e1000_read_nvm(&igb->hw, NVM_OEM_OFFSET_1, 1, &nvmword);
1512 (void) ddi_prop_update_int_array(DDI_DEV_T_NONE, igb->dip,
1516 (void) e1000_read_pba_string(&igb->hw, pbanum, sizeof (pbanum));
1518 (void) ddi_prop_update_string(DDI_DEV_T_NONE, igb->dip,
1523 (void) e1000_read_nvm(&igb->hw, NVM_VERSION, 1, &nvmword);
1527 (void) ddi_prop_update_string(DDI_DEV_T_NONE, igb->dip,
1534 igb_get_phy_state(igb);
1536 igb_param_sync(igb);
1554 igb_stop_adapter(igb_t *igb)
1556 struct e1000_hw *hw = &igb->hw;
1558 ASSERT(mutex_owned(&igb->gen_lock));
1561 igb_stop_link_timer(igb);
1570 igb_fm_ereport(igb, DDI_FM_DEVICE_INVAL_STATE);
1571 ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
1586 igb_reset(igb_t *igb)
1590 mutex_enter(&igb->gen_lock);
1592 ASSERT(igb->igb_state & IGB_STARTED);
1593 atomic_and_32(&igb->igb_state, ~IGB_STARTED);
1599 igb_disable_adapter_interrupts(igb);
1604 (void) igb_tx_drain(igb);
1606 for (i = 0; i < igb->num_rx_rings; i++)
1607 mutex_enter(&igb->rx_rings[i].rx_lock);
1608 for (i = 0; i < igb->num_tx_rings; i++)
1609 mutex_enter(&igb->tx_rings[i].tx_lock);
1614 igb_stop_adapter(igb);
1619 igb_tx_clean(igb);
1624 if (igb_init_adapter(igb) != IGB_SUCCESS) {
1625 igb_fm_ereport(igb, DDI_FM_DEVICE_INVAL_STATE);
1632 igb->tx_ring_init = B_FALSE;
1633 igb_setup_rings(igb);
1635 atomic_and_32(&igb->igb_state, ~(IGB_ERROR | IGB_STALL));
1641 igb->capab->enable_intr(igb);
1643 if (igb_check_acc_handle(igb->osdep.cfg_handle) != DDI_FM_OK)
1646 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK)
1649 for (i = igb->num_tx_rings - 1; i >= 0; i--)
1650 mutex_exit(&igb->tx_rings[i].tx_lock);
1651 for (i = igb->num_rx_rings - 1; i >= 0; i--)
1652 mutex_exit(&igb->rx_rings[i].rx_lock);
1654 atomic_or_32(&igb->igb_state, IGB_STARTED);
1656 mutex_exit(&igb->gen_lock);
1661 for (i = igb->num_tx_rings - 1; i >= 0; i--)
1662 mutex_exit(&igb->tx_rings[i].tx_lock);
1663 for (i = igb->num_rx_rings - 1; i >= 0; i--)
1664 mutex_exit(&igb->rx_rings[i].rx_lock);
1666 mutex_exit(&igb->gen_lock);
1668 ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
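
igb_reset(), like igb_start() and igb_stop() below, takes the locks in a fixed order (gen_lock, then every rx_lock in ascending index order, then every tx_lock) and drops them in reverse, which keeps the reset path from deadlocking against the ring handlers. A condensed sketch of that ordering, with an illustrative ring structure:

    #include <sys/types.h>
    #include <sys/ksynch.h>

    typedef struct demo_ring { kmutex_t lock; } demo_ring_t;

    static void
    demo_quiesce(kmutex_t *gen_lock, demo_ring_t *rx, int nrx,
        demo_ring_t *tx, int ntx)
    {
        int i;

        mutex_enter(gen_lock);

        /* Acquire: all RX ring locks first, then all TX ring locks. */
        for (i = 0; i < nrx; i++)
            mutex_enter(&rx[i].lock);
        for (i = 0; i < ntx; i++)
            mutex_enter(&tx[i].lock);

        /* ... stop the adapter, clean the rings, re-initialize ... */

        /* Release in the reverse order of acquisition. */
        for (i = ntx - 1; i >= 0; i--)
            mutex_exit(&tx[i].lock);
        for (i = nrx - 1; i >= 0; i--)
            mutex_exit(&rx[i].lock);

        mutex_exit(gen_lock);
    }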
1677 igb_tx_clean(igb_t *igb)
1687 for (i = 0; i < igb->num_tx_rings; i++) {
1688 tx_ring = &igb->tx_rings[i];
1723 if (igb->tx_head_wb_enable)
1726 E1000_WRITE_REG(&igb->hw, E1000_TDH(tx_ring->index), 0);
1727 E1000_WRITE_REG(&igb->hw, E1000_TDT(tx_ring->index), 0);
1744 igb_tx_drain(igb_t *igb)
1763 for (j = 0; j < igb->num_tx_rings; j++) {
1764 tx_ring = &igb->tx_rings[j];
1782 igb_rx_drain(igb_t *igb)
1798 done = (igb->rcb_pending == 0);
1813 igb_start(igb_t *igb, boolean_t alloc_buffer)
1817 ASSERT(mutex_owned(&igb->gen_lock));
1820 if (igb_alloc_rx_data(igb) != IGB_SUCCESS) {
1821 igb_log(igb, IGB_LOG_ERROR,
1827 if (igb_alloc_dma(igb) != IGB_SUCCESS) {
1828 igb_log(igb, IGB_LOG_ERROR,
1833 igb->tx_ring_init = B_TRUE;
1835 igb->tx_ring_init = B_FALSE;
1838 for (i = 0; i < igb->num_rx_rings; i++)
1839 mutex_enter(&igb->rx_rings[i].rx_lock);
1840 for (i = 0; i < igb->num_tx_rings; i++)
1841 mutex_enter(&igb->tx_rings[i].tx_lock);
1846 if ((igb->attach_progress & ATTACH_PROGRESS_INIT_ADAPTER) == 0) {
1847 if (igb_init_adapter(igb) != IGB_SUCCESS) {
1848 igb_fm_ereport(igb, DDI_FM_DEVICE_INVAL_STATE);
1851 igb->attach_progress |= ATTACH_PROGRESS_INIT_ADAPTER;
1857 igb_setup_rings(igb);
1863 igb->capab->enable_intr(igb);
1865 if (igb_check_acc_handle(igb->osdep.cfg_handle) != DDI_FM_OK)
1868 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK)
1871 if (igb->hw.mac.type == e1000_i350)
1872 (void) e1000_set_eee_i350(&igb->hw, B_FALSE, B_FALSE);
1873 else if (igb->hw.mac.type == e1000_i354)
1874 (void) e1000_set_eee_i354(&igb->hw, B_FALSE, B_FALSE);
1876 for (i = igb->num_tx_rings - 1; i >= 0; i--)
1877 mutex_exit(&igb->tx_rings[i].tx_lock);
1878 for (i = igb->num_rx_rings - 1; i >= 0; i--)
1879 mutex_exit(&igb->rx_rings[i].rx_lock);
1884 for (i = igb->num_tx_rings - 1; i >= 0; i--)
1885 mutex_exit(&igb->tx_rings[i].tx_lock);
1886 for (i = igb->num_rx_rings - 1; i >= 0; i--)
1887 mutex_exit(&igb->rx_rings[i].rx_lock);
1889 ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
1898 igb_stop(igb_t *igb, boolean_t free_buffer)
1902 ASSERT(mutex_owned(&igb->gen_lock));
1904 igb->attach_progress &= ~ATTACH_PROGRESS_INIT_ADAPTER;
1909 igb_disable_adapter_interrupts(igb);
1914 (void) igb_tx_drain(igb);
1916 for (i = 0; i < igb->num_rx_rings; i++)
1917 mutex_enter(&igb->rx_rings[i].rx_lock);
1918 for (i = 0; i < igb->num_tx_rings; i++)
1919 mutex_enter(&igb->tx_rings[i].tx_lock);
1924 igb_stop_adapter(igb);
1929 igb_tx_clean(igb);
1931 for (i = igb->num_tx_rings - 1; i >= 0; i--)
1932 mutex_exit(&igb->tx_rings[i].tx_lock);
1933 for (i = igb->num_rx_rings - 1; i >= 0; i--)
1934 mutex_exit(&igb->rx_rings[i].rx_lock);
1936 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK)
1937 ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
1939 if (igb->link_state == LINK_STATE_UP) {
1940 igb->link_state = LINK_STATE_UNKNOWN;
1941 mac_link_update(igb->mac_hdl, igb->link_state);
1948 igb_free_dma(igb);
1949 igb_free_rx_data(igb);
1957 igb_alloc_rings(igb_t *igb)
1962 igb->rx_rings = kmem_zalloc(
1963 sizeof (igb_rx_ring_t) * igb->num_rx_rings,
1966 if (igb->rx_rings == NULL) {
1973 igb->tx_rings = kmem_zalloc(
1974 sizeof (igb_tx_ring_t) * igb->num_tx_rings,
1977 if (igb->tx_rings == NULL) {
1978 kmem_free(igb->rx_rings,
1979 sizeof (igb_rx_ring_t) * igb->num_rx_rings);
1980 igb->rx_rings = NULL;
1987 igb->rx_groups = kmem_zalloc(
1988 sizeof (igb_rx_group_t) * igb->num_rx_groups,
1991 if (igb->rx_groups == NULL) {
1992 kmem_free(igb->rx_rings,
1993 sizeof (igb_rx_ring_t) * igb->num_rx_rings);
1994 kmem_free(igb->tx_rings,
1995 sizeof (igb_tx_ring_t) * igb->num_tx_rings);
1996 igb->rx_rings = NULL;
1997 igb->tx_rings = NULL;
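
igb_alloc_rings() allocates the RX ring, TX ring, and RX group arrays with kmem_zalloc() and, when a later allocation fails, frees whatever was already allocated before returning. A compact sketch of that allocate-or-roll-back sequence; the structure names are stand-ins, and the KM_NOSLEEP flag is assumed from the NULL checks in the matches above:

    #include <sys/types.h>
    #include <sys/kmem.h>

    typedef struct demo_rx_ring { int index; } demo_rx_ring_t;
    typedef struct demo_tx_ring { int index; } demo_tx_ring_t;

    static int
    demo_alloc_rings(demo_rx_ring_t **rxp, int nrx, demo_tx_ring_t **txp, int ntx)
    {
        *rxp = kmem_zalloc(sizeof (demo_rx_ring_t) * nrx, KM_NOSLEEP);
        if (*rxp == NULL)
            return (-1);

        *txp = kmem_zalloc(sizeof (demo_tx_ring_t) * ntx, KM_NOSLEEP);
        if (*txp == NULL) {
            /* Roll back the allocation that already succeeded. */
            kmem_free(*rxp, sizeof (demo_rx_ring_t) * nrx);
            *rxp = NULL;
            return (-1);
        }
        return (0);
    }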
2008 igb_free_rings(igb_t *igb)
2010 if (igb->rx_rings != NULL) {
2011 kmem_free(igb->rx_rings,
2012 sizeof (igb_rx_ring_t) * igb->num_rx_rings);
2013 igb->rx_rings = NULL;
2016 if (igb->tx_rings != NULL) {
2017 kmem_free(igb->tx_rings,
2018 sizeof (igb_tx_ring_t) * igb->num_tx_rings);
2019 igb->tx_rings = NULL;
2022 if (igb->rx_groups != NULL) {
2023 kmem_free(igb->rx_groups,
2024 sizeof (igb_rx_group_t) * igb->num_rx_groups);
2025 igb->rx_groups = NULL;
2030 igb_alloc_rx_data(igb_t *igb)
2035 for (i = 0; i < igb->num_rx_rings; i++) {
2036 rx_ring = &igb->rx_rings[i];
2043 igb_free_rx_data(igb);
2048 igb_free_rx_data(igb_t *igb)
2054 for (i = 0; i < igb->num_rx_rings; i++) {
2055 rx_ring = &igb->rx_rings[i];
2057 mutex_enter(&igb->rx_pending_lock);
2069 mutex_exit(&igb->rx_pending_lock);
2077 igb_setup_rings(igb_t *igb)
2086 igb_setup_rx(igb);
2088 igb_setup_tx(igb);
2094 igb_t *igb = rx_ring->igb;
2096 struct e1000_hw *hw = &igb->hw;
2106 ASSERT(mutex_owned(&igb->gen_lock));
2111 for (i = 0; i < igb->rx_ring_size; i++) {
2137 ((igb->rx_buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) |
2144 rxdctl &= igb->capab->rxdctl_mask;
2155 igb_setup_rx(igb_t *igb)
2160 struct e1000_hw *hw = &igb->hw;
2190 for (i = 0; i < igb->num_rx_groups; i++) {
2191 rx_group = &igb->rx_groups[i];
2193 rx_group->igb = igb;
2200 ring_per_group = igb->num_rx_rings / igb->num_rx_groups;
2201 for (i = 0; i < igb->num_rx_rings; i++) {
2202 rx_ring = &igb->rx_rings[i];
2214 E1000_WRITE_REG(hw, E1000_RLPML, igb->max_frame_size);
2219 if (igb->rx_hcksum_enable) {
2230 switch (igb->vmdq_mode) {
2236 if (igb->num_rx_rings > 1)
2237 igb_setup_rss(igb);
2244 igb_setup_mac_classify(igb);
2251 igb_setup_mac_rss_classify(igb);
2265 for (i = 0; i < igb->num_rx_rings; i++) {
2266 rx_ring = &igb->rx_rings[i];
2285 igb_t *igb = tx_ring->igb;
2286 struct e1000_hw *hw = &igb->hw;
2293 ASSERT(mutex_owned(&igb->gen_lock));
2319 if (igb->tx_head_wb_enable) {
2356 if (igb->tx_ring_init == B_TRUE) {
2376 igb_setup_tx(igb_t *igb)
2379 struct e1000_hw *hw = &igb->hw;
2383 for (i = 0; i < igb->num_tx_rings; i++) {
2384 tx_ring = &igb->tx_rings[i];
2406 igb_setup_rss(igb_t *igb)
2408 struct e1000_hw *hw = &igb->hw;
2424 reta.bytes[i & 3] = (i % igb->num_rx_rings) << shift;
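
igb_setup_rss() fills the 128-entry RSS redirection table (RETA) by assigning entry i to queue i % num_rx_rings, so hashed flows are spread round-robin across the receive rings. A standalone illustration of that distribution; the hardware-specific shift and 4-bytes-per-register packing are omitted:

    #include <stdio.h>

    #define RETA_ENTRIES 128

    int
    main(void)
    {
        unsigned int num_rx_rings = 4;     /* example ring count */
        unsigned int count[16] = { 0 };
        unsigned int i;

        for (i = 0; i < RETA_ENTRIES; i++)
            count[i % num_rx_rings]++;

        for (i = 0; i < num_rx_rings; i++)
            (void) printf("queue %u gets %u of %u RETA entries\n",
                i, count[i], RETA_ENTRIES);
        return (0);
    }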
2473 igb_setup_mac_rss_classify(igb_t *igb)
2475 struct e1000_hw *hw = &igb->hw;
2485 ring_per_group = igb->num_rx_rings / igb->num_rx_groups;
2549 igb_setup_mac_classify(igb_t *igb)
2551 struct e1000_hw *hw = &igb->hw;
2584 igb_init_unicst(igb_t *igb)
2586 struct e1000_hw *hw = &igb->hw;
2610 if (!igb->unicst_init) {
2613 igb->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
2614 igb->unicst_avail = igb->unicst_total;
2616 for (slot = 0; slot < igb->unicst_total; slot++)
2617 igb->unicst_addr[slot].mac.set = 0;
2619 igb->unicst_init = B_TRUE;
2622 for (slot = 0; slot < igb->unicst_total; slot++) {
2624 igb->unicst_addr[slot].mac.addr,
2625 slot, igb->vmdq_mode,
2626 igb->unicst_addr[slot].mac.group_index);
2635 igb_unicst_find(igb_t *igb, const uint8_t *mac_addr)
2639 ASSERT(mutex_owned(&igb->gen_lock));
2641 for (slot = 0; slot < igb->unicst_total; slot++) {
2642 if (bcmp(igb->unicst_addr[slot].mac.addr,
2654 igb_unicst_set(igb_t *igb, const uint8_t *mac_addr,
2657 struct e1000_hw *hw = &igb->hw;
2659 ASSERT(mutex_owned(&igb->gen_lock));
2664 bcopy(mac_addr, igb->unicst_addr[slot].mac.addr, ETHERADDRL);
2671 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
2672 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
2683 igb_multicst_add(igb_t *igb, const uint8_t *multiaddr)
2689 ASSERT(mutex_owned(&igb->gen_lock));
2692 igb_log(igb, IGB_LOG_ERROR, "Illegal multicast address");
2696 if (igb->mcast_count >= igb->mcast_max_num) {
2697 igb_log(igb, IGB_LOG_ERROR,
2699 igb->mcast_max_num);
2703 if (igb->mcast_count == igb->mcast_alloc_count) {
2704 old_len = igb->mcast_alloc_count *
2706 new_len = (igb->mcast_alloc_count + MCAST_ALLOC_COUNT) *
2711 igb_log(igb, IGB_LOG_ERROR,
2716 if (igb->mcast_table != NULL) {
2717 bcopy(igb->mcast_table, new_table, old_len);
2718 kmem_free(igb->mcast_table, old_len);
2720 igb->mcast_alloc_count += MCAST_ALLOC_COUNT;
2721 igb->mcast_table = new_table;
2725 &igb->mcast_table[igb->mcast_count], ETHERADDRL);
2726 igb->mcast_count++;
2731 igb_setup_multicst(igb);
2733 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
2734 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
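
igb_multicst_add() keeps the multicast table in a heap array that grows in MCAST_ALLOC_COUNT-sized chunks: when the table is full it allocates a larger copy, moves the existing entries across, and frees the old buffer (igb_multicst_remove() shrinks it the same way). A user-level sketch of that chunked growth, with an assumed chunk size standing in for MCAST_ALLOC_COUNT:

    #include <stdlib.h>
    #include <string.h>

    #define CHUNK       16      /* stands in for MCAST_ALLOC_COUNT */
    #define ETHERADDRL  6

    struct mcast_table {
        unsigned char (*addrs)[ETHERADDRL];
        unsigned int count;     /* entries in use */
        unsigned int alloc;     /* entries allocated */
    };

    static int
    mcast_add(struct mcast_table *t, const unsigned char *addr)
    {
        if (t->count == t->alloc) {
            size_t new_len = (t->alloc + CHUNK) * ETHERADDRL;
            unsigned char (*nt)[ETHERADDRL] = malloc(new_len);

            if (nt == NULL)
                return (-1);
            if (t->addrs != NULL) {
                /* Preserve the existing entries, then drop the old buffer. */
                memcpy(nt, t->addrs, t->alloc * ETHERADDRL);
                free(t->addrs);
            }
            t->addrs = nt;
            t->alloc += CHUNK;
        }
        memcpy(t->addrs[t->count], addr, ETHERADDRL);
        t->count++;
        return (0);
    }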
2745 igb_multicst_remove(igb_t *igb, const uint8_t *multiaddr)
2752 ASSERT(mutex_owned(&igb->gen_lock));
2754 for (i = 0; i < igb->mcast_count; i++) {
2755 if (bcmp(multiaddr, &igb->mcast_table[i],
2757 for (i++; i < igb->mcast_count; i++) {
2758 igb->mcast_table[i - 1] =
2759 igb->mcast_table[i];
2761 igb->mcast_count--;
2766 if ((igb->mcast_alloc_count - igb->mcast_count) >
2768 old_len = igb->mcast_alloc_count *
2770 new_len = (igb->mcast_alloc_count - MCAST_ALLOC_COUNT) *
2775 bcopy(igb->mcast_table, new_table, new_len);
2776 kmem_free(igb->mcast_table, old_len);
2777 igb->mcast_alloc_count -= MCAST_ALLOC_COUNT;
2778 igb->mcast_table = new_table;
2785 igb_setup_multicst(igb);
2787 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
2788 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
2796 igb_release_multicast(igb_t *igb)
2798 if (igb->mcast_table != NULL) {
2799 kmem_free(igb->mcast_table,
2800 igb->mcast_alloc_count * sizeof (struct ether_addr));
2801 igb->mcast_table = NULL;
2812 igb_setup_multicst(igb_t *igb)
2816 struct e1000_hw *hw = &igb->hw;
2818 ASSERT(mutex_owned(&igb->gen_lock));
2819 ASSERT(igb->mcast_count <= igb->mcast_max_num);
2821 mc_addr_list = (uint8_t *)igb->mcast_table;
2822 mc_addr_count = igb->mcast_count;
2834 * file igb.conf.
2843 igb_get_conf(igb_t *igb)
2845 struct e1000_hw *hw = &igb->hw;
2852 * igb driver supports the following user configurations:
2882 igb->param_adv_autoneg_cap = igb_get_prop(igb,
2884 igb->param_adv_1000fdx_cap = igb_get_prop(igb,
2886 igb->param_adv_100fdx_cap = igb_get_prop(igb,
2888 igb->param_adv_100hdx_cap = igb_get_prop(igb,
2890 igb->param_adv_10fdx_cap = igb_get_prop(igb,
2892 igb->param_adv_10hdx_cap = igb_get_prop(igb,
2898 default_mtu = igb_get_prop(igb, PROP_DEFAULT_MTU,
2901 igb->max_frame_size = default_mtu +
2907 flow_control = igb_get_prop(igb, PROP_FLOW_CONTROL,
2917 igb->tx_ring_size = igb_get_prop(igb, PROP_TX_RING_SIZE,
2919 igb->rx_ring_size = igb_get_prop(igb, PROP_RX_RING_SIZE,
2922 igb->mr_enable = igb_get_prop(igb, PROP_MR_ENABLE, 0, 1, 0);
2923 igb->num_rx_groups = igb_get_prop(igb, PROP_RX_GROUP_NUM,
2930 igb->num_rx_groups = 1;
2932 if (igb->mr_enable) {
2933 igb->num_tx_rings = igb->capab->def_tx_que_num;
2934 igb->num_rx_rings = igb->capab->def_rx_que_num;
2936 igb->num_tx_rings = 1;
2937 igb->num_rx_rings = 1;
2939 if (igb->num_rx_groups > 1) {
2940 igb_log(igb, IGB_LOG_ERROR,
2943 igb->num_rx_groups = 1;
2950 for (i = igb->num_rx_groups; i > 0; i--) {
2951 if ((igb->num_rx_rings % i) == 0)
2954 if (i != igb->num_rx_groups) {
2955 igb_log(igb, IGB_LOG_ERROR,
2958 igb->num_rx_groups = i;
2964 ring_per_group = igb->num_rx_rings / igb->num_rx_groups;
2966 if (igb->num_rx_groups == 1) {
2970 igb->vmdq_mode = E1000_VMDQ_OFF;
2975 igb->vmdq_mode = E1000_VMDQ_MAC;
2980 igb->vmdq_mode = E1000_VMDQ_MAC_RSS;
2991 igb->intr_force = igb_get_prop(igb, PROP_INTR_FORCE,
2994 igb->tx_hcksum_enable = igb_get_prop(igb, PROP_TX_HCKSUM_ENABLE,
2996 igb->rx_hcksum_enable = igb_get_prop(igb, PROP_RX_HCKSUM_ENABLE,
2998 igb->lso_enable = igb_get_prop(igb, PROP_LSO_ENABLE,
3000 igb->tx_head_wb_enable = igb_get_prop(igb, PROP_TX_HEAD_WB_ENABLE,
3004 * igb LSO needs the tx h/w checksum support.
3007 if (igb->tx_hcksum_enable == B_FALSE)
3008 igb->lso_enable = B_FALSE;
3010 igb->tx_copy_thresh = igb_get_prop(igb, PROP_TX_COPY_THRESHOLD,
3013 igb->tx_recycle_thresh = igb_get_prop(igb, PROP_TX_RECYCLE_THRESHOLD,
3016 igb->tx_overload_thresh = igb_get_prop(igb, PROP_TX_OVERLOAD_THRESHOLD,
3019 igb->tx_resched_thresh = igb_get_prop(igb, PROP_TX_RESCHED_THRESHOLD,
3021 MIN(igb->tx_ring_size, MAX_TX_RESCHED_THRESHOLD),
3022 igb->tx_ring_size > DEFAULT_TX_RESCHED_THRESHOLD ?
3025 igb->rx_copy_thresh = igb_get_prop(igb, PROP_RX_COPY_THRESHOLD,
3028 igb->rx_limit_per_intr = igb_get_prop(igb, PROP_RX_LIMIT_PER_INTR,
3032 igb->intr_throttling[0] = igb_get_prop(igb, PROP_INTR_THROTTLING,
3033 igb->capab->min_intr_throttle,
3034 igb->capab->max_intr_throttle,
3035 igb->capab->def_intr_throttle);
3040 igb->mcast_max_num =
3041 igb_get_prop(igb, PROP_MCAST_MAX_NUM,
3046 * igb_get_prop - Get a property value out of the configuration file igb.conf
3055 igb_get_prop(igb_t *igb,
3066 value = ddi_prop_get_int(DDI_DEV_T_ANY, igb->dip,
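
igb_get_prop() wraps ddi_prop_get_int(): it reads a driver property (from igb.conf or the device node), falls back to the supplied default when the property is absent, and clamps the result to the legal range. A minimal version of that helper, written as a sketch rather than a copy of the real one:

    #include <sys/types.h>
    #include <sys/ddi.h>
    #include <sys/sunddi.h>

    /*
     * Read an integer property, defaulting when it is not set and clamping
     * the result into [minval, maxval].
     */
    static int
    demo_get_prop(dev_info_t *dip, char *propname, int minval, int maxval,
        int defval)
    {
        int value;

        value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            propname, defval);

        if (value > maxval)
            value = maxval;
        if (value < minval)
            value = minval;

        return (value);
    }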
3082 igb_setup_link(igb_t *igb, boolean_t setup_hw)
3088 mac = &igb->hw.mac;
3089 phy = &igb->hw.phy;
3092 if (igb->param_adv_autoneg_cap == 1) {
3099 if (igb->param_adv_1000fdx_cap == 1)
3102 if (igb->param_adv_100fdx_cap == 1)
3105 if (igb->param_adv_100hdx_cap == 1)
3108 if (igb->param_adv_10fdx_cap == 1)
3111 if (igb->param_adv_10hdx_cap == 1)
3122 if (igb->param_adv_100fdx_cap == 1)
3124 else if (igb->param_adv_100hdx_cap == 1)
3126 else if (igb->param_adv_10fdx_cap == 1)
3128 else if (igb->param_adv_10hdx_cap == 1)
3135 igb_log(igb, IGB_LOG_INFO, "Invalid link settings. Setup "
3144 if (e1000_setup_link(&igb->hw) != E1000_SUCCESS)
3156 igb_is_link_up(igb_t *igb)
3158 struct e1000_hw *hw = &igb->hw;
3161 ASSERT(mutex_owned(&igb->gen_lock));
3195 igb_link_check(igb_t *igb)
3197 struct e1000_hw *hw = &igb->hw;
3201 ASSERT(mutex_owned(&igb->gen_lock));
3203 if (igb_is_link_up(igb)) {
3207 if (igb->link_state != LINK_STATE_UP) {
3209 igb->link_speed = speed;
3210 igb->link_duplex = duplex;
3211 igb->link_state = LINK_STATE_UP;
3213 if (!igb->link_complete)
3214 igb_stop_link_timer(igb);
3216 } else if (igb->link_complete) {
3217 if (igb->link_state != LINK_STATE_DOWN) {
3218 igb->link_speed = 0;
3219 igb->link_duplex = 0;
3220 igb->link_state = LINK_STATE_DOWN;
3225 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
3226 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
3242 igb_t *igb = (igb_t *)arg;
3245 if (igb->igb_state & IGB_ERROR) {
3246 igb->reset_count++;
3247 if (igb_reset(igb) == IGB_SUCCESS)
3248 ddi_fm_service_impact(igb->dip, DDI_SERVICE_RESTORED);
3250 igb_restart_watchdog_timer(igb);
3254 if (igb_stall_check(igb) || (igb->igb_state & IGB_STALL)) {
3255 igb_fm_ereport(igb, DDI_FM_DEVICE_STALL);
3256 ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
3257 igb->reset_count++;
3258 if (igb_reset(igb) == IGB_SUCCESS)
3259 ddi_fm_service_impact(igb->dip, DDI_SERVICE_RESTORED);
3261 igb_restart_watchdog_timer(igb);
3265 mutex_enter(&igb->gen_lock);
3266 if (!(igb->igb_state & IGB_SUSPENDED) && (igb->igb_state & IGB_STARTED))
3267 link_changed = igb_link_check(igb);
3268 mutex_exit(&igb->gen_lock);
3271 mac_link_update(igb->mac_hdl, igb->link_state);
3273 igb_restart_watchdog_timer(igb);
3289 igb_t *igb = (igb_t *)arg;
3291 mutex_enter(&igb->link_lock);
3292 igb->link_complete = B_TRUE;
3293 igb->link_tid = 0;
3294 mutex_exit(&igb->link_lock);
3304 * value exceeds the threshold, the igb is assumed to
3308 igb_stall_check(igb_t *igb)
3311 struct e1000_hw *hw = &igb->hw;
3315 if (igb->link_state != LINK_STATE_UP)
3322 for (i = 0; i < igb->num_tx_rings; i++) {
3323 tx_ring = &igb->tx_rings[i];
3367 igb_find_mac_address(igb_t *igb)
3369 struct e1000_hw *hw = &igb->hw;
3386 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, igb->dip,
3401 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, igb->dip, 0,
3418 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, igb->dip,
3447 igb_arm_watchdog_timer(igb_t *igb)
3452 igb->watchdog_tid =
3454 (void *)igb, 1 * drv_usectohz(1000000));
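
igb_arm_watchdog_timer() schedules the watchdog handler one second in the future with timeout(), converting the interval with drv_usectohz() and keeping the returned timeout_id_t so the callout can be cancelled later. A minimal fragment of that pattern; the re-arm shown in the handler is simplified and skips the locking the real driver performs:

    #include <sys/types.h>
    #include <sys/conf.h>
    #include <sys/ddi.h>
    #include <sys/sunddi.h>

    static timeout_id_t demo_watchdog_tid;

    /* Callout handler: do the periodic checks, then re-arm. */
    static void
    demo_local_timer(void *arg)
    {
        /* ... stall check, link check ... */
        demo_watchdog_tid = timeout(demo_local_timer, arg,
            drv_usectohz(1000000));
    }

    /* Schedule the first tick one second from now and keep the callout id. */
    static void
    demo_arm_watchdog(void *softstate)
    {
        demo_watchdog_tid = timeout(demo_local_timer, softstate,
            drv_usectohz(1000000));
    }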
3462 igb_enable_watchdog_timer(igb_t *igb)
3464 mutex_enter(&igb->watchdog_lock);
3466 if (!igb->watchdog_enable) {
3467 igb->watchdog_enable = B_TRUE;
3468 igb->watchdog_start = B_TRUE;
3469 igb_arm_watchdog_timer(igb);
3472 mutex_exit(&igb->watchdog_lock);
3480 igb_disable_watchdog_timer(igb_t *igb)
3484 mutex_enter(&igb->watchdog_lock);
3486 igb->watchdog_enable = B_FALSE;
3487 igb->watchdog_start = B_FALSE;
3488 tid = igb->watchdog_tid;
3489 igb->watchdog_tid = 0;
3491 mutex_exit(&igb->watchdog_lock);
3502 igb_start_watchdog_timer(igb_t *igb)
3504 mutex_enter(&igb->watchdog_lock);
3506 if (igb->watchdog_enable) {
3507 if (!igb->watchdog_start) {
3508 igb->watchdog_start = B_TRUE;
3509 igb_arm_watchdog_timer(igb);
3513 mutex_exit(&igb->watchdog_lock);
3520 igb_restart_watchdog_timer(igb_t *igb)
3522 mutex_enter(&igb->watchdog_lock);
3524 if (igb->watchdog_start)
3525 igb_arm_watchdog_timer(igb);
3527 mutex_exit(&igb->watchdog_lock);
3534 igb_stop_watchdog_timer(igb_t *igb)
3538 mutex_enter(&igb->watchdog_lock);
3540 igb->watchdog_start = B_FALSE;
3541 tid = igb->watchdog_tid;
3542 igb->watchdog_tid = 0;
3544 mutex_exit(&igb->watchdog_lock);
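
In igb_disable_watchdog_timer() and igb_stop_watchdog_timer() the callout id is copied to a local and cleared while watchdog_lock is held, and the cancellation itself happens after the lock is dropped (the untimeout() call does not appear in the matches above only because that line does not contain "igb"). Cancelling outside the lock avoids deadlocking against a timer handler that may be blocked on the same lock. A sketch of that pattern:

    #include <sys/types.h>
    #include <sys/conf.h>
    #include <sys/ksynch.h>
    #include <sys/ddi.h>
    #include <sys/sunddi.h>

    static kmutex_t demo_watchdog_lock;
    static timeout_id_t demo_watchdog_tid;

    static void
    demo_stop_watchdog(void)
    {
        timeout_id_t tid;

        mutex_enter(&demo_watchdog_lock);
        tid = demo_watchdog_tid;        /* take ownership of the callout id */
        demo_watchdog_tid = 0;
        mutex_exit(&demo_watchdog_lock);

        /* Cancel outside the lock; untimeout() waits for a running handler. */
        if (tid != 0)
            (void) untimeout(tid);
    }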
3554 igb_start_link_timer(struct igb *igb)
3556 struct e1000_hw *hw = &igb->hw;
3565 mutex_enter(&igb->link_lock);
3567 igb->link_complete = B_TRUE;
3569 igb->link_complete = B_FALSE;
3570 igb->link_tid = timeout(igb_link_timer, (void *)igb,
3573 mutex_exit(&igb->link_lock);
3580 igb_stop_link_timer(struct igb *igb)
3584 mutex_enter(&igb->link_lock);
3585 igb->link_complete = B_TRUE;
3586 tid = igb->link_tid;
3587 igb->link_tid = 0;
3588 mutex_exit(&igb->link_lock);
3598 igb_disable_adapter_interrupts(igb_t *igb)
3600 struct e1000_hw *hw = &igb->hw;
3612 if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
3625 igb_enable_adapter_interrupts_82580(igb_t *igb)
3627 struct e1000_hw *hw = &igb->hw;
3631 igb->ims_mask |= E1000_IMS_DRSTA;
3633 if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
3636 E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask);
3637 E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
3638 igb->ims_mask = (E1000_IMS_LSC | E1000_IMS_DRSTA);
3639 E1000_WRITE_REG(hw, E1000_IMS, igb->ims_mask);
3642 igb->ims_mask = IMS_ENABLE_MASK | E1000_IMS_TXQE;
3643 igb->ims_mask |= E1000_IMS_DRSTA;
3644 E1000_WRITE_REG(hw, E1000_IMS, igb->ims_mask);
3657 igb_enable_adapter_interrupts_82576(igb_t *igb)
3659 struct e1000_hw *hw = &igb->hw;
3664 if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
3667 E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask);
3668 E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
3669 igb->ims_mask = E1000_IMS_LSC;
3674 igb->ims_mask = IMS_ENABLE_MASK | E1000_IMS_TXQE;
3689 igb_enable_adapter_interrupts_82575(igb_t *igb)
3691 struct e1000_hw *hw = &igb->hw;
3697 if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
3699 E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask);
3700 E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
3701 igb->ims_mask = E1000_IMS_LSC;
3714 igb->ims_mask = IMS_ENABLE_MASK;
3734 igb_loopback_ioctl(igb_t *igb, struct iocblk *iocp, mblk_t *mp)
3743 hw = &igb->hw;
3797 *lbmp = igb->loopback_mode;
3806 if (!igb_set_loopback_mode(igb, *lbmp))
3814 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
3815 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
3826 igb_set_loopback_mode(igb_t *igb, uint32_t mode)
3831 if (mode == igb->loopback_mode)
3834 hw = &igb->hw;
3836 igb->loopback_mode = mode;
3841 (void) igb_reset(igb);
3846 mutex_enter(&igb->gen_lock);
3850 mutex_exit(&igb->gen_lock);
3854 igb_set_external_loopback(igb);
3858 igb_set_internal_phy_loopback(igb);
3862 igb_set_internal_serdes_loopback(igb);
3866 mutex_exit(&igb->gen_lock);
3874 mutex_enter(&igb->gen_lock);
3875 (void) igb_link_check(igb);
3876 mutex_exit(&igb->gen_lock);
3878 if (igb->link_state == LINK_STATE_UP)
3884 if (igb->link_state != LINK_STATE_UP) {
3889 igb->loopback_mode = IGB_LB_NONE;
3893 (void) igb_reset(igb);
3896 igb_log(igb, IGB_LOG_INFO, "Set external loopback "
3910 igb_set_external_loopback(igb_t *igb)
3915 hw = &igb->hw;
3932 igb_set_internal_phy_loopback(igb_t *igb)
3939 hw = &igb->hw;
3966 igb_set_internal_serdes_loopback(igb_t *igb)
3974 hw = &igb->hw;
4022 mac_rx_ring(rx_ring->igb->mac_hdl, rx_ring->ring_handle, mp,
4033 igb_t *igb = tx_ring->igb;
4040 (tx_ring->tbd_free >= igb->tx_resched_thresh)) {
4042 mac_tx_ring_update(tx_ring->igb->mac_hdl, tx_ring->ring_handle);
4052 igb_intr_link_work(igb_t *igb)
4056 igb_stop_watchdog_timer(igb);
4058 mutex_enter(&igb->gen_lock);
4064 igb->hw.mac.get_link_status = B_TRUE;
4067 link_changed = igb_link_check(igb);
4070 igb_get_phy_state(igb);
4072 mutex_exit(&igb->gen_lock);
4075 mac_link_update(igb->mac_hdl, igb->link_state);
4077 igb_start_watchdog_timer(igb);
4086 igb_t *igb = (igb_t *)arg1;
4096 mutex_enter(&igb->gen_lock);
4098 if (igb->igb_state & IGB_SUSPENDED) {
4099 mutex_exit(&igb->gen_lock);
4106 icr = E1000_READ_REG(&igb->hw, E1000_ICR);
4108 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
4109 mutex_exit(&igb->gen_lock);
4110 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
4111 atomic_or_32(&igb->igb_state, IGB_ERROR);
4121 ASSERT(igb->num_rx_rings == 1);
4122 ASSERT(igb->num_tx_rings == 1);
4125 (void) E1000_READ_REG(&igb->hw, E1000_EICR);
4128 mp = igb_rx(&igb->rx_rings[0], IGB_NO_POLL);
4132 tx_ring = &igb->tx_rings[0];
4139 (tx_ring->tbd_free >= igb->tx_resched_thresh));
4147 igb->hw.mac.get_link_status = B_TRUE;
4150 link_changed = igb_link_check(igb);
4153 igb_get_phy_state(igb);
4158 atomic_or_32(&igb->igb_state, IGB_STALL);
4170 mutex_exit(&igb->gen_lock);
4176 mac_rx(igb->mac_hdl, NULL, mp);
4180 mac_tx_ring_update(igb->mac_hdl, tx_ring->ring_handle);
4185 mac_link_update(igb->mac_hdl, igb->link_state);
4196 igb_t *igb = (igb_t *)arg1;
4201 icr = E1000_READ_REG(&igb->hw, E1000_ICR);
4203 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
4204 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
4205 atomic_or_32(&igb->igb_state, IGB_ERROR);
4210 (void) E1000_READ_REG(&igb->hw, E1000_EICR);
4216 ASSERT(igb->num_rx_rings == 1);
4217 ASSERT(igb->num_tx_rings == 1);
4220 igb_intr_rx_work(&igb->rx_rings[0]);
4224 igb_intr_tx_work(&igb->tx_rings[0]);
4228 igb_intr_link_work(igb);
4233 atomic_or_32(&igb->igb_state, IGB_STALL);
4284 igb_t *igb = (igb_t *)arg1;
4289 icr = E1000_READ_REG(&igb->hw, E1000_ICR);
4291 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
4292 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
4293 atomic_or_32(&igb->igb_state, IGB_ERROR);
4302 igb_intr_tx_work(&igb->tx_rings[0]);
4308 igb_intr_link_work(igb);
4320 IGB_STAT(igb->dout_sync);
4325 atomic_or_32(&igb->igb_state, IGB_STALL);
4336 * igb->intr_force can be used to force sequence to start with
4341 igb_alloc_intrs(igb_t *igb)
4347 devinfo = igb->dip;
4353 igb_log(igb, IGB_LOG_ERROR,
4357 igb_log(igb, IGB_LOG_INFO, "Supported interrupt types: %x",
4360 igb->intr_type = 0;
4364 (igb->intr_force <= IGB_INTR_MSIX)) {
4365 rc = igb_alloc_intr_handles(igb, DDI_INTR_TYPE_MSIX);
4370 igb_log(igb, IGB_LOG_INFO,
4375 igb->num_rx_rings = 1;
4376 igb->num_tx_rings = 1;
4377 igb_log(igb, IGB_LOG_INFO,
4382 (igb->intr_force <= IGB_INTR_MSI)) {
4383 rc = igb_alloc_intr_handles(igb, DDI_INTR_TYPE_MSI);
4388 igb_log(igb, IGB_LOG_INFO,
4394 rc = igb_alloc_intr_handles(igb, DDI_INTR_TYPE_FIXED);
4399 igb_log(igb, IGB_LOG_INFO,
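
igb_alloc_intrs() queries the supported interrupt types with ddi_intr_get_supported_types() and then walks a preference chain, MSI-X, then MSI, then legacy fixed, honoring the intr_force tunable and falling back to a single RX and TX ring when MSI-X cannot be allocated. A condensed sketch of that fallback chain; demo_alloc_handles() is a stand-in for the per-type allocation done by igb_alloc_intr_handles():

    #include <sys/types.h>
    #include <sys/conf.h>
    #include <sys/ddi.h>
    #include <sys/sunddi.h>

    static int demo_alloc_handles(dev_info_t *, int);   /* stand-in */

    static int
    demo_alloc_intrs(dev_info_t *dip)
    {
        int types = 0;

        if (ddi_intr_get_supported_types(dip, &types) != DDI_SUCCESS)
            return (-1);

        /* Preference order: MSI-X, then MSI, then legacy fixed. */
        if (types & DDI_INTR_TYPE_MSIX) {
            if (demo_alloc_handles(dip, DDI_INTR_TYPE_MSIX) == 0)
                return (0);
            /* MSI-X failed: the driver also drops to 1 RX / 1 TX ring here. */
        }
        if (types & DDI_INTR_TYPE_MSI) {
            if (demo_alloc_handles(dip, DDI_INTR_TYPE_MSI) == 0)
                return (0);
        }
        if (types & DDI_INTR_TYPE_FIXED) {
            if (demo_alloc_handles(dip, DDI_INTR_TYPE_FIXED) == 0)
                return (0);
        }
        return (-1);
    }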
4416 igb_alloc_intr_handles(igb_t *igb, int intr_type)
4423 devinfo = igb->dip;
4429 igb_log(igb, IGB_LOG_INFO, "interrupt type: legacy");
4435 igb_log(igb, IGB_LOG_INFO, "interrupt type: MSI");
4444 request = igb->num_rx_rings + igb->num_tx_rings;
4447 igb_log(igb, IGB_LOG_INFO, "interrupt type: MSI-X");
4451 igb_log(igb, IGB_LOG_INFO,
4456 igb_log(igb, IGB_LOG_INFO,
4465 igb_log(igb, IGB_LOG_INFO,
4470 igb_log(igb, IGB_LOG_INFO, "interrupts supported: %d", count);
4477 igb_log(igb, IGB_LOG_INFO,
4482 igb_log(igb, IGB_LOG_INFO, "interrupts available: %d", avail);
4485 igb_log(igb, IGB_LOG_INFO,
4492 igb->intr_cnt = 0;
4497 igb->intr_size = request * sizeof (ddi_intr_handle_t);
4498 igb->htable = kmem_alloc(igb->intr_size, KM_SLEEP);
4500 rc = ddi_intr_alloc(devinfo, igb->htable, intr_type, 0,
4503 igb_log(igb, IGB_LOG_INFO, "Allocate interrupts failed. "
4508 igb_log(igb, IGB_LOG_INFO, "interrupts actually allocated: %d", actual);
4510 igb->intr_cnt = actual;
4513 igb_log(igb, IGB_LOG_INFO,
4524 if (diff < igb->num_tx_rings) {
4525 igb_log(igb, IGB_LOG_INFO,
4527 igb->num_tx_rings - diff);
4528 igb->num_tx_rings -= diff;
4530 igb_log(igb, IGB_LOG_INFO,
4532 igb->num_tx_rings = 1;
4534 igb_log(igb, IGB_LOG_INFO,
4537 igb->num_rx_rings = actual - 1;
4544 rc = ddi_intr_get_pri(igb->htable[0], &igb->intr_pri);
4546 igb_log(igb, IGB_LOG_INFO,
4551 rc = ddi_intr_get_cap(igb->htable[0], &igb->intr_cap);
4553 igb_log(igb, IGB_LOG_INFO,
4558 igb->intr_type = intr_type;
4563 igb_rem_intrs(igb);
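
igb_alloc_intr_handles() requests one vector per RX ring plus one per TX ring and then works with whatever ddi_intr_alloc() actually grants, trimming the TX and, if necessary, RX ring counts when fewer vectors come back. A sketch of that request-versus-actual negotiation; the ring-trimming policy itself is only summarized in a comment here:

    #include <sys/types.h>
    #include <sys/kmem.h>
    #include <sys/ddi.h>
    #include <sys/sunddi.h>

    static int
    demo_alloc_handles(dev_info_t *dip, int type, int request,
        ddi_intr_handle_t **htablep, int *actualp)
    {
        size_t size = request * sizeof (ddi_intr_handle_t);
        ddi_intr_handle_t *htable = kmem_alloc(size, KM_SLEEP);
        int rc;

        rc = ddi_intr_alloc(dip, htable, type, 0, request, actualp,
            DDI_INTR_ALLOC_NORMAL);
        if (rc != DDI_SUCCESS || *actualp == 0) {
            kmem_free(htable, size);
            return (-1);
        }

        /*
         * actual may be smaller than request; the caller then reduces the
         * number of TX (and, if necessary, RX) rings to fit the vectors
         * that were actually granted.
         */
        *htablep = htable;
        return (0);
    }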
4575 igb_add_intr_handlers(igb_t *igb)
4585 switch (igb->intr_type) {
4588 tx_ring = &igb->tx_rings[0];
4589 rc = ddi_intr_add_handler(igb->htable[vector],
4591 (void *)igb, NULL);
4594 igb_log(igb, IGB_LOG_INFO,
4602 for (i = 0; i < igb->num_rx_rings; i++) {
4603 rx_ring = &igb->rx_rings[i];
4605 rc = ddi_intr_add_handler(igb->htable[vector],
4610 igb_log(igb, IGB_LOG_INFO,
4615 igb->htable[vector]);
4626 for (i = 1; i < igb->num_tx_rings; i++) {
4627 tx_ring = &igb->tx_rings[i];
4629 rc = ddi_intr_add_handler(igb->htable[vector],
4634 igb_log(igb, IGB_LOG_INFO,
4639 igb->htable[vector]);
4653 rc = ddi_intr_add_handler(igb->htable[vector],
4655 (void *)igb, NULL);
4658 igb_log(igb, IGB_LOG_INFO,
4663 rx_ring = &igb->rx_rings[0];
4671 rc = ddi_intr_add_handler(igb->htable[vector],
4673 (void *)igb, NULL);
4676 igb_log(igb, IGB_LOG_INFO,
4681 rx_ring = &igb->rx_rings[0];
4691 ASSERT(vector == igb->intr_cnt);
4702 igb_setup_msix_82575(igb_t *igb)
4706 struct e1000_hw *hw = &igb->hw;
4714 igb->eims_mask = E1000_EICR_TX_QUEUE0 | E1000_EICR_OTHER;
4715 E1000_WRITE_REG(hw, E1000_MSIXBM(vector), igb->eims_mask);
4718 for (i = 0; i < igb->num_rx_rings; i++) {
4729 igb->eims_mask |= eims;
4734 for (i = 1; i < igb->num_tx_rings; i++) {
4745 igb->eims_mask |= eims;
4750 ASSERT(vector == igb->intr_cnt);
4769 igb_setup_msix_82576(igb_t *igb)
4771 struct e1000_hw *hw = &igb->hw;
4796 igb->eims_mask = (1 << vector);
4799 for (i = 0; i < igb->num_rx_rings; i++) {
4818 igb->eims_mask |= (1 << vector);
4823 for (i = 1; i < igb->num_tx_rings; i++) {
4843 igb->eims_mask |= (1 << vector);
4848 ASSERT(vector == igb->intr_cnt);
4860 igb_setup_msix_82580(igb_t *igb)
4862 struct e1000_hw *hw = &igb->hw;
4886 igb->eims_mask = (1 << vector);
4890 for (i = 0; i < igb->num_rx_rings; i++) {
4909 igb->eims_mask |= (1 << vector);
4914 for (i = 1; i < igb->num_tx_rings; i++) {
4934 igb->eims_mask |= (1 << vector);
4938 ASSERT(vector == igb->intr_cnt);
4945 igb_rem_intr_handlers(igb_t *igb)
4950 for (i = 0; i < igb->intr_cnt; i++) {
4951 rc = ddi_intr_remove_handler(igb->htable[i]);
4953 igb_log(igb, IGB_LOG_INFO,
4963 igb_rem_intrs(igb_t *igb)
4968 for (i = 0; i < igb->intr_cnt; i++) {
4969 rc = ddi_intr_free(igb->htable[i]);
4971 igb_log(igb, IGB_LOG_INFO,
4976 kmem_free(igb->htable, igb->intr_size);
4977 igb->htable = NULL;
4984 igb_enable_intrs(igb_t *igb)
4990 if (igb->intr_cap & DDI_INTR_FLAG_BLOCK) {
4992 rc = ddi_intr_block_enable(igb->htable, igb->intr_cnt);
4994 igb_log(igb, IGB_LOG_ERROR,
5000 for (i = 0; i < igb->intr_cnt; i++) {
5001 rc = ddi_intr_enable(igb->htable[i]);
5003 igb_log(igb, IGB_LOG_ERROR,
5017 igb_disable_intrs(igb_t *igb)
5023 if (igb->intr_cap & DDI_INTR_FLAG_BLOCK) {
5024 rc = ddi_intr_block_disable(igb->htable, igb->intr_cnt);
5026 igb_log(igb, IGB_LOG_ERROR,
5031 for (i = 0; i < igb->intr_cnt; i++) {
5032 rc = ddi_intr_disable(igb->htable[i]);
5034 igb_log(igb, IGB_LOG_ERROR,
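
igb_enable_intrs() and igb_disable_intrs() check the capability flags returned by ddi_intr_get_cap(): when DDI_INTR_FLAG_BLOCK is set they enable or disable all vectors with one block call, otherwise they walk the handle table one vector at a time. A minimal fragment of the enable side:

    #include <sys/types.h>
    #include <sys/conf.h>
    #include <sys/ddi.h>
    #include <sys/sunddi.h>

    static int
    demo_enable_intrs(ddi_intr_handle_t *htable, int cnt, int cap)
    {
        int i;

        if (cap & DDI_INTR_FLAG_BLOCK) {
            /* Enable every vector in one call when the nexus supports it. */
            if (ddi_intr_block_enable(htable, cnt) != DDI_SUCCESS)
                return (-1);
        } else {
            /* Otherwise enable the vectors one at a time. */
            for (i = 0; i < cnt; i++) {
                if (ddi_intr_enable(htable[i]) != DDI_SUCCESS)
                    return (-1);
            }
        }
        return (0);
    }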
5048 igb_get_phy_state(igb_t *igb)
5050 struct e1000_hw *hw = &igb->hw;
5060 ASSERT(mutex_owned(&igb->gen_lock));
5073 igb->param_autoneg_cap =
5075 igb->param_pause_cap =
5077 igb->param_asym_pause_cap =
5079 igb->param_1000fdx_cap =
5082 igb->param_1000hdx_cap =
5085 igb->param_100t4_cap =
5087 igb->param_100fdx_cap = ((phy_status & MII_SR_100X_FD_CAPS) ||
5089 igb->param_100hdx_cap = ((phy_status & MII_SR_100X_HD_CAPS) ||
5091 igb->param_10fdx_cap =
5093 igb->param_10hdx_cap =
5095 igb->param_rem_fault =
5098 igb->param_adv_autoneg_cap = hw->mac.autoneg;
5099 igb->param_adv_pause_cap =
5101 igb->param_adv_asym_pause_cap =
5103 igb->param_adv_1000hdx_cap =
5105 igb->param_adv_100t4_cap =
5107 igb->param_adv_rem_fault =
5109 if (igb->param_adv_autoneg_cap == 1) {
5110 igb->param_adv_1000fdx_cap =
5112 igb->param_adv_100fdx_cap =
5114 igb->param_adv_100hdx_cap =
5116 igb->param_adv_10fdx_cap =
5118 igb->param_adv_10hdx_cap =
5122 igb->param_lp_autoneg_cap =
5124 igb->param_lp_pause_cap =
5126 igb->param_lp_asym_pause_cap =
5128 igb->param_lp_1000fdx_cap =
5130 igb->param_lp_1000hdx_cap =
5132 igb->param_lp_100t4_cap =
5134 igb->param_lp_100fdx_cap =
5136 igb->param_lp_100hdx_cap =
5138 igb->param_lp_10fdx_cap =
5140 igb->param_lp_10hdx_cap =
5142 igb->param_lp_rem_fault =
5148 igb->param_autoneg_cap = 0;
5149 igb->param_pause_cap = 1;
5150 igb->param_asym_pause_cap = 1;
5151 igb->param_1000fdx_cap = 1;
5152 igb->param_1000hdx_cap = 0;
5153 igb->param_100t4_cap = 0;
5154 igb->param_100fdx_cap = 0;
5155 igb->param_100hdx_cap = 0;
5156 igb->param_10fdx_cap = 0;
5157 igb->param_10hdx_cap = 0;
5159 igb->param_adv_autoneg_cap = 0;
5160 igb->param_adv_pause_cap = 1;
5161 igb->param_adv_asym_pause_cap = 1;
5162 igb->param_adv_1000fdx_cap = 1;
5163 igb->param_adv_1000hdx_cap = 0;
5164 igb->param_adv_100t4_cap = 0;
5165 igb->param_adv_100fdx_cap = 0;
5166 igb->param_adv_100hdx_cap = 0;
5167 igb->param_adv_10fdx_cap = 0;
5168 igb->param_adv_10hdx_cap = 0;
5170 igb->param_lp_autoneg_cap = 0;
5171 igb->param_lp_pause_cap = 0;
5172 igb->param_lp_asym_pause_cap = 0;
5173 igb->param_lp_1000fdx_cap = 0;
5174 igb->param_lp_1000hdx_cap = 0;
5175 igb->param_lp_100t4_cap = 0;
5176 igb->param_lp_100fdx_cap = 0;
5177 igb->param_lp_100hdx_cap = 0;
5178 igb->param_lp_10fdx_cap = 0;
5179 igb->param_lp_10hdx_cap = 0;
5180 igb->param_lp_rem_fault = 0;
5193 igb_param_sync(igb_t *igb)
5195 igb->param_en_1000fdx_cap = igb->param_adv_1000fdx_cap;
5196 igb->param_en_1000hdx_cap = igb->param_adv_1000hdx_cap;
5197 igb->param_en_100t4_cap = igb->param_adv_100t4_cap;
5198 igb->param_en_100fdx_cap = igb->param_adv_100fdx_cap;
5199 igb->param_en_100hdx_cap = igb->param_adv_100hdx_cap;
5200 igb->param_en_10fdx_cap = igb->param_adv_10fdx_cap;
5201 igb->param_en_10hdx_cap = igb->param_adv_10hdx_cap;
5291 igb_fm_init(igb_t *igb)
5297 if (igb->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
5303 if (igb->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
5311 if (igb->fm_capabilities) {
5314 ddi_fm_init(igb->dip, &igb->fm_capabilities, &iblk);
5319 if (DDI_FM_EREPORT_CAP(igb->fm_capabilities) ||
5320 DDI_FM_ERRCB_CAP(igb->fm_capabilities))
5321 pci_ereport_setup(igb->dip);
5326 if (DDI_FM_ERRCB_CAP(igb->fm_capabilities))
5327 ddi_fm_handler_register(igb->dip,
5328 igb_fm_error_cb, (void*) igb);
5333 igb_fm_fini(igb_t *igb)
5336 if (igb->fm_capabilities) {
5341 if (DDI_FM_EREPORT_CAP(igb->fm_capabilities) ||
5342 DDI_FM_ERRCB_CAP(igb->fm_capabilities))
5343 pci_ereport_teardown(igb->dip);
5348 if (DDI_FM_ERRCB_CAP(igb->fm_capabilities))
5349 ddi_fm_handler_unregister(igb->dip);
5352 ddi_fm_fini(igb->dip);
5357 igb_fm_ereport(igb_t *igb, char *detail)
5364 if (DDI_FM_EREPORT_CAP(igb->fm_capabilities)) {
5365 ddi_fm_ereport_post(igb->dip, buf, ena, DDI_NOSLEEP,
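
igb_fm_ereport() builds the ereport class string from the supplied detail, generates an ENA, and posts the report only when the instance is ereport-capable, the same DDI_FM_EREPORT_CAP() guard used in igb_fm_init() and igb_fm_fini() above. A minimal version of that helper as a sketch:

    #include <sys/types.h>
    #include <sys/systm.h>
    #include <sys/sunddi.h>
    #include <sys/ddifm.h>
    #include <sys/nvpair.h>
    #include <sys/fm/protocol.h>
    #include <sys/fm/util.h>
    #include <sys/fm/io/ddi.h>

    static void
    demo_fm_ereport(dev_info_t *dip, int fm_capabilities, char *detail)
    {
        uint64_t ena;
        char buf[FM_MAX_CLASS];

        /* Compose the class, e.g. "device.inval_state", and a fresh ENA. */
        (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
        ena = fm_ena_generate(0, FM_ENA_FMT1);

        if (DDI_FM_EREPORT_CAP(fm_capabilities)) {
            ddi_fm_ereport_post(dip, buf, ena, DDI_NOSLEEP,
                FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
        }
    }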