Lines Matching refs:erip
101 static void eri_uninit(struct eri *erip);
148 static void eri_stop_timer(struct eri *erip);
149 static void eri_start_timer(struct eri *erip, fptrv_t func, clock_t msec);
231 #define BUMP_InNUcast(erip, pkt) \
233 HSTAT(erip, brdcstrcv); \
235 HSTAT(erip, multircv); \
238 #define BUMP_OutNUcast(erip, pkt) \
240 HSTAT(erip, brdcstxmt); \
242 HSTAT(erip, multixmt); \
251 #define ERI_PROCESS_READ(erip, bp, sum) \
261 HSTAT(erip, ipackets64); \
262 HSTATN(erip, rbytes64, len); \
263 BUMP_InNUcast(erip, bp->b_rptr); \
282 #define ERI_PROCESS_READ(erip, bp) \
290 HSTAT(erip, ipackets64); \
291 HSTATN(erip, rbytes64, len); \
292 BUMP_InNUcast(erip, bp->b_rptr); \
333 #define ERI_IOPBIOADDR(erip, a) \
334 ((erip)->iopbiobase + ((uintptr_t)a - (erip)->iopbkbase))
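For context, a minimal user-space sketch of the base+offset translation that ERI_IOPBIOADDR (lines 333-334 above) performs: a kernel virtual address inside the descriptor block is mapped to its device-visible DMA address by preserving its offset from the block base. The iopbkbase/iopbiobase names come from the listing; everything else here is illustrative.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct eri_sketch {
	uintptr_t iopbkbase;	/* kernel virtual base of the descriptor block */
	uint32_t  iopbiobase;	/* DMA (I/O) base of the same block */
};

#define ERI_IOPBIOADDR(erip, a) \
	((erip)->iopbiobase + ((uintptr_t)(a) - (erip)->iopbkbase))

int
main(void)
{
	char block[4096];
	struct eri_sketch e = {
		.iopbkbase = (uintptr_t)block,
		.iopbiobase = 0x10000	/* pretend bus address */
	};
	void *desc = block + 256;	/* a descriptor inside the block */

	assert(ERI_IOPBIOADDR(&e, desc) == 0x10000 + 256);
	printf("ioaddr = 0x%x\n", (unsigned)ERI_IOPBIOADDR(&e, desc));
	return (0);
}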
661 struct eri *erip = NULL;
673 if ((erip = ddi_get_driver_private(dip)) == NULL)
676 mutex_enter(&erip->intrlock);
677 erip->flags &= ~ERI_SUSPENDED;
678 erip->init_macregs = 1;
680 erip->stats.link_up = LINK_STATE_DOWN;
681 erip->linkcheck = 0;
683 doinit = (erip->flags & ERI_STARTED) ? B_TRUE : B_FALSE;
684 mutex_exit(&erip->intrlock);
686 if (doinit && !eri_init(erip)) {
698 erip = kmem_zalloc(sizeof (struct eri), KM_SLEEP);
703 ddi_set_driver_private(dip, erip);
704 erip->dip = dip; /* dip */
705 erip->instance = ddi_get_instance(dip); /* instance */
706 erip->flags = 0;
707 erip->multi_refcnt = 0;
708 erip->promisc = B_FALSE;
711 ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
716 macp->m_driver = erip;
718 macp->m_src_addr = erip->ouraddr;
736 ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
744 if (pci_config_setup(dip, &erip->pci_config_handle) != DDI_SUCCESS) {
745 ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
753 erip->dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
754 erip->dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
755 erip->dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
757 if (ddi_regs_map_setup(dip, 1, (caddr_t *)&(erip->globregp), 0, 0,
758 &erip->dev_attr, &erip->globregh)) {
761 erip->etxregh = erip->globregh;
762 erip->erxregh = erip->globregh;
763 erip->bmacregh = erip->globregh;
764 erip->mifregh = erip->globregh;
766 erip->etxregp = (void *)(((caddr_t)erip->globregp) + 0x2000);
767 erip->erxregp = (void *)(((caddr_t)erip->globregp) + 0x4000);
768 erip->bmacregp = (void *)(((caddr_t)erip->globregp) + 0x6000);
769 erip->mifregp = (void *)(((caddr_t)erip->globregp) + 0x6200);
774 if (ddi_regs_map_setup(dip, 1, (caddr_t *)&(erip->sw_reset_reg),
775 0x1010, 4, &erip->dev_attr, &erip->sw_reset_regh)) {
776 ERI_FAULT_MSG1(erip, SEVERITY_MID, ERI_VERB_MSG,
785 if (eri_stop(erip))
791 pci_config_put8(erip->pci_config_handle, PCI_CONF_LATENCY_TIMER,
795 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
804 if (ddi_get_iblock_cookie(dip, 0, &erip->cookie) != DDI_SUCCESS)
810 mutex_init(&erip->xmitlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
811 mutex_init(&erip->intrlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
812 mutex_init(&erip->linklock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
813 mutex_init(&erip->xcvrlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
820 if (ddi_add_intr(dip, 0, &erip->cookie, 0, eri_intr, (caddr_t)erip) ==
830 (void) eri_setup_mac_address(erip, dip);
832 if (eri_init_xfer_params(erip))
835 if (eri_burstsize(erip) == DDI_FAILURE) {
845 erip->rpending_mask = ERI_RPENDING - 1;
846 erip->rmdmax_mask = ERI_RPENDING - 1;
847 erip->mif_config = (ERI_PHY_BMSR << ERI_MIF_CFGPR_SHIFT);
849 erip->stats.pmcap = ERI_PMCAP_NONE;
852 erip->stats.pmcap = ERI_PMCAP_4MHZ;
854 if (mac_register(macp, &erip->mh) != 0)
862 if (erip->pci_config_handle)
863 (void) pci_config_teardown(&erip->pci_config_handle);
866 mutex_destroy(&erip->xmitlock);
867 mutex_destroy(&erip->intrlock);
868 mutex_destroy(&erip->linklock);
869 mutex_destroy(&erip->xcvrlock);
872 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, attach_fail_msg);
875 ddi_remove_intr(dip, 0, erip->cookie);
877 if (erip->globregh)
878 ddi_regs_map_free(&erip->globregh);
882 if (erip != NULL)
883 kmem_free(erip, sizeof (*erip));
891 struct eri *erip;
894 if ((erip = ddi_get_driver_private(dip)) == NULL) {
906 erip->flags |= ERI_SUSPENDED;
907 eri_uninit(erip);
914 if (erip->flags & (ERI_RUNNING | ERI_SUSPENDED)) {
915 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, busy_msg);
919 if (mac_unregister(erip->mh) != 0) {
926 (void) eri_stop(erip);
931 ddi_remove_intr(dip, 0, erip->cookie);
933 if (erip->pci_config_handle)
934 (void) pci_config_teardown(&erip->pci_config_handle);
941 if (erip->globregh)
942 ddi_regs_map_free(&erip->globregh);
944 erip->etxregh = NULL;
945 erip->erxregh = NULL;
946 erip->bmacregh = NULL;
947 erip->mifregh = NULL;
948 erip->globregh = NULL;
950 if (erip->sw_reset_regh)
951 ddi_regs_map_free(&erip->sw_reset_regh);
953 if (erip->ksp)
954 kstat_delete(erip->ksp);
956 eri_stop_timer(erip); /* acquire linklock */
957 eri_start_timer(erip, eri_check_link, 0);
958 mutex_destroy(&erip->xmitlock);
959 mutex_destroy(&erip->intrlock);
960 mutex_destroy(&erip->linklock);
961 mutex_destroy(&erip->xcvrlock);
963 if (erip->md_h) {
964 if (ddi_dma_unbind_handle(erip->md_h) ==
967 ddi_dma_mem_free(&erip->mdm_h);
968 ddi_dma_free_handle(&erip->md_h);
971 if (eri_freebufs(erip))
976 if (erip->eri_dvmarh) {
977 (void) dvma_release(erip->eri_dvmarh);
978 erip->eri_dvmarh = NULL;
981 * xmit_dma_mode, erip->ndmaxh[i]=NULL for dvma
985 if (erip->ndmarh[i])
986 ddi_dma_free_handle(&erip->ndmarh[i]);
991 if (erip->tbuf_ioaddr != 0) {
992 (void) ddi_dma_unbind_handle(erip->tbuf_handle);
993 erip->tbuf_ioaddr = 0;
995 if (erip->tbuf_kaddr != NULL) {
996 ddi_dma_mem_free(&erip->tbuf_acch);
997 erip->tbuf_kaddr = NULL;
999 if (erip->tbuf_handle != NULL) {
1000 ddi_dma_free_handle(&erip->tbuf_handle);
1001 erip->tbuf_handle = NULL;
1004 eri_param_cleanup(erip);
1007 kmem_free((caddr_t)erip, sizeof (struct eri));
1028 eri_setup_mac_address(struct eri *erip, dev_info_t *dip)
1046 ERI_FAULT_MSG2(erip, SEVERITY_NONE, ERI_VERB_MSG,
1058 bcopy(prop, erip->ouraddr, ETHERADDRL);
1070 bcopy(&factaddr, erip->ouraddr, ETHERADDRL);
1072 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
1083 bcopy(&factaddr, erip->ouraddr, ETHERADDRL);
1109 struct eri *erip = arg;
1113 ASSERT(erip != NULL);
1134 eri_process_ndd_ioctl(erip, wq, mp, iocp->ioc_cmd);
1142 eri_loopback(erip, wq, mp);
1150 ASSERT(!MUTEX_HELD(&erip->linklock));
1154 eri_loopback(struct eri *erip, queue_t *wq, mblk_t *mp)
1170 erip->flags &= (~ERI_MACLOOPBACK & ~ERI_SERLOOPBACK);
1173 erip->stats.link_up = LINK_STATE_DOWN;
1174 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1175 (void) eri_init(erip);
1179 erip->flags |= ERI_MACLOOPBACK;
1180 erip->flags &= ~ERI_SERLOOPBACK;
1182 erip->stats.link_up = LINK_STATE_DOWN;
1183 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1184 (void) eri_init(erip);
1191 erip->flags |= ERI_SERLOOPBACK;
1192 erip->flags &= ~ERI_MACLOOPBACK;
1195 erip->stats.link_up = LINK_STATE_DOWN;
1196 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1197 (void) eri_init(erip);
1201 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
1216 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1224 struct eri *erip = arg;
1226 mutex_enter(&erip->intrlock);
1227 erip->promisc = on;
1228 eri_init_rx(erip);
1229 mutex_exit(&erip->intrlock);
1240 struct eri *erip = arg;
1249 mutex_enter(&erip->intrlock);
1251 erip->ladrf_refcnt[ladrf_bit]++;
1252 if (erip->ladrf_refcnt[ladrf_bit] == 1) {
1253 LADRF_SET(erip, ladrf_bit);
1254 erip->multi_refcnt++;
1255 eri_init_rx(erip);
1258 erip->ladrf_refcnt[ladrf_bit]--;
1259 if (erip->ladrf_refcnt[ladrf_bit] == 0) {
1260 LADRF_CLR(erip, ladrf_bit);
1261 erip->multi_refcnt--;
1262 eri_init_rx(erip);
1265 mutex_exit(&erip->intrlock);
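The eri_multicst matches above (lines 1249-1265) show a per-bit reference count deciding when a bit in the hardware hash filter is really set or cleared. A minimal user-space sketch of that pattern, assuming a 256-bit filter and treating LADRF_SET/LADRF_CLR as plain bit operations:

#include <stdint.h>
#include <stdio.h>

#define	LADRF_BITS	256

static uint8_t  ladrf_refcnt[LADRF_BITS];
static uint32_t ladrf[LADRF_BITS / 32];

static void
multicast_update(unsigned bit, int add)
{
	if (add) {
		if (++ladrf_refcnt[bit] == 1)		/* first user: set bit */
			ladrf[bit / 32] |= 1u << (bit % 32);
	} else {
		if (--ladrf_refcnt[bit] == 0)		/* last user: clear bit */
			ladrf[bit / 32] &= ~(1u << (bit % 32));
	}
}

int
main(void)
{
	multicast_update(42, 1);
	multicast_update(42, 1);	/* second group hashing to the same bit */
	multicast_update(42, 0);	/* one leaves; bit must stay set */
	printf("bit 42 still set: %d\n",
	    (int)((ladrf[42 / 32] >> (42 % 32)) & 1));
	return (0);
}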
1272 struct eri *erip = arg;
1279 mutex_enter(&erip->intrlock);
1280 bcopy(macaddr, &erip->ouraddr, ETHERADDRL);
1281 eri_init_rx(erip);
1282 mutex_exit(&erip->intrlock);
1304 struct eri *erip = arg;
1306 mutex_enter(&erip->intrlock);
1307 erip->flags |= ERI_STARTED;
1308 mutex_exit(&erip->intrlock);
1310 if (!eri_init(erip)) {
1311 mutex_enter(&erip->intrlock);
1312 erip->flags &= ~ERI_STARTED;
1313 mutex_exit(&erip->intrlock);
1322 struct eri *erip = arg;
1324 mutex_enter(&erip->intrlock);
1325 erip->flags &= ~ERI_STARTED;
1326 mutex_exit(&erip->intrlock);
1327 eri_uninit(erip);
1333 struct eri *erip = arg;
1337 esp = &erip->stats;
1339 mutex_enter(&erip->xmitlock);
1340 if ((erip->flags & ERI_RUNNING) && (erip->flags & ERI_TXINIT)) {
1341 erip->tx_completion =
1343 macupdate |= eri_reclaim(erip, erip->tx_completion);
1345 mutex_exit(&erip->xmitlock);
1347 mac_tx_update(erip->mh);
1349 eri_savecntrs(erip);
1426 *val = erip->phyad;
1523 eri_init_macregs_generic(struct eri *erip)
1530 if ((erip->stats.inits == 1) || (erip->init_macregs)) {
1531 erip->init_macregs = 0;
1546 ((erip->ouraddr[0] & 0x3) << 8) | erip->ouraddr[1]);
1576 if (erip->pauseTX)
1585 PUT_MACREG(madd0, (erip->ouraddr[4] << 8) | erip->ouraddr[5]);
1586 PUT_MACREG(madd1, (erip->ouraddr[2] << 8) | erip->ouraddr[3]);
1587 PUT_MACREG(madd2, (erip->ouraddr[0] << 8) | erip->ouraddr[1]);
1593 PUT_MACREG(hash0, erip->ladrf[0]);
1594 PUT_MACREG(hash1, erip->ladrf[1]);
1595 PUT_MACREG(hash2, erip->ladrf[2]);
1596 PUT_MACREG(hash3, erip->ladrf[3]);
1597 PUT_MACREG(hash4, erip->ladrf[4]);
1598 PUT_MACREG(hash5, erip->ladrf[5]);
1599 PUT_MACREG(hash6, erip->ladrf[6]);
1600 PUT_MACREG(hash7, erip->ladrf[7]);
1601 PUT_MACREG(hash8, erip->ladrf[8]);
1602 PUT_MACREG(hash9, erip->ladrf[9]);
1603 PUT_MACREG(hash10, erip->ladrf[10]);
1604 PUT_MACREG(hash11, erip->ladrf[11]);
1605 PUT_MACREG(hash12, erip->ladrf[12]);
1606 PUT_MACREG(hash13, erip->ladrf[13]);
1607 PUT_MACREG(hash14, erip->ladrf[14]);
1611 eri_flush_rxbufs(struct eri *erip)
1626 if (erip->rmblkp[i]) {
1627 if (erip->eri_dvmarh)
1628 dvma_unload(erip->eri_dvmarh, 2 * i,
1630 else if ((ddi_dma_unbind_handle(erip->ndmarh[i]) ==
1633 freeb(erip->rmblkp[i]);
1634 erip->rmblkp[i] = NULL;
1641 eri_init_txbufs(struct eri *erip)
1646 bzero((caddr_t)erip->eri_tmdp, ERI_TPENDING * sizeof (struct eri_tmd));
1651 ERI_SYNCIOPB(erip, erip->eri_tmdp,
1656 erip->tcurp = erip->eri_tmdp;
1657 erip->tnextp = erip->eri_tmdp;
1658 erip->tx_cur_cnt = 0;
1659 erip->tx_kick = 0;
1660 erip->tx_completion = 0;
1664 eri_init_rxbufs(struct eri *erip)
1675 bzero((caddr_t)erip->rmdp, ERI_RPENDING * sizeof (struct rmd));
1683 if (erip->eri_dvmarh)
1684 dvma_kaddr_load(erip->eri_dvmarh,
1690 else if (ddi_dma_addr_bind_handle(erip->ndmarh[i], NULL,
1696 PUT_RMD((&erip->rmdp[i]), dma_cookie);
1697 erip->rmblkp[i] = bp; /* save for later use */
1703 ERI_SYNCIOPB(erip, erip->rmdp, (ERI_RPENDING * sizeof (struct rmd)),
1708 erip->rnextp = erip->rmdp;
1709 erip->rx_completion = 0;
1710 erip->rx_kick = ERI_RPENDING - 4;
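eri_init_txbufs and eri_init_rxbufs above reset paired ring indices: a producer index ("kick") the driver writes to the device, and a consumer index ("completion") the device advances. A small sketch of that kick/completion discipline, assuming a power-of-two ring and simple modular arithmetic:

#include <stdio.h>

#define	RING	8	/* must be a power of two */

static unsigned kick, completion;	/* producer / consumer indices */

static int
ring_full(void)
{
	return (((kick + 1) & (RING - 1)) == completion);
}

static void
produce(void)
{
	if (!ring_full())
		kick = (kick + 1) & (RING - 1);	/* hand a slot to "hardware" */
}

static void
consume(void)
{
	if (completion != kick)
		completion = (completion + 1) & (RING - 1);	/* reclaim */
}

int
main(void)
{
	for (int i = 0; i < 5; i++)
		produce();
	consume();
	printf("kick=%u completion=%u\n", kick, completion);	/* 5 and 1 */
	return (0);
}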
1715 eri_txmac_disable(struct eri *erip)
1731 eri_rxmac_disable(struct eri *erip)
1749 eri_stop(struct eri *erip)
1751 (void) eri_erx_reset(erip);
1752 (void) eri_etx_reset(erip);
1759 if (erip->linkcheck) {
1760 erip->linkcheck = 0;
1761 erip->global_reset_issued = 2;
1764 erip->stats.link_up = LINK_STATE_DOWN;
1765 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1766 erip->global_reset_issued = -1;
1771 erip->rx_reset_issued = -1;
1772 erip->tx_reset_issued = -1;
1798 eri_erx_reset(struct eri *erip)
1800 (void) eri_rxmac_disable(erip); /* Disable the RX MAC */
1806 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1818 erip->rx_reset_issued = -1;
1833 eri_etx_reset(struct eri *erip)
1835 (void) eri_txmac_disable(erip);
1842 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1849 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1860 erip->tx_reset_issued = -1;
1873 eri_init_txregs(struct eri *erip)
1883 tx_ring = ERI_IOPBIOADDR(erip, erip->eri_tmdp);
1909 ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
1916 ENABLE_TXDMA(erip);
1917 ENABLE_MAC(erip);
1926 eri_init_rxregs(struct eri *erip)
1938 rx_ring = ERI_IOPBIOADDR(erip, erip->rmdp);
1941 PUT_ERXREG(rx_kick, erip->rx_kick);
1973 ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
1989 erip->rxfifo_size = GET_ERXREG(rxfifo_size);
1990 ENABLE_RXDMA(erip);
1995 eri_freebufs(struct eri *erip)
1999 status = eri_flush_rxbufs(erip);
2004 eri_update_rxbufs(struct eri *erip)
2012 rmdpbase = erip->rmdp;
2021 ERI_SYNCIOPB(erip, erip->rmdp, (ERI_RPENDING * sizeof (struct rmd)),
2026 erip->rnextp = erip->rmdp;
2027 erip->rx_completion = 0;
2028 erip->rx_kick = ERI_RPENDING - 4;
2038 eri_init_rx_channel(struct eri *erip)
2040 erip->flags &= ~ERI_RXINIT;
2041 (void) eri_erx_reset(erip);
2042 eri_update_rxbufs(erip);
2043 if (eri_init_rxregs(erip))
2047 erip->rx_reset_issued = 0;
2048 HSTAT(erip, rx_inits);
2049 erip->flags |= ERI_RXINIT;
2054 eri_init_rx(struct eri *erip)
2061 (void) eri_rxmac_disable(erip); /* Disable the RX MAC */
2067 PUT_MACREG(madd0, (erip->ouraddr[4] << 8) | erip->ouraddr[5]);
2068 PUT_MACREG(madd1, (erip->ouraddr[2] << 8) | erip->ouraddr[3]);
2069 PUT_MACREG(madd2, (erip->ouraddr[0] << 8) | erip->ouraddr[1]);
2079 ladrf = erip->ladrf;
2100 ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2101 (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2105 ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2106 (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2112 HSTAT(erip, rx_inits);
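The rx-config writes above compose a register value from driver state (promisc, multicast refcount). A tiny sketch of that flags-to-register composition; the bit values below are invented, only the shape follows the listing:

#include <stdint.h>
#include <stdio.h>

#define	BMAC_RXCFG_ENAB		0x0001	/* invented bit positions */
#define	BMAC_RXCFG_PROMIS	0x0008
#define	BMAC_RXCFG_HASH		0x0020

static uint32_t
rxcfg_value(int promisc, int multi_refcnt)
{
	return ((promisc ? BMAC_RXCFG_PROMIS : 0) |
	    (multi_refcnt ? BMAC_RXCFG_HASH : 0) |
	    BMAC_RXCFG_ENAB);
}

int
main(void)
{
	printf("rxcfg = 0x%x\n", (unsigned)rxcfg_value(1, 0));	/* 0x9 */
	return (0);
}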
2117 * &erip->xmitlock is held before calling this routine.
2120 eri_init_txmac(struct eri *erip)
2124 erip->flags &= ~ERI_TXINIT;
2128 (void) eri_txmac_disable(erip);
2147 if (erip->ngu_enable)
2149 ((param_lance_mode && (erip->lance_mode_enable)) ?
2155 ((param_lance_mode && (erip->lance_mode_enable)) ?
2159 ENABLE_TXDMA(erip);
2160 ENABLE_TXMAC(erip);
2162 HSTAT(erip, tx_inits);
2163 erip->flags |= ERI_TXINIT;
2167 eri_unallocthings(struct eri *erip)
2172 flag = erip->alloc_flag;
2175 (void) ddi_dma_unbind_handle(erip->md_h);
2178 ddi_dma_mem_free(&erip->mdm_h);
2179 erip->rmdp = NULL;
2180 erip->eri_tmdp = NULL;
2184 ddi_dma_free_handle(&erip->md_h);
2186 (void) eri_freebufs(erip);
2189 for (i = 0; i < erip->rcv_handle_cnt; i++)
2190 ddi_dma_free_handle(&erip->ndmarh[i]);
2193 (void) dvma_release(erip->eri_dvmarh);
2194 erip->eri_dvmarh = NULL;
2198 (void) ddi_dma_unbind_handle(erip->tbuf_handle);
2199 erip->tbuf_ioaddr = 0;
2203 ddi_dma_mem_free(&erip->tbuf_acch);
2204 erip->tbuf_kaddr = NULL;
2208 ddi_dma_free_handle(&erip->tbuf_handle);
2209 erip->tbuf_handle = NULL;
2250 eri_init(struct eri *erip)
2264 ASSERT(erip != NULL);
2266 if (erip->flags & ERI_SUSPENDED) {
2271 mutex_enter(&erip->intrlock);
2272 eri_stop_timer(erip); /* acquire linklock */
2273 mutex_enter(&erip->xmitlock);
2274 erip->flags &= (ERI_DLPI_LINKUP | ERI_STARTED);
2275 erip->wantw = B_FALSE;
2276 HSTAT(erip, inits);
2277 erip->txhung = 0;
2279 if ((erip->stats.inits > 1) && (erip->init_macregs == 0))
2280 eri_savecntrs(erip);
2282 mutex_enter(&erip->xcvrlock);
2283 if (!param_linkup || erip->linkcheck) {
2284 if (!erip->linkcheck)
2286 (void) eri_stop(erip);
2288 if (!(erip->flags & ERI_DLPI_LINKUP) || !param_linkup) {
2289 erip->flags |= ERI_DLPI_LINKUP;
2290 eri_mif_poll(erip, MIF_POLL_STOP);
2291 (void) eri_new_xcvr(erip);
2292 ERI_DEBUG_MSG1(erip, XCVR_MSG, "New transceiver detected.");
2298 if (eri_reset_xcvr(erip)) {
2299 ERI_FAULT_MSG1(erip, SEVERITY_NONE,
2301 mutex_exit(&erip->xcvrlock);
2305 if (erip->stats.link_up == LINK_STATE_UP)
2308 erip->flags |= (ERI_RUNNING | ERI_INITIALIZED);
2310 erip->stats.link_up = LINK_STATE_DOWN;
2311 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2319 eri_mif_poll(erip, MIF_POLL_START);
2322 mutex_exit(&erip->xcvrlock);
2327 if (erip->global_reset_issued) {
2328 if (erip->global_reset_issued == 2) { /* fast path */
2333 eri_init_txbufs(erip);
2335 eri_update_rxbufs(erip);
2337 init_stat = eri_allocthings(erip);
2341 if (eri_freebufs(erip))
2346 eri_init_txbufs(erip);
2347 if (eri_init_rxbufs(erip))
2360 if (eri_txmac_disable(erip)) {
2361 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
2364 erip->stats.link_up = LINK_STATE_DOWN;
2365 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2370 if (eri_rxmac_disable(erip)) {
2371 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
2374 erip->stats.link_up = LINK_STATE_DOWN;
2375 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2380 eri_init_macregs_generic(erip);
2402 erip->tx_int_me = 0;
2408 if (erip->global_reset_issued) {
2413 if (eri_init_txregs(erip))
2421 if (eri_init_rxregs(erip))
2436 ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2437 (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2441 ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2442 (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2448 if (erip->ngu_enable)
2451 ((param_lance_mode && (erip->lance_mode_enable)) ?
2458 ((param_lance_mode && (erip->lance_mode_enable)) ?
2462 if (erip->pauseRX)
2464 if (erip->pauseTX)
2487 if (erip->flags & ERI_MACLOOPBACK) {
2494 ENABLE_MAC(erip);
2495 erip->flags |= (ERI_RUNNING | ERI_INITIALIZED |
2497 mac_tx_update(erip->mh);
2498 erip->global_reset_issued = 0;
2501 eri_xcvr_force_mode(erip, &link_timeout);
2506 eri_unallocthings(erip);
2508 mutex_exit(&erip->xmitlock);
2509 eri_start_timer(erip, eri_check_link, link_timeout);
2510 mutex_exit(&erip->intrlock);
2513 mac_link_update(erip->mh, linkupdate);
2515 ret = (erip->flags & ERI_RUNNING) ? B_TRUE : B_FALSE;
2517 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
2522 ASSERT(!MUTEX_HELD(&erip->linklock));
2530 eri_burstsize(struct eri *erip)
2534 if (ddi_dma_alloc_handle(erip->dip, &dma_attr, DDI_DMA_DONTWAIT,
2538 erip->burstsizes = ddi_dma_burstsizes(handle);
2541 if (erip->burstsizes)
2551 eri_uninit(struct eri *erip)
2558 ERI_DELAY((erip->tcurp == erip->tnextp), ERI_DRAINTIME);
2560 mutex_enter(&erip->intrlock);
2561 eri_stop_timer(erip); /* acquire linklock */
2562 mutex_enter(&erip->xmitlock);
2563 mutex_enter(&erip->xcvrlock);
2564 eri_mif_poll(erip, MIF_POLL_STOP);
2565 erip->flags &= ~ERI_DLPI_LINKUP;
2566 mutex_exit(&erip->xcvrlock);
2568 needind = !erip->linkcheck;
2569 (void) eri_stop(erip);
2570 erip->flags &= ~ERI_RUNNING;
2572 mutex_exit(&erip->xmitlock);
2573 eri_start_timer(erip, eri_check_link, 0);
2574 mutex_exit(&erip->intrlock);
2577 mac_link_update(erip->mh, LINK_STATE_DOWN);
2589 eri_allocthings(struct eri *erip)
2604 if (erip->rmdp)
2607 erip->alloc_flag = 0;
2615 rval = ddi_dma_alloc_handle(erip->dip, &desc_dma_attr,
2616 DDI_DMA_DONTWAIT, 0, &erip->md_h);
2620 erip->alloc_flag |= ERI_DESC_HANDLE_ALLOC;
2622 rval = ddi_dma_mem_alloc(erip->md_h, size, &erip->dev_attr,
2624 (caddr_t *)&erip->iopbkbase, &real_len, &erip->mdm_h);
2628 erip->alloc_flag |= ERI_DESC_MEM_ALLOC;
2630 rval = ddi_dma_addr_bind_handle(erip->md_h, NULL,
2631 (caddr_t)erip->iopbkbase, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2632 DDI_DMA_DONTWAIT, 0, &erip->md_c, &cookiec);
2637 erip->alloc_flag |= ERI_DESC_MEM_MAP;
2642 erip->iopbiobase = erip->md_c.dmac_address;
2644 a = erip->iopbkbase;
2646 erip->rmdp = (struct rmd *)a;
2648 erip->eri_tmdp = (struct eri_tmd *)a;
2666 (dvma_reserve(erip->dip, &eri_dma_limits, (ERI_RPENDING * 2),
2667 &erip->eri_dvmarh)) == DDI_SUCCESS) {
2668 erip->alloc_flag |= ERI_RCV_DVMA_ALLOC;
2670 erip->eri_dvmarh = NULL;
2673 rval = ddi_dma_alloc_handle(erip->dip,
2675 0, &erip->ndmarh[i]);
2678 ERI_FAULT_MSG1(erip, SEVERITY_HIGH,
2685 erip->rcv_handle_cnt = i;
2688 erip->alloc_flag |= ERI_RCV_HANDLE_ALLOC;
2701 * So we cannot use ddi_dma_mem_alloc(, &erip->ge_dev_attr)
2704 if (ddi_dma_alloc_handle(erip->dip, &desc_dma_attr, DDI_DMA_DONTWAIT,
2705 0, &erip->tbuf_handle) != DDI_SUCCESS) {
2706 ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
2710 erip->alloc_flag |= ERI_XBUFS_HANDLE_ALLOC;
2712 if (ddi_dma_mem_alloc(erip->tbuf_handle, size, &buf_attr,
2713 DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, &erip->tbuf_kaddr,
2714 &real_len, &erip->tbuf_acch) != DDI_SUCCESS) {
2715 ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
2719 erip->alloc_flag |= ERI_XBUFS_KMEM_ALLOC;
2720 if (ddi_dma_addr_bind_handle(erip->tbuf_handle, NULL,
2721 erip->tbuf_kaddr, size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2725 erip->tbuf_ioaddr = dma_cookie.dmac_address;
2726 erip->alloc_flag |= ERI_XBUFS_KMEM_DMABIND;
2733 erip->rmdlimp = &((erip->rmdp)[ERI_RPENDING]);
2734 erip->eri_tmdlimp = &((erip->eri_tmdp)[ERI_TPENDING]);
2739 bzero((caddr_t)erip->rmblkp, sizeof (erip->rmblkp));
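eri_allocthings records each successful step in alloc_flag (ERI_DESC_HANDLE_ALLOC, ERI_DESC_MEM_ALLOC, ...) so eri_unallocthings (lines 2167-2209) can unwind exactly what was set up. A compact user-space sketch of that flag-guarded teardown, with malloc/free standing in for the DDI resources:

#include <stdio.h>
#include <stdlib.h>

#define	ERI_DESC_HANDLE_ALLOC	0x01
#define	ERI_DESC_MEM_ALLOC	0x02

static unsigned alloc_flag;
static void *handle, *mem;

static void
unallocthings(void)
{
	if (alloc_flag & ERI_DESC_MEM_ALLOC)
		free(mem);
	if (alloc_flag & ERI_DESC_HANDLE_ALLOC)
		free(handle);
	alloc_flag = 0;
}

static int
allocthings(void)
{
	if ((handle = malloc(16)) == NULL)
		return (-1);
	alloc_flag |= ERI_DESC_HANDLE_ALLOC;

	if ((mem = malloc(4096)) == NULL) {
		unallocthings();	/* frees only what alloc_flag records */
		return (-1);
	}
	alloc_flag |= ERI_DESC_MEM_ALLOC;
	return (0);
}

int
main(void)
{
	int r = allocthings();

	printf("alloc %s\n", r ? "failed" : "ok");
	unallocthings();
	return (0);
}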
2750 struct eri *erip = (void *)arg;
2763 mutex_enter(&erip->intrlock);
2772 (erip->flags & ERI_RUNNING)) {
2778 if (erip->flags & ERI_INITIALIZED) {
2779 erip->flags &= ~ERI_INITIALIZED;
2789 ERI_DEBUG_MSG2(erip, DIAG_MSG,
2794 ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:MIF Config = 0x%X",
2796 ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:MIF imask = 0x%X",
2798 ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:INT imask = 0x%X",
2800 ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:alias %X",
2803 linkupdate = eri_check_link_noind(erip);
2805 mutex_exit(&erip->intrlock);
2808 mac_link_update(erip->mh, linkupdate);
2814 if (!(erip->flags & ERI_RUNNING)) {
2815 mutex_exit(&erip->intrlock);
2816 eri_uninit(erip);
2821 ERI_DEBUG_MSG2(erip, INTR_MSG,
2823 (void) eri_fatal_err(erip, erisbits);
2826 if (erip->rx_reset_issued) {
2827 erip->rx_reset_issued = 0;
2828 (void) eri_init_rx_channel(erip);
2829 mutex_exit(&erip->intrlock);
2832 erip->stats.link_up = LINK_STATE_DOWN;
2833 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2834 DISABLE_MAC(erip);
2835 mutex_exit(&erip->intrlock);
2836 (void) eri_init(erip);
2842 ERI_DEBUG_MSG2(erip, INTR_MSG,
2844 (void) eri_nonfatal_err(erip, erisbits);
2845 if (erip->linkcheck) {
2846 mutex_exit(&erip->intrlock);
2847 (void) eri_init(erip);
2854 ERI_DEBUG_MSG2(erip, XCVR_MSG,
2855 "eri_intr:MIF Interrupt:mii_status %X", erip->mii_status);
2856 eri_stop_timer(erip); /* acquire linklock */
2858 mutex_enter(&erip->xmitlock);
2859 mutex_enter(&erip->xcvrlock);
2862 eri_mif_poll(erip, MIF_POLL_STOP);
2863 ERI_DEBUG_MSG3(erip, XCVR_MSG,
2865 mif_status, erip->mii_status);
2866 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
2867 linkupdate = eri_mif_check(erip, stat, stat);
2871 eri_mif_poll(erip, MIF_POLL_STOP);
2872 linkupdate = eri_mif_check(erip, (uint16_t)mif_status,
2875 eri_mif_poll(erip, MIF_POLL_START);
2876 mutex_exit(&erip->xcvrlock);
2877 mutex_exit(&erip->xmitlock);
2879 if (!erip->openloop_autoneg)
2880 eri_start_timer(erip, eri_check_link,
2883 eri_start_timer(erip, eri_check_link,
2887 ERI_DEBUG_MSG2(erip, INTR_MSG,
2892 (erip->tx_cur_cnt >= tx_interrupt_rate)) {
2893 mutex_enter(&erip->xmitlock);
2894 erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
2897 macupdate |= eri_reclaim(erip, erip->tx_completion);
2899 erip->wantw = B_FALSE;
2901 mutex_exit(&erip->xmitlock);
2909 uint32_t rmdmax_mask = erip->rmdmax_mask;
2911 rmdpbase = erip->rmdp;
2912 rmdi = erip->rx_completion;
2918 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
2927 mp = eri_read_dma(erip, rmdp, rmdi, flags);
2950 erip->rx_kick =
2953 PUT_ERXREG(rx_kick, erip->rx_kick);
2960 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
2965 erip->rx_completion = rmdi;
2968 mutex_exit(&erip->intrlock);
2971 mac_rx(erip->mh, NULL, head);
2974 mac_tx_update(erip->mh);
2977 mac_link_update(erip->mh, linkupdate);
2995 eri_fatal_err(struct eri *erip, uint32_t erisbits)
3001 erip->rx_reset_issued = 1;
3002 HSTAT(erip, rxtag_err);
3004 erip->global_reset_issued = 1;
3007 HSTAT(erip, pci_error_int);
3009 HSTAT(erip, parity_error);
3011 HSTAT(erip, unknown_fatal);
3018 if (pci_error_int && erip->pci_config_handle) {
3019 pci_status = pci_config_get16(erip->pci_config_handle,
3021 ERI_DEBUG_MSG2(erip, FATAL_ERR_MSG, "Bus Error Status %x",
3024 HSTAT(erip, pci_data_parity_err);
3026 HSTAT(erip, pci_signal_target_abort);
3028 HSTAT(erip, pci_rcvd_target_abort);
3030 HSTAT(erip, pci_rcvd_master_abort);
3032 HSTAT(erip, pci_signal_system_err);
3034 HSTAT(erip, pci_signal_system_err);
3038 pci_config_put16(erip->pci_config_handle, PCI_CONF_STAT,
3048 eri_nonfatal_err(struct eri *erip, uint32_t erisbits)
3054 if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
3056 erip->stats.pmcap = ERI_PMCAP_NONE;
3062 erip->linkcheck = 1;
3063 HSTAT(erip, txmac_urun);
3064 HSTAT(erip, oerrors);
3068 erip->linkcheck = 1;
3069 HSTAT(erip, txmac_maxpkt_err);
3070 HSTAT(erip, oerrors);
3073 erip->stats.collisions += 0x10000;
3077 erip->stats.excessive_coll += 0x10000;
3081 erip->stats.late_coll += 0x10000;
3085 erip->stats.first_coll += 0x10000;
3089 HSTAT(erip, defer_timer_exp);
3093 erip->stats.peak_attempt_cnt += 0x100;
3098 ERI_DEBUG_MSG1(erip, NONFATAL_MSG, "rx dropped/no free desc");
3101 erip->linkcheck = 1;
3103 HSTAT(erip, no_free_rx_desc);
3104 HSTAT(erip, ierrors);
3110 eri_stop_timer(erip); /* acquire linklock */
3111 erip->check_rmac_hang++;
3112 erip->check2_rmac_hang = 0;
3113 erip->rxfifo_wr_ptr = GET_ERXREG(rxfifo_wr_ptr);
3114 erip->rxfifo_rd_ptr = GET_ERXREG(rxfifo_rd_ptr);
3116 ERI_DEBUG_MSG5(erip, NONFATAL_MSG,
3118 erip->check_rmac_hang,
3123 eri_start_timer(erip, eri_check_link,
3127 erip->linkcheck = 1;
3129 HSTAT(erip, rx_overflow);
3130 HSTAT(erip, ierrors);
3134 erip->stats.rx_align_err += 0x10000;
3135 erip->stats.ierrors += 0x10000;
3139 erip->stats.rx_crc_err += 0x10000;
3140 erip->stats.ierrors += 0x10000;
3144 erip->stats.rx_length_err += 0x10000;
3145 erip->stats.ierrors += 0x10000;
3149 erip->stats.rx_code_viol_err += 0x10000;
3150 erip->stats.ierrors += 0x10000;
3160 ERI_DEBUG_MSG2(erip, NONFATAL_MSG,
3163 HSTAT(erip, pause_rxcount);
3164 erip->stats.pause_time_count += pause_time;
3168 HSTAT(erip, pause_oncount);
3169 erip->stats.pausing = 1;
3173 HSTAT(erip, pause_offcount);
3174 erip->stats.pausing = 0;
3185 eri_savecntrs(struct eri *erip)
3192 HSTATN(erip, rx_crc_err, fecnt);
3196 HSTATN(erip, rx_align_err, aecnt);
3200 HSTATN(erip, rx_length_err, lecnt);
3204 HSTATN(erip, rx_code_viol_err, rxcv);
3208 HSTATN(erip, late_coll, ltcnt);
3211 erip->stats.collisions += (GET_MACREG(nccnt) + ltcnt);
3215 HSTATN(erip, excessive_coll, excnt);
3219 HSTATN(erip, first_coll, fccnt);
3226 HSTATN(erip, ierrors, (fecnt + aecnt + lecnt));
3227 HSTATN(erip, oerrors, (ltcnt + excnt));
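eri_savecntrs above folds narrow hardware error counters into wide software statistics; the accumulate-then-zero step is an assumption here, but it matches the read-and-reset shape the HSTATN lines suggest. A user-space sketch, assuming 16-bit hardware registers:

#include <stdint.h>
#include <stdio.h>

static uint16_t hw_crc_errs = 12345;	/* stands in for GET_MACREG(fecnt) */

static uint16_t
read_and_clear(uint16_t *reg)
{
	uint16_t v = *reg;

	*reg = 0;	/* model the register being zeroed after a read */
	return (v);
}

int
main(void)
{
	uint64_t rx_crc_err = 0;	/* wide software counter */

	rx_crc_err += read_and_clear(&hw_crc_errs);
	rx_crc_err += read_and_clear(&hw_crc_errs);	/* adds 0 this time */
	printf("rx_crc_err = %llu\n", (unsigned long long)rx_crc_err);
	return (0);
}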
3268 send_bit(struct eri *erip, uint32_t x)
3279 get_bit_std(struct eri *erip)
3293 #define SEND_BIT(x) send_bit(erip, x)
3294 #define GET_BIT_STD(x) x = get_bit_std(erip)
3298 eri_bb_mii_write(struct eri *erip, uint8_t regad, uint16_t data)
3304 phyad = erip->phyad;
3305 (void) eri_bb_force_idle(erip);
3323 eri_bb_mii_read(struct eri *erip, uint8_t regad, uint16_t *datap)
3333 phyad = erip->phyad;
3334 (void) eri_bb_force_idle(erip);
3362 eri_bb_force_idle(struct eri *erip)
3382 eri_mii_read(struct eri *erip, uint8_t regad, uint16_t *datap)
3390 if (!erip->frame_enable)
3391 return (eri_bb_mii_read(erip, regad, datap));
3393 phyad = erip->phyad;
3396 eri_error(erip->dip, "Frame Register used for MII");
3400 ERI_DEBUG_MSG3(erip, FRM_MSG,
3418 eri_mii_write(struct eri *erip, uint8_t regad, uint16_t data)
3422 if (!erip->frame_enable) {
3423 eri_bb_mii_write(erip, regad, data);
3427 phyad = erip->phyad;
3448 eri_send_msg(struct eri *erip, mblk_t *mp)
3467 HSTAT(erip, tnocar);
3468 HSTAT(erip, oerrors);
3492 HSTAT(erip, oerrors);
3500 BUMP_OutNUcast(erip, mp->b_rptr);
3502 mutex_enter(&erip->xmitlock);
3504 tbasep = erip->eri_tmdp;
3507 tmdp = erip->tnextp;
3509 if (tmdp >= erip->tcurp) /* check notmds */
3510 i = tmdp - erip->tcurp;
3512 i = tmdp + ERI_TPENDING - erip->tcurp;
3517 if (i >= (ERI_TPENDING >> 1) && !(erip->starts & 0x7)) {
3520 if (!erip->tx_int_me) {
3523 erip->tx_int_me = 1;
3530 ptr = erip->tbuf_kaddr + offset;
3547 c.dmac_address = erip->tbuf_ioaddr + offset;
3548 (void) ddi_dma_sync(erip->tbuf_handle,
3557 ERI_SYNCIOPB(erip, tmdp, sizeof (struct eri_tmd),
3560 tmdp = NEXTTMD(erip, tmdp);
3561 erip->tx_cur_cnt++;
3563 erip->tx_kick = tmdp - tbasep;
3564 PUT_ETXREG(tx_kick, erip->tx_kick);
3565 erip->tnextp = tmdp;
3567 erip->starts++;
3569 if (erip->tx_cur_cnt >= tx_interrupt_rate) {
3570 erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
3572 (void) eri_reclaim(erip, erip->tx_completion);
3574 mutex_exit(&erip->xmitlock);
3579 HSTAT(erip, notmds);
3580 erip->wantw = B_TRUE;
3582 mutex_exit(&erip->xmitlock);
3590 struct eri *erip = arg;
3596 if (!eri_send_msg(erip, mp)) {
3610 eri_reclaim(struct eri *erip, uint32_t tx_completion)
3619 tbasep = erip->eri_tmdp;
3620 tlimp = erip->eri_tmdlimp;
3622 tmdp = erip->tcurp;
3631 HSTAT(erip, opackets64);
3633 HSTATN(erip, obytes64, (flags & ERI_TMD_BUFSIZE));
3639 erip->tcurp = tmdp;
3640 erip->tx_cur_cnt -= reclaimed;
3642 return (erip->wantw && reclaimed ? B_TRUE : B_FALSE);
3648 eri_read_dma(struct eri *erip, volatile struct rmd *rmdp,
3660 bp = erip->rmblkp[rmdi];
3678 HSTAT(erip, rx_bad_pkts);
3680 HSTAT(erip, ierrors);
3682 HSTAT(erip, rx_runt);
3684 HSTAT(erip, rx_toolong_pkts);
3686 HSTAT(erip, drop);
3689 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3731 * then put bp in our read service queue erip->ipq, if it exists
3740 (void) ddi_dma_sync(erip->ndmarh[rmdi], 0,
3746 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3755 ERI_PROCESS_READ(erip, nbp, sum);
3757 ERI_PROCESS_READ(erip, nbp);
3767 HSTAT(erip, ierrors);
3768 HSTAT(erip, allocbfail);
3769 HSTAT(erip, norcvbuf);
3772 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3774 ERI_DEBUG_MSG1(erip, RESOURCE_MSG, "allocb fail");
3786 (void) ddi_dma_unbind_handle(erip->ndmarh[rmdi]);
3787 (void) ddi_dma_addr_bind_handle(erip->ndmarh[rmdi],
3792 erip->rmblkp[rmdi] = nbp;
3794 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3804 ERI_PROCESS_READ(erip, bp, sum);
3806 ERI_PROCESS_READ(erip, bp);
3816 HSTAT(erip, ierrors);
3817 HSTAT(erip, allocbfail);
3818 HSTAT(erip, norcvbuf);
3821 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3823 ERI_DEBUG_MSG1(erip, RESOURCE_MSG, "allocb fail");
3833 eri_init_xfer_params(struct eri *erip)
3838 dip = erip->dip;
3841 erip->param_arr[i] = param_arr[i];
3843 erip->xmit_dma_mode = 0;
3844 erip->rcv_dma_mode = 0;
3845 erip->mifpoll_enable = mifpoll_enable;
3846 erip->lance_mode_enable = lance_mode;
3847 erip->frame_enable = 1;
3848 erip->ngu_enable = ngu_enable;
3850 if (!erip->g_nd && !eri_param_register(erip,
3851 erip->param_arr, A_CNT(param_arr))) {
3852 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
3948 erip->link_pulse_disabled = 1;
3950 erip->link_pulse_disabled = 1;
3952 eri_statinit(erip);
3958 eri_process_ndd_ioctl(struct eri *erip, queue_t *wq, mblk_t *mp, int cmd)
3990 if (!eri_nd_getset(wq, erip->g_nd, mp)) {
4036 if (!eri_nd_getset(wq, erip->g_nd, mp)) {
4045 ERI_DEBUG_MSG2(erip, NDD_MSG,
4048 erip->stats.link_up = LINK_STATE_DOWN;
4049 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4050 (void) eri_init(erip);
4057 erip->stats.link_up = LINK_STATE_DOWN;
4058 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4059 (void) eri_init(erip);
4068 erip->stats.link_up = LINK_STATE_DOWN;
4069 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4070 (void) eri_init(erip);
4081 struct eri *erip;
4086 erip = (struct eri *)ksp->ks_private;
4097 mutex_enter(&erip->xmitlock);
4098 if ((erip->flags & ERI_RUNNING) && (erip->flags & ERI_TXINIT)) {
4099 erip->tx_completion =
4101 macupdate |= eri_reclaim(erip, erip->tx_completion);
4103 mutex_exit(&erip->xmitlock);
4105 mac_tx_update(erip->mh);
4107 eri_savecntrs(erip);
4109 esp = &erip->stats;
4155 eri_statinit(struct eri *erip)
4160 if ((ksp = kstat_create("eri", erip->instance, "driver_info", "net",
4163 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
4168 erip->ksp = ksp;
4244 ksp->ks_private = (void *) erip;
4255 eri_param_cleanup(struct eri *erip)
4257 if (erip->g_nd)
4258 (void) eri_nd_free(&erip->g_nd);
4321 eri_param_register(struct eri *erip, param_t *eripa, int cnt)
4345 if (!eri_nd_load(&erip->g_nd, eripa->param_name + 1,
4347 (void) eri_nd_free(&erip->g_nd);
4576 eri_debug_msg(const char *file, int line, struct eri *erip,
4589 if (erip)
4592 ddi_driver_name(erip->dip), erip->instance,
4606 eri_fault_msg(struct eri *erip, uint_t severity, msg_t type,
4616 if (erip == NULL) {
4622 cmn_err(CE_WARN, "%s%d : %s", ddi_driver_name(erip->dip),
4623 erip->instance, msg_buffer);
4626 cmn_err(CE_CONT, "?%s%d : %s", ddi_driver_name(erip->dip),
4627 erip->instance, msg_buffer);
4630 cmn_err(CE_NOTE, "^%s%d : %s", ddi_driver_name(erip->dip),
4631 erip->instance, msg_buffer);
4634 cmn_err(CE_NOTE, "!%s%d : %s", ddi_driver_name(erip->dip),
4635 erip->instance, msg_buffer);
4638 cmn_err(CE_CONT, "%s%d : %s", ddi_driver_name(erip->dip),
4639 erip->instance, msg_buffer);
4655 eri_stop_timer(struct eri *erip)
4658 mutex_enter(&erip->linklock);
4659 if (erip->timerid) {
4660 erip->flags |= ERI_NOTIMEOUTS; /* prevent multiple timeouts */
4661 id = erip->timerid;
4662 erip->timerid = 0; /* prevent other threads from calling untimeout */
4663 mutex_exit(&erip->linklock); /* no mutex across untimeout() */
4666 mutex_enter(&erip->linklock); /* acquire mutex again */
4667 erip->flags &= ~ERI_NOTIMEOUTS;
4675 eri_start_timer(struct eri *erip, fptrv_t func, clock_t msec)
4678 if (!(erip->flags & ERI_NOTIMEOUTS) &&
4679 (erip->flags & ERI_RUNNING)) {
4680 erip->timerid = timeout(func, (caddr_t)erip,
4685 mutex_exit(&erip->linklock);
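eri_stop_timer (lines 4655-4667) snapshots the timer id and drops linklock around the cancel, because the cancel can block on a running callout that itself needs linklock; ERI_NOTIMEOUTS keeps eri_start_timer from re-arming in that window. A pthread-based user-space sketch of the dance; the timeout_id type and the blocking-cancel stub are stand-ins for the DDI routines:

#include <pthread.h>
#include <stdio.h>

typedef int timeout_id_t;	/* stand-in for the DDI timeout id */

static pthread_mutex_t linklock = PTHREAD_MUTEX_INITIALIZER;
static timeout_id_t timerid;
static int notimeouts;		/* blocks re-arming while we cancel */

static void
untimeout_stub(timeout_id_t id)	/* may sleep waiting for a running callout */
{
	printf("cancelled timer %d\n", id);
}

static void
stop_timer_sketch(void)
{
	timeout_id_t id;

	pthread_mutex_lock(&linklock);
	if (timerid != 0) {
		notimeouts = 1;		/* prevent re-arming in the window */
		id = timerid;
		timerid = 0;		/* prevent a second cancel attempt */
		pthread_mutex_unlock(&linklock);  /* no lock across cancel */
		untimeout_stub(id);
		pthread_mutex_lock(&linklock);
		notimeouts = 0;
	}
	/* the driver returns holding linklock; released here for brevity */
	pthread_mutex_unlock(&linklock);
}

int
main(void)
{
	timerid = 7;
	stop_timer_sketch();
	return (0);
}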
4689 eri_new_xcvr(struct eri *erip)
4695 if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4697 erip->stats.pmcap = ERI_PMCAP_NONE;
4701 ERI_DEBUG_MSG2(erip, MIF_MSG, "cfg value = %X", cfg);
4705 ERI_DEBUG_MSG1(erip, PHY_MSG, "Found External XCVR");
4717 eri_mii_write(erip, ERI_PHY_BMCR,
4726 erip->phyad = ERI_EXTERNAL_PHYAD;
4728 erip->mif_config &= ~ERI_MIF_CFGPD;
4729 erip->mif_config |= (erip->phyad << ERI_MIF_CFGPD_SHIFT);
4730 erip->mif_config |= ERI_MIF_CFGPS;
4731 PUT_MIFREG(mif_cfg, erip->mif_config);
4736 ERI_DEBUG_MSG1(erip, PHY_MSG, "Found Internal XCVR");
4749 eri_mii_write(erip, ERI_PHY_BMCR,
4758 erip->phyad = ERI_INTERNAL_PHYAD;
4760 erip->mif_config &= ~ERI_MIF_CFGPD;
4761 erip->mif_config |= (erip->phyad << ERI_MIF_CFGPD_SHIFT);
4762 erip->mif_config &= ~ERI_MIF_CFGPS;
4763 PUT_MIFREG(mif_cfg, erip->mif_config);
4771 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
4774 erip->xcvr_status = PHY_LINK_DOWN;
4777 if (erip->stats.pmcap == ERI_PMCAP_NONE) {
4778 if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4780 erip->stats.pmcap = ERI_PMCAP_4MHZ;
4790 eri_check_link(struct eri *erip)
4792 link_state_t linkupdate = eri_check_link_noind(erip);
4795 mac_link_update(erip->mh, linkupdate);
4808 eri_check_link_noind(struct eri *erip)
4814 eri_stop_timer(erip); /* acquire linklock */
4816 mutex_enter(&erip->xmitlock);
4817 mutex_enter(&erip->xcvrlock);
4818 eri_mif_poll(erip, MIF_POLL_STOP);
4820 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
4821 mif_ints = erip->mii_status ^ stat;
4823 if (erip->openloop_autoneg) {
4824 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
4825 ERI_DEBUG_MSG3(erip, XCVR_MSG,
4827 stat, erip->mii_status);
4828 (void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
4830 (erip->openloop_autoneg < 2)) {
4836 erip->stats.ifspeed = 10;
4843 erip->stats.ifspeed = 100;
4845 ERI_DEBUG_MSG3(erip, XCVR_MSG,
4849 erip->openloop_autoneg++;
4850 eri_mii_write(erip, ERI_PHY_BMCR, control);
4853 erip->openloop_autoneg = 0;
4854 linkupdate = eri_mif_check(erip, stat, stat);
4855 if (erip->openloop_autoneg)
4858 eri_mif_poll(erip, MIF_POLL_START);
4859 mutex_exit(&erip->xcvrlock);
4860 mutex_exit(&erip->xmitlock);
4862 eri_start_timer(erip, eri_check_link, link_timeout);
4866 linkupdate = eri_mif_check(erip, mif_ints, stat);
4867 eri_mif_poll(erip, MIF_POLL_START);
4868 mutex_exit(&erip->xcvrlock);
4869 mutex_exit(&erip->xmitlock);
4875 if ((erip->flags & ERI_RUNNING) && param_linkup) {
4876 if (erip->check_rmac_hang) {
4877 ERI_DEBUG_MSG5(erip,
4880 erip->check_rmac_hang,
4885 erip->check_rmac_hang = 0;
4886 erip->check2_rmac_hang++;
4888 erip->rxfifo_wr_ptr_c = GET_ERXREG(rxfifo_wr_ptr);
4889 erip->rxfifo_rd_ptr_c = GET_ERXREG(rxfifo_rd_ptr);
4891 eri_start_timer(erip, eri_check_link,
4896 if (erip->check2_rmac_hang) {
4897 ERI_DEBUG_MSG5(erip,
4900 erip->check2_rmac_hang,
4905 erip->check2_rmac_hang = 0;
4907 erip->rxfifo_wr_ptr = GET_ERXREG(rxfifo_wr_ptr);
4908 erip->rxfifo_rd_ptr = GET_ERXREG(rxfifo_rd_ptr);
4912 ((erip->rxfifo_wr_ptr_c == erip->rxfifo_rd_ptr_c) ||
4913 ((erip->rxfifo_rd_ptr == erip->rxfifo_rd_ptr_c) &&
4914 (erip->rxfifo_wr_ptr == erip->rxfifo_wr_ptr_c)))) {
4915 ERI_DEBUG_MSG1(erip,
4919 HSTAT(erip, rx_hang);
4920 erip->linkcheck = 1;
4922 eri_start_timer(erip, eri_check_link,
4924 (void) eri_init(erip);
4935 if ((erip->flags & ERI_RUNNING) && param_linkup &&
4936 (eri_check_txhung(erip))) {
4937 HSTAT(erip, tx_hang);
4939 erip->linkcheck = 1;
4940 eri_start_timer(erip, eri_check_link, ERI_CHECK_HANG_TIMER);
4941 (void) eri_init(erip);
4947 if (erip->stats.pmcap == ERI_PMCAP_NONE) {
4948 if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4950 erip->stats.pmcap = ERI_PMCAP_4MHZ;
4952 ERI_DEBUG_MSG2(erip, NONFATAL_MSG,
4953 "eri_check_link: PMCAP %d", erip->stats.pmcap);
4957 eri_start_timer(erip, eri_check_link, ERI_CHECK_HANG_TIMER);
4959 eri_start_timer(erip, eri_check_link, ERI_LINKCHECK_TIMER);
4964 eri_mif_check(struct eri *erip, uint16_t mif_ints, uint16_t mif_data)
4971 ERI_DEBUG_MSG4(erip, XCVR_MSG, "eri_mif_check: mif_mask: %X, %X, %X",
4972 erip->mif_mask, mif_ints, mif_data);
4974 mif_ints &= ~erip->mif_mask;
4975 erip->mii_status = mif_data;
4983 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
4985 if (eri_new_xcvr(erip)) {
4990 (void) eri_reset_xcvr(erip);
4999 ERI_DEBUG_MSG3(erip, PHY_MSG,
5005 ERI_DEBUG_MSG1(erip, PHY_MSG, "Auto-negotiation interrupt.");
5011 erip->mif_mask |= PHY_BMSR_ANC;
5012 erip->mif_mask &= ~PHY_BMSR_LNKSTS;
5013 (void) eri_mii_read(erip, ERI_PHY_ANER, &aner);
5016 ERI_DEBUG_MSG1(erip, XCVR_MSG,
5021 ERI_DEBUG_MSG1(erip, XCVR_MSG,
5023 (void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5032 erip->stats.ifspeed = 100;
5039 erip->stats.ifspeed = 10;
5041 ERI_FAULT_MSG1(erip, SEVERITY_NONE,
5047 (void) eri_mii_write(erip, ERI_PHY_BMCR, control);
5051 erip->openloop_autoneg = 1;
5054 (void) eri_mii_read(erip, ERI_PHY_ANLPAR, &anlpar);
5055 (void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5058 ERI_DEBUG_MSG2(erip, XCVR_MSG, "an_common = 0x%X", an_common);
5062 erip->stats.ifspeed = 100;
5067 erip->stats.ifspeed = 10;
5073 ERI_FAULT_MSG1(erip, SEVERITY_MID, ERI_VERB_MSG,
5082 ERI_DEBUG_MSG2(erip, PHY_MSG,
5084 ERI_DEBUG_MSG2(erip, PHY_MSG,
5092 ERI_DEBUG_MSG1(erip, PHY_MSG, "Link Up");
5097 eri_mii_write(erip, 31, 0x8000);
5098 (void) eri_mii_read(erip, 0, &old_mintrans);
5099 eri_mii_write(erip, 0, 0x00F1);
5100 eri_mii_write(erip, 31, 0x0000);
5105 eri_init_txmac(erip);
5107 erip->stats.link_up = LINK_STATE_UP;
5109 erip->stats.link_duplex = LINK_DUPLEX_FULL;
5111 erip->stats.link_duplex = LINK_DUPLEX_HALF;
5115 ERI_DEBUG_MSG1(erip, PHY_MSG, "Link down.");
5117 erip->stats.link_up = LINK_STATE_DOWN;
5118 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
5127 ERI_DEBUG_MSG1(erip, PHY_MSG,
5133 eri_mii_write(erip, 31, 0x8000);
5134 (void) eri_mii_read(erip, 0,
5136 eri_mii_write(erip, 0, 0x00F1);
5137 eri_mii_write(erip, 31, 0x0000);
5142 eri_init_txmac(erip);
5145 erip->stats.link_up = LINK_STATE_UP;
5147 erip->stats.link_duplex =
5150 erip->stats.link_duplex =
5159 ERI_DEBUG_MSG1(erip, PHY_MSG,
5162 erip->stats.link_up = LINK_STATE_DOWN;
5163 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
5173 ERI_DEBUG_MSG1(erip, PHY_MSG,
5175 erip->openloop_autoneg = 0;
5184 (void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5186 eri_mii_write(erip, ERI_PHY_BMCR, control);
5190 ERI_DEBUG_MSG1(erip, PHY_MSG, "Jabber detected.");
5191 HSTAT(erip, jab);
5196 (void) eri_reset_xcvr(erip);
5204 eri_reset_xcvr(struct eri *erip)
5217 erip->ifspeed_old = erip->stats.ifspeed;
5225 erip->openloop_autoneg = 0;
5230 eri_mii_write(erip, ERI_PHY_BMCR, PHY_BMCR_RESET);
5237 if (eri_mii_read(erip, ERI_PHY_BMCR, &control) == 1) {
5239 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
5245 ERI_FAULT_MSG2(erip, SEVERITY_NONE, ERI_VERB_MSG,
5251 ERI_DEBUG_MSG2(erip, AUTOCONFIG_MSG,
5255 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5256 (void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5257 (void) eri_mii_read(erip, ERI_PHY_IDR1, &idr1);
5258 (void) eri_mii_read(erip, ERI_PHY_IDR2, &idr2);
5260 ERI_DEBUG_MSG4(erip, XCVR_MSG,
5301 ERI_DEBUG_MSG2(erip, XCVR_MSG, "anar = %x", anar);
5302 eri_mii_write(erip, ERI_PHY_ANAR, anar);
5308 eri_mii_write(erip, ERI_PHY_BMCR,
5322 erip->mif_mask = (uint16_t)(~PHY_BMSR_RES1);
5331 ERI_DEBUG_MSG5(erip, XCVR_MSG, "eri_reset_xcvr: %d %d %d %d",
5335 ERI_DEBUG_MSG3(erip, XCVR_MSG,
5339 erip->mif_mask &= ~PHY_BMSR_JABDET;
5342 (erip->link_pulse_disabled)) {
5345 (void) eri_mii_read(erip, ERI_PHY_NICR, &nicr);
5347 eri_mii_write(erip, ERI_PHY_NICR, nicr);
5349 erip->stats.link_up = LINK_STATE_UP;
5351 erip->stats.link_duplex = LINK_DUPLEX_FULL;
5353 erip->stats.link_duplex = LINK_DUPLEX_HALF;
5361 /* eri_mii_write(erip, ERI_PHY_BMCR, control); */
5366 erip->mif_mask &= ~PHY_BMSR_ANC;
5371 eri_mii_write(erip, ERI_PHY_BMCR, control & ~PHY_BMCR_ANE);
5378 eri_mii_write(erip, ERI_PHY_BMCR, control);
5383 erip->mif_mask &= ~PHY_BMSR_LNKSTS;
5399 erip->stats.ifspeed = 100;
5403 erip->stats.link_duplex = LINK_DUPLEX_FULL;
5405 erip->stats.link_duplex = LINK_DUPLEX_HALF;
5416 erip->stats.ifspeed = 10;
5420 erip->stats.link_duplex = LINK_DUPLEX_FULL;
5422 erip->stats.link_duplex = LINK_DUPLEX_HALF;
5425 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
5433 ERI_DEBUG_MSG4(erip, PHY_MSG,
5437 eri_mii_write(erip, ERI_PHY_BMCR, control);
5443 * eri_mii_write(erip, ERI_PHY_BMCR, control);
5448 (void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5449 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5450 (void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5452 ERI_DEBUG_MSG4(erip, PHY_MSG,
5465 eri_xcvr_force_mode(struct eri *erip, uint32_t *link_timeout)
5468 if (!param_autoneg && !param_linkup && (erip->stats.ifspeed == 10) &&
5474 if (!param_autoneg && !param_linkup && (erip->ifspeed_old == 10) &&
5479 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_LOG_MSG,
5486 eri_mif_poll(struct eri *erip, soft_mif_enable_t enable)
5489 if (erip->mifpoll_enable && !erip->openloop_autoneg) {
5490 erip->mif_config |= ERI_MIF_CFGPE;
5491 PUT_MIFREG(mif_cfg, erip->mif_config);
5495 PUT_MIFREG(mif_imask, erip->mif_mask);
5498 erip->mif_config &= ~ERI_MIF_CFGPE;
5499 PUT_MIFREG(mif_cfg, erip->mif_config);
5505 ERI_DEBUG_MSG2(erip, XCVR_MSG, "MIF Config = 0x%X",
5507 ERI_DEBUG_MSG2(erip, XCVR_MSG, "MIF imask = 0x%X",
5509 ERI_DEBUG_MSG2(erip, XCVR_MSG, "INT imask = 0x%X",
5511 ERI_DEBUG_MSG1(erip, XCVR_MSG, "<== mif_poll");
5518 eri_check_txhung(struct eri *erip)
5522 mutex_enter(&erip->xmitlock);
5523 if (erip->flags & ERI_RUNNING)
5524 erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
5526 macupdate |= eri_reclaim(erip, erip->tx_completion);
5529 if ((erip->tcurp != erip->tnextp) &&
5530 (erip->stats.opackets64 == erip->erisave.reclaim_opackets) &&
5531 (erip->stats.collisions == erip->erisave.starts))
5532 erip->txhung++;
5534 erip->txhung = 0;
5536 erip->erisave.reclaim_opackets = erip->stats.opackets64;
5537 erip->erisave.starts = erip->stats.collisions;
5538 mutex_exit(&erip->xmitlock);
5541 mac_tx_update(erip->mh);
5543 return (erip->txhung >= eri_txhung_limit);
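Finally, eri_check_txhung (lines 5518-5543) scores a hang when the ring has outstanding work but the progress counters did not move since the previous check, and any progress resets the score. A self-contained sketch of that heuristic; the threshold and field names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define	TXHUNG_LIMIT	3	/* plays the role of eri_txhung_limit */

struct txstate {
	uint64_t opackets, collisions;	/* current counters */
	uint64_t save_op, save_coll;	/* snapshot from the previous check */
	int	 outstanding;		/* ring non-empty (tcurp != tnextp)? */
	int	 txhung;
};

static int
check_txhung(struct txstate *t)
{
	if (t->outstanding &&
	    t->opackets == t->save_op && t->collisions == t->save_coll)
		t->txhung++;		/* no progress since last check */
	else
		t->txhung = 0;		/* any progress resets the score */

	t->save_op = t->opackets;
	t->save_coll = t->collisions;
	return (t->txhung >= TXHUNG_LIMIT);
}

int
main(void)
{
	struct txstate t = { .outstanding = 1 };

	for (int i = 0; i < 4; i++)
		printf("hung=%d\n", check_txhung(&t));	/* 0 0 1 1 */
	return (0);
}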