eri.c revision 7c478bd95313f5f23a4c958a745db2134aa03244
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* SunOS MT STREAMS ERI(PCI) 10/100 Mb Ethernet Device Driver
*/
#include <sys/ethernet.h>
#include <sys/eri_common.h>
#ifdef DEBUG
#endif
/*
* **** Function Prototypes *****
*/
/*
* Entry points (man9e)
*/
#ifdef ERI_SERVICE_ROUTINE
#endif
static void eri_wenable(struct eri *);
/*
*/
/*
* Initialization Functions
*/
static int eri_allocthings(struct eri *);
static int eri_init_xfer_params(struct eri *);
static void eri_statinit(struct eri *);
static int eri_burstsize(struct eri *);
static char *eri_ether_sprintf(struct ether_addr *);
static void eri_init_rx(struct eri *);
#ifdef LATER_SPLIT_TX_RX
static void eri_init_tx_channel(struct eri *);
#endif
static void eri_init_txmac(struct eri *);
/*
* Un-init Functions
*/
static int eri_freebufs(struct eri *);
/*
* Transceiver (xcvr) Functions
*/
#ifdef XCVR
static int eri_init_xcvr_info(struct eri *, int);
#endif
static int eri_reset_xcvr(struct eri *);
#endif
static void eri_check_link(struct eri *);
static void eri_display_link_status(struct eri *);
/*
* Reset Functions
*/
/*
* Error Functions
*/
#ifdef ERI_TX_HUNG
static int eri_check_txhung(struct eri *);
#endif
/*
* Hardening Functions
*/
msg_t, char *, ...);
/*
* Misc Functions
*/
static void eri_savecntrs(struct eri *);
static void eri_bb_force_idle(struct eri *);
struct ether_addr *);
struct ether_addr *);
struct ether_addr *, int, uint32_t);
/*
* Utility Functions
*/
static long eri_strtol(char *, char **, int);
/*
* Functions to support ndd
*/
static void eri_param_cleanup(struct eri *);
static void eri_cable_down_msg(struct eri *);
/*
* DLPI Functions
*/
static void eri_dodetach(struct eristr *);
static void eri_setipq(struct eri *);
/*
* Define PHY Vendors: Matches to IEEE
* Organizationally Unique Identifier (OUI)
*/
/*
* The first two are supported as Internal XCVRs
*/
#define PHY_VENDOR_LUCENT 0x601d
#define PHY_LINK_NONE 0 /* Not attempted yet or retry */
#define AUTO_SPEED 0
#define FORCE_SPEED 1
/*
* link_up kstat variable states
*/
#define ERI_LINK_DOWN 0
#define ERI_LINK_UP 1
/*
* States for kstat variable link_duplex
*/
#define ERI_UNKNOWN_DUPLEX 0
#define ERI_HALF_DUPLEX 1
#define ERI_FULL_DUPLEX 2
/*
* tcp and ip data structures used to grab some length
* and type information in eri_read_dma().
*/
#ifdef ERI_RCV_CKSUM
typedef struct ip {
} ip_t;
typedef struct tcphdr {
} tcp_t;
#endif
/*
*/
#define IS_NOT_MULTICAST(ehp) \
#define IS_BROADCAST(ehp) \
#define IS_MULTICAST(ehp) \
if (IS_BROADCAST(ehp)) { \
} else if (IS_MULTICAST(ehp)) { \
}
if (IS_BROADCAST(ehp)) { \
} else if (IS_MULTICAST(ehp)) { \
}
/*
* ERI 1.0 has a bug in which the last byte of the MAC address,
* ehp->ether_dhost.ether_addr_octet[5],
* is not filtered, thus accepting all packets whose first five bytes
* match. Here we filter out the packets which are not intended for us.
*
* !(ehp->ether_dhost.ether_addr_octet[0] & 0x1) checks if the packet
* is not multicast.
*
* (ehp->ether_dhost.ether_addr_octet[1] &
* ehp->ether_dhost.ether_addr_octet[5]) != 0xff checks whether
* the packet could be a broadcast. If it were broadcast, both bytes
* would be 0xff. This can never be true for unicast because
* the vendor portion for Sun would be 8:0:20.
*/
#define ETHERHEADER_SIZE (sizeof (struct ether_header))
#ifdef ERI_RCV_CKSUM
{ \
t_uscalar_t type; \
struct ether_header *ehp; \
\
\
/* \
* update MIB II statistics \
*/ \
if (type == ETHERTYPE_IPV6) \
start_offset = 0; \
if ((sb_flags & ERI_SCKSUM) && \
NULL, start_offset, 0, \
end_offset, sum, \
HCK_PARTIALCKSUM, 0); \
} \
if (canputnext(ipq)) \
else { \
} \
} else { \
/* \
* Strip the PADS for 802.3 \
*/ \
ETHERHEADER_SIZE + type; \
} \
}
#else
{ \
t_uscalar_t type; \
struct ether_header *ehp; \
if (type == ETHERTYPE_IPV6) \
if (canputnext(ipq)) \
else { \
} \
} else { \
/* \
* Strip the PADS for 802.3 \
*/ \
ETHERHEADER_SIZE + type; \
} \
}
#endif /* ERI_RCV_CKSUM */
/*
* TX Interrupt Rate
*/
static int tx_interrupt_rate = 16;
/*
* Ethernet broadcast address definition.
*/
static struct ether_addr etherbroadcastaddr = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
/*
* Linked list of active (inuse) driver Streams.
*/
static krwlock_t eristruplock;
/*
* Single private "global" lock for the few rare conditions
* we want single-threaded.
*/
static kmutex_t eriwenlock;
/*
* The following variables are used for configuring various features
*/
#define ERI_DESC_HANDLE_ALLOC 0x0001
#define ERI_DESC_MEM_ALLOC 0x0002
#define ERI_DESC_MEM_MAP 0x0004
#define ERI_XMIT_HANDLE_ALLOC 0x0008
#define ERI_XMIT_HANDLE_BIND 0x0010
#define ERI_RCV_HANDLE_ALLOC 0x0020
#define ERI_RCV_HANDLE_BIND 0x0040
#define ERI_XMIT_DVMA_ALLOC 0x0100
#define ERI_RCV_DVMA_ALLOC 0x0200
#define ERI_XBUFS_HANDLE_ALLOC 0x0400
#define ERI_XBUFS_KMEM_ALLOC 0x0800
#define ERI_XBUFS_KMEM_DMABIND 0x1000
#define ERI_DONT_STRIP_CRC
/*
* Translate a kernel virtual address to i/o address.
*/
#define ERI_IOPBIOADDR(erip, a) \
/*
* ERI Configuration Register Value
* Used to configure parameters that define DMA burst
* and internal arbitration behavior.
* for equal TX and RX bursts, set the following in global
* configuration register.
* static int global_config = 0x42;
*/
/*
* ERI ERX Interrupt Blanking Time
* Each count is about 16 us (2048 clocks) for 66 MHz PCI.
*/
/*
* ERX PAUSE Threshold Register value
* The following value is for an OFF Threshold of about 15.5 Kbytes
* and an ON Threshold of 4K bytes.
*/
static int eri_reinit_fatal = 0;
#ifdef DEBUG
static int noteri = 0;
#endif
#ifdef ERI_TX_HUNG
static int eri_reinit_txhung = 0;
#endif
#ifdef ERI_HDX_BUG_WORKAROUND
/*
* By default enable padding in hdx mode to 97 bytes.
* set eri:eri_hdx_pad_enable=0
*/
#endif
/*
* Default values to initialize the cache line size and latency timer
* registers in the PCI configuration space.
* ERI_G_CACHE_LINE_SIZE_16 is defined as 16 since RIO expects in units
* of 4 bytes.
*/
#ifdef ERI_PM_WORKAROUND_PCI
#else
#endif
/*
* Claim the device is ultra-capable of burst in the beginning. Use
* the value returned by ddi_dma_burstsizes() to actually set the ERI
* global configuration register later.
*
* PCI_ERI supports Infinite burst or 64-byte-multiple bursts.
*/
static ddi_dma_attr_t dma_attr = {
DMA_ATTR_V0, /* version number. */
(uint_t)0 /* attribute flags */
};
static ddi_dma_attr_t desc_dma_attr = {
DMA_ATTR_V0, /* version number. */
16, /* granularity */
0 /* attribute flags */
};
1024 /* dlim_speed */
};
/*
* Link Configuration variables
*
* On Motherboard implementations, 10/100 Mbps speeds may be supported
* by using both the Serial Link and the MII on Non-serial-link interface.
* When both links are present, the driver automatically tries to bring up
* both. If both are up, the Gigabit Serial Link is selected for use, by
* default. The following configuration variable is used to force the selection
* of one of the links when both are up.
* To change the default selection to the MII link when both the Serial
* Link and the MII link are up, change eri_default_link to 1.
*
* Once a link is in use, the driver will continue to use that link till it
* goes down. When it goes down, the driver will look at the status of both the
* links again for link selection.
*
* Currently the standard is not stable w.r.t. gigabit link configuration
* using auto-negotiation procedures. Meanwhile, the link may be configured
* in "forced" mode using the "autonegotiation enable" bit (bit-12) in the
* PCS MII Command Register. In this mode the PCS sends "idles" until it
* sees "idles" as initialization, instead of the Link Configuration protocol
* where a Config register is exchanged. In this mode, the ERI is programmed
* for full-duplex operation with both pauseTX and pauseRX (for flow control)
* enabled.
*/
static int select_link = 0; /* automatic selection */
static int default_link = 0; /* Select Serial link if both are up */
/*
* The following variables are used for configuring link-operation
* for all the "eri" interfaces in the system.
* Later these parameters may be changed per interface using "ndd" command
* These parameters may also be specified as properties using the .conf
* file mechanism for each interface.
*/
/*
* The following variable value will be overridden by "link-pulse-disabled"
* property which may be created by OBP or eri.conf file. This property is
* applicable only for 10 Mbps links.
*/
static int link_pulse_disabled = 0; /* link pulse disabled */
/* For MII-based FastEthernet links */
static int adv_autoneg_cap = 1;
static int adv_100T4_cap = 0;
static int adv_100fdx_cap = 1;
static int adv_100hdx_cap = 1;
static int adv_10fdx_cap = 1;
static int adv_10hdx_cap = 1;
static int adv_pauseTX_cap = 0;
static int adv_pauseRX_cap = 0;
/*
* The following gap parameters are in terms of byte times.
*/
static int ipg0 = 8;
static int ipg1 = 8;
static int ipg2 = 4;
static int mifpoll_enable = 0; /* to enable mif poll */
static int ngu_enable = 0; /* to enable Never Give Up mode */
static int eri_force_mlf = 0; /* to enable mif poll */
/*
* For the MII interface, the External Transceiver is selected when present.
* The following variable is used to select the Internal Transceiver even
* when the External Transceiver is present.
*/
static int use_int_xcvr = 0;
static int pace_size = 0; /* Do not use pacing for now */
static int device = -1;
static int eri_use_dvma_rx = 0; /* =1:use dvma */
/*
* The following parameters may be configured by the user. If they are not
* configured by the user, the values will be based on the capabilities of
* the transceiver.
* The value "ERI_NOTUSR" is ORed with the parameter value to indicate values
* which are NOT configured by the user.
*/
#define ERI_NOTUSR 0x0f000000
#define ERI_MASK_1BIT 0x1
#define ERI_MASK_2BIT 0x3
#define ERI_MASK_8BIT 0xff
/*
* Note:
* ERI has all of the above capabilities.
* Only when an External Transceiver is selected for MII-based FastEthernet
* link operation, the capabilities depend upon the capabilities of the
* External Transceiver.
*/
/* ------------------------------------------------------------------------- */
/* min max value name */
{ 0, 2, 2, "transceiver_inuse"},
{ 0, 1, 0, "link_status"},
{ 0, 1, 0, "link_speed"},
{ 0, 1, 0, "link_mode"},
{ 0, 255, 8, "ipg1"},
{ 0, 255, 4, "ipg2"},
{ 0, 1, 0, "use_int_xcvr"},
{ 0, 255, 0, "pace_size"},
{ 0, 1, 1, "adv_autoneg_cap"},
{ 0, 1, 1, "adv_100T4_cap"},
{ 0, 1, 1, "adv_100fdx_cap"},
{ 0, 1, 1, "adv_100hdx_cap"},
{ 0, 1, 1, "adv_10fdx_cap"},
{ 0, 1, 1, "adv_10hdx_cap"},
{ 0, 1, 1, "autoneg_cap"},
{ 0, 1, 1, "100T4_cap"},
{ 0, 1, 1, "100fdx_cap"},
{ 0, 1, 1, "100hdx_cap"},
{ 0, 1, 1, "10fdx_cap"},
{ 0, 1, 1, "10hdx_cap"},
{ 0, 1, 0, "lp_autoneg_cap"},
{ 0, 1, 0, "lp_100T4_cap"},
{ 0, 1, 0, "lp_100fdx_cap"},
{ 0, 1, 0, "lp_100hdx_cap"},
{ 0, 1, 0, "lp_10fdx_cap"},
{ 0, 1, 0, "lp_10hdx_cap"},
{ 0, 255, 0, "instance"},
{ 0, 1, 1, "lance_mode"},
{ 0, 31, 8, "ipg0"},
{ 0, 127, 6, "intr_blank_time"},
{ 0, 255, 8, "intr_blank_packets"},
{ 0, 1, 1, "serial-link"},
{ 0, 2, 1, "non-serial-link"},
{ 0, 1, 0, "select-link"},
{ 0, 1, 0, "default-link"},
{ 0, 2, 0, "link-in-use"},
{ 0, 1, 1, "adv_1000autoneg_cap"},
{ 0, 1, 1, "adv_1000fdx_cap"},
{ 0, 1, 1, "adv_1000hdx_cap"},
{ 0, 1, 1, "adv_asm_dir_cap"},
{ 0, 1, 1, "adv_pause_cap"},
{ 0, 1, 0, "1000autoneg_cap"},
{ 0, 1, 0, "1000fdx_cap"},
{ 0, 1, 0, "1000hdx_cap"},
{ 0, 1, 0, "asm_dir_cap"},
{ 0, 1, 0, "pause_cap"},
{ 0, 1, 0, "lp_1000autoneg_cap"},
{ 0, 1, 0, "lp_1000fdx_cap"},
{ 0, 1, 0, "lp_1000hdx_cap"},
{ 0, 1, 0, "lp_asm_dir_cap"},
{ 0, 1, 0, "lp_pause_cap"},
};
#define DISPLAY_PARAM 1
#define DONT_DISPLAY 0
static uint32_t param_display_mii[] = {
/* DISPLAY */
DISPLAY_PARAM, /* transceiver_inuse */
DISPLAY_PARAM, /* link_status */
DISPLAY_PARAM, /* link_speed */
DISPLAY_PARAM, /* link_mode */
DISPLAY_PARAM, /* ipg1 */
DISPLAY_PARAM, /* ipg2 */
DISPLAY_PARAM, /* use_int_xcvr */
DISPLAY_PARAM, /* pace_size */
DISPLAY_PARAM, /* adv_autoneg_cap */
DISPLAY_PARAM, /* adv_100T4_cap */
DISPLAY_PARAM, /* adv_100fdx_cap */
DISPLAY_PARAM, /* adv_100hdx_cap */
DISPLAY_PARAM, /* adv_10fdx_cap */
DISPLAY_PARAM, /* adv_10hdx_cap */
DISPLAY_PARAM, /* autoneg_cap */
DISPLAY_PARAM, /* 100T4_cap */
DISPLAY_PARAM, /* 100fdx_cap */
DISPLAY_PARAM, /* 100hdx_cap */
DISPLAY_PARAM, /* 10fdx_cap */
DISPLAY_PARAM, /* 10hdx_cap */
DISPLAY_PARAM, /* lp_autoneg_cap */
DISPLAY_PARAM, /* lp_100T4_cap */
DISPLAY_PARAM, /* lp_100fdx_cap */
DISPLAY_PARAM, /* lp_100hdx_cap */
DISPLAY_PARAM, /* lp_10fdx_cap */
DISPLAY_PARAM, /* lp_10hdx_cap */
DISPLAY_PARAM, /* instance */
DISPLAY_PARAM, /* lance_mode */
DISPLAY_PARAM, /* ipg0 */
DISPLAY_PARAM, /* intr_blank_time */
DISPLAY_PARAM, /* intr_blank_packets */
DONT_DISPLAY, /* serial-link */
DONT_DISPLAY, /* non-serial-link */
DONT_DISPLAY, /* select-link */
DONT_DISPLAY, /* default-link */
DONT_DISPLAY, /* link-in-use */
DONT_DISPLAY, /* adv_1000autoneg_cap */
DONT_DISPLAY, /* adv_1000fdx_cap */
DONT_DISPLAY, /* adv_1000hdx_cap */
DONT_DISPLAY, /* adv_asm_dir */
DONT_DISPLAY, /* adv_pause */
DONT_DISPLAY, /* 1000autoneg_cap */
DONT_DISPLAY, /* 1000fdx_cap */
DONT_DISPLAY, /* 1000hdx_cap */
DONT_DISPLAY, /* asm_dir_cap */
DONT_DISPLAY, /* pause_cap */
DONT_DISPLAY, /* lp_1000autoneg_cap */
DONT_DISPLAY, /* lp_1000fdx_cap */
DONT_DISPLAY, /* lp_1000hdx_cap */
DONT_DISPLAY, /* lp_asm_dir */
DONT_DISPLAY, /* lp_pause */
};
static struct module_info eriinfo = {
ERI_IDNUM, /* mi_idnum */
ERI_NAME, /* mi_idname */
ERI_MINPSZ, /* mi_minpsz */
ERI_MAXPSZ, /* mi_maxpsz */
ERI_HIWAT, /* mi_hiwat */
ERI_LOWAT /* mi_lowat */
};
NULL, /* qi_putp */
#ifdef ERI_SERVICE_ROUTINE
eri_rsrv, /* qi_srvp */
#else
NULL, /* qi_srvp */
#endif
eri_open, /* qi_qopen */
eri_close, /* qi_qclose */
NULL, /* qi_qadmin */
&eriinfo, /* qi_minfo */
NULL /* qi_mstat */
};
eri_wput, /* qi_putp */
eri_wsrv, /* qi_srvp */
NULL, /* qi_qopen */
NULL, /* qi_qclose */
NULL, /* qi_qadmin */
&eriinfo, /* qi_minfo */
NULL /* qi_mstat */
};
&eri_rinit, /* st_rdinit */
&eri_winit, /* st_wrinit */
NULL, /* st_muxrinit */
NULL /* st_muxwrinit */
};
static struct cb_ops cb_eri_ops = {
nodev, /* cb_open */
nodev, /* cb_close */
nodev, /* cb_strategy */
nodev, /* cb_print */
nodev, /* cb_dump */
nodev, /* cb_read */
nodev, /* cb_write */
nodev, /* cb_ioctl */
nodev, /* cb_devmap */
nodev, /* cb_mmap */
nodev, /* cb_segmap */
nochpoll, /* cb_chpoll */
ddi_prop_op, /* cb_prop_op */
&er_info, /* cb_stream */
CB_REV, /* rev */
nodev, /* int (*cb_aread() */
nodev /* int (*cb_awrite() */
};
DEVO_REV, /* devo_rev */
0, /* devo_refcnt */
ddi_no_info, /* devo_getinfo */
nulldev, /* devo_identify */
nulldev, /* devo_probe */
eri_attach, /* devo_attach */
eri_detach, /* devo_detach */
nodev, /* devo_reset */
&cb_eri_ops, /* devo_cb_ops */
NULL /* devo_power */
};
#ifndef lint
static char _depends_on[] = "drv/ip";
#endif
/*
* This is the loadable module wrapper.
*/
/*
* Module linkage information for the kernel.
*/
&mod_driverops, /* Type of module. This one is a driver */
"10/100 Mb Ethernet Driver v%I% ",
&eri_ops, /* driver ops */
};
static struct modlinkage modlinkage = {
};
/*
* XXX Autoconfiguration lock: We want to initialize all the global
* locks at _init(). However, we do not have the cookie required which
* is returned in ddi_add_intr(), which in turn is usually called at attach
* time.
*/
static kmutex_t eriautolock;
/*
* Hardware Independent Functions
* New Section
*/
int
_init(void)
{
int status;
if (status != 0)
"_init status = 0x%X", status);
return (status);
}
int
_fini(void)
{
int status;
if (status != 0)
goto _fini_exit;
"_fini status = 0x%X", status);
return (status);
}
int
{
int status;
"_info status = 0x%X", status);
return (status);
}
/*
* Interface exists: make available by filling in network interface
* record. System will initialize the interface when it is ready
* to accept packets.
*/
static int
{
static int once = 1;
int regno;
uint8_t mutex_inited = 0;
switch (cmd) {
case DDI_ATTACH:
break;
case DDI_RESUME:
return (DDI_FAILURE);
param_linkup = 0;
{
int doeriinit = 0;
/*
* Do eri_init() only for active interface
*/
doeriinit = 1;
break;
}
if (doeriinit)
}
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
/*
* Allocate soft device data structure
*/
/*
* Initialize as many elements as possible.
*/
erip->promisc_cnt = 0;
erip->all_multi_cnt = 0;
erip->all_sap_cnt = 0;
/*
* Map in the device registers.
* Separate pointers will be set up for the following
* register groups within the GEM Register Space:
* Global register set
* ETX register set
* ERX register set
* BigMAC register set.
* MIF register set
*/
"ddi_dev_nregs failed, returned %d", regno);
goto attach_fail;
}
/*
* Map the PCI config space
*/
DDI_SUCCESS) {
"%s pci_config_setup()",
goto attach_fail;
}
/*
* Initialize device attributes structure
*/
goto attach_fail;
}
"eri_attach: gloregp %p alias %X gintmask %X",
/*
* Map the software reset register.
*/
goto attach_fail;
}
/*
* Try and stop the device.
* This is done until we want to handle interrupts.
*/
goto attach_fail;
/*
* set PCI latency timer register.
*/
"eri_attach: gloregp %p alias %X gintmask %X",
if (ddi_intr_hilevel(dip, 0)) {
" high-level interrupts are not supported");
goto attach_fail;
}
/*
* Get the interrupt cookie so the mutexes can be
* Initialized.
*/
goto attach_fail;
/*
* Initialize mutex's for this device.
*/
#ifdef XMIT_SERIAL_QUEUE
/*
* A syncq implementation.
*/
#endif
mutex_inited = 1;
/*
* Add interrupt to system
*/
"eri_att: add intr");
intr_add = 1;
else {
goto attach_fail;
}
"eri_att: DONE: add intr");
/*
* Set up the ethernet mac address.
*/
/*
* Create the filesystem device node.
*/
minor_node_created = 1;
else {
goto attach_fail;
}
if (once) {
once = 0;
}
if (eri_init_xfer_params(erip))
goto attach_fail;
goto attach_fail;
}
/*
* Setup fewer receive bufers.
*/
/*
* lock eri structure while manipulating link list of eri structs
*/
return (DDI_SUCCESS);
if (erip->pci_config_handle)
if (minor_node_created)
if (mutex_inited) {
#ifdef XMIT_SERIAL_QUEUE
#endif
}
if (intr_add)
if (erip)
return (DDI_FAILURE);
}
static int
{
int i;
"detach: dip == NULL");
return (DDI_FAILURE);
}
/*
* No resources allocated.
*/
"detach: !erip ");
return (DDI_FAILURE);
}
switch (cmd) {
case DDI_DETACH:
break;
case DDI_SUSPEND:
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
/*
* Make the device quiescent
*/
/*
* Remove instance of the intr
*/
if (erip->pci_config_handle)
/*
* Remove erip from the linked list of device structures
*/
break;
}
/*
* Destroy all mutexes and data structures allocated during
* attach time.
*/
if (erip->sw_reset_regh)
#ifdef XMIT_SERIAL_QUEUE
#endif
return (DDI_FAILURE);
}
if (eri_freebufs(erip))
return (DDI_FAILURE);
/* dvma handle case */
if (erip->eri_dvmaxh) {
}
if (erip->eri_dvmarh) {
}
/*
* xmit_dma_mode, erip->ndmaxh[i]=NULL for dvma
*/
else {
for (i = 0; i < ERI_TPENDING; i++)
for (i = 0; i < ERI_RPENDING; i++)
}
/*
* Release tiny TX buffers
*/
if (erip->tbuf_ioaddr != 0) {
erip->tbuf_ioaddr = 0;
}
}
}
return (DDI_SUCCESS);
}
/*
* To set up the mac address for the network interface:
* The adapter card may support a local mac address which is published
* in a device node property "local-mac-address". This mac address is
* treated as the factory-installed mac address for DLPI interface.
* If the adapter firmware has used the device for diskless boot
* operation it publishes a property called "mac-address" for use by
* inetboot and the device driver.
* If "mac-address" is not found, the system options property
* "local-mac-address" is used to select the mac-address. If this option
* is set to "true", and "local-mac-address" has been found, then
* local-mac-address is used; otherwise the system mac address is used
* by calling the "localetheraddr()" function.
*/
static void
{
char *prop;
int prop_len = sizeof (int);
/*
* Check if it is an adapter with its own local mac address
* If it is present, save it as the "factory-address"
* for this adapter.
*/
if (prop_len == ETHERADDRL) {
}
}
/*
* Check if the adapter has published "mac-address" property.
* If it is present, use it as the mac address for this device.
*/
if (prop_len >= ETHERADDRL) {
return;
}
}
return;
}
}
/*
* Get the system ethernet address.
*/
}
/*
* Convert Ethernet address to printable (loggable) representation.
*/
static char *
{
int i;
static char etherbuf[18];
static char digits[] = "0123456789abcdef";
for (i = 0; i < 6; i++) {
if (*ap > 0x0f)
*cp++ = ':';
}
*--cp = 0;
return (etherbuf);
}
/*
* DLPI (Data Link Provider Interface) Functions
* New Section
*/
/*
* Our DL_INFO_ACK template.
*/
static dl_info_ack_t infoack = {
DL_INFO_ACK, /* dl_primitive */
ETHERMTU, /* dl_max_sdu */
0, /* dl_min_sdu */
ERI_ADDRL, /* dl_addr_length */
DL_ETHER, /* dl_mac_type */
0, /* dl_reserved */
0, /* dl_current_state */
-2, /* dl_sap_length */
DL_CLDLS, /* dl_service_mode */
0, /* dl_qos_length */
0, /* dl_qos_offset */
0, /* dl_range_length */
0, /* dl_range_offset */
DL_STYLE2, /* dl_provider_style */
sizeof (dl_info_ack_t), /* dl_addr_offset */
DL_VERSION_2, /* dl_version */
ETHERADDRL, /* dl_brdcst_addr_length */
0 /* dl_growth */
};
/*
* Calculate the bit in the multicast address filter that selects the given
* address.
* Note: For ERI, the last 8-bits are used.
*/
static uint32_t
{
/*
* Just want the 8 most significant bits.
*/
return ((~crc) >> 24);
}
/*
* Assorted DLPI V2 routines.
*/
static void
{
union DL_primitives *dlp;
"eri_proto Entered");
switch (prim) {
case DL_UNITDATA_REQ:
break;
case DL_ATTACH_REQ:
"eri_proto : ATTACH_REQ");
break;
case DL_DETACH_REQ:
break;
case DL_BIND_REQ:
"eri_proto : BIND_REQ");
break;
case DL_UNBIND_REQ:
break;
case DL_INFO_REQ:
"eri_proto : INFO_REQ");
break;
case DL_PROMISCON_REQ:
break;
case DL_PROMISCOFF_REQ:
break;
case DL_ENABMULTI_REQ:
break;
case DL_DISABMULTI_REQ:
break;
case DL_PHYS_ADDR_REQ:
break;
case DL_SET_PHYS_ADDR_REQ:
break;
case DL_NOTIFY_REQ:
break;
case DL_CAPABILITY_REQ:
break;
default:
break;
}
}
/*ARGSUSED*/
static int
{
int rc = 0;
"eri_open Entered");
/*
* Serialize all driver open and closes.
*/
/*
* Determine minor device number.
*/
minordev = 0;
break;
minordev++;
}
} else
goto done;
"eri_open: sbp = %X\n", sbp);
/*
* Link new entry into the list of active entries.
*/
/*
* Disable automatic enabling of our write service procedure.
* We control this explicitly.
*/
done:
/* inform framework that we are a good citizen */
return (rc);
}
static int
{
int promisc_cnt = 0;
int sap_cnt = 0;
int all_multi_cnt = 0;
/*
* If the stream was closed without calling eripoffreq,
* update the counters.
*/
++promisc_cnt;
}
++sap_cnt;
}
}
if (erip) {
}
/*
* Implicit detach Stream from interface.
*/
/* dissociate queue */
/*
* Unlink the per-Stream entry from the active list and free it.
*/
break;
return (0);
}
/*
*
* internal locks that are held across upstream putnext calls.
* Specifically there's the problem of eri_intr() holding eri_intrlock
* and eristruplock when it calls putnext() and that thread looping
* back around to call eri_wput() and, eventually, eri_init() to create a
* recursive lock panic. There are two obvious ways of solving this
* problem: (1) have eri_intr() do putq instead of putnext which provides
* the loopback "cutout" right at the rq, or (2) allow eri_intr() to putnext
* and put the loopback "cutout" around eri_proto(). We choose the latter
* for performance reasons.
*
* M_DATA messages are enqueued on the wq *only* when the xmit side
* is out of tbufs or tmds. Once the xmit resource is available again,
* wsrv() is enabled and tries to xmit all the messages on the wq.
*/
static int
{
case M_DATA:
if (erip) {
goto done;
} else
break;
case M_PROTO:
case M_PCPROTO:
break;
default:
ASSERT(0);
break;
}
done:
return (0);
}
#ifdef XMIT_SERIAL_QUEUE
static int
{
int refed;
int ret = 0;
refed = 0;
goto last;
}
next:
refed = 1;
/*
* Mblk chain on syncq, so just add ours, if any
* to the end and get the first one.
*/
else
} else
last:
if (smp) {
/*
* Mblk chain to save, so just add it to
* the end of the sycnq.
*/
else
}
/*
* Nothing more todo ...
*/
if (refed)
return (ret);
}
goto next;
return (0);
}
#endif
static int
{
case M_DATA: /* "fastpath" */
break;
}
/*
* If any msgs already enqueued or the interface will
* loop back up the message (due to ERI_PROMISC), then
* enqueue the msg. Otherwise just xmit it directly.
*/
} else
#ifdef XMIT_SERIAL_QUEUE
#else
#endif
break;
case M_PROTO:
case M_PCPROTO:
/*
* Break the association between the current thread
* and the thread that calls eri_proto() to resolve the
* problem of eri_intr() threads which loop back around
* to call eri_proto() and try to recursively acquire
* internal locks.
*/
break;
case M_IOCTL:
break;
case M_CTL:
break;
case M_FLUSH:
}
#ifdef ERI_SERVICE_ROUTINE
#endif
} else
break;
default:
break;
}
return (0);
}
static void
{
"eri_ioctl Entered");
case DLIOCRAW: /* raw M_DATA mode */
break;
case DL_IOC_HDR_INFO: /* M_DATA "fastpath" info request */
break;
case ERI_ND_GET:
case ERI_ND_SET:
break;
case ERI_SET_LOOP_MODE:
case ERI_GET_LOOP_MODE:
else
break;
default:
break;
}
if (erip)
}
static void
{
loopback_t *al;
"eri_loopback Entered");
return;
}
case ERI_SET_LOOP_MODE:
"ERI_SET_LOOP_MODE\n");
case ERI_LOOPBACK_OFF:
"ERI_LOOPBACK_OFF\n");
/* force link status to go down */
param_linkup = 0;
break;
case ERI_MAC_LOOPBACK_ON:
"ERI_MAC_LOOPBACK_ON\n");
param_linkup = 0;
break;
case ERI_PCS_LOOPBACK_ON:
"ERI_PCS_LOOPBACK_ON\n");
break;
case ERI_SER_LOOPBACK_ON:
"ERI_SER_LOOPBACK_ON\n");
/* force link status to go down */
param_linkup = 0;
break;
default:
return;
}
break;
case ERI_GET_LOOP_MODE:
"ERI_GET_LOOP_MODE\n");
break;
default:
}
}
/*
* M_DATA "fastpath" info request.
* Following the M_IOCTL mblk should come a DL_UNITDATA_REQ mblk.
* We ack with an M_IOCACK pointing to the original DL_UNITDATA_REQ mblk
* followed by an mblk containing the raw ethernet header corresponding
* to the destination address. Subsequently, we may receive M_DATA
* msgs which start with this header and may send up
* up M_DATA msgs with b_rptr pointing to a (uint32_t) group address
* indicator followed by the network-layer data (IP packet header).
* This is all selectable on a per-Stream basis.
*/
static void
{
struct ether_header *headerp;
int error;
return;
}
if (error != 0) {
return;
}
/*
* Sanity check the DL_UNITDATA_REQ destination address
* offset and length values.
*/
return;
}
/*
* Allocate a new mblk to hold the ether header.
*/
return;
}
/*
* Fill in the ether header.
*/
/*
* Link new mblk in after the "request" mblks.
*/
}
/* ARGSUSED */
static void
{
} /* eri_mctl */
static void
{
union DL_primitives *dlp;
"eri_areq: Bad REQ Size");
return;
}
"eri_areq: DL_OUTSTATE");
return;
}
/*
*/
promisc++;
}
all_sap++;
}
all_multi++;
}
/*
* Valid ppa?
*/
"eri_areq: erip == NULL");
return;
}
"got instance");
break;
}
/* Set link to device and update our state. */
/*
* Has device been initialized? Do so if necessary.
* Also check if promiscuous mode is set via the ALLPHYS and
* ALLMULTI flags, for the stream. If so, initialize the
* interface.
*/
"eri_areq: eri_init FAILED");
return;
}
} else {
if (init) {
/*
* Reinitialize rx mac
*/
}
}
}
static void
{
return;
}
return;
}
/* dissociate queue */
}
/*
* Detach a Stream from an interface.
*/
static void
{
int reinit = 0, i;
/* Disable promiscuous mode if on. */
promisc++;
reinit = 1;
}
/* Disable ALLSAP mode if on. */
all_sap++;
}
/* Disable ALLMULTI mode if on. */
all_multi++;
reinit = 1;
}
/* Disable MULTI mode if on. */
reinit = 2;
}
/* Disable any Multicast Addresses. */
for (i = 0; i < NMCHASH; i++) {
reinit = 2;
sizeof (struct ether_addr));
}
}
for (i = 0; i < NMCFILTER_BITS/16; i++)
for (i = 0; i < NMCFILTER_BITS; i++)
sbp->sb_ladrf_refcnt[i] = 0;
/*
* Detach from device structure.
* Uninit the device and update power management property
* when no other streams are attached to it.
*/
break;
else if (reinit) {
reinit == 2)
if (erip->promisc_cnt == 0)
}
}
static void
{
union DL_primitives *dlp;
"eri_breq: Bad REQ Size");
return;
}
"eri_breq: Bad DL_OUTSTATE");
return;
}
if (xidtest) {
"eri_breq: Bad DL_NOAUTO");
return;
}
if (sap > ETHERTYPE_MAX) {
"eri_breq: DL_BADSAP");
return;
}
/*
* Save SAP value for this Stream and change state.
*/
}
static void
{
return;
}
return;
}
}
static void
{
struct ether_addr *ep;
"eri_ireq: < DL_INFO_REQ_SIZE");
return;
}
/* Exchange current msg for a DL_INFO_ACK. */
"eri_ireq: mp == NULL");
return;
}
/* Fill in the DL_INFO_ACK fields and reply. */
"eri_ireq: sbp->sb_erip");
} else {
"eri_ireq: !sbp->sb_erip");
}
}
static void
{
int phy_flag = 0;
int sap_flag = 0;
int allmulti_flag = 0;
return;
}
/*
* Do not increment counter if already set.
*/
phy_flag = 1;
sap_flag = 1;
allmulti_flag = 1;
case DL_PROMISC_PHYS:
break;
case DL_PROMISC_SAP:
break;
case DL_PROMISC_MULTI:
break;
default:
return;
}
if (erip) {
erip->promisc_cnt++;
erip->all_sap_cnt++;
erip->all_multi_cnt++;
}
}
}
static void
{
int flag;
return;
}
case DL_PROMISC_PHYS:
flag = ERI_SALLPHYS;
break;
case DL_PROMISC_SAP:
flag = ERI_SALLSAP;
break;
case DL_PROMISC_MULTI:
break;
default:
return;
}
return;
}
if (erip) {
if (flag == ERI_SALLPHYS)
erip->promisc_cnt--;
if (flag == ERI_SALLSAP)
erip->all_sap_cnt--;
if (flag == ERI_SALLMULTI)
erip->all_multi_cnt--;
if (flag == ERI_SALLPHYS)
}
}
}
/*
* This is to support unlimited number of members
* in Multicast.
*/
static void
{
union DL_primitives *dlp;
struct ether_addr *addrp;
struct ether_addr *mcbucket;
return;
}
return;
}
if ((len != ETHERADDRL) ||
return;
}
/*
* Calculate hash value and bucket.
*/
/*
* Allocate hash bucket if it's not there.
*/
sizeof (struct ether_addr),
KM_SLEEP);
}
/*
* We no longer bother checking to see if the address is already
* in the table. We won't reinitialize the
* hardware, since we'll find the mc bit is already set.
*/
/*
* Expand table if necessary.
*/
struct ether_addr *newbucket;
int newsize;
KM_SLEEP);
sizeof (struct ether_addr));
}
/*
* Add address to the table.
*/
/*
* If this address's bit was not already set in the local address
* filter, add it and re-initialize the Hardware.
*/
}
}
static void
{
union DL_primitives *dlp;
struct ether_addr *addrp;
int i;
struct ether_addr *mcbucket;
return;
}
return;
}
return;
}
/*
* Calculate hash value, get pointer to hash bucket for this address.
*/
/*
* Try and delete the address if we can find it.
*/
if (mcbucket) {
/*
* If there's more than one address in this
* bucket, delete the unwanted one by moving
* the last one in the list over top of it;
* otherwise, just free the bucket.
*/
mcbucket[i] =
} else {
sizeof (struct ether_addr));
}
/*
* If this address's bit should no longer be
* set in the local address filter, clear it and
* re-initialize the Hardware
*/
}
return;
}
}
}
}
static void
{
union DL_primitives *dlp;
struct ether_addr addr;
return;
}
return;
}
switch (type) {
case DL_FACT_PHYS_ADDR:
else
break;
case DL_CURR_PHYS_ADDR:
break;
default:
return;
}
}
static void
{
union DL_primitives *dlp;
struct ether_addr *addrp;
return;
}
return;
}
/*
* Error if length of address isn't right or the address
* specified is a multicast or broadcast address.
*/
if ((len != ETHERADDRL) ||
return;
}
/*
* Error if this stream is not attached to a device.
*/
return;
}
/*
* Set new interface local address and re-init device.
* This is destructive to any other streams attached
* to this device.
*/
}
static void
{
struct ether_header *headerp;
uint_t start_offset = 0;
uint_t stuff_offset = 0;
uint_t end_offset = 0;
return;
}
/*
* Validate destination address format.
*/
return;
}
/*
* Error if no M_DATA follows.
*/
return;
}
/*
* Create ethernet header by either prepending it onto the
* next mblk if possible, or reusing the M_PROTO block if not.
*/
} else {
#ifdef ERI_HWCSUM
if (flags & HCK_PARTIALCKSUM) {
0, flags, 0);
"eri_udreq: added new buffer\n");
}
}
#endif /* ERI_HWCSUM */
}
/*
* In 802.3 mode, the driver looks at the
* sap field of the DL_BIND_REQ being 0 in addition to the destination
* sap field in the range [0-1500]. If either is true, then the driver
* computes the length of the message, not including initial M_PROTO
* mblk (message block), of all subsequent DL_UNITDATA_REQ messages and
* transmits 802.3 frames that have this value in the MAC frame header
* length field.
*/
} else {
}
}
static void
{
return;
}
return;
}
while (dl_notification) {
break;
dlnip->dl_notification = 0;
dlnip->dl_addr_length = 0;
dlnip->dl_addr_offset = 0;
if (dl_notification & DL_NOTE_PROMISC_ON_PHYS) {
if (erip->promisc_cnt)
} else if (dl_notification & DL_NOTE_PROMISC_OFF_PHYS) {
if (erip->promisc_cnt == 0)
} else if (dl_notification & DL_NOTE_LINK_DOWN) {
if (!param_linkup)
} else if (dl_notification & DL_NOTE_LINK_UP) {
if (param_linkup)
} else if (dl_notification & DL_NOTE_SPEED) {
}
if (dlnip->dl_notification) {
} else
}
}
static void
{
continue;
== NULL) {
break;
}
if (notification == DL_NOTE_SPEED)
else
dlnip->dl_addr_length = 0;
dlnip->dl_addr_offset = 0;
}
}
}
/*
* Set or clear the device ipq pointer.
* XXX Assumes IPv4 and IPv6 are ERIFAST.
*/
static void
{
int ok4 = 1;
int ok6 = 1;
ok4 = 0;
ok6 = 0;
break;
}
else
ok4 = 0;
}
else
ok6 = 0;
}
}
}
if (ok4)
else
if (ok6)
else
}
/*
* Hardware Functions
* New Section
*/
/*
* Initialize the MAC registers. Some of of the MAC registers are initialized
* just once since Global Reset or MAC reset doesn't clear them. Others (like
* Host MAC Address Registers) are cleared on every reset and have to be
* reinitialized.
*/
static void
{
int i;
/*
* set up the MAC parameter registers once
* setting these registers.
*/
erip->init_macregs = 0;
#ifdef ERI_RX_TAG_ERROR_WORKAROUND
#else
#endif
/* Program MAC Control address */
}
/* The counters need to be zeroed */
PUT_MACREG(nccnt, 0);
PUT_MACREG(fccnt, 0);
PUT_MACREG(excnt, 0);
PUT_MACREG(ltcnt, 0);
PUT_MACREG(dcnt, 0);
PUT_MACREG(frcnt, 0);
PUT_MACREG(lecnt, 0);
PUT_MACREG(aecnt, 0);
PUT_MACREG(fecnt, 0);
PUT_MACREG(rxcv, 0);
else
PUT_MACREG(spcmd, 0);
/*
* Program BigMAC with local individual ethernet address.
*/
/*
* Set up multicast address filter by passing all multicast
* addresses through a crc generator, and then using the
* low order 8 bits as a index into the 256 bit logical
* address filter. The high order four bits select the word,
* while the rest of the bits select the bit within the word.
*/
/*
* Here we initialize the MC Hash bits
*/
for (i = 0; i < NMCFILTER_BITS/16; i++) {
ladrf[i] = 0xffff;
}
break; /* All bits are already on */
}
for (i = 0; i < NMCFILTER_BITS/16; i++)
}
}
}
/*
 * NOTE(review): the function-name line and several statements are
 * missing from this extraction; presumably eri_flush_txbufs (the Tx
 * counterpart of the Rx flush routine below) — confirm against the
 * complete source.  status starts at 0 and is set to -1 in the
 * (elided) error path; that value is returned to the caller.
 */
static int
{
uint_t i;
int status = 0;
/*
* Free and dvma_unload pending xmit buffers.
* Maintaining the 1-to-1 ordered sequence of
* dvma_load() followed by dvma_unload() is critical.
* Always unload anything before loading it again.
* Never unload anything twice. Always unload
* before freeing the buffer. We satisfy these
* requirements by unloading only those descriptors
* which currently have an mblk associated with them.
*/
for (i = 0; i < ERI_TPENDING; i++) {
/* NOTE(review): loop-body statements elided by the extraction */
if (erip->eri_dvmaxh)
status = -1;
}
}
return (status);
}
/*
 * NOTE(review): name line and several statements missing from this
 * extraction; presumably eri_flush_rxbufs (receive-side unload/free,
 * mirroring the Tx routine above) — confirm against the complete
 * source.  Returns 0, or -1 set in the (elided) error path.
 */
static int
{
uint_t i;
int status = 0;
/*
* Free and dvma_unload pending recv buffers.
* Maintaining the 1-to-1 ordered sequence of
* dvma_load() followed by dvma_unload() is critical.
* Always unload anything before loading it again.
* Never unload anything twice. Always unload
* before freeing the buffer. We satisfy these
* requirements by unloading only those descriptors
* which currently have an mblk associated with them.
*/
for (i = 0; i < ERI_RPENDING; i++) {
/* NOTE(review): loop-body statements elided by the extraction */
if (erip->eri_dvmarh)
status = -1;
}
}
return (status);
}
/*
 * NOTE(review): name line missing; presumably eri_init_txbufs.  The
 * statements under each of the three comment headings (clear, sync,
 * reset) were elided by the extraction; only the walking-pointer
 * resets remain visible.  Confirm against the complete source.
 */
static void
{
/*
* Clear TX descriptors.
*/
/*
* sync TXDMA descriptors.
*/
/*
* Reset TMD 'walking' pointers.
*/
erip->tx_cur_cnt = 0;
erip->tx_completion = 0;
}
/*
 * NOTE(review): name line missing, but the error string below
 * identifies this as eri_init_rxbufs.  Per-slot work (allocb, DVMA
 * load or DMA bind, descriptor write) is largely elided; on an
 * allocation failure status becomes -1 and the slot is skipped
 * (continue).  Returns the accumulated status.
 */
static int
{
int i, status = 0;
/*
* clear rcv descriptors
*/
for (i = 0; i < ERI_RPENDING; i++) {
/* NOTE(review): allocb call and its failure test elided */
status = -1;
"eri_init_rxbufs allocb failed");
continue;
}
/* Load data buffer to DVMA space */
if (erip->eri_dvmarh)
2 * i, &dma_cookie);
/*
* Bind data buffer to DMA handle
*/
DDI_DMA_DONTWAIT, 0,
status = -1;
}
/*
* sync RXDMA descriptors.
*/
/*
* Reset RMD 'walking' pointers.
*/
erip->rx_completion = 0;
return (status);
}
/*
 * NOTE(review): name line missing; presumably eri_txmac_disable
 * (calls to that name appear later in this file).  Poll loop
 * returning 0 on (elided) success condition, 1 on timeout.  As shown,
 * n is decremented without being initialized — its initializer (cf.
 * the sibling routine below, which sets n from a delay constant) was
 * elided by the extraction.  Confirm against the complete source.
 */
static uint32_t
{
int n;
while (--n > 0) {
return (0);
}
return (1);
}
/*
 * NOTE(review): name line missing; presumably eri_rxmac_disable given
 * the BMACRXRSTDELAY poll budget.  Waits up to BMACRXRSTDELAY (in
 * ERI_WAITPERIOD steps) for the (elided) condition; returns 0 on
 * success, 1 on timeout.  Confirm against the complete source.
 */
static uint32_t
{
int n;
n = BMACRXRSTDELAY / ERI_WAITPERIOD;
while (--n > 0) {
return (0);
}
return (1);
}
/*
* Return 0 upon success, 1 on failure.
*/
/*
 * NOTE(review): name line missing, but the trace/error strings
 * identify this as eri_stop.  Resets both DMA engines, applies the
 * RIO interrupt-mask workaround, and returns 0 on success or 1 with
 * the "cannot stop eri" message; the surrounding conditionals and
 * register writes were elided by the extraction.
 */
static int
{
"eri_stop");
(void) eri_erx_reset(erip);
(void) eri_etx_reset(erip);
/*
* set up cache line to 16 for 64 bytes of pci burst size
*/
} else {
param_linkup = 0;
}
/*
* workaround for RIO not resetting the interrupt mask
* register to default value 0xffffffff.
*/
return (0);
} else {
"cannot stop eri");
return (1);
}
}
/*
* Reset Just the RX Portion
* Return 0 upon success, 1 on failure.
*
* Resetting the rxdma while there is a rx dma transaction going on the
* bus, will cause bus hang or parity errors. To avoid this, we would first
* disable the rxdma by clearing the ENABLE bit (bit 0). To make sure it is
* disabled, we will poll it until it realy clears. Furthermore, to verify
* any RX DMA activity is subsided, we delay for 5 msec.
*/
/*
 * NOTE(review): name line missing; presumably eri_erx_reset (called
 * by that name elsewhere in this file).  Per the block comment above
 * this function, it disables rxdma, polls for the reset bit to clear,
 * and delays for DMA quiescence; the register accesses and the poll
 * condition were elided.  Returns 1 on timeout, 0 on success.
 */
static uint32_t
{
/*
* Wait until the reset is completed which is indicated by
* the reset bit cleared or time out..
*/
"Can not reset erx");
return (1);
} else
return (0);
}
/*
* Reset Just the TX Portion
* Return 0 upon success, 1 on failure.
* Resetting the txdma while there is a tx dma transaction on the bus, may cause
* bus hang or parity errors. To avoid this we would first disable the txdma by
* clearing the ENABLE bit (bit 0). To make sure it is disabled, we will poll
* it until it realy clears. Furthermore, to any TX DMA activity is subsided,
* we delay for 1 msec.
*/
/*
 * NOTE(review): name line missing; presumably eri_etx_reset (called
 * by that name elsewhere in this file).  Disables the transmit MAC
 * first, then (in elided code) resets the TX DMA and polls for the
 * reset bit to clear.  Returns 1 on timeout ("cannot reset eri etx"),
 * 0 on success.
 */
static uint32_t
{
(void) eri_txmac_disable(erip);
#ifdef ORIG
#endif
/*
* Wait until the reset is completed which is indicated by the reset bit
* cleared or time out..
*/
"cannot reset eri etx");
return (1);
} else
return (0);
}
/*
* Initialize the TX DMA registers and Enable the TX DMA.
*/
/*
 * NOTE(review): name line missing, but the debug string identifies
 * this as eri_init_txregs.  Encodes the compile-time ring size
 * ERI_TPENDING into the ETX ring-size field (shifted by
 * ERI_TX_RINGSZ_SHIFT); the actual register writes (config,
 * txring_lo/hi, fifo threshold) were elided by the extraction.
 * Returns 1 for an unsupported ring size, 0 on success.
 */
static uint32_t
{
uint32_t i;
#ifdef DEBUG
#endif
/*
* Initialize ETX Registers:
* config, txring_lo, txring_hi
*/
/*
* Get TX Ring Size Masks.
* The ring size ERI_TPENDING is defined in eri_mac.h.
*/
switch (ERI_TPENDING) {
case 32: i = ETX_RINGSZ_32;
break;
case 64: i = ETX_RINGSZ_64;
break;
case 128: i = ETX_RINGSZ_128;
break;
case 256: i = ETX_RINGSZ_256;
break;
case 512: i = ETX_RINGSZ_512;
break;
case 1024: i = ETX_RINGSZ_1024;
break;
case 2048: i = ETX_RINGSZ_2048;
break;
case 4096: i = ETX_RINGSZ_4096;
break;
default:
return (1);
}
"eri_init_txregs: tx fifo threshold %X",
txfifoth);
i <<= ERI_TX_RINGSZ_SHIFT;
return (0);
}
/*
* Initialize the RX DMA registers and Enable the RX DMA.
*/
/*
 * NOTE(review): name line missing; presumably eri_init_rxregs (called
 * by that name elsewhere in this file).  Builds the ERX config word:
 * ring-size code for ERI_RPENDING shifted into place, first-byte
 * offset, and the ethernet-header-based checksum start offset, then
 * writes it with PUT_ERXREG(config, i).  Other register writes
 * (rxring_lo/hi, blanking, pause threshold, kick) were elided.
 * Returns 1 for an unsupported ring size, 0 on success.
 */
static uint32_t
{
int i;
/*
* Initialize ERX Registers:
* rxring_lo, rxring_hi, config, rx_blanking, rx_pause_threshold.
* Also, rx_kick
* Read and save rxfifo_size.
* XXX: Use this to properly configure PAUSE threshold values.
*/
/*
* The Max ring size, ERI_RMDMAX is defined in eri_mac.h.
* More ERI_RPENDING will provide better performance but requires more
* system DVMA memory.
* eri_rx_ring_size cannot be NDD'able due to non-recoverable errors
* which cannot be detected from NDD operations
*/
/*
* get the rxring size bits
*/
switch (ERI_RPENDING) {
case 32: i = ERX_RINGSZ_32;
break;
case 64: i = ERX_RINGSZ_64;
break;
case 128: i = ERX_RINGSZ_128;
break;
case 256: i = ERX_RINGSZ_256;
break;
case 512: i = ERX_RINGSZ_512;
break;
case 1024: i = ERX_RINGSZ_1024;
break;
case 2048: i = ERX_RINGSZ_2048;
break;
case 4096: i = ERX_RINGSZ_4096;
break;
default:
return (1);
}
i <<= ERI_RX_RINGSZ_SHIFT;
i |= (ERI_FSTBYTE_OFFSET << ERI_RX_CONFIG_FBO_SHIFT) |
(sizeof (struct ether_header) <<
PUT_ERXREG(config, i);
return (0);
}
/*
 * NOTE(review): name line missing; possibly eri_freebufs (declared in
 * the prototypes and called with a single erip argument later in this
 * file) — confirm against the complete source.  The entire body
 * between the declaration and the return was elided; as shown it only
 * returns the initial status of 0.
 */
static int
{
int status = 0;
return (status);
}
/*
 * NOTE(review): name line missing; re-hangs the ERI_RPENDING receive
 * buffers on the descriptor ring, syncs the RMD area for the device,
 * and resets the RMD walking pointers.  The per-slot descriptor
 * writes and the sync call itself were elided by the extraction.
 */
static void
{
int i;
/*
* Hang out receive buffers.
*/
for (i = 0; i < ERI_RPENDING; i++) {
}
/*
* sync RXDMA descriptors.
*/
(ERI_RPENDING * sizeof (struct rmd)),
/*
* Reset RMD 'walking' pointers.
*/
erip->rx_completion = 0;
}
/*
* This routine is used to reset the RX DMA only. In the case of RX
* failures such as RX Tag Error, RX hang etc... we don't want to
* do global reset which takes down the link and clears the FIFO's
* By doing RX only reset, we leave the TX and the link intact.
*/
/*
 * NOTE(review): name line missing; presumably eri_init_rx_channel
 * (called by that name from the interrupt path in this file).  Per
 * the block comment above: RX-only reset that leaves TX and the link
 * intact.  Resets the ERX engine, reprograms the RX registers
 * (failing with 1 if that fails), clears the pending rx-reset flag,
 * and returns 0.  Buffer re-hang and RX enable steps were elided.
 */
static uint32_t
{
(void) eri_erx_reset(erip);
if (eri_init_rxregs(erip))
return (1);
erip->rx_reset_issued = 0;
return (0);
}
static void
{
int i;
/*
* First of all make sure the Receive MAC is stop.
*/
/*
* Program BigMAC with local individual ethernet address.
*/
/*
* XXX moved here setting erip->flags from end of this fn.
*/
if (erip->promisc_cnt)
else
if (erip->all_multi_cnt)
else
/*
* Set up multicast address filter by passing all multicast
* addresses through a crc generator, and then using the
* low order 8 bits as a index into the 256 bit logical
* address filter. The high order four bits select the word,
* while the rest of the bits select the bit within the word.
*/
/*
* Here we initialize the MC Hash bits
*/
for (i = 0; i < NMCFILTER_BITS/16; i++) {
ladrf[i] = 0xffff;
}
break; /* All bits are already on */
}
for (i = 0; i < NMCFILTER_BITS/16; i++)
}
}
/*
* Determine if Multicast mode.
*/
else
break;
}
}
#ifdef ERI_DONT_STRIP_CRC
#else
#endif
/* wait after setting Hash Enable bit */
/* drv_usecwait(10); */
#if 0
/*
* XXX why is this here?
* should be moved before setting h/w register.
*/
if (erip->promisc_cnt)
else
else
#endif
}
#ifdef LATER_SPLIT_TX_RX
/*
* This routine is used to reset the TX DMA only.
* &erip->xmitlock is held before calling this routine.
*/
void
{
uint32_t carrier_ext = 0;
(void) eri_etx_reset(erip);
(void) eri_init_txregs(erip);
if (erip->ngu_enable)
((param_mode ? BMAC_TXCFG_FDX: 0) |
BMAC_TXCFG_ENIPG0 : 0) |
(carrier_ext ? BMAC_TXCFG_CARR_EXT : 0) |
else
((param_mode ? BMAC_TXCFG_FDX: 0) |
BMAC_TXCFG_ENIPG0 : 0) |
(carrier_ext ? BMAC_TXCFG_CARR_EXT : 0)));
erip->tx_reset_issued = 0;
}
#endif
/*
* This routine is used to init the TX MAC only.
* &erip->xmitlock is held before calling this routine.
*/
void
{
uint32_t carrier_ext = 0;
/*
* Stop the Transmit MAC.
*/
(void) eri_txmac_disable(erip);
/*
* Must be Internal Transceiver
*/
if (param_mode)
BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE));
else
BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE |
/*
* Initialize the interpacket gap registers
*/
if (erip->ngu_enable)
((param_mode ? BMAC_TXCFG_FDX: 0) |
BMAC_TXCFG_ENIPG0 : 0) |
(carrier_ext ? BMAC_TXCFG_CARR_EXT : 0) |
else
((param_mode ? BMAC_TXCFG_FDX: 0) |
BMAC_TXCFG_ENIPG0 : 0) |
(carrier_ext ? BMAC_TXCFG_CARR_EXT : 0)));
}
/*
* Start xmit on any msgs previously enqueued on any write queues.
*/
static void
{
/*
* Order of wantw accesses is important.
*/
do {
}
static void
{
uint32_t i;
if (flag & ERI_DESC_MEM_MAP)
if (flag & ERI_DESC_MEM_ALLOC) {
}
if (flag & ERI_DESC_HANDLE_ALLOC)
(void) eri_freebufs(erip);
if (flag & ERI_XMIT_HANDLE_ALLOC)
for (i = 0; i < erip->xmit_handle_cnt; i++)
if (flag & ERI_XMIT_DVMA_ALLOC) {
}
if (flag & ERI_RCV_HANDLE_ALLOC)
for (i = 0; i < erip->rcv_handle_cnt; i++)
if (flag & ERI_RCV_DVMA_ALLOC) {
}
if (flag & ERI_XBUFS_KMEM_DMABIND) {
erip->tbuf_ioaddr = 0;
}
if (flag & ERI_XBUFS_KMEM_ALLOC) {
}
if (flag & ERI_XBUFS_HANDLE_ALLOC) {
}
}
/*
* Initialize channel.
* Return 0 on success, nonzero on error.
*
* The recommended sequence for initialization is:
* 1. Issue a Global Reset command to the Ethernet Channel.
* 2. Poll the Global_Reset bits until the execution of the reset has been
* completed.
* Poll Register 0 to till the Resetbit is 0.
* 100Mbps and Non-Isolated mode. The main point here is to bring the
* PHY out of Isolate mode so that it can generate the rx_clk and tx_clk
* to the MII interface so that the Bigmac core can correctly reset
* upon a software reset.
* 2(c). Issue another Global Reset command to the Ethernet Channel and poll
* the Global_Reset bits till completion.
* 3. Set up all the data structures in the host memory.
* Register).
* Register).
* 6. Program the Transmit Descriptor Ring Base Address in the ETX.
* 7. Program the Receive Descriptor Ring Base Address in the ERX.
* 8. Program the Global Configuration and the Global Interrupt Mask Registers.
* 9. Program the ETX Configuration register (enable the Transmit DMA channel).
* 10. Program the ERX Configuration register (enable the Receive DMA channel).
* 11. Program the XIF Configuration Register (enable the XIF).
* 12. Program the RX_MAC Configuration Register (Enable the RX_MAC).
* 13. Program the TX_MAC Configuration Register (Enable the TX_MAC).
*/
/*
* lock order:
* intrlock->linklock->eristruplock->xmitlock->xcvrlock
*/
static int
{
uint32_t partial_init = 0;
uint32_t carrier_ext = 0;
"eri_init: Entered");
/*
* Just return if device is suspended.
* eri_init() will be called again from resume.
*/
"eri_init: erip == NULL");
ret = 1;
goto init_exit;
"eri_init: erip->flags & ERI_SUSPENDED");
ret = 1;
goto init_exit;
}
"eri_init: Entered erip");
}
(void) eri_new_xcvr(erip);
"New transceiver detected.");
if (param_transceiver != NO_XCVR) {
/*
* Reset the new PHY and bring up the
* link
*/
if (eri_reset_xcvr(erip)) {
ERI_VERB_MSG, "In Init after reset");
link_timeout = 0;
goto done;
}
} else {
param_linkup = 0;
/*
* Still go on and complete the MAC initialization as
* xcvr might show up later.
* you must return to their mutex ordering.
*/
}
}
/*
* Allocate data structures.
*/
if (erip->global_reset_issued) {
if (eri_flush_txbufs(erip))
goto done;
/*
* Hang out/Initialize descriptors and buffers.
*/
} else {
if (init_stat)
goto done;
if (eri_freebufs(erip))
goto done;
/*
* Hang out/Initialize descriptors and buffers.
*/
if (eri_init_rxbufs(erip))
goto done;
}
}
/*
* Determine if promiscuous mode or multicast mode.
*/
break;
}
}
/*
* Determine which internal loopback mode, if any
* only one internal loopback mode is set, the checking order is
* SERDES/SERIAL_LINK, PCS, and MAC
*/
"init(): flags = 0x%x\n",
break;
}
"init(): flags = 0x%x\n",
break;
}
}
}
/*
* BigMAC requires that we confirm that tx, rx and hash are in
* quiescent state.
* MAC will not reset successfully if the transceiver is not reset and
* brought out of Isolate mode correctly. TXMAC reset may fail if the
* ext. transceiver is just disconnected. If it fails, try again by
* checking the transceiver.
*/
if (eri_txmac_disable(erip)) {
param_linkup = 0; /* force init again */
goto done;
}
if (eri_rxmac_disable(erip)) {
param_linkup = 0; /* force init again */
goto done;
}
/*
* Initialize ERI Global registers :
* config
* For PCI : err_mask, bif_cfg
*
* Use user-configurable parameter for enabling 64-bit transfers.
* Note:For PCI, burst sizes are in multiples of 64-bytes.
*/
/*
* Significant performance improvements can be achieved by
* disabling transmit interrupt. Thus TMD's are reclaimed
* only very infrequently.
* The PCS Interrupt is masked here. It is enabled only when
* a PCS link is brought up because there is no second level
* mask for this interrupt..
* Init GLOBAL, TXMAC, RXMAC and MACCTL interrupt masks here.
*/
if (! partial_init) {
}
if (erip->global_reset_issued) {
/*
* Initialize ETX Registers:
* config, txring_lo, txring_hi
*/
if (eri_init_txregs(erip))
goto done;
/*
* Initialize ERX Registers:
* rxring_lo, rxring_hi, config, rx_blanking,
* rx_pause_threshold. Also, rx_kick
* Read and save rxfifo_size.
*/
if (eri_init_rxregs(erip))
goto done;
}
/*
* Set up the slottime,and rxconfig, txconfig without enabling
* the latter two at this time
*/
carrier_ext = 0;
#ifdef ERI_DONT_STRIP_CRC
(carrier_ext ? BMAC_RXCFG_CARR_EXT : 0)));
#else
(carrier_ext ? BMAC_RXCFG_CARR_EXT : 0)));
#endif
if (erip->ngu_enable)
((param_mode ? BMAC_TXCFG_FDX: 0) |
BMAC_TXCFG_ENIPG0 : 0) |
(carrier_ext ? BMAC_TXCFG_CARR_EXT : 0) |
else
((param_mode ? BMAC_TXCFG_FDX: 0) |
BMAC_TXCFG_ENIPG0 : 0) |
(carrier_ext ? BMAC_TXCFG_CARR_EXT : 0)));
/*
* Must be Internal Transceiver
*/
if (param_mode)
BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE));
else {
BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE |
}
/*
* if MAC int loopback flag is set, put xifc reg in mii loopback
* mode {DIAG}
*/
"eri_init(): put in MAC int loopback mode\n");
} else {
"init(): internal loopback mode not set\n");
}
/*
* Enable TX and RX MACs.
*/
ERI_TXINIT | ERI_RXINIT);
erip->global_reset_issued = 0;
#endif
done:
if (init_stat)
if (ret) {
"eri_init failed");
}
return (ret);
}
/*
* 0 as burstsize upon failure as it signifies no burst size.
*/
/*
 * NOTE(review): matches the eri_burstsize prototype; per the comment
 * above, 0 burstsize on failure signifies no burst size.  As shown
 * the first return makes the rest unreachable — the conditional that
 * guards it (presumably a ddi_dma handle/burstsizes query) was elided
 * by the extraction.  Confirm against the complete source.
 */
static int
{
return (DDI_FAILURE);
if (erip->burstsizes)
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
/*
* Un-initialize (STOP) ERI channel.
*/
static void
{
/*
* Allow up to 'ERI_DRAINTIME' for pending xmit's to complete.
*/
}
/*
* Allocate CONSISTENT memory for rmds and tmds with appropriate alignment and
* map it in IO space.
*
* The driver allocates STREAMS buffers which will be mapped in DVMA
* space using DDI DMA resources.
*
*/
static int
{
uintptr_t a;
int size;
int i;
int alloc_stat = 0;
/*
* Return if resources are already allocated.
*/
return (alloc_stat);
erip->alloc_flag = 0;
/*
* Allocate the TMD and RMD descriptors and extra for alignments.
*/
if (rval != DDI_SUCCESS) {
return (++alloc_stat);
}
if (rval != DDI_SUCCESS) {
return (++alloc_stat);
}
DDI_DMA_DONTWAIT, 0,
if (rval != DDI_DMA_MAPPED)
return (++alloc_stat);
if (cookiec != 1)
return (++alloc_stat);
a = ROUNDUP(a, ERI_GMDALIGN);
a += ERI_RPENDING * sizeof (struct rmd);
/*
* Specifically we reserve n (ERI_TPENDING + ERI_RPENDING)
* pagetable entries. Therefore we have 2 ptes for each
* descriptor. Since the ethernet buffers are 1518 bytes
* so they can at most use 2 ptes.
* Will do a ddi_dma_addr_setup for each bufer
*/
/*
* In the current implementation, we use the ddi compliant
* dma interface. We allocate ERI_TPENDING dma handles for
* Transmit activity and ERI_RPENDING dma handles for receive
* activity. The actual dma mapping is done in the io functions
* eri_start() and eri_read_dma(),
* by calling the ddi_dma_addr_bind_handle.
* Dma resources are deallocated by calling ddi_dma_unbind_handle
* in eri_reclaim() for transmit and eri_read_dma(), for receive io.
*/
if (eri_use_dvma_tx &&
} else {
for (i = 0; i < ERI_TPENDING; i++) {
&dma_attr, DDI_DMA_DONTWAIT, 0,
if (rval != DDI_SUCCESS) {
alloc_stat++;
break;
}
}
erip->xmit_handle_cnt = i;
if (i)
if (alloc_stat)
return (alloc_stat);
}
if (eri_use_dvma_rx &&
} else {
for (i = 0; i < ERI_RPENDING; i++) {
if (rval != DDI_SUCCESS) {
alloc_stat++;
break;
}
}
erip->rcv_handle_cnt = i;
if (i)
if (alloc_stat)
return (alloc_stat);
}
/*
* Allocate tiny TX buffers
* Note: tinybufs must always be allocated in the native
* ordering of the CPU (always big-endian for Sparc).
* ddi_dma_mem_alloc returns memory in the native ordering
* of the bus (big endian for SBus, little endian for PCI).
* So we cannot use ddi_dma_mem_alloc(, &erip->ge_dev_attr)
* because we'll get little endian memory on PCI.
*/
DDI_DMA_DONTWAIT, 0,
return (++alloc_stat);
}
return (++alloc_stat);
}
DDI_DMA_DONTWAIT, 0,
return (++alloc_stat);
}
if (cookiec != 1)
return (++alloc_stat);
/*
* Keep handy limit values for RMD, TMD, and Buffers.
*/
/*
* Zero out xmit and RCV holders.
*/
return (alloc_stat);
}
/* <<<<<<<<<<<<<<<<< INTERRUPT HANDLING FUNCTION >>>>>>>>>>>>>>>>>>>> */
/*
* First check to see if it is our device interrupting.
*/
static uint_t
{
"eri_intr: start: erip %p gloregp %p status %X intmask %X",
/*
* Check if it is only the RX_DONE interrupt, which is
* the most frequent one.
*/
"eri_intr:(RX_DONE)erip %p gloregp %p status %X intmask %X",
goto rx_done_int;
}
/* Claim the first interrupt after initialization */
}
/* Check for interesting events */
if ((erisbits & ERI_G_STATUS_INTR) == 0) {
"eri_intr: Interrupt Not Claimed gsbits %X", erisbits);
#ifdef DEBUG
noteri++;
#endif
"eri_intr:MIF Config = 0x%X",
"eri_intr:MIF imask = 0x%X",
"eri_intr:INT imask = 0x%X",
"eri_intr:alias %X",
#ifdef ESTAR_WORKAROUND
#endif
return (serviced);
}
"eri_intr: eri not running");
return (serviced);
}
if (erisbits & ERI_G_STATUS_FATAL_ERR) {
"eri_intr: fatal error: erisbits = %X",
erisbits);
if (erip->rx_reset_issued) {
erip->rx_reset_issued = 0;
(void) eri_init_rx_channel(erip);
} else {
param_linkup = 0;
}
return (serviced);
}
if (erisbits & ERI_G_STATUS_NONFATAL_ERR) {
"eri_intr: non-fatal error: erisbits = %X", erisbits);
return (serviced);
}
}
if (erisbits & ERI_G_STATUS_MIF_INT) {
"eri_intr:MIF Interrupt:mii_status %X",
erip->mii_status);
"eri_intr: new MIF interrupt status %X XCVR status %X",
#else
#endif
if (!erip->openloop_autoneg)
else
}
"eri_intr:May have Read Interrupt status:status %X",
erisbits);
if ((erisbits & (ERI_G_STATUS_TX_INT_ME)) ||
}
if (erisbits & ERI_G_STATUS_RX_DONE) {
"eri_intr: packet received: rmdp = %X status %X",
/*
* Sync RMD before looking at it.
*/
/*
* Loop through each RMD.
*/
/* process one packet */
/*
* ERI RCV DMA fetches or updates four descriptors
* a time. Also we don't want to update the desc.
* batch we just received packet on. So we update
* descriptors for every 4 packets and we update
* the group of 4 after the current batch.
*/
if (!(rmdi % 4)) {
if (eri_overflow_reset &&
loop_limit = 1;
} else {
}
}
/*
* Sync the next RMD before looking at it.
*/
loop_limit--;
}
}
return (serviced);
}
/*
* Handle interrupts for fatal errors
* Need reinitialization.
*/
/* called with intrlock held */
static void
{
uint32_t pci_error_int = 0;
if (erisbits & ERI_G_STATUS_RX_TAG_ERR) {
"ERI RX Tag Error");
} else {
if (erisbits & ERI_G_STATUS_BUS_ERR_INT) {
pci_error_int = 1;
} else if (erisbits & ERI_G_STATUS_PERR_INT) {
} else {
"ERI Unknown fatal error");
}
}
/*
* PCI bus error
*/
"Bus Error Status %x", pci_status);
if (pci_status & PCI_DATA_PARITY_REP)
if (pci_status & PCI_SING_TARGET_ABORT)
if (pci_status & PCI_RCV_TARGET_ABORT)
if (pci_status & PCI_RCV_MASTER_ABORT)
if (pci_status & PCI_SING_SYSTEM_ERR)
if (pci_status & PCI_DATA_PARITY_ERR)
/*
* clear it by writing the value that was read back.
*/
}
}
/*
* Handle interrupts regarding non-fatal events.
* TXMAC, RXMAC and MACCTL events
*/
static void
{
#ifdef ERI_PM_WORKAROUND
#endif
if (erisbits & ERI_G_STATUS_TX_MAC_INT) {
if (txmac_sts & BMAC_TXSTS_TX_URUN) {
"tx fifo underrun");
}
if (txmac_sts & BMAC_TXSTS_MAXPKT_ERR) {
"tx max pkt size error");
}
if (txmac_sts & BMAC_TXSTS_NCC_EXP) {
"Normal collisions counter expired");
}
if (txmac_sts & BMAC_TXSTS_ECC_EXP) {
"Excessive collisions counter expired");
}
if (txmac_sts & BMAC_TXSTS_LCC_EXP) {
"Late collisions counter expired");
}
if (txmac_sts & BMAC_TXSTS_FCC_EXP) {
"first collisions counter expired");
}
if (txmac_sts & BMAC_TXSTS_DEFER_EXP) {
"defer timer expired");
}
if (txmac_sts & BMAC_TXSTS_PEAK_EXP) {
"peak attempts counter expired");
}
}
if (erisbits & ERI_G_STATUS_RX_NO_BUF) {
if (eri_overflow_reset)
}
if (erisbits & ERI_G_STATUS_RX_MAC_INT) {
if (rxmac_sts & BMAC_RXSTS_RX_OVF) {
#ifndef ERI_RMAC_HANG_WORKAROUND
erip->check_rmac_hang ++;
erip->check2_rmac_hang = 0;
"overflow intr %d: %8x wr:%2x rd:%2x",
#endif
if (eri_overflow_reset)
}
if (rxmac_sts & BMAC_RXSTS_ALE_EXP) {
"RX Alignment Error Counter Expired");
}
if (rxmac_sts & BMAC_RXSTS_CRC_EXP) {
"RX CRC Error Counter Expired");
}
if (rxmac_sts & BMAC_RXSTS_LEN_EXP) {
"RX Length Error Counter Expired");
}
if (rxmac_sts & BMAC_RXSTS_CVI_EXP) {
"Rx Code Violation Err Count Expired");
}
}
if (erisbits & ERI_G_STATUS_MAC_CTRL_INT) {
if (macctl_sts & ERI_MCTLSTS_PAUSE_RCVD) {
pause_time = ((macctl_sts &
ERI_MCTLSTS_PAUSE_TIME) >> 16);
"PAUSE Received. pause time = %X slot_times",
}
if (macctl_sts & ERI_MCTLSTS_PAUSE_STATE) {
"Transition to PAUSE state");
}
if (macctl_sts & ERI_MCTLSTS_NONPAUSE) {
"Transition to non-PAUSE state");
}
}
}
/*
* if this is the first init do not bother to save the
* counters.
*/
/*
 * NOTE(review): name line missing; reads-and-accumulates of the MAC
 * hardware counters into the soft statistics were elided — what
 * remains is the zeroing of each counter register after (presumably)
 * harvesting it.  Confirm against the complete source.
 */
static void
{
/* XXX What all gets added in ierrors and oerrors? */
PUT_MACREG(fecnt, 0);
PUT_MACREG(aecnt, 0);
PUT_MACREG(lecnt, 0);
PUT_MACREG(rxcv, 0);
PUT_MACREG(ltcnt, 0);
PUT_MACREG(nccnt, 0);
PUT_MACREG(excnt, 0);
PUT_MACREG(fccnt, 0);
/*
* Do not add code violations to input errors.
* They are already counted in CRC errors
*/
}
/*
 * NOTE(review): name line missing; an mblk allocator wrapper that
 * pads the requested size by 128 bytes before the (elided) allocb
 * call, returning NULL on failure and the mblk otherwise.  The
 * purpose of the extra 128 bytes is not visible here — confirm
 * against the complete source.
 */
mblk_t *
{
size += 128;
return (NULL);
}
return (mp);
}
/*
 * NOTE(review): name line missing; a second mblk allocator wrapper —
 * the allocation call and any alignment adjustment were elided.
 * Returns NULL on failure, the mblk otherwise.  Confirm against the
 * complete source.
 */
mblk_t *
{
return (NULL);
}
return (mp);
}
/*
* Hardware Dependent Functions
* New Section.
*/
/* <<<<<<<<<<<<<<<< Fast Ethernet PHY Bit Bang Operations >>>>>>>>>>>>>>>>>> */
static void
{
PUT_MIFREG(mif_bbdata, x);
}
/*
* To read the MII register bits according to the IEEE Standard
*/
/*
 * NOTE(review): name line missing; per the comment above, reads MII
 * register bits per the IEEE standard.  The MIF register reads under
 * each branch were elided, so as shown x is returned without being
 * assigned.  Selects the read path by transceiver type.
 */
static uint32_t
{
uint32_t x;
if (param_transceiver == INTERNAL_XCVR)
else
return (x);
}
/*
 * NOTE(review): name line missing; presumably the bit-bang MII write.
 * Frame sequence visible from the loop annotations: idle preamble,
 * 5-bit PHY address <AAAAA>, 5-bit register address <RRRRR>, 16 data
 * bits <D...D>.  The SEND_BIT calls inside each loop were elided.
 */
static void
{
int i;
(void) eri_bb_force_idle(erip);
for (i = 4; i >= 0; i--) { /* <AAAAA> */
}
for (i = 4; i >= 0; i--) { /* <RRRRR> */
}
for (i = 0xf; i >= 0; i--) { /* <DDDDDDDDDDDDDDDD> */
}
}
/* Return 0 if OK, 1 if error (Transceiver does not talk management) */
/*
 * NOTE(review): name line missing; presumably the bit-bang MII read.
 * Per the comment above: returns 0 if OK, 1 if the transceiver does
 * not talk management — the returned y is the second turnaround bit.
 * Shifts 16 data bits into *datap MSB-first, then clocks three extra
 * bits as a documented kludge for a hung transceiver.  The SEND_BIT
 * address/register framing inside the first two loops was elided.
 */
static uint32_t
{
int i;
uint32_t x;
uint32_t y;
*datap = 0;
(void) eri_bb_force_idle(erip);
for (i = 4; i >= 0; i--) { /* <AAAAA> */
}
for (i = 4; i >= 0; i--) { /* <RRRRR> */
}
GET_BIT_STD(x);
GET_BIT_STD(y); /* <TA> */
for (i = 0xf; i >= 0; i--) { /* <DDDDDDDDDDDDDDDD> */
GET_BIT_STD(x);
*datap += (x << i);
}
/* Kludge to get the Transceiver out of hung mode */
/* XXX: Test if this is still needed */
GET_BIT_STD(x);
GET_BIT_STD(x);
GET_BIT_STD(x);
return (y);
}
/*
 * NOTE(review): name line missing; presumably eri_bb_force_idle
 * (called by that name from the two bit-bang routines above).  Clocks
 * out 33 consecutive one-bits to put the MII management interface
 * into its idle state.
 */
static void
{
int i;
for (i = 0; i < 33; i++) {
SEND_BIT(1);
}
}
/* <<<<<<<<<<<<<<<<<<<<End of Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
/* <<<<<<<<<<<<< Frame Register used for MII operations >>>>>>>>>>>>>>>>>>>> */
#ifdef ERI_FRM_DEBUG
int frame_flag = 0;
#endif
/* Return 0 if OK, 1 if error (Transceiver does not talk management) */
/*
 * NOTE(review): name line missing, but the trace strings identify
 * this as the frame-register mii_read.  Returns 1 when no transceiver
 * is present, falls back to the bit-bang path when frame mode is
 * disabled (that call was elided), otherwise issues an MIF frame with
 * the PHY and register addresses and checks the TA0 bit: clear means
 * the read failed (return 1); set means *datap holds the result
 * (return 0).  The frame-register write/read accesses were elided.
 */
static uint32_t
{
if (param_transceiver == NO_XCVR)
return (1); /* No xcvr present */
if (!erip->frame_enable)
#ifdef ERI_FRM_DEBUG
if (!frame_flag) {
frame_flag = 1;
}
#endif
"Frame Reg :mii_read: phyad = %X reg = %X ",
(phyad << ERI_MIF_FRPHYAD_SHIFT) |
(regad << ERI_MIF_FRREGAD_SHIFT));
if ((frame & ERI_MIF_FRTA0) == 0) {
"MIF Read failure: data = %X", frame);
return (1);
} else {
"Frame Reg :mii_read: successful:data = %X ",
*datap);
return (0);
}
}
/*
 * NOTE(review): name line missing, but the trace strings identify
 * this as eri_mii_write (frame-register path).  When frame mode is
 * disabled it (presumably via the elided bit-bang call) writes and
 * returns; otherwise it issues the MIF write frame and checks TA0 to
 * distinguish failure from success — the failure branch's body was
 * elided.  The frame-register accesses themselves are also elided.
 */
static void
{
if (!erip->frame_enable) {
return;
}
"Frame Reg:eri_mii_write: phyad = %X \
(phyad << ERI_MIF_FRPHYAD_SHIFT) |
if ((frame & ERI_MIF_FRTA0) == 0) {
} else {
"Frame Reg:eri_mii_write: successful");
return;
}
}
/*
* Return TRUE if the given multicast address is one
* of those that this particular Stream is interested in.
*/
/*
 * NOTE(review): name line missing; per the comment above, returns
 * TRUE (1) when the given multicast address is one this Stream has
 * enabled.  The multicast test, all-multi test, hash computation, and
 * bucket/count loads were elided; what remains shows the shape: 0 if
 * not multicast, 1 if all-multi, 0 if the bucket is empty, else a
 * linear search of the bucket.
 */
static int
{
struct ether_addr *mcbucket;
int mccount;
int i;
int mchash;
/*
* Return FALSE if not a multicast address.
*/
return (0);
/*
* Check if all multicasts have been enabled for this Stream
*/
return (1);
/*
* Compute the hash value for the address and
* grab the bucket and the number of entries in the
* bucket.
*/
/*
* Return FALSE if no multicast addresses enabled for this Stream.
*/
if (mccount == 0)
return (0);
/*
* Otherwise, find it in the table.
*/
if (mcbucket)
for (i = 0; i < mccount; i++)
return (1);
return (0);
}
/*
* Send packet upstream.
* Assume mp->b_rptr points to ether_header.
*/
static void
{
int type;
/*
* While holding a reader lock on the linked list of streams structures,
* attempt to match the address criteria for each stream
* and pass up the raw M_DATA ("fastpath") or a DL_UNITDATA_IND.
*/
return;
}
/*
* Loop on matching open streams until (*acceptfunc)() returns NULL.
*/
!isgroupaddr) {
sizeof (struct ether_header);
} else {
} else {
}
/*
* Do the last one.
*/
type, isgroupaddr)))
} else {
}
}
/*
* Prefix msg with a DL_UNITDATA_IND mblk and return the new msg.
*/
static mblk_t *
{
int size;
/*
* Allocate an M_PROTO mblk for the DL_UNITDATA_IND.
*/
"allocb failed");
return (NULL);
}
/*
* Construct a DL_UNITDATA_IND primitive.
*/
+ ERI_ADDRL);
/*
* Link the M_PROTO and M_DATA together.
*/
return (nmp);
}
/*
* Test upstream destination sap and address match.
*/
static struct eristr *
struct ether_addr *addrp)
{
(flags & ERI_SALLPHYS) ||
flags & ERI_SALLMULTI))
return (sbp);
}
return (NULL);
}
/*
* Test upstream destination sap and address match for ERI_SALLPHYS only.
*/
/* ARGSUSED3 */
struct eristr *
{
((flags & ERI_SALLPHYS) ||
(flags & ERI_SALLMULTI)))
return (sbp);
}
return (NULL);
}
/* <<<<<<<<<<<<<<<<< PACKET TRANSMIT FUNCTIONS >>>>>>>>>>>>>>>>>>>> */
static int
{
uint32_t i, j;
uint_t start_offset = 0;
uint_t stuff_offset = 0;
struct ether_header *ehp;
if (!param_linkup) {
return (0);
}
}
#ifdef ERI_HWCSUM
if (flags & HCK_PARTIALCKSUM) {
start_offset += sizeof (*ehp);
stuff_offset += sizeof (*ehp);
}
}
#endif /* ERI_HWCSUM */
nmblks++; /* # of mbs */
}
/*
* update MIB II statistics
*/
/*
* ----------------------------------------------------------------------
* here we deal with 3 cases.
* 1. pkt has exactly one mblk
* 2. pkt has exactly two mblks
* 3. pkt has more than 2 mblks. Since this almost
* always never happens, we copy all of them into
* a msh with one mblk.
* for each mblk in the message, we allocate a tmd and
* figure out the tmd index and tmblkp index.
* ----------------------------------------------------------------------
*/
if (pmp)
return (1); /* bad case */
}
} else
/* Check if there are enough descriptors for this packet */
else
if (i > (ERI_TPENDING - 4))
goto notmds;
for (j = 0; j < nmblks; j++) { /* for one or two mb cases */
offset = (i * eri_tx_bcopy_max);
#ifdef ERI_HDX_BUG_WORKAROUND
if ((param_mode) || (eri_hdx_pad_enable == 0)) {
}
} else {
if (len_msg < 97) {
len_msg = 97;
}
}
#endif
offset = (i * eri_tx_bcopy_max);
2 * i, DDI_DMA_SYNC_FORDEV);
if (!j) {
}
} else { /* DDI DMA */
else
&count) != DDI_DMA_MAPPED) {
if (j) { /* free previous DMV resources */
(void) ddi_dma_unbind_handle(
}
} else {
}
if (pmp)
return (1); /* bad case */
}
if (!j) {
}
}
ctrl = 0;
/* first descr of packet */
if (!j) {
}
/* last descr of packet */
if ((j + 1) == nmblks) {
ctrl |= ERI_TMD_EOP;
}
erip->tx_cur_cnt++;
} /* for each nmp */
}
/* will hold reader lock */
} else
}
return (0);
~(ERI_G_MASK_TX_INT_ME));
}
}
if (pmp)
return (1);
}
/*
* Transmit completion reclaiming.
*/
/*
 * NOTE(review): name line missing; presumably eri_reclaim (the
 * allocthings comment in this file says DMA resources are freed "in
 * eri_reclaim() for transmit").  Walks completed TMDs from tcurp to
 * tcomp, unloading either the DVMA handle (eri_dvmaxh path, with
 * DONT_FLUSH) or the DDI DMA handle for each SOP descriptor that
 * still owns an mblk, counting them in reclaimed, which is returned.
 * The loop header, unload calls, and mblk frees were elided.
 */
static uint_t
{
int i;
/*
* Loop through each TMD starting from tcurp and upto tcomp.
*/
if (flags & (ERI_TMD_SOP))
/* dvma handle case */
if (bp) {
if (erip->eri_dvmaxh) {
(uint_t)DONT_FLUSH);
} else
/* dma handle case. */
}
reclaimed++;
}
if (reclaimed) {
}
}
return (reclaimed);
}
/* <<<<<<<<<<<<<<<<<<< PACKET RECEIVE FUNCTIONS >>>>>>>>>>>>>>>>>>> */
static void
/*
 * NOTE(review): truncated definition (apparently eri_read_dma, per the
 * message strings below) -- the name/parameter line and many statements
 * are missing in this revision.  Code lines are preserved verbatim;
 * only comments were touched.
 */
{
int len;
#ifdef ERI_RCV_CKSUM
#endif /* ERI_RCV_CKSUM */
#ifdef ERI_DONT_STRIP_CRC
/* CRC stripping is disabled, so drop the 4-byte FCS from the length. */
len -= 4;
#endif
/*
* In the event of RX FIFO overflow error, ERI REV 1.0 ASIC can
* corrupt packets following the descriptor corresponding to the
* overflow. To detect the corrupted packets, we disable the
* dropping of the "bad" packets at the MAC. The descriptor
* then would have the "BAD" bit set. We drop the overflowing
* packet and the packet following it. We could have done some sort
* of checking to determine if the second packet was indeed bad
* (using CRC or checksum) but it would be expensive in this
* routine, since it is run in interrupt context.
*/
"eri_read_dma: Corrupted Packet is Recieved flags %p length %d",
if ((flags & ERI_RMD_BAD) == 0)
}
return;
}
#ifdef ERI_DONT_STRIP_CRC
{
/*
* since we don't let the hardware strip the CRC in hdx
* then the driver needs to do it.
* this is to workaround a hardware bug
*/
/*
* Get the Checksum calculated by the hardware.
*/
/*
* Catch the case when the CRC starts on an odd
* boundary.
*/
}
hw_fcs &= 0xffff;
/*
* Now we can replace what the hardware wrote, make believe
* it got it right in the first place.
*/
}
#endif
/*
* Packet Processing
* Once we get a packet bp, we try to allocate a new mblk, nbp
* to replace this one. If we succeed, we map it to the current
* dma handle and update the descriptor with the new cookie. We
* then put bp in our read service queue erip->ipq, if it exists
* or we just send bp up to the stream expecting it.
* If allocation of the new mblk fails, we implicitly drop the
* current packet, i.e. do not pass up the mblk and re-use it.
* Re-mapping is not required.
*/
/* Below the bcopy threshold the packet is presumably copied out
 * instead of remapped -- body truncated; confirm in full source. */
if (len < eri_rx_bcopy_max) {
/* Add the First Byte offset to the b_rptr */
#ifdef ERI_RCV_CKSUM
#else
#endif
} else {
/*
* mblk allocation has failed. Re-use the old mblk for
* the next packet. Re-mapping is not required since
* the same mblk and dma cookie is to be used again.
*/
"allocb fail");
}
} else {
/*
* How do we harden this, specially if unbind
* succeeds and then bind fails?
* If Unbind fails, we can leave without updating
* the descriptor but would it continue to work on
* next round?
*/
DDI_DMA_DONTWAIT, 0, &c, &ccnt);
/* Add the First Byte offset to the b_rptr */
#ifdef ERI_RCV_CKSUM
#else
#endif
} else {
/*
* mblk allocation has failed. Re-use the old mblk for
* the next packet. Re-mapping is not required since
* the same mblk and dma cookie is to be used again.
*/
"allocb fail");
}
}
}
#ifdef ERI_SERVICE_ROUTINE
static int
{
struct ether_header *ehp;
int len;
/*
* First check if the stream is still there.
* If the stream is detached free all the mblks
*/
}
return (-1);
}
"eri_rsrv: Illegal Size Recieved len %x ", len);
continue;
}
/*
* ERI 1.0 has an address filtering bug in which
* it doesn't do any filtering for the last byte of
* the destination MAC address. Thus packets which
* are not intended for us can go thu. Here we filter
* out these packets. This bug will be fixed in the
* next Spin of the ERI ASIC.
*/
#ifdef ERI_MAC_ADDR_FLTR_BUG
"Host/Destination MAC address mismatch ");
continue;
}
#endif
/*
* update MIB II statistics
*/
#ifdef ERI_RCV_CKSUM
}
#endif /* ERI_RCV_CKSUM */
if (canputnext(q))
else {
}
} else if ((type == ETHERTYPE_IPV6) &&
IS_NOT_MULTICAST(ehp)) {
#ifdef ERI_RCV_CKSUM
}
#endif /* ERI_RCV_CKSUM */
if (canputnext(q))
else {
}
} else {
/*
* Strip the PADs for 802.3
*/
+ sizeof (struct ether_header) + type;
}
}
return (0);
}
#endif /* ERI_SERVICE_ROUTINE */
#define LINK_STAT_DISPLAY_TIME 20
static void
/*
 * NOTE(review): truncated definition -- name/arguments and most of the
 * body are missing.  The surviving test of erip->linksts_msg together
 * with the LINK_STAT_DISPLAY_TIME define above suggests this throttles
 * or schedules link-status messages; confirm against the full source.
 */
{
if ((erip->linksts_msg) &&
}
}
static int
/*
 * NOTE(review): truncated eri_init_xfer_params.  Surviving shape: reset
 * the DMA-mode fields, then a long series of DDI property lookups (only
 * the "== DDI_PROP_SUCCESS) {" residue of each call remains) that seed
 * the param_* table from the "transfer-speed" property and .conf file
 * settings.  Returns 0 on success, -1 on an early failure.
 */
{
int prop_len = sizeof (int);
int i;
int autoneg_conf;
int anar_100T4_conf;
int ipg0_conf, lance_mode_conf;
erip->xmit_dma_mode = 0;
erip->rcv_dma_mode = 0;
return (-1);
}
/*
* Set up the start-up values for user-configurable parameters
* Get the values from the global variables first.
* Use the MASK to limit the value to allowed maximum.
*/
/*
* The link speed may be forced to either 10 Mbps or 100 Mbps using the
* property "transfer-speed". This may be done in OBP by using the command
* "apply transfer-speed=<speed> <device>". The speed may be either 10 or 100.
*/
== DDI_PROP_SUCCESS) {
"eri_init_xfer_params: transfer-speed property = %X", i);
/* Forcing a speed disables auto-negotiation and all advertised modes. */
param_autoneg = 0; /* force speed */
param_anar_100T4 = 0;
param_anar_10fdx = 0;
param_anar_10hdx = 0;
param_anar_100fdx = 0;
param_anar_100hdx = 0;
param_anar_asm_dir = 0;
param_anar_pause = 0;
if (i == 10)
param_anar_10hdx = 1;
else if (i == 100)
param_anar_100hdx = 1;
}
/*
* Get the parameter values configured in .conf file.
*/
== DDI_PROP_SUCCESS) {
"eri_init_xfer_params: ipg1 property %X", ipg1_conf);
}
== DDI_PROP_SUCCESS) {
}
== DDI_PROP_SUCCESS) {
}
== DDI_PROP_SUCCESS) {
}
== DDI_PROP_SUCCESS) {
}
== DDI_PROP_SUCCESS) {
}
== DDI_PROP_SUCCESS) {
}
== DDI_PROP_SUCCESS) {
}
== DDI_PROP_SUCCESS) {
}
== DDI_PROP_SUCCESS) {
}
== DDI_PROP_SUCCESS) {
}
== DDI_PROP_SUCCESS) {
}
== DDI_PROP_SUCCESS) {
}
== DDI_PROP_SUCCESS) {
}
== DDI_PROP_SUCCESS) {
}
== DDI_PROP_SUCCESS) {
}
== DDI_PROP_SUCCESS) {
}
== DDI_PROP_SUCCESS) {
}
if (link_pulse_disabled)
== DDI_PROP_SUCCESS) {
"eri_init_xfer_params: dis link-pulse prop.");
}
return (0);
}
static struct eri *
{
int instance;
if (eriup)
}
return (NULL);
}
break;
return (erip);
}
static void
{
return;
}
switch (cmd) {
case ERI_ND_GET:
"eri_process_ndd_ioctl:ND_GET");
"ndd_ioctl: _nd_getset nak");
return;
}
"ndd_ioctl: _nd_getset ack");
break;
case ERI_ND_SET:
"eri_process_ndd_ioctl:ND_SET");
param_autoneg = 0xff;
param_anar_asm_dir = 0xff;
param_anar_pause = 0xff;
return;
}
if (old_device != param_device) {
if (new_device != -1 &&
break;
}
return;
}
device = new_device;
return;
}
if (param_autoneg != 0xff) {
"ndd_ioctl: new param_autoneg %d",
param_linkup = 0;
} else {
if ((old_use_int_xcvr != param_use_intphy) ||
(old_default_link != param_default_link) ||
(old_select_link != param_select_link)) {
param_linkup = 0;
} else if ((old_ipg1 != param_ipg1) ||
(old_ipg2 != param_ipg2) ||
(old_ipg0 != param_ipg0) ||
(old_lance_mode != param_lance_mode)) {
param_linkup = 0;
}
}
break;
default:
break;
}
}
static int
/*
 * NOTE(review): truncated kstat update callback.  For the read path the
 * surviving right-hand sides copy the driver's soft statistics
 * (esp->...) into named-kstat fields (the left-hand sides are elided);
 * in this revision the KSTAT_WRITE branch returns 0 without touching
 * driver state.
 */
{
/*
* Update all the stats by reading all the counter registers.
* Counter register stats are not updated till they overflow
* and interrupt.
*/
}
if (rw == KSTAT_WRITE) {
/*
* MIB II kstat variables
*/
#ifdef kstat
#endif /* kstat */
return (0);
} else {
/*
* MIB II kstat variables
*/
= esp->txmac_maxpkt_err;
= esp->excessive_coll;
= esp->defer_timer_exp;
= esp->peak_attempt_cnt;
= esp->no_free_rx_desc;
= esp->rx_length_err;
= esp->rx_code_viol_err;
= esp->pause_rxcount;
= esp->pause_oncount;
= esp->pause_offcount;
= esp->pause_time_count;
= esp->rx_toolong_pkts;
}
return (0);
}
static void
{
#ifdef kstat
sizeof (struct erikstat) / sizeof (kstat_named_t),
KSTAT_FLAG_PERSISTENT)) == NULL) {
#else
#endif /* kstat */
return;
}
/*
* MIB II kstat variables
*/
"parity_error",
"pci_error_interrupt",
"pci_data_parity_err", KSTAT_DATA_ULONG);
"pci_signal_target_abort", KSTAT_DATA_ULONG);
"pci_rcvd_target_abort", KSTAT_DATA_ULONG);
"pci_rcvd_master_abort", KSTAT_DATA_ULONG);
"pci_signal_system_err", KSTAT_DATA_ULONG);
"pci_det_parity_err", KSTAT_DATA_ULONG);
/*
* 64-bit kstats : PSARC 1997/198
*/
}
/* <<<<<<<<<<<<<<<<<<< PERFORMANCE MEASUREMENT FUNCTIONS >>>>>>>>>>> */
/* The following code is used for performance metering and debugging; */
/* This routine is invoked via "TIME_POINT(label)" macros, which will */
/* store the label and a timestamp. This allows reconstruction of */
/* execution sequences and the timestamps associated with them. */
#ifdef TPOINTS
/* Time trace points */
int time_point_active;	/* nonzero enables trace-point collection */
static int time_point_offset, time_point_loc;
#define POINTS 1024
int time_points[POINTS];	/* trace buffer; offset advances 2 per event */
/*
 * Record a trace point.  NOTE(review): truncated in this revision --
 * the stores of 'loc' and the timestamp into time_points[] and the
 * initialization of time_point_base are missing, so 'loc' is unused
 * here and the time_point_base == 0 branch is always taken.
 */
void
eri_time_point(int loc)
{
static hrtime_t time_point_base;
if (time_point_base == 0) {
time_point_offset = 0;
} else {
time_point_offset += 2;
if (time_point_offset >= POINTS)
time_point_offset = 0; /* wrap at end */
/* time_point_active = 0; disable at end */
}
}
#else
#define TPOINT(x)
#endif
/* <<<<<<<<<<<<<<<<<<<<<<< NDD SUPPORT FUNCTIONS >>>>>>>>>>>>>>>>>>> */
/*
*/
/* Free the Named Dispatch Table by calling eri_nd_free */
static void
{
}
/*
* Extracts the value from the eri parameter array and prints the
* parameter value. cp points to the required parameter.
*/
/* ARGSUSED */
static int
/*
 * NOTE(review): truncated ndd "get" handler (see the block comment
 * above).  The surviving loop counts the decimal digits of param_val
 * (param_len starts at 1 to cover the NUL terminator) in order to size
 * the reply mblk; the actual formatting call is elided.
 */
{
int param_len = 1;
int ok;
/*
* Calculate space required in mblk.
* Remember to include NULL terminator.
*/
do {
param_len++;
param_val /= 10;
} while (param_val);
if (ok == 0) {
}
return (ok);
}
/*
* Check if there is space for p_val at the end of the mblk.
* If not, allocate new 1k mblk.
*/
static int
{
return (ENOMEM);
}
return (0);
}
/*
* Register each element of the parameter array with the
* named dispatch handler. Each element is loaded using
* eri_nd_load()
*/
/* ARGSUSED */
static int
/* cnt gives the count of the number of */
/* elements present in the parameter array */
{
int i, k;
/* First 4 elements are read-only */
for (i = 0, k = 0; i < 4; i++, k++, eripa++)
return (B_FALSE);
}
/* Next 10 elements are read and write */
for (i = 0; i < 10; i++, k++, eripa++)
return (B_FALSE);
}
}
/* next 12 elements are read-only */
for (i = 0; i < 12; i++, k++, eripa++)
return (B_FALSE);
}
/* Next 5 elements are read and write */
for (i = 0; i < 5; i++, k++, eripa++)
return (B_FALSE);
}
}
/* next 2 elements are read-only */
for (i = 0; i < 2; i++, k++, eripa++)
return (B_FALSE);
}
/* Next 2 element is read and write */
for (i = 0; i < 2; i++, k++, eripa++)
return (B_FALSE);
}
}
/* next 1 element is read-only */
for (i = 0; i < 1; i++, k++, eripa++)
return (B_FALSE);
}
/* Next 5 elements are read and write */
for (i = 0; i < 5; i++, k++, eripa++)
return (B_FALSE);
}
}
/* next 10 elements are read-only */
for (i = 0; i < 10; i++, k++, eripa++)
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Sets the eri parameter to the value in the param_register using
* eri_nd_load().
*/
/* ARGSUSED */
static int
{
char *end;
return (EINVAL);
}
return (0);
}
/* Free the table pointed to by 'ndp' */
static void
{
}
}
static int
{
int err;
char *valp;
if (!nd_param)
return (B_FALSE);
return (B_TRUE);
}
/*
* NOTE - logic throughout nd_xxx assumes single data block for ioctl.
* However, existing code sends in some big buffers.
*/
}
return (B_FALSE);
break;
}
while (*valp++)
;
case ND_GET:
/*
* (XXX) hack: "*valp" is size of user buffer for copyout. If result
* of action routine is too big, free excess and return ioc_rval as buf
* size needed. Return as many mblocks as will fit, free the rest. For
* backward compatibility, assume size of orig ioctl buffer if "*valp"
* bad or not given.
*/
if (valp)
{
while (mp2) {
}
}
if (!err) {
/* Tack on the null */
if (!err) {
if (excess > 0) {
&nmp, 1);
if (!err)
else
size_out = 0;
}
} else
size_out = 0;
}
break;
case ND_SET:
if (valp) {
== 0)) {
}
}
break;
default:
"nd_getset: cmd is default");
break;
}
return (B_TRUE);
}
/*
* Load 'name' into the named dispatch table pointed to by 'ndp'.
* 'ndp' should be the address of a char pointer cell. If the table
* does not exist (*ndp == 0), a new table is allocated and 'ndp'
* is stuffed. If there is not enough space in the table for a new
* entry, more space is allocated.
*/
static boolean_t
/*
 * NOTE(review): truncated eri_nd_load (contract described in the block
 * comment above): registers 'name' in the named-dispatch table pointed
 * to by *ndp, allocating or growing the table when nd_free_count runs
 * out.  Most statements are elided in this revision; B_FALSE paths are
 * allocation/lookup failures.
 */
{
if (!nd_pparam)
return (B_FALSE);
== NULL)
return (B_FALSE);
}
goto fill_it;
}
}
return (B_FALSE);
} else {
nd->nd_free_count--;
}
}
;
nd->nd_free_count--;
return (B_TRUE);
}
/*
* Hardening Functions
* New Section
*/
#ifdef DEBUG
/*VARARGS*/
/* ARGSUSED */
static void
char *file,
int line,
char *fmt, ...)
{
char msg_buffer[255];
static kmutex_t eridebuglock;
static int eri_debug_init = 0;
if (!eri_debug_level)
return;
if (eri_debug_init == 0) {
/*
* Block I/O interrupts
*/
eri_debug_init = 1;
}
if (eri_msg_out & ERI_CON_MSG) {
if (erip)
else
line, msg_buffer);
}
}
}
#endif
/* VARARGS */
/* ARGSUSED */
static void
{
char msg_buffer[255];
return;
}
if (severity == SEVERITY_HIGH) {
} else switch (type) {
case ERI_VERB_MSG:
break;
case ERI_LOG_MSG:
break;
case ERI_BUF_MSG:
break;
case ERI_CON_MSG:
default:
break;
}
}
/*
* Transceiver (xcvr) Functions
* New Section
*/
/*
* eri_stop_timer function is used by a function before doing link-related
* processing. It locks the "linklock" to protect the link-related data
* structures. This lock will be subsequently released in eri_start_timer().
*/
static void
{
}
}
/*
* If msec parameter is zero, just release "linklock".
*/
static void
{
if (msec) {
}
}
}
static int
/*
 * NOTE(review): truncated eri_new_xcvr.  Surviving structure: inspect
 * the MIF config (cfg) to choose between an external transceiver, the
 * internal transceiver (ERI_MIF_CFGM0), or none, isolating the old one
 * on a changeover; returns a status the caller uses to decide whether
 * to reset the PHY.  The selection writes themselves are elided.
 */
{
int status;
int old_transceiver;
"cfg value = %X", cfg);
/*
* An External Transceiver was found and it takes priority
* over an internal, given the use_int_xcvr flag
* is false.
*/
if (old_transceiver != EXTERNAL_XCVR) {
/*
* External transceiver has just been plugged
* in. Isolate the internal Transceiver.
*/
if (old_transceiver == INTERNAL_XCVR) {
}
}
/*
* Select the external Transceiver.
*/
} else if (cfg & ERI_MIF_CFGM0) {
/*
* An Internal Transceiver was found or the
* use_int_xcvr flag is true.
*/
if (old_transceiver != INTERNAL_XCVR) {
/*
* The external transceiver has just been
* disconnected or we're moving from a no
* transceiver state.
*/
if ((old_transceiver == EXTERNAL_XCVR) &&
(cfg & ERI_MIF_CFGM0)) {
}
}
/*
* Select the internal transceiver.
*/
} else {
/*
* Did not find a valid xcvr.
*/
"Eri_new_xcvr : Select None");
}
/* residue of a ddi_dev_is_needed/timeout-style call -- truncated */
(void *)4000) == DDI_SUCCESS)
}
return (status);
}
/*
* Compare our xcvr in our structure to the xcvr that we get from
* eri_check_mii_xcvr(). If they are different then mark the
* link down, reset xcvr, and return.
*
* Note: without the MII connector, conditions cannot change in a way
* that would select an external phy, so this code has been simplified
* to not even call the function or to possibly change the xcvr.
*/
/* ARGSUSED */
static void
{
if (erip->openloop_autoneg) {
"eri_check_link:openloop stat %X mii_status %X",
if (!(stat & PHY_BMSR_LNKSTS) &&
if (param_speed) {
control &= ~PHY_BMCR_100M;
param_anlpar_100hdx = 0;
param_anlpar_10hdx = 1;
param_speed = 0;
} else {
control |= PHY_BMCR_100M;
param_anlpar_100hdx = 1;
param_anlpar_10hdx = 0;
param_speed = 1;
}
"eri_check_link: trying speed %X stat %X",
param_speed, stat);
erip->openloop_autoneg ++;
} else {
erip->openloop_autoneg = 0;
if (erip->openloop_autoneg)
}
return;
}
#ifdef ERI_RMAC_HANG_WORKAROUND
/*
* Check if rx hung.
*/
if (erip->check_rmac_hang) {
"check1 %d: macsm:%8x wr:%2x rd:%2x",
erip->check_rmac_hang = 0;
erip->check2_rmac_hang ++;
return;
}
if (erip->check2_rmac_hang) {
"check2 %d: macsm:%8x wr:%2x rd:%2x",
erip->check2_rmac_hang = 0;
"RX hang: Reset mac");
return;
}
}
}
#endif
/*
* Check if tx hung.
*/
#ifdef ERI_TX_HUNG
#ifdef LATER_SPLIT_TX_RX
#endif
return;
}
#endif
#ifdef ERI_PM_WORKAROUND
(void *)4000) == DDI_SUCCESS)
}
#endif
else
}
static void
{
int restart_autoneg = 0;
"eri_mif_check: mif_mask: %X, %X, %X",
/*
* Now check if someone has pulled the xcvr or
* a new xcvr has shown up
* If so try to find out what the new xcvr setup is.
*/
(param_transceiver == NO_XCVR)) {
"No status transceiver gone");
if (eri_new_xcvr(erip)) {
if (param_transceiver != NO_XCVR) {
/*
* Reset the new PHY and bring up the link
*/
(void) eri_reset_xcvr(erip);
}
}
return;
}
mif_ints |= PHY_BMSR_ANC;
"eri_mif_check: Set ANC bit mif_data %X mig_ints %X",
}
"Auto-negotiation interrupt.");
/*
* Switch off Auto-negotiation interrupts and switch on
* Link ststus interrupts.
*/
"parallel detection fault");
/*
* Consider doing open loop auto-negotiation.
*/
"Going into Open loop Auto-neg");
if (param_anar_100fdx || param_anar_100hdx) {
control |= PHY_BMCR_100M;
param_anlpar_100hdx = 1;
param_anlpar_10hdx = 0;
param_speed = 1;
} else if (param_anar_10fdx ||
control &= ~PHY_BMCR_100M;
param_anlpar_100hdx = 0;
param_anlpar_10hdx = 1;
param_speed = 0;
} else {
"Transceiver speed set incorrectly.");
return;
}
param_anlpar_100fdx = 0;
param_anlpar_10fdx = 0;
param_mode = 0;
return;
}
"an_common = 0x%X", an_common);
param_speed = 1;
param_speed = 0;
} else an_common = 0x0;
if (!an_common) {
"Transceiver: anar"
" not set with speed selection");
}
"Link duplex = 0x%X", param_mode);
"Link speed = 0x%X", param_speed);
/* mif_ints |= PHY_BMSR_LNKSTS; prevent double msg */
/* mif_data |= PHY_BMSR_LNKSTS; prevent double msg */
}
if (mif_ints & PHY_BMSR_LNKSTS) {
if (mif_data & PHY_BMSR_LNKSTS) {
/*
* Program Lu3X31T for mininum transition
*/
if (eri_phy_mintrans) {
}
/*
* The link is up.
*/
param_linkup = 1;
if (param_mode)
else
} else {
"Link down.");
param_linkup = 0;
if (param_autoneg) {
restart_autoneg = 1;
}
}
} else {
if (mif_data & PHY_BMSR_LNKSTS) {
if (!param_linkup) {
"eri_mif_check: MIF data link up");
/*
* Program Lu3X31T for minimum transition
*/
if (eri_phy_mintrans) {
(void) eri_mii_read(erip, 0,
&old_mintrans);
}
/*
* The link is up.
*/
param_linkup = 1;
if (param_mode)
else
}
} else if (param_linkup) {
/*
* The link is down now.
*/
"eri_mif_check:Link was up and went down");
param_linkup = 0;
if (param_autoneg)
restart_autoneg = 1;
}
}
if (restart_autoneg) {
/*
* Restart normal auto-negotiation.
*/
"eri_mif_check:Restart AUto Negotiation");
erip->openloop_autoneg = 0;
param_mode = 0;
param_speed = 0;
param_anlpar_100T4 = 0;
param_anlpar_100fdx = 0;
param_anlpar_100hdx = 0;
param_anlpar_10fdx = 0;
param_anlpar_10hdx = 0;
param_aner_lpancap = 0;
&control);
}
if (mif_ints & PHY_BMSR_JABDET) {
if (mif_data & PHY_BMSR_JABDET) {
"Jabber detected.");
/*
* Reset the new PHY and bring up the link
*/
(void) eri_reset_xcvr(erip);
/*
* eri_FAULT_MSG1(erip, SEVERITY_NONE, XCVR_MSG,
* "Unable to reset transceiver.");
*/
}
}
}
#define PHYRST_PERIOD 500
static int
{
int n;
"eri_reset_xcvr:ifspeed %X param_speed %X mif_mask %X",
#endif
/*
* Reset Open loop auto-negotiation this means you can try
* Normal auto-negotiation, until you get a Multiple Link fault
* at which point you try 100M half duplex then 10M half duplex
* until you get a Link up.
*/
erip->openloop_autoneg = 0;
/*
* Reset the xcvr.
*/
/* Check for transceiver reset completion */
n = 1000;
while (--n > 0) {
/* Transceiver does not talk MII */
"eri_reset_xcvr: no mii");
}
if ((control & PHY_BMCR_RESET) == 0)
goto reset_done;
}
"eri_reset_xcvr:reset_failed n == 0, control %x",
control);
goto eri_reset_xcvr_failed;
"eri_reset_xcvr: reset complete in %d us",
(1000 - n) * PHYRST_PERIOD);
"eri_reset_xcvr: control %x stat %x anar %x",
/*
* Initialize the read only transceiver ndd information
* the values are either 0 or 1.
*/
/*
* Match up the ndd capabilities with the transceiver.
*/
/*
* Select the operation mode of the transceiver.
*/
if (param_autoneg) {
"Phy Supports Auto-negotiation.");
/*
* Initialize our auto-negotiation capabilities.
*/
anar = PHY_SELECTOR;
if (param_anar_100T4)
anar |= PHY_ANAR_T4;
if (param_anar_100fdx)
anar |= PHY_ANAR_TXFDX;
if (param_anar_100hdx)
anar |= PHY_ANAR_TX;
if (param_anar_10fdx)
anar |= PHY_ANAR_10FDX;
if (param_anar_10hdx)
anar |= PHY_ANAR_10;
"anar = %x", anar);
} else {
"Phy Doesn't support Auto-negotiation.");
}
/* Place the Transceiver in normal operation mode */
(control & ~PHY_BMCR_ISOLATE));
}
/*
* If Lu3X31T then allow nonzero eri_phy_mintrans
*/
if (eri_phy_mintrans &&
eri_phy_mintrans = 0;
}
/*
* Initialize the mif interrupt mask.
*/
/*
* Establish link speeds and do necessary special stuff based
* in the speed.
*/
"eri_reset_xcvr: speed_100 %d speed_10 %d",
if (!(param_anar_10fdx) &&
(param_anar_10hdx) &&
(erip->link_pulse_disabled)) {
param_speed = 0;
param_mode = 0;
nicr &= ~PHY_NICR_LD;
param_linkup = 1;
if (param_mode)
else
}
}
/*
* Clear the autonegotitation before re-starting
*/
/* eri_mii_write(erip, ERI_PHY_BMCR, control); */
if (param_autoneg) {
/*
* Setup the transceiver for autonegotiation.
*/
"Setup for Auto-negotiation");
/*
* Clear the Auto-negotiation before re-starting
*/
/*
* Switch on auto-negotiation.
*/
} else {
/*
* Force the transceiver.
*/
"Setup for forced mode");
/*
* Switch off auto-negotiation.
*/
if (speed_100) {
control |= PHY_BMCR_100M;
param_aner_lpancap = 0; /* Clear LP nway */
param_anlpar_10fdx = 0;
param_anlpar_10hdx = 0;
param_speed = 1;
if (param_mode) {
param_anlpar_100hdx = 0;
} else {
}
} else if (speed_10) {
control &= ~PHY_BMCR_100M;
param_aner_lpancap = 0; /* Clear LP nway */
param_anlpar_100fdx = 0;
param_anlpar_100hdx = 0;
param_anlpar_100T4 = 0;
param_speed = 0;
if (param_mode) {
param_anlpar_10hdx = 0;
} else {
}
} else {
"Transceiver speed set incorrectly.");
}
if (param_mode) {
control |= PHY_BMCR_FDX;
}
"control = %x status = %x param_mode %d",
/*
* if (param_mode) {
* control |= PHY_BMCR_FDX;
* }
* control &= ~(PHY_BMCR_FDX | PHY_BMCR_ANE | PHY_BMCR_RAN);
* eri_mii_write(erip, ERI_PHY_BMCR, control);
*/
}
#ifdef DEBUG
#endif
return (0);
return (1);
}
static void
{
if (!param_autoneg && !param_linkup &&
(param_anar_10fdx | param_anar_10hdx)) {
return;
}
if (!param_autoneg && !param_linkup &&
/*
* May have to set link partner's speed and mode.
*/
"May have to set link partner's speed and duplex mode.");
}
}
#endif
static void
/*
 * NOTE(review): truncated definition of what appears to be
 * eri_display_link_status (per the debug string).  The switch residue
 * selects message text by link speed (SPEED_100 / SPEED_10) and the
 * final if/else picks full- vs half-duplex wording from param_mode;
 * the message assembly into link_up_msg is elided.
 */
{
char link_up_msg[64];
"eri_display_link_status: ifspeed %X param_mode %d",
case SPEED_100:
break;
case SPEED_10:
break;
default:
}
if (param_mode)
else
}
static void
{
if (enable == MIF_POLL_START) {
if (erip->mifpoll_enable &&
!erip->openloop_autoneg) {
"Starting mif poll: normal start");
} else {
"Starting mif poll:fault start");
}
} else if (enable == MIF_POLL_STOP) {
"Stopping mif poll");
}
}
/*
* This function is cut&pasted from mi.c, part of IP source base.
* By defining this function in eri, we remove dependency from ip module.
* This function can be removed once kernel level 'strtol' becomes available.
*/
static long
/*
 * NOTE(review): truncated strtol clone (copied from mi.c per the
 * comment above, to avoid a dependency on the ip module).  Surviving
 * logic: skip a leading sign, auto-detect base 8/10/16 from a "0"/"0x"
 * prefix when base == 0, accumulate digits into 'value', and negate on
 * a leading '-'.  The digit-conversion loop body is largely elided.
 */
{
char *cp;
int digits;
long value;
cp++;
if (is_negative)
cp++;
if (base == 0) {
base = 10;
if (*cp == '0') {
base = 8;
cp++;
base = 16;
cp++;
}
}
}
value = 0;
else
break;
break;
}
/*
* Note: we cast away const here deliberately
*/
if (is_negative)
return (value);
}
#ifdef XCVR
static int
{
switch (vendor_id) {
case PHY_VENDOR_LUCENT:
if (display_msg)
break;
/*
* This can happen if the xcvr is changed after the attach of a
* I/O board or a future NIC.
*/
default:
erip->device_rev = 0;
param_linkup = 0;
return (1);
}
return (0);
}
#endif
/* Decide if transmitter went dead and reinitialize everything */
#ifdef ERI_TX_HUNG
static int eri_txhung_limit = 2;
static int
/*
 * NOTE(review): truncated ERI_TX_HUNG detector (see eri_txhung_limit
 * above).  Decides whether the transmitter has stalled; nearly the
 * entire body is elided in this revision.
 */
{
/* Something needs to be sent out but it is not going out */
else
}
#endif
/* Process a DL_CAPABILITY_REQ */
static void
{
int prim;
if (icap->dl_sub_length == 0) {
/* IP wants to have a list of the capabilities we support. */
"for all capabilities");
return;
}
} else {
/*
* IP is probably trying to enable or disable one or more
* capabilities. Reuse received mp to construct reply.
*/
"DL_CAPABILITY_REQ\n");
}
/* send reply back up */
}
static mblk_t *
/*
 * NOTE(review): truncated definition -- name/parameter line missing.
 * From the surviving code this builds the DL_CAPABILITY_ACK reply
 * advertising hardware-checksum support (ack header + sub-capability
 * header + dl_capab_hcksum_t) and returns it (nmp), or NULL when the
 * reply mblk cannot be allocated.
 */
{
/* Size of reply to send back up, say we support hardware checksum */
size = sizeof (dl_capability_ack_t) +
sizeof (dl_capability_sub_t) +
sizeof (dl_capab_hcksum_t);
/* allocate result mblk and get it started */
return (NULL);
/* update mblk info */
/* dl_capability_ack_t, one per message */
/* dl_capability_sub_t for hardware checksum offload */
/* dl_capab_hcksum_t */
/* tell ip that we're capable, but don't enable until ip says so */
return (nmp);
}
/* Process a non-zero length DL_CAPABILITY_REQ message */
static void
{
/* Make sure that IP supplied correct dl_sub_length */
"DL_CAPABILITY_REQ, invalid dl_sub_length (%d)\n",
return;
}
/*
* There are sub-capabilities. Process the ones we know about.
* Loop until we don't have room for another sub-cap header..
*/
case DL_CAPAB_HCKSUM:
"eri_dlcap_enable: malformed "
"sub-capability too long for mblk");
break;
}
else
break;
default:
/* Unknown sub-capability; ignore it */
break;
}
}
}