/* bfe.c revision 954c6b5ec18168de579023aa3735121108026e9b */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/ethernet.h>
#include <sys/sysmacros.h>
#include <sys/dditypes.h>
#include <sys/byteorder.h>
#include <sys/mac_provider.h>
#include <sys/mac_ether.h>
#include "bfe.h"
#include "bfe_hw.h"
/*
* Broadcom BCM4401 chipsets use two rings :
*
* - One TX : For sending packets down the wire.
* - One RX : For receiving packets.
*
* Each ring can have any number of descriptors (configured during attach).
* Each descriptor has address (desc_addr) and control (desc_ctl) fields;
* the address holds a DMA buffer for a packet.
* The descriptor table is allocated first and then a DMA buffer (for a packet)
* is allocated and linked to each descriptor.
*
* During an interrupt, the stat register will point to the current
* descriptor being processed.
*
* Here's an example of TX and RX ring :
*
* TX:
*
* Base of the descriptor table is programmed using BFE_DMATX_CTRL control
* register. Each 'addr' points to DMA buffer (or packet data buffer) to
* be transmitted and 'ctl' has the length of the packet (usually MTU).
*
* ----------------------|
* | addr |Descriptor 0 |
* | ctl | |
* ----------------------|
* | addr |Descriptor 1 | SOF (start of the frame)
* | ctl | |
* ----------------------|
* | ... |Descriptor... | EOF (end of the frame)
* | ... | |
* ----------------------|
* | addr |Descriptor 127|
* | ctl | EOT | EOT (End of Table)
* ----------------------|
*
* 'r_curr_desc' : pointer to current descriptor which can be used to transmit
* a packet.
* 'r_avail_desc' : decremented whenever a packet is being sent.
* 'r_cons_desc' : incremented whenever a packet is sent down the wire and
* notified by an interrupt to bfe driver.
*
* RX:
*
* Base of the descriptor table is programmed using BFE_DMARX_CTRL control
* register. Each 'addr' points to DMA buffer (or packet data buffer). 'ctl'
* contains the size of the DMA buffer and all the DMA buffers are
* pre-allocated during attach and hence the maximum size of the packet is
* also known (r_buf_len from the bfe_rint_t structure). During RX interrupt
* the packet length is embedded in bfe_header_t which is added by the
* chip in the beginning of the packet.
*
* ----------------------|
* | addr |Descriptor 0 |
* | ctl | |
* ----------------------|
* | addr |Descriptor 1 |
* | ctl | |
* ----------------------|
* | ... |Descriptor... |
* | ... | |
* ----------------------|
* | addr |Descriptor 127|
* | ctl | EOT | EOT (End of Table)
* ----------------------|
*
* 'r_curr_desc' : pointer to current descriptor while receiving a packet.
*
*/
#define MODULE_NAME "bfe"
/*
* Used for checking PHY (link state, speed)
*/
/*
* Chip restart action and reason for restart
*/
static char bfe_ident[] = "bfe driver for Broadcom BCM4401 chipsets";
/*
* Function Prototypes for bfe driver.
*/
static int bfe_check_link(bfe_t *);
static void bfe_report_link(bfe_t *);
static void bfe_chip_halt(bfe_t *);
static void bfe_chip_reset(bfe_t *);
static void bfe_tx_desc_init(bfe_ring_t *);
static void bfe_rx_desc_init(bfe_ring_t *);
static void bfe_set_rx_mode(bfe_t *);
static void bfe_enable_chip_intrs(bfe_t *);
static void bfe_chip_restart(bfe_t *);
static void bfe_init_vars(bfe_t *);
static void bfe_clear_stats(bfe_t *);
static void bfe_gather_stats(bfe_t *);
static void bfe_error(dev_info_t *, char *, ...);
/*
 * NOTE(review): the first line of the following prototype is missing in
 * this revision (the fragment below is the tail of a get/set-prop style
 * callback declaration).  Also, the #define constants announced by the
 * "checking PHY" and "restart action" comments above are absent.
 * Restore from upstream bfe.c before building.
 */
const void *);
static int bfe_tx_reclaim(bfe_ring_t *);
int bfe_mac_set_ether_addr(void *, const uint8_t *);
/*
* Macros for ddi_dma_sync().
*/
/*
 * SYNC_DESC: ddi_dma_sync() the descriptor-table range starting at
 * entry 's' for 'l' entries (units of bfe_desc_t) in direction 'd'.
 */
#define SYNC_DESC(r, s, l, d) \
(void) ddi_dma_sync(r->r_desc_dma_handle, \
(off_t)(s * sizeof (bfe_desc_t)), \
(size_t)(l * sizeof (bfe_desc_t)), \
d)
/*
 * NOTE(review): the body of SYNC_BUF is missing in this revision; as
 * written, the trailing '\' splices the comment below into the macro.
 * Restore the ddi_dma_sync() body from upstream bfe.c.
 */
#define SYNC_BUF(r, s, b, l, d) \
/*
* Supported Broadcom BCM4401 Cards.
*/
static bfe_cards_t bfe_cards[] = {
{ 0x14e4, 0x170c, "BCM4401 100Base-TX"},
};
/*
* DMA attributes for device registers, packet data (buffer) and
* descriptor table.
*/
/*
 * NOTE(review): several initializer fields of the access/DMA attribute
 * structures below appear to be missing in this revision (e.g.
 * devacc_attr fields, dma_attr_addr_hi, dma_attr_seg) -- confirm
 * against upstream bfe.c and ddi_dma_attr(9S)/ddi_device_acc_attr(9S).
 */
static struct ddi_device_acc_attr bfe_dev_attr = {
};
static struct ddi_device_acc_attr bfe_buf_attr = {
DDI_NEVERSWAP_ACC, /* native endianness */
};
static ddi_dma_attr_t bfe_dma_attr_buf = {
DMA_ATTR_V0, /* dma_attr_version */
0, /* dma_attr_addr_lo */
0x1fff, /* dma_attr_count_max */
8, /* dma_attr_align */
0, /* dma_attr_burstsizes */
1, /* dma_attr_minxfer */
0x1fff, /* dma_attr_maxxfer */
1, /* dma_attr_sgllen */
1, /* dma_attr_granular */
0 /* dma_attr_flags */
};
static ddi_dma_attr_t bfe_dma_attr_desc = {
DMA_ATTR_V0, /* dma_attr_version */
0, /* dma_attr_addr_lo */
BFE_DESC_ALIGN, /* dma_attr_align */
0, /* dma_attr_burstsizes */
1, /* dma_attr_minxfer */
1, /* dma_attr_sgllen */
1, /* dma_attr_granular */
0 /* dma_attr_flags */
};
/*
* Ethernet broadcast addresses.
*/
/*
 * NOTE(review): the declaration line of this array (presumably
 * "static uchar_t bfe_broadcast[ETHERADDRL] = {") is missing.
 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
/* NOTE(review): ASSERT_ALL_LOCKS body is empty in this revision. */
#define ASSERT_ALL_LOCKS(bfe) { \
}
/*
* Debugging and error reporting code.
*/
/*
 * Error/diagnostic reporting helper -- presumably bfe_error() from the
 * prototype above.  NOTE(review): the signature line and the
 * vsprintf/cmn_err body are missing in this revision.
 */
static void
{
char buf[256];
if (dip) {
} else {
}
}
/*
* Grabs all necessary locks to block any other operation on the chip.
*/
static void
{
/*
* Grab all the locks.
* - bfe_rwlock : locks down whole chip including RX.
* - tx's r_lock : locks down only TX side.
*/
/*
* Note that we don't use RX's r_lock.
*/
}
/*
 * Releases the locks in the order in which they were grabbed above.
 * NOTE(review): signature and unlock calls are missing.
 */
static void
{
/*
* Release all the locks in the order in which they were grabbed.
*/
}
/*
* It's used to make sure that the write to device register was successful.
*/
static int
{
ulong_t i;
uint32_t v;
/* Poll up to 't' iterations, 10us apart; -1 on timeout, 0 on success. */
for (i = 0; i < t; i++) {
break;
break;
drv_usecwait(10);
}
/* if device still didn't see the value */
if (i == t)
return (-1);
return (0);
}
/*
* PHY functions (read, write, stop, reset and startup)
*/
/*
 * MII/MDIO register read -- NOTE(review): signature and the
 * surrounding BFE_MDIO register access are missing in this revision.
 */
static int
{
(reg << BFE_MDIO_RA_SHIFT) |
(BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT)));
}
/*
 * MII/MDIO register write -- NOTE(review): truncated as above.
 */
static void
{
(reg << BFE_MDIO_RA_SHIFT) |
(val & BFE_MDIO_DATA_DATA)));
}
/*
* It resets the PHY layer.
*/
static int
{
uint32_t i;
drv_usecwait(100);
/* Poll up to 10 times, 500us apart, for the reset to complete. */
for (i = 0; i < 10; i++) {
drv_usecwait(500);
continue;
}
break;
}
if (i == 10) {
return (BFE_FAILURE);
}
return (BFE_SUCCESS);
}
/*
* Make sure timer function is out of our way and especially during
* detach.
*/
static void
{
if (bfe->bfe_periodic_id) {
}
}
/*
* Stops the PHY
*/
static void
{
/*
* Report the link status to MAC layer.
*/
(void) bfe_report_link(bfe);
}
/*
 * PHY probe -- NOTE(review): the MII scan loop and register reads are
 * largely missing in this revision; brace nesting below is unbalanced
 * relative to what is visible.
 */
static int
{
int phy;
if (bfe->bfe_phy_addr) {
return (BFE_SUCCESS);
}
}
return (BFE_SUCCESS);
}
}
return (BFE_FAILURE);
}
/*
* This timeout function fires at BFE_TIMEOUT_INTERVAL to check the link
* status.
*/
/*
 * Periodic timer: watches for TX stalls and link changes.
 * NOTE(review): large parts of the body (chip-restart test, MII reads,
 * the re-arm of the periodic timeout) are missing in this revision.
 */
static void
bfe_timeout(void *arg)
{
int resched = 0;
/*
* We don't grab any lock because bfe can't go away.
* untimeout() will wait for this timeout instance to complete.
*/
/*
* Restart the chip.
*/
/* Restart will register a new timeout */
return;
}
/* TX stall: request a chip restart if a packet never left the wire. */
if (bfe->bfe_tx_stall_time != 0 &&
char *, "pkt timeout");
bfe->bfe_chip_action |=
bfe->bfe_tx_stall_time = 0;
}
}
/*
* Report the link status to MAC layer if link status changed.
*/
if (bfe_check_link(bfe)) {
val &= ~BFE_TX_DUPLEX;
val |= BFE_TX_DUPLEX;
flow &= ~BFE_RXCONF_FLOW;
flow &= ~(BFE_FLOW_RX_HIWAT);
}
resched = 1;
}
}
}
if (resched)
}
/*
* Starts PHY layer.
*/
/*
 * Starts the PHY: resets it, then builds the advertised-capability set
 * ('prog' counts selected modes, 's' records whether user properties
 * were set via bfe_mac_setprop()).
 * NOTE(review): the signature, the MII bmsr/anar reads, and most
 * register writes are missing in this revision; the 'again' label
 * targeted by the goto below is not visible.
 */
static int
{
int prog, s;
return (BFE_FAILURE);
}
(void) bfe_reset_phy(bfe);
anar &= ~(MII_ABILITY_100BASE_T4 |
/*
* Supported hardware modes are in bmsr.
*/
/*
* Assume no capabilities are supported in the hardware.
*/
/*
* Assume property is set.
*/
s = 1;
/*
* Property is not set which means bfe_mac_setprop()
* is not called on us.
*/
s = 0;
}
if (bmsr & MII_STATUS_100_BASEX_FD) {
if (s == 0) {
prog++;
} else if (bfe->bfe_adv_100fdx) {
prog++;
}
}
if (bmsr & MII_STATUS_100_BASE_T4) {
if (s == 0) {
prog++;
} else if (bfe->bfe_adv_100T4) {
prog++;
}
}
if (bmsr & MII_STATUS_100_BASEX) {
if (s == 0) {
prog++;
} else if (bfe->bfe_adv_100hdx) {
prog++;
}
}
if (bmsr & MII_STATUS_10_FD) {
if (s == 0) {
prog++;
} else if (bfe->bfe_adv_10fdx) {
prog++;
}
}
if (bmsr & MII_STATUS_10) {
if (s == 0) {
prog++;
} else if (bfe->bfe_adv_10hdx) {
prog++;
}
}
if (bmsr & MII_STATUS_CANAUTONEG) {
if (s == 0) {
}
}
if (prog == 0) {
if (s == 0) {
"No valid link mode selected. Powering down PHY");
return (BFE_FAILURE);
}
/*
* If property is set then user would have goofed up. So we
* go back to default properties.
*/
goto again;
}
} else {
if (bfe->bfe_adv_100fdx)
else if (bfe->bfe_adv_100hdx)
else if (bfe->bfe_adv_10fdx)
else
bmcr = 0; /* 10HDX */
}
if (prog)
if (bmcr)
}
return (BFE_SUCCESS);
}
/*
* Reports link status back to MAC Layer.
*/
/*
 * bfe_report_link() per the comment above -- NOTE(review): signature
 * and mac_link_update() body are missing in this revision.
 */
static void
{
}
/*
 * Checks PHY link state/speed/duplex; returns 1 if anything changed
 * (the caller reports to the MAC layer), else 0.
 * NOTE(review): signature and the MII bmsr/bmcr reads are missing.
 */
static int
{
/*
* If exp register is not present in PHY.
*/
bfe->bfe_mii_exp = 0;
}
if ((bmsr & MII_STATUS_LINKUP) == 0) {
goto done;
}
if (!(bmcr & MII_CONTROL_ANE)) {
/* Forced mode */
if (bmcr & MII_CONTROL_100MB)
else
if (bmcr & MII_CONTROL_FDUPLEX)
else
} else if ((!(bmsr & MII_STATUS_CANAUTONEG)) ||
(!(bmsr & MII_STATUS_ANDONE))) {
} else {
}
done:
/*
* If speed or link status or duplex mode changed then report to
* MAC layer which is done by the caller.
*/
return (1);
}
return (0);
}
/*
 * Programs a unicast address 'd' into the chip CAM.
 * NOTE(review): signature and the BFE_CAM register writes are missing;
 * only parts of the hi/lo word assembly survive.
 */
static void
{
uint32_t v;
v |= (uint32_t)d[5];
v = (BFE_CAM_HI_VALID |
(((uint32_t)d[0]) << 8) |
(((uint32_t)d[1])));
}
/*
* Chip related functions (halt, reset, start).
*/
/*
 * Chip halt -- NOTE(review): all register writes are missing in this
 * revision; only the structural comments and delays survive.
 */
static void
{
/*
* Disables interrupts.
*/
/*
* Wait until TX and RX finish their job.
*/
/*
* Disables DMA engine.
*/
drv_usecwait(10);
}
/*
 * Chip restart -- halts chip/PHY, re-inits state, resets the chip and
 * rings.  NOTE(review): truncated.
 */
static void
{
int, bfe->bfe_chip_action);
/*
* Halt chip and PHY.
*/
/*
* Init variables.
*/
/*
* Reset chip and start PHY.
*/
/*
* DMA descriptor rings.
*/
}
/*
* Disables core by stopping the clock.
*/
static void
{
return;
drv_usecwait(10);
drv_usecwait(10);
}
/*
* Resets core.
*/
static void
{
/*
* First disable the core.
*/
drv_usecwait(1);
drv_usecwait(1);
drv_usecwait(1);
}
/*
 * Bar0-window fixup helper -- NOTE(review): register accesses missing.
 */
static void
{
/*
* Change bar0 window to map sbtopci registers.
*/
/* Just read it and don't do anything */
/*
* Restore bar0 window mapping.
*/
}
/*
* Resets chip and starts PHY.
*/
static void
{
/* Set the interrupt vector for the enet core */
/* check if core is up */
10, 0);
}
}
}
/* Program DMA channels */
/*
* DMA addresses need to be added to BFE_PCI_DMA
*/
(void) bfe_startup_phy(bfe);
}
/*
* It enables interrupts. Should be the last step while starting chip.
*/
/*
 * Enables chip interrupts -- NOTE(review): register writes missing.
 */
static void
{
/* Enable the chip and core */
/* Enable interrupts */
}
/*
* Common code to take care of setting RX side mode (filter).
*/
static void
{
int i;
/*
* We don't touch RX filter if we were asked to suspend. It's fine
* if chip is not active (no interface is plumbed on us).
*/
return;
val &= ~BFE_RXCONF_PROMISC;
val &= ~BFE_RXCONF_DBCAST;
val &= ~BFE_RXCONF_DBCAST;
} else {
/* Flush everything */
}
/* Disable CAM */
/*
* We receive all multicast packets.
*/
for (i = 0; i < BFE_MAX_MULTICAST_TABLE - 1; i++) {
}
/* Enable CAM */
}
int, val);
}
/*
* Reset various variable values to initial state.
*/
static void
{
/* Initial assumption */
bfe->bfe_tx_stall_time = 0;
}
/*
* Initializes TX side descriptor entries (bfe_desc_t). Each descriptor entry
* has control (desc_ctl) and address (desc_addr) member.
*/
/*
 * TX descriptor-ring init per the comment above -- NOTE(review): the
 * per-descriptor desc_addr/desc_ctl stores are missing; the ring
 * counters below are reset to a fresh (empty) ring.
 */
static void
{
int i;
uint32_t v;
for (i = 0; i < r->r_ndesc; i++) {
/*
* DMA addresses need to be added to BFE_PCI_DMA
*/
}
v | BFE_DESC_EOT);
r->r_curr_desc = 0;
r->r_avail_desc = TX_NUM_DESC;
r->r_cons_desc = 0;
}
/*
* Initializes RX side descriptor entries (bfe_desc_t). Each descriptor entry
* has control (desc_ctl) and address (desc_addr) member.
*/
static void
{
int i;
uint32_t v;
for (i = 0; i < r->r_ndesc; i++) {
/* Initialize rx header (len, flags) */
(void) SYNC_BUF(r, i, 0, sizeof (bfe_rx_header_t),
}
v | BFE_DESC_EOT);
/* TAIL of RX Descriptor */
r->r_curr_desc = 0;
r->r_avail_desc = RX_NUM_DESC;
}
/*
 * Chip start: halt, reset, re-init the rings and check link state.
 * NOTE(review): most calls are missing in this revision.
 */
static int
{
/*
* Stop the chip first & then Reset the chip. At last enable interrupts.
*/
/*
* Reset chip and start PHY.
*/
/*
* Initialize Descriptor Rings.
*/
/* Check link, speed and duplex mode */
(void) bfe_check_link(bfe);
return (DDI_SUCCESS);
}
/*
* Clear chip statistics.
*/
/*
 * Clears chip statistics (the MIB registers clear on read).
 * NOTE(review): the register-read loop is missing in this revision.
 */
static void
{
ulong_t r;
/*
* Stat registers are cleared by reading.
*/
}
/*
* Collect chip statistics.
*/
static void
{
ulong_t r;
uint32_t *v;
v++;
}
v++;
}
/*
* TX :
* -------
* tx_good_octets, tx_good_pkts, tx_octets
* tx_pkts, tx_broadcast_pkts, tx_multicast_pkts
* tx_len_64, tx_len_65_to_127, tx_len_128_to_255
* tx_len_256_to_511, tx_len_512_to_1023, tx_len_1024_to_max
* tx_jabber_pkts, tx_oversize_pkts, tx_fragment_pkts
* tx_underruns, tx_total_cols, tx_single_cols
* tx_multiple_cols, tx_excessive_cols, tx_late_cols
* tx_defered, tx_carrier_lost, tx_pause_pkts
*
* RX :
* -------
* rx_good_octets, rx_good_pkts, rx_octets
* rx_pkts, rx_broadcast_pkts, rx_multicast_pkts
* rx_len_64, rx_len_65_to_127, rx_len_128_to_255
* rx_len_256_to_511, rx_len_512_to_1023, rx_len_1024_to_max
* rx_jabber_pkts, rx_oversize_pkts, rx_fragment_pkts
* rx_missed_pkts, rx_crc_align_errs, rx_undersize
* rx_crc_errs, rx_align_errs, rx_symbol_errs
* rx_pause_pkts, rx_nonpause_pkts
*/
/* txerr += bfe->bfe_hw_stats.tx_carrier_lost; */
}
/*
* Gets the state for dladm command and all.
*/
/*
 * GLDv3 getstat entry point (bfe_mac_getstat) -- returns the requested
 * statistic in *val.  NOTE(review): the signature line and most of the
 * per-case value assignments are missing in this revision; only the
 * case labels and a few assignments survive.  Restore from upstream.
 */
int
{
uint64_t v;
int err = 0;
switch (stat) {
default:
break;
case MAC_STAT_IFSPEED:
/*
* MAC layer will ask for IFSPEED first and hence we
* collect it only once.
*/
/*
* Update stats from the hardware.
*/
}
break;
case ETHER_STAT_ADV_CAP_100T4:
v = bfe->bfe_adv_100T4;
break;
break;
break;
case ETHER_STAT_ADV_CAP_10FDX:
break;
case ETHER_STAT_ADV_CAP_10HDX:
break;
v = 0;
break;
v = bfe->bfe_adv_aneg;
break;
case ETHER_STAT_ADV_CAP_PAUSE:
break;
case ETHER_STAT_ADV_REMFAULT:
break;
case ETHER_STAT_ALIGN_ERRORS:
/* MIB */
break;
case ETHER_STAT_CAP_100T4:
break;
case ETHER_STAT_CAP_100FDX:
break;
case ETHER_STAT_CAP_100HDX:
break;
case ETHER_STAT_CAP_10FDX:
break;
case ETHER_STAT_CAP_10HDX:
break;
case ETHER_STAT_CAP_ASMPAUSE:
v = 0;
break;
case ETHER_STAT_CAP_AUTONEG:
break;
case ETHER_STAT_CAP_PAUSE:
v = 1;
break;
case ETHER_STAT_CAP_REMFAULT:
break;
break;
case ETHER_STAT_JABBER_ERRORS:
break;
case ETHER_STAT_DEFER_XMTS:
break;
case ETHER_STAT_EX_COLLISIONS:
/* MIB */
break;
case ETHER_STAT_FCS_ERRORS:
/* MIB */
break;
/* MIB */
break;
case ETHER_STAT_LINK_ASMPAUSE:
v = 0;
break;
case ETHER_STAT_LINK_AUTONEG:
break;
case ETHER_STAT_LINK_DUPLEX:
break;
case ETHER_STAT_LP_CAP_100T4:
break;
case ETHER_STAT_LP_CAP_100FDX:
break;
case ETHER_STAT_LP_CAP_100HDX:
break;
case ETHER_STAT_LP_CAP_10FDX:
break;
case ETHER_STAT_LP_CAP_10HDX:
break;
v = 0;
break;
break;
case ETHER_STAT_LP_CAP_PAUSE:
break;
case ETHER_STAT_LP_REMFAULT:
break;
case ETHER_STAT_MACRCV_ERRORS:
break;
case ETHER_STAT_MACXMT_ERRORS:
break;
break;
case ETHER_STAT_SQE_ERRORS:
break;
break;
break;
break;
case ETHER_STAT_XCVR_ADDR:
v = bfe->bfe_phy_addr;
break;
case ETHER_STAT_XCVR_ID:
v = bfe->bfe_phy_id;
break;
case MAC_STAT_BRDCSTRCV:
break;
case MAC_STAT_BRDCSTXMT:
break;
case MAC_STAT_MULTIXMT:
break;
case MAC_STAT_COLLISIONS:
break;
case MAC_STAT_IERRORS:
break;
case MAC_STAT_IPACKETS:
break;
case MAC_STAT_MULTIRCV:
break;
case MAC_STAT_NORCVBUF:
break;
case MAC_STAT_NOXMTBUF:
break;
case MAC_STAT_OBYTES:
break;
case MAC_STAT_OERRORS:
/* MIB */
break;
case MAC_STAT_OPACKETS:
break;
case MAC_STAT_RBYTES:
break;
case MAC_STAT_UNDERFLOWS:
break;
case MAC_STAT_OVERFLOWS:
break;
}
*val = v;
return (err);
}
/*ARGSUSED*/
/*
 * GLDv3 getprop entry point (bfe_mac_getprop) -- copies the requested
 * property into the caller's buffer 'val' of size 'sz'.
 * NOTE(review): the signature line and most per-case copy statements
 * are missing in this revision.
 */
int
{
int err = 0;
if (sz == 0)
return (EINVAL);
*perm = MAC_PROP_PERM_RW;
switch (num) {
case MAC_PROP_DUPLEX:
if (sz >= sizeof (link_duplex_t)) {
sizeof (link_duplex_t));
} else {
}
break;
case MAC_PROP_SPEED:
} else {
}
break;
case MAC_PROP_AUTONEG:
break;
case MAC_PROP_ADV_100FDX_CAP:
break;
case MAC_PROP_EN_100FDX_CAP:
break;
case MAC_PROP_ADV_100HDX_CAP:
break;
case MAC_PROP_EN_100HDX_CAP:
break;
case MAC_PROP_ADV_10FDX_CAP:
break;
case MAC_PROP_EN_10FDX_CAP:
break;
case MAC_PROP_ADV_10HDX_CAP:
break;
case MAC_PROP_EN_10HDX_CAP:
break;
case MAC_PROP_ADV_100T4_CAP:
break;
case MAC_PROP_EN_100T4_CAP:
break;
default:
}
return (err);
}
/*ARGSUSED*/
/*
 * GLDv3 setprop entry point (bfe_mac_setprop) -- selects the capability
 * pointer 'capp' for the property, rejects unsupported ones, then
 * restarts the PHY/chip if the value changed.
 * NOTE(review): the signature's first line and the capp assignments,
 * lock/timer calls and PHY restart are missing in this revision.
 */
int
const void *val)
{
int r = 0;
switch (num) {
case MAC_PROP_EN_100FDX_CAP:
break;
case MAC_PROP_EN_100HDX_CAP:
break;
case MAC_PROP_EN_10FDX_CAP:
break;
case MAC_PROP_EN_10HDX_CAP:
break;
case MAC_PROP_EN_100T4_CAP:
break;
case MAC_PROP_AUTONEG:
break;
default:
return (ENOTSUP);
}
if (*capp == 0)
return (ENOTSUP);
/*
* We need to stop the timer before grabbing locks
* otherwise we can land-up in deadlock with untimeout.
*/
/*
* We leave SETPROP because properties can be
* temporary.
*/
r = 1;
}
}
/* kick-off a potential stopped downstream */
if (r)
return (0);
}
/*
 * Presumably bfe_mac_set_ether_addr() (declared above) --
 * NOTE(review): signature and body are missing in this revision.
 */
int
{
return (0);
}
/*
 * GLDv3 start entry point -- NOTE(review): the chip-start call whose
 * failure produces EINVAL is missing in this revision.
 */
int
bfe_mac_start(void *arg)
{
return (EINVAL);
}
return (0);
}
/*
 * GLDv3 stop entry point -- NOTE(review): the timer-stop, lock and
 * chip-halt calls are missing in this revision.
 */
void
bfe_mac_stop(void *arg)
{
/*
* We need to stop the timer before grabbing locks otherwise
* we can land-up in deadlock with untimeout.
*/
/*
* First halt the chip by disabling interrupts.
*/
/*
* This will leave the PHY running.
*/
/*
* Disable RX register.
*/
}
/*
* Send a packet down the wire.
*/
static int
{
uint32_t v;
return (BFE_SUCCESS);
}
/*
* There is a big reason why we don't check for '0'. It becomes easy
* for us to not roll over the ring since we are based on producer (tx)
* and consumer (reclaim by an interrupt) model. Especially when we
* run out of TX descriptor, chip will send a single interrupt and
* both producer and consumer counter will be same. So we keep a
* difference of 1 always.
*/
if (r->r_avail_desc <= 1) {
return (BFE_FAILURE);
}
/*
* Get the DMA buffer to hold packet.
*/
/*
* Gather statistics.
*/
/* Low bit of the destination address set => multicast/broadcast. */
if (buf[0] & 0x1) {
else
}
/*
* Program the DMA descriptor (start and end of frame are same).
*/
v |= BFE_DESC_EOT;
/*
* DMA addresses need to be added to BFE_PCI_DMA
*/
/*
* Sync the packet data for the device.
*/
/* Move to next descriptor slot */
r->r_curr_desc = next;
/*
* The order should be 1,2,3,... for BFE_DMATX_PTR if 0,1,2,3,...
* descriptor slot are being programmed.
*/
r->r_avail_desc--;
/*
* Let timeout know that it must reset the chip if a
* packet is not sent down the wire for more than 5 seconds.
*/
return (BFE_SUCCESS);
}
/*
 * GLDv3 transmit entry point (bfe_mac_transmit_packet per the callbacks
 * table below) -- NOTE(review): the signature and the per-mblk send
 * loop are missing in this revision.
 */
mblk_t *
{
mutex_enter(&r->r_lock);
mutex_exit(&r->r_lock);
return (NULL);
}
break;
}
}
mutex_exit(&r->r_lock);
return (mp);
}
/*
 * GLDv3 promiscuous-mode entry point (bfe_mac_set_promisc) --
 * NOTE(review): signature and RX-mode update calls are missing.
 */
int
{
return (EIO);
}
if (promiscflag) {
/* Set Promiscuous on */
} else {
}
return (0);
}
/*
 * GLDv3 multicast entry point (bfe_mac_set_multicast).
 */
int
{
/*
* It was too much of pain to implement multicast in CAM. Instead
* we never disable multicast filter.
*/
return (0);
}
/*
 * NOTE(review): the leading mc_callbacks flags word of this
 * mac_callbacks_t initializer appears to be missing.
 */
static mac_callbacks_t bfe_mac_callbacks = {
bfe_mac_getstat, /* gets stats */
bfe_mac_start, /* starts mac */
bfe_mac_stop, /* stops mac */
bfe_mac_set_promisc, /* sets promisc mode for snoop */
bfe_mac_set_multicast, /* multicast implementation */
bfe_mac_set_ether_addr, /* sets ethernet address (unicast) */
bfe_mac_transmit_packet, /* transmits packet */
NULL, /* ioctl */
NULL, /* getcap */
NULL, /* open */
NULL, /* close */
};
/*
 * Interrupt-error handler: classifies error bits in 'intr_mask' and
 * schedules a chip halt/restart via bfe_chip_action.
 * NOTE(review): signature, the 'action' label, and several bfe_error()
 * call lines are missing in this revision.
 */
static void
{
uint32_t v;
if (intr_mask & BFE_ISTAT_RFO) {
bfe->bfe_chip_action |=
goto action;
}
if (intr_mask & BFE_ISTAT_TFU) {
return;
}
/* Descriptor Protocol Error */
if (intr_mask & BFE_ISTAT_DPE) {
"Descriptor Protocol Error. Halting Chip");
bfe->bfe_chip_action |=
goto action;
}
/* Descriptor Error */
goto action;
}
/* Receive Descr. Underflow */
if (intr_mask & BFE_ISTAT_RDU) {
"Receive Descriptor Underflow. Restarting Chip");
bfe->bfe_chip_action |=
goto action;
}
/* Error while sending a packet */
if (v & BFE_STAT_EMASK) {
"Error while sending a packet. Restarting Chip");
}
/* Error while receiving a packet */
if (v & BFE_RX_FLAG_ERRORS) {
"Error while receiving a packet. Restarting Chip");
}
bfe->bfe_chip_action |=
}
/*
* It will recycle a RX descriptor slot.
*/
/*
 * Recycles one RX descriptor slot per the comment above --
 * NOTE(review): signature and the desc_ctl/desc_addr stores are
 * missing in this revision.
 */
static void
{
uint32_t v;
slot %= RX_NUM_DESC;
v |= BFE_DESC_EOT;
/*
* DMA addresses need to be added to BFE_PCI_DMA
*/
}
/*
* Gets called from interrupt context to handle RX interrupt.
*/
/*
 * RX interrupt handler: walks descriptors from r_curr_desc up to the
 * chip's 'current' slot, builds an mblk chain (rx_head) and returns it.
 * NOTE(review): signature, the header/length extraction, allocb/bcopy
 * and chain-link statements are missing in this revision.
 */
static mblk_t *
{
int i;
i = r->r_curr_desc;
int, r->r_curr_desc,
int, current);
for (i = r->r_curr_desc; i != current;
BFE_INC_SLOT(i, RX_NUM_DESC)) {
/*
* Sync the buffer associated with the descriptor table entry.
*/
/*
* We do this to make sure we are endian neutral. Chip is
* big endian.
*
* The header looks like :-
*
* Offset 0 -> uint16_t len
* Offset 2 -> uint16_t flags
* Offset 4 -> uint16_t pad[12]
*/
/*
* Don't receive this packet if pkt length is greater than
* MTU + VLAN_TAGSZ.
*/
/* Recycle slot for later use */
continue;
}
/* sizeof (bfe_rx_header_t) + 2 */
else {
}
/* Number of packets received so far */
/* Total bytes of packets received so far */
else
} else {
/* Recycle the slot for later use */
break;
}
/*
* Reinitialize the current descriptor slot's buffer so that
* it can be reused.
*/
}
r->r_curr_desc = i;
return (rx_head);
}
/*
 * Reclaims TX descriptors consumed by the chip, starting at
 * r_cons_desc; returns the chip's current descriptor index.
 * NOTE(review): signature, the 'cur' computation and the reclaim-loop
 * head are missing in this revision.
 */
static int
{
uint32_t v;
/*
* Start with the last descriptor consumed by the chip.
*/
start = r->r_cons_desc;
int, start,
int, cur);
/*
* There will be at least one descriptor to process.
*/
r->r_avail_desc++;
v |= BFE_DESC_EOT;
/* Move to next descriptor in TX ring */
}
(void) ddi_dma_sync(r->r_desc_dma_handle,
0, (r->r_ndesc * sizeof (bfe_desc_t)),
r->r_bfe->bfe_tx_stall_time = 0;
return (cur);
}
/*
 * TX-done helper: reclaims under r_lock and reports whether the MAC
 * layer should be rescheduled (bfe_tx_resched was set by the sender).
 * NOTE(review): signature is missing.
 */
static int
{
int resched = 0;
mutex_enter(&r->r_lock);
(void) bfe_tx_reclaim(r);
if (bfe->bfe_tx_resched) {
resched = 1;
bfe->bfe_tx_resched = 0;
}
mutex_exit(&r->r_lock);
return (resched);
}
/*
* ISR for interrupt handling
*/
/*
 * Interrupt service routine -- NOTE(review): signature, the intr-stat
 * reads and the RX/TX/error dispatch calls are missing in this
 * revision; only the claim/unclaim skeleton survives.
 */
static uint_t
{
int resched = 0;
/*
* Grab the lock to avoid stopping the chip while this interrupt
* is handled.
*/
/*
* It's necessary to read intr stat again because masking interrupt
* register does not really mask interrupts coming from the chip.
*/
if (intr_stat == 0) {
return (DDI_INTR_UNCLAIMED);
}
int, intr_stat);
/*
* If chip is suspended then we just return.
*/
return (DDI_INTR_CLAIMED);
}
/*
* Halt the chip again i.e basically disable interrupts.
*/
return (DDI_INTR_CLAIMED);
}
/* A packet was received */
if (intr_stat & BFE_ISTAT_RX) {
}
/* A packet was sent down the wire */
if (intr_stat & BFE_ISTAT_TX) {
}
/* There was an error */
if (intr_stat & BFE_ISTAT_ERRORS) {
}
/*
* Pass the list of packets received from chip to MAC layer.
*/
if (rx_head) {
}
/*
* Let the MAC start sending pkts to a potential stopped stream.
*/
if (resched)
return (DDI_INTR_CLAIMED);
}
/*
* Removes registered interrupt handler.
*/
static void
{
}
/*
* Add an interrupt for the driver.
*/
static int
{
int nintrs = 1;
int ret;
DDI_INTR_TYPE_FIXED, /* type */
0, /* inumber */
1, /* count */
&nintrs, /* actual nintrs */
if (ret != DDI_SUCCESS) {
" : ret : %d", ret);
return (DDI_FAILURE);
}
if (ret != DDI_SUCCESS) {
return (DDI_FAILURE);
}
if (ret != DDI_SUCCESS) {
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* Identify chipset family.
*/
/*
 * Matches the device against bfe_cards[] -- NOTE(review): the
 * vendor/device-id comparison inside the loop is missing; both exits
 * return BFE_SUCCESS here, confirm the unmatched path upstream.
 */
static int
{
int i;
for (i = 0; i < (sizeof (bfe_cards) / sizeof (bfe_cards_t)); i++) {
return (BFE_SUCCESS);
}
}
return (BFE_SUCCESS);
}
/*
* Maps device registers.
*/
static int
{
int ret;
if (ret != DDI_SUCCESS) {
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
 * Unmaps device registers -- NOTE(review): body missing.
 */
static void
{
}
/*
 * Reads the chip configuration (MAC address etc.) from the EEPROM.
 * NOTE(review): signature and the per-word EEPROM reads are missing.
 */
static int
{
int i;
/*
* Read EEPROM in prom[]
*/
for (i = 0; i < BFE_EEPROM_SIZE; i++) {
}
return (DDI_SUCCESS);
}
/*
* Ring Management routines
*/
/*
 * Allocates and binds one per-descriptor DMA packet buffer --
 * NOTE(review): the signature, the ddi_dma_* calls and the fail0/1/2
 * cleanup labels are missing in this revision.
 */
static int
{
int err;
if (err != DDI_SUCCESS) {
" alloc_handle failed");
goto fail0;
}
if (err != DDI_SUCCESS) {
" mem_alloc failed :%d", err);
goto fail1;
}
&count);
if (err != DDI_DMA_MAPPED) {
" bind_handle failed");
goto fail2;
}
if (count > 1) {
" more than one DMA cookie");
goto fail2;
}
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
/*
 * Frees one ring buffer (bfe_ring_buf_free) -- NOTE(review): the
 * unbind/free calls after the early return are missing.
 */
static void
{
return;
}
/*
 * Frees all per-descriptor buffers of a ring.
 */
static void
{
int i;
for (i = 0; i < r->r_ndesc; i++) {
bfe_ring_buf_free(r, i);
}
}
/*
 * Frees the descriptor table itself -- NOTE(review): the
 * ddi_dma_mem_free()/ddi_dma_free_handle() calls are missing.
 */
static void
{
(void) ddi_dma_unbind_handle(r->r_desc_dma_handle);
}
/*
 * Allocates the descriptor table for a ring plus one DMA buffer per
 * descriptor -- NOTE(review): signature and the ddi_dma_* call lines
 * are missing in this revision.
 */
static int
{
if (err != DDI_SUCCESS) {
" ddi_dma_alloc_handle()");
return (DDI_FAILURE);
}
if (err != DDI_SUCCESS) {
" ddi_dma_mem_alloc()");
return (DDI_FAILURE);
}
if (err != DDI_SUCCESS) {
" ddi_dma_addr_bind_handle()");
return (DDI_FAILURE);
}
/*
* We don't want to have multiple cookies. Descriptor should be
* aligned to PAGESIZE boundary.
*/
/* The actual descriptor for the ring */
r->r_desc_len = ring_len;
r->r_desc_cookie = cookie;
/* For each descriptor, allocate a DMA buffer */
fail = 0;
for (i = 0; i < r->r_ndesc; i++) {
i--;
fail = 1;
break;
}
}
if (fail) {
while (i-- >= 0) {
bfe_ring_buf_free(r, i);
}
/* We don't need the descriptor anymore */
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
 * Allocates DMA resources for both the TX and RX rings.
 * NOTE(review): the ring setup statements are missing in this revision.
 */
static int
{
/* TX */
/* RX */
/* Allocate TX Ring */
DDI_DMA_WRITE) != DDI_SUCCESS)
return (DDI_FAILURE);
/* Allocate RX Ring */
DDI_DMA_READ) != DDI_SUCCESS) {
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
 * DDI_RESUME handler (bfe_resume) -- NOTE(review): signature, the
 * softstate lookup and the lock/chip-start calls are missing.
 */
static int
{
int err = DDI_SUCCESS;
" while resume");
return (DDI_FAILURE);
}
/*
* Grab all the locks first.
*/
/* PHY will also start running */
err = DDI_FAILURE;
}
if (err == DDI_SUCCESS)
return (err);
}
/*
 * attach(9E) entry point -- NOTE(review): the softstate allocation,
 * PCI config access, MAC registration and the fail0..fail4 cleanup
 * labels are missing in this revision.
 */
static int
{
int unit;
int ret;
switch (cmd) {
case DDI_RESUME:
return (bfe_resume(dip));
case DDI_ATTACH:
break;
default:
return (DDI_FAILURE);
}
goto fail0;
}
/*
* Enable IO space, Bus Master and Memory Space accesses.
*/
/* Identify hardware */
goto fail1;
}
goto fail1;
}
(void) bfe_get_chip_config(bfe);
/*
* Register with MAC layer
*/
goto fail2;
}
goto fail2;
}
goto fail3;
}
goto fail4;
}
/* Init and then reset the chip */
bfe->bfe_chip_action = 0;
/* PHY will also start running */
/*
* Even though we enable the interrupts here but chip's interrupt
* is not enabled yet. It will be enabled once we plumb the interface.
*/
goto fail4;
}
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
/*
 * detach(9E) entry point -- NOTE(review): signature, mac_unregister,
 * ring-free and softstate teardown calls are missing in this revision.
 */
static int
{
switch (cmd) {
case DDI_DETACH:
/*
* We need to stop the timer before grabbing locks otherwise
* we can land-up in deadlock with untimeout.
*/
/*
* First unregister with MAC layer before stopping DMA
* engine.
*/
return (DDI_FAILURE);
/*
* Quiesce the chip first.
*/
/* Make sure timer is gone. */
/*
* Free the DMA resources for buffer and then descriptors
*/
/* TX */
}
/* RX */
}
return (DDI_SUCCESS);
case DDI_SUSPEND:
/*
* We need to stop the timer before grabbing locks otherwise
* we can land-up in deadlock with untimeout.
*/
/*
* Grab all the locks first.
*/
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
}
/*
* Quiesce the card for fast reboot
*/
/*
 * NOTE(review): bfe_quiesce() signature and chip-halt body are missing.
 */
int
{
return (DDI_SUCCESS);
}
/*
 * NOTE(review): the cb_flag initializer (between cb_stream and cb_rev)
 * appears to be missing from this cb_ops initializer.
 */
static struct cb_ops bfe_cb_ops = {
nulldev, /* cb_open */
nulldev, /* cb_close */
nodev, /* cb_strategy */
nodev, /* cb_print */
nodev, /* cb_dump */
nodev, /* cb_read */
nodev, /* cb_write */
nodev, /* cb_ioctl */
nodev, /* cb_devmap */
nodev, /* cb_mmap */
nodev, /* cb_segmap */
nochpoll, /* cb_chpoll */
ddi_prop_op, /* cb_prop_op */
NULL, /* cb_stream */
CB_REV, /* cb_rev */
nodev, /* cb_aread */
nodev /* cb_awrite */
};
static struct dev_ops bfe_dev_ops = {
DEVO_REV, /* devo_rev */
0, /* devo_refcnt */
NULL, /* devo_getinfo */
nulldev, /* devo_identify */
nulldev, /* devo_probe */
bfe_attach, /* devo_attach */
bfe_detach, /* devo_detach */
nodev, /* devo_reset */
&bfe_cb_ops, /* devo_cb_ops */
NULL, /* devo_bus_ops */
ddi_power, /* devo_power */
bfe_quiesce /* devo_quiesce */
};
/*
 * NOTE(review): the modldrv/modlinkage initializers are empty and the
 * _info()/_init()/_fini() bodies below are missing their mod_info/
 * mac_init_ops/mod_install/mod_remove calls in this revision.
 */
static struct modldrv bfe_modldrv = {
};
static struct modlinkage modlinkage = {
};
int
{
}
int
_init(void)
{
int status;
if (status == DDI_FAILURE)
return (status);
}
int
_fini(void)
{
int status;
if (status == 0) {
}
return (status);
}