dmfe_main.c revision d15360a7f1d6c844288e4ec4c82be4ed51792be2
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/dmfe_impl.h>
/*
* This is the string displayed by modinfo, etc.
* Make sure you keep the version ID ("v2.4") up to date whenever
* the driver's behaviour changes!
*/
static char dmfe_ident[] = "Davicom DM9102 v2.4";
/*
* NOTES:
*
* #defines:
*
* DMFE_PCI_RNUMBER is the register-set number to use for the operating
* registers. On an OBP-based machine, regset 0 refers to CONFIG space,
* regset 1 will be the operating registers in I/O space, and regset 2
* will be the operating registers in MEMORY space (preferred). If an
* expansion ROM is fitted, it may appear as a further register set.
*
* DMFE_SLOP defines the amount by which the chip may read beyond
* the end of a buffer or descriptor, apparently 6-8 dwords :(
* We have to make sure this doesn't cause it to access unallocated
* or unmapped memory.
*
* DMFE_BUF_SIZE must be at least (ETHERMAX + ETHERFCSL + DMFE_SLOP)
* rounded up to a multiple of 4. Here we choose a power of two for
* speed & simplicity at the cost of a bit more memory.
*
* DMFE_BUF_SIZE_1: the buffer-length field in a descriptor is only
* eleven bits, so even though we allocate DMFE_BUF_SIZE (2048) bytes
* per buffer, we tell the chip that they're only DMFE_BUF_SIZE_1
* (2000) bytes each.
*
* DMFE_DMA_MODE defines the mode (STREAMING/CONSISTENT) used for
* the data buffers. The descriptors are always set up in CONSISTENT
* mode.
*
* DMFE_HEADROOM defines how much space we'll leave in allocated
* mblks before the first valid data byte. This should be chosen
* to be 2 modulo 4, so that once the ethernet header (14 bytes)
* has been stripped off, the packet data will be 4-byte aligned.
* The remaining space can be used by upstream modules to prepend
* any headers required.
*
* Patchable globals:
*
* dmfe_bus_modes: the bus mode bits to be put into CSR0.
* Setting READ_MULTIPLE in this register seems to cause
* the chip to generate a READ LINE command with a parity
* error! Don't do it!
*
* dmfe_setup_desc1: the value to be put into descriptor word 1
* when sending a SETUP packet.
*
* Setting TX_LAST_DESC in desc1 in a setup packet seems
* to make the chip spontaneously reset internally - it
* attempts to give back the setup packet descriptor by
* writing to PCI address 00000000 - which may or may not
* get a MASTER ABORT - after which most of its registers
* seem to have either default values or garbage!
*
* TX_FIRST_DESC doesn't seem to have the same effect but
* it isn't needed on a setup packet so we'll leave it out
* too, just in case it has some other weird side-effect.
*
* The default hardware packet filtering mode is now
* HASH_AND_PERFECT (imperfect filtering of multicast
* packets and perfect filtering of unicast packets).
* If this is found not to work reliably, setting the
* TX_FILTER_TYPE1 bit will cause a switchover to using
* HASH_ONLY mode (imperfect filtering of *all* packets).
* Software will then perform the additional filtering
* as required.
*/
#define DMFE_PCI_RNUMBER 2
#define DMFE_BUF_SIZE 2048
#define DMFE_BUF_SIZE_1 2000
#define DMFE_DMA_MODE DDI_DMA_STREAMING
#define DMFE_HEADROOM 34
/*
* Some tunable parameters ...
* Minimum number of TX ring slots to keep free (1)
* Low-water mark at which to try to reclaim TX ring slots (1)
* How often to take a TX-done interrupt (twice per ring cycle)
* Whether to reclaim TX ring entries on a TX-done interrupt (no)
*/
/*
* Time-related parameters:
*
* We use a cyclic to provide a periodic callback; this is then used
* to check for TX-stall and poll the link status register.
*
* DMFE_TICK is the interval between cyclic callbacks, in microseconds.
*
* TX_STALL_TIME_100 is the timeout in microseconds between passing
* a packet to the chip for transmission and seeing that it's gone,
* when running at 100Mb/s. If we haven't reclaimed at least one
* descriptor in this time we assume the transmitter has stalled
* and reset the chip.
*
* TX_STALL_TIME_10 is the equivalent timeout when running at 10Mb/s.
*
* LINK_POLL_TIME is the interval between checks on the link state
* when nothing appears to have happened (this is in addition to the
* case where we think we've detected a link change, and serves as a
* backup in case the quick link check doesn't work properly).
*
* Patchable globals:
*
* dmfe_tick_us: DMFE_TICK
* dmfe_tx100_stall_us: TX_STALL_TIME_100
* dmfe_tx10_stall_us: TX_STALL_TIME_10
* dmfe_link_poll_us: LINK_POLL_TIME
*
* These are then used in _init() to calculate:
*
* stall_100_tix[]: number of consecutive cyclic callbacks without a
* reclaim before the TX process is considered stalled,
* when running at 100Mb/s. The elements are indexed
* by transmit-engine-state.
* stall_10_tix[]: number of consecutive cyclic callbacks without a
* reclaim before the TX process is considered stalled,
* when running at 10Mb/s. The elements are indexed
* by transmit-engine-state.
* factotum_tix: number of consecutive cyclic callbacks before waking
* up the factotum even though there doesn't appear to
* be anything for it to do
*/
/*
* Calculated from above in _init()
*
* Tick counts derived from the patchable microsecond globals
* described in the "Time-related parameters" comment above.
*/
static uint32_t factotum_tix;	/* ticks between routine factotum wakeups */
static uint32_t factotum_fast_tix;	/* NOTE(review): not described in the notes above — */
static uint32_t factotum_start_tix;	/* presumably faster/startup wakeup intervals; confirm in _init() */
/*
* Versions of the O/S up to Solaris 8 didn't support network booting
* from any network interface except the first (NET0). Patching this
* flag to a non-zero value will tell the driver to work around this
* limitation by creating an extra (internal) pathname node. To do
* this, add the following line to the client's /etc/system
* file ON THE ROOT FILESYSTEM SERVER before booting the client:
*
* set dmfe:dmfe_net1_boot_support = 1;
*/
static uint32_t dmfe_net1_boot_support = 0;	/* workaround off by default */
/*
* Property names (looked up on the device node)
*/
static char localmac_propname[] = "local-mac-address";	/* device MAC address */
static char opmode_propname[] = "opmode-reg-value";	/* operating-mode register override */
static char debug_propname[] = "dmfe-debug-flags";	/* per-instance debug flags */
/*
* Describes the chip's DMA engine
*
* NOTE(review): a ddi_dma_attr_t normally has more fields than the
* nine initialised below (e.g. dma_attr_addr_hi, dma_attr_maxxfer,
* dma_attr_seg). Some initialiser lines appear to have been lost
* from this copy of the file — compare against the original
* dmfe_main.c before relying on this structure.
*/
static ddi_dma_attr_t dma_attr = {
DMA_ATTR_V0, /* dma_attr version */
0, /* dma_attr_addr_lo */
0x0FFFFFF, /* dma_attr_count_max */
0x20, /* dma_attr_align */
0x7F, /* dma_attr_burstsizes */
1, /* dma_attr_minxfer */
1, /* dma_attr_sgllen */
1, /* dma_attr_granular */
0 /* dma_attr_flags */
};
/*
* DMA access attributes for registers and descriptors
*
* NOTE(review): the initialiser bodies of both access-attribute
* structures below are empty, and the stray "0xff, ..." line with
* its unmatched "};" looks like the remnant of a 6-byte Ethernet
* broadcast-address array whose declaration was lost from this
* copy of the file. Restore all three from the original
* dmfe_main.c — as it stands this section will not compile.
*/
static ddi_device_acc_attr_t dmfe_reg_accattr = {
};
/*
* DMA access attributes for data: NOT to be byte swapped.
*/
static ddi_device_acc_attr_t dmfe_data_accattr = {
};
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
/*
* ========== Lowest-level chip register & ring access routines ==========
*/
/*
*/
{
}
void
{
}
/*
*/
static uint32_t
{
}
static void
{
}
/*
*/
static uint32_t
{
}
static void
{
}
/*
* ========== Low-level chip & ring buffer manipulation ==========
*/
/*
* dmfe_set_opmode() -- function to set operating mode
*/
static void
{
drv_usecwait(10);
}
/*
* dmfe_stop_chip() -- stop all chip processing & optionally reset the h/w
*/
static void
{
/*
* Stop the chip:
* disable all interrupts
* If required, reset the chip
* Record the new state
*/
switch (newstate) {
default:
ASSERT(!"can't get here");
return;
case CHIP_STOPPED:
case CHIP_ERROR:
break;
case CHIP_RESET:
drv_usecwait(10);
drv_usecwait(10);
break;
}
}
/*
* Initialize transmit and receive descriptor rings, and
* set the chip to point to the first entry in each ring
*/
static void
{
int i;
/*
* You need all the locks in order to rewrite the descriptor rings
*/
/*
* Program the RX ring entries
*/
pnext += sizeof (struct rx_desc_type);
pbuff += DMFE_BUF_SIZE;
}
/*
* Fix up last entry & sync
*/
/*
* Set the base address of the RX descriptor list in CSR3
*/
DMFE_DEBUG(("RX descriptor VA: $%p (DVMA $%x)",
/*
* Program the TX ring entries
*/
desc1 = TX_CHAINING;
pnext += sizeof (struct tx_desc_type);
pbuff += DMFE_BUF_SIZE;
}
/*
* Fix up last entry & sync
*/
/*
* Set the base address of the TX descriptor list in CSR4
*/
DMFE_DEBUG(("TX descriptor VA: $%p (DVMA $%x)",
}
/*
*/
static void
{
#if defined(VLAN_VID_NONE)
/*
* Enable VLAN length mode (allows packets to be 4 bytes longer).
*/
if (gld_recv_tagged != NULL)
#endif
/*
* Clear any pending process-stopped interrupts
*/
}
/*
* dmfe_enable_interrupts() -- enable our favourite set of interrupts.
*
* Normal interrupts:
* We always enable:
* RX_PKTDONE_INT (packet received)
* TX_PKTDONE_INT (TX complete)
* We never enable:
* TX_ALLDONE_INT (next TX buffer not ready)
*
* Abnormal interrupts:
* We always enable:
* RX_STOPPED_INT
* TX_STOPPED_INT
* SYSTEM_ERR_INT
* RX_UNAVAIL_INT
* We never enable:
* RX_EARLY_INT
* RX_WATCHDOG_INT
* TX_JABBER_INT
* TX_EARLY_INT
* TX_UNDERFLOW_INT
* GP_TIMER_INT (not valid in -9 chips)
* LINK_STATUS_INT (not valid in -9 chips)
*/
static void
{
/*
* Put 'the standard set of interrupts' in the interrupt mask register
*/
}
/*
* ========== RX side routines ==========
*/
/*
* Function to update receive statistics on various errors
*/
static void
{
/*
* The error summary bit and the error bits that it summarises
* are only valid if this is the last fragment. Therefore, a
* fragment only contributes to the error statistics if both
* the last-fragment and error summary bits are set.
*/
/*
* There are some other error bits in the descriptor for
* which there don't seem to be appropriate GLD statistics,
* notably RX_COLLISION and perhaps RX_DESC_ERR. The
* latter may not be possible if it is supposed to indicate
* that one buffer has been filled with a partial packet
* and the next buffer required for the rest of the packet
* was not available, as all our buffers are more than large
* enough for a whole packet without fragmenting.
*/
if (desc0 & RX_OVERFLOW)
else if (desc0 & RX_RUNT_FRAME)
if (desc0 & RX_FRAME2LONG)
}
/*
* A receive watchdog timeout is counted as a MAC-level receive
* error. Strangely, it doesn't set the packet error summary bit,
* according to the chip data sheet :-?
*/
if (desc0 & RX_RCV_WD_TO)
if (desc0 & RX_DRIBBLING)
if (desc0 & RX_MII_ERR)
}
/*
* Ethernet packet-checking macros used in the receive code below.
* The <eap> parameter should be a pointer to the destination address found
* at the start of a received packet (an array of 6 (ETHERADDRL) bytes).
*
* A packet is intended for this station if the destination address exactly
* matches our own physical (unicast) address.
*
* A packet is a multicast (including broadcast) packet if the low-order bit
* of the first byte of the destination address is set (unicast addresses
* always have a zero in this bit).
*
* A packet should be passed up to GLD if it's multicast OR addressed to us
* OR if the device is in promiscuous mode, when all packets are passed up
* anyway.
*/
/*
* Receive incoming packet(s) and pass them up ...
*/
static mblk_t *
{
struct ether_header *ehp;
char *rxb;
int packet_length;
int index;
/*
* Update the missed frame statistic from the on-chip counter.
*/
/*
* sync (all) receive descriptors before inspecting them
*/
/*
* We should own at least one RX entry, since we've had a
* receive interrupt, but let's not be dogmatic about it.
*/
/*
* Maintain statistics for every descriptor returned
* to us by the chip ...
*/
/*
* Check that the entry has both "packet start" and
* "packet end" flags. We really shouldn't get packet
* fragments, 'cos all the RX buffers are bigger than
* the largest valid packet. So we'll just drop any
* fragments we find & skip on to the next entry.
*/
DMFE_DEBUG(("dmfe_getp: dropping fragment"));
goto skip;
}
/*
* A whole packet in one buffer. We have to check error
* status and packet length before forwarding it upstream.
*/
if (desc0 & RX_ERR_SUMMARY) {
DMFE_DEBUG(("dmfe_getp: dropping errored packet"));
goto skip;
}
if (packet_length > DMFE_MAX_PKT_SIZE) {
DMFE_DEBUG(("dmfe_getp: dropping oversize packet, "
"length %d", packet_length));
goto skip;
} else if (packet_length < ETHERMIN) {
DMFE_DEBUG(("dmfe_getp: dropping undersize packet, "
"length %d", packet_length));
goto skip;
}
/*
* Sync the data, so we can examine it; then check that
* the packet is really intended for us (remember that
* if we're using Imperfect Filtering, then the chip will
* receive unicast packets sent to stations whose addresses
* just happen to hash to the same value as our own; we
* discard these here so they don't get sent upstream ...)
*/
DMFE_DEBUG(("dmfe_getp: dropping aliased packet"));
goto skip;
}
/*
* Packet looks good; get a buffer to copy it into. We
* allow some space at the front of the allocated buffer
* (HEADROOM) in case any upstream modules want to prepend
* some sort of header. The value has been carefully chosen
* so that it also has the side-effect of making the packet
* *contents* 4-byte aligned, as required by NCA!
*/
DMFE_DEBUG(("dmfe_getp: no buffer - dropping packet"));
goto skip;
}
/*
* Copy the packet into the STREAMS buffer
*/
#if !defined(VLAN_VID_NONE)
#else
/*
* An untagged packet (or, there's no runtime support
* for tagging so we treat all packets as untagged).
* Just copy the contents verbatim.
*/
} else {
/*
* A tagged packet. Extract the vtag & stash it in
* the b_cont field (we know that it's safe to grab
* the vtag directly because the way buffers are
* allocated guarantees their alignment). Copy the
* rest of the packet data to the mblk, dropping the
* vtag in the process.
*
* The magic number (2*ETHERADDRL) appears quite a
* lot here, because the vtag comes immediately after
* the destination & source Ethernet addresses in
* the packet header ...
*/
}
#endif /* defined(VLAN_VID_NONE) */
/*
* Fix up the packet length, and link it to the chain
*/
skip:
/*
* Return ownership of ring entry & advance to next
*/
}
/*
* Remember where to start looking next time ...
*/
/*
* sync the receive descriptors that we've given back
* (actually, we sync all of them for simplicity), and
* wake the chip in case it had suspended receive
*/
return (head);
}
/*
* Take a chain of mblks (as returned by dmfe_getp() above) and pass
* them up to gld_recv() one at a time. This function should be called
* with *no* driver-defined locks held.
*
* If the driver is compiled with VLAN support enabled, this function
* also deals with routing vlan-tagged packets to the appropriate GLD
* entry point (gld_recv_tagged()).
*/
static void
{
do {
#if !defined(VLAN_VID_NONE)
#else
else
#endif /* defined(VLAN_VID_NONE) */
}
/*
* ========== Primary TX side routines ==========
*/
/*
* TX ring management:
*
* There are <tx.n_desc> entries in the ring, of which those from
* <tx.next_free> round to but not including <tx.next_busy> must
* be owned by the CPU. The number of such entries should equal
* <tx.n_free>; but there may also be some more entries which the
* chip has given back but which we haven't yet accounted for.
* The routine dmfe_reclaim_tx_desc() adjusts the indexes & counts
* as it discovers such entries.
*
* Initially, or when the ring is entirely free:
* C = Owned by CPU
* D = Owned by Davicom (DMFE) chip
*
* tx.next_free tx.n_desc = 16
* |
* v
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
* | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C |
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
* ^
* |
* tx.next_busy tx.n_free = 16
*
* On entry to reclaim() during normal use:
*
* tx.next_free tx.n_desc = 16
* |
* v
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
* | C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
* ^
* |
* tx.next_busy tx.n_free = 9
*
* On exit from reclaim():
*
* tx.next_free tx.n_desc = 16
* |
* v
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
* | C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
* ^
* |
* tx.next_busy tx.n_free = 13
*
* The ring is considered "full" when only one entry is owned by
* the CPU; thus <tx.n_free> should always be >= 1.
*
* tx.next_free tx.n_desc = 16
* |
* v
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
* | D | D | D | D | D | C | D | D | D | D | D | D | D | D | D | D |
* +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
* ^
* |
* tx.next_busy tx.n_free = 1
*/
/*
* Function to update transmit statistics on various errors
*/
static void
{
desc0 |= TX_ERR_SUMMARY;
}
if (desc0 & TX_ERR_SUMMARY) {
/*
* If we ever see a transmit jabber timeout, we count it
* as a MAC-level transmit error; but we probably won't
* see it as it causes an Abnormal interrupt and we reset
* the chip in order to recover
*/
if (desc0 & TX_JABBER_TO)
if (desc0 & TX_UNDERFLOW)
else if (desc0 & TX_LATE_COLL)
if (desc0 & TX_EXCESS_COLL) {
collisions = 16;
}
}
if (collisions == 1)
else if (collisions != 0)
if (desc0 & TX_DEFERRED)
}
/*
* Reclaim all the ring entries that the chip has returned to us ...
*
* Returns B_FALSE if no entries could be reclaimed. Otherwise, reclaims
* as many as possible, restarts the TX stall timeout, and returns B_TRUE.
*/
static boolean_t
{
int nfree;
int i;
/*
* sync transmit descriptor ring before looking at it
*/
#if DMFEDEBUG
/*
* check that we own all descriptors from next_free to next_used-1
*/
nfree = 0;
do {
nfree += 1;
#endif /* DMFEDEBUG */
/*
* Early exit if there are no descriptors to reclaim, either
* because they're all reclaimed already, or because the next
* one is still owned by the chip ...
*/
return (B_FALSE);
return (B_FALSE);
/*
* Reclaim as many descriptors as possible ...
*/
for (;;) {
if (desc1 & TX_SETUP_PACKET) {
/*
* Setup packet - restore buffer address
*/
descp->setup_dvma);
} else {
/*
* Regular packet - just update stats
*/
}
#if DMFEDEBUG
/*
* We can use one of the SPARE bits in the TX descriptor
* to track when a ring buffer slot is reclaimed. Then
* we can deduce the last operation on a slot from the
* top half of DESC0:
*
* 0x8000 xxxx given to DMFE chip (TX_OWN)
* 0x7fff xxxx returned but not yet reclaimed
* 0x3fff xxxx reclaimed
*/
#endif /* DMFEDEBUG */
/*
* Update count & index; we're all done if the ring is
* now fully reclaimed, or the next entry if still owned
* by the chip ...
*/
break;
break;
}
dmfep->tx_pending_tix = 0;
return (B_TRUE);
}
/*
* Send the message in the message block chain <mp>.
*
* The message is freed if and only if its contents are successfully copied
* and queued for transmission (so that the return value is GLD_SUCCESS).
* If we can't queue the message, the return value is GLD_NORESOURCES and
* the message is *not* freed.
*
* This routine handles the special case of <mp> == NULL, which indicates
* that we want to "send" the special "setup packet" allocated during
* startup. We have to use some different flags in the packet descriptor
* to say its a setup packet (from the global <dmfe_setup_desc1>), and the
* setup packet *isn't* freed after use.
*/
static int
{
char *txb;
DMFE_TRACE(("dmfe_send_msg($%p, $%p, 0x%x)",
/*
* If the number of free slots is below the reclaim threshold
* (soft limit), we'll try to reclaim some. If we fail, and
* the number of free slots is also below the minimum required
* (the hard limit, usually 1), then we can't send the packet.
*/
/*
* Resource shortage - return the proper GLD code,
* so the packet will be queued for retry after the
* next TX-done interrupt.
*/
DMFE_DEBUG(("dmfe_send_msg: no free descriptors"));
return (GLD_NORESOURCES);
}
/*
* There's a slot available, so claim it by incrementing
* the next-free index and decrementing the free count.
* If the ring is currently empty, we also restart the
* stall-detect timer. The ASSERTions check that our
* invariants still hold:
* the next-free index must not match the next-busy index
* there must still be at least one free entry
* After this, we now have exclusive ownership of the ring
* entry (and matching buffer) indicated by <index>, so we
* don't need to hold the TX lock any longer
*/
dmfep->tx_pending_tix = 0;
/*
* Check the ownership of the ring entry ...
*/
/*
* Indicates we should send a SETUP packet, which we do by
* temporarily switching the BUFFER1 pointer in the ring
* entry. The reclaim routine will restore BUFFER1 to its
* usual value.
*
* Note that as the setup packet is tagged on the end of
* the TX ring, when we sync the descriptor we're also
* implicitly syncing the setup packet - hence, we don't
* need a separate ddi_dma_sync() call here.
*/
} else {
/*
* A regular packet; we copy the data into a pre-mapped
* buffer, which avoids the overhead (and complication)
* of mapping the caller's mblks for DMA and having to hold on to
* them until the DMA has completed.
*
* Because all buffers are the same size, and larger
* than the longest single valid message, we don't have
* to bother about splitting the message across multiple
* buffers.
*/
totlen = 0;
#if defined(VLAN_VID_NONE)
if (vtag != VLAN_VTAG_NONE) {
/*
* A tagged packet. While copying the data from
* the first mblk into the Tx buffer, insert the
* vtag at the proper offset. Note that we assume
* that the entire struct ether_header (14 bytes)
* at the start of the packet can be found in the
* first mblk, so we don't have to worry about it
* being split into multiple fragments.
*
* The magic number (2*ETHERADDRL) appears quite a
* lot here, because the vtag comes immediately after
* the destination & source Ethernet addresses in
* the packet header.
*
* The way we allocated the buffers guarantees
* their alignment, so it's safe to stash the vtag
* directly into the buffer at the proper offset
* (4 bytes, 12 bytes into the frame).
*
* Once we have copied the data from the first mblk,
* we can fall through to the untagged packet code
* to handle the rest of the chain, if any.
*/
}
#endif /* defined(VLAN_VID_NONE) */
/*
* Copy all (remaining) mblks in the message ...
*/
}
}
/*
* We've reached the end of the chain; and we should have
* collected no more than DMFE_MAX_PKT_SIZE bytes into our
* buffer. Note that the <size> field in the descriptor is
* only 11 bits, so bigger packets would be a problem!
*/
}
/*
* Update ring descriptor entries, sync them, and wake up the
* transmit process
*/
if ((index & dmfe_tx_int_factor) == 0)
desc1 |= TX_INT_ON_COMP;
desc1 |= TX_CHAINING;
/*
* Finally, free the message & return success
*/
if (mp)
return (GLD_SUCCESS);
}
/*
* dmfe_gld_send() -- send a packet
*
* Called when a packet is ready to be transmitted. A pointer to an
* M_DATA message that contains the packet is passed to this routine.
* The complete LLC header is contained in the message's first message
* block, and the remainder of the packet is contained within
* additional M_DATA message blocks linked to the first message block.
*/
static int
{
DMFE_TRACE(("dmfe_gld_send($%p, $%p)",
return (GLD_NORESOURCES);
}
#if defined(VLAN_VID_NONE)
/*
* dmfe_gld_send_tagged() -- send a tagged packet
*
* Called when a packet is ready to be transmitted. A pointer to an
* M_DATA message that contains the packet is passed to this routine.
* The complete LLC header is contained in the message's first message
* block, and the remainder of the packet is contained within
* additional M_DATA message blocks linked to the first message block.
*/
static int
{
DMFE_TRACE(("dmfe_gld_send_vtag($%p, $%p, $%x)",
return (GLD_NORESOURCES);
}
#endif /* defined(VLAN_VID_NONE) */
/*
* ========== Address-setting routines (TX-side) ==========
*/
/*
* Find the index of the relevant bit in the setup packet.
* This must mirror the way the hardware will actually calculate it!
*/
static uint32_t
{
int byteslength;
int shift;
int bit;
crc <<= 1;
crc |= 0x00000001;
}
currentbyte >>= 1;
}
}
return (index);
}
/*
* This must mirror the way the hardware will actually interpret it!
*/
static void
{
if (val)
else
}
/*
* Update the refcount for the bit in the setup packet corresponding
* to the specified address; if it changes between zero & nonzero,
* also update the bitmap itself & return B_TRUE, so that the caller
* knows to re-send the setup packet. Otherwise (only the refcount
* changed), return B_FALSE
*/
static boolean_t
{
DMFE_DEBUG(("dmfe_update_mcast: %s %s %s index %d ref %d->%d%s",
if (change)
return (change);
}
/*
* "Transmit" the (possibly updated) magic setup packet
*/
static int
{
int status;
/*
* If the chip isn't running, we can't really send the setup frame
* now but it doesn't matter, 'cos it will be sent when the transmit
* process is restarted (see dmfe_start()). It isn't an error; in
* fact, GLD v2 will always call dmfe_gld_set_mac_addr() after a
* reset, so that the address can be set up before the chip is
* started. In this context, the return code GLD_NOTSUPPORTED is
* taken as success!
*/
return (GLD_NOTSUPPORTED);
/*
* "Send" the setup frame. If it fails (e.g. no resources),
* set a flag; then the factotum will retry the "send". Once
* it works, we can clear the flag no matter how many attempts
* had previously failed. We tell the caller that it worked
* whether it did or not; after all, it *will* work eventually.
*/
return (GLD_SUCCESS);
}
/*
* dmfe_gld_set_mac_addr() -- set the physical network address
*/
static int
{
int status;
int index;
DMFE_TRACE(("dmfe_gld_set_mac_addr($%p, %s)",
/*
* Update our current address and send out a new setup packet
*
* Here we accommodate the use of HASH_ONLY or HASH_AND_PERFECT
* filtering modes (we don't support PERFECT_ONLY or INVERSE modes).
*
* It is said that there is a bug in the 21140 where it fails to
* receive packets addressed to the specified perfect filter address.
* If the same bug is present in the DM9102A, the TX_FILTER_TYPE1
* bit should be set in the module variable dmfe_setup_desc1.
*
* If TX_FILTER_TYPE1 is set, we will use HASH_ONLY filtering.
* In this mode, *all* incoming addresses are hashed and looked
* up in the bitmap described by the setup packet. Therefore,
* the bit representing the station address has to be added to
* the table before sending it out. If the address is changed,
* the old entry should be removed before the new entry is made.
*
* NOTE: in this mode, unicast packets that are not intended for
* this station may be received; it is up to software to filter
* them out afterwards!
*
* If TX_FILTER_TYPE1 is *not* set, we will use HASH_AND_PERFECT
* filtering. In this mode, multicast addresses are hashed and
* checked against the bitmap, while unicast addresses are simply
* matched against the one physical address specified in the setup
* packet. This means that we shouldn't receive unicast packets
* that aren't intended for us (but software still has to filter
* multicast packets just the same).
*
* Whichever mode we're using, we have to enter the broadcast
* address into the multicast filter map too, so we do this on
* the first time through after attach or reset.
*/
"xaddr");
if (dmfe_setup_desc1 & TX_FILTER_TYPE1)
"bcast");
/*
* Remember the new current address
*/
/*
* Install the new physical address into the proper position in
* the setup frame; this is only used if we select hash+perfect
* filtering, but we'll put it in anyway. The ugliness here is
* down to the usual war of the egg :(
*/
/*
* Finally, we're ready to "transmit" the setup frame
*/
return (status);
}
/*
* dmfe_gld_set_multicast() -- enable or disable a multicast address
*
* Enable or disable reception of the multicast address given
* in "mca" (enable if "flag" is GLD_MULTI_ENABLE, otherwise disable).
* We keep a refcount for each bit in the map, so that it still
* works out properly if multiple addresses hash to the same bit.
* dmfe_update_mcast() tells us whether the map actually changed;
* if so, we have to re-"transmit" the magic setup packet.
*/
static int
{
int status;
DMFE_TRACE(("dmfe_gld_set_multicast($%p, %s, %d)",
return (status);
}
/*
* ========== Internal state management entry points ==========
*/
/*
* These routines provide all the functionality required by the
* corresponding GLD entry points, but don't update the GLD state
* so they can be called internally without disturbing our record
* of what GLD thinks we should be doing ...
*/
/*
* dmfe_stop() -- stop processing, don't reset h/w or rings
*/
static void
{
}
/*
* dmfe_reset() -- stop processing, reset h/w & rings to initial state
*/
static void
{
}
/*
* dmfe_start() -- start transmitting/receiving
*/
static void
{
/*
* Make opmode consistent with PHY duplex setting
*/
if (gpsr & GPS_FULL_DUPLEX)
else
/*
* Start transmit processing
* Set up the address filters
* Start receive processing
* Enable interrupts
*/
(void) dmfe_send_setup(dmfep);
drv_usecwait(10);
}
/*
* dmfe_restart - restart transmitting/receiving after error or suspend
*/
static void
{
/*
* You need not only <oplock>, but also <rxlock> AND <txlock>
* in order to reset the rings, but then <txlock> *mustn't*
* be held across the call to dmfe_start()
*/
}
/*
* ========== GLD-required management entry points ==========
*/
/*
* dmfe_gld_reset() -- reset to initial state
*/
static int
{
/*
* Reset chip & rings to initial state; also reset address
* filtering & record new GLD state. You need *all* the locks
* in order to stop all other activity while doing this!
*/
return (0);
}
/*
* dmfe_gld_stop() -- stop transmitting/receiving
*/
static int
{
/*
* Just stop processing, then record new GLD state
*/
return (0);
}
/*
* dmfe_gld_start() -- start transmitting/receiving
*/
static int
{
/*
* Start processing and record new GLD state
*/
return (0);
}
/*
* dmfe_gld_set_promiscuous() -- set or reset promiscuous mode on the board
*
* Program the chip's promiscuous and/or
* receive-all-multicast modes. Davicom don't document this
* clearly, but it looks like we can do this on-the-fly (i.e.
* without stopping and restarting the TX/RX processes).
*/
static int
{
DMFE_TRACE(("dmfe_gld_set_promiscuous($%p, %d)",
/*
* Convert GLD-specified mode to DMFE opmode setting
*/
switch (mode) {
case GLD_MAC_PROMISC_NONE:
pmode = 0;
break;
case GLD_MAC_PROMISC_MULTI:
break;
case GLD_MAC_PROMISC_PHYS:
break;
default:
return (GLD_NOTSUPPORTED);
}
DMFE_DEBUG(("dmfe_gld_set_promiscuous: mode %d => opmode 0x%x",
return (GLD_SUCCESS);
}
/*
* ========== Factotum, implemented as a softint handler ==========
*/
/*
* The factotum is woken up when there's something to do that we'd rather
* not do from inside a (high-level?) hardware interrupt handler. Its
* two main tasks are:
* reset & restart the chip after an error
* update & restart the chip after a link status change
*/
static uint_t
{
dmfep->factotum_flag = 0;
/*
* Check for chip error ...
*/
/*
* Error recovery required: reset the chip and the rings,
* then, if it's supposed to be running, kick it off again.
*/
} else if (dmfep->need_setup) {
(void) dmfe_send_setup(dmfep);
}
/*
* Then, check the link state. We need <milock> but not <oplock>
* to do this, but if something's changed, we need <oplock> as well.
* We could simply hold <oplock> right through here, but we'd rather
* not 'cos checking
* the link state involves reading over the bit-serial MII bus,
* which takes ~500us even when nothing's changed. Holding <oplock>
* would lock out the interrupt handler for the duration, so it's
* better to release it first and reacquire it only if needed.
*/
if (dmfe_check_link(dmfep)) {
if (dmfep->update_phy) {
/*
* The chip may reset itself for some unknown
* reason. If this happens, the chip will use
* default settings (for speed, duplex, and autoneg),
* which possibly aren't the user's desired settings.
*/
}
}
/*
* Keep GLD up-to-date about the state of the link ...
*/
if (gld_linkstate != NULL)
return (DDI_INTR_CLAIMED);
}
static void
{
DMFE_DEBUG(("dmfe_wake_factotum: %s [%d] flag %d",
if (dmfep->factotum_flag++ == 0)
}
/*
* ========== Periodic Tasks (Cyclic handler & friends) ==========
*/
/*
* Periodic tick tasks, run from the cyclic handler
*
* Check the state of the link and wake the factotum if necessary
*/
/*
* NOTE(review): the function header line and several statements are
* missing from this excerpt.  Code left byte-identical; comments only.
*/
static void
{
const char *why;	/* reason string for waking the factotum */
int ks_id;	/* kstat counter id recording that reason */
/*
* Is it time to wake the factotum? We do so periodically, in
* case the fast check below doesn't always reveal a link change
*/
if (dmfep->link_poll_tix-- == 0) {
why = "tick (link poll)";
} else {
}
/*
* Has the link status changed? If so, we might want to wake
* the factotum to deal with it.
*/
why = "tick (phy <> utp)";
why = "tick (UP -> DOWN)";
}
DMFE_DEBUG(("dmfe_%s: link %d phy %d utp %d",
}
}
/*
* Periodic tick tasks, run from the cyclic handler
*
* Check for TX stall; flag an error and wake the factotum if so.
*/
/*
* NOTE(review): the function header line and several statements are
* missing from this excerpt.  Code left byte-identical; comments only.
*/
static void
{
/*
* Check for transmit stall ...
*
* IF there's at least one packet in the ring, AND the timeout
* has elapsed, AND we can't reclaim any descriptors, THEN we've
* stalled; we return B_TRUE to trigger a reset-and-recover cycle.
*
* Note that the timeout limit is based on the transmit engine
* state; we allow the transmitter longer to make progress in
* some states than in others, based on observations of this
* chip's actual behaviour in the lab.
*
* By observation, we find that on about 1 in 10000 passes through
* here, the TX lock is already held. In that case, we'll skip
* the check on this pass rather than wait. Most likely, the send
* routine was holding the lock when the interrupt happened, and
* we'll succeed next time through. In the event of a real stall,
* the TX ring will fill up, after which the send routine won't be
* called any more and then we're sure to get in.
*/
/* Choose the stall-timeout table by current link speed */
if (gpsr & GPS_LINK_100)
else
"after %d ticks in state %d; "
"automatic recovery initiated",
}
}
}
if (tx_stall) {
}
}
/*
* Cyclic callback handler
*/
/*
* NOTE(review): several statements are missing from this excerpt
* (mutex tryenter, state checks).  Code left byte-identical.
*/
static void
dmfe_cyclic(void *arg)
{
/*
* If the chip's not RUNNING, there's nothing to do.
* If we can't get the mutex straight away, we'll just
* skip this pass; we'll be back soon enough anyway.
*/
return;
return;
/*
* Recheck chip state (it might have been stopped since we
* checked above). If still running, call each of the *tick*
* tasks. They will check for link change, TX stall, etc ...
*/
}
}
/*
* ========== Hardware interrupt handler ==========
*/
/*
* dmfe_interrupt() -- handle chip interrupts
*/
/*
* NOTE(review): the function header line and many statements (register
* reads, mutex operations) are missing from this excerpt.  Code left
* byte-identical; comments only.
*/
static uint_t
{
const char *msg;	/* human-readable cause for Abnormal interrupts */
/*
* A quick check as to whether the interrupt was from this
* device, before we even finish setting up all our local
* variables. Note that reading the interrupt status register
* doesn't have any unpleasant side effects such as clearing
* the bits read, so it's quite OK to re-read it once we have
* determined that we are going to service this interrupt and
* grabbed the mutexen.
*/
return (DDI_INTR_UNCLAIMED);
/*
* Unfortunately, there can be a race condition between attach()
* adding the interrupt handler and initialising the mutexen,
* and the handler itself being called because of a pending
* interrupt. So, we check <imask>; if it shows that interrupts
* haven't yet been enabled (and therefore we shouldn't really
* be here at all), we will just write back the value read from
* the status register, thus acknowledging (and clearing) *all*
* pending conditions without really servicing them, and claim
* the interrupt.
*/
return (DDI_INTR_CLAIMED);
}
/*
* We're committed to servicing this interrupt, but we
* need to get the lock before going any further ...
*/
/*
* Identify bits that represent enabled interrupts ...
*/
ASSERT(interrupts != 0);
/*
* If there are any, they are considered Abnormal
* and will cause the chip to be reset.
*/
if (istat & ABNORMAL_SUMMARY_INT) {
/*
* Any Abnormal interrupts will lead to us
* resetting the chip, so we don't bother
* to clear each interrupt individually.
*
* Our main task here is to identify the problem,
* by pointing out the most significant unexpected
* bit. Additional bits may well be consequences
* of the first problem, so we consider the possible
* causes in order of severity.
*/
if (interrupts & SYSTEM_ERR_INT) {
switch (istat & SYSTEM_ERR_BITS) {
case SYSTEM_ERR_M_ABORT:
msg = "Bus Master Abort";
break;
case SYSTEM_ERR_T_ABORT:
msg = "Bus Target Abort";
break;
case SYSTEM_ERR_PARITY:
msg = "Parity Error";
break;
default:
msg = "Unknown System Bus Error";
break;
}
} else if (interrupts & RX_STOPPED_INT) {
msg = "RX process stopped";
} else if (interrupts & RX_UNAVAIL_INT) {
msg = "RX buffer unavailable";
} else if (interrupts & RX_WATCHDOG_INT) {
msg = "RX watchdog timeout?";
} else if (interrupts & RX_EARLY_INT) {
msg = "RX early interrupt?";
} else if (interrupts & TX_STOPPED_INT) {
msg = "TX process stopped";
} else if (interrupts & TX_JABBER_INT) {
msg = "TX jabber timeout";
} else if (interrupts & TX_UNDERFLOW_INT) {
msg = "TX underflow?";
} else if (interrupts & TX_EARLY_INT) {
msg = "TX early interrupt?";
} else if (interrupts & LINK_STATUS_INT) {
msg = "Link status change?";
} else if (interrupts & GP_TIMER_INT) {
msg = "Timer expired?";
}
if (warning_msg)
/*
* We don't want to run the entire reinitialisation
* code out of this (high-level?) interrupt, so we
* simply STOP the chip, and wake up the factotum
* to reinitalise it ...
*/
"interrupt (error)");
} else {
/*
* We shouldn't really get here (it would mean
* there were some unprocessed enabled bits but
* they weren't Abnormal?), but we'll check just
* in case ...
*/
DMFE_DEBUG(("unexpected interrupt bits: 0x%x",
istat));
}
}
/*
* Acknowledge all the original bits - except in the case of an
* error, when we leave them unacknowledged so that the recovery
* code can see what was going on when the problem occurred ...
*/
/*
* Read-after-write forces completion on PCI bus.
*
*/
}
/*
* We've finished talking to the chip, so we can drop <oplock>
* before handling the normal interrupts, which only involve
* manipulation of descriptors ...
*/
if (interrupts & RX_PKTDONE_INT)
if (interrupts & TX_PKTDONE_INT) {
/*
* The only reason for taking this interrupt is to give
* GLD a chance to schedule queued packets after a
* ring-full condition. To minimise the number of
* redundant TX-Done interrupts, we only mark two of the
* ring descriptors as 'interrupt-on-complete' - all the
* others are simply handed back without an interrupt.
*/
(void) dmfe_reclaim_tx_desc(dmfep);
}
}
return (DDI_INTR_CLAIMED);
}
/*
* ========== Statistics update handler ==========
*/
/*
* NOTE(review): the function header line and the entire statistics-copying
* body are missing from this excerpt (per the section banner above, this is
* the GLD statistics update handler).  Code left byte-identical.
*/
static int
{
return (GLD_SUCCESS);
}
/*
* ========== Ioctl handler & subfunctions ==========
*/
/*
* Loopback operation
*
* Support access to the internal loopback and external loopback
* functions selected via the Operation Mode Register (OPR).
* These will be used by netlbtest (see BugId 4370609)
*
*/
/*
* NOTE(review): the function header line and several statements are
* missing from this excerpt (the debug strings identify it as
* dmfe_loop_ioctl); the trailing brace structure reflects those missing
* lines.  Code left byte-identical; comments only.
*/
static enum ioc_reply
{
return (IOC_INVAL);
switch (cmd) {
default:
/*
* This should never happen ...
*/
return (IOC_INVAL);
case DMFE_GET_LOOP_MODE:
/*
* This doesn't return the current loopback mode - it
* returns a bitmask :-( of all possible loopback modes
*/
DMFE_DEBUG(("dmfe_loop_ioctl: GET_LOOP_MODE"));
return (IOC_DONE);
case DMFE_SET_LOOP_MODE:
/*
* Select any of the various loopback modes
*/
DMFE_DEBUG(("dmfe_loop_ioctl: SET_LOOP_MODE %d",
loop_req_p->loopback));
switch (loop_req_p->loopback) {
default:
return (IOC_INVAL);
case DMFE_LOOPBACK_OFF:
break;
case DMFE_PHY_A_LOOPBACK_ON:
break;
case DMFE_PHY_D_LOOPBACK_ON:
break;
case DMFE_INT_LOOPBACK_ON:
break;
}
return (IOC_RESTART_ACK);
}
return (IOC_ACK);
}
}
/*
* Specific dmfe IOCTLs, the gld module handles the generic ones.
*/
/*
* NOTE(review): the function header line (dmfe_gld_ioctl, per the trace
* string) and many statements are missing from this excerpt.  Code left
* byte-identical; comments only.
*/
static int
{
int cmd;	/* ioctl command code extracted from the message */
DMFE_TRACE(("dmfe_gld_ioctl($%p, $%p, $%p)",
/*
* Validate the command before bothering with the mutexen ...
*/
switch (cmd) {
default:
return (GLD_SUCCESS);
case DMFE_SET_LOOP_MODE:
case DMFE_GET_LOOP_MODE:
case DMFE_ND_GET:
case DMFE_ND_SET:
break;
}
/* Dispatch the validated command to the appropriate subfunction */
switch (cmd) {
default:
break;
case DMFE_SET_LOOP_MODE:
case DMFE_GET_LOOP_MODE:
break;
case DMFE_ND_GET:
case DMFE_ND_SET:
break;
}
/*
* Do we need to restart?
*/
switch (status) {
default:
break;
case IOC_RESTART_ACK:
case IOC_RESTART:
/*
* PHY parameters changed; we need to stop, update the
* PHY layer and restart before sending the reply or ACK
*/
/*
* The link will now most likely go DOWN and UP, because
* we've changed the loopback state or the link parameters
* or autonegotiation. So we have to check it again here.
* The ioctl code will have planted some reason strings
* to explain what's happening, so the link state change
* messages won't be printed on the console. We wake the
* factotum to deal with link notifications, if any ...
*/
if (dmfe_check_link(dmfep)) {
}
break;
}
/*
* The 'reasons-for-link-change', if any, don't apply any more
*/
/*
* Finally, decide how to reply
*/
switch (status) {
default:
/*
* Error, reply with a NAK and EINVAL
*/
break;
case IOC_RESTART_ACK:
case IOC_ACK:
/*
* OK, reply with an ACK
*/
break;
case IOC_RESTART:
case IOC_REPLY:
/*
* OK, send prepared reply
*/
break;
case IOC_DONE:
/*
* OK, reply already sent
*/
break;
}
return (GLD_SUCCESS);
}
/*
*/
/*
* Determine local MAC address & broadcast address for this interface
*/
/*
* NOTE(review): the function header line and the ddi_prop lookup call are
* missing from this excerpt (the debug string suggests a MAC-address
* setup routine).  Code left byte-identical; comments only.
*/
static void
{
int err;	/* result of the "local-mac-address" property lookup */
/*
* We have to find the "vendor's factory-set address". This is
* the value of the property "local-mac-address", as set by OBP
* (or a .conf file!)
*/
if (err == DDI_PROP_SUCCESS) {
/* Accept the property only if it is exactly one Ethernet address */
if (propsize == ETHERADDRL)
}
DMFE_DEBUG(("dmfe_setup_mac_address: factory %s",
}
/*
* NOTE(review): the function header line and the DDI DMA calls themselves
* are missing from this excerpt; only the error-check skeleton remains.
* Code left byte-identical; comments only.
*/
static int
{
int err;	/* return status of each DDI DMA step */
/*
* Allocate handle
*/
if (err != DDI_SUCCESS)
return (DDI_FAILURE);
/*
* Allocate memory
*/
if (err != DDI_SUCCESS)
return (DDI_FAILURE);
/*
* Bind the two together
*/
if (err != DDI_DMA_MAPPED)
return (DDI_FAILURE);
return (DDI_FAILURE);
/* Optionally reserve <setup> bytes at the front for a setup frame */
if (setup > 0) {
} else {
dma_p->setup_dvma = 0;
}
return (DDI_SUCCESS);
}
/*
* This function allocates the transmit and receive buffers and descriptors.
*/
/*
* This function allocates the transmit and receive buffers and descriptors.
*
* NOTE(review): the function header line and the four allocation calls are
* missing from this excerpt; only the error-check skeleton remains.
* Code left byte-identical; comments only.
*/
static int
{
int err;	/* status of each allocation step */
/*
* Allocate memory & handles for TX descriptor ring
*/
if (err != DDI_SUCCESS)
return (DDI_FAILURE);
/*
* Allocate memory & handles for TX buffers
*/
if (err != DDI_SUCCESS)
return (DDI_FAILURE);
/*
* Allocate memory & handles for RX descriptor ring
*/
if (err != DDI_SUCCESS)
return (DDI_FAILURE);
/*
* Allocate memory & handles for RX buffers
*/
if (err != DDI_SUCCESS)
return (DDI_FAILURE);
return (DDI_SUCCESS);
}
/*
* NOTE(review): the function header line and most of the body (the DDI
* unbind/free calls) are missing from this excerpt; only fragments remain.
* Code left byte-identical.
*/
static void
{
}
dma_p->setup_dvma = 0;
}
}
}
/*
* This routine frees the transmit and receive buffers and descriptors.
* Make sure the chip is stopped before calling it!
*/
/*
* This routine frees the transmit and receive buffers and descriptors.
* Make sure the chip is stopped before calling it!
*
* NOTE(review): the function header line and entire body are missing
* from this excerpt.  Code left byte-identical.
*/
static void
{
}
/*
* NOTE(review): the function header line and most of the teardown body
* are missing from this excerpt.  Code left byte-identical.
*/
static void
{
/*
* Clean up and free all DMFE data structures
*/
}
}
}
/*
* NOTE(review): the function header line and the PCI config-space
* accesses are missing from this excerpt.  Code left byte-identical.
*/
static int
{
return (DDI_FAILURE);
/*
* (vendorid == DAVICOM_VENDOR_ID) && (deviceid == DEVICE_ID_9102)
*/
/*
* Turn on Bus Master Enable bit and ensure the device is not asleep
*/
return (DDI_SUCCESS);
}
/*
* Pairing of a named-kstat slot index with its display name; used to
* drive the kstat-creation tables below (terminated by index == -1).
*/
struct ks_index {
int index;	/* KS_* slot number within the kstat data area */
char *name;	/* name under which the statistic is published */
};
/*
* Names for the standard "mii" kstats, one entry per KS_MII_* slot;
* the { -1, NULL } entry terminates the table.
*/
static const struct ks_index ks_mii_names[] = {
{ KS_MII_XCVR_ADDR, "xcvr_addr" },
{ KS_MII_XCVR_ID, "xcvr_id" },
{ KS_MII_XCVR_INUSE, "xcvr_inuse" },
{ KS_MII_LINK_UP, "link_up" },
{ KS_MII_LINK_DUPLEX, "link_duplex" },
{ KS_MII_CAP_100FDX, "cap_100fdx" },
{ KS_MII_CAP_100HDX, "cap_100hdx" },
{ KS_MII_CAP_10FDX, "cap_10fdx" },
{ KS_MII_CAP_10HDX, "cap_10hdx" },
{ KS_MII_CAP_REMFAULT, "cap_rem_fault" },
{ KS_MII_CAP_AUTONEG, "cap_autoneg" },
{ KS_MII_ADV_CAP_100FDX, "adv_cap_100fdx" },
{ KS_MII_ADV_CAP_100HDX, "adv_cap_100hdx" },
{ KS_MII_ADV_CAP_10FDX, "adv_cap_10fdx" },
{ KS_MII_ADV_CAP_10HDX, "adv_cap_10hdx" },
{ KS_MII_ADV_CAP_REMFAULT, "adv_rem_fault" },
{ KS_MII_ADV_CAP_AUTONEG, "adv_cap_autoneg" },
{ KS_MII_LP_CAP_100FDX, "lp_cap_100fdx" },
{ KS_MII_LP_CAP_100HDX, "lp_cap_100hdx" },
{ KS_MII_LP_CAP_10FDX, "lp_cap_10fdx" },
{ KS_MII_LP_CAP_10HDX, "lp_cap_10hdx" },
{ KS_MII_LP_CAP_REMFAULT, "lp_cap_rem_fault" },
{ KS_MII_LP_CAP_AUTONEG, "lp_cap_autoneg" },
{ -1, NULL }
};
/*
* Names for the driver-defined kstats, one entry per KS_* slot;
* the { -1, NULL } entry terminates the table.
*/
static const struct ks_index ks_drv_names[] = {
{ KS_CYCLIC_RUN, "cyclic_run" },
{ KS_TICK_LINK_STATE, "link_state_change" },
{ KS_TICK_LINK_POLL, "link_state_poll" },
{ KS_LINK_INTERRUPT, "link_state_interrupt" },
{ KS_TX_STALL, "tx_stall_detect" },
{ KS_CHIP_ERROR, "chip_error_interrupt" },
{ KS_FACTOTUM_RUN, "factotum_run" },
{ KS_RECOVERY, "factotum_recover" },
{ KS_LINK_CHECK, "factotum_link_check" },
{ KS_LINK_UP_CNT, "link_up_cnt" },
{ KS_LINK_DROP_CNT, "link_drop_cnt" },
{ KS_LINK_CYCLE_UP_CNT, "link_cycle_up_cnt" },
{ KS_LINK_CYCLE_DOWN_CNT, "link_cycle_down_cnt" },
{ KS_MIIREG_BMSR, "mii_status" },
{ KS_MIIREG_ANAR, "mii_advert_cap" },
{ KS_MIIREG_ANLPAR, "mii_partner_cap" },
{ KS_MIIREG_ANER, "mii_expansion_cap" },
{ KS_MIIREG_DSCSR, "mii_dscsr" },
{ -1, NULL }
};
/*
* NOTE(review): the function header line and the kstat_create() calls are
* missing from this excerpt; only the comments and brace skeleton remain.
* Code left byte-identical.
*/
static void
{
/* Create and initialise standard "mii" kstats */
}
} else {
}
/* Create and initialise driver-defined kstats */
}
} else {
}
}
/*
* NOTE(review): the function header line (dmfe_resume, per the DDI_RESUME
* dispatch in attach below) and the checks themselves are missing from
* this excerpt; only the failure-return skeleton remains.  Code left
* byte-identical.
*/
static int
{
return (DDI_FAILURE);
/*
* Refuse to resume if the data structures aren't consistent
*/
return (DDI_FAILURE);
/*
* Refuse to resume if the chip's changed its identity (*boggle*)
*/
return (DDI_FAILURE);
return (DDI_FAILURE);
return (DDI_FAILURE);
return (DDI_FAILURE);
/*
* All OK, reinitialise h/w & kick off GLD scheduling
*/
return (DDI_SUCCESS);
}
/*
* attach(9E) -- Attach a device to the system
*
* Called once for each board successfully probed.
*/
/*
* NOTE(review): the function header line (dmfe_attach, per the trace
* string and the devo_attach entry below) and many statements are missing
* from this excerpt.  Code left byte-identical; comments only.
*/
static int
{
int instance;	/* this device instance number */
int err;	/* status of each initialisation step */
DMFE_GTRACE(("dmfe_attach($%p, %d) instance %d",
switch (cmd) {
default:
return (DDI_FAILURE);
case DDI_RESUME:
return (dmfe_resume(devinfo));
case DDI_ATTACH:
break;
}
/*
* Allocate GLD macinfo and DMFE private structures, and
* cross-link them so that given either one of these or
* the devinfo the others can be derived.
*/
return (DDI_FAILURE);
/*
* Initialize more fields in DMFE private data
* Determine the local MAC address
*/
#if DMFEDEBUG
#endif /* DMFEDEBUG */
instance);
/*
* Check for custom "opmode-reg-value" property;
* if none, use the defaults below for CSR6 ...
*/
/*
* Read chip ID & set up config space command register(s)
*/
goto attach_fail;
}
/*
* Register NDD-tweakable parameters
*/
if (dmfe_nd_init(dmfep)) {
goto attach_fail;
}
/*
* Map operating registers
*/
if (err != DDI_SUCCESS) {
goto attach_fail;
}
/*
* Allocate the TX and RX descriptors/buffers.
*/
if (err != DDI_SUCCESS) {
goto attach_fail;
}
/*
* Add the softint handler
*/
goto attach_fail;
}
/*
* Add the h/w interrupt handler & initialise mutexen
*/
goto attach_fail;
}
/*
* Create & initialise named kstats
*/
/*
* Reset & initialise the chip and the ring buffers
* Initialise the (internal) PHY
*/
(void) dmfe_gld_reset(macinfo);
goto attach_fail;
/*
* Initialize pointers to device specific functions which
* will be used by the generic layer.
*/
#if defined(VLAN_VID_NONE)
/*
* This assignment wouldn't be safe if running a new DMFE binary
* against an old GLD, because <gldm_send_tagged> extends the
* macinfo structure. So we need a runtime test as well as the
* compile-time one if we want full compatibility ...
*/
if (gld_recv_tagged != NULL)
#endif /* defined(VLAN_VID_NONE) */
/*
* Initialize board characteristics needed by the generic layer.
*/
#if defined(GLD_CAP_LINKSTATE)
/*
* This is safe even when running a new DMFE binary against an
* old GLD, because <gldm_capabilities> replaces a reserved
* field, rather than extending the macinfo structure
*/
#endif /* defined(GLD_CAP_LINKSTATE) */
/*
* Workaround for booting from NET1 on early versions of Solaris,
* enabled by patching the global variable (dmfe_net1_boot_support)
* to non-zero.
*
* Up to Solaris 8, strplumb() assumed that PPA == minor number,
* which is not (and never has been) true for any GLD-based driver.
* In later versions, strplumb() and GLD have been made to cooperate
* so this isn't necessary; specifically, strplumb() has been changed
* to assume PPA == instance (rather than minor number), which *is*
* true for GLD v2 drivers, and GLD also specifically tells it the
* PPA for legacy (v0) drivers.
*
* The workaround creates an internal minor node with a minor number
* equal to the PPA; this node shouldn't ever appear under /devices
* in userland, but can be found internally when setting up the root
* (NFS) filesystem.
*/
if (dmfe_net1_boot_support) {
}
/*
* Finally, we're ready to register ourselves with the GLD
* interface; if this succeeds, we're all ready to start()
*/
goto attach_fail;
/*
* Install the cyclic callback that we use to check for link
* status, transmit stall, etc. We ASSERT that this can't fail
*/
return (DDI_SUCCESS);
/* attach_fail: tear down whatever was set up, then fail */
return (DDI_FAILURE);
}
/*
*/
/*
* NOTE(review): the function header line (dmfe_suspend, per the
* DDI_SUSPEND dispatch in detach below) and the stop call are missing
* from this excerpt.  Code left byte-identical.
*/
static int
{
/*
* Just stop processing ...
*/
return (DDI_SUCCESS);
}
/*
* detach(9E) -- Detach a device from the system
*/
/*
* detach(9E) -- Detach a device from the system
*
* NOTE(review): the function header line and several statements are
* missing from this excerpt.  Code left byte-identical; comments only.
*/
static int
{
/*
* Get the GLD (macinfo) data from the devinfo and
* derive the driver's own state structure from that
*/
switch (cmd) {
default:
return (DDI_FAILURE);
case DDI_SUSPEND:
return (dmfe_suspend(dmfep));
case DDI_DETACH:
break;
}
/*
* Unregister from the GLD subsystem. This can fail, in
* particular if there are DLPI style-2 streams still open -
* in which case we just return failure without shutting
* down chip operations.
*/
return (DDI_FAILURE);
/*
* All activity stopped, so we can clean up & exit
*/
return (DDI_SUCCESS);
}
/*
* ========== Module Loading Data & Entry Points ==========
*/
/*
* STREAMS plumbing for the GLD framework.
*
* NOTE(review): most initialiser fields -- and the header lines of the
* two qinit structures between module_info and streamtab -- are missing
* from this excerpt.  Code left byte-identical.
*/
static struct module_info dmfe_module_info = {
0,
};
NULL,
NULL,
};
NULL,
NULL,
NULL,
};
static struct streamtab dmfe_streamtab = {
NULL,
};
/*
* Character/block entry points: everything is routed through the
* STREAMS table (cb_stream) -- the direct entry points are stubs.
*/
static struct cb_ops dmfe_cb_ops = {
nulldev, /* cb_open */
nulldev, /* cb_close */
nodev, /* cb_strategy */
nodev, /* cb_print */
nodev, /* cb_dump */
nodev, /* cb_read */
nodev, /* cb_write */
nodev, /* cb_ioctl */
nodev, /* cb_devmap */
nodev, /* cb_mmap */
nodev, /* cb_segmap */
nochpoll, /* cb_chpoll */
ddi_prop_op, /* cb_prop_op */
&dmfe_streamtab, /* cb_stream */
D_MP, /* cb_flag */
CB_REV, /* cb_rev */
nodev, /* cb_aread */
nodev /* cb_awrite */
};
/*
* Device operations vector; getinfo is delegated to GLD.
* NOTE(review): the devo_bus_ops initialiser line appears to be missing
* from this excerpt -- confirm against the full source.
*/
static struct dev_ops dmfe_dev_ops = {
DEVO_REV, /* devo_rev */
0, /* devo_refcnt */
gld_getinfo, /* devo_getinfo */
nulldev, /* devo_identify */
nulldev, /* devo_probe */
dmfe_attach, /* devo_attach */
dmfe_detach, /* devo_detach */
nodev, /* devo_reset */
&dmfe_cb_ops, /* devo_cb_ops */
NULL /* devo_power */
};
/*
* Loadable-driver linkage: ties the dev_ops vector and the version
* string (dmfe_ident, shown by modinfo) to the module framework.
*/
static struct modldrv dmfe_modldrv = {
&mod_driverops, /* Type of module. This one is a driver */
dmfe_ident, /* short description */
&dmfe_dev_ops /* driver specific ops */
};
/*
* Module linkage table.
* NOTE(review): the initialiser fields (rev and &dmfe_modldrv list) are
* missing from this excerpt.  Code left byte-identical.
*/
static struct modlinkage modlinkage = {
};
/*
* _info(9E) entry point.
* NOTE(review): the signature line and the mod_info() return are missing
* from this excerpt.  Code left byte-identical.
*/
int
{
DMFE_GTRACE(("_info called"));
}
/*
* _init(9E): compute the per-state TX-stall timeout tables, then
* register the module.
*
* NOTE(review): several statements (timeout computations, case labels,
* mod_install call) are missing from this excerpt.  Code left
* byte-identical; comments only.
*/
int
_init(void)
{
int i;	/* TX engine state index */
int status;	/* mod_install result */
DMFE_GTRACE(("_init called"));
/* Calculate global timing parameters */
for (i = 0; i <= TX_PROCESS_MAX_STATE; ++i) {
switch (i) {
/*
* The chip doesn't spontaneously recover from
* a stall in these states, so we reset early
*/
stall_100_tix[i] = tmp100;
stall_10_tix[i] = tmp10;
break;
default:
/*
* The chip has been seen to spontaneously recover
* after an apparent stall in the SUSPEND state,
* so we'll allow it rather longer to do so. As
* stalls in other states have not been observed,
* we'll use long timeouts for them too ...
*/
break;
}
}
if (status == 0)
return (status);
}
/*
* _fini(9E): unregister the module.
* NOTE(review): the mod_remove call and the success-path cleanup are
* missing from this excerpt.  Code left byte-identical.
*/
int
_fini(void)
{
int status;	/* mod_remove result */
DMFE_GTRACE(("_fini called"));
if (status == 0)
return (status);
}