/*
* sfe_util.c: general ethernet mac driver framework version 2.6
*
* Copyright (c) 2002-2008 Masayuki Murayama. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the author nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* System Header files.
*/
#include <sys/ethernet.h>
#include <sys/byteorder.h>
#include <sys/sysmacros.h>
#include "sfe_mii.h"
#include "sfe_util.h"
extern char ident[];
/* Debugging support */
#ifdef GEM_DEBUG_LEVEL
#else
/* assertions are compiled out in non-debug builds */
#define ASSERT(x)
#endif
/*
* Useful macros and typedefs
*/
#ifndef INT32_MAX
#define INT32_MAX	0x7fffffff
#endif
#ifndef VTAG_SIZE
#define VTAG_SIZE	4	/* 802.1Q tag: 2-octet TPID + 2-octet TCI */
#endif
#ifndef VTAG_TPID
#define VTAG_TPID	0x8100U	/* 802.1Q tag protocol identifier */
#endif
#define BOOLEAN(x) ((x) != 0)
/*
* Macros to distinguish chip generations.
*/
/*
* Private functions
*/
static void gem_mii_start(struct gem_dev *);
static void gem_mii_stop(struct gem_dev *);
/* local buffer management */
static int gem_alloc_memory(struct gem_dev *);
static void gem_free_memory(struct gem_dev *);
static void gem_init_rx_ring(struct gem_dev *);
static void gem_init_tx_ring(struct gem_dev *);
static void gem_tx_timeout(struct gem_dev *);
/* Ethernet broadcast address; the array name below is assumed */
static const uint8_t gem_etherbroadcastaddr[ETHERADDRL] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
/* ============================================================== */
/*
* Misc runtime routines
*/
/* ============================================================== */
/*
* Ether CRC calculation according to 21143 data sheet
*/
{
return (crc);
}
{
int idx;
int bit;
crc = 0xffffffff;
}
}
return (crc);
}
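/*
 * The two CRC routines above survive only in fragments. Below is a
 * minimal, self-contained sketch of the standard Ethernet CRC-32
 * algorithms they implement (as described in the 21143 data sheet);
 * the _sketch names and exact signatures are assumptions, not the
 * original interface.
 */
#ifdef GEM_SKETCH_CRC
static uint32_t
gem_ether_crc_le_sketch(const uint8_t *addr, int len)
{
	uint32_t	crc = 0xffffffffU;
	int		idx;
	int		bit;
	uint8_t		data;

	/* reflected CRC-32, polynomial 0xedb88320, bits fed LSB-first */
	for (idx = 0; idx < len; idx++) {
		for (data = addr[idx], bit = 0; bit < 8; bit++, data >>= 1) {
			crc = (crc >> 1)
			    ^ (((crc ^ data) & 1) ? 0xedb88320U : 0);
		}
	}
	return (crc);
}

static uint32_t
gem_ether_crc_be_sketch(const uint8_t *addr, int len)
{
	uint32_t	crc = 0xffffffffU;
	int		idx;
	int		bit;
	uint8_t		data;

	/* non-reflected CRC-32, polynomial 0x04c11db7, MSB-first shift */
	for (idx = 0; idx < len; idx++) {
		for (data = addr[idx], bit = 0; bit < 8; bit++, data >>= 1) {
			crc = (crc << 1)
			    ^ ((((crc >> 31) ^ data) & 1) ? 0x04c11db7U : 0);
		}
	}
	return (crc);
}
#endif /* GEM_SKETCH_CRC */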
int
{
}
static int
gem_count_bits(uint32_t x)	/* the original function name was elided; this one is assumed */
{
int i;
int cnt;
cnt = 0;
for (i = 0; i < 32; i++) {
if (x & (1 << i)) {
cnt++;
}
}
return (cnt);
}
#ifdef GEM_DEBUG_LEVEL
#ifdef GEM_DEBUG_VLAN
static void
{
uint8_t *p;
int rest;
int len;
char *bp;
msg[0] = 0;
offset = 0;
if (rest == 0) {
break;
}
}
offset = 0;
/* ethernet address */
"ether: %02x:%02x:%02x:%02x:%02x:%02x"
" -> %02x:%02x:%02x:%02x:%02x:%02x",
p[6], p[7], p[8], p[9], p[10], p[11],
p[0], p[1], p[2], p[3], p[4], p[5]);
/* vlan tag and ethertype */
ethertype = GET_ETHERTYPE(p);
ethertype = GET_ETHERTYPE(p);
}
/* ethernet packet length */
} else {
}
}
}
if (ethertype != ETHERTYPE_IP) {
goto x;
}
/* ip address */
offset += sizeof (struct ether_header);
ipproto = p[9];
p[12], p[13], p[14], p[15],
p[16], p[17], p[18], p[19],
/* cksum for pseudo header */
/* tcp or udp protocol header */
if (ipproto == IPPROTO_TCP) {
if (check_cksum) {
}
} else if (ipproto == IPPROTO_UDP) {
}
}
x:
}
#endif /* GEM_DEBUG_VLAN */
#endif /* GEM_DEBUG_LEVEL */
/* ============================================================== */
/*
* IO cache flush
*/
/* ============================================================== */
__INLINE__ void
{
int n;
int m;
/* sync active descriptors */
if (rx_desc_unit_shift < 0 || nslot == 0) {
/* no rx descriptor ring */
return;
}
if ((m = nslot - n) > 0) {
(off_t)0,
(size_t)(m << rx_desc_unit_shift),
how);
nslot = n;
}
how);
}
__INLINE__ void
{
int n;
int m;
/* sync active descriptors */
if (tx_desc_unit_shift < 0 || nslot == 0) {
/* no tx descriptor ring */
return;
}
if ((m = nslot - n) > 0) {
(size_t)(m << tx_desc_unit_shift),
how);
nslot = n;
}
how);
}
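/*
 * A minimal sketch of the wrap-around split performed by the two sync
 * routines above: when the range of active slots crosses the end of the
 * descriptor ring, it is synced as two contiguous ddi_dma_sync(9F)
 * calls, one for the part that wrapped to the ring top and one for the
 * part starting at the head slot. All parameter names are assumptions.
 */
#ifdef GEM_SKETCH_DESC_SYNC
static void
gem_desc_dma_sync_sketch(ddi_dma_handle_t handle, off_t ring_off,
    int head, int nslot, int ring_size, int desc_shift, uint_t how)
{
	int	m = head + nslot - ring_size;

	if (m > 0) {
		/* the tail of the range wrapped to the top of the ring */
		(void) ddi_dma_sync(handle, ring_off,
		    (size_t)(m << desc_shift), how);
		nslot -= m;
	}
	(void) ddi_dma_sync(handle,
	    ring_off + (off_t)(head << desc_shift),
	    (size_t)(nslot << desc_shift), how);
}
#endif /* GEM_SKETCH_DESC_SYNC */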
static void
{
}
/* ============================================================== */
/*
* Buffer management
*/
/* ============================================================== */
static void
{
"!%s: %s: tx_active: %d[%d] %d[%d] (+%d), "
"tx_softq: %d[%d] %d[%d] (+%d), "
"tx_free: %d[%d] %d[%d] (+%d), "
"tx_desc: %d[%d] %d[%d] (+%d), "
"intr: %d[%d] (+%d), ",
}
static void
{
dp->rx_buf_freecnt++;
}
/*
* gem_get_rxbuf: supply a receive buffer which has been mapped into
* DMA space.
*/
struct rxbuf *
{
int i;
int err;
dp->rx_buf_freecnt));
/*
* Get rx buffer management structure
*/
if (rbp) {
/* get one from the recycle list */
dp->rx_buf_freecnt--;
return (rbp);
}
/*
* Allocate a rx buffer management structure
*/
/* no memory */
return (NULL);
}
/*
* Prepare a back pointer to the device structure which will be
* referred to when freeing the buffer later.
*/
/* allocate a dma handle for rx data buffer */
"!%s: %s: ddi_dma_alloc_handle:1 failed, err=%d",
return (NULL);
}
/* allocate a bounce buffer for rx */
/*
* if the nic requires a header at the top of receive buffers,
* it may access the rx buffer randomly.
*/
NULL,
"!%s: %s: ddi_dma_mem_alloc: failed, err=%d",
return (NULL);
}
/* Map the bounce buffer into DMA space */
:(DDI_DMA_READ | DDI_DMA_STREAMING)),
NULL,
&count)) != DDI_DMA_MAPPED) {
"!%s: ddi_dma_addr_bind_handle: failed, err=%d",
/*
* we failed to allocate a dma resource
* for the rx bounce buffer.
*/
return (NULL);
}
/* collect the rest of the DMA mapping, i.e. the remaining cookies */
for (i = 1; i < count; i++) {
}
/* Now we successfully prepared an rx buffer */
dp->rx_buf_allocated++;
return (rbp);
}
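/*
 * gem_get_rxbuf above follows the canonical three-step DDI DMA setup:
 * ddi_dma_alloc_handle(9F), ddi_dma_mem_alloc(9F), then
 * ddi_dma_addr_bind_handle(9F). A minimal self-contained sketch under
 * assumed attributes; only the first DMA cookie is returned here, while
 * the driver above also collects the remaining cookies.
 */
#ifdef GEM_SKETCH_DMA_ALLOC
static caddr_t
gem_alloc_dma_buf_sketch(dev_info_t *dip, size_t len,
    ddi_dma_attr_t *dma_attrp, ddi_device_acc_attr_t *acc_attrp,
    ddi_dma_handle_t *dhp, ddi_acc_handle_t *ahp, ddi_dma_cookie_t *cookiep)
{
	caddr_t	buf;
	size_t	real_len;
	uint_t	count;

	/* 1. allocate a dma handle */
	if (ddi_dma_alloc_handle(dip, dma_attrp, DDI_DMA_SLEEP, NULL,
	    dhp) != DDI_SUCCESS) {
		return (NULL);
	}
	/* 2. allocate dma-able memory bound to the handle's attributes */
	if (ddi_dma_mem_alloc(*dhp, len, acc_attrp, DDI_DMA_STREAMING,
	    DDI_DMA_SLEEP, NULL, &buf, &real_len, ahp) != DDI_SUCCESS) {
		ddi_dma_free_handle(dhp);
		return (NULL);
	}
	/* 3. map the memory into DMA space and get its cookie(s) */
	if (ddi_dma_addr_bind_handle(*dhp, NULL, buf, real_len,
	    DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
	    cookiep, &count) != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(ahp);
		ddi_dma_free_handle(dhp);
		return (NULL);
	}
	return (buf);
}
#endif /* GEM_SKETCH_DMA_ALLOC */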
/* ============================================================== */
/*
* memory resource management
*/
/* ============================================================== */
static int
{
int i;
int err;
int tx_buf_len;
if (req_size > 0) {
/*
*/
"!%s: %s: ddi_dma_alloc_handle failed: %d",
return (ENOMEM);
}
"!%s: %s: ddi_dma_mem_alloc failed: "
"ret %d, request size: %d",
return (ENOMEM);
}
"!%s: %s: ddi_dma_addr_bind_handle failed: %d",
return (ENOMEM);
}
/* set base of rx descriptor ring */
/* set base of tx descriptor ring */
/* set base of io area */
}
/*
* Prepare DMA resources for tx packets
*/
/* Special dma attribute for tx bounce buffers */
/* The size of tx bounce buffers must be the max tx packet size. */
/* setup bounce buffers for tx packets */
"!%s: %s ddi_dma_alloc_handle for bounce buffer failed:"
" err=%d, i=%d",
goto err_alloc_dh;
}
"!%s: %s: ddi_dma_mem_alloc for bounce buffer failed"
"ret %d, request size %d",
goto err_alloc_dh;
}
"!%s: %s: ddi_dma_addr_bind_handle for bounce buffer failed: %d",
goto err_alloc_dh;
}
}
return (0);
while (i-- > 0) {
}
}
if (dp->desc_dma_handle) {
}
return (ENOMEM);
}
static void
{
int i;
if (dp->desc_dma_handle) {
}
/* Free dma handles for Tx */
/* Free bounce buffer associated to each txbuf */
}
/* Free rx buffer */
dp->rx_buf_freecnt--;
/* release DMA mapping */
/* free dma handles for rx bounce buffers */
/* they always have dma mappings */
/* free the associated bounce buffer and dma handle */
/* free the associated dma handle */
/* free the base memory of rx buffer management */
}
}
/* ============================================================== */
/*
 * Rx/Tx buffer ring manipulation
 */
/* ============================================================== */
/*
* Initialize an empty rx ring.
*/
static void
{
int i;
/* make a physical chain of rx descriptors */
for (i = 0; i < rx_ring_size; i++) {
}
}
/*
* Prepare rx buffers and put them into the rx buffer/descriptor ring.
*/
static void
{
int i;
int nrbuf;
/* Now we have no active buffers in rx ring */
for (i = 0; i < nrbuf; i++) {
break;
}
}
}
/*
* Reclaim active rx buffers in rx buffer ring.
*/
static void
{
int i;
#ifdef GEM_DEBUG_LEVEL
int total;
#endif
/*
* clean up HW descriptors
*/
for (i = 0; i < rx_ring_size; i++) {
}
#ifdef GEM_DEBUG_LEVEL
total = 0;
#endif
/*
* Reclaim allocated rx buffers
*/
#ifdef GEM_DEBUG_LEVEL
total++;
#endif
/* remove the first one from rx buffer list */
/* recycle the rxbuf */
}
"!%s: %s: %d buffers freeed, total: %d free",
}
/*
* Initialize an empty transmit buffer/descriptor ring
*/
static void
{
int i;
/* initialize active list and free list */
dp->tx_slots_base =
for (i = 0; i < tx_ring_size; i++) {
}
}
static void
{
}
tbp->txb_nfrags = 0;
}
#pragma inline(gem_txbuf_free_dma_resources)
/*
* reclaim active tx buffers and reset positions in tx rings.
*/
static void
{
int i;
#ifdef GEM_DEBUG_LEVEL
int err;
#endif
/*
* clean up all HW descriptors
*/
for (i = 0; i < tx_ring_size; i++) {
}
/* dequeue all active and loaded buffers */
}
#ifdef GEM_DEBUG_LEVEL
/* ensure that no dma resources for tx are in use now */
err = 0;
"%s: %s: sn:%d[%d] mp:%p nfrags:%d",
err = 1;
}
sn++;
}
if (err) {
"gem_clean_tx_buf: tbp->txb_mp != NULL");
}
#endif
/* recycle buffers; now there are no active tx buffers in the ring */
/* fix positions in tx buffer rings */
}
/*
* Reclaim transmitted buffers from tx buffer/descriptor ring.
*/
__INLINE__ int
{
now = ddi_get_lbolt();
if (now == (clock_t)0) {
/* make a non-zero timestamp */
now--;
}
#if GEM_DEBUG_LEVEL > 2
"testing active_head:%d[%d], active_tail:%d[%d]",
}
#endif
#ifdef DEBUG
if (dp->tx_reclaim_busy == 0) {
/* check tx buffer management consistency */
/* EMPTY */
}
#endif
dp->tx_reclaim_busy++;
/* sync all active HW descriptors */
int ndescs;
if (ndescs == 0) {
/* skip errored descriptors */
continue;
}
if (txstat == 0) {
/* not transmitted yet */
break;
}
}
if (txstat & GEM_TX_ERR) {
err = GEM_FAILURE;
}
#if GEM_DEBUG_LEVEL > 4
}
#endif
/* free transmitted descriptors */
}
/* we have reclaimed one or more tx buffers */
/* If we passed the next interrupt position, update it */
}
}
/* free dma mapping resources associated with transmitted tx buffers */
#if GEM_DEBUG_LEVEL > 2
}
#endif
}
/* recycle the tx buffers */
if (--dp->tx_reclaim_busy == 0) {
/* we are the last thread who can update free tail */
#if GEM_DEBUG_LEVEL > 4
/* check that all resources have been deallocated */
if (tbp->txb_nfrags) {
/* in use */
break;
}
sn++;
}
#endif
dp->tx_free_tail =
}
if (!dp->mac_active) {
/* someone may be waiting for me. */
}
#if GEM_DEBUG_LEVEL > 2
"free_head:%d free_tail:%d(+%d) added:%d",
#endif
return (err);
}
#pragma inline(gem_reclaim_txbuf)
/*
* Write tx descriptors in an out-of-order manner
*/
static void
{
int (*tx_desc_write)
sn = start_slot;
do {
#if GEM_DEBUG_LEVEL > 1
flags |= GEM_TXFLAG_INTR;
}
#endif
/* write a tx descriptor */
flags = 0;
sn++;
}
static size_t
{
/* we use bounce buffer for the packet */
off = 0;
if (flag & GEM_TXFLAG_SWVTAG) {
/* need to increase min packet size */
}
/* copy the rest */
}
}
/*
* Extend the packet to the minimum packet size explicitly.
* For software vlan packets, we shouldn't use the tx autopad
* function because the nic may not be aware of vlan.
* We must keep 46 octets of payload even when a vlan tag is used.
*/
}
"!%s: %s: copy: addr:0x%llx len:0x%x, vtag:0x%04x, min_pkt:%d",
min_pkt));
/* save misc info */
#ifdef DEBUG_MULTIFRAGS
}
#endif
return (off);
}
#pragma inline(gem_setup_txbuf_copy)
static void
{
/* update HW descriptors from soft queue */
"%s: %s: called, softq %d %d[+%d], desc %d %d[+%d]",
/* advance softq head and active tail */
}
#pragma inline(gem_tx_start_unit)
#ifdef GEM_DEBUG_LEVEL
#endif
/*
* check ether packet type and ip protocol
*/
static uint64_t
{
int off;
/*
* prepare a contiguous header of the packet for protocol analysis
*/
/* we use work buffer to copy mblk */
}
} else {
/* we can use mblk without copy */
}
/* process vlan tag for GLD v3 */
} else {
}
}
return (flag);
}
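/*
 * A minimal sketch of the ethertype check the function above performs:
 * read the type field at offset 12, and if it is the 802.1Q TPID
 * (VTAG_TPID, 0x8100), the real ethertype follows the 4-octet vlan tag.
 * The function name is hypothetical.
 */
#ifdef GEM_SKETCH_ETHERTYPE
static uint16_t
gem_get_ethertype_sketch(const uint8_t *p)
{
	uint16_t	ethertype;

	ethertype = (p[12] << 8) | p[13];
	if (ethertype == 0x8100U) {
		/* vlan tagged frame: skip the 2-octet TPID + 2-octet TCI */
		ethertype = (p[16] << 8) | p[17];
	}
	return (ethertype);
}
#endif /* GEM_SKETCH_ETHERTYPE */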
/*
* gem_send_common is an exported function because hw-dependent routines may
* use it for sending control frames, like setup frames for the 2114x chipset.
*/
mblk_t *
{
int nmblk;
int avail;
int i;
nmblk = 1;
nmblk++;
}
#ifdef GEM_DEBUG_LEVEL
gem_send_cnt[0]++;
#endif
/*
* Acquire resources
*/
if (dp->mac_suspended) {
while (mp) {
}
return (NULL);
}
/* don't send data packets while mac isn't active */
/* XXX - should we discard packets? */
return (mp_head);
}
/* allocate free slots */
"!%s: %s: called, free_head:%d free_tail:%d(+%d) req:%d",
if (avail == 0) {
/* no resources; short cut */
goto done;
}
}
/* update the last interrupt position if tx buffers are exhausted. */
}
/* remove one from the mblk list */
/* statistics for non-unicast packets */
ETHERADDRL) == 0) {
bcast++;
} else {
mcast++;
}
}
/* save misc info */
}
/* Append the tbp at the tail of the active tx buffer list */
/* extend the tail of softq, as new packets have been ready. */
/*
* The device status has changed while we were
* preparing tx bufs.
* As we are the last one to make tx non-busy,
* wake up anyone who may be waiting for us.
*/
} else {
}
}
done:
return (mp_head);
}
/* ========================================================== */
/*
* error detection and restart routines
*/
/* ========================================================== */
int
{
#ifdef GEM_DEBUG_LEVEL
#if GEM_DEBUG_LEVEL > 1
#endif
#endif
if (dp->mac_suspended) {
/* should we return GEM_FAILURE ? */
return (GEM_FAILURE);
}
/*
* We should avoid calling any routines except xxx_chip_reset
* when we are resuming the system.
*/
if (dp->mac_active) {
if (flags & GEM_RESTART_KEEP_BUF) {
/* stop rx gracefully */
}
}
/* reset the chip. */
goto err;
}
goto err;
}
/* setup media mode if the link has been up */
goto err;
}
}
/* setup mac address and enable rx filter */
goto err;
}
/*
* XXX - a panic happened because of linkdown.
* We must check mii_state here, because the link can be down just
* before the restart event happens. If the link is down now,
* gem_mac_start() will be called from gem_mii_link_check() when
* the link comes up later.
*/
/* restart the nic */
(void) gem_mac_start(dp);
}
return (GEM_SUCCESS);
err:
return (GEM_FAILURE);
}
static void
{
now = ddi_get_lbolt();
goto schedule_next;
}
/* reclaim transmitted buffers to check whether the transmitter hangs. */
/* tx error happened, reset transmitter in the chip */
(void) gem_restart_nic(dp, 0);
goto schedule_next;
}
/* check if the transmitter thread is stuck */
/* no tx buffer is loaded to the nic */
if (dp->tx_blocked &&
"gem_tx_timeout: tx blocked");
}
goto schedule_next;
}
goto schedule_next;
}
/* discard untransmitted packet and restart tx. */
/* restart the downstream if needed */
if (tx_sched) {
}
"!%s: blocked:%d active_head:%d active_tail:%d desc_intr:%d",
dp->timeout_id =
timeout((void (*)(void *))gem_tx_timeout,
}
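/*
 * The watchdog above re-arms itself with timeout(9F). A minimal sketch
 * of the arm/cancel pattern used by the watchers in this file; the
 * interval argument is a placeholder, and the bare `;' statements in
 * the stop paths elsewhere suggest the original waited for a running
 * callback to drain before clearing the id.
 */
#ifdef GEM_SKETCH_TIMEOUT
static void
gem_arm_tx_watchdog_sketch(struct gem_dev *dp, clock_t interval_ticks)
{
	/* schedule the next call of the watchdog */
	dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
	    (void *)dp, interval_ticks);
}

static void
gem_cancel_tx_watchdog_sketch(struct gem_dev *dp)
{
	if (dp->timeout_id) {
		(void) untimeout(dp->timeout_id);
		dp->timeout_id = 0;
	}
}
#endif /* GEM_SKETCH_TIMEOUT */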
/* ================================================================== */
/*
* Interrupt handler
*/
/* ================================================================== */
static void
{
/*
* Add new buffers into active rx buffer list
*/
} else {
}
/* need to notify the lower layer of the new tail */
rbp->rxb_nfrags);
}
}
#pragma inline(gem_append_rxbuf)
mblk_t *
{
/* allocate a new mblk */
/*
* flush the range of the entire buffer to invalidate
* all corresponding dirty entries in the iocache.
*/
}
return (mp);
}
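/*
 * A minimal sketch of the copy-receive path above: sync the bounce
 * buffer for the kernel to invalidate stale iocache entries, allocate
 * an mblk, and copy the frame in. The rxb_dh and rxb_buf field names
 * are assumptions; only rxb_nfrags appears in the surviving text.
 */
#ifdef GEM_SKETCH_RX_COPY
static mblk_t *
gem_get_packet_copy_sketch(struct rxbuf *rbp, size_t len)
{
	mblk_t	*mp;

	/* invalidate iocache entries covering the received frame */
	(void) ddi_dma_sync(rbp->rxb_dh, 0, len, DDI_DMA_SYNC_FORKERNEL);

	if ((mp = allocb(len, BPRI_MED)) == NULL) {
		/* no memory; the caller discards the packet */
		return (NULL);
	}
	bcopy(rbp->rxb_buf, mp->b_rptr, len);
	mp->b_wptr = mp->b_rptr + len;
	return (mp);
}
#endif /* GEM_SKETCH_RX_COPY */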
#ifdef GEM_DEBUG_LEVEL
#endif
int
{
int cnt = 0;
newbufs_tailp = &newbufs;
int len;
if (cnt == 0) {
cnt,
}
if (rx_header_len > 0) {
}
rbp->rxb_nfrags))
& (GEM_RX_DONE | GEM_RX_ERR)) == 0) {
/* not received yet */
break;
}
/* Remove the head of the rx buffer list */
cnt--;
if (rxstat & GEM_RX_ERR) {
goto next;
}
/*
* Copy the packet
*/
/* no memory, discard the packet */
goto next;
}
/*
* Process VLAN tag
*/
}
/* check packet size */
goto next;
}
goto next;
}
#ifdef GEM_DEBUG_VLAN
}
#endif
/* append the received packet to the temporary rx buffer list */
ETHERADDRL) == 0) {
} else {
}
}
next:
/* append the new one to the temporary new buffer list */
*newbufs_tailp = rbp;
}
/* advance rx_active_head */
}
/* terminate the working list */
*newbufs_tailp = NULL;
}
if (newbufs) {
/*
* refill the rx list with new buffers
*/
/* save current tail */
/* call the hw-dependent start routine if we have one. */
}
if (rx_head) {
/*
* send up received packets
*/
}
#ifdef GEM_DEBUG_LEVEL
#endif
return (cnt);
}
{
goto x;
}
/* XXX - we must not have any packets in soft queue */
/*
* If we have no chance of getting more free tx buffers while blocked,
* it is worth rescheduling the downstream, i.e. the tx side.
*/
/*
* As no further tx-done interrupts are scheduled, this
* is the last chance to kick tx side, which may be
* blocked now, otherwise the tx side never works again.
*/
dp->tx_max_packets =
}
x:
return (tx_sched);
}
static uint_t
{
if (dp->mac_suspended) {
return (DDI_INTR_UNCLAIMED);
}
if (ret == DDI_INTR_UNCLAIMED) {
return (ret);
}
if (!dp->mac_active) {
}
if (ret & INTR_RESTART_TX) {
ret &= ~INTR_RESTART_TX;
}
return (ret);
}
static void
{
/* schedule the next call of gem_intr_watcher */
}
/* ======================================================================== */
/*
* MII support routines
*/
/* ======================================================================== */
static void
{
/* choose media mode */
} else if (dp->anadv_100hdx) {
} else {
}
}
{
}
}
void
{
}
}
#define fc_cap_decode(x) \
((((x) & MII_ABILITY_PAUSE) ? 1 : 0) | \
(((x) & MII_ABILITY_ASMPAUSE) ? 2 : 0))
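/*
 * For reference: fc_cap_decode() packs the two 802.3x pause ability bits
 * into a 2-bit value: bit0 = PAUSE (symmetric), bit1 = ASMPAUSE
 * (asymmetric direction). The decoded value lines up with the
 * gem_fc_type[] strings below; a hedged illustration:
 *
 *	fc_cap_decode(x)	negotiated flow control
 *	0			none
 *	1			symmetric pause
 *	2			asymmetric pause, tx side
 *	3			asymmetric pause, rx side (PAUSE | ASMPAUSE)
 *
 * e.g. dp->flow_control = fc_resolution_table[fc_cap_decode(lpable)];
 * (fc_resolution_table is a hypothetical name for the elided lookup table.)
 */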
int
{
0, /* none */
MII_ABILITY_PAUSE, /* symmetric */
MII_ABILITY_ASMPAUSE, /* tx */
MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE, /* rx */
};
/*
* Configure bits in advertisement register
*/
if ((mii_stat & MII_STATUS_ABILITY_TECH) == 0) {
/* it's funny */
return (GEM_FAILURE);
}
/* Do not change the rest of the ability bits in the advert reg */
"!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d",
if (dp->anadv_100t4) {
}
if (dp->anadv_100fdx) {
}
if (dp->anadv_100hdx) {
}
if (dp->anadv_10fdx) {
}
if (dp->anadv_10hdx) {
}
/* set flow control capability */
"!%s: %s: setting MII_AN_ADVERT reg:%b, mii_mode:%d, fc:%d",
dp->anadv_flow_control));
if (mii_stat & MII_STATUS_XSTATUS) {
/*
* 1000Base-T GMII support
*/
if (!dp->anadv_autoneg) {
/* enable manual configuration */
} else {
val = 0;
if (dp->anadv_1000fdx) {
}
if (dp->anadv_1000hdx) {
}
}
"!%s: %s: setting MII_1000TC reg:%b",
}
return (GEM_SUCCESS);
}
/* none */
/* sym */
/* tx */
};
static char *gem_fc_type[] = {
"without",
"with symmetric",
"with tx",
"with rx",
};
{
int linkdown_action;
now = ddi_get_lbolt();
/*
* For NWAM, don't show linkdown state right
* after the system boots
*/
if (dp->linkup_delay > 0) {
} else {
/* link up timeout */
}
}
case MII_STATE_UNKNOWN:
/* power-up, DP83840 requires 32 sync bits */
goto reset_phy;
case MII_STATE_RESETTING:
/* don't read phy registers in resetting */
goto next;
}
/* Timer expired, ensure reset bit is not set */
/* some phys need sync bits after reset */
}
if (val & MII_CONTROL_RESET) {
"!%s: time:%ld resetting phy not complete."
" mii_control:0x%b",
}
/* ensure neither isolated nor pwrdown nor auto-nego mode */
/* XXX -- this operation is required for NS DP83840A. */
/* As resetting PHY has completed, configure PHY registers */
/* we failed to configure PHY. */
goto reset_phy;
}
/* mii_config may disable autonegotiation */
dp->mii_lpable = 0;
dp->mii_advert = 0;
dp->mii_ctl1000 = 0;
dp->mii_stat1000 = 0;
if (!dp->anadv_autoneg) {
/* skip auto-negotiation phase */
dp->mii_interval = 0;
goto next_nowait;
}
/* Issue auto-negotiation command */
goto autonego;
/*
* Autonegotiation is in progress
*/
/*
* wait for a while, typically autonegotiation
* completes in 2.3 - 2.5 sec.
*/
goto next;
}
/* read PHY status */
"!%s: %s: called: mii_state:%d MII_STATUS reg:%b",
if (status & MII_STATUS_REMFAULT) {
/*
* The link partner told us that something wrong happened.
* What do we do?
*/
"!%s: auto-negotiation failed: remote fault",
goto autonego;
}
if ((status & MII_STATUS_ANDONE) == 0) {
/*
* Auto-negotiation timed out;
* try again without resetting the phy.
*/
if (!dp->mii_supress_msg) {
"!%s: auto-negotiation failed: timeout",
}
goto autonego;
}
/*
* Auto-negotiation is in progress. Wait.
*/
goto next;
}
/*
* Auto-negotiation has completed.
* Assume linkdown and fall through.
*/
"!%s: auto-negotiation completed, MII_STATUS:%b",
goto next;
}
diff = 0;
goto next_nowait;
case MII_STATE_AN_DONE:
/*
* Auto-negotiation has completed. Now we can set up the media.
*/
/* wait for a while */
goto next;
}
/*
* set up the result of auto negotiation
*/
/*
* Read the registers required to determine the current
* duplex mode and media speed.
*/
/*
* As the link watcher context has been suspended,
* 'status' is invalid. We must re-read the status register here.
*/
}
if (exp == 0xffff) {
/* some phys don't have exp register */
exp = 0;
}
ctl1000 = 0;
stat1000 = 0;
}
"!%s: auto-negotiation done, advert:%b, lpable:%b, exp:%b",
"! MII_1000TC:%b, MII_1000TS:%b",
}
(exp & MII_AN_EXP_LPCANAN) == 0) {
"!%s: but the link partnar doesn't seem"
" to have auto-negotiation capability."
" please check the link configuration.",
}
/*
* it should be the result of parallel detection, which
* cannot detect duplex mode.
*/
if (lpable & MII_ABILITY_100BASE_TX) {
/*
* we prefer full duplex mode for 100Mbps
* connections, if we can.
*/
}
}
/*
* as the link partner isn't capable of auto-negotiation,
* use fixed mode temporarily.
*/
} else if (lpable == 0) {
goto reset_phy;
}
/*
* configure current link mode according to AN priority.
*/
if ((ctl1000 & MII_1000TC_ADV_FULL) &&
(stat1000 & MII_1000TS_LP_FULL)) {
/* 1000BaseT & full duplex */
} else if ((ctl1000 & MII_1000TC_ADV_HALF) &&
(stat1000 & MII_1000TS_LP_HALF)) {
/* 1000BaseT & half duplex */
} else if (val & MII_ABILITY_100BASE_TX_FD) {
/* 100BaseTx & full duplex */
} else if (val & MII_ABILITY_100BASE_T4) {
/* 100BaseT4 & full duplex */
} else if (val & MII_ABILITY_100BASE_TX) {
/* 100BaseTx & half duplex */
} else if (val & MII_ABILITY_10BASE_T_FD) {
/* 10BaseT & full duplex */
} else if (val & MII_ABILITY_10BASE_T) {
/* 10BaseT & half duplex */
} else {
/*
* It seems that the link partner doesn't have
* auto-negotiation capability and our PHY
* could not report the correct current mode.
* We guess the current mode from the mii_control register.
*/
/* select 100m full or 10m half */
"!%s: auto-negotiation done but "
"common ability not found.\n"
"PHY state: control:%b advert:%b lpable:%b\n"
"guessing %d Mbps %s duplex mode",
}
if (dp->full_duplex) {
dp->flow_control =
[fc_cap_decode(lpable)];
} else {
}
/* FALLTHROUGH */
case MII_STATE_MEDIA_SETUP:
/* use short interval */
if ((!dp->anadv_autoneg) ||
/*
* write specified mode to phy.
*/
if (dp->full_duplex) {
}
case GEM_SPD_1000:
break;
case GEM_SPD_100:
val |= MII_CONTROL_100MB;
break;
default:
/* FALLTHROUGH */
case GEM_SPD_10:
/* for GEM_SPD_10, do nothing */
break;
}
}
}
/* notify the result of auto-negotiation to mac */
}
/* for built-in sis900 */
/* XXX - this code should be removed. */
}
goto next_nowait;
case MII_STATE_LINKDOWN:
if (status & MII_STATUS_LINKUP) {
/*
* Link going up
*/
"!%s: link up detected: mii_stat:%b",
/*
* MII_CONTROL_100MB and MII_CONTROL_FDUPLEX are
* ignored when MII_CONTROL_ANE is set.
*/
"!%s: Link up: %d Mbps %s duplex %s flow control",
/* XXX - we need another timer to watch statistics */
dp->mii_interval = 0;
}
if (!dp->mac_active) {
(void) gem_mac_start(dp);
}
}
goto next;
}
if (dp->anadv_autoneg) {
/*
* link down timer expired.
* need to restart auto-negotiation.
*/
goto restart_autonego;
}
}
/* don't change mii_state */
break;
case MII_STATE_LINKUP:
if ((status & MII_STATUS_LINKUP) == 0) {
/*
* Link going down
*/
"!%s: link down detected: mii_stat:%b",
dp->mac_active &&
(void) gem_mac_stop(dp, 0);
if (dp->tx_blocked) {
/* drain tx */
}
}
if (dp->anadv_autoneg) {
/* need to restart auto-negotiation */
goto restart_autonego;
}
/* for built-in sis900 */
}
goto next;
}
/* don't change mii_state */
dp->mii_interval = 0;
goto next;
}
break;
}
goto next;
/* Actions at the end of the state routine */
switch (linkdown_action) {
case MII_ACTION_RESET:
if (!dp->mii_supress_msg) {
}
goto reset_phy;
case MII_ACTION_NONE:
goto autonego;
}
/* PHY will restart autonego automatically */
goto next;
case MII_ACTION_RSA:
if (!dp->mii_supress_msg) {
}
goto autonego;
default:
}
/* NOTREACHED */
if (!dp->mii_supress_msg) {
}
}
goto next;
if (!dp->mii_supress_msg) {
}
next:
/* we must schedule next mii_watcher */
timeout((void (*)(void *))&gem_mii_link_watcher,
}
/* notify new mii link state */
dp->linkup_delay = 0;
GEM_LINKUP(dp);
} else if (dp->linkup_delay <= 0) {
}
} else if (dp->linkup_delay < 0) {
/* first linkup timeout */
dp->linkup_delay = 0;
}
return (tx_sched);
}
static void
{
dp->link_watcher_id = 0;
#if GEM_DEBUG_LEVEL > 2
if (dp->link_watcher_id == 0) {
}
#endif
if (tx_sched) {
/* kick potentially stopped downstream */
}
}
int
{
/*
* Scan PHY
*/
/* ensure sync bits are sent */
dp->mii_status = 0;
/* Try default phy first */
if (dp->mii_phy_addr) {
goto PHY_found;
}
if (dp->mii_phy_addr < 0) {
return (GEM_FAILURE);
}
"!%s: failed to probe default MII PHY at %d",
}
/* Try all possible addresses */
goto PHY_found;
}
}
goto PHY_found;
}
}
return (GEM_FAILURE);
if (dp->mii_phy_addr < 0) {
} else {
}
dp->mii_xstatus = 0;
if (status & MII_STATUS_XSTATUS) {
}
/* check if the phy can advertise pause abilities */
if ((adv & MII_ABILITY_PAUSE) == 0) {
}
if ((adv & MII_ABILITY_ASMPAUSE) == 0) {
}
return (GEM_SUCCESS);
}
static void
{
/* make the first call of the link check */
(void) gem_mii_link_watcher(dp);
}
static void
{
/* Ensure timer routine stopped */
if (dp->link_watcher_id) {
;
dp->link_watcher_id = 0;
}
}
{
char *valstr;
char *cp;
int c;
int i;
int j;
uint8_t v;
uint8_t d;
/*
* Get ethernet address from .conf file
*/
return (B_FALSE);
}
goto syntax_err;
}
j = 0;
ored = 0;
for (;;) {
v = 0;
for (i = 0; i < 2; i++) {
c = *cp++;
if (c >= 'a' && c <= 'f') {
d = c - 'a' + 10;
} else if (c >= 'A' && c <= 'F') {
d = c - 'A' + 10;
} else if (c >= '0' && c <= '9') {
d = c - '0';
} else {
goto syntax_err;
}
v = (v << 4) | d;
}
mac[j++] = v;
ored |= v;
if (j == ETHERADDRL) {
/* done */
break;
}
c = *cp++;
if (c != ':') {
goto syntax_err;
}
}
if (ored == 0) {
goto err;
}
for (i = 0; i < ETHERADDRL; i++) {
}
return (B_TRUE);
"!%s: read mac addr: trying .conf: syntax err %s",
err:
return (B_FALSE);
}
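/*
 * Example of the driver.conf value this parser accepts; the property
 * name "mac-addr" is an assumption, only the value syntax is evident
 * from the code above:
 *
 *	mac-addr="00:11:22:33:44:55";
 *
 * Both lowercase and uppercase hex digits are accepted, the six octets
 * must be separated by ':', and an all-zero address is rejected.
 */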
/* ============================================================== */
/*
 * MAC layer management
 */
/* ============================================================== */
static int
{
}
/*
* gem_mac_init: cold start
*/
static int
{
if (dp->mac_suspended) {
return (GEM_FAILURE);
}
/* reset transmitter state */
dp->tx_reclaim_busy = 0;
return (GEM_FAILURE);
}
return (GEM_SUCCESS);
}
/*
* gem_mac_start: warm start
*/
static int
{
/* enable tx and rx */
if (dp->mac_suspended) {
return (GEM_FAILURE);
}
/* setup rx buffers */
return (GEM_FAILURE);
}
/* load untransmitted packets into the nic */
/* issue preloaded tx buffers */
}
return (GEM_SUCCESS);
}
static int
{
int i;
#ifdef GEM_DEBUG_LEVEL
#endif
/*
* Block transmits
*/
if (dp->mac_suspended) {
return (GEM_SUCCESS);
}
}
if ((flags & GEM_RESTART_NOWAIT) == 0) {
/*
* Wait until all tx buffers are sent.
*/
i = 0;
#ifdef GEM_DEBUG_LEVEL
now = ddi_get_lbolt();
#endif
if (i > wait_time) {
/* timeout */
break;
}
(void) gem_reclaim_txbuf(dp);
drv_usecwait(100);
i += 100;
}
"!%s: %s: the nic have drained in %d uS, real %d mS",
}
/*
* Now we can stop the nic safely.
*/
}
}
/*
* Clear all rx buffers
*/
if (flags & GEM_RESTART_KEEP_BUF) {
(void) gem_receive(dp);
}
/*
* Update final statistics
*/
/*
* Clear all pending tx packets
*/
if (flags & GEM_RESTART_KEEP_BUF) {
/* restore active tx buffers */
} else {
}
return (ret);
}
static int
{
int cnt;
int err;
if (dp->mac_suspended) {
return (GEM_FAILURE);
}
/* append the new address at the end of the mclist */
}
}
/* multicast address list overflow */
} else {
}
/* tell new multicast list to the hardware */
return (err);
}
static int
{
int i;
int cnt;
int err;
if (dp->mac_suspended) {
return (GEM_FAILURE);
}
dp->mc_count_req--;
for (i = 0; i < cnt; i++) {
continue;
}
/* shrink the mclist by copying forward */
if (len > 0) {
}
break;
}
/* multicast address list overflow */
} else {
}
/* In gem v2, don't hold xmitlock when calling set_rx_filter */
return (err);
}
/* ============================================================== */
/*
* ND interface
*/
/* ============================================================== */
enum {
#ifdef DEBUG_RESUME
#endif
};
enum ioc_reply {
};
struct gem_nd_arg {
int item;
};
static int
{
long val;
switch (item) {
case PARAM_AUTONEG_CAP:
break;
case PARAM_PAUSE_CAP:
break;
case PARAM_ASYM_PAUSE_CAP:
break;
case PARAM_1000FDX_CAP:
break;
case PARAM_1000HDX_CAP:
break;
case PARAM_100T4_CAP:
break;
case PARAM_100FDX_CAP:
break;
case PARAM_100HDX_CAP:
break;
case PARAM_10FDX_CAP:
break;
case PARAM_10HDX_CAP:
break;
case PARAM_ADV_AUTONEG_CAP:
break;
case PARAM_ADV_PAUSE_CAP:
break;
case PARAM_ADV_ASYM_PAUSE_CAP:
break;
case PARAM_ADV_1000FDX_CAP:
break;
case PARAM_ADV_1000HDX_CAP:
break;
case PARAM_ADV_100T4_CAP:
break;
case PARAM_ADV_100FDX_CAP:
break;
case PARAM_ADV_100HDX_CAP:
break;
case PARAM_ADV_10FDX_CAP:
break;
case PARAM_ADV_10HDX_CAP:
break;
case PARAM_LP_AUTONEG_CAP:
break;
case PARAM_LP_PAUSE_CAP:
break;
case PARAM_LP_ASYM_PAUSE_CAP:
break;
case PARAM_LP_1000FDX_CAP:
break;
case PARAM_LP_1000HDX_CAP:
break;
case PARAM_LP_100T4_CAP:
break;
case PARAM_LP_100FDX_CAP:
break;
case PARAM_LP_100HDX_CAP:
break;
case PARAM_LP_10FDX_CAP:
break;
case PARAM_LP_10HDX_CAP:
break;
case PARAM_LINK_STATUS:
break;
case PARAM_LINK_SPEED:
break;
case PARAM_LINK_DUPLEX:
val = 0;
}
break;
case PARAM_LINK_AUTONEG:
break;
case PARAM_LINK_RX_PAUSE:
break;
case PARAM_LINK_TX_PAUSE:
break;
#ifdef DEBUG_RESUME
case PARAM_RESUME_TEST:
val = 0;
break;
#endif
default:
break;
}
return (0);
}
static int
{
long val;
char *end;
return (EINVAL);
}
return (EINVAL);
}
switch (item) {
case PARAM_ADV_AUTONEG_CAP:
goto err;
}
goto err;
}
break;
case PARAM_ADV_PAUSE_CAP:
goto err;
}
if (val) {
} else {
}
break;
case PARAM_ADV_ASYM_PAUSE_CAP:
goto err;
}
if (val) {
} else {
}
break;
case PARAM_ADV_1000FDX_CAP:
goto err;
}
MII_XSTATUS_1000BASEX_FD)) == 0) {
goto err;
}
break;
case PARAM_ADV_1000HDX_CAP:
goto err;
}
(MII_XSTATUS_1000BASET | MII_XSTATUS_1000BASEX)) == 0) {
goto err;
}
break;
case PARAM_ADV_100T4_CAP:
goto err;
}
goto err;
}
break;
case PARAM_ADV_100FDX_CAP:
goto err;
}
goto err;
}
break;
case PARAM_ADV_100HDX_CAP:
goto err;
}
goto err;
}
break;
case PARAM_ADV_10FDX_CAP:
goto err;
}
goto err;
}
break;
case PARAM_ADV_10HDX_CAP:
goto err;
}
goto err;
}
break;
}
/* sync with PHY */
/* XXX - Can we ignore the return code ? */
(void) gem_mii_link_check(dp);
}
return (0);
err:
return (EINVAL);
}
static void
{
}
static void
{
/* Our advertised capabilities */
!dp->mii_advert_ro),
!dp->mii_advert_ro),
!dp->mii_advert_ro),
!dp->mii_advert_ro),
!dp->mii_advert_ro),
/* Partner's advertised capabilities */
/* Current operating modes */
#ifdef DEBUG_RESUME
#endif
}
static
enum ioc_reply
{
case ND_GET:
case ND_SET:
if (!ok) {
return (IOC_INVAL);
}
return (IOC_REPLY);
}
return (IOC_RESTART_REPLY);
}
return (IOC_INVAL);
}
static void
{
}
static void
{
int cmd;
/*
* Validate the command before bothering with the mutex ...
*/
switch (cmd) {
default:
break;
case ND_GET:
case ND_SET:
break;
}
#ifdef DEBUG_RESUME
}
#endif
/*
* Finally, decide how to reply
*/
switch (status) {
default:
case IOC_INVAL:
/*
* Error, reply with a NAK and EINVAL or the specified error
*/
break;
case IOC_DONE:
/*
* OK, reply already sent
*/
break;
case IOC_RESTART_ACK:
case IOC_ACK:
/*
* OK, reply with an ACK
*/
break;
case IOC_RESTART_REPLY:
case IOC_REPLY:
/*
* OK, send prepared reply as ACK or NAK
*/
break;
}
}
#ifndef SYS_MAC_H
#define XCVR_UNDEFINED 0
#endif
static int
{
val = XCVR_100T4;
} else if (dp->mii_status &
} else if (dp->mii_status &
val = XCVR_100T2;
} else if (dp->mii_status &
(MII_STATUS_10_FD | MII_STATUS_10)) {
}
} else if (dp->mii_xstatus &
val = XCVR_1000T;
} else if (dp->mii_xstatus &
val = XCVR_1000X;
}
return (val);
}
/* ============================================================== */
/*
* GLDv3 interface
*/
/* ============================================================== */
static int gem_m_start(void *);
static void gem_m_stop(void *);
static int gem_m_setpromisc(void *, boolean_t);
static int gem_m_unicst(void *, const uint8_t *);
NULL,
};
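/*
 * The callback vector above survives only in part. For reference, a
 * minimal sketch of how a GLDv3 driver registers with the mac layer;
 * the dev_addr, mtu and mh field names are assumptions, and the real
 * registration in this file may differ.
 */
#ifdef GEM_SKETCH_GLDV3
static int
gem_register_mac_sketch(struct gem_dev *dp, dev_info_t *dip,
    mac_callbacks_t *cbp)
{
	mac_register_t	*macp;
	int		err;

	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		return (ENOMEM);
	}
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = dp;
	macp->m_dip = dip;
	macp->m_src_addr = dp->dev_addr.ether_addr_octet; /* assumed field */
	macp->m_callbacks = cbp;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = dp->mtu;	/* assumed field */
	err = mac_register(macp, &dp->mh);	/* assumed field */
	mac_free(macp);
	return (err);
}
#endif /* GEM_SKETCH_GLDV3 */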
static int
{
int err = 0;
if (dp->mac_suspended) {
goto x;
}
goto x;
}
/* reset rx filter state */
dp->mc_count_req = 0;
/* setup media mode if the link has been up */
}
/* setup initial rx filter */
goto x;
}
goto x;
}
}
return (0);
x:
return (err);
}
static void
{
/* stop rx */
if (dp->mac_suspended) {
return;
}
(void) gem_mac_set_rx_filter(dp);
/* stop tx timeout watcher */
if (dp->timeout_id) {
;
dp->timeout_id = 0;
}
/* make the nic state inactive */
if (dp->mac_suspended) {
return;
}
/* we need to deassert mac_active to block the interrupt handler */
/* block interrupts */
}
(void) gem_mac_stop(dp, 0);
}
static int
{
int err;
int ret;
if (add) {
} else {
}
err = 0;
if (ret != GEM_SUCCESS) {
}
return (err);
}
static int
{
if (dp->mac_suspended) {
return (EIO);
}
if (on) {
} else {
}
}
return (err);
}
int
{
if (dp->mac_suspended) {
return (EIO);
}
} else {
if (dp->mac_suspended) {
return (EIO);
}
}
return (EIO);
}
switch (stat) {
case MAC_STAT_IFSPEED:
break;
case MAC_STAT_MULTIRCV:
break;
case MAC_STAT_BRDCSTRCV:
break;
case MAC_STAT_MULTIXMT:
break;
case MAC_STAT_BRDCSTXMT:
break;
case MAC_STAT_NORCVBUF:
break;
case MAC_STAT_IERRORS:
break;
case MAC_STAT_NOXMTBUF:
break;
case MAC_STAT_OERRORS:
break;
case MAC_STAT_COLLISIONS:
break;
case MAC_STAT_RBYTES:
break;
case MAC_STAT_IPACKETS:
break;
case MAC_STAT_OBYTES:
break;
case MAC_STAT_OPACKETS:
break;
case MAC_STAT_UNDERFLOWS:
break;
case MAC_STAT_OVERFLOWS:
break;
case ETHER_STAT_ALIGN_ERRORS:
break;
case ETHER_STAT_FCS_ERRORS:
break;
break;
break;
case ETHER_STAT_SQE_ERRORS:
break;
case ETHER_STAT_DEFER_XMTS:
break;
break;
case ETHER_STAT_EX_COLLISIONS:
break;
case ETHER_STAT_MACXMT_ERRORS:
break;
break;
break;
case ETHER_STAT_MACRCV_ERRORS:
break;
case ETHER_STAT_XCVR_ADDR:
break;
case ETHER_STAT_XCVR_ID:
break;
case ETHER_STAT_XCVR_INUSE:
break;
case ETHER_STAT_CAP_1000FDX:
break;
case ETHER_STAT_CAP_1000HDX:
break;
case ETHER_STAT_CAP_100FDX:
break;
case ETHER_STAT_CAP_100HDX:
break;
case ETHER_STAT_CAP_10FDX:
break;
case ETHER_STAT_CAP_10HDX:
break;
case ETHER_STAT_CAP_ASMPAUSE:
break;
case ETHER_STAT_CAP_PAUSE:
break;
case ETHER_STAT_CAP_AUTONEG:
break;
break;
break;
break;
break;
case ETHER_STAT_ADV_CAP_10FDX:
break;
case ETHER_STAT_ADV_CAP_10HDX:
break;
break;
case ETHER_STAT_ADV_CAP_PAUSE:
break;
break;
break;
break;
case ETHER_STAT_LP_CAP_100FDX:
break;
case ETHER_STAT_LP_CAP_100HDX:
break;
case ETHER_STAT_LP_CAP_10FDX:
break;
case ETHER_STAT_LP_CAP_10HDX:
break;
break;
case ETHER_STAT_LP_CAP_PAUSE:
break;
break;
case ETHER_STAT_LINK_ASMPAUSE:
break;
case ETHER_STAT_LINK_PAUSE:
break;
case ETHER_STAT_LINK_AUTONEG:
break;
case ETHER_STAT_LINK_DUPLEX:
break;
break;
case ETHER_STAT_LP_REMFAULT:
break;
case ETHER_STAT_JABBER_ERRORS:
break;
case ETHER_STAT_CAP_100T4:
break;
case ETHER_STAT_ADV_CAP_100T4:
break;
case ETHER_STAT_LP_CAP_100T4:
break;
default:
#if GEM_DEBUG_LEVEL > 2
"%s: unrecognized parameter value = %d",
#endif
return (ENOTSUP);
}
return (0);
}
static int
{
int err = 0;
if (dp->mac_suspended) {
return (EIO);
}
}
return (err);
}
/*
* gem_m_tx is used only for sending data packets onto the ethernet wire.
*/
static mblk_t *
{
/* Some nics hate to send packets when the link is down. */
while (mp) {
}
return (NULL);
}
}
static void
{
}
/* ARGSUSED */
static boolean_t
{
return (B_FALSE);
}
static void
{
}
}
/* ======================================================================== */
/*
 * .conf interface
 */
/* ======================================================================== */
static void
{
int val;
/*
* Get media mode information from the .conf file
*/
DDI_PROP_DONTPASS, "full-duplex"))) {
if (dp->full_duplex) {
} else {
}
}
switch (val) {
case 1000:
break;
case 100:
break;
case 10:
break;
default:
"!%s: property %s: illegal value:%d",
break;
}
}
"!%s: property %s: illegal value:%d",
} else {
}
}
}
/*
* Gem kstat support
*/
(sizeof (struct gem_dev) + \
sizeof (struct mcast_addr) * GEM_MAXMC + \
sizeof (void *) * ((gc)->gc_tx_buf_size))
struct gem_dev *
{
int i;
int ret;
int unit;
int nports;
nports = 1;
}
if (nports == 1) {
}
unit));
/*
* Allocate soft data structure
*/
return (NULL);
}
/* ddi_set_driver_private(dip, dp); */
/* link to private area */
/*
* Get iblock cookie
*/
"!%s: gem_do_attach: ddi_get_iblock_cookie: failed",
goto err_free_private;
}
dp->iblock_cookie = c;
/*
* Initialize mutex's for this device.
*/
/*
* configure gem parameters
*/
/* patch to simplify dma resource management */
/* fix copy thresholds */
/* fix rx buffer boundary for iocache line size */
/* fix descriptor boundary for cache line size */
/* patch get_packet method */
}
/* patch get_rx_start method */
}
/* calculate descriptor area */
if (gc->gc_rx_desc_unit_shift >= 0) {
dp->rx_desc_size =
}
if (gc->gc_tx_desc_unit_shift >= 0) {
dp->tx_desc_size =
}
/* link tx buffers */
}
/* performance tuning parameters */
/*
* Get media mode information from .conf file
*/
/* rx_buf_len is the required buffer length, excluding alignment padding */
/*
* Reset the chip
*/
if (ret != GEM_SUCCESS) {
goto err_free_regs;
}
/*
* HW-dependent parameter initialization
*/
if (ret != GEM_SUCCESS) {
goto err_free_regs;
}
#ifdef DEBUG_MULTIFRAGS
#endif
/* allocate tx and rx resources */
if (gem_alloc_memory(dp)) {
goto err_free_regs;
}
"!%s: at 0x%x, %02x:%02x:%02x:%02x:%02x:%02x",
/* copy mac address */
/* Probe MII phy (scan phy) */
dp->mii_lpable = 0;
dp->mii_advert = 0;
dp->mii_ctl1000 = 0;
dp->mii_stat1000 = 0;
goto err_free_ring;
}
/* mask unsupported abilities */
dp->anadv_1000fdx &=
dp->anadv_1000hdx &=
/* initialize MII phy if required */
goto err_free_ring;
}
}
/*
* initialize kstats including mii statistics
*/
/*
* Add interrupt to system.
*/
goto err_release_stats;
}
if (ddi_add_softintr(dip,
goto err_unregister;
}
goto err_unregister;
}
} else {
/*
* Don't use interrupts.
* Schedule the first call of gem_intr_watcher.
*/
timeout((void (*)(void *))gem_intr_watcher,
}
/* link this device to dev_info */
/* reset mii phy and start mii link watcher */
return (dp);
/* release NDD resources */
if (macp) {
}
return (NULL);
}
int
{
int priv_size;
return (DDI_SUCCESS);
}
while (dp) {
/* unregister with gld v3 */
return (DDI_FAILURE);
}
/* ensure that no rx buffers are in use */
/* resource is busy */
"!%s: %s: rxbuf is busy: allocated:%d, freecnt:%d",
/* NOT REACHED */
}
/* stop mii link watcher */
/* unregister interrupt handler */
} else {
/* stop interrupt watcher */
if (dp->intr_watcher_id) {
;
dp->intr_watcher_id = 0;
}
}
/* release NDD resources */
/* release buffers, descriptors and dma resources */
/* release locks and condition variables */
/* release basic memory resources */
}
/* release common private memory for the nic */
/* release register mapping resources */
return (DDI_SUCCESS);
}
int
{
/*
* stop the device
*/
/* stop mii link watcher */
/* stop interrupt watcher for no-intr mode */
if (dp->intr_watcher_id) {
;
}
dp->intr_watcher_id = 0;
}
/* stop tx timeout watcher */
if (dp->timeout_id) {
;
dp->timeout_id = 0;
}
/* make the nic state inactive */
(void) gem_mac_stop(dp, 0);
/* no further register access */
}
/* XXX - power down the nic */
return (DDI_SUCCESS);
}
int
{
/*
* restart the device
*/
/*
* Bring up the nic after power up
*/
/* the gem_xxx.c layer is expected to set up the power management state. */
/* reset the chip, because we are just after power up. */
goto err;
}
/* initialize mii phy because we are just after power up */
}
/*
* schedule the first call of gem_intr_watcher
* instead of using interrupts.
*/
timeout((void (*)(void *))gem_intr_watcher,
}
/* restart mii link watcher */
/* restart mac */
goto err_reset;
}
/* setup media mode if the link has been up */
goto err_reset;
}
}
/* enable mac address and rx filter */
goto err_reset;
}
/* restart tx timeout watcher */
(void *)dp,
/* now the nic is fully functional */
goto err_reset;
}
}
}
return (DDI_SUCCESS);
if (dp->intr_watcher_id) {
;
dp->intr_watcher_id = 0;
}
err:
return (DDI_FAILURE);
}
/*
* misc routines for PCI
*/
{
/* search power management capabilities */
while (pci_cap_ptr) {
/* read pci capability header */
/* found */
break;
}
/* get next_ptr */
}
return (pci_cap_ptr);
}
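/*
 * A minimal sketch of the capability-list walk that gem_search_pci_cap
 * performs with pci_config_get8(9F). Offsets follow the PCI spec: the
 * list head pointer lives at config offset 0x34, and each entry starts
 * with { cap_id, next_ptr }. The function and parameter names here are
 * assumptions (the power management capability id, for example, is 0x01).
 */
#ifdef GEM_SKETCH_PCICAP
static uint8_t
gem_search_pci_cap_sketch(ddi_acc_handle_t conf_handle, uint8_t target_id)
{
	uint8_t	ptr;

	/* head of the capability list */
	ptr = pci_config_get8(conf_handle, 0x34);
	while (ptr) {
		/* read the pci capability header */
		if (pci_config_get8(conf_handle, ptr) == target_id) {
			/* found */
			break;
		}
		/* get next_ptr */
		ptr = pci_config_get8(conf_handle, ptr + 1);
	}
	return (ptr);
}
#endif /* GEM_SKETCH_PCICAP */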
int
{
const char *drv_name;
/* search power management capabilities */
if (pci_cap_ptr == 0) {
"!%s%d: doesn't have pci power management capability",
return (DDI_FAILURE);
}
/* read power management capabilities */
"!%s%d: pmc found at 0x%x: pmcsr: 0x%08x",
/*
* Is the requested power mode supported?
*/
/* not yet */
/*
* move to new mode
*/
return (DDI_SUCCESS);
}
/*
* select a suitable register set by the specified address space or
* register offset in PCI config space
*/
int
struct ddi_device_acc_attr *attrp,
{
uint_t n;
uint_t i;
int ret;
const char *drv_name;
/* Search IO-range or memory-range to be mapped */
len = 0;
if ((ret = ddi_prop_lookup_int_array(
"!%s%d: failed to get reg property (ret:%d)",
return (DDI_FAILURE);
}
n = len / (sizeof (struct pci_phys_spec) / sizeof (int));
#if GEM_DEBUG_LEVEL > 0
for (i = 0; i < n; i++) {
"!%s%d: regs[%d]: %08x.%08x.%08x.%08x.%08x",
regs[i].pci_phys_hi,
regs[i].pci_phys_mid,
regs[i].pci_phys_low,
regs[i].pci_size_hi,
regs[i].pci_size_low);
}
#endif
for (i = 0; i < n; i++) {
/* it's the requested space */
goto address_range_found;
}
}
return (DDI_FAILURE);
!= DDI_SUCCESS) {
"!%s%d: ddi_regs_map_setup failed (ret:%d)",
}
return (ret);
}
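/*
 * For reference: each "reg" entry examined above is a pci_phys_spec from
 * the IEEE 1275 PCI binding. pci_phys_hi packs the address space type in
 * bits 24-25 (00 config, 01 I/O, 10 mem32, 11 mem64), plus the
 * bus/device/function numbers and the BAR offset; this is why the search
 * loop can match the requested space with a simple mask-and-compare on
 * pci_phys_hi.
 */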
void
{
}
void
{
}