/* yge.c revision 388fc72deb85983f006abcdb4f8724ddd5360140 */
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* This driver was derived from the FreeBSD if_msk.c driver, which
* bears the following copyright attributions and licenses.
*/
/*
*
* LICENSE:
*
* The computer program files contained in this folder ("Files")
* are provided to you under the BSD-type license terms provided
* below, and any use of such Files and any derivative works
* thereof created by you shall be governed by the following terms
* and conditions:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* - Neither the name of Marvell nor the names of its contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* /LICENSE
*
*/
/*
* Copyright (c) 1997, 1998, 1999, 2000
* Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/ethernet.h>
#include <sys/mac_ether.h>
#include <sys/mac_provider.h>
#include "yge.h"
/*
 * DDI access attributes for device registers, descriptor rings, and
 * packet buffers, respectively.
 *
 * NOTE(review): all three initializers are empty here, so every member
 * (devacc_attr_version, endian flags, data-ordering flags) is
 * zero-initialized.  The attribute values appear to have been lost from
 * this copy of the file -- confirm against the original driver source
 * before relying on these.
 */
static struct ddi_device_acc_attr yge_regs_attr = {
};
static struct ddi_device_acc_attr yge_ring_attr = {
};
static struct ddi_device_acc_attr yge_buf_attr = {
};
/* Descriptor rings are aligned on a 4 KB (0x1000) boundary; see dma_attr_align below. */
#define DESC_ALIGN 0x1000
/*
 * DMA attributes for descriptor rings: 32-bit addressing only, a single
 * contiguous segment (sgllen == 1), aligned to DESC_ALIGN (4 KB).
 */
static ddi_dma_attr_t yge_ring_dma_attr = {
DMA_ATTR_V0, /* dma_attr_version */
0, /* dma_attr_addr_lo */
0x00000000ffffffffull, /* dma_attr_addr_hi */
0x00000000ffffffffull, /* dma_attr_count_max */
DESC_ALIGN, /* dma_attr_align */
0x000007fc, /* dma_attr_burstsizes */
1, /* dma_attr_minxfer */
0x00000000ffffffffull, /* dma_attr_maxxfer */
0x00000000ffffffffull, /* dma_attr_seg */
1, /* dma_attr_sgllen */
1, /* dma_attr_granular */
0 /* dma_attr_flags */
};
/*
 * DMA attributes for packet buffers: 32-bit addressing, byte alignment,
 * up to 8 scatter/gather segments, and a 64 KB maximum transfer.
 */
static ddi_dma_attr_t yge_buf_dma_attr = {
DMA_ATTR_V0, /* dma_attr_version */
0, /* dma_attr_addr_lo */
0x00000000ffffffffull, /* dma_attr_addr_hi */
0x00000000ffffffffull, /* dma_attr_count_max */
1, /* dma_attr_align */
0x0000fffc, /* dma_attr_burstsizes */
1, /* dma_attr_minxfer */
0x000000000000ffffull, /* dma_attr_maxxfer */
0x00000000ffffffffull, /* dma_attr_seg */
8, /* dma_attr_sgllen */
1, /* dma_attr_granular */
0 /* dma_attr_flags */
};
static int yge_attach(yge_dev_t *);
static void yge_detach(yge_dev_t *);
static int yge_suspend(yge_dev_t *);
static int yge_resume(yge_dev_t *);
static void yge_setup_rambuffer(yge_dev_t *);
static int yge_init_port(yge_port_t *);
static void yge_uninit_port(yge_port_t *);
static int yge_register_port(yge_port_t *);
static int yge_unregister_port(yge_port_t *);
static void yge_tick(void *);
static int yge_intr_gmac(yge_port_t *);
static void yge_intr_enable(yge_dev_t *);
static void yge_intr_disable(yge_dev_t *);
static void yge_intr_hwerr(yge_dev_t *);
static void yge_txeof(yge_port_t *, int);
static void yge_set_rambuffer(yge_port_t *);
static void yge_start_port(yge_port_t *);
static void yge_stop_port(yge_port_t *);
static void yge_free_ring(yge_ring_t *);
static int yge_txrx_dma_alloc(yge_port_t *);
static void yge_txrx_dma_free(yge_port_t *);
static void yge_init_rx_ring(yge_port_t *);
static void yge_init_tx_ring(yge_port_t *);
static void yge_mii_notify(void *, link_state_t);
static void yge_setrxfilt(yge_port_t *);
static void yge_restart_task(yge_dev_t *);
static void yge_task(void *);
static void yge_dispatch(yge_dev_t *, int);
static void yge_stats_clear(yge_port_t *);
static void yge_stats_update(yge_port_t *);
static int yge_m_unicst(void *, const uint8_t *);
static int yge_m_promisc(void *, boolean_t);
static int yge_m_start(void *);
static void yge_m_stop(void *);
const void *);
extern void yge_phys_update(yge_port_t *);
static mac_callbacks_t yge_m_callbacks = {
NULL, /* mc_getcapab */
NULL, /* mc_open */
NULL, /* mc_close */
};
static mii_ops_t yge_mii_ops = {
NULL /* reset */
};
/*
* This is the low level interface routine to read from the PHY
* MII registers. There is multiple steps to these accesses. First
* the register number is written to an address register. Then after
* a specified delay status is checked until the data is present.
*/
static uint16_t
{
for (int i = 0; i < YGE_TIMEOUT; i += 10) {
drv_usecwait(10);
if ((val & GM_SMI_CT_RD_VAL) != 0) {
return (val);
}
}
return (0xffff);
}
/*
* This is the low level interface routine to write to the PHY
* MII registers. There is multiple steps to these accesses. The
* data and the target registers address are written to the PHY.
* Then the PHY is polled until it is done with the write. Note
* that the delays are specified and required!
*/
static void
{
for (int i = 0; i < YGE_TIMEOUT; i += 10) {
drv_usecwait(10);
return;
}
}
static uint16_t
{
return (rv);
}
static void
{
}
/*
* The MII common code calls this function to let the MAC driver
* know when there has been a change in status.
*/
void
{
int speed;
if (link == LINK_STATE_UP) {
/* Enable Tx FIFO Underrun. */
GM_IS_TX_FF_UR | /* TX FIFO underflow */
GM_IS_RX_FF_OR); /* RX FIFO overflow */
switch (fc) {
case LINK_FLOWCTRL_BI:
gmac = GMC_PAUSE_ON;
break;
case LINK_FLOWCTRL_TX:
gmac = GMC_PAUSE_ON;
break;
case LINK_FLOWCTRL_RX:
gmac = GMC_PAUSE_ON;
break;
case LINK_FLOWCTRL_NONE:
default:
break;
}
switch (speed) {
case 1000:
break;
case 100:
break;
case 10:
default:
break;
}
if (duplex == LINK_DUPLEX_FULL) {
gpcr |= GM_GPCR_DUP_FULL;
} else {
gpcr &= ~(GM_GPCR_DUP_FULL);
}
/* Read again to ensure writing. */
/* write out the flow control gmac setting */
} else {
/* Read again to ensure writing. */
}
}
}
static void
{
int pnum;
if (dev->d_suspended)
return;
/* Set station address. */
for (int i = 0; i < (ETHERADDRL / 2); i++) {
}
for (int i = 0; i < (ETHERADDRL / 2); i++) {
}
/* Figure out receive filtering mode. */
} else {
}
/* Write the multicast filter. */
/* Write the receive filtering mode. */
}
static void
{
int prod;
/* ala bzero, but uses safer acch access */
/* Hang out receive buffers. */
}
/* Update prefetch unit. */
YGE_RX_RING_CNT - 1);
}
static void
{
}
static void
{
int next;
int i;
/* Get adapter SRAM size. */
return;
/*
* Give receiver 2/3 of memory and round down to the multiple
* of 1024.
*/
}
}
static void
{
int i;
if (powerup) {
/* Switch power to VCC (WA for VAUX problem). */
/* Disable Core Clock Division, set Clock Select to 0. */
val = 0;
/* Enable bits are inverted. */
}
/*
* Enable PCI & Core Clock, enable clock gating for both Links.
*/
/* Deassert Low Power for 1st PHY. */
val |= PCI_Y2_PHY1_COMA;
val |= PCI_Y2_PHY2_COMA;
}
case CHIP_ID_YUKON_EC_U:
case CHIP_ID_YUKON_EX:
case CHIP_ID_YUKON_FE_P: {
/* Enable all clocks. */
/* Set all bits to 0 except bits 15..12. */
/* Set to default value. */
/*
* Enable workaround for dev 4.107 on Yukon-Ultra
* and Extreme
*/
break;
}
default:
break;
}
for (i = 0; i < dev->d_num_port; i++) {
}
} else {
val &= ~PCI_Y2_PHY1_COMA;
val &= ~PCI_Y2_PHY2_COMA;
} else {
}
/* Enable bits are inverted. */
val = 0;
}
/*
* Disable PCI & Core Clock, disable clock gating for
* both Links.
*/
}
}
static void
{
int i;
/* Turn off ASF */
/* Clear AHB bridge & microcontroller reset */
status &= ~Y2_ASF_CPU_MODE;
status &= ~Y2_ASF_AHB_RST;
/* Clear ASF microcontroller state */
status &= ~Y2_ASF_STAT_MSK;
} else {
}
/*
* Since we disabled ASF, S/W reset is required for Power Management.
*/
/* Allow writes to PCI config space */
/* Clear all error bits in the PCI status register. */
case PEX_BUS:
/* Clear all PEX errors. */
/* is error bit status stuck? */
}
break;
case PCI_BUS:
/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
break;
case PCIX_BUS:
/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
/* Set Cache Line Size opt. */
val |= PCI_CLS_OPT;
break;
}
/* Set PHY power state. */
for (i = 0; i < dev->d_num_port; i++) {
/* GPHY Control reset. */
/* GMAC Control reset. */
}
}
/* LED On. */
/* Clear TWSI IRQ. */
/* Turn off hardware timer. */
/* Turn off descriptor polling. */
/* Turn off time stamps. */
/* Don't permit config space writing */
/* enable TX Arbiters */
for (i = 0; i < dev->d_num_port; i++)
/* Configure timeout values. */
for (i = 0; i < dev->d_num_port; i++) {
}
/* Disable all interrupts. */
/*
* On dual port PCI-X card, there is an problem where status
* can be received out of order due to split transactions.
*/
int pcix;
/* Clear Max Outstanding Split Transactions. */
pcix_cmd &= ~0x70;
}
}
/* Change Max. Read Request Size to 4096 bytes. */
v &= ~PEX_DC_MAX_RRS_MSK;
v |= PEX_DC_MAX_RD_RQ_SIZE(5);
v = (v & PEX_LS_LINK_WI_MSK) >> 4;
if (v != width)
"Negotiated width of PCIe link(x%d) != "
"max. width of link(x%d)\n", width, v);
}
/* Clear status list. */
dev->d_stat_cons = 0;
/* Set the status list base address. */
/* Set the status list last index. */
/* WA for dev. #4.3 */
/* WA for dev #4.18 */
} else {
/* ISR status FIFO watermark */
else
}
/*
* Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
*/
/* Enable status unit. */
}
static int
{
int i;
KM_SLEEP);
KM_SLEEP);
} else {
}
/* Disable jumbo frame for Yukon FE. */
/*
* Start out assuming a regular MTU. Users can change this
* with dladm. The dladm daemon is supposed to issue commands
* to change the default MTU using m_setprop during early boot
* (before the interface is plumbed) if the user has so
* requested.
*/
return (DDI_FAILURE);
}
/* We assume all parts support asymmetric pause */
/*
* Get station address for this interface. Note that
* dual port cards actually come with three station
* addresses: one for each port, plus an extra. The
* extra one is used by the SysKonnect driver software
* as a 'virtual' station address for when both ports
* are operating in failover mode. Currently we don't
* use this extra address.
*/
for (i = 0; i < ETHERADDRL; i++) {
port->p_curraddr[i] =
}
/* Register with Nemo. */
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static int
{
int count;
int actual;
int rv;
int i, j;
return (DDI_FAILURE);
}
/*
* Allocate the interrupt. Note that we only bother with a single
* interrupt. One could argue that for MSI devices with dual ports,
* it would be nice to have a separate interrupt per port. But right
* now I don't know how to configure that, so we'll just settle for
* a single interrupt.
*/
return (DDI_FAILURE);
}
"Unable to allocate interrupt, %d, count %d",
return (DDI_FAILURE);
}
DDI_SUCCESS) {
"Unable to get interrupt priority, %d", rv);
return (DDI_FAILURE);
}
DDI_SUCCESS) {
"Unable to get interrupt capabilities, %d", rv);
return (DDI_FAILURE);
}
/* register interrupt handler to kernel */
"Unable to add interrupt handler, %d", rv);
for (j = 0; j < i; j++)
return (DDI_FAILURE);
}
}
return (DDI_SUCCESS);
}
static int
{
int intr_types;
int rv;
/* Allocate IRQ resources. */
if (rv != DDI_SUCCESS) {
"Unable to determine supported interrupt types, %d", rv);
return (DDI_FAILURE);
}
/*
* We default to not supporting MSI. We've found some device
* and motherboard combinations don't always work well with
* MSI interrupts. Users may override this if they choose.
*/
if (intr_types & DDI_INTR_TYPE_FIXED) {
}
}
if (intr_types & DDI_INTR_TYPE_MSIX) {
return (DDI_SUCCESS);
}
if (intr_types & DDI_INTR_TYPE_MSI) {
return (DDI_SUCCESS);
}
if (intr_types & DDI_INTR_TYPE_FIXED) {
return (DDI_SUCCESS);
}
return (DDI_FAILURE);
}
static void
{
int i;
/* Call ddi_intr_block_enable() for MSI interrupts */
} else {
/* Call ddi_intr_enable for FIXED interrupts */
}
}
void
{
int i;
} else {
}
}
static uint8_t
{
return (0);
}
/* This assumes PCI, and not CardBus. */
while (ptr != 0) {
return (ptr);
}
}
return (0);
}
static int
{
int rv;
int nattached;
goto fail;
}
/*
*/
/* ensure the pmcsr status is D0 state */
if (pm_cap != 0) {
pmcsr | PCI_PMCSR_D0);
}
/* Enable PCI access and bus master. */
/* Allocate I/O resource */
if (rv != DDI_SUCCESS) {
goto fail;
}
/* Enable all clocks. */
/*
* Bail out if chip is not recognized. Note that we only enforce
* this in production builds. The Ultra-2 (88e8057) has a problem
* right now where TX works fine, but RX seems not to. So we've
* disabled that for now.
*/
#ifndef DEBUG
goto fail;
#endif
}
/* Soft reset. */
dev->d_coppertype = 0;
else
/* Check number of MACs. */
dev->d_num_port++;
}
/* Check bus type. */
} else {
}
case CHIP_ID_YUKON_EC:
break;
case CHIP_ID_YUKON_UL_2:
break;
case CHIP_ID_YUKON_SUPR:
break;
case CHIP_ID_YUKON_EC_U:
break;
case CHIP_ID_YUKON_EX:
break;
case CHIP_ID_YUKON_FE:
break;
case CHIP_ID_YUKON_FE_P:
break;
case CHIP_ID_YUKON_XL:
break;
default:
break;
}
if (rv != DDI_SUCCESS)
goto fail;
/* Setup event taskq. */
goto fail;
}
/* Init the condition variable */
/* Allocate IRQ resources. */
goto fail;
}
/* Set base interrupt mask. */
/* Reset the adapter. */
nattached = 0;
for (int i = 0; i < dev->d_num_port; i++) {
goto fail;
}
}
/* set up the periodic to run once per second */
for (int i = 0; i < dev->d_num_port; i++) {
nattached++;
}
}
if (nattached == 0) {
goto fail;
}
/* Dispatch the taskq */
DDI_SUCCESS) {
goto fail;
}
return (DDI_SUCCESS);
fail:
return (DDI_FAILURE);
}
static int
{
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static int
{
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* Free up port specific resources. This is called only when the
* port is not registered (and hence not running).
*/
static void
{
sizeof (yge_buf_t) * YGE_TX_RING_CNT);
sizeof (yge_buf_t) * YGE_RX_RING_CNT);
}
static void
{
/*
* Turn off the periodic.
*/
if (dev->d_periodic)
for (int i = 0; i < dev->d_num_port; i++) {
}
/*
* Make sure all interrupts are disabled.
*/
/* LED Off. */
/* Put hardware reset. */
}
}
}
}
static int
{
int rv;
unsigned ndmac;
if (rv != DDI_SUCCESS) {
return (DDI_FAILURE);
}
if (rv != DDI_SUCCESS) {
return (DDI_FAILURE);
}
if (rv != DDI_DMA_MAPPED) {
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static void
{
}
static int
{
size_t l;
int sflag;
int rv;
unsigned ndmac;
/* Now allocate Tx buffers. */
if (rv != DDI_SUCCESS) {
return (DDI_FAILURE);
}
if (rv != DDI_SUCCESS) {
return (DDI_FAILURE);
}
if (rv != DDI_DMA_MAPPED) {
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
 * Tear down a single DMA buffer: unbind the DMA mapping if one is
 * bound, then release the backing memory and finally the DMA handle.
 * Each step is guarded so a partially-initialized buffer (e.g. from a
 * failed allocation path) can be freed safely.
 *
 * NOTE(review): the function name and parameter line is missing from
 * this copy of the file (the body operates on a `b` that is presumably
 * a yge_buf_t *); restore the signature from the original source.
 */
static void
{
if (b->b_paddr)
(void) ddi_dma_unbind_handle(b->b_dmah);
b->b_paddr = 0;
if (b->b_acch)
ddi_dma_mem_free(&b->b_acch);
if (b->b_dmah)
ddi_dma_free_handle(&b->b_dmah);
}
static int
{
int rv;
int i;
yge_buf_t *b;
/*
* It seems that Yukon II supports full 64 bit DMA operations.
* But we limit it to 32 bits only for now. The 64 bit
* operation would require substantially more complex
* descriptor handling, since in such a case we would need two
* LEs to represent a single physical address.
*
* If we find that this is limiting us, then we should go back
* and re-examine it.
*/
/* Note our preferred buffer size. */
/* Allocate Tx ring. */
if (rv != DDI_SUCCESS) {
return (DDI_FAILURE);
}
/* Now allocate Tx buffers. */
for (i = 0; i < YGE_TX_RING_CNT; i++) {
if (rv != DDI_SUCCESS) {
return (DDI_FAILURE);
}
b++;
}
/* Allocate Rx ring. */
if (rv != DDI_SUCCESS) {
return (DDI_FAILURE);
}
/* Now allocate Rx buffers. */
for (i = 0; i < YGE_RX_RING_CNT; i++) {
if (rv != DDI_SUCCESS) {
return (DDI_FAILURE);
}
b++;
}
return (DDI_SUCCESS);
}
static void
{
yge_buf_t *b;
/* Tx ring. */
/* Rx ring. */
/* Tx buffers. */
for (int i = 0; i < YGE_TX_RING_CNT; i++, b++) {
yge_free_buf(b);
}
/* Rx buffers. */
for (int i = 0; i < YGE_RX_RING_CNT; i++, b++) {
yge_free_buf(b);
}
}
{
/*
* For now we're not going to support checksum offload or LSO.
*/
/* too big! */
return (B_TRUE);
}
/* Check number of available descriptors. */
return (B_FALSE);
}
/* Update producer index. */
return (B_TRUE);
}
static int
{
for (int i = 0; i < dev->d_num_port; i++) {
}
for (int i = 0; i < dev->d_num_port; i++) {
}
}
/* Disable all interrupts. */
/* Put hardware reset. */
return (DDI_SUCCESS);
}
static int
{
/* ensure the pmcsr status is D0 state */
pmcsr | PCI_PMCSR_D0);
}
/* Enable PCI access and bus master. */
/* Enable all clocks. */
case CHIP_ID_YUKON_EX:
case CHIP_ID_YUKON_EC_U:
case CHIP_ID_YUKON_FE_P:
break;
}
/* Make sure interrupts are reenabled */
for (int i = 0; i < dev->d_num_port; i++) {
}
}
/* Reset MII layer */
for (int i = 0; i < dev->d_num_port; i++) {
}
}
return (DDI_SUCCESS);
}
static mblk_t *
{
return (NULL);
/*
* Apparently the status for this chip is not reliable.
* Only perform minimal consistency checking; the MAC
* and upper protocols will have to filter any garbage.
*/
goto bad;
}
} else {
((status & GMR_FS_ANY_ERR) != 0) ||
((status & GMR_FS_RX_OK) == 0)) {
goto bad;
}
}
/* good packet - yay */
} else {
}
bad:
cons);
return (mp);
}
static boolean_t
{
int prog;
return (B_FALSE);
}
prog = 0;
break;
prog++;
/* No need to sync LEs as we didn't update LEs. */
}
if (prog > 0) {
return (resched);
} else {
return (B_FALSE);
}
}
static void
{
}
}
static void
{
for (int i = 0; i < dev->d_num_port; i++) {
}
for (int i = 0; i < dev->d_num_port; i++) {
}
for (int i = 0; i < dev->d_num_port; i++) {
}
}
static void
{
int idx;
if (dev->d_suspended) {
return;
}
for (int i = 0; i < dev->d_num_port; i++) {
continue;
/*
* Reclaim first as there is a possibility of losing
* Tx completion interrupts.
*/
} else {
/* detect TX hang */
"TX hang detected!");
}
}
}
}
if (restart) {
} else {
if (resched) {
for (int i = 0; i < dev->d_num_port; i++) {
}
}
}
}
static int
{
int dispatch_wrk = 0;
/* GMAC Rx FIFO overrun. */
if ((status & GM_IS_RX_FF_OR) != 0) {
}
/* GMAC Tx FIFO underrun. */
if ((status & GM_IS_TX_FF_UR) != 0) {
/*
* Tx MAC but that would also require
* resynchronization with status LEs. Reinitializing
* status LEs would affect the other port in dual MAC
* configuration so it should be avoided if we can.
* Due to lack of documentation it's all vague guess
* but it needs more investigation.
*/
}
return (dispatch_wrk);
}
static void
{
if ((status & Y2_IS_PAR_RD1) != 0) {
/* Clear IRQ. */
}
if ((status & Y2_IS_PAR_WR1) != 0) {
/* Clear IRQ. */
}
if ((status & Y2_IS_PAR_MAC1) != 0) {
/* Clear IRQ. */
}
if ((status & Y2_IS_PAR_RX1) != 0) {
/* Clear IRQ. */
}
/* Clear IRQ. */
}
}
static void
{
/* Time Stamp timer overflow. */
if ((status & Y2_IS_TIST_OV) != 0)
if ((status & Y2_IS_PCI_NEXP) != 0) {
/*
* PCI Express Error occurred which is not described in PEX
* spec.
* This error is also mapped either to Master Abort(
* Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
* can only be cleared there.
*/
}
if ((status & Y2_IS_IRQ_STAT) != 0)
if ((status & Y2_IS_MST_ERR) != 0)
/* Reset all bits in the PCI status register. */
}
/* Check for PCI Express Uncorrectable Error. */
if ((status & Y2_IS_PCI_EXP) != 0) {
/*
* On PCI Express bus bridges are called root complexes (RC).
* PCI Express errors are recognized by the root complex too,
* which requests the system to handle the problem. After
* error occurrence it may be that no access to the adapter
* may be performed any longer.
*/
if ((v32 & PEX_UNSUP_REQ) != 0) {
/* Ignore unsupported request error. */
"Uncorrectable PCI Express error");
}
int i;
/* Get TLP header form Log Registers. */
for (i = 0; i < 4; i++)
PEX_HEADER_LOG + i * 4);
/* Check for vendor defined broadcast message. */
dev->d_intrhwemask);
}
}
/* Clear the interrupt. */
}
}
/*
* Returns B_TRUE if there is potentially more work to do.
*/
static boolean_t
{
return (B_FALSE);
}
/* Sync status LE. */
break;
}
control & STLE_OP_MASK);
goto finish;
}
switch (control & STLE_OP_MASK) {
case OP_RXSTAT:
else
}
break;
case OP_TXINDEXLE:
txindex[1] =
break;
default:
control & STLE_OP_MASK);
break;
}
/* Give it back to HW. */
break;
}
}
return (B_TRUE);
else
return (B_FALSE);
}
/*ARGSUSED1*/
static uint_t
{
int txindex[2];
int dispatch_wrk;
dispatch_wrk = 0;
if (dev->d_suspended) {
return (DDI_INTR_UNCLAIMED);
}
/* Get interrupt source. */
/* Reenable interrupts. */
return (DDI_INTR_UNCLAIMED);
}
if ((status & Y2_IS_HW_ERR) != 0) {
}
if (status & Y2_IS_IRQ_MAC1) {
}
if (status & Y2_IS_IRQ_MAC2) {
}
"Rx descriptor error");
}
"Tx descriptor error");
}
/* handle events until it returns false */
/* NOP */;
if ((status & Y2_IS_STAT_BMU)) {
}
/* Reenable interrupts. */
if (dispatch_wrk) {
}
if (txindex[0] >= 0) {
}
if (heads[0])
} else {
if (heads[0]) {
}
}
}
if (txindex[1] >= 0) {
}
if (heads[1])
} else {
if (heads[1]) {
}
}
}
return (DDI_INTR_CLAIMED);
}
static void
{
case CHIP_ID_YUKON_EX:
goto yukon_ex_workaround;
else
break;
default:
/* Set Tx GMAC FIFO Almost Empty Threshold. */
/* Disable Store & Forward mode for Tx. */
} else {
/* Enable Store & Forward mode for Tx. */
}
break;
}
}
static void
{
else
/*
* Note for the future, if we enable offloads:
* In Yukon EC Ultra, TSO & checksum offload is not
* supported for jumbo frame.
*/
/* GMAC Control reset */
/*
* Initialize GMAC first such that speed/duplex/flow-control
* parameters are renegotiated with the interface is brought up.
*/
/* Dummy read the Interrupt Source Register. */
/* Clear MIB stats. */
/* Disable FCS. */
/* Setup Transmit Control Register. */
/* Setup Transmit Flow Control Register. */
/* Setup Transmit Parameter Register. */
/* Disable interrupts for counter overflows. */
/* Configure Rx MAC FIFO. */
reg |= GMF_RX_OVER_ON;
/* Set receive filter. */
/* Flush Rx MAC FIFO on any flow control or error. */
/*
* Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
* due to hardware hang on receipt of pause frames.
*/
/* FE+ magic */
reg = 0x178;
/* Configure Tx MAC FIFO. */
/* Set Rx Pause threshold. */
} else {
}
/* Configure store-and-forward for TX */
}
/* Disable dynamic watermark */
reg &= ~TX_DYN_WM_ENA;
}
/*
* Disable Force Sync bit and Alloc bit in Tx RAM interface
* arbiter as we don't use Sync Tx queue.
*/
/* Enable the RAM Interface Arbiter. */
/* Setup RAM buffer. */
/* Disable Tx sync Queue. */
/* Setup Tx Queue Bus Memory Interface. */
case CHIP_ID_YUKON_EC_U:
/* Fix for Yukon-EC Ultra: set BMU FIFO level */
}
break;
case CHIP_ID_YUKON_EX:
/*
* Yukon Extreme seems to have silicon bug for
* automatic Tx checksum calculation capability.
*/
break;
}
/* Setup Rx Queue Bus Memory Interface. */
} else {
}
/* MAC Rx RAM Read is controlled by hardware. */
}
/* Disable Rx checksum offload and RSS hash. */
/* Configure interrupt handling. */
}
/* Read again to ensure writing. */
/* Reset TX timer */
}
static void
{
int pnum;
return;
/* Setup Rx Queue. */
utpp =
ltpp =
/* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */
/* Setup Tx Queue. */
/* Enable Store & Forward for Tx side. */
}
static void
{
/* Reset the prefetch unit. */
/* Set LE base address. */
/* Set the list last index. */
/* Turn on prefetch unit. */
/* Dummy read to ensure write. */
}
static void
{
int i;
/*
* shutdown timeout
*/
/* Disable interrupts. */
if (pnum == YGE_PORT_A) {
} else {
}
/* Read again to ensure writing. */
/* Update stats and clear counters. */
/* Stop Tx BMU. */
for (i = 0; i < YGE_TIMEOUT; i += 10) {
} else
break;
drv_usecwait(10);
}
/* This is probably fairly catastrophic. */
/* Disable all GMAC interrupt. */
/* Disable the RAM Interface Arbiter. */
/* Reset the PCI FIFO of the async Tx queue */
/* Reset the Tx prefetch units. */
/* Reset the RAM Buffer async Tx queue. */
/* Reset Tx MAC FIFO. */
/* Set Pause Off. */
/*
* The Rx Stop command will not work for Yukon-2 if the BMU does not
* reach the end of packet and since we can't make sure that we have
* incoming data, we must reset the BMU while it is not during a DMA
* transfer. Since it is possible that the Rx path is still active,
* the Rx RAM buffer will be stopped first, so any possible incoming
* data will not trigger a DMA. After the RAM buffer is stopped, the
* BMU is polled until any DMA in progress is ended and only then it
* will be reset.
*/
/* Disable the RAM Buffer receive queue. */
for (i = 0; i < YGE_TIMEOUT; i += 10) {
break;
drv_usecwait(10);
}
/* This is probably nearly a fatal error. */
if (i == YGE_TIMEOUT)
/* Reset the Rx prefetch unit. */
/* Reset the RAM Buffer receive queue. */
/* Reset Rx MAC FIFO. */
}
/*
* When GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading lower
* counter clears high 16 bits of the counter such that accessing
* lower 16 bits should be the last operation.
*/
#define YGE_READ_MIB32(x, y) \
GMAC_READ_4(dev, x, y)
#define YGE_READ_MIB64(x, y) \
(uint64_t)YGE_READ_MIB32(x, y))
static void
{
/* Set MIB Clear Counter Mode. */
/* Read all MIB Counters with Clear Mode set. */
(void) YGE_READ_MIB32(pnum, i);
/* Clear MIB Clear Counter Mode. */
gmac &= ~GM_PAR_MIB_CLR;
}
static void
{
struct yge_hw_stats *stats;
return;
}
/* Set MIB Clear Counter Mode. */
/* Rx stats. */
/* Tx stats. */
/* Clear MIB Clear Counter Mode. */
gmac &= ~GM_PAR_MIB_CLR;
}
{
int idx;
int bit;
#define POLY_BE 0x04c11db7
crc = 0xffffffff;
}
}
return (crc % 64);
}
int
{
if (stat == MAC_STAT_IFSPEED) {
/*
* This is the first stat we are asked about. We update only
* for this stat, to avoid paying the hefty cost of the update
* once for each stat.
*/
}
return (0);
}
switch (stat) {
case MAC_STAT_MULTIRCV:
break;
case MAC_STAT_BRDCSTRCV:
break;
case MAC_STAT_MULTIXMT:
break;
case MAC_STAT_BRDCSTXMT:
break;
case MAC_STAT_IPACKETS:
break;
case MAC_STAT_RBYTES:
break;
case MAC_STAT_OPACKETS:
break;
case MAC_STAT_OBYTES:
break;
case MAC_STAT_NORCVBUF:
break;
case MAC_STAT_COLLISIONS:
break;
case ETHER_STAT_ALIGN_ERRORS:
break;
case ETHER_STAT_FCS_ERRORS:
break;
break;
break;
break;
case ETHER_STAT_EX_COLLISIONS:
break;
break;
case MAC_STAT_OVERFLOWS:
break;
case MAC_STAT_UNDERFLOWS:
break;
break;
case ETHER_STAT_JABBER_ERRORS:
break;
default:
return (ENOTSUP);
}
return (0);
}
/*
 * GLDv3 mc_start entry point: bring the port up, allocating its DMA
 * resources lazily at start time rather than at attach.
 *
 * NOTE(review): the body is truncated in this copy -- the allocation
 * calls, the failure condition guarding the ENOMEM path, and the code
 * that actually starts the port are missing, leaving the braces
 * unbalanced.  Restore from the original source before building.
 */
int
yge_m_start(void *arg)
{
/*
 * We defer resource allocation to this point, because we
 * don't want to waste DMA resources that might better be used
 * elsewhere, if the port is not actually being used.
 *
 * Furthermore, this gives us a more graceful handling of dynamic
 * MTU modification.
 */
/* Make sure we free up partially allocated resources. */
return (ENOMEM);
}
return (0);
}
/*
 * GLDv3 mc_stop entry point: stop the port (unless the device is
 * suspended) and release the DMA resources acquired in yge_m_start().
 *
 * NOTE(review): the body is truncated in this copy -- the declarations
 * of the port/device locals and the actual stop/free calls are missing.
 * Restore from the original source before building.
 */
void
yge_m_stop(void *arg)
{
if (!dev->d_suspended)
/* Release resources we don't need */
}
int
{
/* Save current promiscuous mode. */
return (0);
}
int
{
int bit;
if (add) {
/* Set the corresponding bit in the hash table. */
}
} else {
}
}
if (update) {
}
return (0);
}
int
{
return (0);
}
mblk_t *
{
int enq = 0;
int idx;
/* carrier_errors++; */
}
return (NULL);
}
/* attempt a reclaim */
break;
}
enq++;
}
if (enq > 0) {
/* Transmit */
}
if (resched)
return (mp);
}
void
{
#ifdef YGE_MII_LOOPBACK
/* LINTED E_FUNC_SET_NOT_USED */
/*
* Right now, the MII common layer does not properly handle
* loopback on these PHYs. Fixing this should be done at some
* point in the future.
*/
return;
#else
#endif
}
int
{
int err = 0;
return (err);
}
switch (pr_num) {
case MAC_PROP_MTU:
if (pr_valsize < sizeof (new_mtu)) {
break;
}
/* no change */
err = 0;
break;
}
"Maximum MTU size too small: %d", new_mtu);
break;
}
ETHERMTU : YGE_JUMBO_MTU)) {
"Maximum MTU size too big: %d", new_mtu);
break;
}
"Unable to change maximum MTU while running");
break;
}
/*
* NB: It would probably be better not to hold the
* DEVLOCK, but releasing it creates a potential race
* if m_start is called concurrently.
*
* It turns out that the MAC layer guarantees safety
* for us here by using a cut out for this kind of
* notification call back anyway.
*
* See R8. and R14. in mac.c locking comments, which read
* as follows:
*
* R8. Since it is not guaranteed (see R14) that
* drivers won't hold locks across mac driver
* interfaces, the MAC layer must provide a cut out
* for control interfaces like upcall notifications
* and start them in a separate thread.
*
* R14. It would be preferable if MAC drivers don't
* hold any locks across any mac call. However at a
* minimum they must not hold any locks across data
* upcalls. They must also make sure that all
* references to mac data structures are cleaned up
* and that it is single threaded at mac_unregister
* time.
*/
if (err != 0) {
/* This should never occur! */
"Failed notifying GLDv3 of new maximum MTU");
} else {
}
break;
default:
break;
}
err:
return (err);
}
int
{
int err;
return (err);
}
if (pr_valsize == 0)
return (EINVAL);
*perm = MAC_PROP_PERM_RW;
switch (pr_num) {
case MAC_PROP_MTU:
if (!(pr_flags & MAC_PROP_POSSIBLE)) {
break;
}
if (pr_valsize < sizeof (mac_propval_range_t))
return (EINVAL);
err = 0;
break;
default:
break;
}
return (err);
}
void
{
}
void
{
int flags;
for (;;) {
dev->d_task_flags = 0;
/*
* This should be the first thing after the sleep so if we are
* requested to exit we do that and not waste time doing work
* we will then abandone.
*/
if (flags & YGE_TASK_EXIT)
break;
/* all processing done without holding locks */
if (flags & YGE_TASK_RESTART)
}
}
void
{
char buf[256];
int ppa;
} else {
else
}
}
static int
{
int rv;
switch (cmd) {
case DDI_ATTACH:
if (rv != DDI_SUCCESS) {
}
return (rv);
case DDI_RESUME:
return (yge_resume(dev));
default:
return (DDI_FAILURE);
}
}
static int
{
int rv;
switch (cmd) {
case DDI_DETACH:
/* attempt to unregister MACs from Nemo */
for (int i = 0; i < dev->d_num_port; i++) {
if (rv != DDI_SUCCESS) {
return (DDI_FAILURE);
}
}
return (DDI_SUCCESS);
case DDI_SUSPEND:
return (yge_suspend(dev));
default:
return (DDI_FAILURE);
}
}
static int
{
/* NB: No locking! We are called in single threaded context */
for (int i = 0; i < dev->d_num_port; i++) {
}
/* Disable all interrupts. */
/* Put hardware into reset. */
return (DDI_SUCCESS);
}
/*
* Stream information
*/
/*
* Module linkage information.
*/
/*
 * Driver-module linkage element passed to mod_install() via
 * yge_modlinkage below.
 */
static struct modldrv yge_modldrv = {
&mod_driverops, /* drv_modops */
"Yukon 2 Ethernet", /* drv_linkinfo */
&yge_devops /* drv_dev_ops */
};
static struct modlinkage yge_modlinkage = {
MODREV_1, /* ml_rev */
&yge_modldrv, /* ml_linkage */
};
/*
* DDI entry points.
*/
int
_init(void)
{
int rv;
}
return (rv);
}
int
_fini(void)
{
int rv;
}
return (rv);
}
int
{
}