/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
*/
/*
* PSARC/2007/453 MSI-X interrupt limit override
*/
/*
 * Software workaround for a Neptune (PCI-E)
 * hardware interrupt bug: the hardware
 * may generate spurious interrupts after the
 * device interrupt handler has been removed. If this flag
 * is enabled, the driver will reset the
 * hardware when devices are being detached.
 */
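/*
 * Illustrative sketch only (not part of this driver): how a detach
 * path can gate a hardware reset on a workaround flag like the one
 * described above. All names below are hypothetical.
 */
static int example_reset_on_detach = 1;	/* hypothetical tunable */

static void
example_detach(void (*hw_reset)(void))
{
	/*
	 * Reset the hardware so it cannot raise a spurious interrupt
	 * after its handler has been removed.
	 */
	if (example_reset_on_detach && hw_reset != NULL)
		hw_reset();
}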
/*
 * Software workaround for the hardware
 * checksum bugs that affect packet transmission
 * and reception:
 *
 * Usage of nxge_cksum_offload:
 *
 * (1) nxge_cksum_offload = 0 (default):
 * - transmit packets:
 * TCP: uses the hardware checksum feature.
 * UDP: driver will compute the software checksum
 * based on the partial checksum computed
 * by the IP layer.
 * - receive packets:
 * TCP: marks packet checksum flags based on hardware result.
 * UDP: will not mark checksum flags.
 *
 * (2) nxge_cksum_offload = 1:
 * - transmit packets:
 * TCP and UDP: use the hardware checksum feature.
 * - receive packets:
 * TCP and UDP: mark packet checksum flags based on hardware result.
 *
 * (3) nxge_cksum_offload = 2:
 * - The driver will not register its checksum capability.
 * Checksums for both TCP and UDP will be computed
 * by the stack.
 * - Software LSO is not allowed in this case.
 *
 * (4) nxge_cksum_offload > 2:
 * - Will be treated as if it were set to 2
 * (the stack will compute the checksum).
 *
 * (5) If the hardware bug is fixed, this workaround
 * needs to be updated accordingly to reflect
 * the new hardware revision.
 */
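/*
 * Minimal sketch (not driver code) of the value handling described in
 * (4) above: any nxge_cksum_offload value greater than 2 behaves as 2,
 * so a dispatch over the tunable only needs modes 0, 1 and 2.
 */
static unsigned int
example_cksum_mode(unsigned int cksum_offload)
{
	return ((cksum_offload > 2) ? 2 : cksum_offload);
}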
/* MAX LSO size */
/*
* Add tunable to reduce the amount of time spent in the
* ISR doing Rx Processing.
*/
/*
* Tunables to manage the receive buffer blocks.
*
* nxge_rx_threshold_hi: copy all buffers.
* nxge_rx_bcopy_size_type: receive buffer block size type.
* nxge_rx_threshold_lo: copy only up to tunable block size type.
*/
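/*
 * Hedged sketch of the copy policy implied by these tunables, assuming
 * the high threshold selects a copy-everything mode and the low
 * threshold bounds the packet size that is still copied. Names and
 * exact semantics here are illustrative only.
 */
static int
example_should_bcopy(unsigned int pkt_len, unsigned int bcopy_max,
    int over_hi_threshold)
{
	if (over_hi_threshold)
		return (1);		/* copy all buffers */
	return (pkt_len <= bcopy_max);	/* copy small packets only */
}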
/* Use kmem_alloc() to allocate data buffers. */
#if defined(__sparc)
#else
#endif
/*
 * The hardware sometimes fails to allow enough time for the link partner
 * to send an acknowledgement for packets that the hardware sent to it. The
 * hardware resends the packets earlier than it should in those instances.
 * This behavior caused some switches to acknowledge the wrong packets,
 * which triggered a fatal error.
 * The software workaround is to set the replay timer to a value
 * suggested by the hardware team.
 *
 * PCI config space replay timer register:
 * The replay timeout value below is 0xc
 * for bits 18:14.
 */
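/*
 * Sketch of the read-modify-write implied above (illustrative only):
 * insert the 5-bit timeout value 0xc into bits 18:14 of the replay
 * timer register value. Register access itself is abstracted away.
 */
static unsigned int
example_set_replay_timeout(unsigned int regval)
{
	const unsigned int shift = 14;
	const unsigned int mask = 0x1fU << shift;	/* bits 18:14 */

	return ((regval & ~mask) | (0xcU << shift));
}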
/*
 * The transmit serialization sometimes sleeps longer
 * than it should before calling the driver's transmit
 * function.
 * The performance group suggests that a wait-time tunable
 * can be used to set the maximum wait time when needed;
 * the default is set to 1 tick.
 */
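/*
 * Sketch only: a wait bounded by a tunable maximum, expressed with a
 * hypothetical tick counter. The real driver uses kernel timing
 * primitives; the point is simply that the wait cannot exceed the
 * tunable, which defaults to one tick.
 */
static void
example_bounded_wait(unsigned long (*ticks)(void), unsigned long max_wait,
    int (*done)(void))
{
	unsigned long start = ticks();

	while (!done() && (ticks() - start) < max_wait)
		;	/* wait until work is ready or the cap is hit */
}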
#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
/*
 * The following is the default API supported:
 * major 1 and minor 1.
 *
 * Please update MAX_NIU_MAJORS,
 * MAX_NIU_MINORS, and the minor number supported
 * when newer Hypervisor API interfaces
 * are added. Also, please update nxge_hsvc_register()
 * if needed.
 */
NIU_MINOR_VER, "nxge"
};
static int nxge_hsvc_register(p_nxge_t);
#endif
/*
* Function Prototypes
*/
static void nxge_unattach(p_nxge_t);
static int nxge_quiesce(dev_info_t *);
#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif
/*
* These two functions are required by nxge_hio.c
*/
static void nxge_destroy_mutexes(p_nxge_t);
#ifdef NXGE_DEBUG
#endif
static void nxge_suspend(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);
struct ddi_dma_attr *,
static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);
size_t);
static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
char *, caddr_t);
#if defined(sun4v)
#endif
/*
* The next declarations are for the GLDv3 interface.
*/
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_promisc(void *, boolean_t);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
uint_t, void *);
static void nxge_m_propinfo(void *, const char *, mac_prop_id_t,
static void nxge_priv_propinfo(const char *, mac_prop_info_handle_t);
const void *);
static void nxge_fill_ring(void *, mac_ring_type_t, const int, const int,
static void nxge_set_pci_replay_timeout(nxge_t *);
char *nxge_priv_props[] = {
"_adv_10gfdx_cap",
"_adv_pause_cap",
"_function_number",
"_fw_version",
"_port_mode",
"_hot_swap_phy",
"_rxdma_intr_time",
"_rxdma_intr_pkts",
"_class_opt_ipv4_tcp",
"_class_opt_ipv4_udp",
"_class_opt_ipv4_ah",
"_class_opt_ipv4_sctp",
"_class_opt_ipv6_tcp",
"_class_opt_ipv6_udp",
"_class_opt_ipv6_ah",
"_class_opt_ipv6_sctp",
"_soft_lso_enable",
};
#define NXGE_M_CALLBACK_FLAGS \
NULL,
NULL,
NULL,
NULL,
NULL,
};
void
/* PSARC/2007/453 MSI-X interrupt limit override. */
static int nxge_create_msi_property(p_nxge_t);
/*
 * For applications that care about latency, PAE and
 * customers requested that the driver have tunables
 * that allow the user to raise the number of
 * interrupts, to spread the interrupts among
 * multiple channels. The DDI framework limits
 * the maximum number of MSI-X resources to allocate
 * to 8 (ddi_msix_alloc_limit). If more than 8
 * are needed, ddi_msix_alloc_limit must be set accordingly.
 * The default number of MSI interrupts is
 * 8 for a 10G link and 2 for a 1G link.
 */
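/*
 * Sketch of the sizing policy described above (names hypothetical):
 * default to 8 MSI-X vectors for a 10G link and 2 for a 1G link, and
 * never request more than the framework's allocation limit
 * (ddi_msix_alloc_limit).
 */
static int
example_msix_request(int is_10g, int tunable, int alloc_limit)
{
	int want = (tunable > 0) ? tunable : (is_10g ? 8 : 2);

	return ((want > alloc_limit) ? alloc_limit : want);
}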
/*
* These global variables control the message
* output.
*/
/*
* This list contains the instance structures for the Neptune
* devices present in the system. The lock exists to guarantee
* mutually exclusive access to the list.
*/
extern uint64_t npi_debug_level;
extern void nxge_fm_init(p_nxge_t,
ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
/*
* Count used to maintain the number of buffers being used
* by Neptune instances and loaned up to the upper layers.
*/
/*
* Device register access attributes for PIO.
*/
};
/*
* Device descriptor access attributes for DMA.
*/
};
/*
* Device buffer access attributes for DMA.
*/
};
DMA_ATTR_V0, /* version number. */
0, /* low address */
0xffffffffffffffff, /* high address */
0xffffffffffffffff, /* address counter max */
#ifndef NIU_PA_WORKAROUND
0x100000, /* alignment */
#else
0x2000,
#endif
0xfc00fc, /* dlim_burstsizes */
0x1, /* minimum transfer size */
0xffffffffffffffff, /* maximum transfer size */
0xffffffffffffffff, /* maximum segment size */
(unsigned int) 1, /* granularity */
0 /* attribute flags */
};
DMA_ATTR_V0, /* version number. */
0, /* low address */
0xffffffffffffffff, /* high address */
0xffffffffffffffff, /* address counter max */
#if defined(_BIG_ENDIAN)
0x2000, /* alignment */
#else
0x1000, /* alignment */
#endif
0xfc00fc, /* dlim_burstsizes */
0x1, /* minimum transfer size */
0xffffffffffffffff, /* maximum transfer size */
0xffffffffffffffff, /* maximum segment size */
(unsigned int) 1, /* granularity */
0 /* attribute flags */
};
DMA_ATTR_V0, /* version number. */
0, /* low address */
0xffffffffffffffff, /* high address */
0xffffffffffffffff, /* address counter max */
0x2000, /* alignment */
0xfc00fc, /* dlim_burstsizes */
0x1, /* minimum transfer size */
0xffffffffffffffff, /* maximum transfer size */
0xffffffffffffffff, /* maximum segment size */
(unsigned int) 1, /* granularity */
DDI_DMA_RELAXED_ORDERING /* attribute flags */
};
(uint_t)0, /* dlim_addr_lo */
0x1, /* dlim_minxfer */
1024 /* dlim_speed */
};
/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
#else
0x10000, 0x20000, 0x40000, 0x80000,
0x100000, 0x200000, 0x400000, 0x800000,
0x1000000, 0x2000000, 0x4000000};
#endif
/*
* Translate "dev_t" to a pointer to the associated "dev_info_t".
*/
extern void nxge_get_environs(nxge_t *);
static int
{
int instance;
/*
* Get the device instance since we'll need to setup
* or retrieve a soft state for this instance.
*/
switch (cmd) {
case DDI_ATTACH:
break;
case DDI_RESUME:
break;
}
break;
}
} else {
}
goto nxge_attach_exit;
case DDI_PM_RESUME:
break;
}
break;
}
goto nxge_attach_exit;
default:
goto nxge_attach_exit;
}
goto nxge_attach_exit;
}
status = NXGE_ERROR;
goto nxge_attach_fail2;
}
/* Are we a guest running in a Hybrid I/O environment? */
goto nxge_attach_fail3;
}
/* Create & initialize the per-Neptune data structure */
/* (even if we're a guest). */
"nxge_init_common_dev failed"));
goto nxge_attach_fail4;
}
/*
* Software workaround: set the replay timer.
*/
}
#if defined(sun4v)
/* This is required by nxge_hio_init(), which follows. */
goto nxge_attach_fail4;
#endif
"nxge_hio_init failed"));
goto nxge_attach_fail4;
}
" function %d. Only functions 0 and 1 are "
status = NXGE_ERROR;
goto nxge_attach_fail4;
}
}
if (isLDOMguest(nxgep)) {
/*
* Use the function number here.
*/
/* XXX We'll set the MAC address counts to 1 for now. */
} else {
else
/*
 * Neptune has 4 ports: the first 2 ports use XMAC (the 10G MAC)
 * internally, and the other 2 ports use BMAC (the 1G "Big" MAC).
 * The two types of MACs have different characteristics.
 */
} else {
}
}
/*
 * Set up the ndd parameters for this instance.
 */
/*
* Setup Register Tracing Buffer.
*/
/* init stats ptr */
/*
* Copy the vpd info from eeprom to a local data
* structure, and then check its validity.
*/
if (!isLDOMguest(nxgep)) {
int *regp;
int rv;
/* Find the NIU config handle. */
if (rv != DDI_PROP_SUCCESS) {
goto nxge_attach_fail5;
}
/*
 * The address_hi, i.e. the first int in the reg
 * property, contains the config handle, but bits 28-31,
 * which are OBP-specific info, need to be removed.
 */
}
/*
* Set the defaults for the MTU size.
*/
if (isLDOMguest(nxgep)) {
extern void nxge_get_logical_props(p_nxge_t);
"phy-type", "virtual transceiver");
/*
* local-mac-address property gives us info on which
* specific MAC address the Hybrid resource is associated
* with.
*/
"local-mac-address", &prop_val,
&prop_len) != DDI_PROP_SUCCESS) {
goto nxge_attach_fail5;
}
if (prop_len != ETHERADDRL) {
goto nxge_attach_fail5;
}
/*
* Enable Jumbo property based on the "max-frame-size"
* property value.
*/
"max-frame-size", NXGE_MTU_DEFAULT_MAX);
if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
(max_frame_size <= TX_JUMBO_MTU)) {
}
} else {
" Couldn't determine card type"
" .... exit "));
goto nxge_attach_fail5;
}
"get_hw create failed"));
goto nxge_attach_fail;
}
}
/*
* Setup the Kstats for the driver.
*/
if (!isLDOMguest(nxgep))
goto nxge_attach_fail;
}
if (!isLDOMguest(nxgep))
goto nxge_attach_fail;
}
#if defined(sun4v)
if (isLDOMguest(nxgep)) {
/* Find our VR & channel sets. */
if (status != DDI_SUCCESS) {
"nxge_hio_vr_add failed"));
goto nxge_attach_fail;
}
goto nxge_attach_exit;
}
#endif
if (status != DDI_SUCCESS) {
goto nxge_attach_fail;
}
if (status != DDI_SUCCESS) {
goto nxge_attach_fail;
}
/* If a guest, register with vio_net instead. */
"unable to register to mac layer (%d)", status));
goto nxge_attach_fail;
}
"registered to mac (instance %d)", instance));
/* nxge_link_monitor calls xcvr.check_link recursively */
goto nxge_attach_exit;
goto nxge_attach_fail1;
/*
* Tear down the ndd parameters setup.
*/
/*
* Tear down the kstat setup.
*/
}
/*
* Unmap the register setup.
*/
status));
return (status);
}
static int
{
int instance;
goto nxge_detach_exit;
}
switch (cmd) {
case DDI_DETACH:
break;
case DDI_PM_SUSPEND:
break;
case DDI_SUSPEND:
}
break;
default:
}
if (cmd != DDI_DETACH)
goto nxge_detach_exit;
/*
* Stop the xcvr polling.
*/
"<== nxge_detach status = 0x%08X", status));
return (DDI_FAILURE);
}
"<== nxge_detach (mac_unregister) status = 0x%08X", status));
status));
return (status);
}
static void
{
return;
}
nxgep->nxge_magic = 0;
if (nxgep->nxge_timerid) {
nxgep->nxge_timerid = 0;
}
/*
* If this flag is set, it will affect the Neptune
* only.
*/
}
#if defined(sun4v)
if (isLDOMguest(nxgep)) {
(void) nxge_hio_vr_release(nxgep);
}
#endif
}
#if defined(sun4v)
}
#endif
/*
* Stop any further interrupts.
*/
/*
* Stop the device and free resources.
*/
if (!isLDOMguest(nxgep)) {
}
/*
* Tear down the ndd parameters setup.
*/
/*
* Tear down the kstat setup.
*/
/*
* Free any memory allocated for PHY properties
*/
}
/*
* Destroy all mutexes.
*/
/*
* Remove the list of ndd parameters which
* were setup during attach.
*/
" nxge_unattach: remove all properties"));
}
#if NXGE_PROPERTY
#endif
/*
* Unmap the register setup.
*/
}
#if defined(sun4v)
int
{
int i, j;
return (DDI_SUCCESS);
}
/*
* Currently, the NIU Hypervisor API supports two major versions:
* version 1 and 2.
* If Hypervisor introduces a higher major or minor version,
* please update NIU_MAJOR_HI and NIU_MINOR_HI accordingly.
*/
sizeof (hsvc_info_t));
for (i = NIU_MAJOR_HI; i > 0; i--) {
for (j = NIU_MINOR_HI; j >= 0; j--) {
"nxge_hsvc_register: %s: negotiating "
"hypervisor services revision %d "
"group: 0x%lx major: 0x%lx "
"minor: 0x%lx",
nxgep->niu_min_ver));
&nxgep->niu_min_ver)) == 0) {
/* Use the supported minor */
"nxge_hsvc_register: %s: negotiated "
"hypervisor services revision %d "
"group: 0x%lx major: 0x%lx "
"minor: 0x%lx (niu_min_ver 0x%lx)",
nxgep->niu_min_ver));
"<== nxge_hsvc_register: "
"NIU Hypervisor service enabled"));
return (DDI_SUCCESS);
}
"nxge_hsvc_register: %s: negotiated failed - "
"try lower major number "
"hypervisor services revision %d "
"group: 0x%lx major: 0x%lx minor: 0x%lx "
"errno: %d",
}
}
"nxge_hsvc_register: %s: cannot negotiate "
"hypervisor services revision %d group: 0x%lx "
"major: 0x%lx minor: 0x%lx errno: %d",
"<== nxge_hsvc_register: Register to NIU Hypervisor failed"));
return (DDI_FAILURE);
}
#endif
static nxge_status_t
{
char *devname;
#ifdef NXGE_DEBUG
char *sysname;
#endif
#if !defined(_BIG_ENDIAN)
#endif
if (isLDOMguest(nxgep)) {
return (nxge_guest_regs_map(nxgep));
}
"nxge_map_regs: pathname devname %s", devname));
/*
* The driver is running on a N2-NIU system if devname is something
* like "/niu@80/network@0"
*/
/*
* Get function number:
*/
nxgep->function_num));
} else {
int *prop_val;
0, "reg",
"Reg property not found"));
goto nxge_map_regs_fail0;
} else {
"Reg property found: fun # %d",
func_num));
if (isLDOMguest(nxgep)) {
return (NXGE_OK);
}
}
}
default:
"nxge_map_regs: pci config size 0x%x", regsize));
if (ddi_status != DDI_SUCCESS) {
"ddi_map_regs, nxge bus config regs failed"));
goto nxge_map_regs_fail0;
}
"nxge_map_reg: PCI config addr 0x%0llx "
dev_regs->nxge_pciregh));
/*
 * IMPORTANT:
 * This is a workaround for a bit-swapping bug in HW
 * which ends up setting no-snoop = yes,
 * resulting in DMA not being synched properly.
 */
#if !defined(_BIG_ENDIAN)
/* workarounds for x86 systems */
#endif
"nxge_map_regs: pio size 0x%x", regsize));
/* set up the device mapped register */
if (ddi_status != DDI_SUCCESS) {
"ddi_map_regs for Neptune global reg failed"));
goto nxge_map_regs_fail1;
}
"nxge_map_regs: msix size 0x%x", regsize));
if (ddi_status != DDI_SUCCESS) {
"ddi_map_regs for msi reg failed"));
goto nxge_map_regs_fail2;
}
/* set up the vio region mapped register */
"nxge_map_regs: vio size 0x%x", regsize));
if (ddi_status != DDI_SUCCESS) {
"ddi_map_regs for nxge vio reg failed"));
goto nxge_map_regs_fail3;
}
break;
case N2_NIU:
/*
* Set up the device mapped register (FWARC 2006/556)
* (changed back to 1: reg starts at 1!)
*/
"nxge_map_regs: dev size 0x%x", regsize));
if (ddi_status != DDI_SUCCESS) {
goto nxge_map_regs_fail1;
}
/* set up the first vio region mapped register */
"nxge_map_regs: vio (1) size 0x%x", regsize));
if (ddi_status != DDI_SUCCESS) {
"ddi_map_regs for nxge vio reg failed"));
goto nxge_map_regs_fail2;
}
/* set up the second vio region mapped register */
"nxge_map_regs: vio (3) size 0x%x", regsize));
if (ddi_status != DDI_SUCCESS) {
"ddi_map_regs for nxge vio2 reg failed"));
goto nxge_map_regs_fail3;
}
break;
}
goto nxge_map_regs_exit;
if (dev_regs->nxge_msix_regh) {
}
if (dev_regs->nxge_vir_regh) {
}
}
if (dev_regs->nxge_pciregh) {
}
if (ddi_status != DDI_SUCCESS)
return (status);
}
static void
{
if (isLDOMguest(nxgep)) {
return;
}
"==> nxge_unmap_regs: bus"));
}
"==> nxge_unmap_regs: device registers"));
}
"==> nxge_unmap_regs: device interrupts"));
}
"==> nxge_unmap_regs: vio region"));
}
"==> nxge_unmap_regs: vio2 region"));
}
}
}
static nxge_status_t
{
int partition;
/*
 * Get the interrupt cookie so the mutexes can be
 * initialized.
 */
if (isLDOMguest(nxgep)) {
nxgep->interrupt_cookie = 0;
} else {
if (ddi_status != DDI_SUCCESS) {
"<== nxge_setup_mutexes: failed 0x%x",
ddi_status));
goto nxge_setup_mutexes_exit;
}
}
/*
* Initialize mutexes for this device.
*/
/*
 * FFLP mutexes are never used in interrupt context
 * because FFLP operations can take a very long time
 * to complete and hence are not suitable to invoke
 * from interrupt handlers.
 */
if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
}
}
"<== nxge_setup_mutexes status = %x", status));
if (ddi_status != DDI_SUCCESS)
return (status);
}
static void
{
int partition;
/* Destroy all polling resources. */
/* free data structures, based on HW type */
if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
}
}
}
{
return (status);
}
goto nxge_init_fail1;
}
if (!isLDOMguest(nxgep)) {
/*
* Initialize and enable the TXC registers.
* (Globally enable the Tx controller,
* enable the port, configure the dma channel bitmap,
* configure the max burst size).
*/
NXGE_ERR_CTL, "init txc failed\n"));
goto nxge_init_fail2;
}
}
/*
* Initialize and enable TXDMA channels.
*/
goto nxge_init_fail3;
}
/*
* Initialize and enable RXDMA channels.
*/
goto nxge_init_fail4;
}
/*
* The guest domain is now done.
*/
if (isLDOMguest(nxgep)) {
goto nxge_init_exit;
}
/*
* Initialize TCAM and FCRAM (Neptune).
*/
goto nxge_init_fail5;
}
/*
* Initialize ZCP
*/
goto nxge_init_fail5;
}
/*
* Initialize IPP.
*/
goto nxge_init_fail5;
}
/*
* Initialize the MAC block.
*/
goto nxge_init_fail5;
}
/*
 * Enable the interrupts for DDI.
 */
goto nxge_init_exit;
if (!isLDOMguest(nxgep)) {
(void) nxge_txc_uninit(nxgep);
}
"<== nxge_init status (failed) = 0x%08x", status));
return (status);
status));
return (status);
}
{
}
return (NULL);
}
/*ARGSUSED*/
void
{
if (timerid) {
}
}
void
{
"==> nxge_uninit: not initialized"));
"<== nxge_uninit"));
return;
}
if (!isLDOMguest(nxgep)) {
/*
* Reset the receive MAC side.
*/
(void) nxge_rx_mac_disable(nxgep);
/*
* Drain the IPP.
*/
(void) nxge_ipp_drain(nxgep);
}
/* stop timer */
if (nxgep->nxge_timerid) {
nxgep->nxge_timerid = 0;
}
(void) nxge_intr_hw_disable(nxgep);
/* Disable and soft reset the IPP */
if (!isLDOMguest(nxgep))
(void) nxge_ipp_disable(nxgep);
/* Free classification resources */
(void) nxge_classify_uninit(nxgep);
/*
* Reset the transmit MAC side.
*/
(void) nxge_tx_mac_disable(nxgep);
/*
 * Start the timer if the reset flag is not set.
 * If the reset flag is set, the link monitor
 * will not be started, in order to stop further bus
 * activity coming from this interface.
 * The driver will start the monitor function
 * if the interface is initialized again later.
 */
if (!nxge_peu_reset_enable) {
}
"nxge_mblks_pending %d", nxge_mblks_pending));
}
void
{
int i, retry;
regdata = 0;
retry = 1;
for (i = 0; i < retry; i++) {
}
}
void
{
}
/*ARGSUSED*/
/*VARARGS*/
void
{
int instance;
/* In case a developer has changed nxge_debug_level. */
}
if ((level & debug_level) ||
(level == NXGE_ERR_CTL)) {
/* do the msg processing */
}
if (level & NXGE_ERR_CTL) {
}
instance = -1;
} else {
(void) sprintf(prefix_buffer,
}
}
}
char *
{
int i;
if (!size)
size = 60;
if (size > MAX_DUMP_SZ) {
/* Dump the leading bytes */
for (i = 0; i < MAX_DUMP_SZ/2; i++) {
if (*ap > 0x0f)
*cp++ = ':';
}
for (i = 0; i < 20; i++)
*cp++ = '.';
/* Dump the last MAX_DUMP_SZ/2 bytes */
for (i = 0; i < MAX_DUMP_SZ/2; i++) {
if (*ap > 0x0f)
*cp++ = ':';
}
} else {
for (i = 0; i < size; i++) {
if (*ap > 0x0f)
*cp++ = ':';
}
}
*--cp = 0;
return (etherbuf);
}
#ifdef NXGE_DEBUG
static void
{
char *dev_ptr;
int i;
if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
"Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
"Neptune PCI cfg_ptr vendor id ptr 0x%llx",
"\tvendorid 0x%x devid 0x%x",
"PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
"bar1c 0x%x",
"\nNeptune PCI BAR: base20 0x%x base24 0x%x "
"base 28 0x%x bar2c 0x%x\n",
"\nNeptune PCI BAR: base30 0x%x\n",
"first 0x%llx second 0x%llx third 0x%llx "
"last 0x%llx ",
}
}
#endif
static void
{
}
static nxge_status_t
{
(void) nxge_rx_mac_enable(nxgep);
(void) nxge_tx_mac_enable(nxgep);
"<== nxge_resume status = 0x%x", status));
return (status);
}
static nxge_status_t
{
status = NXGE_ERROR;
}
" nxge_setup_dev status "
"(xcvr init 0x%08x)", status));
goto nxge_setup_dev_exit;
}
"<== nxge_setup_dev port %d status = 0x%08x",
return (status);
}
static void
{
(void) nxge_hw_stop(nxgep);
}
static nxge_status_t
{
" nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
" default_block_size %d iommu_pagesize %d",
if (iommu_pagesize != 0) {
if (iommu_pagesize > 0x4000)
} else {
}
}
}
"==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
"default_block_size %d page mask %d",
nxgep->sys_page_mask));
switch (nxgep->sys_page_sz) {
default:
break;
case 0x1000:
break;
case 0x2000:
break;
case 0x4000:
break;
case 0x8000:
break;
}
#ifndef USE_RX_BIG_BUF
#else
#endif
/*
* Get the system DMA burst size.
*/
DDI_DMA_DONTWAIT, 0,
&nxgep->dmasparehandle);
if (ddi_status != DDI_SUCCESS) {
"ddi_dma_alloc_handle: failed "
" status 0x%x", ddi_status));
}
sizeof (nxgep->dmasparehandle),
DDI_DMA_DONTWAIT, 0,
if (ddi_status != DDI_DMA_MAPPED) {
"Binding spare handle to find system"
" burstsize failed."));
}
if (ddi_status != DDI_SUCCESS)
"<== nxge_setup_system_dma_pages status = 0x%08x", status));
return (status);
}
static nxge_status_t
{
return (NXGE_ERROR);
}
return (NXGE_ERROR);
}
return (NXGE_OK);
}
static void
{
}
{
/*
* Allocate memory for the common DMA data structures.
*/
KM_SLEEP);
/*
 * Assume that each DMA channel will be configured with
 * the default block size.
 * RBR block counts must be a multiple of the batch count (16).
 */
if (!nxge_port_rbr_size) {
}
if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
}
}
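/*
 * Sketch of the rounding rule stated above (illustrative only): round
 * an RBR block count up to the next multiple of the post batch size.
 */
static unsigned int
example_round_to_batch(unsigned int nblocks, unsigned int batch)
{
	if (nblocks % batch)
		nblocks = batch * (nblocks / batch + 1);
	return (nblocks);
}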
if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
"nxge_alloc_rx_mem_pool: RBR size too high %d, "
"set to default %d",
}
if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
"nxge_alloc_rx_mem_pool: RCR too high %d, "
"set to default %d",
}
/*
 * N2/NIU has a limitation on descriptor sizes (contiguous
 * memory allocation for data buffers is limited to 4M by the
 * contig_mem_alloc() function).
 */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
(!ISP2(nxge_port_rbr_size))) {
}
if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
(!ISP2(nxge_port_rcr_size))) {
}
}
#endif
/*
 * Addresses of the receive block ring, receive completion ring and the
 * mailbox must all be cache-aligned (64 bytes).
 */
rx_cntl_alloc_size *= (sizeof (rx_desc_t));
rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
"nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
"nxge_port_rcr_size = %d "
"rx_cntl_alloc_size = %d",
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
if (!ISP2(rx_buf_alloc_size)) {
"==> nxge_alloc_rx_mem_pool: "
" must be power of 2"));
}
"==> nxge_alloc_rx_mem_pool: "
" limit size to 4M"));
}
if (rx_cntl_alloc_size < 0x2000) {
rx_cntl_alloc_size = 0x2000;
}
}
#endif
/* Allocate the receive rings, too. */
"<== nxge_alloc_rx_mem_pool:status 0x%08x", status));
return (status);
}
/*
* nxge_alloc_rxb
*
* Allocate buffers for an RDC.
*
* Arguments:
* nxgep
* channel The channel to map into our kernel space.
*
* Notes:
*
* NPI function calls:
*
* NXGE function calls:
*
* Registers accessed:
*
* Context:
*
* Taking apart:
*
* Open questions:
*
*/
int channel)
{
/*
 * Allocate memory for the receive buffers and descriptor rings.
 * Replace these allocation functions with the interface functions
 * provided by the partition manager Real Soon Now.
 */
/*
* Allocate memory for the receive buffer blocks.
*/
if ((status = nxge_alloc_rx_buf_dma(
return (status);
}
/*
* Allocate memory for descriptor rings and mailbox.
*/
if ((status = nxge_alloc_rx_cntl_dma(
!= NXGE_OK) {
return (status);
}
"<== nxge_alloc_rx_mem_pool:status 0x%08x", status));
return (status);
}
void
int channel)
{
}
static void
{
"<== nxge_free_rx_mem_pool "
"(null rx buf pool or buf not allocated"));
return;
}
"<== nxge_free_rx_mem_pool "
"(null rx cntl buf pool or cntl buf not allocated"));
return;
}
sizeof (p_nxge_dma_common_t) * rdc_max);
sizeof (p_nxge_dma_common_t) * rdc_max);
nxgep->rx_buf_pool_p = 0;
nxgep->rx_cntl_pool_p = 0;
sizeof (p_rx_rbr_ring_t) * rdc_max);
sizeof (p_rx_rcr_ring_t) * rdc_max);
sizeof (p_rx_mbox_t) * rdc_max);
nxgep->rx_rbr_rings = 0;
nxgep->rx_rcr_rings = 0;
nxgep->rx_mbox_areas_p = 0;
}
static nxge_status_t
{
KM_SLEEP);
" alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
#if defined(RX_USE_RECLAIM_POST)
#endif
i = 0;
size_index = 0;
while ((size_index < array_size) &&
size_index++;
if (size_index >= array_size) {
}
/* For Neptune, use kmem_alloc if the kmem flag is set. */
size_index = 0;
#endif
"==> nxge_alloc_rx_buf_dma: "
"Neptune use kmem_alloc() - size_index %d",
size_index));
}
while ((allocated < total_alloc_size) &&
(size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
rx_dmap[i].dma_chunk_index = i;
/*
 * N2/NIU: data buffers must be contiguous, as the driver
 * needs to call the Hypervisor API to set up
 * logical pages.
 */
} else if (use_kmem_alloc) {
/* For Neptune, use kmem_alloc */
"==> nxge_alloc_rx_buf_dma: "
"Neptune use kmem_alloc()"));
}
"alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
"i %d nblocks %d alength %d",
(p_nxge_dma_common_t)(&rx_dmap[i]));
"nxge_alloc_rx_buf_dma: Alloc Failed: "
"dma %d size_index %d size requested %d",
size_index--;
} else {
" nxge_alloc_rx_buf_dma DONE alloc mem: "
"dma %d dma_buf_p $%p kaddrp $%p alength %d "
"buf_alloc_state %d alloc_type %d",
&rx_dmap[i],
rx_dmap[i].buf_alloc_type));
" alloc_rx_buf_dma allocated rdc %d "
"chunk %d size %x dvma %x bufp %llx kaddrp $%p",
i++;
}
}
if (allocated < total_alloc_size) {
"==> nxge_alloc_rx_buf_dma: not enough for channel %d "
"allocated 0x%x requested 0x%x",
status = NXGE_ERROR;
goto nxge_alloc_rx_mem_fail1;
}
"==> nxge_alloc_rx_buf_dma: Allocated for channel %d "
"allocated 0x%x requested 0x%x",
" alloc_rx_buf_dma rdc %d allocated %d chunks",
dma_channel, i));
*num_chunks = i;
goto nxge_alloc_rx_mem_exit;
"<== nxge_alloc_rx_buf_dma status 0x%08x", status));
return (status);
}
/*ARGSUSED*/
static void
{
int i;
"==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));
if (dmap == 0)
return;
for (i = 0; i < num_chunks; i++) {
"==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
i, dmap));
}
}
/*ARGSUSED*/
static nxge_status_t
{
size,
rx_dmap);
}
"<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
return (status);
}
/*ARGSUSED*/
static void
{
if (dmap == 0)
return;
}
typedef struct {
static
{
/*
* Assume that each DMA channel will be configured with the
* default transmit buffer size for copying transmit data.
* (If a packet is bigger than this, it will not be copied.)
*/
} else {
}
cr_size += sizeof (txdma_mailbox_t);
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
"==> nxge_tdc_sizes: Tx size"
" must be power of 2"));
return (NXGE_ERROR);
}
"==> nxge_tdc_sizes: Tx size"
" limited to 4M"));
return (NXGE_ERROR);
}
if (cr_size < 0x2000)
cr_size = 0x2000;
}
#endif
return (NXGE_OK);
}
/*
* nxge_alloc_txb
*
* Allocate buffers for a TDC.
*
* Arguments:
* nxgep
* channel The channel to map into our kernel space.
*
* Notes:
*
* NPI function calls:
*
* NXGE function calls:
*
* Registers accessed:
*
* Context:
*
* Taking apart:
*
* Open questions:
*
*/
int channel)
{
return (NXGE_ERROR);
/*
 * Allocate memory for transmit buffers and descriptor rings.
 * Replace these allocation functions with the interface functions
 * provided by the partition manager when it is available.
 *
 * Allocate memory for the transmit buffer pool.
 */
"sizes: tx: %ld, cr:%ld, th:%ld",
*num_chunks = 0;
return (status);
}
/*
* Allocate memory for descriptor rings and mailbox.
*/
return (status);
}
return (NXGE_OK);
}
void
int channel)
{
}
/*
* nxge_alloc_tx_mem_pool
*
* This function allocates all of the per-port TDC control data structures.
* The per-channel (TDC) data structures are allocated when needed.
*
* Arguments:
* nxgep
*
* Notes:
*
* Context:
* Any domain
*/
{
int tdc_max;
/*
* Allocate memory for each transmit DMA channel.
*/
KM_SLEEP);
if (nxge_tx_ring_size > TDC_DEFAULT_MAX) {
"nxge_alloc_tx_mem_pool: TDC too high %d, "
"set to default %d",
}
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
/*
 * N2/NIU has a limitation on descriptor sizes (contiguous
 * memory allocation for data buffers is limited to 4M by the
 * contig_mem_alloc() function). The transmit ring is limited
 * to 8K (including the mailbox).
 */
if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
(!ISP2(nxge_tx_ring_size))) {
}
}
#endif
"==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d",
return (NXGE_OK);
}
{
KM_SLEEP);
i = 0;
size_index = 0;
while ((size_index < array_size) &&
size_index++;
if (size_index >= array_size) {
}
while ((allocated < total_alloc_size) &&
(size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
tx_dmap[i].dma_chunk_index = i;
/*
 * N2/NIU: data buffers must be contiguous, as the driver
 * needs to call the Hypervisor API to set up
 * logical pages.
 */
}
(p_nxge_dma_common_t)(&tx_dmap[i]));
size_index--;
} else {
i++;
}
}
if (allocated < total_alloc_size) {
"==> nxge_alloc_tx_buf_dma: not enough channel %d: "
"allocated 0x%x requested 0x%x",
status = NXGE_ERROR;
goto nxge_alloc_tx_mem_fail1;
}
"==> nxge_alloc_tx_buf_dma: Allocated for channel %d: "
"allocated 0x%x requested 0x%x",
*num_chunks = i;
"==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
*dmap, i));
goto nxge_alloc_tx_mem_exit;
"<== nxge_alloc_tx_buf_dma status 0x%08x", status));
return (status);
}
/*ARGSUSED*/
static void
{
int i;
if (dmap == 0)
return;
for (i = 0; i < num_chunks; i++) {
}
}
/*ARGSUSED*/
{
size,
tx_dmap);
}
"<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
return (status);
}
/*ARGSUSED*/
static void
{
if (dmap == 0)
return;
}
/*
* nxge_free_tx_mem_pool
*
* This function frees all of the per-port TDC control data structures.
* The per-channel (TDC) data structures are freed when the channel
* is stopped.
*
* Arguments:
* nxgep
*
* Notes:
*
* Context:
* Any domain
*/
static void
{
"<== nxge_free_tx_mem_pool "
"(null tx buf pool or buf not allocated"));
return;
}
"<== nxge_free_tx_mem_pool "
"(null tx cntl buf pool or cntl buf not allocated"));
return;
}
/* 1. Free the mailboxes. */
sizeof (p_tx_mbox_t) * tdc_max);
nxgep->tx_mbox_areas_p = 0;
/* 2. Free the transmit ring arrays. */
sizeof (p_tx_ring_t) * tdc_max);
/* 3. Free the completion ring data structures. */
sizeof (p_nxge_dma_common_t) * tdc_max);
nxgep->tx_cntl_pool_p = 0;
/* 4. Free the data ring data structures. */
sizeof (p_nxge_dma_common_t) * tdc_max);
nxgep->tx_buf_pool_p = 0;
}
/*ARGSUSED*/
static nxge_status_t
struct ddi_dma_attr *dma_attrp,
{
/*
* contig_alloc_type for contiguous memory only allowed
*/
"nxge_dma_mem_alloc: alloc type not allowed (%d)",
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
if (ddi_status != DDI_SUCCESS) {
"nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
switch (contig_alloc_type) {
case B_FALSE:
switch (kmem_alloc_type) {
case B_FALSE:
&dma_p->acc_handle);
if (ddi_status != DDI_SUCCESS) {
"nxge_dma_mem_alloc: "
"ddi_dma_mem_alloc failed"));
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
"nxge_dma_mem_alloc:di_dma_mem_alloc "
"< length."));
return (NXGE_ERROR);
}
NULL,
if (ddi_status != DDI_DMA_MAPPED) {
"nxge_dma_mem_alloc: ddi_dma_addr_bind "
"failed "
"(staus 0x%x ncookies %d.)", ddi_status,
if (dma_p->acc_handle) {
}
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
"nxge_dma_mem_alloc:ddi_dma_addr_bind "
"> 1 cookie"
"(staus 0x%x ncookies %d.)", ddi_status,
if (dma_p->acc_handle) {
}
return (NXGE_ERROR);
}
break;
case B_TRUE:
"nxge_dma_mem_alloc:ddi_dma_mem_alloc "
"kmem alloc failed"));
return (NXGE_ERROR);
}
DDI_DMA_DONTWAIT, 0,
if (ddi_status != DDI_DMA_MAPPED) {
"nxge_dma_mem_alloc:ddi_dma_addr_bind: "
"(kmem_alloc) failed kaddrp $%p length %d "
"(staus 0x%x (%d) ncookies %d.)",
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
"nxge_dma_mem_alloc:ddi_dma_addr_bind "
"(kmem_alloc) > 1 cookie"
"(staus 0x%x ncookies %d.)", ddi_status,
return (NXGE_ERROR);
}
"nxge_dma_mem_alloc: kmem_alloc dmap $%p "
"kaddr $%p alength %d",
break;
}
break;
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
case B_TRUE:
"nxge_dma_mem_alloc:contig_mem_alloc failed."));
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
if (ddi_status != DDI_DMA_MAPPED) {
"nxge_dma_mem_alloc:di_dma_addr_bind failed "
"(status 0x%x ncookies %d.)", ddi_status,
"==> nxge_dma_mem_alloc: (not mapped)"
"length %lu (0x%x) "
"free contig kaddrp $%p "
"va_to_pa $%p",
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
"nxge_dma_mem_alloc:di_dma_addr_bind > 1 "
"cookie or "
"dmac_laddress is NULL $%p size %d "
" (status 0x%x ncookies %d.)",
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
break;
#else
case B_TRUE:
"nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
return (NXGE_ERROR | NXGE_DDI_FAILED);
#endif
}
#if defined(__i386)
#else
#endif
#if defined(__i386)
#else
#endif
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
#endif
"dma buffer allocated: dma_p $%p "
"return dmac_ladress from cookie $%p cookie dmac_size %d "
"dma_p->ioaddr_p $%p "
"dma_p->orig_ioaddr_p $%p "
"orig_vatopa $%p "
"alength %d (0x%x) "
"kaddrp $%p "
"length %d (0x%x)",
return (NXGE_OK);
}
static void
{
}
}
}
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
if (dma_p->contig_alloc_type &&
"kaddrp $%p (orig_kaddrp $%p)"
"mem type %d ",
"orig_alength %d "
"alength 0x%x (%d)",
}
#endif
}
static void
{
}
}
}
"==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d",
dma_p->buf_alloc_state));
"<== nxge_dma_free_rx_data_buf: "
"outstanding data buffers"));
return;
}
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
if (dma_p->contig_alloc_type &&
"kaddrp $%p (orig_kaddrp $%p)"
"mem type %d ",
"orig_alength %d "
"alength 0x%x (%d)",
return;
}
#endif
if (dma_p->kmem_alloc_type) {
"nxge_dma_free_rx_data_buf: free kmem "
"kaddrp $%p (orig_kaddrp $%p)"
"alloc type %d "
"orig_alength %d "
"alength 0x%x (%d)",
#if defined(__i386)
#else
#endif
"nxge_dma_free_rx_data_buf: free dmap $%p "
"kaddr $%p buf_size %d",
dma_p->orig_alength = 0;
}
}
/*
 * nxge_m_start() -- start transmitting and receiving.
 *
 * This function is called by the MAC layer when the first
 * stream is opened, to prepare the hardware for transmitting
 * and receiving packets.
 */
static int
{
/*
* Are we already started?
*/
return (0);
}
}
/*
* Make sure RX MAC is disabled while we initialize.
*/
if (!isLDOMguest(nxgep)) {
(void) nxge_rx_mac_disable(nxgep);
}
/*
* Grab the global lock.
*/
/*
* Initialize the driver and hardware.
*/
"<== nxge_m_start: initialization failed"));
return (EIO);
}
/*
* Start timer to check the system error and tx hangs
*/
if (!isLDOMguest(nxgep))
#if defined(sun4v)
else
#endif
nxgep->link_check_count = 0;
/*
* Let the global lock go, since we are initialized.
*/
/*
* Let the MAC start receiving packets, now that
* we are initialized.
*/
if (!isLDOMguest(nxgep)) {
"<== nxge_m_start: enable of RX mac failed"));
return (EIO);
}
/*
* Enable hardware interrupts.
*/
}
#if defined(sun4v)
else {
/*
* In guest domain we enable RDCs and their interrupts as
* the last step.
*/
"<== nxge_m_start: enable of RDCs failed"));
return (EIO);
}
"<== nxge_m_start: intrs enable for RDCs failed"));
return (EIO);
}
}
#endif
return (0);
}
static boolean_t
{
int i;
for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
return (B_FALSE);
}
return (B_TRUE);
}
/*
* nxge_m_stop(): stop transmitting and receiving.
*/
static void
{
/*
* Are the groups stopped?
*/
if (!groups_stopped) {
return;
}
if (!isLDOMguest(nxgep)) {
/*
* Disable the RX mac.
*/
(void) nxge_rx_mac_disable(nxgep);
/*
* Wait for the IPP to drain.
*/
(void) nxge_ipp_drain(nxgep);
/*
* Disable hardware interrupts.
*/
}
#if defined(sun4v)
else {
}
#endif
/*
* Grab the global lock.
*/
if (nxgep->nxge_timerid) {
nxgep->nxge_timerid = 0;
}
/*
* Clean up.
*/
/*
* Let go of the global lock.
*/
}
static int
{
"==> nxge_m_multicst: add %d", add));
if (add) {
"<== nxge_m_multicst: add multicast failed"));
return (EINVAL);
}
} else {
"<== nxge_m_multicst: del multicast failed"));
return (EINVAL);
}
}
return (0);
}
static int
{
"==> nxge_m_promisc: on %d", on));
"<== nxge_m_promisc: set promisc failed"));
return (EINVAL);
}
"<== nxge_m_promisc: on %d", on));
return (0);
}
static void
{
int err;
int cmd;
switch (cmd) {
default:
return;
case LB_GET_INFO_SIZE:
case LB_GET_INFO:
case LB_GET_MODE:
break;
case LB_SET_MODE:
break;
case NXGE_GET_MII:
case NXGE_PUT_MII:
case NXGE_GET64:
case NXGE_PUT64:
case NXGE_GET_TX_RING_SZ:
case NXGE_GET_TX_DESC:
case NXGE_TX_SIDE_RESET:
case NXGE_RX_SIDE_RESET:
case NXGE_GLOBAL_RESET:
case NXGE_RESET_MAC:
case NXGE_TX_REGS_DUMP:
case NXGE_RX_REGS_DUMP:
case NXGE_INT_REGS_DUMP:
case NXGE_VIR_INT_REGS_DUMP:
case NXGE_PUT_TCAM:
case NXGE_GET_TCAM:
case NXGE_RTRACE:
case NXGE_RDUMP:
case NXGE_RX_CLASS:
case NXGE_RX_HASH:
break;
case NXGE_INJECT_ERR:
break;
}
if (need_privilege) {
if (err != 0) {
"<== nxge_m_ioctl: no priv"));
return;
}
}
switch (cmd) {
case LB_GET_MODE:
case LB_SET_MODE:
case LB_GET_INFO_SIZE:
case LB_GET_INFO:
break;
case NXGE_GET_MII:
case NXGE_PUT_MII:
case NXGE_PUT_TCAM:
case NXGE_GET_TCAM:
case NXGE_GET64:
case NXGE_PUT64:
case NXGE_GET_TX_RING_SZ:
case NXGE_GET_TX_DESC:
case NXGE_TX_SIDE_RESET:
case NXGE_RX_SIDE_RESET:
case NXGE_GLOBAL_RESET:
case NXGE_RESET_MAC:
case NXGE_TX_REGS_DUMP:
case NXGE_RX_REGS_DUMP:
case NXGE_INT_REGS_DUMP:
case NXGE_VIR_INT_REGS_DUMP:
"==> nxge_m_ioctl: cmd 0x%x", cmd));
break;
case NXGE_RX_CLASS:
else
break;
case NXGE_RX_HASH:
else
break;
}
}
void
{
int i;
for (i = 0; i < ETHERADDRL; i++) {
if (factory) {
(ETHERADDRL-1) - i];
} else {
(ETHERADDRL - 1) - i];
}
}
}
/*
* nxge_altmac_set() -- Set an alternate MAC address
*/
static int
{
return (EIO);
/*
* Set the rdc table number for the host info entry
* for this mac address slot.
*/
if (usetbl)
else
return (EIO);
}
/*
 * Enable comparison with the alternate MAC address.
 * While the first alternate addr is enabled by bit 1 of register
 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
 * XMAC_ADDR_CMPEN, so the slot needs to be converted to addrn
 * accordingly before calling npi_mac_altaddr_entry.
 * (A sketch of this conversion follows this function.)
 */
else
return (EIO);
}
return (0);
}
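/*
 * Sketch of the slot-to-addrn conversion described in the comment
 * above (illustrative only, assuming slots are numbered from 1): XMAC
 * compare-enable bits start at bit 0 and BMAC's at bit 1, so only the
 * XMAC case shifts the slot number down.
 */
static unsigned int
example_slot_to_addrn(int is_xmac, unsigned int slot)
{
	return (is_xmac ? (slot - 1) : slot);
}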
/*
 * nxge_m_mmac_add_g() - find an unused address slot, set the address
 * value to the one specified, and enable the port to start filtering on
 * the new MAC address. Returns 0 on success.
 */
int
{
int slot;
int err;
/*
* Make sure that nxge is initialized, if _start() has
* not been called.
*/
return (ENXIO);
}
}
return (ENOSPC);
}
/*
 * Search for the first available slot. Because naddrfree
 * is not zero, we are guaranteed to find one.
 * Each of the first two ports of Neptune has 16 alternate
 * MAC slots, but only the first 7 (of 15) slots have assigned factory
 * MAC addresses. We first search among the slots without bundled
 * factory MACs. If we fail to find one in that range, we then
 * search the slots with bundled factory MACs. A factory MAC
 * is wasted while its slot is used with a user MAC address,
 * but the slot can be used by the factory MAC again after calling
 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
 * (A sketch of this search follows this function.)
 */
break;
}
usetbl)) != 0) {
return (err);
}
return (0);
}
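/*
 * Sketch of the two-pass slot search described above (hypothetical
 * bookkeeping arrays): prefer free slots without a bundled factory
 * MAC; fall back to factory-MAC slots only if none are found.
 */
static int
example_find_slot(const int *has_factory_mac, const int *in_use, int nslots)
{
	int pass, slot;

	for (pass = 0; pass < 2; pass++) {	/* pass 0: no factory MAC */
		for (slot = 0; slot < nslots; slot++) {
			if (!in_use[slot] && has_factory_mac[slot] == pass)
				return (slot);
		}
	}
	return (-1);	/* no free slot */
}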
/*
 * Remove the specified MAC address and update the hardware so it
 * no longer filters on that MAC address.
 */
int
{
int err = 0;
/*
* Make sure that nxge is initialized, if _start() has
* not been called.
*/
return (ENXIO);
}
}
return (EINVAL);
}
else
== NPI_SUCCESS) {
/*
 * Regardless of whether the MAC we just stopped filtering
 * is a user addr or a factory addr, we must set
 * the MMAC_VENDOR_ADDR flag if this slot has an
 * associated factory MAC to indicate that a factory
 * MAC is available.
 */
|= MMAC_VENDOR_ADDR;
}
/*
* Clear mac_pool[slot].addr so that kstat shows 0
* alternate MAC address if the slot is not used.
* (But nxge_m_mmac_get returns the factory MAC even
* when the slot is not used!)
*/
} else {
}
} else {
}
return (err);
}
/*
 * The callback to query all the factory addresses. naddr must be the same as
 * the number of factory addresses (returned by MAC_CAPAB_MULTIFACTADDR), and
 * mcm_addr is the space allocated for keeping all the addresses, whose size is
 * naddr * MAXMACADDRLEN.
 */
static void
{
int i;
for (i = 0; i < naddr; i++) {
}
}
static boolean_t
{
switch (cap) {
case MAC_CAPAB_HCKSUM:
"==> nxge_m_getcapab: checksum %d", nxge_cksum_offload));
if (nxge_cksum_offload <= 1) {
}
break;
case MAC_CAPAB_MULTIFACTADDR: {
if (!isLDOMguest(nxgep)) {
}
break;
}
case MAC_CAPAB_LSO: {
if (nxgep->soft_lso_enable) {
if (nxge_cksum_offload <= 1) {
if (nxge_lso_max > NXGE_LSO_MAXLEN) {
}
}
break;
} else {
return (B_FALSE);
}
}
case MAC_CAPAB_RINGS: {
if (isLDOMguest(nxgep)) {
} else {
/*
* Service Domain.
*/
}
"==> nxge_m_getcapab: rx nrings[%d] ngroups[%d]",
} else {
/*
* TX Rings.
*/
if (isLDOMguest(nxgep)) {
} else {
/*
* Service Domain.
*/
/*
* Share capable.
*
* Do not report the default group: hence -1
*/
}
"==> nxge_m_getcapab: tx rings # of rings %d",
}
break;
}
#if defined(sun4v)
case MAC_CAPAB_SHARES: {
/*
* Only the service domain driver responds to
* this capability request.
*/
if (isLDOMservice(nxgep)) {
} else {
return (B_FALSE);
}
break;
}
#endif
default:
return (B_FALSE);
}
return (B_TRUE);
}
static boolean_t
{
/*
* All adv_* parameters are locked (read-only) while
* the device is in any sort of loopback mode ...
*/
switch (pr_num) {
case MAC_PROP_ADV_1000FDX_CAP:
case MAC_PROP_EN_1000FDX_CAP:
case MAC_PROP_ADV_1000HDX_CAP:
case MAC_PROP_EN_1000HDX_CAP:
case MAC_PROP_ADV_100FDX_CAP:
case MAC_PROP_EN_100FDX_CAP:
case MAC_PROP_ADV_100HDX_CAP:
case MAC_PROP_EN_100HDX_CAP:
case MAC_PROP_ADV_10FDX_CAP:
case MAC_PROP_EN_10FDX_CAP:
case MAC_PROP_ADV_10HDX_CAP:
case MAC_PROP_EN_10HDX_CAP:
case MAC_PROP_AUTONEG:
case MAC_PROP_FLOWCTRL:
return (B_TRUE);
}
return (B_FALSE);
}
/*
*/
static int
{
int err = 0;
/*
* All adv_* parameters are locked (read-only)
* while the device is in any sort of loopback mode.
*/
"==> nxge_m_setprop: loopback mode: read only"));
return (EBUSY);
}
switch (pr_num) {
case MAC_PROP_EN_1000FDX_CAP:
goto reprogram;
case MAC_PROP_EN_100FDX_CAP:
goto reprogram;
case MAC_PROP_EN_10FDX_CAP:
goto reprogram;
case MAC_PROP_AUTONEG:
goto reprogram;
case MAC_PROP_MTU: {
"==> nxge_m_setprop: set MTU: %d is_jumbo %d",
err = 0;
break;
}
break;
}
if ((new_mtu < NXGE_DEFAULT_MTU) ||
(new_mtu > NXGE_MAXIMUM_MTU)) {
break;
}
if (nxge_mac_set_framesize(nxgep)) {
break;
}
"==> nxge_m_setprop: set MTU: %d maxframe %d",
break;
}
case MAC_PROP_FLOWCTRL: {
switch (fl) {
case LINK_FLOWCTRL_NONE:
break;
case LINK_FLOWCTRL_RX:
break;
case LINK_FLOWCTRL_TX:
case LINK_FLOWCTRL_BI:
break;
default:
break;
}
if (!nxge_param_link_update(nxgep)) {
}
} else {
}
break;
}
case MAC_PROP_PRIVATE:
"==> nxge_m_setprop: private property"));
break;
default:
break;
}
"<== nxge_m_setprop (return %d)", err));
return (err);
}
static int
{
"==> nxge_m_getprop: pr_num %d", pr_num));
switch (pr_num) {
case MAC_PROP_DUPLEX:
break;
case MAC_PROP_SPEED: {
break;
}
case MAC_PROP_STATUS: {
break;
}
case MAC_PROP_AUTONEG:
break;
case MAC_PROP_FLOWCTRL: {
break;
}
case MAC_PROP_ADV_1000FDX_CAP:
break;
case MAC_PROP_EN_1000FDX_CAP:
break;
case MAC_PROP_ADV_100FDX_CAP:
break;
case MAC_PROP_EN_100FDX_CAP:
break;
case MAC_PROP_ADV_10FDX_CAP:
break;
case MAC_PROP_EN_10FDX_CAP:
break;
case MAC_PROP_PRIVATE:
pr_val));
default:
return (ENOTSUP);
}
return (0);
}
static void
{
/*
 * By default, permissions are read/write unless specified
 * otherwise by the driver.
 */
switch (pr_num) {
case MAC_PROP_DUPLEX:
case MAC_PROP_SPEED:
case MAC_PROP_STATUS:
case MAC_PROP_EN_1000HDX_CAP:
case MAC_PROP_EN_100HDX_CAP:
case MAC_PROP_EN_10HDX_CAP:
case MAC_PROP_ADV_1000FDX_CAP:
case MAC_PROP_ADV_1000HDX_CAP:
case MAC_PROP_ADV_100FDX_CAP:
case MAC_PROP_ADV_100HDX_CAP:
case MAC_PROP_ADV_10FDX_CAP:
case MAC_PROP_ADV_10HDX_CAP:
/*
* Note that read-only properties don't need to
* provide default values since they cannot be
* changed by the administrator.
*/
break;
case MAC_PROP_EN_1000FDX_CAP:
case MAC_PROP_EN_100FDX_CAP:
case MAC_PROP_EN_10FDX_CAP:
break;
case MAC_PROP_AUTONEG:
break;
case MAC_PROP_FLOWCTRL:
break;
case MAC_PROP_MTU:
break;
case MAC_PROP_PRIVATE:
break;
}
/*
* Some properties are locked (read-only) while the
* device is in any sort of loopback mode.
*/
}
}
static void
{
"%d", RXDMA_RCR_TO_DEFAULT);
"%d", RXDMA_RCR_PTHRES_DEFAULT);
}
}
/* ARGSUSED */
static int
const void *pr_val)
{
int err = 0;
long result;
"==> nxge_set_priv_prop: name %s", pr_name));
/* Blanking */
(char *)pr_val,
if (err) {
"<== nxge_set_priv_prop: "
"unable to set (%s)", pr_name));
} else {
err = 0;
"<== nxge_set_priv_prop: "
"set (%s)", pr_name));
}
"<== nxge_set_priv_prop: name %s (value %d)",
return (err);
}
(char *)pr_val,
if (err) {
"<== nxge_set_priv_prop: "
"unable to set (%s)", pr_name));
} else {
err = 0;
"<== nxge_set_priv_prop: "
"set (%s)", pr_name));
}
"<== nxge_set_priv_prop: name %s (value %d)",
return (err);
}
/* Classification */
return (err);
}
"<== nxge_set_priv_prop: name %s (value 0x%x)",
return (err);
}
return (err);
}
"<== nxge_set_priv_prop: name %s (value 0x%x)",
return (err);
}
return (err);
}
"<== nxge_set_priv_prop: name %s (value 0x%x)",
return (err);
}
return (err);
}
"<== nxge_set_priv_prop: name %s (value 0x%x)",
return (err);
}
return (err);
}
"<== nxge_set_priv_prop: name %s (value 0x%x)",
return (err);
}
return (err);
}
"<== nxge_set_priv_prop: name %s (value 0x%x)",
return (err);
}
return (err);
}
"<== nxge_set_priv_prop: name %s (value 0x%x)",
return (err);
}
return (err);
}
"<== nxge_set_priv_prop: name %s (value 0x%x)",
return (err);
}
"==> nxge_set_priv_prop: name %s (null)", pr_name));
return (err);
}
"<== nxge_set_priv_prop: name %s "
"(lso %d pr_val %s value %d)",
} else {
"no change (%d %d)",
return (0);
}
}
"<== nxge_set_priv_prop: name %s (value %d)",
return (err);
}
/*
 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the
 * following code to be executed.
 */
return (err);
}
return (err);
}
return (ENOTSUP);
}
static int
void *pr_val)
{
"==> nxge_get_priv_prop: property %s", pr_name));
/* function number */
"==> nxge_get_priv_prop: name %s "
"(value %d valstr %s)",
err = 0;
goto done;
}
/* Neptune firmware version */
"==> nxge_get_priv_prop: name %s "
"(value %d valstr %s)",
err = 0;
goto done;
}
/* port PHY mode */
case PORT_1G_COPPER:
"[Hot Swappable]" : "");
break;
case PORT_1G_FIBER:
"[hot swappable]" : "");
break;
case PORT_10G_COPPER:
"10G copper %s",
"[hot swappable]" : "");
break;
case PORT_10G_FIBER:
"[hot swappable]" : "");
break;
case PORT_10G_SERDES:
"[hot swappable]" : "");
break;
case PORT_1G_SERDES:
"[hot swappable]" : "");
break;
case PORT_1G_TN1010:
"[hot swappable]" : "");
break;
case PORT_10G_TN1010:
"[hot swappable]" : "");
break;
case PORT_1G_RGMII_FIBER:
"[hot swappable]" : "");
break;
case PORT_HSP_MODE:
"phy not present[hot swappable]");
break;
default:
"[hot swappable]" : "");
break;
}
"==> nxge_get_priv_prop: name %s (value %s)",
err = 0;
goto done;
}
/* Hot swappable PHY */
"yes" : "no");
"==> nxge_get_priv_prop: name %s "
"(value %d valstr %s)",
err = 0;
goto done;
}
/* Receive Interrupt Blanking Parameters */
err = 0;
"==> nxge_get_priv_prop: name %s (value %d)",
goto done;
}
err = 0;
"==> nxge_get_priv_prop: name %s (value %d)",
goto done;
}
/* Classification and Load Distribution Configuration */
"==> nxge_get_priv_prop: %s", valstr));
goto done;
}
"==> nxge_get_priv_prop: %s", valstr));
goto done;
}
"==> nxge_get_priv_prop: %s", valstr));
goto done;
}
"==> nxge_get_priv_prop: %s", valstr));
goto done;
}
"==> nxge_get_priv_prop: %s", valstr));
goto done;
}
"==> nxge_get_priv_prop: %s", valstr));
goto done;
}
"==> nxge_get_priv_prop: %s", valstr));
goto done;
}
"==> nxge_get_priv_prop: %s", valstr));
goto done;
}
/* Software LSO */
err = 0;
"==> nxge_get_priv_prop: name %s (value %d)",
goto done;
}
err = 0;
goto done;
} else {
goto done;
}
}
err = 0;
goto done;
} else {
goto done;
}
}
done:
if (err == 0) {
if (pr_valsize < strsize) {
} else {
}
}
"<== nxge_get_priv_prop: return %d", err));
return (err);
}
/*
* Module loading and removing entry points.
*/
/*
* Module linkage information for the kernel.
*/
};
};
int
_init(void)
{
int status;
if (status != 0) {
"failed to init device soft state"));
goto _init_exit;
}
if (status != 0) {
goto _init_exit;
}
return (status);
return (status);
}
int
_fini(void)
{
int status;
if (nxge_mblks_pending)
return (EBUSY);
if (status != DDI_SUCCESS) {
"Module removal failed 0x%08x",
status));
goto _fini_exit;
}
return (status);
return (status);
}
int
{
int status;
return (status);
}
/*ARGSUSED*/
static int
{
return (0);
}
static void
{
}
int
{
int i;
return (0);
}
/* set rcr_ring */
}
}
return (0);
}
static void
{
}
static int
{
int i;
#if defined(sun4v)
if (isLDOMguest(nxgep)) {
return (nxge_hio_get_dc_htable_idx(nxgep,
channel));
}
#endif
switch (type) {
case MAC_RING_TYPE_TX:
return ((int)
}
}
break;
case MAC_RING_TYPE_RX:
return ((int)
}
}
}
return (-1);
}
/*
 * Callback function for the MAC layer to register all rings.
 */
static void
{
int htable_idx;
switch (rtype) {
case MAC_RING_TYPE_TX: {
"==> nxge_fill_ring (TX) 0x%x index %d ntdcs %d",
channel);
if (htable_idx >= 0)
else
break;
}
case MAC_RING_TYPE_RX: {
int nxge_rindex;
"==> nxge_fill_ring (RX) 0x%x index %d nrdcs %d",
/*
* 'index' is the ring index within the group.
* Find the ring index in the nxge instance.
*/
/*
* Entrypoint to enable interrupt (disable poll) and
* disable interrupt (enable poll).
*/
channel);
if (htable_idx >= 0)
else
break;
}
default:
break;
}
}
static void
{
int dev_gindex;
int rv;
switch (type) {
case MAC_RING_TYPE_TX:
/*
 * nxge_grp_dc_add takes a channel number which is a
 * "device" ring ID.
 */
/*
* Remove the ring from the default group
*/
}
/*
* nxge->tx_set.group[] is an array of groups indexed by
* a "port" group ID.
*/
if (rv != 0) {
"nxge_group_add_ring: nxge_grp_dc_add failed"));
}
break;
case MAC_RING_TYPE_RX:
/*
* nxge->rx_set.group[] is an array of groups indexed by
* a "port" group ID.
*/
/*
 * nxge_grp_dc_add takes a channel number which is a
 * "device" ring ID.
 */
if (rv != 0) {
"nxge_group_add_ring: nxge_grp_dc_add failed"));
}
break;
}
}
static void
{
int dev_gindex;
switch (type) {
case MAC_RING_TYPE_TX:
/*
* Add the ring back to the default group
*/
}
break;
case MAC_RING_TYPE_RX:
break;
}
}
/*ARGSUSED*/
static nxge_status_t
{
int intr_types;
int type = 0;
} else if (nxge_msi_enable) {
}
/* Get the supported interrupt types */
!= DDI_SUCCESS) {
"ddi_intr_get_supported_types failed: status 0x%08x",
ddi_status));
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
"ddi_intr_get_supported_types: 0x%08x", intr_types));
/*
 * Solaris MSI-X is not supported yet; use MSI for now.
 * nxge_msi_enable (default 1):
 * 1 - MSI, 2 - MSI-X, others - FIXED
 */
switch (nxge_msi_enable) {
default:
"use fixed (intx emulation) type %08x",
type));
break;
case 2:
"ddi_intr_get_supported_types: 0x%08x", intr_types));
if (intr_types & DDI_INTR_TYPE_MSIX) {
"ddi_intr_get_supported_types: MSIX 0x%08x",
type));
} else if (intr_types & DDI_INTR_TYPE_MSI) {
"ddi_intr_get_supported_types: MSI 0x%08x",
type));
} else if (intr_types & DDI_INTR_TYPE_FIXED) {
"ddi_intr_get_supported_types: MSXED0x%08x",
type));
}
break;
case 1:
if (intr_types & DDI_INTR_TYPE_MSI) {
"ddi_intr_get_supported_types: MSI 0x%08x",
type));
} else if (intr_types & DDI_INTR_TYPE_MSIX) {
"ddi_intr_get_supported_types: MSIX 0x%08x",
type));
} else if (intr_types & DDI_INTR_TYPE_FIXED) {
"ddi_intr_get_supported_types: MSXED0x%08x",
type));
}
}
type == DDI_INTR_TYPE_FIXED) &&
" nxge_add_intrs: "
" nxge_add_intrs_adv failed: status 0x%08x",
status));
return (status);
} else {
"interrupts registered : type %d", type));
"\nAdded advanced nxge add_intr_adv "
"intr type 0x%x\n", type));
return (status);
}
}
"failed to register interrupts"));
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
return (status);
}
static nxge_status_t
{
int intr_type;
intr_type));
switch (intr_type) {
case DDI_INTR_TYPE_MSI: /* 0x2 */
case DDI_INTR_TYPE_MSIX: /* 0x4 */
case DDI_INTR_TYPE_FIXED: /* 0x1 */
default:
return (NXGE_ERROR);
}
}
/*ARGSUSED*/
static nxge_status_t
{
int behavior;
int inum = 0;
int x, y;
intrp->start_inum = 0;
"ddi_intr_get_nintrs() failed, status: 0x%x%, "
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
"ddi_intr_get_navail() failed, status: 0x%x%, "
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
"ddi_intr_get_navail() returned: nintrs %d, navail %d",
/* PSARC/2007/453 MSI-X interrupt limit override */
if (int_type == DDI_INTR_TYPE_MSIX) {
"nxge_add_intrs_adv_type: nintrs %d "
"navail %d (nrequest %d)",
}
}
/* MSI must be power of 2 */
if ((navail & 16) == 16) {
	navail = 16;
} else if ((navail & 8) == 8) {
	navail = 8;
} else if ((navail & 4) == 4) {
	navail = 4;
} else if ((navail & 2) == 2) {
	navail = 2;
} else {
	navail = 1;
}
"ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
}
" ddi_intr_alloc() failed: %d",
ddi_status));
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
" ddi_intr_get_pri() failed: %d",
ddi_status));
/* Free already allocated interrupts */
for (y = 0; y < nactual; y++) {
}
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
nrequired = 0;
default:
break;
case N2_NIU:
break;
}
"nxge_add_intrs_adv_typ:nxge_ldgv_init "
"failed: 0x%x", status));
/* Free already allocated interrupts */
for (y = 0; y < nactual; y++) {
}
return (status);
}
"nxge_add_intrs_adv_type: "
"arg1 0x%x arg2 0x%x: "
"1-1 int handler (entry %d intdata 0x%x)\n",
"nxge_add_intrs_adv_type: "
"arg1 0x%x arg2 0x%x: "
"nldevs %d int handler "
"(entry %d intdata 0x%x)\n",
}
"==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
!= DDI_SUCCESS) {
"==> nxge_add_intrs_adv_type: failed #%d "
"status 0x%x", x, ddi_status));
for (y = 0; y < intrp->intr_added; y++) {
(void) ddi_intr_remove_handler(
}
/* Free already allocated intr */
for (y = 0; y < nactual; y++) {
}
(void) nxge_ldgv_uninit(nxgep);
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
ldgp->htable_idx = x;
intrp->intr_added++;
}
"Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
intrp->intr_added));
(void) nxge_intr_ldgv_init(nxgep);
return (status);
}
/*ARGSUSED*/
static nxge_status_t
{
int behavior;
int inum = 0;
int x, y;
intrp->start_inum = 0;
"ddi_intr_get_nintrs() failed, status: 0x%x%, "
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
"ddi_intr_get_navail() failed, status: 0x%x%, "
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
"ddi_intr_get_navail() returned: nintrs %d, naavail %d",
" ddi_intr_alloc() failed: %d",
ddi_status));
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
" ddi_intr_get_pri() failed: %d",
ddi_status));
/* Free already allocated interrupts */
for (y = 0; y < nactual; y++) {
}
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
nrequired = 0;
default:
break;
case N2_NIU:
break;
}
"nxge_add_intrs_adv_type_fix:nxge_ldgv_init "
"failed: 0x%x", status));
/* Free already allocated interrupts */
for (y = 0; y < nactual; y++) {
}
return (status);
}
}
"nxge_add_intrs_adv_type_fix: "
"1-1 int handler(%d) ldg %d ldv %d "
"arg1 $%p arg2 $%p\n",
"nxge_add_intrs_adv_type_fix: "
"shared ldv %d int handler(%d) ldv %d ldg %d"
"arg1 0x%016llx arg2 0x%016llx\n",
}
!= DDI_SUCCESS) {
"==> nxge_add_intrs_adv_type_fix: failed #%d "
"status 0x%x", x, ddi_status));
for (y = 0; y < intrp->intr_added; y++) {
(void) ddi_intr_remove_handler(
}
for (y = 0; y < nactual; y++) {
}
/* Free already allocated intr */
(void) nxge_ldgv_uninit(nxgep);
return (NXGE_ERROR | NXGE_DDI_FAILED);
}
ldgp->htable_idx = x;
intrp->intr_added++;
}
return (status);
}
static void
{
int i, inum;
if (!intrp->intr_registered) {
"<== nxge_remove_intrs: interrupts not registered"));
return;
}
intrp->intr_added);
} else {
for (i = 0; i < intrp->intr_added; i++) {
}
}
}
}
"nxge_remove_intrs: ddi_intr_free inum %d "
"msi_intx_cnt %d intr_added %d",
inum,
intrp->intr_added));
}
}
intrp->msi_intx_cnt = 0;
intrp->intr_added = 0;
(void) nxge_ldgv_uninit(nxgep);
"#msix-request");
}
/*ARGSUSED*/
static void
{
int i;
int status;
if (!intrp->intr_registered) {
"interrupts are not registered"));
return;
}
if (intrp->intr_enabled) {
"<== nxge_intrs_enable: already enabled"));
return;
}
intrp->intr_added);
"block enable - status 0x%x total inums #%d\n",
} else {
for (i = 0; i < intrp->intr_added; i++) {
"ddi_intr_enable:enable - status 0x%x "
"total inums %d enable inum #%d\n",
if (status == DDI_SUCCESS) {
}
}
}
}
/*ARGSUSED*/
static void
{
int i;
if (!intrp->intr_registered) {
"interrupts are not registered"));
return;
}
intrp->intr_added);
} else {
for (i = 0; i < intrp->intr_added; i++) {
}
}
}
{
int status;
return (NXGE_ERROR);
if (!isLDOMguest(nxgep)) {
} else {
}
if (isLDOMguest(nxgep))
else
"==> nxge_mac_register: instance %d "
"max_sdu %d margin %d maxframe %d (header %d)",
if (isLDOMguest(nxgep)) {
}
if (status != 0) {
"!nxge_mac_register failed (status %d instance %d)",
return (NXGE_ERROR);
}
return (NXGE_OK);
}
void
{
size = 1024;
switch (blk_id) {
case MAC_BLK_ID:
break;
case TXMAC_BLK_ID:
break;
case RXMAC_BLK_ID:
break;
case MIF_BLK_ID:
break;
case IPP_BLK_ID:
break;
case TXC_BLK_ID:
break;
case TXDMA_BLK_ID:
break;
case RXDMA_BLK_ID:
break;
case ZCP_BLK_ID:
break;
case ESPC_BLK_ID:
break;
case FFLP_BLK_ID:
break;
case PHY_BLK_ID:
break;
case ETHER_SERDES_BLK_ID:
break;
case PCIE_SERDES_BLK_ID:
break;
case VIR_BLK_ID:
break;
}
}
static int
{
"==> nxge_init_common_dev:func # %d",
nxgep->function_num));
/*
* Loop through the existing per-Neptune hardware list.
*/
"==> nxge_init_common_device:func # %d "
"hw_p $%p parent dip $%p",
hw_p,
p_dip));
"==> nxge_init_common_device:func # %d "
"hw_p $%p parent dip $%p "
"ndevs %d (found)",
hw_p,
break;
}
}
char **prop_val;
int i;
"==> nxge_init_common_device:func # %d "
"parent dip $%p (new)",
p_dip));
} else {
}
nxge_hw_list = hw_p;
for (i = 0; i < prop_len; i++) {
NXGE_ROCK_COMPATIBLE) == 0)) {
"ROCK hw_p->platform_type %d",
hw_p->platform_type));
break;
}
"nxge_init_common_dev: read compatible"
" property[%d] val[%s]",
}
}
}
nxgep->platform_type));
}
"==> nxge_init_common_device (nxge_hw_list) $%p",
nxge_hw_list));
return (NXGE_OK);
}
static void
{
"<== nxge_uninit_common_device (no common)"));
return;
}
"==> nxge_uninit_common_device:func # %d "
"hw_p $%p parent dip $%p "
"ndevs %d (found)",
hw_p,
/*
 * Release the RDC table, a shared resource
 * of the nxge hardware. The RDC table was
 * assigned to this instance of nxge in
 * nxge_use_cfg_dma_config().
 */
if (!isLDOMguest(nxgep)) {
p_cfgp =
(void) nxge_fzc_rdc_tbl_unbind(nxgep,
/* Cleanup any outstanding groups. */
}
}
sizeof (tcam_flow_spec_t) *
"==> nxge_uninit_common_device: "
"func # %d "
"hw_p $%p parent dip $%p "
"ndevs %d (last)",
hw_p,
if (hw_p == nxge_hw_list) {
"==> nxge_uninit_common_device:"
"remove head func # %d "
"hw_p $%p parent dip $%p "
"ndevs %d (head)",
hw_p,
} else {
"==> nxge_uninit_common_device:"
"remove middle func # %d "
"hw_p $%p parent dip $%p "
"ndevs %d (middle)",
hw_p,
}
}
break;
} else {
}
}
"==> nxge_uninit_common_device (nxge_hw_list) $%p",
nxge_hw_list));
}
/*
* Determines the number of ports from the niu_type or the platform type.
* Returns the number of ports, or returns zero on failure.
*/
int
{
int nports = 0;
case N2_NIU:
case NEPTUNE_2_10GF:
nports = 2;
break;
case NEPTUNE_4_1GC:
case NEPTUNE_2_10GF_2_1GC:
case NEPTUNE_1_10GF_3_1GC:
case NEPTUNE_2_10GF_2_1GRF:
nports = 4;
break;
default:
switch (nxgep->platform_type) {
case P_NEPTUNE_NIU:
case P_NEPTUNE_ATLAS_2PORT:
nports = 2;
break;
case P_NEPTUNE_ATLAS_4PORT:
case P_NEPTUNE_MARAMBA_P0:
case P_NEPTUNE_MARAMBA_P1:
case P_NEPTUNE_ROCK:
case P_NEPTUNE_ALONSO:
nports = 4;
break;
default:
break;
}
break;
}
return (nports);
}
/*
* The following two functions are to support
* PSARC/2007/453 MSI-X interrupt limit override.
*/
static int
{
int nmsi;
extern int ncpus;
case PORT_10G_COPPER:
case PORT_10G_FIBER:
case PORT_10G_TN1010:
/*
 * The maximum number of MSI-X vectors requested will be 8.
 * If the # of CPUs is less than 8, we will request
 * MSI-X vectors based on the # of CPUs (default).
 */
"==>nxge_create_msi_property (10G): nxge_msix_10g_intrs %d",
if ((nxge_msix_10g_intrs == 0) ||
"==>nxge_create_msi_property (10G): reset to 8"));
} else {
}
/*
 * If the # of interrupts requested is 8 (the default),
 * the checking of the number of CPUs will be
 * maintained.
 */
if ((nmsi == NXGE_MSIX_REQUEST_10G) &&
"==>nxge_create_msi_property (10G): reset to 8"));
}
"==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
break;
default:
"==>nxge_create_msi_property (1G): nxge_msix_1g_intrs %d",
if ((nxge_msix_1g_intrs == 0) ||
"==>nxge_create_msi_property (1G): reset to 2"));
} else {
}
"==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
break;
}
return (nmsi);
}
/*
 * The following is a software workaround for the Neptune hardware's
 * interrupt bugs: the Neptune hardware may generate spurious interrupts
 * when an interrupt handler is removed.
 */
NXGE_GLU_RESET | \
static void
{
int i, j;
"==> nxge_niu_peu_reset: NULL hardware pointer"));
return;
}
"==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
nxgep->nxge_timerid));
/*
 * Make sure other instances on the same hardware
 * stop sending PIO and are in a quiescent state.
 */
for (i = 0; i < NXGE_MAX_PORTS; i++) {
"==> nxge_niu_peu_reset: checking entry %d "
"nxgep $%p", i, fnxgep));
#ifdef NXGE_DEBUG
if (fnxgep) {
"==> nxge_niu_peu_reset: entry %d (function %d) "
"link timer id %d hw timer id %d",
i, fnxgep->function_num,
fnxgep->nxge_timerid));
}
#endif
"==> nxge_niu_peu_reset: checking $%p "
"(function %d) timer ids",
for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
"==> nxge_niu_peu_reset: waiting"));
if (!fnxgep->nxge_timerid &&
break;
}
}
if (fnxgep->nxge_timerid ||
"<== nxge_niu_peu_reset: cannot reset "
"hardware (devices are still in use)"));
return;
}
}
}
"nxge_niu_peu_reset: read offset 0x%x (%d) "
"(data 0x%x)",
rvalue));
"nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
rvalue));
}
}
static void
{
if (!nxge_set_replay_timer) {
"==> nxge_set_pci_replay_timeout: will not change "
"the timeout"));
return;
}
"==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
"==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
"no PCI handle",
dev_regs));
return;
}
"nxge_set_pci_replay_timeout: replay timeout value before set 0x%x "
"(timeout value to set 0x%x at offset 0x%x) value 0x%x",
value);
"nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
}
/*
* quiesce(9E) entry point.
*
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
*
* This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
* DDI_FAILURE indicates an error condition and should almost never happen.
*/
static int
{
return (DDI_FAILURE);
/* Turn off debugging */
/*
* Stop link monitor only when linkchkmod is interrupt based
*/
}
(void) nxge_intr_hw_disable(nxgep);
/*
* Reset the receive MAC side.
*/
(void) nxge_rx_mac_disable(nxgep);
/* Disable and soft reset the IPP */
if (!isLDOMguest(nxgep))
(void) nxge_ipp_disable(nxgep);
/*
* Reset the transmit MAC side.
*/
(void) nxge_tx_mac_disable(nxgep);
return (DDI_SUCCESS);
}