/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2002-2009 Neterion, Inc.
 * All rights reserved.
*
* FileName : xgell.c
*
* Description: Xge Link Layer data path implementation
*
*/
#include "xgell.h"
#define	XGELL_MAX_FRAME_SIZE(hldev)	((hldev)->config.mtu +	\
    sizeof (struct ether_vlan_header))
/* DMA attributes used for Tx side */
static struct ddi_dma_attr tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
0x0ULL, /* dma_attr_addr_lo */
0xFFFFFFFFFFFFFFFFULL, /* dma_attr_addr_hi */
0xFFFFFFFFFFFFFFFFULL, /* dma_attr_count_max */
#if defined(__sparc)
0x2000, /* dma_attr_align */
#else
0x1000, /* dma_attr_align */
#endif
0xFC00FC, /* dma_attr_burstsizes */
0x1, /* dma_attr_minxfer */
0xFFFFFFFFFFFFFFFFULL, /* dma_attr_maxxfer */
0xFFFFFFFFFFFFFFFFULL, /* dma_attr_seg */
18, /* dma_attr_sgllen */
(unsigned int)1, /* dma_attr_granular */
0 /* dma_attr_flags */
};
/*
 * DMA attributes used when using ddi_dma_mem_alloc to
 * allocate HAL descriptors and Rx buffers during replenish
*/
static struct ddi_dma_attr hal_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
0x0ULL, /* dma_attr_addr_lo */
0xFFFFFFFFFFFFFFFFULL, /* dma_attr_addr_hi */
0xFFFFFFFFFFFFFFFFULL, /* dma_attr_count_max */
#if defined(__sparc)
0x2000, /* dma_attr_align */
#else
0x1000, /* dma_attr_align */
#endif
0xFC00FC, /* dma_attr_burstsizes */
0x1, /* dma_attr_minxfer */
0xFFFFFFFFFFFFFFFFULL, /* dma_attr_maxxfer */
0xFFFFFFFFFFFFFFFFULL, /* dma_attr_seg */
1, /* dma_attr_sgllen */
	(unsigned int)1,		/* dma_attr_granular */
DDI_DMA_RELAXED_ORDERING /* dma_attr_flags */
};
static int xgell_m_start(void *);
static void xgell_m_stop(void *);
static int xgell_m_promisc(void *, boolean_t);
NULL,
NULL,
NULL,
};
/*
* xge_device_poll
*
 * The timeout(9F) handler should call me every 1s. xge_callback_event_queued
 * should call me when a HAL event has been rescheduled.
*/
/*ARGSUSED*/
void
{
if (lldev->is_initialized) {
} else {
lldev->timeout_id = 0;
}
}
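/*
 * A minimal sketch of what this handler does, assuming the HAL's
 * xge_hal_device_poll() entry point, the DDI timeout(9F) facility, and an
 * XGE_DEV_POLL_TICKS interval constant (an illustrative name):
 *
 *	if (lldev->is_initialized) {
 *		xge_hal_device_poll(lldev->devh);
 *		lldev->timeout_id = timeout(xge_device_poll, lldev,
 *		    XGE_DEV_POLL_TICKS);
 *	} else {
 *		lldev->timeout_id = 0;
 *	}
 */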
/*
* xge_device_poll_now
*
* Will call xge_device_poll() immediately
*/
void
{
if (lldev->is_initialized) {
}
}
/*
* xgell_callback_link_up
*
 * This function is called by HAL to notify of a hardware link-up state change.
*/
void
{
}
/*
* xgell_callback_link_down
*
 * This function is called by HAL to notify of a hardware link-down state
 * change.
*/
void
{
}
/*
* xgell_rx_buffer_replenish_all
*
 * Replenish all freed dtr(s) with buffers from the free pool. Called by
 * xgell_rx_buffer_recycle() or xgell_rx_1b_callback().
 * Must be called with pool_lock held.
*/
static void
{
rxd_priv = (xgell_rxd_priv_t *)
}
}
/*
* xgell_rx_buffer_release
*
 * The only thing done here is to put the buffer back into the pool.
 * Callers of this function must hold the pool mutex, bf_pool.pool_lock.
*/
static void
{
/* Put the buffer back to pool */
}
/*
* xgell_rx_buffer_recycle
*
 * Called by desballoc() to "free" the resource.
 * We will try to replenish all descriptors.
*/
/*
 * Previously there was heavy lock contention between xgell_rx_1b_compl() and
 * xgell_rx_buffer_recycle(), which consumed a lot of CPU resources and hurt
 * rx performance. A separate recycle list is introduced to overcome this.
 * The recycle list records the rx buffers that have been recycled, and these
 * buffers are returned to the free list in bulk instead of one-by-one.
*/
static void
{
/*
	 * Until a good way to set this hiwat is found, just always call
	 * replenish_all. *TODO*
	 */
}
}
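/*
 * A sketch of the bulk-return pattern described above. The field names here
 * (recycle_head/recycle_tail/recycle_len, the recycle_lock, and the
 * XGELL_RX_BUFFER_RECYCLE_CACHE hiwat) are illustrative assumptions:
 *
 *	mutex_enter(&ring->recycle_lock);
 *	rx_buffer->next = ring->recycle_head;
 *	ring->recycle_head = rx_buffer;
 *	if (ring->recycle_tail == NULL)
 *		ring->recycle_tail = rx_buffer;
 *	if (++ring->recycle_len >= XGELL_RX_BUFFER_RECYCLE_CACHE) {
 *		mutex_enter(&ring->bf_pool.pool_lock);
 *		ring->recycle_tail->next = ring->bf_pool.head;
 *		ring->bf_pool.head = ring->recycle_head;
 *		ring->bf_pool.free += ring->recycle_len;
 *		xgell_rx_buffer_replenish_all(ring);
 *		mutex_exit(&ring->bf_pool.pool_lock);
 *		ring->recycle_head = ring->recycle_tail = NULL;
 *		ring->recycle_len = 0;
 *	}
 *	mutex_exit(&ring->recycle_lock);
 */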
/*
* xgell_rx_buffer_alloc
*
 * Allocate one rx buffer and return a pointer to it.
 * Returns NULL on failure.
*/
static xgell_rx_buffer_t *
{
void *vaddr;
extern ddi_device_acc_attr_t *p_xge_dev_attr;
0, &dma_handle) != DDI_SUCCESS) {
goto handle_failed;
}
/* reserve some space at the end of the buffer for recycling */
DDI_SUCCESS) {
goto mem_failed;
}
real_size) {
goto bind_failed;
}
goto bind_failed;
}
goto check_failed;
}
sizeof (xgell_rx_buffer_t));
return (rx_buffer);
(void) ddi_dma_unbind_handle(dma_handle);
return (NULL);
}
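/*
 * The allocation above follows the standard DDI DMA ladder; a condensed
 * sketch under DDI_DMA_SLEEP (names such as lldev->dev_info and dma_acch
 * are illustrative; the real code unwinds failures through the
 * handle_failed/mem_failed/bind_failed labels in reverse order):
 *
 *	(void) ddi_dma_alloc_handle(lldev->dev_info, &hal_dma_attr,
 *	    DDI_DMA_SLEEP, 0, &dma_handle);
 *	(void) ddi_dma_mem_alloc(dma_handle, size, p_xge_dev_attr,
 *	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, 0, (caddr_t *)&vaddr,
 *	    &real_size, &dma_acch);
 *	(void) ddi_dma_addr_bind_handle(dma_handle, NULL, vaddr,
 *	    real_size, DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
 *	    0, &dma_cookie, &ncookies);
 */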
/*
* xgell_rx_destroy_buffer_pool
*
 * Destroy the buffer pool. If any buffers are still held by the upper
 * layer, as recorded by bf_pool.post, return B_FALSE to reject the unload.
*/
static boolean_t
{
int i;
/*
	 * If the pool has already been destroyed, just return B_TRUE
*/
return (B_TRUE);
}
/*
	 * If any buffers are still posted, the driver must refuse to detach.
	 * The upper layer needs to be notified to release them.
*/
"%s%d has some buffers not be recycled, try later!",
return (B_FALSE);
}
/*
* Release buffers one by one.
*/
return (B_FALSE);
}
}
return (B_TRUE);
}
/*
* xgell_rx_create_buffer_pool
*
* Initialize RX buffer pool for all RX rings. Refer to rx_buffer_pool_t.
*/
static boolean_t
{
int i;
return (B_TRUE);
/*
	 * Allocate buffers one by one. If any allocation fails, destroy the
	 * whole pool by calling xgell_rx_destroy_buffer_pool().
*/
(void) xgell_rx_destroy_buffer_pool(ring);
return (B_FALSE);
}
}
return (B_TRUE);
}
/*
* xgell_rx_dtr_replenish
*
 * Replenish the descriptor with an rx_buffer from the RX buffer pool.
 * The dtr should be posted right away.
*/
{
return (XGE_HAL_FAIL);
}
return (XGE_HAL_OK);
}
/*
* xgell_get_ip_offset
*
 * Calculate the offset to the IP header.
*/
static inline int
{
int ip_off;
/* get IP-header offset */
case XGE_HAL_FRAME_TYPE_DIX:
break;
case XGE_HAL_FRAME_TYPE_IPX:
break;
case XGE_HAL_FRAME_TYPE_LLC:
break;
case XGE_HAL_FRAME_TYPE_SNAP:
break;
default:
ip_off = 0;
break;
}
}
return (ip_off);
}
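/*
 * For reference, the usual offsets (illustrative, using the standard
 * header sizes rather than the HAL's exact constants): DIX puts the IP
 * header right after the 14-byte Ethernet II header, LLC adds 3 bytes
 * of 802.2 header, SNAP adds 5 more, and a VLAN-tagged frame shifts the
 * offset by VLAN_TAGSZ:
 *
 *	ip_off = sizeof (struct ether_header);
 *	if (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED)
 *		ip_off += VLAN_TAGSZ;
 */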
/*
* xgell_rx_hcksum_assoc
*
 * Determine the packet type and then call hcksum_assoc() to associate
 * the h/w checksum information.
*/
static inline void
{
int cksum_flags = 0;
}
}
if (cksum_flags != 0) {
}
}
/*
* Just pass the partial cksum up to IP.
*/
} else {
start = 40;
}
}
}
/*
* xgell_rx_1b_msg_alloc
*
 * Allocate a message header for the data buffer, and decide whether to copy
 * the packet into a new data buffer so the big rx_buffer can be released to
 * save memory.
 *
 * If pkt_length <= XGELL_RX_DMA_LOWAT, call allocb() to allocate a
 * new message and copy the payload in.
*/
static mblk_t *
{
char *vaddr;
/*
	 * Copy the packet into a newly allocated message buffer if pkt_length
	 * is less than XGELL_RX_DMA_LOWAT
*/
return (NULL);
}
return (mp);
}
/*
	 * Just allocate an mblk for the current data buffer
*/
/* Drop it */
return (NULL);
}
return (mp);
}
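/*
 * A sketch of the copy-vs-loan decision described above, assuming the
 * stock STREAMS allocators: allocb(9F) for the copy path, and
 * desballoc(9F) for loaning the DMA buffer up with a free routine that
 * fires xgell_rx_buffer_recycle(); rx_buffer->frtn is an assumed field:
 *
 *	if (pkt_length <= XGELL_RX_DMA_LOWAT) {
 *		if ((mp = allocb(pkt_length, 0)) == NULL)
 *			return (NULL);
 *		bcopy(vaddr, mp->b_rptr, pkt_length);
 *	} else {
 *		mp = desballoc((unsigned char *)vaddr, pkt_length, 0,
 *		    &rx_buffer->frtn);
 *	}
 *	if (mp != NULL)
 *		mp->b_wptr = mp->b_rptr + pkt_length;
 */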
/*
* xgell_rx_1b_callback
*
 * If the interrupt is because of a received frame, or if the receive ring
 * contains fresh, as yet unprocessed frames, this function is called.
*/
static xge_hal_status_e
void *userdata)
{
int pkt_burst = 0;
do {
int pkt_length;
if (t_code != 0) {
" completed due to error t_code %01x", XGELL_IFNAME,
t_code);
continue;
}
/*
* Sync the DMA memory
*/
continue;
}
/*
* Allocate message for the packet.
*/
} else {
}
/*
* Release the buffer and recycle it later
*/
} else {
/*
* Count it since the buffer should be loaned up.
*/
}
"%s%d: rx: can not allocate mp mblk",
continue;
}
/*
* Associate cksum_flags per packet type and h/w
* cksum flags.
*/
} else {
}
/*
		 * Inlined implementation of the polling function.
*/
}
/* have polled enough packets. */
break;
} else {
/* continue polling packets. */
continue;
}
}
/*
* We're not in polling mode, so try to chain more messages
* or send the chain up according to pkt_burst.
*/
continue;
/* Replenish rx buffers */
}
ring->ring_gen_num);
}
pkt_burst = 0;
/*
* Always call replenish_all to recycle rx_buffers.
*/
/*
	 * If we're not in a polling cycle, call mac_rx(); otherwise
	 * just return while leaving packets chained to ring->poll_mp.
*/
ring->ring_gen_num);
}
return (XGE_HAL_OK);
}
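/*
 * Delivery at the end of the completion loop follows the Crossbow
 * per-ring pattern; a sketch, assuming mac_rx_ring(9E), with the chain
 * kept on an illustrative mp_head local and lldev->mh/ring->ring_handle
 * as assumed handle fields:
 *
 *	if (ring->poll_mp == NULL && mp_head != NULL) {
 *		mac_rx_ring(lldev->mh, ring->ring_handle, mp_head,
 *		    ring->ring_gen_num);
 *	}
 */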
mblk_t *
{
int got_rx = 0;
return (mp);
}
/*
* xgell_xmit_compl
*
 * If an interrupt was raised to indicate DMA completion of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all mblks whose data have already been DMA'ed into the
 * NIC's internal memory.
*/
static xge_hal_status_e
void *userdata)
{
do {
int i;
if (t_code) {
" completed due to error t_code %01x", XGELL_IFNAME,
t_code);
}
for (i = 0; i < txd_priv->handle_cnt; i++) {
(void) ddi_dma_unbind_handle(
txd_priv->dma_handles[i]);
txd_priv->dma_handles[i] = 0;
}
}
txd_priv->handle_cnt = 0;
}
if (ring->need_resched)
return (XGE_HAL_OK);
}
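/*
 * When the fifo was throttled because the free dtr count fell below
 * XGELL_TX_LEVEL_LOW, the completion path restarts it; a sketch,
 * assuming Crossbow's mac_tx_ring_update(9E) and the same illustrative
 * handle fields as above:
 *
 *	if (ring->need_resched) {
 *		ring->need_resched = B_FALSE;
 *		mac_tx_ring_update(lldev->mh, ring->ring_handle);
 *	}
 */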
mblk_t *
{
handle_cnt = frag_cnt = 0;
sent_bytes = 0;
return (mp);
/*
	 * If the free Tx dtr count reaches the lower threshold, inform the
	 * gld to stop sending more packets until the free dtr count exceeds
	 * the higher threshold. The driver informs the gld through a
	 * gld_sched call when the free dtr count exceeds the higher
	 * threshold.
*/
<= XGELL_TX_LEVEL_LOW) {
"free descriptors count at low threshold %d",
goto _exit;
}
if (status != XGE_HAL_OK) {
switch (status) {
"%s%d: channel %d is not ready.", XGELL_IFNAME,
((xge_hal_channel_t *)
goto _exit;
" out of descriptors.", XGELL_IFNAME,
((xge_hal_channel_t *)
goto _exit;
default:
return (mp);
}
}
/*
	 * The VLAN tag should be passed down along with the MAC header, so
	 * the h/w needn't do the insertion.
	 *
	 * For a NIC driver that has to strip and re-insert the VLAN tag, the
	 * example below is the alternative implementation for xge. The driver
	 * can simply bcopy() the ether_vlan_header to overwrite the VLAN tag
	 * and let the h/w insert the tag automatically, since the GLD never
	 * sends down mp(s) with a split ether_vlan_header.
*
* struct ether_vlan_header *evhp;
* uint16_t tci;
*
* evhp = (struct ether_vlan_header *)mp->b_rptr;
* if (evhp->ether_tpid == htons(VLAN_TPID)) {
* tci = ntohs(evhp->ether_tci);
* (void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
* 2 * ETHERADDRL);
* mp->b_rptr += VLAN_TAGSZ;
*
* xge_hal_fifo_dtr_vlan_set(dtr, tci);
* }
*/
copied = 0;
int mblen;
/* skip zero-length message blocks */
if (mblen == 0) {
continue;
}
sent_bytes += mblen;
/*
		 * Check the message length to decide whether to DMA or
		 * bcopy() the data into the tx descriptor(s).
*/
if (rc == XGE_HAL_OK) {
continue;
} else if (used_copy) {
}
} else if (used_copy) {
}
DDI_DMA_DONTWAIT, 0, &dma_handle);
if (ret != DDI_SUCCESS) {
"%s%d: can not allocate dma handle", XGELL_IFNAME,
goto _exit_cleanup;
}
&dma_cookie, &ncookies);
switch (ret) {
case DDI_DMA_MAPPED:
/* everything's fine */
break;
case DDI_DMA_NORESOURCES:
"%s%d: can not bind dma address",
goto _exit_cleanup;
case DDI_DMA_NOMAPPING:
case DDI_DMA_INUSE:
case DDI_DMA_TOOBIG:
default:
/* drop packet, don't retry */
"%s%d: can not map message buffer",
goto _exit_cleanup;
}
"requested c:%d+f:%d", XGELL_IFNAME,
(void) ddi_dma_unbind_handle(dma_handle);
goto _exit_cleanup;
}
/* setup the descriptors for this data buffer */
while (ncookies) {
if (--ncookies) {
}
}
"too many FRAGs [%d], pull up them", frag_cnt);
/* Drop packet, don't retry */
"%s%d: can not pullup message buffer",
goto _exit_cleanup;
}
}
}
/* finalize unfinished copies */
if (used_copy) {
frag_cnt++);
}
/*
	 * If LSO is required, just call xge_hal_fifo_dtr_mss_set(dtr, mss) to
	 * do all the necessary work; see the sketch after this function.
*/
}
if (hckflags & HCK_IPV4_HDRCKSUM) {
}
if (hckflags & HCK_FULLCKSUM) {
}
/* Update per-ring tx statistics */
return (NULL);
/*
	 * Could not successfully transmit, but the message has been changed,
	 * so just free it and return NULL
*/
for (i = 0; i < handle_cnt; i++) {
txd_priv->dma_handles[i] = 0;
}
return (NULL);
return (mp);
}
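/*
 * The sketch referenced above: programming the offloads through the HAL's
 * fifo dtr helpers. xge_hal_fifo_dtr_mss_set() is named in the comment
 * above; the XGE_HAL_TXD_TX_CKO_* flag names are assumptions following
 * the HAL's naming convention:
 *
 *	if (lsoflags & HW_LSO)
 *		xge_hal_fifo_dtr_mss_set(dtr, mss);
 *	if (hckflags & HCK_IPV4_HDRCKSUM)
 *		xge_hal_fifo_dtr_cksum_set_bits(dtr,
 *		    XGE_HAL_TXD_TX_CKO_IPV4_EN);
 *	if (hckflags & HCK_FULLCKSUM)
 *		xge_hal_fifo_dtr_cksum_set_bits(dtr,
 *		    XGE_HAL_TXD_TX_CKO_TCP_EN |
 *		    XGE_HAL_TXD_TX_CKO_UDP_EN);
 */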
/*
* xgell_ring_macaddr_init
*/
static void
{
int i;
int slot_start;
/*
	 * For the default rx ring, the first MAC address is the factory one.
	 * It will be set by the framework, so it needs to be cleared for now.
*/
(void) xge_hal_device_macaddr_clear(hldev, 0);
/*
* Read the MAC address Configuration Memory from HAL.
	 * The first slot will hold the factory MAC address; the contents of
	 * the other slots will be FF:FF:FF:FF:FF:FF.
*/
}
}
static int
{
int slot;
int slot_start;
return (ENOSPC);
}
/* First slot is for factory MAC address */
break;
}
}
0) {
return (EIO);
}
/* Simply enable RTS for the whole section. */
/*
* Read back the MAC address from HAL to keep the array up to date.
*/
return (EIO);
}
return (0);
}
static int
{
int slot;
int slot_start;
if (slot == -1)
return (EINVAL);
/*
* Adjust slot to the offset in the MAC array of this ring (group).
*/
slot -= slot_start;
/*
	 * Only a pre-set MAC address for this ring (group) can be removed.
*/
return (EINVAL);
/*
	 * Reaching here is unexpected. WARNING!
*/
"%s%d: caller is trying to remove an unset MAC address",
return (ENXIO);
}
if (status != XGE_HAL_OK) {
return (EIO);
}
/*
* TODO: Disable MAC RTS if all addresses have been cleared.
*/
/*
* Read back the MAC address from HAL to keep the array up to date.
*/
return (0);
}
/*
 * Temporarily calling HAL functions directly.
 *
 * With the MSI-X implementation, no lock is needed, so interrupt
 * handling can be faster.
*/
int
{
return (0);
}
int
{
return (0);
}
static int
{
return (0);
}
/*ARGSUSED*/
static void
{
}
/*ARGSUSED*/
static int
{
return (0);
}
/*ARGSUSED*/
static void
{
}
/*
 * Callback function for the MAC layer to register all rings.
 *
 * Xframe hardware doesn't support grouping explicitly, so the driver needs
 * to pretend to have resource groups. We may also optionally group all 8 rx
 * rings into a single group for increased scalability on CMT architectures,
 * or group one rx ring per group for maximum virtualization.
 *
 * TX grouping is actually done by the framework, so just register all TX
 * resources without grouping them.
*/
void
{
switch (rtype) {
case MAC_RING_TYPE_RX: {
/*
* Performance vs. Virtualization
*/
else
break;
}
case MAC_RING_TYPE_TX: {
break;
}
default:
break;
}
}
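/*
 * A sketch of what the RX arm of this callback fills in, assuming the
 * Crossbow mac_ring_info_t layout and illustrative xgell_* callback
 * names (xgell_rx_poll matching the poll routine defined earlier):
 *
 *	mac_ring_info_t *infop;
 *
 *	infop->mri_driver = (mac_ring_driver_t)rx_ring;
 *	infop->mri_start = xgell_rx_ring_start;
 *	infop->mri_stop = xgell_rx_ring_stop;
 *	infop->mri_poll = xgell_rx_poll;
 */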
void
{
switch (rtype) {
case MAC_RING_TYPE_RX: {
break;
}
case MAC_RING_TYPE_TX:
xge_assert(0);
break;
default:
break;
}
}
/*
* xgell_macaddr_set
*/
static int
{
"setting macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
if (status != XGE_HAL_OK) {
return (EIO);
}
return (0);
}
/*
* xgell_rx_dtr_term
*
 * This function will be called by HAL to terminate all DTRs for
 * Ring(s) type channels.
*/
static void
{
if (state == XGE_HAL_DTR_STATE_POSTED) {
}
}
/*
* To open a rx ring.
*/
static boolean_t
{
return (B_TRUE);
/* Create the buffer pool first */
if (!xgell_rx_create_buffer_pool(rx_ring)) {
return (B_FALSE);
}
/* Default ring initialization */
if (status != XGE_HAL_OK) {
(void) xgell_rx_destroy_buffer_pool(rx_ring);
return (B_FALSE);
}
rx_ring->polled_bytes = 0;
return (B_TRUE);
}
static void
{
return;
	/* This may not clean up all used buffers; the driver will handle it */
}
/*
* xgell_rx_open
* @lldev: the link layer object
*
* Initialize and open all RX channels.
*/
static boolean_t
{
int i;
if (lldev->live_rx_rings != 0)
return (B_TRUE);
lldev->live_rx_rings = 0;
/*
* Initialize all rings
*/
for (i = 0; i < lldev->init_rx_rings; i++) {
if (!xgell_rx_ring_open(rx_ring))
return (B_FALSE);
lldev->live_rx_rings++;
}
return (B_TRUE);
}
static void
{
int i;
if (lldev->live_rx_rings == 0)
return;
/*
* Close all rx rings
*/
for (i = 0; i < lldev->init_rx_rings; i++) {
lldev->live_rx_rings--;
}
}
}
/*
* xgell_tx_term
*
 * This function will be called by HAL to terminate all DTRs for
 * Fifo(s) type channels.
*/
static void
{
int i;
/*
* for Tx we must clean up the DTR *only* if it has been
* posted!
*/
if (state != XGE_HAL_DTR_STATE_POSTED) {
return;
}
for (i = 0; i < txd_priv->handle_cnt; i++) {
txd_priv->dma_handles[i] = 0;
}
if (mp) {
}
}
static boolean_t
{
return (B_TRUE);
if (status != XGE_HAL_OK) {
return (B_FALSE);
}
return (B_TRUE);
}
static void
{
return;
}
/*
* xgell_tx_open
* @lldev: the link layer object
*
* Initialize and open all TX channels.
*/
static boolean_t
{
int i;
if (lldev->live_tx_rings != 0)
return (B_TRUE);
lldev->live_tx_rings = 0;
/*
	 * Enable the rings in reserve order to match the h/w sequence.
*/
for (i = 0; i < lldev->init_tx_rings; i++) {
if (!xgell_tx_ring_open(tx_ring))
return (B_FALSE);
lldev->live_tx_rings++;
}
return (B_TRUE);
}
static void
{
int i;
if (lldev->live_tx_rings == 0)
return;
/*
	 * Close the rings in reserve order to match the h/w sequence.
*/
for (i = 0; i < lldev->init_tx_rings; i++) {
lldev->live_tx_rings--;
}
}
}
static int
{
/* check initial mtu before enabling the device */
if (status != XGE_HAL_OK) {
return (EINVAL);
}
/* set initial mtu before enabling the device */
if (status != XGE_HAL_OK) {
return (EIO);
}
(maxpkt > XGE_HAL_DEFAULT_MTU) ?
(maxpkt > XGE_HAL_DEFAULT_MTU) ?
/* now, enable the device */
if (status != XGE_HAL_OK) {
return (EIO);
}
if (!xgell_rx_open(lldev)) {
if (status != XGE_HAL_OK) {
"the device. adaper status 0x%"PRIx64
" returned status %d",
}
xge_os_mdelay(1500);
return (ENOMEM);
}
if (!xgell_tx_open(lldev)) {
if (status != XGE_HAL_OK) {
"the device. adaper status 0x%"PRIx64
" returned status %d",
}
xge_os_mdelay(1500);
return (ENOMEM);
}
/* time to enable interrupts */
(void) xge_enable_intrs(lldev);
return (0);
}
static void
{
lldev->is_initialized = 0;
if (status != XGE_HAL_OK) {
}
/* disable OS ISR's */
"waiting for device irq to become quiescent...");
xge_os_mdelay(1500);
}
/*
* xgell_m_start
 * @arg: pointer to device private structure (hldev)
 *
 * This function is called by the MAC Layer to enable the XFRAME
 * firmware to generate interrupts and to prepare the driver to
 * call mac_rx for delivering receive packets to the MAC Layer.
*/
static int
{
int ret;
if (lldev->is_initialized) {
return (EINVAL);
}
hldev->terminating = 0;
return (ret);
}
return (0);
}
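/*
 * A sketch of the start sequence, assuming an internal
 * xgell_initiate_start() helper, the genlock mutex named elsewhere in
 * this file, and the 1s poll timer described at xge_device_poll():
 *
 *	mutex_enter(&lldev->genlock);
 *	ret = xgell_initiate_start(lldev);
 *	if (ret == 0) {
 *		lldev->timeout_id = timeout(xge_device_poll, lldev,
 *		    XGE_DEV_POLL_TICKS);
 *	}
 *	mutex_exit(&lldev->genlock);
 */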
/*
* xgell_m_stop
* @arg: pointer to device private data (hldev)
*
 * This function is called by the MAC Layer to disable
 * the XFRAME firmware from generating any interrupts and
 * to stop the driver from calling mac_rx() for
 * delivering data packets to the MAC Layer.
*/
static void
{
if (!lldev->is_initialized) {
return;
}
/* reset device */
if (lldev->timeout_id != 0) {
}
}
/*
* xgell_onerr_reset
* @lldev: pointer to xgelldev_t structure
*
 * This function is called by the HAL Event framework to reset the HW.
 * It must be called with genlock taken.
*/
int
{
int rc = 0;
if (!lldev->is_initialized) {
return (rc);
}
/* reset device */
return (rc);
}
/*
* xgell_m_multicst
 * @arg: pointer to device private structure (hldev)
 * @add: B_TRUE to enable the multicast address, B_FALSE to disable it
 * @mc_addr: the multicast address in question
*
* This function is called by MAC Layer to enable or
* disable device-level reception of specific multicast addresses.
*/
static int
{
if (!lldev->is_initialized) {
return (EIO);
}
/* FIXME: missing HAL functionality: enable_one() */
if (status != XGE_HAL_OK) {
return (EIO);
}
return (0);
}
/*
* xgell_m_promisc
 * @arg: pointer to device private structure (hldev)
 * @on: B_TRUE to enable promiscuous mode, B_FALSE to disable it
*
* This function is called by MAC Layer to enable or
* disable the reception of all the packets on the medium
*/
static int
{
if (!lldev->is_initialized) {
return (EIO);
}
if (on) {
} else {
}
return (0);
}
/*
* xgell_m_stat
 * @arg: pointer to device private structure (hldev)
*
* This function is called by MAC Layer to get network statistics
* from the driver.
*/
static int
{
if (!lldev->is_initialized) {
return (EAGAIN);
}
return (EAGAIN);
}
switch (stat) {
case MAC_STAT_IFSPEED:
break;
case MAC_STAT_MULTIRCV:
break;
case MAC_STAT_BRDCSTRCV:
break;
case MAC_STAT_MULTIXMT:
break;
case MAC_STAT_BRDCSTXMT:
break;
case MAC_STAT_RBYTES:
break;
case MAC_STAT_NORCVBUF:
break;
case MAC_STAT_IERRORS:
break;
case MAC_STAT_OBYTES:
break;
case MAC_STAT_NOXMTBUF:
break;
case MAC_STAT_OERRORS:
break;
case MAC_STAT_IPACKETS:
break;
case MAC_STAT_OPACKETS:
break;
case ETHER_STAT_FCS_ERRORS:
break;
case ETHER_STAT_LINK_DUPLEX:
*val = LINK_DUPLEX_FULL;
break;
default:
return (ENOTSUP);
}
return (0);
}
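/*
 * A sketch of how the statistics above are sourced, assuming the HAL's
 * xge_hal_stats_hw() accessor and its hw_info counter names (e.g.
 * rmac_vld_frms, quoted as an assumption):
 *
 *	xge_hal_stats_hw_info_t *hw_info;
 *
 *	if (xge_hal_stats_hw(lldev->devh, &hw_info) != XGE_HAL_OK)
 *		return (EAGAIN);
 *	e.g. for MAC_STAT_IPACKETS:
 *	*val = hw_info->rmac_vld_frms;
 */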
/*
* Retrieve a value for one of the statistics for a particular rx ring
*/
int
{
switch (stat) {
case MAC_STAT_RBYTES:
break;
case MAC_STAT_IPACKETS:
break;
default:
*val = 0;
return (ENOTSUP);
}
return (0);
}
/*
* Retrieve a value for one of the statistics for a particular tx ring
*/
int
{
switch (stat) {
case MAC_STAT_OBYTES:
break;
case MAC_STAT_OPACKETS:
break;
default:
*val = 0;
return (ENOTSUP);
}
return (0);
}
/*
* xgell_device_alloc - Allocate new LL device
*/
int
{
return (DDI_SUCCESS);
}
/*
* xgell_device_free
*/
void
{
}
/*
* xgell_ioctl
*/
static void
{
int err = 0;
int cmd;
int ret = 0;
switch (cmd) {
case ND_GET:
need_privilege = 0;
/* FALLTHRU */
case ND_SET:
break;
default:
return;
}
if (need_privilege) {
if (err != 0) {
"drv_priv(): rejected cmd 0x%x, err %d",
return;
}
}
switch (cmd) {
case ND_GET:
/*
* If nd_getset() returns B_FALSE, the command was
* not valid (e.g. unknown name), so we just tell the
* top-level ioctl code to send a NAK (with code EINVAL).
*
* Otherwise, nd_getset() will have built the reply to
* be sent (but not actually sent it), so we tell the
* caller to send the prepared reply.
*/
break;
case ND_SET:
break;
default:
break;
}
"nd_getset(): rejected cmd 0x%x, err %d",
} else {
}
}
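/*
 * A sketch of the nd_getset() dispatch described above (standard NDD
 * boilerplate; wq and the ok flag are illustrative locals, and the NAK
 * code EINVAL follows the comment in the ND_GET case):
 *
 *	ok = nd_getset(wq, lldev->ndp, mp);
 *	if (!ok) {
 *		miocnak(wq, mp, 0, EINVAL);
 *	} else {
 *		qreply(wq, mp);
 *	}
 */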
static boolean_t
{
switch (cap) {
case MAC_CAPAB_HCKSUM: {
break;
}
case MAC_CAPAB_LSO: {
break;
} else {
return (B_FALSE);
}
}
case MAC_CAPAB_RINGS: {
case MAC_RING_TYPE_RX:
break;
case MAC_RING_TYPE_TX:
break;
default:
break;
}
break;
}
default:
return (B_FALSE);
}
return (B_TRUE);
}
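/*
 * For the MAC_CAPAB_HCKSUM case above, the conventional pattern is to
 * fill in the tx checksum flags through cap_data; a sketch using the
 * stock flag names from <sys/dlpi.h> (whether this driver sets exactly
 * these bits is an assumption):
 *
 *	uint32_t *hcksum_txflags = cap_data;
 *
 *	*hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
 */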
static int
{
char *buf;
return (ENOSPC);
}
if (status != XGE_HAL_OK) {
return (EINVAL);
}
if (status != XGE_HAL_OK) {
return (EINVAL);
}
if (status != XGE_HAL_OK) {
return (EINVAL);
}
if (status != XGE_HAL_OK) {
return (EINVAL);
}
if (status != XGE_HAL_OK) {
return (EINVAL);
}
return (0);
}
static int
{
int retsize;
char *buf;
return (ENOSPC);
}
if (status != XGE_HAL_OK) {
return (EINVAL);
}
return (0);
}
static int
{
int retsize;
char *buf;
return (ENOSPC);
}
if (status != XGE_HAL_OK) {
return (EINVAL);
}
return (0);
}
static int
{
int retsize;
char *buf;
return (ENOSPC);
}
if (status != XGE_HAL_OK) {
return (EINVAL);
}
return (0);
}
static int
{
char *end;
value += 2;
}
return (EINVAL);
}
return (0);
}
static int
{
char *buf;
return (ENOSPC);
}
return (0);
}
static int
{
int level;
char *end;
return (EINVAL);
}
return (0);
}
static int
{
char *buf;
return (ENOSPC);
}
return (0);
}
static int
{
char *end;
value += 2;
}
return (EINVAL);
}
return (0);
}
static int
{
int retsize;
char *buf;
return (ENOSPC);
}
if (status != XGE_HAL_OK) {
status);
return (EINVAL);
}
return (0);
}
/*
* xgell_device_register
 * @devh: pointer to the HAL device
 * @config: pointer to this network device's configuration
 * @ll_out: output pointer. Will be assigned a valid LL device.
*
 * This function will allocate and register the network device.
*/
int
{
/*
	 * Initialize the NDD interface for internal debugging.
*/
goto xgell_ndd_fail;
goto xgell_ndd_fail;
goto xgell_ndd_fail;
goto xgell_ndd_fail;
goto xgell_ndd_fail;
goto xgell_ndd_fail;
goto xgell_ndd_fail;
goto xgell_register_fail;
/*
* MAC Registration.
*/
goto xgell_register_fail;
	/* Always free the macp after registration */
/* Calculate tx_copied_max here ??? */
return (DDI_SUCCESS);
return (DDI_FAILURE);
return (DDI_FAILURE);
}
/*
* xgell_device_unregister
 * @devh: pointer to the HAL device
 * @lldev: pointer to a valid LL device.
*
 * This function will unregister and free the network device.
*/
int
{
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}