a23fd118e437af0a7877dd313db8fdaa3537c675yl * CDDL HEADER START
a23fd118e437af0a7877dd313db8fdaa3537c675yl * The contents of this file are subject to the terms of the
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Common Development and Distribution License (the "License").
a23fd118e437af0a7877dd313db8fdaa3537c675yl * You may not use this file except in compliance with the License.
a23fd118e437af0a7877dd313db8fdaa3537c675yl * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
a23fd118e437af0a7877dd313db8fdaa3537c675yl * See the License for the specific language governing permissions
a23fd118e437af0a7877dd313db8fdaa3537c675yl * and limitations under the License.
a23fd118e437af0a7877dd313db8fdaa3537c675yl * When distributing Covered Code, include this CDDL HEADER in each
a23fd118e437af0a7877dd313db8fdaa3537c675yl * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
a23fd118e437af0a7877dd313db8fdaa3537c675yl * If applicable, add the following below this CDDL HEADER, with the
a23fd118e437af0a7877dd313db8fdaa3537c675yl * fields enclosed by brackets "[]" replaced with your own identifying
a23fd118e437af0a7877dd313db8fdaa3537c675yl * information: Portions Copyright [yyyy] [name of copyright owner]
a23fd118e437af0a7877dd313db8fdaa3537c675yl * CDDL HEADER END
0dc2366f7b9f9f36e10909b1e95edbf2a261c2acVenugopal Iyer * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Use is subject to license terms.
f30c160edf27c0e40a231331fc03b8eae12e8f40Roamer * Copyright (c) 2002-2009 Neterion, Inc.
a23fd118e437af0a7877dd313db8fdaa3537c675yl * All rights reserved.
a23fd118e437af0a7877dd313db8fdaa3537c675yl * FileName : xgell.c
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Description: Xge Link Layer data path implementation
ba2e4443695ee6a6f420a35cd4fc3d3346d22932seb#define XGELL_MAX_FRAME_SIZE(hldev) ((hldev)->config.mtu + \
a23fd118e437af0a7877dd313db8fdaa3537c675yl sizeof (struct ether_vlan_header))
a23fd118e437af0a7877dd313db8fdaa3537c675yl/* DMA attributes used for Tx side */
7eced415e5dd557aef2d78483b5a7785f0e13670xw#if defined(__sparc)
a23fd118e437af0a7877dd313db8fdaa3537c675yl 0 /* dma_attr_flags */
a23fd118e437af0a7877dd313db8fdaa3537c675yl * DMA attributes used when using ddi_dma_mem_alloc to
a23fd118e437af0a7877dd313db8fdaa3537c675yl * allocate HAL descriptors and Rx buffers during replenish
7eced415e5dd557aef2d78483b5a7785f0e13670xw#if defined(__sparc)
ba2e4443695ee6a6f420a35cd4fc3d3346d22932sebstatic int xgell_m_start(void *);
ba2e4443695ee6a6f420a35cd4fc3d3346d22932sebstatic void xgell_m_stop(void *);
ba2e4443695ee6a6f420a35cd4fc3d3346d22932sebstatic int xgell_m_multicst(void *, boolean_t, const uint8_t *);
ba2e4443695ee6a6f420a35cd4fc3d3346d22932sebstatic boolean_t xgell_m_getcapab(void *, mac_capab_t, void *);
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xge_device_poll
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Timeout should call me every 1s. xge_callback_event_queued should call me
a23fd118e437af0a7877dd313db8fdaa3537c675yl * when HAL hope event was rescheduled.
a23fd118e437af0a7877dd313db8fdaa3537c675yl/*ARGSUSED*/
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xge_device_poll_now
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Will call xge_device_poll() immediately
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_callback_link_up
a23fd118e437af0a7877dd313db8fdaa3537c675yl * This function is called by HAL to notify HW link up state change.
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_callback_link_down
a23fd118e437af0a7877dd313db8fdaa3537c675yl * This function is called by HAL to notify HW link down state change.
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_rx_buffer_replenish_all
a23fd118e437af0a7877dd313db8fdaa3537c675yl * To replenish all freed dtr(s) with buffers in free pool. It's called by
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * xgell_rx_buffer_recycle() or xgell_rx_1b_callback().
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Must be called with pool_lock held.
a23fd118e437af0a7877dd313db8fdaa3537c675ylstatic void
da14cebe459d3275048785f25bd869cb09b5307fEric Chengxgell_rx_buffer_replenish_all(xgell_rx_ring_t *ring)
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng (xge_hal_ring_dtr_reserve(ring->channelh, &dtr) == XGE_HAL_OK)) {
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_rx_buffer_release
a23fd118e437af0a7877dd313db8fdaa3537c675yl * The only thing done here is to put the buffer back to the pool.
8347601bcb0a439f6e50fc36b4039a73d08700e1yl * Calls to this function need to be protected by the mutex, bf_pool.pool_lock.
a23fd118e437af0a7877dd313db8fdaa3537c675ylstatic void
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
a23fd118e437af0a7877dd313db8fdaa3537c675yl /* Put the buffer back to pool */
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_rx_buffer_recycle
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Called by desballoc() to "free" the resource.
a23fd118e437af0a7877dd313db8fdaa3537c675yl * We will try to replenish all descriptors.
7eced415e5dd557aef2d78483b5a7785f0e13670xw * Previously there were much lock contention between xgell_rx_1b_compl() and
7eced415e5dd557aef2d78483b5a7785f0e13670xw * xgell_rx_buffer_recycle(), which consumed a lot of CPU resources and had bad
7eced415e5dd557aef2d78483b5a7785f0e13670xw * effect on rx performance. A separate recycle list is introduced to overcome
7eced415e5dd557aef2d78483b5a7785f0e13670xw * this. The recycle list is used to record the rx buffer that has been recycled
7eced415e5dd557aef2d78483b5a7785f0e13670xw * and these buffers will be returned to the free list in bulk instead of
7eced415e5dd557aef2d78483b5a7785f0e13670xw * one-by-one.
a23fd118e437af0a7877dd313db8fdaa3537c675ylstatic void
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Before finding a good way to set this hiwat, just always call to
a23fd118e437af0a7877dd313db8fdaa3537c675yl * replenish_all. *TODO*
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng if ((lldev->is_initialized != 0) && (ring->live) &&
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng bf_pool->recycle_head = bf_pool->recycle_tail = NULL;
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_rx_buffer_alloc
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Allocate one rx buffer and return with the pointer to the buffer.
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Return NULL if failed.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
a23fd118e437af0a7877dd313db8fdaa3537c675yl if (ddi_dma_alloc_handle(hldev->pdev, p_hal_dma_attr, DDI_DMA_SLEEP,
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA handle",
a23fd118e437af0a7877dd313db8fdaa3537c675yl /* reserve some space at the end of the buffer for recycling */
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng if (ddi_dma_mem_alloc(dma_handle, HEADROOM + bf_pool->size +
a23fd118e437af0a7877dd313db8fdaa3537c675yl sizeof (xgell_rx_buffer_t), p_xge_dev_attr, DDI_DMA_STREAMING,
a23fd118e437af0a7877dd313db8fdaa3537c675yl DDI_DMA_SLEEP, 0, (caddr_t *)&vaddr, &real_size, &dma_acch) !=
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng if (HEADROOM + bf_pool->size + sizeof (xgell_rx_buffer_t) >
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
a23fd118e437af0a7877dd313db8fdaa3537c675yl if (ddi_dma_addr_bind_handle(dma_handle, NULL, (char *)vaddr + HEADROOM,
a23fd118e437af0a7877dd313db8fdaa3537c675yl DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_SUCCESS) {
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng if (ncookies != 1 || dma_cookie.dmac_size < bf_pool->size) {
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_debug_ll(XGE_ERR, "%s%d: can not handle partial DMA",
a23fd118e437af0a7877dd313db8fdaa3537c675yl rx_buffer = (xgell_rx_buffer_t *)((char *)vaddr + real_size -
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_rx_destroy_buffer_pool
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Destroy buffer pool. If there is still any buffer hold by upper layer,
a23fd118e437af0a7877dd313db8fdaa3537c675yl * recorded by bf_pool.post, return DDI_FAILURE to reject to be unloaded.
da14cebe459d3275048785f25bd869cb09b5307fEric Chengxgell_rx_destroy_buffer_pool(xgell_rx_ring_t *ring)
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng	 * If the pool has been destroyed, just return B_TRUE
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng bf_pool->recycle_tail = bf_pool->recycle_head = NULL;
a23fd118e437af0a7877dd313db8fdaa3537c675yl * If there is any posted buffer, the driver should reject to be
a23fd118e437af0a7877dd313db8fdaa3537c675yl * detached. Need notice upper layer to release them.
a23fd118e437af0a7877dd313db8fdaa3537c675yl "%s%d has some buffers not be recycled, try later!",
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Release buffers one by one.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_debug_ll(XGE_ERR, "failed to unbind DMA handle!");
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_rx_create_buffer_pool
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Initialize RX buffer pool for all RX rings. Refer to rx_buffer_pool_t.
da14cebe459d3275048785f25bd869cb09b5307fEric Chengxgell_rx_create_buffer_pool(xgell_rx_ring_t *ring)
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng bf_pool->post_hiwat = lldev->config.rx_buffer_post_hiwat;
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng mutex_init(&bf_pool->pool_lock, NULL, MUTEX_DRIVER,
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng mutex_init(&bf_pool->recycle_lock, NULL, MUTEX_DRIVER,
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Allocate buffers one by one. If failed, destroy whole pool by
a23fd118e437af0a7877dd313db8fdaa3537c675yl * call to xgell_rx_destroy_buffer_pool().
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_rx_dtr_replenish
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Replenish descriptor with rx_buffer in RX buffer pool.
a23fd118e437af0a7877dd313db8fdaa3537c675yl * The dtr should be post right away.
a23fd118e437af0a7877dd313db8fdaa3537c675ylxgell_rx_dtr_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, int index,
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_debug_ll(XGE_ERR, "no more available rx DMA buffer!");
7eced415e5dd557aef2d78483b5a7785f0e13670xw rxd_priv = (xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtr);
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr, bf_pool->size);
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_get_ip_offset
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Calculate the offset to IP header.
a23fd118e437af0a7877dd313db8fdaa3537c675ylstatic inline int
a23fd118e437af0a7877dd313db8fdaa3537c675yl /* get IP-header offset */
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_rx_hcksum_assoc
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Judge the packet type and then call to hcksum_assoc() to associate
a23fd118e437af0a7877dd313db8fdaa3537c675yl * h/w checksum information.
a23fd118e437af0a7877dd313db8fdaa3537c675ylstatic inline void
a23fd118e437af0a7877dd313db8fdaa3537c675ylxgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
a23fd118e437af0a7877dd313db8fdaa3537c675yl if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED)) {
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Just pass the partial cksum up to IP.
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_rx_1b_msg_alloc
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Allocate message header for data buffer, and decide if copy the packet to
a23fd118e437af0a7877dd313db8fdaa3537c675yl * new data buffer to release big rx_buffer to save memory.
8347601bcb0a439f6e50fc36b4039a73d08700e1yl * If the pkt_length <= XGELL_RX_DMA_LOWAT, call allocb() to allocate
a23fd118e437af0a7877dd313db8fdaa3537c675yl * new message and copy the payload in.
da14cebe459d3275048785f25bd869cb09b5307fEric Chengxgell_rx_1b_msg_alloc(xgell_rx_ring_t *ring, xgell_rx_buffer_t *rx_buffer,
8347601bcb0a439f6e50fc36b4039a73d08700e1yl int pkt_length, xge_hal_dtr_info_t *ext_info, boolean_t *copyit)
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Copy packet into new allocated message buffer, if pkt_length
8347601bcb0a439f6e50fc36b4039a73d08700e1yl * is less than XGELL_RX_DMA_LOWAT
8347601bcb0a439f6e50fc36b4039a73d08700e1yl if (*copyit || pkt_length <= lldev->config.rx_dma_lowat) {
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (mp);
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Just allocate mblk for current data buffer
3c785c4c5ac4bb72eb0cff3cda1255dfc9613aa1yl if ((mp = (mblk_t *)desballoc((unsigned char *)vaddr, pkt_length, 0,
a23fd118e437af0a7877dd313db8fdaa3537c675yl /* Drop it */
3c785c4c5ac4bb72eb0cff3cda1255dfc9613aa1yl * Adjust the b_rptr/b_wptr in the mblk_t structure.
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (mp);
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * xgell_rx_1b_callback
a23fd118e437af0a7877dd313db8fdaa3537c675yl * If the interrupt is because of a received frame or if the receive ring
a23fd118e437af0a7877dd313db8fdaa3537c675yl * contains fresh as yet un-processed frames, this function is called.
da14cebe459d3275048785f25bd869cb09b5307fEric Chengxgell_rx_1b_callback(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xgell_rx_ring_t *ring = (xgell_rx_ring_t *)userdata;
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_debug_ll(XGE_TRACE, "xgell_rx_1b_callback on ring %d", ring->index);
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data, &pkt_length);
a23fd118e437af0a7877dd313db8fdaa3537c675yl if (t_code != 0) {
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Sync the DMA memory
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Allocate message for the packet.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng mp = xgell_rx_1b_msg_alloc(ring, rx_buffer, pkt_length,
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Release the buffer and recycle it later
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Count it since the buffer should be loaned up.
8347601bcb0a439f6e50fc36b4039a73d08700e1yl "%s%d: rx: can not allocate mp mblk",
8347601bcb0a439f6e50fc36b4039a73d08700e1yl * Associate cksum_flags per packet type and h/w
8347601bcb0a439f6e50fc36b4039a73d08700e1yl * cksum flags.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xgell_rx_hcksum_assoc(mp, (char *)rx_buffer->vaddr + HEADROOM,
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Inlined implemented polling function.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng if ((ring->poll_mp == NULL) && (ring->poll_bytes > 0)) {
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng /* have polled enough packets. */
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng /* continue polling packets. */
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * We're not in polling mode, so try to chain more messages
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * or send the chain up according to pkt_burst.
8347601bcb0a439f6e50fc36b4039a73d08700e1yl /* Replenish rx buffers */
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng mac_rx_ring(lldev->mh, ring->ring_handle, mp_head,
a23fd118e437af0a7877dd313db8fdaa3537c675yl } while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Always call replenish_all to recycle rx_buffers.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * If we're not in polling cycle, call mac_rx(), otherwise
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * just return while leaving packets chained to ring->poll_mp.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng if ((ring->poll_mp == NULL) && (mp_head != NULL)) {
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng mac_rx_ring(lldev->mh, ring->ring_handle, mp_head,
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_debug_ll(XGE_TRACE, "xgell_rx_poll on ring %d", ring->index);
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng (void) xge_hal_device_poll_rx_channel(ring->channelh, &got_rx);
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_xmit_compl
a23fd118e437af0a7877dd313db8fdaa3537c675yl * If an interrupt was raised to indicate DMA complete of the Tx packet,
a23fd118e437af0a7877dd313db8fdaa3537c675yl * this function is called. It identifies the last TxD whose buffer was
a23fd118e437af0a7877dd313db8fdaa3537c675yl * freed and frees all skbs whose data have already DMA'ed into the NICs
a23fd118e437af0a7877dd313db8fdaa3537c675yl * internal memory.
a23fd118e437af0a7877dd313db8fdaa3537c675ylxgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
a23fd118e437af0a7877dd313db8fdaa3537c675yl } while (xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) ==
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng mac_tx_ring_update(lldev->mh, ring->ring_handle);
a23fd118e437af0a7877dd313db8fdaa3537c675yl * If the free Tx dtrs count reaches the lower threshold,
a23fd118e437af0a7877dd313db8fdaa3537c675yl * inform the gld to stop sending more packets till the free
a23fd118e437af0a7877dd313db8fdaa3537c675yl * dtrs count exceeds higher threshold. Driver informs the
a23fd118e437af0a7877dd313db8fdaa3537c675yl * gld through gld_sched call, when the free dtrs count exceeds
a23fd118e437af0a7877dd313db8fdaa3537c675yl * the higher threshold.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_debug_ll(XGE_TRACE, "%s%d: queue %d: err on xmit,"
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng "free descriptors count at low threshold %d",
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng status = xge_hal_fifo_dtr_reserve(ring->channelh, &dtr);
a23fd118e437af0a7877dd313db8fdaa3537c675yl switch (status) {
a23fd118e437af0a7877dd313db8fdaa3537c675yl * VLAN tag should be passed down along with MAC header, so h/w needn't
a23fd118e437af0a7877dd313db8fdaa3537c675yl * do insertion.
a23fd118e437af0a7877dd313db8fdaa3537c675yl * For NIC driver that has to strip and re-insert VLAN tag, the example
a23fd118e437af0a7877dd313db8fdaa3537c675yl * is the other implementation for xge. The driver can simple bcopy()
a23fd118e437af0a7877dd313db8fdaa3537c675yl * ether_vlan_header to overwrite VLAN tag and let h/w insert the tag
a23fd118e437af0a7877dd313db8fdaa3537c675yl * automatically, since it's impossible that GLD sends down mp(s) with
a23fd118e437af0a7877dd313db8fdaa3537c675yl * a split ether_vlan_header.
a23fd118e437af0a7877dd313db8fdaa3537c675yl * struct ether_vlan_header *evhp;
a23fd118e437af0a7877dd313db8fdaa3537c675yl * uint16_t tci;
a23fd118e437af0a7877dd313db8fdaa3537c675yl * evhp = (struct ether_vlan_header *)mp->b_rptr;
a23fd118e437af0a7877dd313db8fdaa3537c675yl * if (evhp->ether_tpid == htons(VLAN_TPID)) {
8347601bcb0a439f6e50fc36b4039a73d08700e1yl * tci = ntohs(evhp->ether_tci);
8347601bcb0a439f6e50fc36b4039a73d08700e1yl * (void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
a23fd118e437af0a7877dd313db8fdaa3537c675yl * 2 * ETHERADDRL);
8347601bcb0a439f6e50fc36b4039a73d08700e1yl * mp->b_rptr += VLAN_TAGSZ;
8347601bcb0a439f6e50fc36b4039a73d08700e1yl * xge_hal_fifo_dtr_vlan_set(dtr, tci);
a23fd118e437af0a7877dd313db8fdaa3537c675yl /* skip zero-length message blocks */
a23fd118e437af0a7877dd313db8fdaa3537c675yl if (mblen == 0) {
8347601bcb0a439f6e50fc36b4039a73d08700e1yl * Check the message length to decide to DMA or bcopy() data
8347601bcb0a439f6e50fc36b4039a73d08700e1yl * to tx descriptor(s).
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng rc = xge_hal_fifo_dtr_buffer_append(ring->channelh,
8347601bcb0a439f6e50fc36b4039a73d08700e1yl } else if (used_copy) {
8347601bcb0a439f6e50fc36b4039a73d08700e1yl } else if (used_copy) {
ba2e4443695ee6a6f420a35cd4fc3d3346d22932seb ret = ddi_dma_alloc_handle(lldev->dev_info, &tx_dma_attr,
a23fd118e437af0a7877dd313db8fdaa3537c675yl switch (ret) {
a23fd118e437af0a7877dd313db8fdaa3537c675yl /* everything's fine */
a23fd118e437af0a7877dd313db8fdaa3537c675yl "%s%d: can not bind dma address",
a23fd118e437af0a7877dd313db8fdaa3537c675yl /* drop packet, don't retry */
a23fd118e437af0a7877dd313db8fdaa3537c675yl "%s%d: can not map message buffer",
8347601bcb0a439f6e50fc36b4039a73d08700e1yl if (ncookies + frag_cnt > hldev->config.fifo.max_frags) {
a23fd118e437af0a7877dd313db8fdaa3537c675yl /* setup the descriptors for this data buffer */
a23fd118e437af0a7877dd313db8fdaa3537c675yl /* Drop packet, don't retry */
a23fd118e437af0a7877dd313db8fdaa3537c675yl "%s%d: can not pullup message buffer",
8347601bcb0a439f6e50fc36b4039a73d08700e1yl /* finalize unfinished copies */
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_hal_fifo_dtr_buffer_finalize(ring->channelh, dtr,
8347601bcb0a439f6e50fc36b4039a73d08700e1yl * If LSO is required, just call xge_hal_fifo_dtr_mss_set(dtr, mss) to
8347601bcb0a439f6e50fc36b4039a73d08700e1yl * do all necessary work.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_assert((mss != 0) && (mss <= XGE_HAL_DEFAULT_MTU));
0dc2366f7b9f9f36e10909b1e95edbf2a261c2acVenugopal Iyer mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &hckflags);
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_TCP_EN |
0dc2366f7b9f9f36e10909b1e95edbf2a261c2acVenugopal Iyer /* Update per-ring tx statistics */
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Could not successfully transmit but have changed the message,
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * so just free it and return NULL
a23fd118e437af0a7877dd313db8fdaa3537c675yl for (i = 0; i < handle_cnt; i++) {
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * xgell_ring_macaddr_init
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_debug_ll(XGE_TRACE, "%s", "xgell_rx_ring_maddr_init");
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng ring->mmac.naddr = XGE_RX_MULTI_MAC_ADDRESSES_MAX;
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * For the default rx ring, the first MAC address is the factory one.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * This will be set by the framework, so need to clear it for now.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Read the MAC address Configuration Memory from HAL.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * The first slot will hold a factory MAC address, contents in other
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * slots will be FF:FF:FF:FF:FF:FF.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng (void) xge_hal_device_macaddr_get(hldev, slot_start + i,
da14cebe459d3275048785f25bd869cb09b5307fEric Chengstatic int xgell_maddr_set(xgelldev_t *, int, uint8_t *);
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng /* First slot is for factory MAC address */
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng for (slot = 0; slot < ring->mmac.naddr; slot++) {
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng if (xgell_maddr_set(lldev, slot_start + slot, (uint8_t *)mac_addr) !=
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng /* Simply enable RTS for the whole section. */
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng (void) xge_hal_device_rts_section_enable(hldev, slot_start + slot);
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Read back the MAC address from HAL to keep the array up to date.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng if (xge_hal_device_macaddr_get(hldev, slot_start + slot,
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng (void) xge_hal_device_macaddr_clear(hldev, slot_start + slot);
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng slot = xge_hal_device_macaddr_find(hldev, (uint8_t *)mac_addr);
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Adjust slot to the offset in the MAC array of this ring (group).
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Only can remove a pre-set MAC address for this ring (group).
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * The result will be unexpected when reach here. WARNING!
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng "%s%d: caller is trying to remove an unset MAC address",
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng status = xge_hal_device_macaddr_clear(hldev, slot_start + slot);
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * TODO: Disable MAC RTS if all addresses have been cleared.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Read back the MAC address from HAL to keep the array up to date.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng (void) xge_hal_device_macaddr_get(hldev, slot_start + slot,
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Temporarily calling hal function.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * With MSI-X implementation, no lock is needed, so that the interrupt
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * handling could be faster.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_hal_device_rx_channel_disable_polling(ring->channelh);
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_hal_device_rx_channel_enable_polling(ring->channelh);
da14cebe459d3275048785f25bd869cb09b5307fEric Chengxgell_rx_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xgell_rx_ring_t *rx_ring = (xgell_rx_ring_t *)rh;
da14cebe459d3275048785f25bd869cb09b5307fEric Chengxgell_tx_ring_start(mac_ring_driver_t rh, uint64_t useless)
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Callback function for MAC layer to register all rings.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Xframe hardware doesn't support grouping explicitly, so the driver needs
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * to pretend having resource groups. We may also optionally group all 8 rx
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * rings into a single group for increased scalability on CMT architectures,
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * or group one rx ring per group for maximum virtualization.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * TX grouping is actually done by framework, so, just register all TX
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * resources without grouping them.
da14cebe459d3275048785f25bd869cb09b5307fEric Chengxgell_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Performance vs. Virtualization
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng if (lldev->init_rx_rings == lldev->init_rx_groups)
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_assert((index >= 0) && (index < lldev->init_tx_rings));
da14cebe459d3275048785f25bd869cb09b5307fEric Chengxgell_fill_group(void *arg, mac_ring_type_t rtype, const int index,
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng infop->mgi_count = lldev->init_rx_rings / lldev->init_rx_groups;
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * xgell_macaddr_set
da14cebe459d3275048785f25bd869cb09b5307fEric Chengxgell_maddr_set(xgelldev_t *lldev, int index, uint8_t *macaddr)
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_debug_ll(XGE_TRACE, "%s", "xgell_maddr_set");
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng "setting macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng status = xge_hal_device_macaddr_set(hldev, index, (uchar_t *)macaddr);
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_debug_ll(XGE_ERR, "%s%d: can not set mac address",
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_rx_dtr_term
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Function will be called by HAL to terminate all DTRs for
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Ring(s) type of channels.
a23fd118e437af0a7877dd313db8fdaa3537c675ylstatic void
a23fd118e437af0a7877dd313db8fdaa3537c675ylxgell_rx_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
a23fd118e437af0a7877dd313db8fdaa3537c675yl ((xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtrh));
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * To open a rx ring.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng /* Create the buffer pool first */
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_debug_ll(XGE_ERR, "can not create buffer pool for ring: %d",
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng /* Default ring initialization */
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng status = xge_hal_channel_open(lldev->devh, &attr, &rx_ring->channelh,
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_debug_ll(XGE_ERR, "%s%d: cannot open Rx channel got status "
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng " code %d", XGELL_IFNAME, lldev->instance, status);
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng mutex_init(&rx_ring->ring_lock, NULL, MUTEX_DRIVER,
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_debug_ll(XGE_TRACE, "RX ring [%d] is opened successfully",
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_hal_channel_close(rx_ring->channelh, XGE_HAL_CHANNEL_OC_NORMAL);
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng /* This may not clean up all used buffers, driver will handle it */
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * xgell_rx_open
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * @lldev: the link layer object
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Initialize and open all RX channels.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Initialize all rings
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Close all rx rings
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_tx_term
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Function will be called by HAL to terminate all DTRs for
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Fifo(s) type of channels.
a23fd118e437af0a7877dd313db8fdaa3537c675ylstatic void
a23fd118e437af0a7877dd313db8fdaa3537c675ylxgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
a23fd118e437af0a7877dd313db8fdaa3537c675yl * for Tx we must clean up the DTR *only* if it has been
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng status = xge_hal_channel_open(lldev->devh, &attr, &tx_ring->channelh,
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_debug_ll(XGE_ERR, "%s%d: cannot open Tx channel got status "
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng "code %d", XGELL_IFNAME, lldev->instance, status);
7eced415e5dd557aef2d78483b5a7785f0e13670xwstatic void
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_hal_channel_close(tx_ring->channelh, XGE_HAL_CHANNEL_OC_NORMAL);
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * xgell_tx_open
a23fd118e437af0a7877dd313db8fdaa3537c675yl * @lldev: the link layer object
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Initialize and open all TX channels.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Enable rings by reserve sequence to match the h/w sequences.
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Enable rings by reserve sequence to match the h/w sequences.
a23fd118e437af0a7877dd313db8fdaa3537c675yl /* check initial mtu before enabling the device */
a23fd118e437af0a7877dd313db8fdaa3537c675yl /* set initial mtu before enabling the device */
8347601bcb0a439f6e50fc36b4039a73d08700e1yl /* tune jumbo/normal frame UFC counters */
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng hldev->config.ring.queue[XGELL_RX_RING_MAIN].rti.ufc_b =
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng hldev->config.ring.queue[XGELL_RX_RING_MAIN].rti.ufc_c =
a23fd118e437af0a7877dd313db8fdaa3537c675yl /* now, enable the device */
a23fd118e437af0a7877dd313db8fdaa3537c675yl " returned status %d",
a23fd118e437af0a7877dd313db8fdaa3537c675yl " returned status %d",
a23fd118e437af0a7877dd313db8fdaa3537c675yl /* time to enable interrupts */
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (0);
a23fd118e437af0a7877dd313db8fdaa3537c675ylstatic void
a23fd118e437af0a7877dd313db8fdaa3537c675yl (void) xge_hal_device_status(lldev->devh, &adapter_status);
a23fd118e437af0a7877dd313db8fdaa3537c675yl "the device. adaper status 0x%"PRIx64" returned status %d",
7eced415e5dd557aef2d78483b5a7785f0e13670xw /* disable OS ISR's */
a23fd118e437af0a7877dd313db8fdaa3537c675yl "waiting for device irq to become quiescent...");
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_m_start
a23fd118e437af0a7877dd313db8fdaa3537c675yl * @arg: pointer to device private strucutre(hldev)
a23fd118e437af0a7877dd313db8fdaa3537c675yl * This function is called by MAC Layer to enable the XFRAME
a23fd118e437af0a7877dd313db8fdaa3537c675yl * firmware to generate interrupts and also prepare the
a23fd118e437af0a7877dd313db8fdaa3537c675yl * driver to call mac_rx for delivering receive packets
a23fd118e437af0a7877dd313db8fdaa3537c675yl * to MAC Layer.
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_debug_ll(XGE_ERR, "%s%d: device is already initialized",
a23fd118e437af0a7877dd313db8fdaa3537c675yl lldev->timeout_id = timeout(xge_device_poll, hldev, XGE_DEV_POLL_TICKS);
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (0);
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_m_stop
a23fd118e437af0a7877dd313db8fdaa3537c675yl * @arg: pointer to device private data (hldev)
a23fd118e437af0a7877dd313db8fdaa3537c675yl * This function is called by the MAC Layer to disable
a23fd118e437af0a7877dd313db8fdaa3537c675yl * the XFRAME firmware for generating any interrupts and
a23fd118e437af0a7877dd313db8fdaa3537c675yl * also stop the driver from calling mac_rx() for
a23fd118e437af0a7877dd313db8fdaa3537c675yl * delivering data packets to the MAC Layer.
a23fd118e437af0a7877dd313db8fdaa3537c675ylstatic void
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_debug_ll(XGE_ERR, "%s", "device is not initialized...");
a23fd118e437af0a7877dd313db8fdaa3537c675yl /* reset device */
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_debug_ll(XGE_TRACE, "%s", "returning back to MAC Layer...");
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_onerr_reset
a23fd118e437af0a7877dd313db8fdaa3537c675yl * @lldev: pointer to xgelldev_t structure
a23fd118e437af0a7877dd313db8fdaa3537c675yl * This function is called by HAL Event framework to reset the HW
a23fd118e437af0a7877dd313db8fdaa3537c675yl * This function is must be called with genlock taken.
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (rc);
a23fd118e437af0a7877dd313db8fdaa3537c675yl /* reset device */
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (rc);
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_m_multicst
a23fd118e437af0a7877dd313db8fdaa3537c675yl * @arg: pointer to device private strucutre(hldev)
a23fd118e437af0a7877dd313db8fdaa3537c675yl * @mc_addr:
a23fd118e437af0a7877dd313db8fdaa3537c675yl * This function is called by MAC Layer to enable or
a23fd118e437af0a7877dd313db8fdaa3537c675yl * disable device-level reception of specific multicast addresses.
a23fd118e437af0a7877dd313db8fdaa3537c675ylxgell_m_multicst(void *arg, boolean_t add, const uint8_t *mc_addr)
a23fd118e437af0a7877dd313db8fdaa3537c675yl /* FIXME: missing HAL functionality: enable_one() */
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_debug_ll(XGE_ERR, "failed to %s multicast, status %d",
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (0);
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_m_promisc
a23fd118e437af0a7877dd313db8fdaa3537c675yl * @arg: pointer to device private strucutre(hldev)
a23fd118e437af0a7877dd313db8fdaa3537c675yl * This function is called by MAC Layer to enable or
a23fd118e437af0a7877dd313db8fdaa3537c675yl * disable the reception of all the packets on the medium
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (0);
ba2e4443695ee6a6f420a35cd4fc3d3346d22932seb * xgell_m_stat
a23fd118e437af0a7877dd313db8fdaa3537c675yl * @arg: pointer to device private strucutre(hldev)
ba2e4443695ee6a6f420a35cd4fc3d3346d22932seb * This function is called by MAC Layer to get network statistics
a23fd118e437af0a7877dd313db8fdaa3537c675yl * from the driver.
a23fd118e437af0a7877dd313db8fdaa3537c675yl switch (stat) {
8347601bcb0a439f6e50fc36b4039a73d08700e1yl *val = ((u64) hw_info->rmac_discarded_frms_oflow << 32) |
ba2e4443695ee6a6f420a35cd4fc3d3346d22932seb return (0);
0dc2366f7b9f9f36e10909b1e95edbf2a261c2acVenugopal Iyer * Retrieve a value for one of the statistics for a particular rx ring
0dc2366f7b9f9f36e10909b1e95edbf2a261c2acVenugopal Iyerxgell_rx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
0dc2366f7b9f9f36e10909b1e95edbf2a261c2acVenugopal Iyer xgell_rx_ring_t *rx_ring = (xgell_rx_ring_t *)rh;
0dc2366f7b9f9f36e10909b1e95edbf2a261c2acVenugopal Iyer * Retrieve a value for one of the statistics for a particular tx ring
0dc2366f7b9f9f36e10909b1e95edbf2a261c2acVenugopal Iyerxgell_tx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
0dc2366f7b9f9f36e10909b1e95edbf2a261c2acVenugopal Iyer xgell_tx_ring_t *tx_ring = (xgell_tx_ring_t *)rh;
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_device_alloc - Allocate new LL device
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_debug_ll(XGE_TRACE, "trying to register etherenet device %s%d...",
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_device_free
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_ioctl
a23fd118e437af0a7877dd313db8fdaa3537c675ylstatic void
a23fd118e437af0a7877dd313db8fdaa3537c675yl switch (cmd) {
a23fd118e437af0a7877dd313db8fdaa3537c675yl /* FALLTHRU */
a23fd118e437af0a7877dd313db8fdaa3537c675yl if (err != 0) {
a23fd118e437af0a7877dd313db8fdaa3537c675yl "drv_priv(): rejected cmd 0x%x, err %d",
a23fd118e437af0a7877dd313db8fdaa3537c675yl switch (cmd) {
a23fd118e437af0a7877dd313db8fdaa3537c675yl * If nd_getset() returns B_FALSE, the command was
a23fd118e437af0a7877dd313db8fdaa3537c675yl * not valid (e.g. unknown name), so we just tell the
a23fd118e437af0a7877dd313db8fdaa3537c675yl * top-level ioctl code to send a NAK (with code EINVAL).
a23fd118e437af0a7877dd313db8fdaa3537c675yl * Otherwise, nd_getset() will have built the reply to
a23fd118e437af0a7877dd313db8fdaa3537c675yl * be sent (but not actually sent it), so we tell the
a23fd118e437af0a7877dd313db8fdaa3537c675yl * caller to send the prepared reply.
a23fd118e437af0a7877dd313db8fdaa3537c675yl "nd_getset(): rejected cmd 0x%x, err %d",
ba2e4443695ee6a6f420a35cd4fc3d3346d22932sebxgell_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng xge_debug_ll(XGE_TRACE, "xgell_m_getcapab: %x", cap);
ba2e4443695ee6a6f420a35cd4fc3d3346d22932seb switch (cap) {
ba2e4443695ee6a6f420a35cd4fc3d3346d22932seb *hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 |
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
a23fd118e437af0a7877dd313db8fdaa3537c675ylxgell_stats_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
a23fd118e437af0a7877dd313db8fdaa3537c675yl status = xge_hal_aux_stats_tmac_read(lldev->devh, XGELL_STATS_BUFSIZE,
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_debug_ll(XGE_ERR, "sw_dev_read(): status %d", status);
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (0);
a23fd118e437af0a7877dd313db8fdaa3537c675ylxgell_pciconf_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
a23fd118e437af0a7877dd313db8fdaa3537c675yl status = xge_hal_aux_pci_config_read(lldev->devh, XGELL_PCICONF_BUFSIZE,
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_debug_ll(XGE_ERR, "pci_config_read(): status %d", status);
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (0);
a23fd118e437af0a7877dd313db8fdaa3537c675ylxgell_about_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
a23fd118e437af0a7877dd313db8fdaa3537c675yl status = xge_hal_aux_about_read(lldev->devh, XGELL_ABOUT_BUFSIZE,
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_debug_ll(XGE_ERR, "about_read(): status %d", status);
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (0);
a23fd118e437af0a7877dd313db8fdaa3537c675ylstatic unsigned long bar0_offset = 0x110; /* adapter_control */
a23fd118e437af0a7877dd313db8fdaa3537c675ylxgell_bar0_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (0);
a23fd118e437af0a7877dd313db8fdaa3537c675ylxgell_bar0_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp)
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_debug_ll(XGE_TRACE, "bar0: new value %s:%lX", value, bar0_offset);
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (0);
a23fd118e437af0a7877dd313db8fdaa3537c675ylxgell_debug_level_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
a23fd118e437af0a7877dd313db8fdaa3537c675yl (void) mi_mpprintf(mp, "debug_level %d", xge_hal_driver_debug_level());
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (0);
a23fd118e437af0a7877dd313db8fdaa3537c675ylxgell_debug_level_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
a23fd118e437af0a7877dd313db8fdaa3537c675yl if (level < XGE_NONE || level > XGE_ERR || end == value) {
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (0);
a23fd118e437af0a7877dd313db8fdaa3537c675ylxgell_debug_module_mask_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (0);
a23fd118e437af0a7877dd313db8fdaa3537c675ylxgell_debug_module_mask_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (0);
a23fd118e437af0a7877dd313db8fdaa3537c675ylxgell_devconfig_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
a23fd118e437af0a7877dd313db8fdaa3537c675yl return (0);
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_device_register
a23fd118e437af0a7877dd313db8fdaa3537c675yl * @devh: pointer on HAL device
a23fd118e437af0a7877dd313db8fdaa3537c675yl * @config: pointer on this network device configuration
a23fd118e437af0a7877dd313db8fdaa3537c675yl * @ll_out: output pointer. Will be assigned to valid LL device.
a23fd118e437af0a7877dd313db8fdaa3537c675yl * This function will allocate and register network device
a23fd118e437af0a7877dd313db8fdaa3537c675ylxgell_device_register(xgelldev_t *lldev, xgell_config_t *config)
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * Initialize some NDD interface for internal debug.
a23fd118e437af0a7877dd313db8fdaa3537c675yl if (nd_load(&lldev->ndp, "pciconf", xgell_pciconf_get, NULL,
a23fd118e437af0a7877dd313db8fdaa3537c675yl if (nd_load(&lldev->ndp, "bar0", xgell_bar0_get, xgell_bar0_set,
a23fd118e437af0a7877dd313db8fdaa3537c675yl if (nd_load(&lldev->ndp, "debug_level", xgell_debug_level_get,
a23fd118e437af0a7877dd313db8fdaa3537c675yl xgell_debug_module_mask_get, xgell_debug_module_mask_set,
a23fd118e437af0a7877dd313db8fdaa3537c675yl if (nd_load(&lldev->ndp, "devconfig", xgell_devconfig_get, NULL,
da14cebe459d3275048785f25bd869cb09b5307fEric Cheng * MAC Registration.
3c785c4c5ac4bb72eb0cff3cda1255dfc9613aa1yl /* Always free the macp after register */
8347601bcb0a439f6e50fc36b4039a73d08700e1yl /* Calculate tx_copied_max here ??? */
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_debug_ll(XGE_TRACE, "etherenet device %s%d registered",
ba2e4443695ee6a6f420a35cd4fc3d3346d22932seb xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
ba2e4443695ee6a6f420a35cd4fc3d3346d22932seb xge_debug_ll(XGE_ERR, "%s", "unable to register networking device");
a23fd118e437af0a7877dd313db8fdaa3537c675yl * xgell_device_unregister
a23fd118e437af0a7877dd313db8fdaa3537c675yl * @devh: pointer on HAL device
a23fd118e437af0a7877dd313db8fdaa3537c675yl * @lldev: pointer to valid LL device.
a23fd118e437af0a7877dd313db8fdaa3537c675yl * This function will unregister and free network device
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_debug_ll(XGE_ERR, "unable to unregister device %s%d",
a23fd118e437af0a7877dd313db8fdaa3537c675yl xge_debug_ll(XGE_TRACE, "etherenet device %s%d unregistered",