/* igb_rx.c revision fa4e188e8e6bc718b1a096b3d1dc046952a69304 */
/*
* CDDL HEADER START
*
* Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include "igb_sw.h"
/* function prototypes */
#ifndef IGB_DEBUG
#pragma inline(igb_rx_assoc_hcksum)
#endif
/*
 * igb_rx_recycle - the call-back function to reclaim an rx buffer
 *
 * This function is called when an mp is freed by the user through a
 * freeb() call (only for an mp constructed through desballoc()).
 * It returns the freed buffer to the free list.
 *
 * NOTE(review): this copy of the file appears truncated -- the function's
 * name/parameter line and most of its statements are missing.  The
 * surviving fragments below are documented as-is; do not compile.
 */
void
{
if (recycle_rcb->ref_cnt == 0) {
/*
 * This case only happens when rx buffers are being freed
 * in igb_stop() and freemsg() is called.
 */
return;
}
/*
 * Use the recycled data buffer to generate a new mblk.
 * NOTE(review): truncated -- presumably a desballoc() call whose
 * trailing arguments are all that survives on the next line.
 */
0, &recycle_rcb->free_rtn);
/*
 * Put the recycled rx control block into the free list
 */
/*
 * The atomic operation on the number of available rx control
 * blocks in the free list is used to make the recycling mutually
 * exclusive with the receiving.
 */
/*
 * Consider the case where the interface is unplumbed and there are
 * still some buffers held by the upper layer.  When such a buffer
 * is returned, we need to free it.
 */
if (ref_cnt == 0) {
}
/*
 * When no buffer belonging to this rx_data is held by the upper
 * layer any more, the rx_data itself can be freed.
 * NOTE(review): truncated -- only part of the condition survives.
 */
(rx_data->rcb_pending == 0))
}
}
/*
 * igb_rx_copy - Use copy to process the received packet
 *
 * This function will use bcopy to process the packet
 * and send the copied packet upstream.
 *
 * NOTE(review): truncated -- the parameter list, the DMA-handle check
 * arguments, the allocation call, and the bcopy itself are missing.
 */
static mblk_t *
{
/* Fail the receive if the rx DMA handle has gone bad. */
if (igb_check_dma_handle(
return (NULL);
}
/*
 * Allocate a buffer to receive this packet.
 * NOTE(review): the allocation call is missing; only the failure
 * message fragment below survives.
 */
"igb_rx_copy: allocate buffer failed");
return (NULL);
}
/*
 * Copy the data received into the new cluster
 */
return (mp);
}
/*
 * igb_rx_bind - Use an existing DMA buffer to build an mblk for receiving
 *
 * This function will use a pre-bound DMA buffer to receive the packet
 * and build an mblk that will be sent upstream.
 *
 * NOTE(review): truncated -- the parameter list and most statements
 * are missing from this copy of the file.
 */
static mblk_t *
{
/*
 * If the free list is empty, we cannot proceed to send
 * the current DMA buffer upstream. We'll have to return
 * and use bcopy to process the packet.
 */
return (NULL);
/*
 * If the mp of the rx control block is NULL, try to do
 * desballoc again.
 * NOTE(review): the line below is corrupted -- "¤t_rcb" looks
 * like a mis-encoded "&current_rcb" (HTML entity mangling); restore
 * "&current_rcb->free_rtn" when repairing this file.
 */
0, ¤t_rcb->free_rtn);
/*
 * If it fails to build an mblk using the current
 * DMA buffer, we have to return and use bcopy to
 * process the packet.
 */
return (NULL);
}
}
/*
 * Sync up the data received
 */
if (igb_check_dma_handle(
return (NULL);
}
/*
 * Strip one free rx control block off the free list
 */
/*
 * Put the rx control block on the work list
 */
return (mp);
}
/*
 * igb_rx_assoc_hcksum
 *
 * Check the rx hardware checksum status and associate the hcksum flags.
 *
 * NOTE(review): truncated -- the parameter list and the flag-setting
 * statements are missing; only the descriptor status tests survive.
 */
static void
{
uint32_t hcksum_flags = 0;
/* Ignore Checksum Indication */
if (status_error & E1000_RXD_STAT_IXSM)
return;
/*
 * Check the TCP/UDP checksum status reported by hardware
 */
if (((status_error & E1000_RXD_STAT_TCPCS) ||
(status_error & E1000_RXD_STAT_UDPCS)) &&
/*
 * Check IP Checksum
 */
if ((status_error & E1000_RXD_STAT_IPCS) &&
if (hcksum_flags != 0) {
}
}
/*
 * NOTE(review): truncated -- the name and parameters of this function
 * are missing.  Given its position after the per-packet helpers and
 * its mblk_t * return type, it is presumably the ring's rx entry or
 * poll wrapper; confirm against the original igb_rx.c.
 */
mblk_t *
{
return (NULL);
return (mp);
}
/*
 * igb_rx - Receive the data of one ring
 *
 * This function goes through the h/w descriptors in one specified rx ring,
 * receiving the data if the descriptor status shows the data is ready.
 * It returns a chain of mblks containing the received data, to be
 * passed up to mac_rx().
 *
 * NOTE(review): truncated -- the parameter list, descriptor fetch,
 * rx_copy/rx_bind dispatch, statistics update, and tail-pointer write
 * are missing; only fragments survive below.
 */
mblk_t *
{
union e1000_adv_rx_desc *current_rbd;
return (NULL);
/*
 * Sync the receive descriptors before
 * accepting the packets
 */
if (igb_check_dma_handle(
return (NULL);
}
/*
 * Get the start point of the rx bd ring which should be examined
 * during this cycle.
 */
pkt_num = 0;
total_bytes = 0;
/* Process descriptors while the Descriptor Done bit is set. */
while (status_error & E1000_RXD_STAT_DD) {
/*
 * If hardware has found errors, but the error
 * is a hardware checksum error, do not discard the
 * packet here; let the upper layer compute the checksum.
 * Otherwise discard the packet.
 */
if ((status_error & E1000_RXDEXT_ERR_FRAME_ERR_MASK) ||
!(status_error & E1000_RXD_STAT_EOP)) {
goto rx_discard;
}
/* In polling mode, stop once the byte budget would be exceeded. */
if ((poll_bytes != IGB_NO_POLL) &&
break;
total_bytes += pkt_len;
/*
 * For packets with length more than the copy threshold,
 * we'll first try to use the existing DMA buffer to build
 * an mblk and send the mblk upstream.
 *
 * If the first method fails, or the packet length is less
 * than the copy threshold, we'll allocate a new mblk and
 * copy the packet data to the mblk.
 */
/*
 * Check h/w checksum offload status
 */
if (igb->rx_hcksum_enable)
}
/* Update per-ring rx statistics */
/*
 * Reset rx descriptor read bits
 */
/*
 * The receive function is in interrupt context, so here
 * rx_limit_per_intr is used to avoid spending too long
 * receiving per interrupt.
 */
break;
}
}
/*
 * Update the h/w tail accordingly
 */
}
return (mblk_head);
}