rge_rxtx.c revision ba2e4443695ee6a6f420a35cd4fc3d3346d22932
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include "rge.h"
/*
* ========== RX side routines ==========
*/
#pragma inline(rge_atomic_reserve)
static uint32_t
{
/* ATOMICALLY */
do {
if (oldval <= n)
return (0); /* no resources left */
return (newval);
}
/*
* Atomically increment a counter
*/
#pragma inline(rge_atomic_renounce)
static void
{
/* ATOMICALLY */
do {
}
/*
* Callback code invoked from STREAMs when the recv data buffer is free
* for recycling.
*/
void
{
/*
* If rge_unattach() is called, this callback function will also
* be called when we try to free the mp in rge_fini_rings().
* In such a situation, we needn't do the desballoc() below; otherwise,
* there'll be a memory leak.
*/
return;
/*
* Recycle the data buffer
* and return it to the free ring
*/
return;
}
} else {
/*
* This situation shouldn't happen
*/
}
}
#pragma inline(rge_rx_refill)
static int
{
return (1);
} else {
/*
* This situation shouldn't happen
*/
return (0);
}
}
#pragma inline(rge_receive_packet)
static mblk_t *
{
struct ether_vlan_header *ehp;
/*
* Read receive status
*/
/*
* Handle error packet
*/
if (!(rx_status & BD_FLAG_PKT_END)) {
RGE_DEBUG(("rge_receive_packet: not a complete packat"));
return (NULL);
}
if (rx_status & RBD_FLAG_ERROR) {
if (rx_status & RBD_FLAG_CRC_ERR)
if (rx_status & RBD_FLAG_RUNT)
/*
* Set chip_error flag to reset chip:
* (suggested in Realtek programming guide.)
*/
RGE_DEBUG(("rge_receive_packet: error packet, status = %x",
rx_status));
return (NULL);
}
/*
* Handle size error packet
*/
return (NULL);
}
/*
* Allocate buffer to receive this good packet
*/
RGE_DEBUG(("rge_receive_packet: allocate buffer fail"));
return (NULL);
}
/*
* Copy the data found into the new cluster
*/
} else {
/*
* Refill the current receive bd buffer
* if fails, will just keep the mp.
*/
return (NULL);
}
/*
* VLAN packet ?
*/
if (pflags & RBD_VLAN_PKT)
if (vtag) {
/*
* As the h/w strips the VLAN tag from the incoming packet, we need
* to insert the VLAN tag into this packet before sending it up.
*/
2 * ETHERADDRL);
}
/*
* Check h/w checksum offload status
*/
pflags = 0;
if (pflags != 0) {
}
return (mp);
}
/*
* Accept the packets received in rx ring.
*
* Returns a chain of mblks containing the received data, to be
* passed up to mac_rx().
* The routine returns only when a complete scan has been performed
* without finding any packets to receive.
* This function must SET the OWN bit of BD to indicate the packets
* it has accepted from the ring.
*/
#pragma inline(rge_receive_ring)
static mblk_t *
{
/*
* Sync (all) the receive ring descriptors
* before accepting the packets they describe
*/
}
/*
* Clear RBD flags
*/
}
return (head);
}
/*
* Receive all ready packets.
*/
#pragma no_inline(rge_receive)
void
{
}
/*
* ========== Send-side recycle routines ==========
*/
#pragma inline(rge_send_claim)
static uint32_t
{
/*
* We check that our invariants still hold:
* + the slot and next indexes are in range
* + the slot must not be the last one (i.e. the *next*
* index must not match the next-recycle index), 'cos
* there must always be at least one free slot in a ring
*/
return (slot);
}
/*
* We don't want to call this function every time after a successful
* h/w transmit done in ISR. Instead, we call this function in
* rge_send() when there are few or no free tx BDs remaining.
*/
#pragma inline(rge_send_recycle)
static void
{
uint32_t n;
return;
do {
/*
* Bump the watchdog counter, thus guaranteeing
* that it's nonzero (watchdog activated).
*/
return;
}
break;
}
n += RGE_SEND_SLOTS;
if (rgep->resched_needed) {
rgep->resched_needed = 0;
}
}
/*
* Send a message by copying it into a preallocated (and premapped) buffer
*/
#pragma inline(rge_send_copy)
static void
{
char *txb;
/*
* IMPORTANT:
* Up to the point where it claims a place, a send_msg()
* routine can indicate failure by returning B_FALSE. Once it's
* claimed a place, it mustn't fail.
*
* In this version, there's no setup to be done here, and there's
* nothing that can fail, so we can go straight to claiming our
* already-reserved place on the train.
*
* This is the point of no return!
*/
/*
* Copy the data into a pre-mapped buffer, which avoids the
* overhead (and complication) of mapping/unmapping STREAMS
* buffers and keeping hold of them until the DMA has completed.
*
* Because all buffers are the same size, and larger than the
* longest single valid message, we don't have to bother about
* splitting the message across multiple buffers either.
*/
}
}
/*
* We'e reached the end of the chain; and we should have
* collected no more than ETHERMAX bytes into our buffer.
*/
/*
* Update the hardware send buffer descriptor; then we're done
* and return. The message can be freed right away in rge_send(),
* as we've already copied the contents ...
*/
if (tci != 0) {
} else {
}
if (pflags & HCK_FULLCKSUM) {
switch (proto) {
case IS_UDP_PKT:
proto = IS_IPV4_PKT;
break;
case IS_TCP_PKT:
proto = IS_IPV4_PKT;
break;
default:
break;
}
}
}
static boolean_t
{
struct ether_vlan_header *ehp;
struct ether_header *ethhdr;
/*
* Determine if the packet is VLAN tagged.
*/
/*
* Need to preserve checksum flags across pullup.
*/
sizeof (struct ether_vlan_header))) {
RGE_DEBUG(("rge_send: pullup failure"));
return (B_FALSE);
}
}
need_strip = B_TRUE;
}
/*
* Try to reserve a place in the transmit ring.
*/
RGE_DEBUG(("rge_send: no free slots"));
return (B_FALSE);
}
/*
* We've reserved a place :-)
* These ASSERTions check that our invariants still hold:
* there must still be at least one free place
* there must be at least one place NOT free (ours!)
*/
/*
* Now that we know that there is space to transmit the packet
* strip any VLAN tag that is present.
*/
if (need_strip) {
2 * ETHERADDRL);
}
/*
* Check the packet protocol type to select the appropriate h/w checksum offload
*/
sizeof (struct ip)) {
/*
* Is the packet an IP(v4) packet?
*/
proto = IS_IPV4_PKT;
sizeof (struct ether_header));
proto = IS_TCP_PKT;
proto = IS_UDP_PKT;
}
}
/*
* Trigger chip h/w transmit ...
*/
}
return (B_TRUE);
}
{
}
return (rslt);
}
/*
* rge_m_tx() - send a chain of packets
*/
mblk_t *
{
RGE_DEBUG(("rge_m_tx: chip not running"));
return (mp);
}
break;
}
}
return (mp);
}