/* rge_rxtx.c revision 343c26163d86b7f3b861ae03b20226fecee1ab99 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include "rge.h"
/*
* ========== RX side routines ==========
*/
/*
 * Atomically decrement a counter by n, refusing to go below n.
 *
 * NOTE(review): this excerpt is truncated — the function name
 * (rge_atomic_reserve, per the pragma), the parameter list, and the
 * compare-and-swap retry loop are missing from the visible source.
 * Restore the full body from the original rge_rxtx.c before building.
 */
#pragma inline(rge_atomic_reserve)
static uint32_t
{
	/* ATOMICALLY */
	do {
		if (oldval <= n)
			return (0);		/* no resources left	*/
	return (newval);
}
/*
 * Atomically increment a counter
 *
 * NOTE(review): truncated — the function name (rge_atomic_renounce,
 * per the pragma), parameters, and the atomic update loop body are
 * missing from the visible source; restore before building.
 */
#pragma inline(rge_atomic_renounce)
static void
{
	/* ATOMICALLY */
	do {
	}
/*
 * Callback code invoked from STREAMS when the recv data buffer is
 * free for recycling.
 *
 * NOTE(review): truncated — the function name (presumably the rx
 * recycle callback registered via desballoc()), its argument, the
 * early-exit condition, and the desballoc()/free-ring refill calls
 * are missing from the visible source; confirm against the full file.
 */
void
{
	/*
	 * In rge_unattach() and rge_attach(), this callback function will
	 * also be called to free mp in rge_fini_rings() and rge_init_rings().
	 * In such a situation we shouldn't do the desballoc() below,
	 * otherwise there'll be a memory leak.
	 */
	return;
	/*
	 * Recycle the data buffer again
	 * and put it back on the free ring
	 */
	return;
	}
}
/*
 * Refill a receive buffer descriptor from the free ring; returns 1 on
 * success, 0 on failure.
 *
 * NOTE(review): truncated — the function name (rge_rx_refill, per the
 * pragma), parameters, and the success-path statements are missing
 * from the visible source; restore before building.
 */
#pragma inline(rge_rx_refill)
static int
{
		return (1);
	} else {
		/*
		 * This situation shouldn't happen
		 */
		return (0);
	}
}
/*
 * Process one received packet described by a receive buffer descriptor
 * and return it as an mblk (NULL on any error or allocation failure).
 *
 * NOTE(review): truncated — the function name (rge_receive_packet, per
 * the pragma), parameters, the reads of rx_status/packet_len/pflags,
 * and many statement bodies are missing from the visible source;
 * restore the full body before building.
 */
#pragma inline(rge_receive_packet)
static mblk_t *
{
	struct ether_vlan_header *ehp;
	/*
	 * Read receive status
	 */
	/*
	 * Handle error packet
	 */
	if (!(rx_status & BD_FLAG_PKT_END)) {
		/* NOTE(review): "packat" typo in debug string — fix upstream */
		RGE_DEBUG(("rge_receive_packet: not a complete packat"));
		return (NULL);
	}
	if (rx_status & RBD_FLAG_ERROR) {
		if (rx_status & RBD_FLAG_CRC_ERR)
		if (rx_status & RBD_FLAG_RUNT)
		/*
		 * Set chip_error flag to reset chip:
		 * (suggested in Realtek programming guide.)
		 */
		RGE_DEBUG(("rge_receive_packet: error packet, status = %x",
		    rx_status));
		return (NULL);
	}
	/*
	 * Handle size error packet
	 */
	packet_len -= ETHERFCSL;
	if (pflags & RBD_VLAN_PKT)
		minsize -= VLAN_TAGSZ;
		return (NULL);
	}
	/*
	 * Allocate buffer to receive this good packet
	 */
		RGE_DEBUG(("rge_receive_packet: allocate buffer fail"));
		return (NULL);
	}
	/*
	 * Copy the data found into the new cluster
	 */
	} else {
		/*
		 * Refill the current receive bd buffer;
		 * if this fails, just keep the mp.
		 */
			return (NULL);
	}
	/*
	 * VLAN packet ?
	 */
	if (pflags & RBD_VLAN_PKT)
	if (vtag) {
		/*
		 * As the h/w strips the VLAN tag from an incoming packet,
		 * we need to insert a VLAN tag into this packet before
		 * sending it up.
		 */
		    2 * ETHERADDRL);
	}
	/*
	 * Check h/w checksum offload status
	 */
	pflags = 0;
	if (pflags != 0) {
	}
	return (mp);
}
/*
 * Accept the packets received in rx ring.
 *
 * Returns a chain of mblks containing the received data, to be
 * passed up to mac_rx().
 * The routine returns only when a complete scan has been performed
 * without finding any packets to receive.
 * This function must SET the OWN bit of BD to indicate the packets
 * it has accepted from the ring.
 *
 * NOTE(review): truncated — the function name (rge_receive_ring, per
 * the pragma), parameters, the DMA sync call, and the descriptor scan
 * loop are missing from the visible source; restore before building.
 */
#pragma inline(rge_receive_ring)
static mblk_t *
{
	/*
	 * Sync (all) the receive ring descriptors
	 * before accepting the packets they describe
	 */
	}
	/*
	 * Clear RBD flags
	 */
	}
	return (head);
}
/*
 * Receive all ready packets.
 *
 * NOTE(review): truncated — the function name (rge_receive, per the
 * pragma), parameters, and the entire body (presumably the
 * rge_receive_ring()/mac_rx() hand-off) are missing from the visible
 * source; restore before building.
 */
#pragma no_inline(rge_receive)
void
{
}
/*
 * ========== Send-side recycle routines ==========
 */

/*
 * Claim the next free slot in the transmit ring and return its index.
 *
 * NOTE(review): truncated — the function name (rge_send_claim, per the
 * pragma), parameters, the slot computation, and the ASSERTions the
 * comment below refers to are missing from the visible source.
 */
#pragma inline(rge_send_claim)
static uint32_t
{
	/*
	 * We check that our invariants still hold:
	 * +	the slot and next indexes are in range
	 * +	the slot must not be the last one (i.e. the *next*
	 *	index must not match the next-recycle index), because
	 *	there must always be at least one free slot in a ring
	 */
	return (slot);
}
/*
 * We don't want to call this function every time after a successful
 * h/w transmit done in ISR. Instead, we call this function in
 * rge_send() when there are few or no free tx BDs remaining.
 *
 * NOTE(review): truncated — the function name (rge_send_recycle, per
 * the pragma), parameters, and most of the recycle loop are missing
 * from the visible source; restore before building.
 */
#pragma inline(rge_send_recycle)
static void
{
	uint32_t n;
		goto resched;
	do {
		/*
		 * Recycled nothing: bump the watchdog counter,
		 * thus guaranteeing that it's nonzero
		 * (watchdog activated).
		 */
			return;
		}
		break;
	}
	/*
	 * Recycled something :-)
	 */
	n += RGE_SEND_SLOTS;
	if (rgep->resched_needed &&
	}
}
/*
 * Send a message by copying it into a preallocated (and premapped) buffer
 *
 * NOTE(review): truncated — the function name (rge_send_copy, per the
 * pragma), parameters, the copy loop, and the descriptor-flag updates
 * are missing from the visible source; restore before building.
 */
#pragma inline(rge_send_copy)
static void
{
	char *txb;
	struct ether_header *ethhdr;
	/*
	 * IMPORTANT:
	 *	Up to the point where it claims a place, a send_msg()
	 *	routine can indicate failure by returning B_FALSE. Once it's
	 *	claimed a place, it mustn't fail.
	 *
	 * In this version, there's no setup to be done here, and there's
	 * nothing that can fail, so we can go straight to claiming our
	 * already-reserved place on the train.
	 *
	 * This is the point of no return!
	 */
	/*
	 * Copy the data into a pre-mapped buffer, which avoids the
	 * overhead (and complication) of mapping/unmapping STREAMS
	 * buffers and keeping hold of them until the DMA has completed.
	 * (NOTE(review): the middle of this comment was missing from the
	 * visible source; wording reconstructed — confirm upstream.)
	 *
	 * Because all buffers are the same size, and larger than the
	 * longest single valid message, we don't have to bother about
	 * splitting the message across multiple buffers either.
	 */
	totlen = 0;
	if (tci != 0) {
		/*
		 * Do not copy the vlan tag
		 */
		}
	}
	}
	}
	else
	/*
	 * We've reached the end of the chain; and we should have
	 * collected no more than ETHERMAX bytes into our buffer.
	 */
	/*
	 * Update the hardware send buffer descriptor flags
	 */
	if (tci != 0) {
	} else {
	}
	/*
	 * h/w checksum offload flags
	 */
	if (pflags & HCK_FULLCKSUM) {
		    sizeof (struct ip));
		/*
		 * Is the packet an IP(v4) packet?
		 */
		    sizeof (struct ether_header));
		}
	}
	if (pflags & HCK_IPV4_HDRCKSUM)
	/*
	 * We're done.
	 * The message can be freed right away, as we've already
	 * copied the contents ...
	 */
}
/*
 * NOTE(review): truncated — this appears to be rge_send() (the debug
 * string below names it); the signature, the slot reservation, the
 * VLAN-tag extraction, the ASSERTions, and the tx-trigger/re-issue
 * loop are all missing from the visible source; restore before
 * building.
 */
static boolean_t
{
	struct ether_vlan_header *ehp;
	/*
	 * Try to reserve a place in the transmit ring.
	 */
		RGE_DEBUG(("rge_send: no free slots"));
		return (B_FALSE);
	}
	/*
	 * Determine if the packet is VLAN tagged.
	 */
	tci = 0;
	/*
	 * We've reserved a place :-)
	 * These ASSERTions check that our invariants still hold:
	 *	there must still be at least one free place
	 *	there must be at least one place NOT free (ours!)
	 */
	/*
	 * Trigger chip h/w transmit ...
	 */
	/*
	 * It's observed that in current Realtek PCI-E chips, the tx
	 * request of the second fragment for upper layer packets
	 * will be ignored if the hardware transmission is in
	 * progress and will not be processed when the tx engine
	 * is idle. So one solution is to re-issue the requests
	 * if the hardware and the software tx packets statistics
	 * are inconsistent.
	 */
	else
		break;
	}
	}
	}
	return (B_TRUE);
}
/*
 * NOTE(review): orphaned fragment — the signature for this body is
 * missing from the visible source. Returning DDI_INTR_CLAIMED suggests
 * a (soft) interrupt handler, probably the tx-reschedule softint;
 * confirm against the full rge_rxtx.c.
 */
{
	return (DDI_INTR_CLAIMED);
}
/*
 * rge_m_tx() - send a chain of packets
 *
 * Returns the unsent remainder of the chain (mp), per the GLDv3 mc_tx
 * convention visible here; NOTE(review): truncated — the signature,
 * the mutex handling, and the per-mblk rge_send() loop are missing
 * from the visible source; restore before building.
 */
mblk_t *
{
		RGE_DEBUG(("rge_m_tx: tx doesn't work"));
		return (mp);
	}
		break;
	}
	}
	return (mp);
}