/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include "bge_impl.h"
/*
* ========== RX side routines ==========
*/
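/*
 * The routines below make up the receive path of the driver:
 *
 *	bge_refill()		repost a receive buffer to its buffer ring
 *	bge_receive_packet()	validate one return-ring descriptor and
 *				copy the packet it describes into an mblk
 *	bge_receive_ring()	drain one return ring (interrupt path)
 *	bge_poll_ring()		drain one return ring (polled path)
 *	bge_receive()		drain all rings and pass the chains upstream
 */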
static void bge_refill(bge_t *bgep, buff_ring_t *brp, sw_rbd_t *srbdp);
#pragma	inline(bge_refill)
/*
* Return the specified buffer (srbdp) to the ring it came from (brp).
*
* Note:
* If the driver is compiled with only one buffer ring *and* one
* return ring, then the buffers must be returned in sequence.
* In this case, we don't have to consider anything about the
* buffer at all; we can simply advance the cyclic counter. And
* we don't even need the refill mutex <rf_lock>, as the caller
* will already be holding the (one-and-only) <rx_lock>.
*
* If the driver supports multiple buffer rings, but only one
* return ring, the same still applies (to each buffer ring
* separately).
*/
static void
bge_refill(bge_t *bgep, buff_ring_t *brp, sw_rbd_t *srbdp)
{
	bge_rbd_t *hw_rbd_p;

	/*
	 * Minimal sketch of the refill step, assuming the buffer-ring
	 * bookkeeping in bge_impl.h (<rf_next> cyclic counter, <desc>
	 * descriptor area, saved <hw_rbd>): repost the buffer's hardware
	 * descriptor into the next free producer slot, then advance the
	 * cyclic counter.  As the note above explains, with one buffer
	 * ring and one return ring this is all that is needed, and the
	 * caller's <rx_lock> already serialises us.
	 */
	hw_rbd_p = DMA_VPTR(brp->desc);
	hw_rbd_p += brp->rf_next;
	*hw_rbd_p = srbdp->hw_rbd;
	brp->rf_next = NEXT(brp->rf_next, brp->desc.nslots);
}
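/*
 * A minimal sketch (not shipped logic) of what the multi-return-ring
 * case described in the note above would add: refills from different
 * return rings could then race, so the per-buffer-ring <rf_lock>
 * mentioned there would have to bracket the refill step, roughly:
 *
 *	mutex_enter(brp->rf_lock);
 *	(refill as in bge_refill() above)
 *	mutex_exit(brp->rf_lock);
 *
 * With a single return ring the caller's <rx_lock> already provides
 * this serialisation, which is why bge_refill() takes no lock.
 */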
static mblk_t *bge_receive_packet(bge_t *bgep, bge_rbd_t *hw_rbd_p,
    recv_ring_t *rrp);
#pragma inline(bge_receive_packet)
static mblk_t *
bge_receive_packet(bge_t *bgep, bge_rbd_t *hw_rbd_p, recv_ring_t *rrp)
{
	bge_rbd_t hw_rbd;
	buff_ring_t *brp;
	sw_rbd_t *srbdp;
	uchar_t *dp;
	mblk_t *mp;
	uint_t len;
	uint_t minsize;
	uint_t maxsize;
	uint32_t pflags;

	mp = NULL;
	hw_rbd = *hw_rbd_p;

	/*
	 * Work out which buffer ring this descriptor came from, using
	 * its ring-selection flag bits.  (The descriptor and ring field
	 * names used here and below are assumed from bge_hw.h/bge_impl.h.)
	 */
	switch (hw_rbd.flags & (RBD_FLAG_MINI_RING | RBD_FLAG_JUMBO_RING)) {
	case RBD_FLAG_MINI_RING | RBD_FLAG_JUMBO_RING:
default:
/* error, this shouldn't happen */
goto error;
	case RBD_FLAG_JUMBO_RING:
		brp = &bgep->buff[BGE_JUMBO_BUFF_RING];
		break;
#if (BGE_BUFF_RINGS_USED > 2)
	case RBD_FLAG_MINI_RING:
		brp = &bgep->buff[BGE_MINI_BUFF_RING];
		break;
#endif /* BGE_BUFF_RINGS_USED > 2 */
	case 0:
		brp = &bgep->buff[BGE_STD_BUFF_RING];
		break;
	}

	if (hw_rbd.index >= brp->desc.nslots) {
		/* error, this shouldn't happen */
		goto error;
	}

	srbdp = &brp->sw_rbds[hw_rbd.index];
	if (hw_rbd.opaque != srbdp->pbuf.token) {
		/* bogus, drop the packet */
		goto refill;
	}
	if ((hw_rbd.flags & RBD_FLAG_PACKET_END) == 0) {
		/* bogus, drop the packet */
		goto refill;
	}
	if (hw_rbd.flags & RBD_FLAG_FRAME_HAS_ERROR) {
		/* bogus, drop the packet */
		goto refill;
	}

	len = hw_rbd.len;
#ifdef BGE_IPMI_ASF
	/*
	 * In the ASF scenario the VLAN tag has already been stripped
	 * from the incoming packet, so allow for a correspondingly
	 * smaller maximum frame.
	 */
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG))
		maxsize = bgep->chipid.ethmax_size + ETHERFCSL;
	else
#endif
/*
* H/W will not strip the VLAN tag from incoming packet
* now, as RECEIVE_MODE_KEEP_VLAN_TAG bit is set in
* RECEIVE_MAC_MODE_REG register.
*/
		maxsize = bgep->chipid.ethmax_size + VLAN_TAGSZ + ETHERFCSL;

	if (len > maxsize) {
		/* bogus, drop the packet */
		goto refill;
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG))
		minsize = ETHERMIN + ETHERFCSL - VLAN_TAGSZ;
	else
#endif
		minsize = ETHERMIN + ETHERFCSL;

	if (len < minsize) {
		/* bogus, drop the packet */
		goto refill;
	}
/*
* Packet looks good; get a buffer to copy it into.
* We want to leave some space at the front of the allocated
* buffer in case any upstream modules want to prepend some
* sort of header. This also has the side-effect of making
* the packet *contents* 4-byte aligned, as required by NCA!
*/
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
		mp = allocb(BGE_HEADROOM + len + VLAN_TAGSZ, 0);
	} else {
#endif
		mp = allocb(BGE_HEADROOM + len, 0);
#ifdef BGE_IPMI_ASF
	}
#endif
	if (mp == NULL) {
		/* Nothing to do but drop the packet */
		goto refill;
	}
/*
* Sync the data and copy it to the STREAMS buffer.
*/
	DMA_SYNC(srbdp->pbuf, DDI_DMA_SYNC_FORKERNEL);
	if (bge_check_dma_handle(bgep, srbdp->pbuf.dma_hdl) != DDI_FM_OK) {
		/* DMA fault: flag the error and let the factotum recover */
		bgep->bge_dma_error = B_TRUE;
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		freemsg(mp);
		return (NULL);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
		/*
		 * As VLAN tag has been stripped from incoming packet in ASF
		 * scenario, we insert it into this packet again.
		 *
		 * (Sketch: the TCI is taken from the descriptor's
		 * <vlan_tci> field, assumed from bge_hw.h.)
		 */
		struct ether_vlan_header *ehp;

		mp->b_rptr = dp = mp->b_rptr + BGE_HEADROOM - VLAN_TAGSZ;
		bcopy(DMA_VPTR(srbdp->pbuf), dp, 2 * ETHERADDRL);
		ehp = (void *)dp;
		ehp->ether_tpid = htons(ETHERTYPE_VLAN);
		ehp->ether_tci = htons(hw_rbd.vlan_tci);
		bcopy((uchar_t *)DMA_VPTR(srbdp->pbuf) + 2 * ETHERADDRL,
		    dp + 2 * ETHERADDRL + VLAN_TAGSZ, len - 2 * ETHERADDRL);
	} else {
#endif
		mp->b_rptr = dp = mp->b_rptr + BGE_HEADROOM;
		bcopy(DMA_VPTR(srbdp->pbuf), dp, len);
#ifdef BGE_IPMI_ASF
	}

	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
		mp->b_wptr = dp + len + VLAN_TAGSZ - ETHERFCSL;
	} else
#endif
		mp->b_wptr = dp + len - ETHERFCSL;
/*
* Special check for one specific type of data corruption;
* in a good packet, the first 8 bytes are *very* unlikely
* to be the same as the second 8 bytes ... but we let the
* packet through just in case.
*/
	if (bcmp(dp, dp + 8, 8) == 0)
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "stuttered packet?"));

	/*
	 * Pass any h/w checksum results upstream.  (mac_hcksum_set() as
	 * in current illumos; older code used hcksum_assoc().  Flag names
	 * other than HCK_FULLCKSUM are assumed from the descriptor
	 * definitions.)
	 */
	pflags = 0;
	if (hw_rbd.flags & RBD_FLAG_TCP_UDP_CHECKSUM)
		pflags |= HCK_FULLCKSUM;
	if (hw_rbd.flags & RBD_FLAG_IP_CHECKSUM)
		pflags |= HCK_IPV4_HDRCKSUM_OK;
	if (pflags != 0)
		mac_hcksum_set(mp, 0, 0, 0, hw_rbd.tcp_udp_cksum, pflags);
	/* Update per-ring rx statistics (counter names assumed) */
	rrp->rx_pkts++;
	rrp->rx_bytes += len;

refill:
	/*
	 * Replace the buffer in the ring it came from ...
	 */
	bge_refill(bgep, brp, srbdp);
	return (mp);
error:
	/*
	 * We come here if the integrity of the ring descriptors
	 * (rather than merely packet data) appears corrupted.
	 * The factotum will attempt to reset-and-recover.
	 */
	bgep->bge_chip_state = BGE_CHIP_ERROR;
	bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
	return (NULL);
}
/*
* Accept the packets received in the specified ring up to
* (but not including) the producer index in the status block.
*
* Returns a chain of mblks containing the received data, to be
* passed up to gld_recv() (we can't call gld_recv() from here,
* 'cos we're holding the per-ring receive lock at this point).
*
* This function must advance (rrp->rx_next) and write it back to
* the chip to indicate the packets it has accepted from the ring.
*/
static mblk_t *bge_receive_ring(bge_t *bgep, recv_ring_t *rrp);
#ifndef	DEBUG
#pragma	inline(bge_receive_ring)
#endif

static mblk_t *
bge_receive_ring(bge_t *bgep, recv_ring_t *rrp)
{
	bge_rbd_t *hw_rbd_p;
	uint64_t slot;
	mblk_t *head;
	mblk_t **tail;
	mblk_t *mp;
int recv_cnt = 0;
/*
* Sync (all) the receive ring descriptors
* before accepting the packets they describe
*/
	DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORKERNEL);
	if (*rrp->prod_index_p >= rrp->desc.nslots) {
		/* the producer index itself looks corrupt: give up */
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
		return (NULL);
	}
	if (bge_check_dma_handle(bgep, rrp->desc.dma_hdl) != DDI_FM_OK) {
		/* DMA fault on the descriptor ring: flag it and bail out */
		bgep->bge_dma_error = B_TRUE;
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		return (NULL);
	}

	hw_rbd_p = DMA_VPTR(rrp->desc);
	head = NULL;
	tail = &head;
	slot = rrp->rx_next;
	/*
	 * Accept packets, up to the BGE_MAXPKT_RCVED batch limit, until
	 * we catch up with the producer index.
	 */
	while ((slot != *rrp->prod_index_p) &&	/* Note: volatile	*/
	    (recv_cnt < BGE_MAXPKT_RCVED)) {
		if ((mp = bge_receive_packet(bgep, &hw_rbd_p[slot], rrp))
		    != NULL) {
			*tail = mp;
			tail = &mp->b_next;
			recv_cnt++;
		}
		rrp->rx_next = slot = NEXT(slot, rrp->desc.nslots);
	}

	/*
	 * Tell the chip how far we've got (mailbox helper and register
	 * field assumed from the rest of the driver).
	 */
	bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
return (head);
}
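/*
 * Aside (illustration only): the loop above chains packets using the
 * head/indirect-tail idiom, which appends in O(1) and needs no special
 * case for the first packet:
 *
 *	mblk_t *head = NULL, **tail = &head;
 *	...
 *	*tail = mp;
 *	tail = &mp->b_next;
 *
 * The caller receives <head> and walks the chain via b_next.
 */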
/*
* XXX: Poll a particular ring. The implementation is incomplete.
* Once the ring interrupts are disabled, we need to do bge_recycle()
* for the ring as well, and re-enable the ring interrupt automatically
* if the poll doesn't find any packets in the ring. We need MSI-X
* interrupt support for this.
*
* The basic poll policy is that rings that are dealing with explicit
* flows (like TCP or some service) and are marked as such should
* have their own MSI-X interrupt per ring. bge_intr() should leave
* that interrupt disabled after an upcall. The ring is in poll mode.
* When a poll thread comes down and finds nothing, the MSI-X interrupt
* is automatically re-enabled. The squeue needs to deal with the race of
* a new interrupt firing and arriving before the poll thread returns.
*/
mblk_t *
bge_poll_ring(void *arg, int bytes_to_pickup)
{
	recv_ring_t *rrp = arg;	/* ring cookie registered with the MAC layer */
	bge_t *bgep = rrp->bgep;
	bge_rbd_t *hw_rbd_p;
	uint64_t slot;
	mblk_t *head;
	mblk_t **tail;
	mblk_t *mp;
	size_t sz = 0;

	mutex_enter(rrp->rx_lock);
/*
* Sync (all) the receive ring descriptors
* before accepting the packets they describe
*/
	DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORKERNEL);
	if (*rrp->prod_index_p >= rrp->desc.nslots) {
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
		mutex_exit(rrp->rx_lock);
		return (NULL);
	}
	if (bge_check_dma_handle(bgep, rrp->desc.dma_hdl) != DDI_FM_OK) {
		bgep->bge_dma_error = B_TRUE;
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		mutex_exit(rrp->rx_lock);
		return (NULL);
	}

	hw_rbd_p = DMA_VPTR(rrp->desc);
	head = NULL;
	tail = &head;
	slot = rrp->rx_next;
	/* Note: volatile */
	while ((slot != *rrp->prod_index_p) &&
	    (sz <= (size_t)bytes_to_pickup)) {
		if ((mp = bge_receive_packet(bgep, &hw_rbd_p[slot], rrp))
		    != NULL) {
			*tail = mp;
			sz += msgdsize(mp);
			tail = &mp->b_next;
		}
		rrp->rx_next = slot = NEXT(slot, rrp->desc.nslots);
	}

	/* Tell the chip how far we've consumed, then drop the ring lock */
	bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
	mutex_exit(rrp->rx_lock);
return (head);
}
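/*
 * A minimal sketch, assuming the MAC ring framework: the poll routine
 * above is what the ring registration code (elsewhere in the driver,
 * not in this file) would plug into the ring-info poll entry point,
 * roughly:
 *
 *	infop->mri_poll = bge_poll_ring;
 *
 * after which the framework calls it with the ring's private cookie
 * and a byte budget whenever the ring has been switched to poll mode.
 */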
/*
* Receive all packets in all rings.
*/
void bge_receive(bge_t *bgep, bge_status_t *bsp);
#pragma	no_inline(bge_receive)
void
bge_receive(bge_t *bgep, bge_status_t *bsp)
{
	recv_ring_t *rrp;
	uint64_t ring;
	mblk_t *mp;

	for (ring = 0; ring < bgep->chipid.rx_rings; ring++) {
		/*
		 * Start from the first ring.
		 */
		rrp = &bgep->recv[ring];
/*
* For each ring, (rrp->prod_index_p) points to the
* proper index within the status block (which has
* already been sync'd by the caller)
*/
		if (*rrp->prod_index_p == rrp->rx_next || rrp->poll_flag)
			continue;	/* no packets, or ring being polled */
		if (mutex_tryenter(rrp->rx_lock) == 0)
			continue;	/* already in process	*/
		mp = bge_receive_ring(bgep, rrp);
		mutex_exit(rrp->rx_lock);
		if (mp != NULL)
			mac_rx_ring(bgep->mh, rrp->ring_handle, mp,
			    rrp->ring_gen_num);
}
}
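/*
 * A minimal sketch of the expected caller (assumed; the interrupt
 * handler lives elsewhere in the driver): the status block must be
 * DMA-synced before bge_receive() looks at the producer indices it
 * contains, roughly:
 *
 *	DMA_SYNC(bgep->status_block, DDI_DMA_SYNC_FORKERNEL);
 *	bge_receive(bgep, DMA_VPTR(bgep->status_block));
 *
 * bge_receive() then takes each ring's <rx_lock> only long enough to
 * drain that ring, and passes the resulting chain upstream outside
 * the lock.
 */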