mac.c revision ee94b1c37a34b758315666dcd0bc7c46d1aea15c
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* MAC Services Module
*
* The GLDv3 framework locking - The MAC layer
* --------------------------------------------
*
* The MAC layer is central to the GLD framework and can provide the locking
* framework needed for itself and for the use of MAC clients. MAC end points
* are fairly disjoint and don't share a lot of state. So a coarse grained
* multi-threading scheme is used to serialize all the control operations on
* a per mac end point basis while allowing data threads to run concurrently.
*
* Control operations (set) that modify a mac end point are always serialized
* on a per mac end point basis; we have at most one such thread per mac end
* point at a time.
*
* All other operations that are not serialized are essentially multi-threaded.
* Examples are control operations (get), such as reading statistics, which
* may not need to read values atomically, and data threads sending or
* receiving data. Mostly these types of operations don't modify the control
* state. Any state these operations care about is protected using
* traditional locks.
*
* The perimeter only serializes serial operations. It does not imply there
* aren't any other concurrent operations. However a serialized operation may
* sometimes need to make sure it is the only thread. In this case it needs
* to use reference counting mechanisms to cv_wait until any current data
* threads are done.
*
* The mac layer itself does not hold any locks across a call to another layer.
* The perimeter is however held across a down call to the driver to make the
* whole control operation atomic with respect to other control operations.
* Also the data path and get type control operations may proceed concurrently.
* These operations synchronize with the single serial operation on a given mac
* end point using regular locks. The perimeter ensures that conflicting
* operations like say a mac_multicast_add and a mac_multicast_remove on the
* same mac end point don't interfere with each other and also ensures that the
* changes in the mac layer and the call to the underlying driver to say add a
* multicast address are done atomically without interference from a thread
* trying to delete the same address.
*
* For example, consider
* mac_multicst_add()
* {
* mac_perimeter_enter(); serialize all control operations
*
* grab list lock protect against access by data threads
* add to list
* drop list lock
*
* call driver's mi_multicst
*
* mac_perimeter_exit();
* }
*
* To lessen the number of serialization locks and simplify the lock hierarchy,
* we serialize all the control operations on a per mac end point by using a
* single serialization lock called the perimeter. We allow recursive entry into
* the perimeter to facilitate use of this mechanism by both the mac client and
* the MAC layer itself.
*
* MAC client means an entity that does an operation on a mac handle
* obtained from a mac_open/mac_client_open. Similarly MAC driver means
* an entity that does an operation on a mac handle obtained from a
* mac_register. An entity could be both client and driver but on different
* handles eg. aggr. and should only make the corresponding mac interface calls
* i.e. mac driver interface or mac client interface as appropriate for that
* mac handle.
*
* General rules.
* -------------
*
* R1. The lock order of upcall threads is naturally opposite to downcall
* threads. Hence upcalls must not hold any locks across layers for fear of
* recursive lock enter and lock order violation. This applies to all layers.
*
* R2. The perimeter is just another lock. Since it is held in the down
* direction, acquiring the perimeter in an upcall is prohibited as it would
* cause a deadlock. This applies to all layers.
*
* Note that upcalls that need to grab the mac perimeter (for example
* mac_notify upcalls) can still achieve that by posting the request to a
* thread, which can then grab all the required perimeters and locks in the
* right global order. Note that in the above example the mac layer itself
* won't grab the mac perimeter in the mac_notify upcall; instead the upcall
* to the client must do that. Please see the aggr code for an example.
*
* MAC client rules
* ----------------
*
* R3. A MAC client may use the MAC provided perimeter facility to serialize
* control operations on a per mac end point basis. It does this by acquiring
* and holding the perimeter across a sequence of calls to the mac layer.
* This ensures atomicity across the entire block of mac calls. In this
* model the MAC client must not hold any client locks across the calls to
* the mac layer. This model is the preferred solution.
*
* R4. However if a MAC client has a lot of global state across all mac end
* points the per mac end point serialization may not be sufficient. In this
* case the client may choose to use global locks or use its own serialization.
* To avoid deadlocks, these client layer locks held across the mac calls
* in the control path must never be acquired by the data path for the reason
* mentioned below.
*
* (Assume that a control operation that holds a client lock blocks in the
* mac layer waiting for upcall reference counts to drop to zero. If an upcall
* data thread that holds this reference count, tries to acquire the same
* client lock subsequently it will deadlock).
*
* A MAC client may follow either the R3 model or the R4 model, but can't
* mix both. In the former, the hierarchy is Perim -> client locks, but in
* the latter it is client locks -> Perim.
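*
* For example, under the R3 model a client can make a sequence of dependent
* mac calls atomic as follows (a sketch; argument lists are elided and no
* client locks are held across the calls):
*
*	mac_perim_enter_by_mh(mh, &mph);	serialize control operations
*	(void) mac_unicast_remove(...);
*	err = mac_unicast_add(...);		still atomic w.r.t. others
*	mac_perim_exit(mph);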
*
* R5. MAC clients must make MAC calls (excluding data calls) in a cv_wait'able
* context since they may block while trying to acquire the perimeter.
* In addition some calls may block waiting for upcall refcnts to come down to
* zero.
*
* R6. MAC clients must make sure that they are single threaded and all threads
* from the top (in particular data threads) have finished before calling
* mac_client_close. The MAC framework does not track the number of client
* threads using the mac client handle. Also mac clients must make sure
* they have undone all the control operations before calling mac_client_close.
* For example mac_unicast_remove/mac_multicast_remove must be called to undo
* the corresponding mac_unicast_add/mac_multicast_add.
*
* MAC framework rules
* -------------------
*
* R7. The mac layer itself must not hold any mac layer locks (except the mac
* perimeter) across a call to any other layer from the mac layer. The call to
* any other layer could be via mi_* entry points, classifier entry points into
* the driver or via upcall pointers into layers above. The mac perimeter may
* be acquired or held only in the down direction, e.g. when calling into
* a mi_* driver entry point to provide atomicity of the operation.
*
* R8. Since it is not guaranteed (see R14) that drivers won't hold locks across
* mac driver interfaces, the MAC layer must provide a cut out for control
* interfaces like upcall notifications and start them in a separate thread.
*
* R9. Note that locking order also implies a plumbing order. For example
* VNICs are allowed to be created over aggrs, but not vice-versa. An attempt
* to plumb in any other order must be failed at mac_open time, otherwise it
* could lead to deadlocks due to inverse locking order.
*
* R10. MAC driver interfaces must not block since the driver could call them
* in interrupt context.
*
* R11. Walkers must preferably not hold any locks while calling walker
* callbacks. Instead these can operate on reference counts. In simple
* callbacks it may be ok to hold a lock and call the callbacks, but this is
* harder to maintain in the general case of arbitrary callbacks.
*
* R12. The MAC layer must protect upcall notification callbacks using reference
* counts rather than holding locks across the callbacks.
*
* R13. Given the variety of drivers, it is preferable if the MAC layer can make
* sure that any pointers (such as mac ring pointers) it passes to the driver
* remain valid until mac unregister time. Currently the mac layer achieves
* this by using generation numbers for rings and freeing the mac rings only
* at unregister time. The MAC layer must provide a layer of indirection and
* must not expose underlying driver rings or driver data structures/pointers
* directly to MAC clients.
*
* MAC driver rules
* ----------------
*
* R14. It would be preferable if MAC drivers don't hold any locks across any
* mac call. However at a minimum they must not hold any locks across data
* upcalls. They must also make sure that all references to mac data structures
* are cleaned up and that it is single threaded at mac_unregister time.
*
* R15. Since MAC driver interfaces don't block, an action may be completed
* asynchronously in a separate thread, as is done for example when handling
* notifications. The driver must not assume that the action is complete
* when the call returns.
*
* R16. Drivers must maintain a generation number per Rx ring, and pass it
* back to mac_rx_ring(). They are expected to increment the generation
* number whenever the ring's stop routine is invoked.
* See the comments in mac_rx_ring().
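*
* A sketch of R16 from the driver's side (xx_ring_stop and xx_rx_intr are
* hypothetical driver routines; mac_rx_ring() is the real entry point):
*
*	xx_ring_stop(rxp)
*	{
*		rxp->xx_gen_num++;	start a new ring generation
*		...
*	}
*
*	xx_rx_intr(rxp)
*	{
*		mac_rx_ring(xx_mh, rxp->xx_ring_handle, mp_chain,
*		    rxp->xx_gen_num);
*	}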
*
* R17. Similarly, mi_stop is another synchronization point and the driver
* must ensure that all upcalls are done and there won't be any future
* upcalls before returning from mi_stop.
*
* The mi_* entry points are single threaded on a per mac end point basis.
*
* Lock and Perimeter hierarchy scenarios
* ---------------------------------------
*
* i_mac_impl_lock -> mi_rw_lock -> srs_lock -> s_ring_lock[i_mac_tx_srs_notify]
*
* ft_lock -> fe_lock [mac_flow_lookup]
*
* mi_rw_lock -> fe_lock [mac_bcast_send]
*
* srs_lock -> mac_bw_lock [mac_rx_srs_drain_bw]
*
* cpu_lock -> mac_srs_g_lock -> srs_lock -> s_ring_lock [mac_walk_srs_and_bind]
*
* i_dls_devnet_lock -> mac layer locks [dls_devnet_rename]
*
* Perimeters are ordered P1 -> P2 -> P3 from top to bottom in order of mac
* client to driver. In the case of clients that explicitly use the mac
* provided perimeter mechanism for their serialization, the hierarchy is
* Perimeter -> mac layer locks, since the client never holds any locks across
* the mac calls. In the case of clients that use their own locks the hierarchy
* is Client locks -> Mac Perim -> Mac layer locks. The client never explicitly
* calls mac_perim_enter/exit in this case.
*
* Subflow creation rules
* ---------------------------
* o In case of a user specified cpulist present on the underlying link and
*   flows, the flow's cpulist must be a subset of the underlying link's.
* o In case of a user specified fanout mode present on link and flow, the
*   subflow fanout count has to be less than or equal to that of the
*   underlying link. The cpu-bindings for the subflows will be a subset of
*   the underlying link's.
* o In case no cpulist is specified on either the underlying link or the
*   flow, the underlying link relies on a MAC tunable to provide out of box
*   fanout. The subflow will have no cpulist (the subflow will be unbound).
* o In case no cpulist is specified on the underlying link, a subflow can
*   carry either a user-specified cpulist or fanout count. The cpu-bindings
*   for the subflow will not adhere to the restriction that they need to be
*   a subset of the underlying link's.
* o In case the underlying link is carrying either a user specified cpulist
*   or fanout mode and the subflow is unspecified, the subflow will be
*   created unbound.
* o While creating unbound subflows, bandwidth mode changes attempt to
*   figure out a right fanout count. In such cases the fanout count will
*   override the unbound cpu-binding behavior.
* o In addition to this, while cycling between flow and link properties, we
*   impose a restriction that if a link has a subflow with user-specified
*   attributes, we will not allow changing the link property. The
*   administrator needs to reset all the user specified properties for the
*   subflows before attempting a link property change.
* Some of the above rules can be overridden by specifying additional command
* line options while creating or modifying link or subflow properties.
*/
#include <sys/id_space.h>
#include <sys/mac_provider.h>
#include <sys/mac_client_impl.h>
#include <sys/mac_soft_ring.h>
#include <sys/mac_impl.h>
#include <sys/mac_flow.h>
#include <sys/ddi_intr_impl.h>
#include <sys/vnic_impl.h>
#include <sys/exacct_impl.h>
#include <sys/ethernet.h>
static kmem_cache_t *mac_ring_cache;
static id_space_t *minor_ids;
static uint32_t minor_count;
/*
* Logging stuff. Perhaps mac_logging_interval could be broken into
* mac_flow_log_interval and mac_link_log_interval if we want to be
* able to schedule them differently.
*/
/* for debugging, see MAC_DBG_PRT() in mac_impl.h */
int mac_dbg = 0;
#define MACTYPE_KMODDIR "mac"
#define MACTYPE_HASHSZ 67
static mod_hash_t *i_mactype_hash;
/*
* i_mactype_lock synchronizes threads that obtain references to mactype_t
* structures through i_mactype_getplugin().
*/
static kmutex_t i_mactype_lock;
/*
* mac_tx_percpu_cnt
*
* Number of per cpu locks per mac_client_impl_t. Used by the transmit side
* in mac_tx to reduce lock contention. This is sized at boot time in mac_init.
* Per cpu locks may be disabled by setting mac_tx_percpu_cnt_max to 1.
*/
int mac_tx_percpu_cnt;
int mac_tx_percpu_cnt_max = 128;
/*
* Call back functions for the bridge module. These are guaranteed to be valid
* when holding a reference on a link or when holding mip->mi_bridge_lock and
* mi_bridge_link is non-NULL.
*/
static int i_mac_constructor(void *, void *, int);
static void i_mac_destructor(void *, void *);
static int i_mac_ring_ctor(void *, void *, int);
static void i_mac_ring_dtor(void *, void *);
void mac_tx_client_flush(mac_client_impl_t *);
void mac_tx_client_block(mac_client_impl_t *);
static int mac_start_group_and_rings(mac_group_t *);
static void mac_stop_group_and_rings(mac_group_t *);
/*
* Module initialization functions.
*/
void
mac_init(void)
{
/* Upper bound is mac_tx_percpu_cnt_max */
if (mac_tx_percpu_cnt < 1) {
/* Someone set mac_tx_percpu_cnt_max to 0 or less */
mac_tx_percpu_cnt = 1;
}
/*
* Make it of the form 2**N - 1 in the range
* [0 .. mac_tx_percpu_cnt_max - 1]
*/
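/*
* A sketch of that computation, assuming the ddi highbit() helper: round
* the count up to a power of two, then subtract one, e.g.
*
*	mac_tx_percpu_cnt = (1 << highbit(mac_tx_percpu_cnt - 1)) - 1;
*/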
i_mac_impl_count = 0;
/*
* Allocate an id space to manage minor numbers. The range of the
* space will be from MAC_MAX_MINOR+1 to MAC_PRIVATE_MINOR-1. This
* leaves half of the 32-bit minors available for driver private use.
*/
minor_count = 0;
/* Let's default to 20 seconds */
mac_logging_interval = 20;
mac_logging_timer = 0;
}
int
mac_fini(void)
{
if (i_mac_impl_count > 0 || minor_count > 0)
return (EBUSY);
return (0);
}
/*
* Initialize a GLDv3 driver's device ops. A driver that manages its own ops
* (e.g. softmac) may pass in a NULL ops argument.
*/
void
mac_init_ops(struct dev_ops *ops, const char *name)
{
major_t major = ddi_name_to_major((char *)name);
/*
* By returning on error below, we are not letting the driver continue
* in an undefined context. The mac_register() function will fail if
* DN_GLDV3_DRIVER isn't set.
*/
if (major == DDI_MAJOR_T_NONE)
return;
}
void
mac_fini_ops(struct dev_ops *ops)
{
}
/*ARGSUSED*/
static int
{
return (0);
}
/*ARGSUSED*/
static void
{
}
/* ARGSUSED */
static int
{
return (0);
}
/* ARGSUSED */
static void
{
}
/*
* Common functions to do mac callback addition and deletion. Currently this is
* used by promisc callbacks and notify callbacks. List addition and deletion
* need to take care of list walkers. List walkers in general, can't hold list
* locks and make upcall callbacks due to potential lock order and recursive
* reentry issues. Instead list walkers increment the list walker count to mark
* the presence of a walker thread. Addition can be carefully done to ensure
* that the list walker always sees either the old list or the new list.
* However the deletion can't be done while the walker is active, instead the
* deleting thread simply marks the entry as logically deleted. The last walker
* physically deletes and frees up the logically deleted entries when the walk
* is complete.
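*
* A sketch of the walker protocol described above (mcb_head and mcb_nextp
* are assumed names for the list head and link; MCB_CONDEMNED and the mcbi
* counters appear in the code below):
*
*	mcbi->mcbi_walker_cnt++;		mark the walker's presence
*	for (p = *mcb_head; p != NULL; p = p->mcb_nextp) {
*		if ((p->mcb_flags & MCB_CONDEMNED) == 0)
*			invoke the callback on p
*	}
*	if (--mcbi->mcbi_walker_cnt == 0 && mcbi->mcbi_del_cnt != 0)
*		unlink and free the logically deleted entries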
*/
void
{
mac_cb_t *p;
/* Verify it is not already in the list */
if (p == mcb_elem)
break;
}
/*
* Add it to the head of the callback list. The membar ensures that
* the following list pointer manipulations reach global visibility
* in exactly the program order below.
*/
}
/*
* Mark the entry as logically deleted. If there aren't any walkers, unlink
* it from the list. In either case return the corresponding status.
*/
{
mac_cb_t *p;
/*
* Search the callback list for the entry to be removed
*/
if (p == mcb_elem)
break;
}
/*
* If there are walkers just mark it as deleted and the last walker
* will remove from the list and free it.
*/
if (mcbi->mcbi_walker_cnt != 0) {
p->mcb_flags |= MCB_CONDEMNED;
mcbi->mcbi_del_cnt++;
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Wait for all pending callback removals to be completed
*/
void
{
while (mcbi->mcbi_del_cnt != 0) {
}
}
/*
* The last mac callback walker does the cleanup. Walk the list and unlink
* all the logically deleted entries and construct a temporary list of
* removed entries. Return the list of removed entries to the caller.
*/
mac_cb_t *
{
mac_cb_t *p;
int cnt = 0;
p = *pp;
rmlist = p;
cnt++;
continue;
}
}
mcbi->mcbi_del_cnt = 0;
return (rmlist);
}
{
/* Verify it is not already in the list */
return (B_TRUE);
}
return (B_FALSE);
}
{
return (found);
}
/* Free the list of removed callbacks */
void
{
}
}
/*
* The promisc callbacks are in 2 lists, one off the 'mip' and another off the
* 'mcip' threaded by mpi_mi_link and mpi_mci_link respectively. However there
* is only a single shared total walker count, and an entry can't be physically
* unlinked if a walker is active on either list. The last walker does this
* cleanup of logically deleted entries.
*/
void
{
/*
* Construct a temporary list of deleted callbacks by walking the
* the mi_promisc_list. Then for each entry in the temporary list,
* remove it from the mci_promisc_list and free the entry.
*/
&mip->mi_promisc_list);
}
}
void
{
/*
* Signal the notify thread even after mi_ref has become zero and
* mi_disabled is set. The synchronization with the notify thread
* happens in mac_unregister and that implies the driver must make
* sure it is single-threaded (with respect to mac calls) and that
* all pending mac calls have returned before it calls mac_unregister
*/
goto exit;
/*
* Guard against incorrect notifications. (Running a newer
* mac client against an older implementation?)
*/
goto exit;
exit:
}
/*
* Mac serialization primitives. Please see the block comment at the
* top of the file.
*/
void
{
/*
* This is a VNIC. Return the lower mac since that is what
* we want to serialize on.
*/
}
mip->mi_perim_ocnt++;
return;
}
mip->mi_perim_ocnt++;
#ifdef DEBUG
#endif
}
int
{
/*
* The vnic is a special case, since the serialization is done based
* on the lower mac. If the lower mac is busy, it does not imply the
* vnic can't be unregistered. But in the case of other drivers,
* a busy perimeter or open mac handles implies that the mac is busy
* and can't be unregistered.
*/
return (0);
}
return (EBUSY);
}
mip->mi_perim_ocnt++;
return (0);
}
void
{
/*
* This is a VNIC. Return the lower mac since that is what
* we want to serialize on.
*/
}
if (--mip->mi_perim_ocnt == 0) {
}
}
/*
* Returns whether the current thread holds the mac perimeter. Used in making
* assertions.
*/
{
/*
* This is a VNIC. Return the lower mac since that is what
* we want to serialize on.
*/
}
}
/*
* mac client interfaces to enter the mac perimeter of a mac end point, given
* its mac handle, or macname or linkid.
*/
void
{
/*
* The mac_perim_handle_t returned encodes the 'mip' and whether a
* mac_open has been done internally while entering the perimeter.
* This information is used in mac_perim_exit
*/
}
int
{
int err;
return (err);
return (0);
}
int
{
int err;
return (err);
return (0);
}
void
{
if (need_close)
}
int
{
int err;
/*
* Check the device name length to make sure it won't overflow our
* buffer.
*/
return (EINVAL);
/*
* Look up its entry in the global hash table.
*/
(mod_hash_val_t *)&mip);
if (err != 0) {
return (ENOENT);
}
return (ENOENT);
}
return (EBUSY);
}
return (0);
}
void
{
}
}
/*
* Private GLDv3 function to start a MAC instance.
*/
int
{
int err = 0;
/*
* Check whether the device is already started.
*/
/*
* Start the device.
*/
if (err != 0) {
return (err);
}
/*
* Start the default tx ring.
*/
if (err != 0) {
return (err);
}
}
/*
* Start the default ring, since it will be needed
* to receive broadcast and multicast traffic for
* both primary and non-primary MAC clients.
*/
if (err != 0) {
}
return (err);
}
}
}
return (err);
}
/*
* Private GLDv3 function to stop a MAC instance.
*/
void
{
/*
* Check whether the device is still needed.
*/
/*
* There should be no more active clients since the
* MAC is being stopped. Stop the default RX group
* and transition it back to registered state.
*/
/*
* When clients are torn down, the groups
* are released via mac_release_rx_group which
* knows that the default group is always in
* started mode since broadcast uses it. So
* we can assert that there are no clients
* (since mac_bcast_add doesn't register itself
* as a client) and the group is in SHARED state.
*/
mip->mi_nactiveclients == 0);
}
}
/*
* Stop the device.
*/
}
}
int
{
int err = 0;
if (on) {
/*
* Enable promiscuous mode on the device if not yet enabled.
*/
if (mip->mi_devpromisc++ == 0) {
if (err != 0) {
mip->mi_devpromisc--;
return (err);
}
}
} else {
if (mip->mi_devpromisc == 0)
return (EPROTO);
/*
* Disable promiscuous mode on the device if this is the last
* enabling.
*/
if (--mip->mi_devpromisc == 0) {
if (err != 0) {
mip->mi_devpromisc++;
return (err);
}
}
}
return (0);
}
/*
* The promiscuity state can change any time. If the caller needs to take
* actions that are atomic with the promiscuity state, then the caller needs
* to bracket the entire sequence with mac_perim_enter/exit.
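*
* For example (a sketch; argument lists are elided):
*
*	mac_perim_enter_by_mh(mh, &mph);
*	if (!mac_promisc_get(...))
*		(void) mac_promisc_set(...);	atomic w.r.t. control ops
*	... actions that depend on the promiscuity state ...
*	mac_perim_exit(mph);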
*/
{
/*
* Return the current promiscuity.
*/
return (mip->mi_devpromisc != 0);
}
/*
* Invoked at MAC instance attach time to initialize the list
* of factory MAC addresses supported by a MAC instance. This function
* builds a local cache in the mac_impl_t for the MAC addresses
* supported by the underlying hardware. The MAC clients themselves
* use the mac_addr_factory*() functions to query and reserve
* factory MAC addresses.
*/
void
{
int i;
/*
* First round to see how many factory MAC addresses are available.
*/
/*
* The MAC instance doesn't support multiple factory
* MAC addresses, we're done here.
*/
return;
}
/*
* Allocate the space and get all the factory addresses.
*/
sizeof (mac_factory_addr_t), KM_SLEEP);
}
}
void
{
return;
}
sizeof (mac_factory_addr_t));
mip->mi_factory_addr_num = 0;
}
/*
* Reserve a factory MAC address. If *slot is set to -1, the function
* attempts to reserve any of the available factory MAC addresses and
* returns the reserved slot id. If no slots are available, the function
* returns ENOSPC. If *slot is not set to -1, the function reserves
* the specified slot if it is available, or returns EBUSY if the slot
* is already used. Returns ENOTSUP if the underlying MAC does not
* support multiple factory addresses. If the slot number is not -1 but
* is invalid, returns EINVAL.
*/
int
{
int i, ret = 0;
/*
* Protect against concurrent readers that may need a self-consistent
* view of the factory addresses
*/
if (mip->mi_factory_addr_num == 0) {
goto bail;
}
if (*slot != -1) {
/* check the specified slot */
goto bail;
}
goto bail;
}
} else {
/* pick the next available slot */
for (i = 0; i < mip->mi_factory_addr_num; i++) {
break;
}
if (i == mip->mi_factory_addr_num) {
goto bail;
}
*slot = i+1;
}
bail:
return (ret);
}
/*
* Release the specified factory MAC address slot.
*/
void
{
/*
* Protect against concurrent readers that may need a self-consistent
* view of the factory addresses
*/
}
/*
* Stores in mac_addr the value of the specified MAC address. Returns
* 0 on success, or EINVAL if the slot number is not valid for the MAC.
* The caller must provide a string of at least MAXNAMELEN bytes.
*/
void
{
/*
* Readers need to hold mi_rw_lock. Writers need to hold mac perimeter
* and mi_rw_lock
*/
}
if (in_use_arg != NULL)
*in_use_arg = in_use;
}
/*
* Returns the number of factory MAC addresses (in addition to the
* primary MAC address), 0 if the underlying MAC doesn't support
* that feature.
*/
{
return (mip->mi_factory_addr_num);
}
void
{
}
/*
* The following mac_hwrings_xxx() functions are private mac client functions
* used by the aggr driver to access and control the underlying HW Rx group
* and rings. In this case, the aggr driver has exclusive control of the
* underlying HW Rx group and rings; it can, for example, add or remove MAC
* addresses, or set up the Rx callback.
*/
/* ARGSUSED */
static void
{
void *arg1;
}
/*
* This function is called to get the list of HW rings that are reserved by
* an exclusive mac client.
*
* Return value: the number of HW rings.
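*
* A sketch of the intended use by a client such as aggr (the hwgh/hwrh
* names and the MAX_RINGS_PER_GROUP bound are assumptions borrowed from
* that driver; trailing arguments are elided):
*
*	mac_group_handle_t	hwgh;
*	mac_ring_handle_t	hwrh[MAX_RINGS_PER_GROUP];
*	int			cnt, i;
*
*	cnt = mac_hwrings_get(mch, &hwgh, hwrh, MAC_RING_TYPE_RX);
*	for (i = 0; i < cnt; i++)
*		mac_hwring_setup(hwrh[i], ...);	set up the Rx callback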
*/
int
{
int cnt = 0;
switch (rtype) {
case MAC_RING_TYPE_RX: {
/*
* The mac client did not reserve any RX group; return directly.
* This is probably because the underlying MAC does not support
* any groups.
*/
return (0);
/*
* This group must be reserved by this mac client.
*/
(mch == (mac_client_handle_t)
(MAC_RX_GROUP_ONLY_CLIENT(grp))));
}
return (cnt);
}
case MAC_RING_TYPE_TX: {
return (cnt);
}
default:
return (-1);
}
}
/*
* Set up the Rx callback of the mac client which exclusively controls the HW rings.
*/
void
{
}
void
{
}
int
{
}
int
{
}
int
{
return (0);
}
void
{
}
mblk_t *
{
}
/*
* Send packets through the selected tx ring.
*/
mblk_t *
{
}
int
{
}
int
{
}
/*
*/
void
{
/*
* If there is no change in the group state, just return.
*/
return;
switch (state) {
case MAC_GROUP_STATE_RESERVED:
/*
* Successfully reserved the group.
*
* Given that there is an exclusive client controlling this
* group, we enable the group level polling when available,
* so that SRSs get to turn on/off individual rings they are
* assigned to.
*/
break;
case MAC_GROUP_STATE_SHARED:
/*
* Set all rings of this group to software classified.
* If the group has an overriding interrupt, then re-enable it.
*/
/* The ring is not available for reservations any more */
break;
/* Also callable from mac_register, perim is not held */
break;
default:
break;
}
}
/*
* Quiesce future hardware classified packets for the specified Rx ring
*/
static void
{
}
/*
* Please see mac_tx for details about the per cpu locking scheme
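*
* A sketch of the scheme (the per-cpu lock/refcnt array and its field
* names are assumptions):
*
*	lock_all:	for (i = 0; i <= mac_tx_percpu_cnt; i++)
*				mutex_enter(&mcip->mci_tx_pcpu[i].pcpu_tx_lock);
*	unlock_all:	for (i = mac_tx_percpu_cnt; i >= 0; i--)
*				mutex_exit(&mcip->mci_tx_pcpu[i].pcpu_tx_lock);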
*/
static void
{
int i;
for (i = 0; i <= mac_tx_percpu_cnt; i++)
}
static void
{
int i;
for (i = mac_tx_percpu_cnt; i >= 0; i--)
}
static void
{
int i;
for (i = mac_tx_percpu_cnt; i > 0; i--)
}
static int
{
int i;
int refcnt = 0;
for (i = 0; i <= mac_tx_percpu_cnt; i++)
return (refcnt);
}
/*
* Stop future Tx packets coming down from the client in preparation for
* quiescing the Tx side. This is needed for dynamic reclaim and reassignment
* of rings between clients
*/
void
{
while (mac_tx_sum_refcnt(mcip) != 0) {
}
}
void
{
/*
* We may fail to disable flow control for the last MAC_NOTE_TX
* notification because the MAC client is quiesced. Send the
* notification again.
*/
}
/*
* Wait for an SRS to quiesce. The SRS worker will signal us when the
* quiesce is done.
*/
static void
{
}
/*
* Quiescing an Rx SRS is achieved by the following sequence. The protocol
* works bottom up by cutting off packet flow from the bottommost point in the
* mac, then the SRS, and then the soft rings. There are 2 use cases of this
* mechanism. One is a temporary quiesce of the SRS, such as say while changing
* the Rx callbacks. Another use case is Rx SRS teardown. The two cases are
* distinguished by the QUIESCE vs CONDEMNED values used
* for the SRS and MR flags. In the former case the threads pause waiting for
* a restart, while in the latter case the threads exit. The Tx SRS teardown
* is also mostly similar to the above.
*
* 1. Stop future hardware classified packets at the lowest level in the mac.
* Remove any hardware classification rule (CONDEMNED case) and mark the
* rings as CONDEMNED or QUIESCE as appropriate. This prevents the mr_refcnt
* from increasing. Upcalls from the driver that come through hardware
* classification will be dropped in mac_rx from now on. Then we wait for
* the mr_refcnt to drop to zero. When the mr_refcnt reaches zero we are
* sure there aren't any upcall threads from the driver through hardware
* classification. In the case of SRS teardown we also remove the
* classification rule in the driver.
*
* 2. Stop future software classified packets by marking the flow entry with
* FE_QUIESCE or FE_CONDEMNED as appropriate which prevents the refcnt from
* increasing. We also remove the flow entry from the table in the latter
* case. Then wait for the fe_refcnt to reach an appropriate quiescent value
* that indicates there aren't any active threads using that flow entry.
*
* 3. Quiesce the SRS and softrings by signaling the SRS. The SRS poll thread,
* SRS worker thread, and the soft ring threads are quiesced in sequence
* with the SRS worker thread serving as a master controller. This
* mechanism is explained in mac_srs_worker_quiesce().
*
* The restart mechanism to reactivate the SRS and softrings is explained
* in mac_srs_worker_restart(). Here we just signal the SRS worker to start the
* restart sequence.
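*
* A sketch of a temporary quiesce/restart cycle as used by callers in this
* file:
*
*	mac_rx_srs_quiesce(srs, SRS_QUIESCE);	pause poll, worker, soft rings
*	... change the Rx callbacks, etc. ...
*	mac_rx_srs_restart(srs);		signal the worker to restart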
*/
void
{
if (srs_quiesce_flag == SRS_CONDEMNED) {
} else {
}
} else {
/*
* SRS is driven by software classification. In case
* of CONDEMNED, the top level teardown functions will
* deal with flow removal.
*/
if (srs_quiesce_flag != SRS_CONDEMNED) {
}
}
/*
* Signal the SRS to quiesce itself, and then cv_wait for the
* SRS quiesce to complete. The SRS worker thread will wake us
* up when the quiesce is complete
*/
}
/*
* Remove an SRS.
*/
void
{
int i;
/*
* Locate and remove our entry in the fe_rx_srs[] array, and
* adjust the fe_rx_srs array entries and array count by
* moving the last entry into the vacated spot.
*/
for (i = 0; i < flent->fe_rx_srs_cnt; i++) {
break;
}
}
flent->fe_rx_srs_cnt--;
}
static void
{
}
void
{
mac_ring_t *mr;
/*
* This handles a change in the number of SRSs between the quiesce
* and restart operation of a flow.
*/
if (!SRS_QUIESCED(srs))
return;
/*
* Signal the SRS to restart itself. Wait for the restart to complete
* Note that we only restart the SRS if it is not marked as
* permanently quiesced.
*/
if (!SRS_QUIESCED_PERMANENT(srs)) {
}
/* Finally clear the flags to let the packets in */
/* In case the ring was stopped, safely restart it */
(void) mac_start_ring(mr);
} else {
}
}
/*
* Temporary quiesce of a flow and associated Rx SRS.
* Please see block comment above mac_rx_classify_flow_rem.
*/
/* ARGSUSED */
int
{
int i;
for (i = 0; i < flent->fe_rx_srs_cnt; i++) {
}
return (0);
}
/*
* Restart a flow and associated Rx SRS that has been quiesced temporarily
* Please see block comment above mac_rx_classify_flow_rem
*/
/* ARGSUSED */
int
{
int i;
for (i = 0; i < flent->fe_rx_srs_cnt; i++)
return (0);
}
void
{
int i;
return;
for (i = 0; i < flent->fe_rx_srs_cnt; i++) {
if (on)
else
}
}
void
{
if (MCIP_DATAPATH_SETUP(mcip)) {
NULL);
}
}
void
{
if (MCIP_DATAPATH_SETUP(mcip)) {
}
}
/*
* This function only quiesces the Tx SRS and softring worker threads. Callers
* need to make sure that there aren't any mac client threads doing current or
* future transmits in the mac before calling this function.
*/
void
{
/*
* Signal the SRS to quiesce itself, and then cv_wait for the
* SRS quiesce to complete. The SRS worker thread will wake us
* up when the quiesce is complete
*/
}
void
{
/*
* Resizing the fanout could result in creation of new SRSs.
* They may not necessarily be in the quiesced state, in which
* case they need not be restarted.
*/
if (!SRS_QUIESCED(srs))
return;
}
/*
* Temporary quiesce of a flow and associated Rx SRS.
* Please see block comment above mac_rx_srs_quiesce
*/
/* ARGSUSED */
int
{
/*
* The fe_tx_srs is null for a subflow on an interface that is
* not plumbed
*/
return (0);
}
/* ARGSUSED */
int
{
/*
* The fe_tx_srs is null for a subflow on an interface that is
* not plumbed
*/
return (0);
}
void
{
}
}
void
{
}
}
void
{
}
void
{
}
void
{
}
/*
* Allocate a minor number.
*/
{
/*
* Grab a value from the arena.
*/
if (sleep)
else
if (minor == 0) {
return (0);
}
return (minor);
}
/*
* Release a previously allocated minor number.
*/
void
{
/*
* Return the value to the arena.
*/
}
{
}
/*
* Prevent any new opens of this mac in preparation for unregister
*/
int
{
/* Already disabled, return success */
return (0);
}
/*
* See if there are any other references to this mac_t (e.g., VLANs).
* If so return failure. If all the other checks below pass, then
* set mi_disabled atomically under the i_mac_impl_lock to prevent
* any new VLANs from being created or new mac client opens of this
* mac end point.
*/
return (EBUSY);
}
/*
* mac clients must delete all multicast groups they join before
* closing. bcast groups are reference counted; the last client
* to delete the group will wait till the group is physically
* deleted. Since all clients have closed this mac end point
* mi_bcast_ngrps must be zero at this point.
*/
/*
* Don't let go of this if it has some flows.
* All other code guarantees no flows are added to a disabled
* mac, therefore it is sufficient to check for the flow table
* only here.
*/
return (ENOTEMPTY);
}
return (0);
}
int
{
int err;
return (err);
return (err);
}
int
{
int err;
/*
* Clean up notification thread and wait for it to exit.
*/
if (err == 0)
return (err);
}
/*
* Called when the MAC instance has a non-empty flow table, to de-multiplex
* incoming packets to the right flow.
* The MAC's rw lock is assumed held as a READER.
*/
/* ARGSUSED */
static mblk_t *
{
int err;
/*
* If the mac is a port of an aggregation, pass FLOW_IGNORE_VLAN
* to mac_flow_lookup() so that the VLAN packets can be successfully
* passed to the non-VLAN aggregation flows.
*
* Note that there is possibly a race between this and
* mac_unicast_remove/add() and VLAN packets could be incorrectly
* classified to non-VLAN flows of non-aggregation mac clients. These
* VLAN packets will be then filtered out by the mac module.
*/
if (err != 0) {
/* no registered receive function */
return (mp);
} else {
/*
* This flent might just be an additional one on the MAC client,
* i.e. for classification purposes (different fdesc), however
* the resources, SRS et al., are in the mci_flent, so if
* this isn't the mci_flent, we need to get it.
*/
if (err != 0)
return (mp);
}
B_FALSE);
}
return (NULL);
}
mblk_t *
{
/*
* We walk the chain and attempt to classify each packet.
* The packets that couldn't be classified will be returned
* back to the caller.
*/
}
}
return (list);
}
static int
{
return (0);
}
void
{
/*
* After grabbing the mi_rw_lock, the list of clients can't change.
* If there are any clients mi_disabled must be B_FALSE and can't
* get set since there are clients. If there aren't any clients we
* don't do anything. In any case the mip has to be valid. The driver
* must make sure that it goes single threaded (with respect to mac
* calls) and wait for all pending mac calls to finish before calling
* mac_unregister.
*/
return;
}
/*
* Get MAC tx srs from walking mac_client_handle list.
*/
}
}
/* ARGSUSED */
void
{
/*
* If no specific refresh function was given then default to the
* driver's m_multicst entry point.
*/
}
}
void
{
/*
* If no specific refresh function was given then default to the
* driver's m_promisc entry point.
*/
}
/*
* Call the refresh function with the current promiscuity.
*/
}
/*
* The mac client requests that the mac not change its margin size to
* be less than the specified value. If "current" is B_TRUE, then the client
* requests the mac not to change its margin size to be smaller than the
* current size. Further, return the current margin size value in this case.
*
* We keep every requested size in an ordered list from largest to smallest.
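*
* A sketch of the ordered insertion (mmr_nextp is assumed to be the list
* link and mi_mmrp the list head; both names are assumptions):
*
*	for (pp = &mip->mi_mmrp; (p = *pp) != NULL; pp = &p->mmr_nextp) {
*		if (p->mmr_margin == *marginp) {
*			p->mmr_ref++;		already present
*			goto done;
*		}
*		if (p->mmr_margin < *marginp)
*			break;			insertion point found
*	}
*	p = kmem_zalloc(sizeof (mac_margin_req_t), KM_SLEEP);
*	p->mmr_margin = *marginp;
*	p->mmr_ref++;
*	p->mmr_nextp = *pp;
*	*pp = p;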
*/
int
{
mac_margin_req_t **pp, *p;
int err = 0;
if (current)
/*
* If the current margin value cannot satisfy the margin requested,
* return ENOTSUP directly.
*/
goto done;
}
/*
* Check whether the given margin is already in the list. If so,
* bump the reference count.
*/
if (p->mmr_margin == *marginp) {
/*
* The margin requested is already in the list,
* so just bump the reference count.
*/
p->mmr_ref++;
goto done;
}
if (p->mmr_margin < *marginp)
break;
}
p->mmr_margin = *marginp;
p->mmr_ref++;
*pp = p;
done:
return (err);
}
/*
* The mac client requests to cancel its previous mac_margin_add() request.
* We remove the requested margin size from the list.
*/
int
{
mac_margin_req_t **pp, *p;
int err = 0;
/*
* Find the entry in the list for the given margin.
*/
if (p->mmr_margin == margin) {
if (--p->mmr_ref == 0)
break;
/*
* There is still a reference to this address so
* there's nothing more to do.
*/
goto done;
}
}
/*
* We did not find an entry for the given margin.
*/
if (p == NULL) {
goto done;
}
/*
* Remove it from the list.
*/
kmem_free(p, sizeof (mac_margin_req_t));
done:
return (err);
}
{
uint32_t margin_needed = 0;
if (margin_needed <= margin)
if (margin_needed <= margin)
return (margin_needed <= margin);
}
/*
* MAC Type Plugin functions.
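*
* A sketch of how a plugin registers itself (mactype_alloc/register/free
* are the real interfaces; ether_type_ops is a hypothetical plugin ops
* vector and the ether-specific values are illustrative):
*
*	mtrp = mactype_alloc(MACTYPE_VERSION);
*	mtrp->mtr_ident = MAC_PLUGIN_IDENT_ETHER;
*	mtrp->mtr_mactype = DL_ETHER;
*	mtrp->mtr_addrlen = ETHERADDRL;
*	mtrp->mtr_ops = &ether_type_ops;
*	if (mactype_register(mtrp) != 0)
*		mactype_free(mtrp);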
*/
static mactype_t *
i_mactype_getplugin(const char *pname)
{
(mod_hash_val_t *)&mtype) != 0) {
if (!tried_modload) {
/*
* If the plugin has not yet been loaded, then
* attempt to load it now. If modload() succeeds,
* the plugin should have registered using
* mactype_register(), in which case we can go back
* and attempt to find it again.
*/
goto find_registered_mactype;
}
}
} else {
/*
* Note that there's no danger that the plugin we've loaded
* could be unloaded between the modload() step and the
* reference count bump here, as we're holding
* i_mactype_lock, which mactype_unregister() also holds.
*/
}
return (mtype);
}
{
/*
* Make sure there isn't a version mismatch between the plugin and
* the framework. In the future, if multiple versions are
* supported, this check could become more sophisticated.
*/
if (mactype_version != MACTYPE_VERSION)
return (NULL);
return (mtrp);
}
void
{
}
int
{
/* Do some sanity checking before we register this MAC type. */
return (EINVAL);
/*
* Verify that all mandatory callbacks are set in the ops
* vector.
*/
return (EINVAL);
}
mtrp->mtr_addrlen);
}
return (EEXIST);
}
return (0);
}
int
mactype_unregister(const char *ident)
{
int err;
/*
* Let's not allow MAC drivers to use this plugin while we're
* trying to unregister it. Holding i_mactype_lock also prevents a
* plugin from unregistering while a MAC driver is attempting to
* hold a reference to it in i_mactype_getplugin().
*/
(mod_hash_val_t *)&mtp)) != 0) {
/* A plugin is trying to unregister, but it never registered. */
goto done;
}
goto done;
}
if (err != 0) {
/* This should never happen, thus the ASSERT() above. */
goto done;
}
done:
return (err);
}
/*
* mac_set_prop() sets mac or hardware driver properties:
* MAC resource properties include maxbw, priority, and cpu binding list.
* Driver properties are private properties to the hardware, such as mtu
* and speed. There's one other MAC property -- the PVID.
* If the property is a driver property, mac_set_prop() calls driver's callback
* function to set it.
* If the property is a mac resource property, mac_set_prop() invokes
* mac_set_resources() which will cache the property value in mac_impl_t and
* may call mac_client_set_resource() to update property value of the primary
* mac client, if it exists.
*/
int
{
case MAC_PROP_MAXBW:
case MAC_PROP_PRIO:
case MAC_PROP_PROTECT:
case MAC_PROP_BIND_CPU: {
/* If it is a mac property, call mac_set_resources() */
if (valsize < sizeof (mac_resource_props_t))
return (EINVAL);
break;
}
case MAC_PROP_PVID:
return (EINVAL);
break;
case MAC_PROP_MTU: {
return (EINVAL);
break;
}
case MAC_PROP_LLIMIT:
case MAC_PROP_LDECAY: {
return (EINVAL);
return (EINVAL);
else
err = 0;
break;
}
default:
/* For other driver properties, call driver's callback */
}
}
return (err);
}
/*
* mac_get_prop() gets mac or hardware driver properties.
*
* If the property is a driver property, mac_get_prop() calls driver's callback
* function to get it.
* If the property is a mac property, mac_get_prop() invokes mac_get_resources()
* which returns the cached value in mac_impl_t.
*/
int
{
case MAC_PROP_MAXBW:
case MAC_PROP_PRIO:
case MAC_PROP_PROTECT:
case MAC_PROP_BIND_CPU: {
/* If mac property, read from cache */
if (valsize < sizeof (mac_resource_props_t))
return (EINVAL);
return (0);
}
case MAC_PROP_PVID:
return (EINVAL);
return (0);
case MAC_PROP_LLIMIT:
case MAC_PROP_LDECAY:
return (EINVAL);
else
return (0);
case MAC_PROP_MTU: {
if (valsize < sizeof (mac_propval_range_t))
return (EINVAL);
if (is_getprop) {
}
/*
* If the driver doesn't have *_m_getprop defined or
* if the driver doesn't support setting MTU then
* return the CURRENT value as POSSIBLE value.
*/
err = 0;
}
return (err);
}
return (EINVAL);
*perm = MAC_PROP_PERM_RW;
} else {
}
return (0);
} else {
return (0);
}
/*
* ask driver for its default.
*/
break;
}
}
case MAC_PROP_STATUS:
if (valsize < sizeof (link_state))
return (EINVAL);
return (0);
default:
break;
}
/* If driver property, request from driver */
if (is_getprop) {
}
return (err);
}
int
{
return (0);
}
void
{
return;
}
void
{
return;
}
void
{
}
mip->mi_priv_prop_count = 0;
}
/*
* mac_ring_t 'mr' macros. Some rogue drivers may access the ring structure
* (by invoking mac_rx()) even after processing mac_stop_ring(). In such
* cases, if MAC freed the ring structure after mac_stop_ring(), any
* illegal access to the ring structure coming from the driver will panic
* the system. In order to protect the system from such inadvertent access,
* we maintain a cache of rings in the mac_impl_t after they get freed up.
* When packets are received on freed rings, MAC (through the generation
* count mechanism) will drop such packets.
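*
* A sketch of the generation check on the receive path (mr_gen_num is the
* field incremented in the ring stop path below; the exact checks are
* those of mac_rx_ring()):
*
*	if (mr->mr_state != MR_INUSE || mr->mr_gen_num != mr_gen_num_arg) {
*		freemsgchain(mp_chain);		stale ring, drop the chain
*		return;
*	}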
*/
static mac_ring_t *
{
} else {
}
} else {
}
return (ring);
}
static void
{
} else {
}
}
static void
{
}
}
int
{
int rv = 0;
return (rv);
}
void
{
/*
* Increment the ring generation number for this ring.
*/
ring->mr_gen_num++;
}
int
{
int rv = 0;
return (rv);
}
void
{
}
/*
* Called from mac_start() on the default Rx group. Broadcast and multicast
* packets are received only on the default group. Hence the default group
* needs to be up even if the primary client is not up, for the other groups
* to be functional. We do this by calling this function at mac_start time
* itself. However the broadcast packets that are received can't make their
* way beyond mac_rx until a mac client creates a broadcast flow.
*/
static int
{
int rv = 0;
return (rv);
goto error;
}
return (0);
return (rv);
}
/* Called from mac_stop on the default Rx group */
static void
{
}
}
}
static mac_ring_t *
{
/* Prepare basic information of ring */
/* Insert the new ring to the list. */
/* Zero to reuse the info data structure */
/* Query ring information from driver */
/* Update ring's status */
/* Update the ring count of the group */
group->mrg_cur_count++;
return (ring);
}
/*
* Rings are chained together for easy regrouping.
*/
static void
{
int index;
/*
* Initialize all ring members of this group. Size of zero will not
* enter the loop, so it's safe for initializing an empty group.
*/
}
int
{
uint_t group_free = 0;
int g, err = 0;
switch (rtype) {
case MAC_RING_TYPE_RX:
break;
case MAC_RING_TYPE_TX:
break;
default:
}
return (0);
/*
* Allocate a contiguous buffer for all groups.
*/
KM_SLEEP);
/*
* Get all ring groups if any, and get their ring members
* if any.
*/
/* Prepare basic information of the group */
/* Zero to reuse the info data structure */
/* Query group information from driver */
switch (cap_rings->mr_group_type) {
case MAC_GROUP_TYPE_DYNAMIC:
goto bail;
}
switch (rtype) {
case MAC_RING_TYPE_RX:
/*
* The first RX group must have a non-zero
* number of rings, and the following groups
* must have zero rings.
*/
if (g == 0 && group_info.mgi_count == 0) {
goto bail;
}
if (g > 0 && group_info.mgi_count != 0) {
int, g, int, group_info.mgi_count);
goto bail;
}
break;
case MAC_RING_TYPE_TX:
/*
* All TX ring groups must have zero rings.
*/
if (group_info.mgi_count != 0) {
int, g, int, group_info.mgi_count);
goto bail;
}
break;
}
break;
case MAC_GROUP_TYPE_STATIC:
/*
* Note that an empty group is allowed, e.g., an aggr
* would start with an empty group.
*/
break;
default:
/* unknown group type */
int, cap_rings->mr_group_type);
goto bail;
}
/*
* Driver must register group->mgi_addmac/remmac() for rx groups
* to support multiple MAC addresses.
*/
if (rtype == MAC_RING_TYPE_RX) {
goto bail;
}
/* Cache driver-supplied information */
/* Update the group's status and group count. */
group_free++;
group->mrg_cur_count = 0;
/* The current group size should be equal to default value */
}
/* Build up a dummy group for free resources as a pool */
/* Prepare basic information of the group */
/*
* If there are ungrouped rings, allocate a contiguous buffer for
* remaining resources.
*/
if (ring_left != 0) {
group->mrg_cur_count = 0;
/* The current group size should be equal to ring_left */
ring_left = 0;
/* Update this group's status */
} else
bail:
/* Cache other important information to finalize the initialization */
switch (rtype) {
case MAC_RING_TYPE_RX:
break;
case MAC_RING_TYPE_TX:
/*
* Ring 0 is used as the default one and it could be assigned
* to a client as well.
*/
break;
default:
}
if (err != 0)
return (err);
}
/*
* Called to free all ring groups of a particular type. It is assumed that all
* groups have been released by the clients.
*/
void
{
switch (rtype) {
case MAC_RING_TYPE_RX:
return;
mip->mi_rx_group_count = 0;
break;
case MAC_RING_TYPE_TX:
return;
mip->mi_tx_group_count = 0;
mip->mi_tx_group_free = 0;
break;
default:
}
if (group->mrg_cur_count == 0)
continue;
}
}
/* Free all the cached rings */
/* Free the block of group data structures */
}
/*
* Associate a MAC address with a receive group.
*
* The return value of this function should always be checked properly, because
* any type of failure could cause unexpected results. A MAC address can be
* added to or removed from a group only after the group has been reserved.
* Ideally, a successful reservation always leads to calling mac_group_addmac()
* to steer desired traffic. Failure to add a unicast MAC address doesn't
* always imply that the group is functioning abnormally.
*
* Currently this function is called everywhere, and it reflects assumptions
* about MAC addresses in the implementation. CR 6735196.
*/
int
{
}
/*
* Remove the association between MAC address and receive group.
*/
int
{
}
/*
* Release a ring in use by marking it MR_FREE.
* Any other client may reserve it for its use.
*/
void
{
/*
* Default tx ring will be released by mac_stop().
*/
return;
}
/*
* This is the entry point for packets transmitted through the bridging code.
* If no bridge is in place, MAC_RING_TX transmits using the tx ring. The 'rh'
* pointer may be NULL to select the default ring.
*/
mblk_t *
{
/*
* Once we take a reference on the bridge link, the bridge
* module itself can't unload, so the callback pointers are
* stable.
*/
} else {
}
return (mp);
}
/*
* Find a ring from its index.
*/
{
break;
return (ring);
}
/*
* Add a ring to an existing group.
*
* The ring must be either passed directly (for example if the ring
* movement is initiated by the framework), or specified through a driver
* index (for example when the ring is added by the driver).
*
* The caller needs to call mac_perim_enter() before calling this function.
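*
* A sketch of the two call styles (assuming the i_mac_group_add_ring()
* argument order of group, ring, index):
*
*	(void) i_mac_group_add_ring(group, ring, 0);	  by mac_ring_t pointer
*	(void) i_mac_group_add_ring(group, NULL, index);  by driver ring index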
*/
int
{
int ret = 0;
case MAC_RING_TYPE_RX:
break;
case MAC_RING_TYPE_TX:
break;
default:
}
/*
* There should be no ring with the same ring index in the target
* group.
*/
NULL);
if (driver_call) {
/*
* The function is called as a result of a request from
* a driver to add a ring to an existing group, for example
* from the aggregation driver. Allocate a new mac_ring_t
* for that ring.
*/
} else {
/*
* The function is called as a result of a MAC layer request
* to add a ring to an existing group. In this case the
* ring is being moved between groups, which requires
* the underlying driver to support dynamic grouping,
* and the mac_ring_t already exists.
*/
}
/*
* At this point the ring should not be in use, and it should be
* of the right type for the target group.
*/
if (!driver_call) {
/*
* Add the driver level hardware ring if the process was not
* initiated by the driver, and the target group is not the
* NIC's default group.
*/
}
/*
* Insert the ring ahead of the existing rings.
*/
group->mrg_cur_count++;
}
/*
* If the group has not been actively used, we're done.
*/
return (0);
/*
* Set up the SRS/soft rings according to the ring type.
*/
case MAC_RING_TYPE_RX:
/*
* Set up an SRS on top of the new ring if the group is
* reserved for someone's exclusive use.
*/
}
break;
case MAC_RING_TYPE_TX:
/*
* For TX this function is only invoked during the
* initial creation of a group when a share is
* associated with a MAC client. So the datapath is not
* yet setup, and will be setup later after the
* group has been reserved and populated.
*/
break;
default:
}
/*
* Start the ring if needed. A failure causes the grouping action to be undone.
*/
}
}
if (!driver_call) {
}
group->mrg_cur_count--;
if (driver_call)
return (ret);
}
/*
* Update the ring's state.
*/
return (0);
}
/*
* Remove a ring from its current group. MAC internal function for dynamic
* grouping.
*
* The caller needs to call mac_perim_enter() before calling this function.
*/
void
{
case MAC_RING_TYPE_RX:
/*
* Only hardware classified packets hold a reference to the
* ring all the way up the Rx path. mac_rx_srs_remove()
* will take care of quiescing the Rx path and removing the
* SRS. The software classified path neither holds a reference
* nor any association with the ring in mac_rx.
*/
}
break;
case MAC_RING_TYPE_TX:
/*
* For TX this function is only invoked in two
* cases:
*
* 1) In the case of a failure during the
* initial creation of a group when a share is
* associated with a MAC client. So the SRS is not
* yet setup, and will be setup later after the
* group has been reserved and populated.
*
* 2) From mac_release_tx_group() when freeing
* a TX SRS.
*
* In both cases the SRS and its soft rings are
* already quiesced.
*/
break;
default:
}
/*
* Remove the ring from the group.
*/
else {
}
group->mrg_cur_count--;
if (!driver_call) {
/*
* Remove the driver level hardware ring.
*/
}
}
if (driver_call) {
} else {
}
}
/*
* Move a ring to the target group. If needed, remove the ring from the group
* that it currently belongs to.
*
* The caller needs to enter MAC's perimeter by calling mac_perim_enter().
*/
static int
{
int rv;
return (0);
/*
* Remove it from current group first.
*/
/*
* Add it to the new group.
*/
if (rv != 0) {
/*
* Failed to add the ring to the new group; try to add
* it back to the source group. If that also fails, the
* ring is stuck in limbo; log a message.
*/
}
}
return (rv);
}
/*
* Find a MAC address according to its value.
*/
{
break;
}
return (map);
}
/*
* Check whether the MAC address is shared by multiple clients.
*/
{
}
/*
* Remove the specified MAC address from the MAC address list and free it.
*/
static void
{
} else {
}
}
/*
* Add a MAC address reference for a client. If the desired MAC address
* exists, add a reference to it. Otherwise, add the new address by adding
* it to a reserved group or setting promiscuous mode. Won't try a different
* group if the given group is non-NULL, so the caller must explicitly share
* default group when needed.
*
* Note, the primary MAC address is initialized at registration time, so
* to add it to the default group we only need to activate it if its reference
* count is still zero. Also, some drivers may not have advertised RINGS
* capability.
*/
int
{
int err = 0;
/*
* If the new MAC address has not been added, allocate a new one
* and set it up.
*/
/* add the new MAC address to the head of the address list */
}
/*
* If the MAC address is already in use, simply account for the
* new client.
*/
return (0);
/*
* Activate this MAC address by adding it to the reserved group.
*/
if (err == 0) {
return (0);
}
}
/*
* The MAC address addition failed. If the client requires a
* hardware classified MAC address, fail the operation.
*/
if (use_hw) {
goto bail;
}
/*
* Try promiscuous mode.
*
* For drivers that don't advertise RINGS capability, do
* nothing for the primary address.
*/
return (0);
}
/*
* Enable promiscuous mode in order to receive traffic
* to the new MAC address.
*/
return (0);
}
/*
* Free the MAC address that could not be added. Don't free
* a pre-existing address, it could have been the entry
* for the primary MAC address which was pre-allocated by
* mac_init_macaddr(), and which must remain on the list.
*/
bail:
if (allocated_map)
return (err);
}
/*
* Remove a reference to a MAC address. This may cause the MAC address to be
* removed from an associated group, or promiscuous mode to be turned off.
* The caller needs to handle the failure properly.
*/
int
{
int err = 0;
/*
* If it's not the last client using this MAC address, only update
* the MAC clients count.
*/
return (0);
/*
* The MAC address is no longer used by any MAC client, so remove
* it from its associated group, or turn off promiscuous mode
* if it was enabled for the MAC address.
*/
/*
* Don't free the preset primary address for drivers that
* don't advertise RINGS capability.
*/
return (0);
break;
break;
default:
}
if (err != 0)
return (err);
/*
* We created MAC address for the primary one at registration, so we
* won't free it here. mac_fini_macaddr() will take care of it.
*/
return (0);
}
/*
* Update an existing MAC address. The caller needs to make sure that the new
* value has not been used.
*/
int
{
int err = 0;
/*
* Update the primary address for drivers that are not
* RINGS capable.
*/
mac_addr);
if (err != 0)
return (err);
break;
}
/*
* If this MAC address is not currently in use,
* simply break out and update the value.
*/
break;
/*
* Need to replace the MAC address associated with a group.
*/
if (err != 0)
return (err);
/*
* Failure hints at a hardware error. The MAC layer needs to
* have error notification facility to handle this.
* Now, simply try to restore the value.
*/
if (err != 0)
break;
/*
* Need to do nothing more if in promiscuous mode.
*/
break;
default:
}
/*
* Successfully replaced the MAC address.
*/
if (err == 0)
return (err);
}
/*
* Freshen the MAC address with a new value. Its caller must have updated the
* hardware MAC address before calling this function.
* This function is supposed to be used to handle the MAC address change
* notification from underlying drivers.
*/
void
{
/*
* Freshen the MAC address with new value.
*/
/*
* Update all MAC clients that share this MAC address.
*/
}
/*
* Set up the primary MAC address.
*/
void
{
/*
* The reference count is initialized to zero, until it's really
* activated.
*/
/*
* If driver advertises RINGS capability, it shouldn't have initialized
* its primary MAC address. For other drivers, including VNIC, the
* primary address must work after registration.
*/
/*
* The primary MAC address is reserved for the default group according
* to current design.
*/
}
/*
* Clean up the primary MAC address. Note, only one primary MAC address
* is allowed. All other MAC addresses must have been freed appropriately.
*/
void
{
return;
/*
* If mi_addresses is initialized, there should be exactly one
* entry left on the list with no users.
*/
}
/*
* Logging related functions.
*/
/* Write the Flow description to the log file */
int
{
/*
* Grab the fe_lock to see a self-consistent fe_flow_desc.
* Updates to the fe_flow_desc are done under the fe_lock
*/
} else {
}
}
/* Write the Flow statistics to the log file */
int
{
}
/* Write the Link Description to the log file */
int
{
/*
* Grab the fe_lock to see a self-consistent fe_flow_desc.
* Updates to the fe_flow_desc are done under the fe_lock
* after removing the flent from the flow table.
*/
}
/* Write the Link statistics to the log file */
int
{
}
/*
* For a given flow, if the description has not been logged before, do it now.
* If it is a VNIC, then we have collected information about it from the MAC
* table, so skip it.
*/
/*ARGSUSED*/
static int
{
return (0);
/*
* If the name starts with "vnic", and fe_user_generated is true (to
* exclude the mcast and active flow entries created implicitly for
* a vnic), it is a VNIC flow; e.g. vnic1 is a vnic flow,
* while vnic/bge1/mcast1 and bge1/active are not.
*/
return (0);
}
if (!flent->fe_desc_logged) {
/*
* We don't return an error because we want to continue the
* walk in case this is the last walk, which means we
* need to reset fe_desc_logged in all the flows.
*/
return (0);
}
/*
* Regardless of the error, we want to proceed in case we have to
* reset fe_desc_logged.
*/
(void) mac_write_flow_stats(flent);
return (0);
}
typedef struct i_mac_log_state_s {
int mi_fenable;
int mi_lenable;
boolean_t mi_last;		/* final walk: reset the logged flags */
} i_mac_log_state_t;
/*
* Walk the mac_impl_ts and log the description for each mac client of this mac,
* if it hasn't already been done. Additionally, log statistics for the link as
* well. Walk the flow table and log information for each flow as well.
* If it is the last walk (mi_last), then we turn off mci_desc_logged (and
* also fe_desc_logged, if flow logging is on) since we want to log the
* description if and when logging is restarted.
*/
/*ARGSUSED*/
static uint_t
{
int ret;
/*
* Only walk the client list for NIC and etherstub
*/
return (MH_WALK_CONTINUE);
if (!MCIP_DATAPATH_SETUP(mcip))
continue;
if (lstate->mi_lenable) {
if (ret != 0) {
/*
* We can't terminate it if this is the last
* walk, else there might be some links with
* mi_desc_logged set to true, which means
* their description won't be logged the next
* time logging is started (similarly for the
* flows within such links). We can continue
* without walking the flow table (i.e. to
* set fe_desc_logged to false) because we
* won't have written any flow stuff for this
* link as we haven't logged the link itself.
*/
if (lstate->mi_last)
	return (MH_WALK_CONTINUE);
else
	return (MH_WALK_TERMINATE);
}
}
}
return (MH_WALK_TERMINATE);
if (lstate->mi_fenable) {
}
}
}
return (MH_WALK_CONTINUE);
}
/*
* The timer thread that runs every mac_logging_interval seconds and logs
* link and/or flow information.
*/
/* ARGSUSED */
void
mac_log_linkinfo(void *arg)
{
if (!mac_flow_log_enable && !mac_link_log_enable) {
return;
}
if (mac_flow_log_enable || mac_link_log_enable) {
}
}
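/*
 * Sketch of the self-rearming callout pattern behind mac_log_linkinfo(),
 * using the timeout(9F) interface. The names (log_tick, logging_enabled,
 * log_interval, log_tid) are hypothetical; the point is that the handler
 * re-checks the enable flag before re-arming so that a racing stop
 * request takes effect.
 */
static timeout_id_t	log_tid;
static boolean_t	logging_enabled;
static clock_t		log_interval = 20;	/* seconds, hypothetical default */

static void
log_tick(void *arg)
{
	if (!logging_enabled)
		return;			/* logging stopped: do not re-arm */

	/* ... emit one round of link/flow records here ... */

	log_tid = timeout(log_tick, NULL,
	    drv_usectohz(log_interval * MICROSEC));
}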
typedef struct i_mac_fastpath_state_s {
boolean_t mf_disable;
int mf_err;
} i_mac_fastpath_state_t;
/*ARGSUSED*/
static uint_t
void *arg)
{
if (state->mf_disable)
else
}
/*
* Start the logging timer.
*/
int
{
int err;
switch (type) {
case MAC_LOGTYPE_FLOW:
if (mac_flow_log_enable) {
return (0);
}
/* FALLTHRU */
case MAC_LOGTYPE_LINK:
if (mac_link_log_enable) {
return (0);
}
break;
default:
ASSERT(0);
}
/* Disable fastpath */
/* Reenable fastpath */
return (err);
}
switch (type) {
case MAC_LOGTYPE_FLOW:
/* FALLTHRU */
case MAC_LOGTYPE_LINK:
break;
}
return (0);
}
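/*
 * Sketch of the fastpath bracketing performed when logging starts:
 * attempt to disable the fastpath on every mac, and if any attempt
 * fails, re-enable the ones already disabled before reporting the
 * error. All names are hypothetical; the real code drives this through
 * a mod hash walker carrying an i_mac_fastpath_state_s.
 */
static int
fastpath_bracket(void **macs, uint_t nmacs,
    int (*disable)(void *), void (*enable)(void *))
{
	uint_t i;
	int err = 0;

	for (i = 0; i < nmacs; i++) {
		if ((err = disable(macs[i])) != 0)
			break;
	}
	if (err != 0) {
		while (i-- > 0)
			enable(macs[i]);	/* undo what was already done */
	}
	return (err);
}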
/*
* Stop the logging timer if both Link and Flow logging are turned off.
*/
void
{
/* Last walk */
switch (type) {
case MAC_LOGTYPE_FLOW:
if (lstate.mi_fenable) {
break;
}
/* FALLTHRU */
case MAC_LOGTYPE_LINK:
return;
}
break;
default:
ASSERT(0);
}
/* Reenable fastpath */
(void) untimeout(mac_logging_timer);
mac_logging_timer = 0;
/* Last walk */
}
void
{
int count;
if (flent->fe_rx_srs_cnt <= 0)
return;
SRST_FLOW) {
} else {
}
}
/*
* If we have a Tx SRS, we need to modify all the threads associated
* with it.
*/
}
/*
* RX and TX rings are reserved according to different semantics depending
* on the requests from the MAC clients and the type of rings:
*
* On the Tx side, by default we reserve individual rings, independently from
* the groups.
*
* On the Rx side, the reservation is at the granularity of the group
* of rings, and used for v12n level 1 only. It has a special case for the
* primary client.
*
* If a share is allocated to a MAC client, we allocate a TX group and an
* RX group to the client, and assign TX rings and RX rings to these
* groups according to information gathered from the driver through
* the share capability.
*
* The foreseeable evolution of Rx rings will handle v12n level 2 and higher
* to allocate individual rings out of a group and program the hw classifier
* based on IP address or higher level criteria.
*/
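/*
 * The reservation policy above, restated as an illustrative decision
 * routine. The enum and function are hypothetical, not part of the
 * framework: Tx reserves individual rings, Rx reserves whole groups,
 * and a share gets dedicated groups with rings chosen through the share
 * capability.
 */
typedef enum {
	RSV_TX_RING,		/* an individual Tx ring */
	RSV_RX_GROUP,		/* a whole Rx group (v12n level 1) */
	RSV_SHARE_GROUPS	/* one Tx and one Rx group, share-driven */
} rsv_kind_t;

static rsv_kind_t
rsv_policy(boolean_t is_tx, boolean_t has_share)
{
	if (has_share)
		return (RSV_SHARE_GROUPS);
	return (is_tx ? RSV_TX_RING : RSV_RX_GROUP);
}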
/*
* mac_reserve_tx_ring()
* Reserve an unused ring by marking it with the MR_INUSE state.
* Once reserved, the ring is ready to function.
*
* Notes for Hybrid I/O:
*
* If a specific ring is needed, it is specified through the desired_ring
* argument. Otherwise that argument is set to NULL.
* If the desired ring was previously allocated to another client, this
* function swaps it with a new ring from the group of unassigned rings.
*/
{
return (NULL);
/*
* Find an available ring and start it before changing its status.
* The unassigned rings are at the end of the mi_tx_groups
* array.
*/
if (desired_ring == NULL) {
/* wanted any free ring and found one */
break;
} else {
if (ring != desired_ring)
/* wants a desired ring but this one ain't it */
continue;
break;
/*
* Found the desired ring but it's already in use.
* Swap it with a new ring.
*/
/* find the client which owns that ring */
desired_ring)) {
/* found our ring */
break;
}
}
/*
* The TX ring is in use, but it's not
* associated with any clients, so it
* has to be the default ring. In that
* case we can simply assign a new ring
* as the default ring, and we're done.
*/
/*
* Quiesce all clients on top of
* the NIC to make sure there are no
* pending threads still relying on
* that default ring, for example
* the multicast path.
*/
}
/* resume the clients */
break;
}
/*
* Note that we cannot simply invoke the group add/remove
* routines here, since the client doesn't have its own TX
* group; instead the rings have to be added to and removed
* from the client's SRS.
*/
/* first quiesce the client */
/* give a new ring to the client... */
/*
* There is no other ring available
* on that MAC instance. The client
* will fall back to the shared TX
* ring.
*/
}
/* ... in exchange for our desired ring */
/* restart the client */
if (mip->mi_default_tx_ring ==
/*
* The desired ring is the default ring,
* and there are one or more clients
* using that default ring directly.
*/
/*
* Find clients using default ring and
* swap it with the new default ring.
*/
desired_ring)) {
/* first quiesce the client */
/*
* Give it the new default
* ring, and remove the old
* one.
*/
sring);
}
/* restart the client */
}
}
}
break;
}
}
if (mac_start_ring(ring) != 0)
return (NULL);
}
return (ring);
}
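/*
 * Sketch of the search logic of mac_reserve_tx_ring() with hypothetical
 * names: with no desired ring, take the first free one; with a desired
 * ring, skip everything else, and if the desired ring is busy the real
 * code first swaps a replacement ring to its current owner (quiescing
 * that client), which is elided here.
 */
#define	SR_FREE		0
#define	SR_INUSE	1

typedef struct s_ring {
	int	sr_state;
} s_ring_t;

static s_ring_t *
reserve_ring(s_ring_t *rings, uint_t nrings, s_ring_t *desired)
{
	uint_t i;

	for (i = 0; i < nrings; i++) {
		s_ring_t *ring = &rings[i];

		if (desired == NULL) {
			if (ring->sr_state != SR_FREE)
				continue;
			ring->sr_state = SR_INUSE;	/* any free ring will do */
			return (ring);
		}
		if (ring != desired)
			continue;		/* not the one we want */
		if (ring->sr_state == SR_FREE) {
			ring->sr_state = SR_INUSE;
			return (ring);
		}
		return (NULL);		/* busy: swap handling elided */
	}
	return (NULL);
}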
/*
* Minimum number of rings to leave in the default TX group when allocating
* rings to new clients.
*/
/*
* Populate a zero-ring group with rings. If the share is non-NULL,
* the rings are chosen according to that share.
* Invoked after allocating a new RX or TX group through
* mac_reserve_rx_group() or mac_reserve_tx_group(), respectively.
* Returns zero on success, an errno otherwise.
*/
int
{
int rv, i, j;
/*
* First find the rings to allocate to the group.
*/
/* get rings through ms_squery() */
KM_SLEEP);
} else {
/* this function is called for TX only with a share */
/*
* Pick one ring from default group.
*
* For now, pick the second ring; this requires the first ring
* at index 0 to stay in the default group, since it is the
* ring that carries the multicast traffic.
* We need a better way for a driver to indicate this,
* for example a per-ring flag.
*/
break;
}
nrings = 1;
}
switch (ring_type) {
case MAC_RING_TYPE_RX:
/* we ran out of rings */
return (ENOSPC);
}
/* move receive rings to new group */
for (i = 0; i < nrings; i++) {
if (rv != 0) {
/* move rings back on failure */
for (j = 0; j < i; j++) {
(void) mac_group_mov_ring(mip,
}
return (rv);
}
}
break;
case MAC_RING_TYPE_TX: {
/* move the TX rings to the new group */
for (i = 0; i < nrings; i++) {
/* get the desired ring */
if (rv != 0) {
/* cleanup on failure */
for (j = 0; j < i; j++) {
(void) mac_group_mov_ring(mip,
mip->mi_tx_groups +
}
}
}
break;
}
}
/* add group to share */
/* free temporary array of rings */
}
return (0);
}
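/*
 * Sketch of the move-with-rollback loop used when populating a group:
 * if moving ring i fails, the rings already moved are returned to the
 * source so the operation is all-or-nothing. The mover callback and all
 * names are hypothetical.
 */
static int
move_rings(void *src, void *dst, void **rings, uint_t nrings,
    int (*mov)(void *from, void *to, void *ring))
{
	uint_t i, j;
	int rv;

	for (i = 0; i < nrings; i++) {
		if ((rv = mov(src, dst, rings[i])) != 0) {
			for (j = 0; j < i; j++)
				(void) mov(dst, src, rings[j]);	/* roll back */
			return (rv);
		}
	}
	return (0);
}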
void
{
break;
}
}
void
{
break;
}
}
/*
* mac_reserve_rx_group()
*
* Finds an available group and exclusively reserves it for a client.
* The group is chosen to suit the flow's resource controls (bandwidth and
* fanout requirements) and the address type.
* If the requestor is the primary MAC then return the group with the
* largest number of rings, otherwise the default ring when available.
*/
{
int err;
/* Check if a group already has this mac address (case of VLANs) */
return (NULL);
/*
* Try to exclusively reserve a RX group.
*
* For flows that require SW_RING, always go to the default group
* (Until we can explicitly call out default groups (CR 6695600),
* we assume that the default group is always at position zero);
*
* For flows that require HW_DEFAULT_RING (the unicast flow of the primary
* client), try to reserve the default RX group only.
*
* For flows that require HW_RING (unicast flows of other clients), try
* to reserve a non-default RX group first, then the default group.
*/
switch (rtype) {
case MAC_RX_RESERVE_DEFAULT:
start = 0;
loopcount = 1;
break;
start = 1;
}
/*
* Check to see whether this mac client is the only client
* on this RX group. If not, we cannot exclusively reserve
* this RX group.
*/
if (!MAC_RX_GROUP_NO_CLIENT(grp) &&
continue;
}
/*
* This group could already be SHARED by other multicast
* flows on this client. In that case, the group has
* already been started.
*/
(mac_start_group(grp) != 0)) {
continue;
}
if ((i % mip->mi_rx_group_count) == 0 ||
break;
}
/*
* Populate the group. Rings should be taken
* from the default group at position 0 for now.
*/
if (err == 0)
break;
/*
* It's a dynamic group but the grouping operation failed.
*/
}
return (NULL);
return (grp);
}
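/*
 * Sketch of the candidate scan of mac_reserve_rx_group() with
 * hypothetical names: for a default-only reservation, consider just the
 * group at index 0; otherwise start at index 1 and wrap around so that
 * the default group is tried last. The usable() predicate stands in for
 * the shared-group and start-failure checks.
 */
static int
pick_rx_group(uint_t ngroups, boolean_t default_only,
    boolean_t (*usable)(uint_t))
{
	uint_t start = default_only ? 0 : 1;
	uint_t count = default_only ? 1 : ngroups;
	uint_t i;

	for (i = 0; i < count; i++) {
		uint_t idx = (start + i) % ngroups;

		if (usable(idx))
			return ((int)idx);	/* reserve this group */
	}
	return (-1);			/* no group could be reserved */
}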
/*
* mac_rx_release_group()
*
* This is called when there are no clients left for the group.
* The group is stopped and marked MAC_GROUP_STATE_REGISTERED,
* and if it is a non-default group, the shares are removed and
* all rings are assigned back to the default group.
*/
void
{
/*
* This is the case where there are no clients left. Any
* SRSes etc. on this group have also been quiesced.
*/
/*
* Remove the SRS associated with the HW ring.
* As a result, polling will be disabled.
*/
}
}
/* remove group from share */
group->mrg_driver);
}
/*
* Rings were dynamically allocated to group.
* Move rings back to default group.
*/
(void) mac_group_mov_ring(mip,
}
}
/*
* Possible improvement: See if we can assign the group just released
* to another client of the mip.
*/
}
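/*
 * Sketch of the teardown ordering implied above, with hypothetical
 * callback names: remove the SRSes first so polling stops, then detach
 * the group from any share, return the dynamically assigned rings to the
 * default group, and finally stop the group so it drops back to the
 * registered state.
 */
typedef struct rel_ops {
	void	(*ro_remove_srs)(void *grp);
	void	(*ro_unshare)(void *grp);
	void	(*ro_return_rings)(void *grp);
	void	(*ro_stop)(void *grp);
} rel_ops_t;

static void
release_group(void *grp, const rel_ops_t *ops)
{
	ops->ro_remove_srs(grp);	/* polling is disabled from here on */
	ops->ro_unshare(grp);
	ops->ro_return_rings(grp);	/* rings go back to the default group */
	ops->ro_stop(grp);		/* group is REGISTERED again */
}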
/*
* Reserves a TX group for the specified share. Invoked by mac_tx_srs_setup()
* when a share was allocated to the client.
*/
{
int rv, i;
/*
* TX groups are currently allocated only to MAC clients
* which are associated with a share. Since we have a fixed
* number of shares and groups, and we already successfully
* allocated a share, find an available TX group.
*/
for (i = 0; i < mip->mi_tx_group_count; i++) {
continue;
break;
}
/*
* Populate the group. Rings should be taken from the group
* of unassigned rings, which is past the array of TX
* groups advertised by the driver.
*/
if (rv != 0) {
return (NULL);
}
mip->mi_tx_group_free--;
return (grp);
}
void
{
/* move the ring back to the pool */
}
mip->mi_tx_group_free++;
}
/*
* This is a 1-time control path activity initiated by the client (IP).
* The mac perimeter protects against other simultaneous control activities,
* for example an ioctl that attempts to change the degree of fanout and
* increase or decrease the number of softrings associated with this Tx SRS.
*/
static mac_tx_notify_cb_t *
{
return (mtnfp);
}
static void
{
"mac_client_tx_notify_remove: callback not "
return;
}
else
}
/*
* mac_client_tx_notify():
* Called to add or remove a flow-control callback routine.
*/
void *ptr)
{
if (callb_func != NULL) {
/* Add a notify callback */
} else {
}
return ((mac_tx_notify_handle_t)mtnfp);
}
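/*
 * Sketch of the callback registration above with hypothetical names.
 * Because the perimeter serializes these calls, a plain singly linked
 * list suffices: a non-NULL function adds an entry whose address doubles
 * as the removal handle, and a NULL function removes the entry.
 */
typedef struct tx_cb {
	struct tx_cb	*tc_next;
	void		(*tc_fn)(void *);
	void		*tc_arg;
} tx_cb_t;

static tx_cb_t *
tx_cb_add(tx_cb_t **listp, void (*fn)(void *), void *arg)
{
	tx_cb_t *cb = kmem_zalloc(sizeof (tx_cb_t), KM_SLEEP);

	cb->tc_fn = fn;
	cb->tc_arg = arg;
	cb->tc_next = *listp;
	*listp = cb;
	return (cb);			/* the pointer is the handle */
}

static void
tx_cb_remove(tx_cb_t **listp, tx_cb_t *cb)
{
	for (; *listp != NULL; listp = &(*listp)->tc_next) {
		if (*listp == cb) {
			*listp = cb->tc_next;
			kmem_free(cb, sizeof (tx_cb_t));
			return;
		}
	}
	/* not found: caller bug, as warned about above */
}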
void
{
}
int
{
int retv;
retv = 0;
} else {
}
if (retv == 0) {
}
return (retv);
}
/*
* Disable bridging on the indicated link.
*/
void
{
}
void
{
}