netstack.c revision 23f4867fdff96a11dd674de6259a5a0d0a13251c
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/sysmacros.h>
#include <sys/tuneable.h>
#include <sys/mutex.h>
#include <sys/kmem.h>
#include <sys/kstat.h>
#include <sys/zone.h>
#include <vm/seg_kmem.h>
#include <sys/netstack.h>
/*
* What we use so that the zones framework can tell us about new zones,
* which we use to create new stacks.
*/
static zone_key_t netstack_zone_key;
static int netstack_initialized = 0;
/*
* Track the registered netstacks.
* The global lock protects
* - ns_reg
* - the list starting at netstack_head and following the netstack_next
* pointers.
*/
static kmutex_t netstack_g_lock;
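
/*
 * Registry of the per-module create/shutdown/destroy callbacks named in the
 * comment above. The declaration is a sketch; the netstack_registry element
 * type is assumed to come from <sys/netstack.h>.
 */
static struct netstack_registry	ns_reg[NS_MAX];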
/*
* Global list of existing stacks. We use this when a new zone with
* an exclusive IP instance is created.
*
* Note that in some cases a netstack_t needs to stay around after the zone
* has gone away. This is because there might be outstanding references
* (from TCP TIME_WAIT connections, IPsec state, etc). The netstack_t data
* structure and all the foo_stack_t's hanging off of it will be cleaned up
* when the last reference to it is dropped.
* However, the same zone might be rebooted. That is handled using the
* assumption that the zones framework picks a new zoneid each time a zone
* is (re)booted. We assert for that condition in netstack_zone_create().
* Thus the old netstack_t can take its time for things to time out.
*/
static netstack_t *netstack_head;
/*
 * To support kstat_create_netstack() using kstat_zone_add we need
 * to track both
 * - all zoneids that use the shared stack
 * - all kstats that have been added for the shared stack
 */
struct shared_zone_list {
	struct shared_zone_list *sz_next;
	zoneid_t		sz_zoneid;	/* zone using the shared stack */
};

struct shared_kstat_list {
	struct shared_kstat_list *sk_next;
	kstat_t			*sk_kstat;	/* kstat added for the shared stack */
};
static kmutex_t netstack_shared_lock;	/* protects the two lists below */
static struct shared_zone_list	*netstack_shared_zones;
static struct shared_kstat_list *netstack_shared_kstats;
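
/*
 * Forward declarations for static routines defined later in this file and
 * the applyfn type used by the walker routines. netstack_zone_create() and
 * apply_all_netstacks() are named elsewhere in this file; the remaining
 * identifiers (including the apply_all_modules*() walkers and the
 * netstack_apply_*() callbacks further down) are assumed names used so the
 * sketches in this file hang together.
 */
typedef boolean_t applyfn_t(kmutex_t *, netstack_t *, int);

static void	*netstack_zone_create(zoneid_t zoneid);
static void	netstack_zone_shutdown(zoneid_t zoneid, void *arg);
static void	netstack_zone_destroy(zoneid_t zoneid, void *arg);
static void	netstack_stack_inactive(netstack_t *ns);

static void	netstack_do_create(void);
static void	netstack_do_shutdown(void);
static void	netstack_do_destroy(void);

static boolean_t netstack_apply_create(kmutex_t *, netstack_t *, int);
static boolean_t netstack_apply_shutdown(kmutex_t *, netstack_t *, int);
static boolean_t netstack_apply_destroy(kmutex_t *, netstack_t *, int);
static void	apply_all_modules(netstack_t *, applyfn_t *);
static void	apply_all_modules_reverse(netstack_t *, applyfn_t *);

static void	netstack_shared_zone_add(zoneid_t zoneid);
static void	netstack_shared_zone_remove(zoneid_t zoneid);
static void	netstack_shared_kstat_add(kstat_t *ks);
static void	netstack_shared_kstat_remove(kstat_t *ks);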
void
netstack_init(void)
{
	mutex_init(&netstack_g_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&netstack_shared_lock, NULL, MUTEX_DEFAULT, NULL);

	netstack_initialized = 1;

	/*
	 * We want to be informed each time a zone is created or
	 * destroyed in the kernel, so we can maintain the
	 * stack instance information.
	 */
	zone_key_create(&netstack_zone_key, netstack_zone_create,
	    netstack_zone_shutdown, netstack_zone_destroy);
}
/*
* Register a new module with the framework.
* This registers interest in changes to the set of netstacks.
* The createfn and destroyfn are required, but the shutdownfn can be
* NULL.
* Note that due to the current zsd implementation, when the create
* function is called the zone isn't fully present, thus functions
* like zone_find_by_* will fail, hence the create function can not
* use many zones kernel functions including zcmn_err().
*/
void
netstack_register(int moduleid,
    void *(*module_create)(netstackid_t, netstack_t *),
    void (*module_shutdown)(netstackid_t, void *),
    void (*module_destroy)(netstackid_t, void *))
{
netstack_t *ns;
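
	/*
	 * Sketch of the registration step: record the callbacks in ns_reg
	 * while holding netstack_g_lock. The nr_* member names are
	 * assumptions, not taken from this file.
	 */
	mutex_enter(&netstack_g_lock);
	ns_reg[moduleid].nr_create = module_create;
	ns_reg[moduleid].nr_shutdown = module_shutdown;
	ns_reg[moduleid].nr_destroy = module_destroy;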
/*
* Determine the set of stacks that exist before we drop the lock.
* Set CREATE_NEEDED for each of those.
* netstacks which have been deleted will have NSS_CREATE_COMPLETED
* set, but check NSF_CLOSING to be sure.
*/
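	/*
	 * Sketch of the marking pass the comment above describes.
	 * NSS_CREATE_NEEDED and NSF_CLOSING are named in this file; the
	 * netstack_flags and netstack_m_state member names are assumptions.
	 */
	for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
		mutex_enter(&ns->netstack_lock);
		if (!(ns->netstack_flags & NSF_CLOSING))
			ns->netstack_m_state[moduleid] |= NSS_CREATE_NEEDED;
		mutex_exit(&ns->netstack_lock);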
	}
	mutex_exit(&netstack_g_lock);
/*
* Call the create function for each stack that has CREATE_NEEDED
* for this moduleid.
* Set CREATE_INPROGRESS, drop lock, and after done,
* set CREATE_COMPLETE
*/
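	/*
	 * Run the pending creates; netstack_do_create() is the assumed name
	 * of the worker defined later in this file.
	 */
	netstack_do_create();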
}
void
netstack_unregister(int moduleid)
{
netstack_t *ns;
/*
* Determine the set of stacks that exist before we drop the lock.
* Set SHUTDOWN_NEEDED and DESTROY_NEEDED for each of those.
*/
}
}
}
/*
* Clear the netstack_m_state so that we can handle this module
* being loaded again.
*/
}
}
}
/*
 * zsd create callback: look up or allocate the stack instance to be used
 * by a newly created zone.
 */
static void *
netstack_zone_create(zoneid_t zoneid)
{
netstack_t *ns;
netstack_t **nsp;
int i;
} else {
/* Look for the stack instance for the global */
}
/* Allocate even if it isn't needed; simplifies locking */
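	/*
	 * Sketch of the allocation the comment above refers to; KM_SLEEP is
	 * assumed since this runs from the zone-create path.
	 */
	ns = (netstack_t *)kmem_zalloc(sizeof (netstack_t), KM_SLEEP);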
/* Look if there is a matching stack instance */
/*
* Should never find a pre-existing exclusive stack
*/
ns->netstack_numzones++;
netstack_t *, ns);
/* Record that we have a new shared stack zone */
return (ns);
}
}
/* Not found */
/*
* Determine the set of module create functions that need to be
* called before we drop the lock.
*/
for (i = 0; i < NS_MAX; i++) {
netstack_t *, ns, int, i);
}
}
return (ns);
}
/* ARGSUSED */
static void
netstack_zone_shutdown(zoneid_t zoneid, void *arg)
{
	netstack_t *ns = (netstack_t *)arg;
int i;
/* Stack instance being used by other zone */
return;
}
/*
* Determine the set of stacks that exist before we drop the lock.
* Set SHUTDOWN_NEEDED for each of those.
*/
for (i = 0; i < NS_MAX; i++) {
netstack_t *, ns, int, i);
}
}
/*
* Call the shutdown function for all registered modules for this
* netstack.
*/
}
/*
* Common routine to release a zone.
 * If this was the last zone using the stack instance then prepare to
 * have the refcnt dropping to zero free the stack instance.
*/
/* ARGSUSED */
static void
netstack_zone_destroy(zoneid_t zoneid, void *arg)
{
	netstack_t *ns = (netstack_t *)arg;
ns->netstack_numzones--;
if (ns->netstack_numzones != 0) {
/* Stack instance being used by other zone */
/* Record that a shared stack zone has gone away */
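		/* Assumed helper; maintains the shared zone list below */
		netstack_shared_zone_remove(zoneid);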
return;
}
/*
* Set CLOSING so that netstack_find_by will not find it.
*/
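	/*
	 * Sketch of the missing step. NSF_CLOSING is named above; the
	 * netstack_flags member name is an assumption.
	 */
	mutex_enter(&ns->netstack_lock);
	ns->netstack_flags |= NSF_CLOSING;
	mutex_exit(&ns->netstack_lock);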
/* No other thread can call zone_destroy for this stack */
/*
 * Decrease refcnt to account for the one taken in netstack_zone_create()
*/
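	/* Drop the reference taken when the stack instance was created */
	netstack_rele(ns);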
}
/*
* Called when the reference count drops to zero.
* Call the destroy functions for each registered module.
*/
static void
netstack_stack_inactive(netstack_t *ns)
{
int i;
/*
* If the shutdown callback wasn't called earlier (e.g., if this is
* a netstack shared between multiple zones), then we call it now.
*/
for (i = 0; i < NS_MAX; i++) {
netstack_t *, ns, int, i);
}
}
/*
* Determine the set of stacks that exist before we drop the lock.
* Set DESTROY_NEEDED for each of those.
*/
for (i = 0; i < NS_MAX; i++) {
netstack_t *, ns, int, i);
}
}
/*
* Call the shutdown and destroy functions for all registered modules
* for this netstack.
*/
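	/*
	 * Sketch; apply_all_modules()/apply_all_modules_reverse() and the
	 * netstack_apply_*() callbacks are the assumed names noted near the
	 * top of this file. Destroy runs in reverse module order.
	 */
	apply_all_modules(ns, netstack_apply_shutdown);
	apply_all_modules_reverse(ns, netstack_apply_destroy);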
}
/*
* Call the create function for the ns and moduleid if CREATE_NEEDED
* is set.
* When it calls it, it drops the netstack_lock held by the caller,
 * and returns true to tell the caller it needs to re-evaluate the
 * state.
*/
static boolean_t
netstack_apply_create(kmutex_t *lockp, netstack_t *ns, int moduleid)
{
void *result;
netstack_t *, ns);
return (B_TRUE);
} else {
return (B_FALSE);
}
}
/*
* Call the shutdown function for the ns and moduleid if SHUTDOWN_NEEDED
* is set.
* When it calls it, it drops the netstack_lock held by the caller,
 * and returns true to tell the caller it needs to re-evaluate the
 * state.
*/
static boolean_t
netstack_apply_shutdown(kmutex_t *lockp, netstack_t *ns, int moduleid)
{
	void *netstack_module;
void *, netstack_module);
netstack_t *, ns);
return (B_TRUE);
} else {
return (B_FALSE);
}
}
/*
* Call the destroy function for the ns and moduleid if DESTROY_NEEDED
* is set.
* When it calls it, it drops the netstack_lock held by the caller,
 * and returns true to tell the caller it needs to re-evaluate the
 * state.
*/
static boolean_t
netstack_apply_destroy(kmutex_t *lockp, netstack_t *ns, int moduleid)
{
	void *netstack_module;
/* XXX race against unregister? */
void *, netstack_module);
netstack_t *, ns);
return (B_TRUE);
} else {
return (B_FALSE);
}
}
/*
* Apply a function to all netstacks for a particular moduleid.
*
* The applyfn has to drop netstack_g_lock if it does some work.
* In that case we don't follow netstack_next after reacquiring the
* lock, even if it is possible to do so without any hazards. This is
* because we want the design to allow for the list of netstacks threaded
* by netstack_next to change in any arbitrary way during the time the
* lock was dropped.
*
* It is safe to restart the loop at netstack_head since the applyfn
* changes netstack_m_state as it processes things, so a subsequent
* pass through will have no effect in applyfn, hence the loop will terminate
* in at worst O(N^2).
*/
static void
apply_all_netstacks(int moduleid, applyfn_t *applyfn)
{
netstack_t *ns;
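
	/*
	 * Sketch of the walk described above: call the applyfn with
	 * netstack_g_lock held and restart from netstack_head whenever the
	 * applyfn reports that it dropped the lock.
	 */
	mutex_enter(&netstack_g_lock);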
ns = netstack_head;
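	/* Visit each stack in turn (continuation of the sketch above) */
	while (ns != NULL) {
		if ((applyfn)(&netstack_g_lock, ns, moduleid)) {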
/* Lock dropped - restart at head */
#ifdef NS_DEBUG
			/* Debug trace; the argument order is an assumption */
			(void) printf("apply_all_netstacks: "
			    "LD for %p/%d, %d\n",
			    (void *)ns, ns->netstack_stackid, moduleid);
#endif
ns = netstack_head;
} else {
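			/* The applyfn kept the lock held; just advance */
			ns = ns->netstack_next;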
}
}
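	/* Walk complete; release the global lock (end of sketch) */
	mutex_exit(&netstack_g_lock);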
}
/*
* Apply a function to all moduleids for a particular netstack.
*
* Since the netstack linkage doesn't matter in this case we can
* ignore whether the function drops the lock.
*/
static void
apply_all_modules(netstack_t *ns, applyfn_t *applyfn)
{
int i;
for (i = 0; i < NS_MAX; i++) {
/*
* Lock dropped but since we are not iterating over
* netstack_head we can just reacquire the lock.
*/
}
}
}
/* Like the above but in reverse moduleid order */
static void
apply_all_modules_reverse(netstack_t *ns, applyfn_t *applyfn)
{
int i;
for (i = NS_MAX-1; i >= 0; i--) {
/*
* Lock dropped but since we are not iterating over
* netstack_head we can just reacquire the lock.
*/
}
}
}
/*
 * Dispatch an applyfn over the requested netstack/moduleid combinations.
* If ns is non-NULL we restrict it to that particular instance.
* If moduleid is a particular one (not NS_ALL), then we restrict it
* to that particular moduleid.
* When walking the moduleid, the reverse argument specifies that they
* should be walked in reverse order.
* The applyfn returns true if it had dropped the locks.
*/
static void
netstack_do_apply(netstack_t *ns, int moduleid, boolean_t reverse,
    applyfn_t *applyfn)
{
	if (ns != NULL) {
		if (reverse)
			apply_all_modules_reverse(ns, applyfn);
		else
			apply_all_modules(ns, applyfn);
	} else {
		apply_all_netstacks(moduleid, applyfn);
	}
}
/*
* Run the create function for all modules x stack combinations
* that have NSS_CREATE_NEEDED set.
*
* Call the create function for each stack that has CREATE_NEEDED.
* Set CREATE_INPROGRESS, drop lock, and after done,
* set CREATE_COMPLETE
*/
static void
netstack_do_create(void)
{
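	/*
	 * Sketch: run the create callbacks for every module x stack pair
	 * that was marked NSS_CREATE_NEEDED. The helper names are the
	 * assumed ones noted near the top of the file.
	 */
	int i;

	for (i = 0; i < NS_MAX; i++)
		apply_all_netstacks(i, netstack_apply_create);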
}
/*
* Run the shutdown function for all modules x stack combinations
* that have NSS_SHUTDOWN_NEEDED set.
*
* Call the shutdown function for each stack that has SHUTDOWN_NEEDED.
* Set SHUTDOWN_INPROGRESS, drop lock, and after done,
* set SHUTDOWN_COMPLETE
*/
static void
netstack_do_shutdown(void)
{
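	/* Sketch; same walk as netstack_do_create() but for shutdown hooks */
	int i;

	for (i = 0; i < NS_MAX; i++)
		apply_all_netstacks(i, netstack_apply_shutdown);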
}
/*
* Run the destroy function for all modules x stack combinations
* that have NSS_DESTROY_NEEDED set.
*
* Call the destroy function for each stack that has DESTROY_NEEDED.
* Set DESTROY_INPROGRESS, drop lock, and after done,
* set DESTROY_COMPLETE
*
* Since a netstack_t is never reused (when a zone is rebooted it gets
* a new zoneid == netstackid i.e. a new netstack_t is allocated) we leave
* netstack_m_state the way it is i.e. with NSS_DESTROY_COMPLETED set.
*/
static void
netstack_do_destroy(void)
{
/*
* Have to walk the moduleids in reverse order since some
* modules make implicit assumptions about the order
*/
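	/* Sketch; reverse module order per the comment above */
	int i;

	for (i = NS_MAX - 1; i >= 0; i--)
		apply_all_netstacks(i, netstack_apply_destroy);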
}
/*
* Get the stack instance used in caller's zone.
* Increases the reference count, caller must do a netstack_rele.
* It can't be called after zone_destroy() has started.
*/
netstack_t *
netstack_get_current(void)
{
netstack_t *ns;
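
	/*
	 * Sketch, assuming a zone_netstack back-pointer on the zone and the
	 * NSF_UNINIT flag: the current stack hangs off the caller's zone;
	 * refuse it while it is still initializing or already closing.
	 */
	ns = curproc->p_zone->zone_netstack;
	if (ns == NULL ||
	    (ns->netstack_flags & (NSF_UNINIT | NSF_CLOSING)))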
return (NULL);
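	/* Take a reference for the caller (dropped with netstack_rele()) */
	netstack_hold(ns);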
return (ns);
}
/*
* Find a stack instance given the cred.
* This is used by the modules to potentially allow for a future when
* something other than the zoneid is used to determine the stack.
 */
netstack_t *
netstack_find_by_cred(const cred_t *cr)
{
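	/* crgetzoneid() is the standard accessor for the cred's zone id */
	zoneid_t zoneid = crgetzoneid(cr);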
/* Handle the case when cr_zone is NULL */
/* For performance ... */
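	/* Sketch: fast path when the cred belongs to the caller's own zone */
	if (zoneid == curproc->p_zone->zone_id)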
return (netstack_get_current());
else
return (netstack_find_by_zoneid(zoneid));
}
/*
* Find a stack instance given the zoneid.
* Increases the reference count if found; caller must do a
* netstack_rele().
*
* If there is no exact match then assume the shared stack instance
* matches.
*
 * Skip the uninitialized ones.
 */
netstack_t *
netstack_find_by_zoneid(zoneid_t zoneid)
{
netstack_t *ns;
return (NULL);
else
return (ns);
}
/*
* Find a stack instance given the zoneid.
* Increases the reference count if found; caller must do a
* netstack_rele().
*
* If there is no exact match then assume the shared stack instance
* matches.
*
 * Skip the uninitialized ones.
 *
 * NOTE: The caller must hold zonehash_lock.
 */
netstack_t *
netstack_find_by_zoneid_nolock(zoneid_t zoneid)
{
netstack_t *ns;
return (NULL);
else
return (ns);
}
/*
 * Find a stack instance given the stackid; only an exact match is returned.
 * Increases the reference count if found; caller must do a
 * netstack_rele().
 *
 * Skip the uninitialized ones.
 */
netstack_t *
netstack_find_by_stackid(netstackid_t stackid)
{
netstack_t *ns;
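
	/*
	 * Sketch of the missing walk. netstack_stackid is an assumed member
	 * name; NSF_UNINIT/NSF_CLOSING follow the "uninitialized"/"closing"
	 * language used elsewhere in this file.
	 */
	mutex_enter(&netstack_g_lock);
	for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
		if (ns->netstack_stackid == stackid &&
		    !(ns->netstack_flags & (NSF_UNINIT | NSF_CLOSING))) {
			netstack_hold(ns);
			mutex_exit(&netstack_g_lock);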
return (ns);
}
}
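	/* Not found; drop the global lock before reporting failure */
	mutex_exit(&netstack_g_lock);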
return (NULL);
}
void
netstack_rele(netstack_t *ns)
{
netstack_t **nsp;
ns->netstack_refcnt--;
/*
* As we drop the lock additional netstack_rele()s can come in
* and decrement the refcnt to zero and free the netstack_t.
* Store pointers in local variables and if we were not the last
* then don't reference the netstack_t after that.
*/
/*
* Time to call the destroy functions and free up
* the structure
*/
/* Make sure nothing increased the references */
/* Finally remove from list of netstacks */
break;
}
}
/* Make sure nothing increased the references */
}
}
void
netstack_hold(netstack_t *ns)
{
	mutex_enter(&ns->netstack_lock);
	ns->netstack_refcnt++;
	mutex_exit(&ns->netstack_lock);
}
/*
 * To support kstat_create_netstack() using kstat_zone_add we need
 * to track both
 * - all zoneids that use the shared stack
 * - all kstats that have been added for the shared stack
 */
kstat_t *
kstat_create_netstack(char *ks_module, int ks_instance, char *ks_name,
    char *ks_class, uchar_t ks_type, uint_t ks_ndata, uchar_t ks_flags,
    netstackid_t ks_netstackid)
{
	kstat_t *ks;
if (ks_netstackid == GLOBAL_NETSTACKID) {
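		/*
		 * Sketch: shared-stack kstats are created in the global zone
		 * and then made visible to every zone sharing the stack via
		 * kstat_zone_add (see netstack_shared_kstat_add below).
		 */
		ks = kstat_create_zone(ks_module, ks_instance, ks_name,
		    ks_class, ks_type, ks_ndata, ks_flags, GLOBAL_ZONEID);
		if (ks != NULL)
			netstack_shared_kstat_add(ks);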
return (ks);
} else {
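		/* Exclusive stack: the netstackid doubles as the zoneid */
		return (kstat_create_zone(ks_module, ks_instance, ks_name,
		    ks_class, ks_type, ks_ndata, ks_flags,
		    netstackid_to_zoneid(ks_netstackid)));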
}
}
void
kstat_delete_netstack(kstat_t *ks, netstackid_t ks_netstackid)
{
if (ks_netstackid == GLOBAL_NETSTACKID) {
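		/* Stop tracking the shared-stack kstat before it goes away */
		netstack_shared_kstat_remove(ks);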
}
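	/* Common to both cases: delete the kstat itself */
	kstat_delete(ks);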
}
static void
netstack_shared_zone_add(zoneid_t zoneid)
{
struct shared_zone_list *sz;
struct shared_kstat_list *sk;
/* Insert in list */
/*
* Perform kstat_zone_add for each existing shared stack kstat.
* Note: Holds netstack_shared_lock lock across kstat_zone_add.
*/
}
}
static void
netstack_shared_zone_remove(zoneid_t zoneid)
{
struct shared_kstat_list *sk;
/* Find in list */
break;
}
}
/* We must find it */
/*
* Perform kstat_zone_remove for each existing shared stack kstat.
* Note: Holds netstack_shared_lock lock across kstat_zone_remove.
*/
}
}
static void
netstack_shared_kstat_add(kstat_t *ks)
{
struct shared_zone_list *sz;
struct shared_kstat_list *sk;
/* Insert in list */
/*
* Perform kstat_zone_add for each existing shared stack zone.
* Note: Holds netstack_shared_lock lock across kstat_zone_add.
*/
}
}
static void
netstack_shared_kstat_remove(kstat_t *ks)
{
struct shared_zone_list *sz;
/* Find in list */
break;
}
}
/* Must find it */
/*
* Perform kstat_zone_remove for each existing shared stack kstat.
* Note: Holds netstack_shared_lock lock across kstat_zone_remove.
*/
}
}
/*
 * Return B_TRUE if the given zoneid is using the shared stack.
 */
static boolean_t
netstack_find_shared_zoneid(zoneid_t zoneid)
{
struct shared_zone_list *sz;
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* Hide the fact that zoneids and netstackids are allocated from
* the same space in the current implementation.
 */
zoneid_t
netstackid_to_zoneid(netstackid_t stackid)
{
return (stackid);
}
{
return (GLOBAL_ZONEID);
else
return (zoneid);
}
/*
* Simplistic support for walking all the handles.
* Example usage:
* netstack_handle_t nh;
* netstack_t *ns;
*
* netstack_next_init(&nh);
* while ((ns = netstack_next(&nh)) != NULL) {
* do something;
* netstack_rele(ns);
* }
* netstack_next_fini(&nh);
*/
void
netstack_next_init(netstack_handle_t *handle)
{
*handle = 0;
}
/* ARGSUSED */
void
netstack_next_fini(netstack_handle_t *handle)
{
}
netstack_t *
netstack_next(netstack_handle_t *handle)
{
netstack_t *ns;
int i, end;
/* Walk skipping *handle number of instances */
/* Look if there is a matching stack instance */
ns = netstack_head;
for (i = 0; i < end; i++) {
break;
}
/* skip those that aren't really here */
break;
}
end++;
}
}
return (ns);
}