/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Starcat Management Network Driver
*
* ****** NOTICE **** This file also resides in the SSC gate as
* ****** NOTICE **** made here must be propagated there as well.
*
*/
#include <sys/ethernet.h>
#include <sys/byteorder.h>
#include <netinet/igmp_var.h>
#include <sys/autoconf.h>
#if defined(DEBUG)
/*
 * Forward declarations for debug-only print routines, one per internal
 * driver structure (upper stream, per-device state, lower destination,
 * path/pathgroup records, timers, and background work requests).
 * Compiled only when DEBUG is defined.
 */
static void man_print_msp(manstr_t *);
static void man_print_man(man_t *);
static void man_print_mdp(man_dest_t *);
static void man_print_dev(man_dev_t *);
static void man_print_mip(mi_path_t *);
static void man_print_mtp(mi_time_t *);
static void man_print_mpg(man_pg_t *);
static void man_print_path(man_path_t *);
static void man_print_work(man_work_t *);
/*
* Set manstr_t dlpistate (upper half of multiplexor)
*/
/*
* Set man_dest_t dlpistate (lower half of multiplexor)
*/
"not used", /* 0x00 */
"DL_PROMISC_PHYS", /* 0x01 */
"DL_PROMISC_SAP", /* 0x02 */
"DL_PROMISC_MULTI" /* 0x03 */
};
"DL_INFO_REQ", /* 0x00 */
"DL_BIND_REQ", /* 0x01 */
"DL_UNBIND_REQ", /* 0x02 */
"DL_INFO_ACK", /* 0x03 */
"DL_BIND_ACK", /* 0x04 */
"DL_ERROR_ACK", /* 0x05 */
"DL_OK_ACK", /* 0x06 */
"DL_UNITDATA_REQ", /* 0x07 */
"DL_UNITDATA_IND", /* 0x08 */
"DL_UDERROR_IND", /* 0x09 */
"DL_UDQOS_REQ", /* 0x0a */
"DL_ATTACH_REQ", /* 0x0b */
"DL_DETACH_REQ", /* 0x0c */
"DL_CONNECT_REQ", /* 0x0d */
"DL_CONNECT_IND", /* 0x0e */
"DL_CONNECT_RES", /* 0x0f */
"DL_CONNECT_CON", /* 0x10 */
"DL_TOKEN_REQ", /* 0x11 */
"DL_TOKEN_ACK", /* 0x12 */
"DL_DISCONNECT_REQ", /* 0x13 */
"DL_DISCONNECT_IND", /* 0x14 */
"DL_SUBS_UNBIND_REQ", /* 0x15 */
"DL_LIARLIARPANTSONFIRE", /* 0x16 */
"DL_RESET_REQ", /* 0x17 */
"DL_RESET_IND", /* 0x18 */
"DL_RESET_RES", /* 0x19 */
"DL_RESET_CON", /* 0x1a */
"DL_SUBS_BIND_REQ", /* 0x1b */
"DL_SUBS_BIND_ACK", /* 0x1c */
"DL_ENABMULTI_REQ", /* 0x1d */
"DL_DISABMULTI_REQ", /* 0x1e */
"DL_PROMISCON_REQ", /* 0x1f */
"DL_PROMISCOFF_REQ", /* 0x20 */
"DL_DATA_ACK_REQ", /* 0x21 */
"DL_DATA_ACK_IND", /* 0x22 */
"DL_DATA_ACK_STATUS_IND", /* 0x23 */
"DL_REPLY_REQ", /* 0x24 */
"DL_REPLY_IND", /* 0x25 */
"DL_REPLY_STATUS_IND", /* 0x26 */
"DL_REPLY_UPDATE_REQ", /* 0x27 */
"DL_REPLY_UPDATE_STATUS_IND", /* 0x28 */
"DL_XID_REQ", /* 0x29 */
"DL_XID_IND", /* 0x2a */
"DL_XID_RES", /* 0x2b */
"DL_XID_CON", /* 0x2c */
"DL_TEST_REQ", /* 0x2d */
"DL_TEST_IND", /* 0x2e */
"DL_TEST_RES", /* 0x2f */
"DL_TEST_CON", /* 0x30 */
"DL_PHYS_ADDR_REQ", /* 0x31 */
"DL_PHYS_ADDR_ACK", /* 0x32 */
"DL_SET_PHYS_ADDR_REQ", /* 0x33 */
"DL_GET_STATISTICS_REQ", /* 0x34 */
"DL_GET_STATISTICS_ACK", /* 0x35 */
};
"DL_UNBOUND", /* 0x00 */
"DL_BIND_PENDING", /* 0x01 */
"DL_UNBIND_PENDING", /* 0x02 */
"DL_IDLE", /* 0x03 */
"DL_UNATTACHED", /* 0x04 */
"DL_ATTACH_PENDING", /* 0x05 */
"DL_DETACH_PENDING", /* 0x06 */
"DL_UDQOS_PENDING", /* 0x07 */
"DL_OUTCON_PENDING", /* 0x08 */
"DL_INCON_PENDING", /* 0x09 */
"DL_CONN_RES_PENDING", /* 0x0a */
"DL_DATAXFER", /* 0x0b */
"DL_USER_RESET_PENDING", /* 0x0c */
"DL_PROV_RESET_PENDING", /* 0x0d */
"DL_RESET_RES_PENDING", /* 0x0e */
"DL_DISCON8_PENDING", /* 0x0f */
"DL_DISCON9_PENDING", /* 0x10 */
"DL_DISCON11_PENDING", /* 0x11 */
"DL_DISCON12_PENDING", /* 0x12 */
"DL_DISCON13_PENDING", /* 0x13 */
"DL_SUBS_BIND_PND", /* 0x14 */
"DL_SUBS_UNBIND_PND", /* 0x15 */
};
/*
 * Debug string table mapping link-state codes (0x0-0x4) to printable
 * names. NOTE(review): index values presumably correspond to the
 * driver's MAN_LINK* state constants — confirm against the header.
 */
static const char *lss[] = {
"UNKNOWN", /* 0x0 */
"INIT", /* 0x1 */
"GOOD", /* 0x2 */
"STALE", /* 0x3 */
"FAIL", /* 0x4 */
};
/*
 * Debug string table mapping background work-request type codes
 * (0x0-0xA) to printable names, for tracing work handed to the
 * bgthread (see man_work_alloc/man_bwork).
 */
static char *_mw_type[] = {
"OPEN_CTL", /* 0x0 */
"CLOSE_CTL", /* 0x1 */
"SWITCH", /* 0x2 */
"PATH_UPDATE", /* 0x3 */
"CLOSE", /* 0x4 */
"CLOSE_STREAM", /* 0x5 */
"DRATTACH", /* 0x6 */
"DRDETACH", /* 0x7 */
"STOP", /* 0x8 */
"DRSWITCH", /* 0x9 */
"KSTAT_UPDATE" /* 0xA */
};
#else /* DEBUG */
/*
* Set manstr_t dlpistate (upper half of multiplexor)
*/
/*
* Set man_dest_t dlpistate (lower half of multiplexor)
*/
#endif /* DEBUG */
/*
* Start of kstat-related declarations
*/
typedef struct man_kstat_info_s {
int mk_flags;
/*
* Master declaration macro, note that it uses token pasting
*/
/*
* Obsolete forms don't have the _sinceswitch forms, they are all errors
*/
/*
* The only non-counters don't have any other aliases
*/
/*
* Normal counter forms
*/
/*
* Error counters need special MK_ERROR flag only for the non-AP form
*/
/*
* These AP-specific stats are not backed by physical statistics
*/
/*
* START of the actual man_kstat_info declaration using above macros
*/
/*
*/
MK_NOTCOUNTER64("ifspeed"),
/*
* These are new MIB-II stats, per PSARC 1997/198
*/
/*
* Error values
*/
/*
* These are the 64-bit values, they fallback to 32-bit values
*/
/* New AP switching statistics */
MK_NOTPHYS64("man_switches"),
MK_NOTPHYS64("man_link_fails"),
MK_NOTPHYS64("man_link_stales"),
MK_NOTPHYS64("man_icmpv4_probes"),
MK_NOTPHYS64("man_icmpv6_probes"),
};
/*
* Miscellaneous ethernet stuff.
*
* MANs DL_INFO_ACK template.
*/
DL_INFO_ACK, /* dl_primitive */
ETHERMTU, /* dl_max_sdu */
0, /* dl_min_sdu */
MAN_ADDRL, /* dl_addr_length */
DL_ETHER, /* dl_mac_type */
0, /* dl_reserved */
0, /* dl_current_state */
-2, /* dl_sap_length */
DL_CLDLS, /* dl_service_mode */
0, /* dl_qos_length */
0, /* dl_qos_offset */
0, /* dl_range_length */
0, /* dl_range_offset */
DL_STYLE2, /* dl_provider_style */
sizeof (dl_info_ack_t), /* dl_addr_offset */
DL_VERSION_2, /* dl_version */
ETHERADDRL, /* dl_brdcst_addr_length */
0 /* dl_growth */
};
/*
* Ethernet broadcast address definition.
*/
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
/*
* Set via MAN_SET_SC_IPADDRS ioctl.
*/
/*
* Set via MAN_SET_SC_IP6ADDRS ioctl.
*/
/*
* IP & ICMP constants
*/
#ifndef ETHERTYPE_IPV6
#endif
/*
* Function prototypes.
*
* Upper multiplexor functions.
*/
static int man_configure(queue_t *);
static int man_deconfigure(void);
static void man_set_optimized_dest(manstr_t *);
static void man_cancel_timers(man_adest_t *);
static void man_dl_clean(mblk_t **);
static int man_open_ctl();
static void man_close_ctl();
/*
*/
/*
* Lower multiplexor functions.
*/
static int man_dlioc_replay(man_dest_t *);
/*
* Link failover routines.
*/
static int man_gettimer(int, man_dest_t *);
static void man_linkcheck_timer(void *);
static int man_needs_linkcheck(man_dest_t *);
static int man_do_autoswitch(man_dest_t *);
/*
* Functions normally executing outside of the STREAMs perimeter.
*/
/*
* Functions supporting/processing work requests.
*/
static void man_bwork(void);
static void man_iwork(void); /* inside perimeter */
man_work_t *man_work_alloc(int, int);
void man_work_free(man_work_t *);
/*
* Functions implementing/supporting failover.
*
* Executed inside perimeter.
*/
static int man_do_dr_attach(man_work_t *);
static int man_do_dr_switch(man_work_t *);
static void man_do_dr_detach(man_work_t *);
static int man_iswitch(man_work_t *);
static void man_ifail_dest(man_dest_t *);
static void man_add_dests(man_pg_t *);
static void man_reset_dlpi(void *);
static mblk_t *man_alloc_ubreq_dreq();
/*
* Executed outside perimeter (us man_lock for synchronization).
*/
static void man_bclose(man_adest_t *);
static int man_plumb(man_dest_t *);
static void man_unplumb(man_dest_t *);
static void man_linkrec_insert(man_linkrec_t *);
static queue_t *man_linkrec_find(int);
/*
* Functions supporting pathgroups
*/
static int man_path_kstat_init(man_path_t *);
static void man_path_kstat_uninit(man_path_t *);
/*
* Functions supporting kstat reporting.
*/
static int man_kstat_update(kstat_t *, int);
static void man_do_kstats(man_work_t *);
static void man_update_path_kstats(man_t *);
static void man_kstat_named_init(kstat_named_t *, int);
/*
* Functions supporting ndd.
*/
static int man_param_register(param_t *, int);
cred_t *);
static char *man_inet_ntoa(in_addr_t);
static void man_param_cleanup(void);
/*
*/
extern int man_get_iosram(manc_t *);
extern int man_domain_configure(void);
extern int man_domain_deconfigure(void);
extern int man_dossc_switch(uint32_t);
extern int man_is_on_domain;
/*
* Driver Globals protected by inner perimeter.
*/
/*
* Driver globals protected by man_lock.
*/
/*
* These parameters are accessed via ndd to report the link configuration
* for the MAN driver. They can also be used to force configuration changes.
*/
/* ------------------------------------------------------------------------- */
/* min max value name */
{ 0, 0xFFFF, 0, "man_debug_level"},
};
/* DISPLAY */
MAN_NDD_SETABLE, /* man_debug_level */
};
/*
* STREAMs information.
*/
MAN_IDNUM, /* mi_idnum */
MAN_IDNAME, /* mi_idname */
MAN_MINPSZ, /* mi_minpsz */
MAN_MAXPSZ, /* mi_maxpsz */
MAN_HIWAT, /* mi_hiwat */
MAN_LOWAT /* mi_lowat */
};
/*
* Upper read queue does not do anything.
*/
NULL, /* qi_putp */
NULL, /* qi_srvp */
man_open, /* qi_qopen */
man_close, /* qi_qclose */
NULL, /* qi_qadmin */
&man_m_info, /* qi_minfo */
NULL /* qi_mstat */
};
man_lrput, /* qi_putp */
man_lrsrv, /* qi_srvp */
man_open, /* qi_qopen */
man_close, /* qi_qclose */
NULL, /* qi_qadmin */
&man_m_info, /* qi_minfo */
NULL /* qi_mstat */
};
man_uwput, /* qi_putp */
man_uwsrv, /* qi_srvp */
man_open, /* qi_qopen */
man_close, /* qi_qclose */
NULL, /* qi_qadmin */
&man_m_info, /* qi_minfo */
NULL /* qi_mstat */
};
NULL, /* qi_putp */
man_lwsrv, /* qi_srvp */
man_open, /* qi_qopen */
man_close, /* qi_qclose */
NULL, /* qi_qadmin */
&man_m_info, /* qi_minfo */
NULL /* qi_mstat */
};
&man_urinit, /* st_rdinit */
&man_uwinit, /* st_wrinit */
&man_lrinit, /* st_muxrinit */
&man_lwinit /* st_muxwrinit */
};
/*
* Module linkage information for the kernel.
*
* Locking Theory:
* D_MTPERMOD - Only an inner perimeter: All routines single
* threaded (except put, see below).
* D_MTPUTSHARED - Put routines enter inner perimeter shared (not
* exclusive) for concurrency/performance reasons.
*
* Anyone who needs exclusive outer perimeter permission (changing
* global data structures) does so via qwriter() calls. The
* background thread does all his work outside of perimeter and
* submits work via qtimeout() when data structures need to be
* modified.
*/
&mod_driverops, /* Module type. This one is a pseudo driver */
"MAN MetaDriver",
&man_ops, /* driver ops */
};
(void *) &modldrv,
};
/* Virtual Driver loader entry points */
int
_init(void)
{
if (status != 0) {
" error = %d", status);
return (status);
}
if (status != 0) {
" error = %d", status);
(void) mod_remove(&modlinkage);
return (status);
}
return (0);
}
/*
* _info is called by modinfo().
*/
int
{
int status;
return (status);
}
/*
* _fini called by modunload() just before driver is unloaded from memory.
*/
int
_fini(void)
{
int status = 0;
/*
* The only upper stream left should be man_ctl_lh. Note that
* man_close (upper stream) is synchronous (i.e. it waits for
* all STREAMS framework associated with the upper stream to be
* torn down). This guarantees that man_ctl_lh will never become
* NULL until no one is around to notice. This assumption is made
* in a few places like man_plumb, man_unplumb, etc.
*/
return (EBUSY);
/*
* Deconfigure the driver.
*/
status = man_deconfigure();
if (status)
goto exit;
/*
* need to detach every instance of the driver
*/
if (status != 0)
goto exit;
/*
* Free up locks.
*/
exit:
return (status);
}
/*
* Deconfigure the MAN driver.
*/
static int
{
int status = 0;
if (man_is_on_domain) {
if (status != 0)
goto exit;
}
man_param_cleanup(); /* Free up NDD resources */
/*
* I may have to handle straggling work requests. Just qwait?
* or cvwait? Called from _fini - TBD
*/
if (man_ctl_lh != NULL) {
}
}
if (man_bwork_id != NULL) {
}
}
exit:
return (status);
}
/*
* man_attach - allocate resources and attach an instance of the MAN driver
* The <man>.conf file controls how many instances of the MAN driver are
* available.
*
* dip - devinfo of node
* cmd - one of DDI_ATTACH | DDI_RESUME
*
* returns - success - DDI_SUCCESS
* - failure - DDI_FAILURE
*/
static int
{
int minor_node_created = 0;
int instance;
if (cmd != DDI_ATTACH) {
return (DDI_FAILURE);
}
if (man_get_our_etheraddr(&man_eaddr))
return (DDI_FAILURE);
/*
* we assume that instance is always equal to zero.
* and there will always only be one instance.
* this is done because when dman opens itself via DMAN_INT_PATH,
* the path assumes that the instance number is zero.
* if we ever need to support multiple instances of the dman
* driver or non-zero instances, this will have to change.
*/
/*
* Allocate per device info pointer and link in to global list of
* MAN devices.
*/
return (DDI_FAILURE);
}
/*
* Set ethernet address. Note that this address is duplicated
* at md_src_eaddr.
*/
/*
* Initialize failover-related fields (timers and such),
* taking values from properties if present.
*/
"init_time", MAN_INIT_TIME);
"linkcheck_time", MAN_LINKCHECK_TIME);
"man_linkstale_time", MAN_LINKSTALE_TIME);
"man_linkstale_retries", MAN_LINKSTALE_RETRIES);
"man_dr_delay", MAN_DR_DELAY);
"man_dr_retries", MAN_DR_RETRIES);
"man_kstat_waittime", MAN_KSTAT_WAITTIME);
"man_dlpireset_time", MAN_DLPIRESET_TIME);
minor_node_created = 1;
} else {
goto exit;
}
minor_node_created = 1;
} else {
goto exit;
}
/*
* Allocate meta kstat_t for this instance of the driver.
* Note that each of man_path_t keeps track of the kstats
* for the real devices via mp_last_knp.
*/
#ifdef kstat
#endif
(void *)manp);
goto exit;
}
ddi_get_instance(dip)));
return (DDI_SUCCESS);
exit:
if (minor_node_created)
ddi_get_instance(dip)));
return (DDI_FAILURE);
}
static int
{
int status = 0;
if (man_is_on_domain) {
return (status);
} else {
}
return (status);
}
/*
* man_detach - detach an instance of a driver
*
* dip - devinfo of node
* cmd - one of DDI_DETACH | DDI_SUSPEND
*
* returns - success - DDI_SUCCESS
* - failure - DDI_FAILURE
*/
static int
{
int instance;
if (cmd != DDI_DETACH) {
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
" for instance = %d, dip = 0x%p!\n", instance,
(void *)dip);
return (DDI_FAILURE);
}
if (manp->man_refcnt != 0) {
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* man_info:
* As a standard DLPI style-2, man_info() should always return
* DDI_FAILURE.
*
* However, man_open() has special treatment for a direct open
* via kstr_open() without going through the CLONE driver.
* To make this special kstr_open() work, we need to map
* minor of 0 to instance 0.
*/
/*ARGSUSED*/
static int
{
switch (infocmd) {
case DDI_INFO_DEVT2DEVINFO:
break;
case DDI_INFO_DEVT2INSTANCE:
if (minor == 0) {
return (DDI_SUCCESS);
}
break;
default:
break;
}
return (DDI_FAILURE);
}
/* Standard Device Driver entry points */
/*
* man_open - open the device
*
* rq - upper read queue of the stream
* devp - pointer to a device number
* flag - information passed from the user program open(2) system call
* sflag - stream flags
* credp - pointer to the cred(9S) user credential structure
*
* returns - success - 0
* - failure - errno value for failure
*/
/*ARGSUSED*/
static int
{
int status = 0;
/*
* reopen; q_ptr set to msp at open completion.
*/
return (0);
}
/*
* Allocate and initialize manstr_t for this device.
*/
/*
* Get the MAN driver configured on 1st open. Note that the only way
* we get sflag != CLONEOPEN is via the call in man_plumbctl(). All
* CLONEOPEN calls to man_open will be via the file system
*/
/*
* First open calls man_configure. Everyone qwaits until
* we get it open. See man_open_ctl() comments for mutex
* lock/synchronization info.
*/
if (man_config_state == MAN_UNCONFIGURED) {
if (status != 0)
goto exit;
} else {
while (man_config_state == MAN_CONFIGURING) {
if (status == 0) {
goto exit;
}
}
if (man_config_error) {
goto exit;
}
}
}
/*
* Determine minor device number. man_open serialized by
* D_MTPERMOD.
*/
minordev = 0;
break;
minordev++;
}
} else {
/*
* Should only get here from man_plumbctl().
*/
/*LINTED E_ASSIGN_UINT_TO_SIGNED_INT*/
/*
* No need to protect this here as all opens are
* qwaiting, and the bgthread (who is doing this open)
* is the only one who mucks with this variable.
*/
}
if (minordev == 0)
/*
* Link new entry into global list of active entries.
*/
/*
* Disable automatic enabling of our write service procedure.
* We control this explicitly.
*/
exit:
/*
* Clean up on error.
*/
if (status) {
} else
return (status);
}
/*
* Get the driver configured. Called from first man_open with exclusive
* inner perimeter.
*/
static int
{
int status = 0;
/*
* Initialize NDD parameters.
*/
if (!man_ndlist &&
goto exit;
}
/*
* Start up background thread.
*/
if (man_bwork_id == NULL)
/*
* Submit work to get control stream opened. Qwait until its
* done. See man_open_ctl for mutex lock/synchronization info.
*/
if (man_ctl_lh == NULL) {
/*
* Submit work and wait. When man_open_ctl exits
* man_open, it will cause qwait below to return.
*/
}
}
/*
* If on domain, setup IOSRAM and build the pathgroups
* automatically.
*/
if ((status == 0) && man_is_on_domain)
exit:
if (status != 0)
else
return (status);
}
/*
* man_close - close the device
*
* rq - upper read queue of the stream
*
* returns - success - 0
* - failure - errno value for failure
*/
static int
{
/*
* Unlink the per-Stream entry from the active list and free it.
*/
else {
return (ENODEV);
}
}
/*
* Still DL_ATTACHED
*/
}
/*
* Driver about to unload.
*/
man_ctl_wq = NULL;
}
return (0);
}
/*
* Ask bgthread to tear down lower stream and qwait
* until its done.
*/
static void
{
int i;
/*
* Excise lower dests array, set it closing and hand it to
* background thread to dispose of.
*/
for (i = 0; i < MAN_MAX_DESTS; i++) {
if (mdp[i].md_lc_timer_id != 0) {
mdp[i].md_lc_timer_id = 0;
}
}
}
mdp[i].md_dmp_count = 0;
}
/*
* Dump any DL type messages previously caught.
*/
/*
* We need to clear fast path flag when dlioc messages are cleaned.
*/
/*
* MAN_WORK_CLOSE_STREAM work request preallocated by caller.
*/
}
/*
* man_uwput - handle DLPI messages issued from upstream, the write
* side of the upper half of multiplexor. Called with shared access to
* the inner perimeter.
*
* wq - upper write queue of mxx
* mp - mblk ptr to DLPI request
*/
static int
{
" msp(0x%p)\n",
#if DEBUG
("man_uwput: M_IOCTL ioc_cmd(0x%x)\n",
("man_uwput: M_CTL ioc_cmd(0x%x)\n",
}
}
#endif /* DEBUG */
case M_DATA:
break;
}
} else {
}
break;
case M_PROTO:
case M_PCPROTO:
} else {
}
break;
case M_IOCTL:
case M_IOCDATA:
break;
case M_CTL:
break;
case M_FLUSH:
} else {
}
break;
default:
("man_uwput: illegal mblk(0x%p) type(0x%x)\n",
break;
} /* End switch */
return (0);
}
/*
* man_start - handle data messages issued from upstream. Send down
* to particular man_dest based on ether_addr, otherwise send out to all
* valid man_dests.
*
* wq - upper write queue of mxx
* mp - mblk ptr to DLPI request
* caller - Caller ID for decision making on canput failure
*
* Returns:
* 0 - Data xmitted or No flow control situation detected.
* 1 - Flow control situation detected.
*
* STREAMS Flow Control: can be used if there is only one destination
* for a stream (1 to 1 multiplexor). In this case, we will use the upper
* write queue to store mblks when in flow control. If there are multiple
* destinations, we cannot use the STREAMs based flow control (1 to many
* multiplexor). In this case, we will use the lower write queue to store
* mblks when in flow control. Since destinations come and go, we may
* transition between 1-to-1 and 1-to-m. So it may be the case that we have
* some mblks stored on the upper queue, and some on the lower queue. However,
* we will never send mblks out of order. See man_uwput and man_start_lower().
*
* A simple flow control mechanism is implemented for the deferred mblk list,
* as this list is expected to be used temporarily for a very short
* period required for switching paths. This flow control mechanism is
* used only as a defensive approach to avoid infinite growth of this list.
*/
static int
{
int i;
int status = 0;
return (0);
}
/*
* Optimization if only one valid destination.
*/
if (IS_UNICAST(eap)) {
/*
* TBD - This needs to be optimized (some bits in
* ehp->dhost will act as an index).
*/
for (i = 0; i < MAN_MAX_DESTS; i++) {
break;
}
} else {
/*
* 1 to 1 multiplexing, use upper wq for flow control.
*/
}
/*
* Its going somewhere specific
*/
} else {
}
} else {
/*
* Broadcast or multicast - send everyone a copy.
*/
for (i = 0; i < MAN_MAX_DESTS; i++) {
continue;
} else {
" failed!"));
}
}
} else {
else
}
}
return (status);
}
/*
* Send a DL_UNITDATA or M_DATA fastpath data mblk to a particular
* destination. Other mblk types are sent down via man_dlpi_senddown().
*
* Returns:
* 0 - Data xmitted
* 1 - Data not xmitted due to flow control.
*/
static int
{
int status = 0;
/*
* Lower stream ready for data transmit.
*/
/*
* Check for flow control conditions for lower
* stream.
*/
} else {
/*
* A simple flow control mechanism.
*/
} else {
/*
* Add 'mp' to the deferred
* msg list.
*/
mdp->md_dmp_count +=
}
/*
* Inform flow control situation
* to the caller.
*/
status = 1;
goto exit;
}
/*
* If 1 to 1 mux, use upper write queue for
* flow control.
*/
/*
* putbq() message and indicate
* flow control situation to the
* caller.
*/
status = 1;
goto exit;
}
/*
* 1 to many mux, use lower write queue for
* flow control. Be mindful not to overflow
* the lower MAN STREAM q.
*/
} else {
" lower q flow controlled -"
" discarding packet"));
goto exit;
}
}
} else {
/*
* man_lwsrv is draining flow controlled mblks.
*/
if (canputnext(wq))
else
status = 1;
}
goto exit;
}
/*
* Lower stream in transition, do flow control.
*/
status = 1;
"man_start_lower: no dest for mdp(0x%p), caller(%d)!",
goto exit;
}
(void *)mdp));
goto exit;
}
/*
* Defer until PLUMBED and DL_IDLE. See man_lwsrv().
*/
/*
* Upper stream sending data down, add to deferred mblk
* list for stream.
*/
} else {
} else {
}
}
}
goto exit;
}
exit:
return (status);
}
/*
* or pass thru to the physical driver below. Note that most M_IOCTLs we
* care about come down the control msp, but the IOC ones come down the IP.
* Called with exclusive inner perimeter.
*
* wq - upper write queue of mxx
* mp - mblk ptr to DLPI ioctl request
*/
static void
{
#ifdef DEBUG
{
("man_ioctl: wq(0x%p) mp(0x%p) cmd(%s)\n",
}
#endif /* DEBUG */
/*
* Handle the requests...
*/
case I_PLINK:
break;
case I_PUNLINK:
break;
case MAN_SETPATH:
break;
case MAN_GETEADDR:
break;
case MAN_SET_LINKCHECK_TIME:
break;
case MAN_SET_SC_IPADDRS:
break;
case MAN_SET_SC_IP6ADDRS:
break;
case DLIOCRAW:
else {
}
break;
case DL_IOC_HDR_INFO:
break;
case MAN_ND_GET:
case MAN_ND_SET:
break;
default:
break;
}
exit:
}
/*
* man_plink: handle I_PLINK requests on the control stream
*/
void
{
int status = 0;
/*
* Create a record to hold lower stream info. man_plumb will
* retrieve it after calling ldi_ioctl(I_PLINK)
*/
goto exit;
}
exit:
if (status)
else
}
/*
* man_unplink - handle I_PUNLINK requests on the control stream
*/
void
{
}
void
{
}
static queue_t *
{
if (man_linkrec_head == NULL)
goto exit;
} else {
break;
}
}
goto exit;
exit:
return (wq);
}
/*
* Set instance linkcheck timer value.
*/
static void
{
int error;
if (error != 0)
goto exit;
goto exit;
}
exit:
if (error)
else
}
/*
* Man path ioctl processing. Should only happen on the SSC. Called
* with exclusive inner perimeter.
*/
static void
{
int error;
if (error != 0)
goto exit;
exit:
if (error)
else
}
/*
* Get the local ethernet address of this machine.
*/
static void
{
int error;
if (error != 0) {
return;
}
}
/*
* Set my SC and other SC IPv4 addresses for use in man_pinger routine.
*/
static void
{
int error;
if (error != 0)
goto exit;
#ifdef DEBUG
{
(void *) &man_sc_ipaddrs.ip_other_sc_ipaddr,
(void *) &man_sc_ipaddrs.ip_my_sc_ipaddr,
}
#endif /* DEBUG */
exit:
if (error)
else
}
/*
* Set my SC and other SC IPv6 addresses for use in man_pinger routine.
*/
static void
{
int error;
if (error != 0)
goto exit;
#ifdef DEBUG
{
(void *) &man_sc_ip6addrs.ip6_other_sc_ipaddr,
(void *) &man_sc_ip6addrs.ip6_my_sc_ipaddr,
}
#endif /* DEBUG */
exit:
if (error)
else
}
/*
* M_DATA fastpath info request.
*/
static void
{
int status = 0;
goto exit;
}
if (status != 0)
goto exit;
/*
* Sanity check the DL_UNITDATA_REQ destination address
* offset and length values.
*/
goto exit;
}
/*
* Allocate a new mblk to hold the ether header.
*/
goto exit;
}
/* We only need one dl_ioc_hdr mblk for replay */
/* Forward the packet to all lower destinations. */
goto exit;
}
/*
* Fill in the ether header.
*/
/*
* Link new mblk in after the "request" mblks.
*/
exit:
status));
if (status) {
} else {
}
}
/*
* man_uwsrv - Upper write queue service routine to handle deferred
* DLPI messages issued from upstream, the write side of the upper half
* of multiplexor. It is also used by man_bwork to switch the lower
* multiplexor.
*
* wq - upper write queue of mxx
*/
static int
{
int status;
goto done;
/*
* Can probably remove this as I never put data messages
* here.
*/
case M_DATA:
if (manp) {
if (status) {
/*
* man_start() indicated flow control
* situation, stop processing now.
*/
goto break_loop;
}
} else
break;
case M_PROTO:
case M_PCPROTO:
if (status) {
/*
* man_proto() indicated flow control
* situation detected by man_start(),
* stop processing now.
*/
goto break_loop;
}
break;
default:
(void *)mp));
break;
}
}
/*
* Check to see if bgthread wants us to do something inside the
* perimeter.
*/
man_iwork();
}
done:
return (0);
}
/*
* man_proto - handle DLPI protocol requests issued from upstream.
* Called by man_uwsrv(). We disassociate upper and lower multiplexor
* DLPI state transitions. The upper stream here (manstr_t) transitions
* appropriately, saves the DLPI requests via man_dlpi(), and then
* arranges for the DLPI request to be sent down via man_dlpi_senddown() if
* appropriate.
*
* wq - upper write queue of mxx
* mp - mbl ptr to protocol request
*/
static int
{
int flow_status = 0;
("man_proto: mp(0x%p) prim(%s)\n", (void *)mp,
switch (dlp->dl_primitive) {
case DL_UNITDATA_REQ:
break;
case DL_ATTACH_REQ:
break;
case DL_DETACH_REQ:
break;
case DL_BIND_REQ:
break;
case DL_UNBIND_REQ:
break;
case DL_INFO_REQ:
break;
case DL_PROMISCON_REQ:
break;
case DL_PROMISCOFF_REQ:
break;
case DL_ENABMULTI_REQ:
break;
case DL_DISABMULTI_REQ:
break;
case DL_PHYS_ADDR_REQ:
break;
case DL_SET_PHYS_ADDR_REQ:
break;
default:
dlp->dl_primitive));
break;
} /* End switch */
return (flow_status);
}
static int
{
int flow_status = 0;
return (flow_status);
}
/*
* Validate destination address format.
*/
return (flow_status);
}
/*
* Error if no M_DATA follows.
*/
return (flow_status);
}
return (flow_status);
}
/*
* Handle DL_ATTACH_REQ.
*/
static void
{
short ppa;
int dlerror = 0;
int status = 0;
/*
* Attach us to MAN PPA (device instance).
*/
goto exit;
}
goto exit;
}
goto exit;
}
manp->man_refcnt++;
did_refcnt = TRUE;
/*
* Create a DL replay list for the lower stream. These wont
* actually be sent down until the lower streams are made active
* (sometime after the call to man_init_dests below).
*/
goto exit;
}
/*
* Make copy for dlpi resync of upper and lower streams.
*/
goto exit;
}
/* TBD - need to clean off ATTACH req on failure here. */
goto exit;
}
/*
* man_init_dests/man_start_dest needs these set before call.
*/
/*
* Allocate and init lower destination structures.
*/
/*
* If we cant get the lower streams ready, then
* remove the messages from the DL replay list and
* fail attach.
*/
}
goto exit;
}
exit:
if (dlerror == 0) {
} else {
if (did_refcnt) {
manp->man_refcnt--;
}
}
}
/*
* Called at DL_ATTACH time.
* Man_lock is held to protect pathgroup list(man_pg).
*/
static int
{
int i;
return (ENOMEM);
for (i = 0; i < MAN_MAX_DESTS; i++) {
}
return (0);
}
/*
* Get a destination ready for use.
*/
static void
{
/*
* Set up linktimers so that first time through, we will do
* a failover.
*/
/*
* As an optimization, if there is only one destination,
* remember the destination pointer. Used by man_start().
*/
}
static void
{
int count = 0;
int i;
for (i = 0; i < MAN_MAX_DESTS; i++) {
count++;
}
}
if (count == 1)
else
}
/*
* Catch dlpi message for replaying, and arrange to send it down
* to any destinations not PLUMBING. See man_dlpi_replay().
*/
static int
{
int status;
if (status == 0)
return (status);
}
/*
* Catch IOCTL type DL_ messages.
*/
static int
{
int status;
if (status == 0)
return (status);
}
/*
* We catch all DLPI messages that we have to resend to a new AP'ed
* device to put him in the right state. We link these messages together
* w/ their b_next fields and hang it off of msp->ms_dl_mp. We
*
* msp - pointer of stream struct to process
* mblk - pointer to DLPI request to catch
*/
static int
{
unsigned prim;
int status = 0;
goto exit;
}
else {
}
("man_dl_catch: adding %s\n",
exit:
return (status);
}
/*
* Send down a single DLPI M_[PC]PROTO to all currently valid dests.
*
* msp - ptr to NDM stream structure DL_ messages was received on.
* mp - ptr to mblk containing DL_ request.
*/
static int
{
int i;
int status = 0;
goto exit;
for (i = 0; i < MAN_MAX_DESTS; i++) {
} else {
}
}
if (no_dests)
goto exit;
/*
* Build replay and duplicate list for all possible destinations.
*/
for (i = 0; i < MAN_MAX_DESTS; i++) {
if (dstate[i]) {
break;
}
}
}
if (status == 0) {
for (i = 0; i < MAN_MAX_DESTS; i++)
if (dstate[i]) {
}
} else {
for (; i >= 0; i--)
}
exit:
return (status);
}
/*
* man_dlpi_replay - traverse the list of DLPI requests and reapply them to
* get the upper and lower streams into the same state. Called holding inner
* perimeter lock exclusive. Note that we defer M_IOCTL type dlpi messages
* until we get an OK_ACK to our ATTACH (see man_lrsrv and
* man_dlioc_replay).
*
* mdp - pointer to lower queue (destination)
* rmp - list of mblks to send down stream.
*/
static void
{
while (rmp) {
("man_dlpi_replay: mdp(0x%p) sending %s\n",
(void *)mdp,
/*
* insert the lower devices ppa.
*/
}
}
}
static void
{
return;
}
return;
}
return;
}
}
static void
{
/*
* Toss everything.
*/
}
}
/*
* man_dl_release - Remove the corresponding DLPI request from the
* catch list. Walk thru the catch list looking for the other half of
* the pair and delete it. If we are detaching, delete the entire list.
*
* msp - pointer of stream struct to process
* mp - pointer to mblk to first half of pair. We will delete other
* half of pair based on this.
*/
static void
{
goto exit;
/*
* Currently we only clean DL_ PROTO type messages. There is
* no way to turn off M_CTL or DL_IOC stuff other than sending
* down a DL_DETACH, which resets everything.
*/
goto exit;
}
/*
* Selectively find a caught mblk that matches this one and
* remove it from the list
*/
if (matched) {
} else {
break;
}
if (matched) {
}
}
exit:
if (matched) {
}
}
/*
* Compare two DL_ messages. If they are complementary (e.g. DL_UNBIND
* complements DL_BIND), return true.
*/
static int
{
/*
* Primitive to clean off list.
*/
switch (prim1) {
case DL_UNBIND_REQ:
if (prim2 == DL_BIND_REQ)
break;
case DL_PROMISCOFF_REQ:
if (prim2 == DL_PROMISCON_REQ) {
}
break;
case DL_DISABMULTI_REQ:
if (prim2 == DL_ENABMULTI_REQ) {
matched = 1;
}
break;
default:
break;
}
return (matched);
}
/*
* Bind upper stream to a particular SAP. Called with exclusive innerperim
* QPAIR, shared outerperim.
*/
static void
{
return;
}
return;
}
if (xidtest) {
return;
}
if (sap > ETHERTYPE_MAX) {
return;
}
return;
}
}
static void
{
return;
}
return;
}
return;
}
}
static void
{
return;
}
/* Exchange current msg for a DL_INFO_ACK. */
return;
}
/* Fill in the DL_INFO_ACK fields and reply. */
*dlip = man_infoack;
/*
* If attached, return physical address.
*/
} else {
}
}
static void
{
int flag;
return;
}
case DL_PROMISC_PHYS:
break;
case DL_PROMISC_SAP:
break;
case DL_PROMISC_MULTI:
break;
default:
return;
}
/*
* Catch request for replay, and forward down to any
* lower stream.
*/
return;
}
}
static void
{
int flag;
return;
}
case DL_PROMISC_PHYS:
break;
case DL_PROMISC_SAP:
break;
case DL_PROMISC_MULTI:
break;
default:
return;
}
return;
}
return;
}
}
/*
* Enable multicast requests. We might need to track addresses instead of
* just passing things through (see eri_dmreq) - TBD.
*/
static void
{
return;
}
return;
}
if ((len != ETHERADDRL) ||
return;
}
/*
* Catch request for replay, and forward down to any
* lower stream.
*/
return;
}
}
static void
{
return;
}
return;
}
if ((len != ETHERADDRL) ||
return;
}
return;
}
}
static void
{
return;
}
return;
}
switch (type) {
case DL_FACT_PHYS_ADDR:
break;
case DL_CURR_PHYS_ADDR:
break;
default:
return;
}
}
/*
* TBD - this routine probably should be protected w/ an ndd
* tuneable, or a man.conf parameter.
*/
static void
{
return;
}
return;
}
/*
* Error if length of address isn't right or the address
* specified is a multicast or broadcast address.
*/
if ((len != ETHERADDRL) ||
return;
}
/*
* Error if this stream is not attached to a device.
*/
return;
}
/*
* We will also resend DL_SET_PHYS_ADDR_REQ for each dest
* when it is linked under us.
*/
return;
}
}
/*
* These routines make up the lower part of the MAN streams framework.
*/
/*
* man_lwsrv - Deferred mblks for down stream. We end up here when
* the destination is not DL_IDLE when traffic comes downstream.
*
* wq - lower write queue of mxx
*/
static int
{
goto exit;
goto exit;
}
/*
* Arrange to send deferred mp's first, then mblks on the
* service queue. Since we are exclusive in the inner perimeter,
* we dont have to worry about md_lock, like the put procedures,
* which are MTPUTSHARED.
*/
mdp->md_dmp_count = 0;
goto exit;
}
}
/*
* Put it back on queue, making sure to avoid
* infinite loop mentioned in putbq(9F)
*/
break;
}
}
exit:
return (0);
}
/*
* man_lrput - handle DLPI messages issued from downstream.
*
* rq - lower read queue of mxx
* mp - mblk ptr to DLPI request
*
* returns 0
*/
static int
{
#if defined(DEBUG)
char *prim_str;
#endif /* DEBUG */
#if defined(DEBUG)
}
" db_type(0x%x) dl_prim %s", (void *)rq,
#endif /* DEBUG */
/* Turn around */
} else
return (0);
}
return (0);
}
/*
* If we have a destination in the right state, forward on datagrams.
*/
if (MAN_IS_DATA(mp)) {
/*
* go put mblk_t directly up to next queue.
*/
} else {
}
} else {
/*
* Handle in man_lrsrv with exclusive inner perimeter lock.
*/
}
return (0);
}
/*
* Either this is a response from our attempt to sync the upper and lower
* stream states, or it's data. If it's not data, do DL_* response processing
* and transition md_dlpistate accordingly. If it's data, toss it.
*/
static int
{
#if defined(DEBUG)
#endif /* DEBUG */
goto exit;
}
/*
* If we're not connected, or its a datagram, toss it.
*/
continue;
}
/*
* Should be response to man_dlpi_replay. Discard unless there
* is a failure we care about.
*/
case M_PROTO:
case M_PCPROTO:
/* Do proto processing below. */
break;
case M_IOCNAK:
/*
* DL_IOC* failed for some reason.
*/
#if defined(DEBUG)
#endif /* DEBUG */
/* FALLTHRU */
case M_IOCACK:
case M_CTL:
/*
* OK response from DL_IOC*, ignore.
*/
goto dl_reset;
}
/*
* DLPI state processing big theory: We do not rigorously check
* DLPI states (e.g. PENDING stuff). Simple rules:
*
* 1) If we see an OK_ACK to an ATTACH_REQ, dlpistate = DL_UNBOUND.
* 2) If we see an BIND_ACK to a BIND_REQ, dlpistate = DL_IDLE.
* 3) If we see a OK_ACK response to an UNBIND_REQ
* dlpistate = DL_UNBOUND.
* 4) If we see a OK_ACK response to a DETACH_REQ,
* dlpistate = DL_UNATTACHED.
*
* Everything that isn't handle by 1-4 above is handled by 5)
*
* 5) A NAK to any DL_* messages we care about causes
* dlpistate = DL_UNATTACHED and man_reset_dlpi to run
*
* TBD - need a reset counter so we can try a switch if it gets
* too high.
*/
switch (prim) {
case DL_OK_ACK:
switch (cprim) {
case DL_ATTACH_REQ:
if (man_dlioc_replay(mdp)) {
} else {
break;
}
break;
case DL_DETACH_REQ:
break;
case DL_UNBIND_REQ:
/*
* Cancel timer and set md_dlpistate.
*/
if (mdp->md_lc_timer_id != 0) {
(void) quntimeout(man_ctl_wq,
mdp->md_lc_timer_id = 0;
}
}
break;
case DL_BIND_ACK:
/*
* We're ready for data. Get man_lwsrv to run to
* process any deferred data and start linkcheck timer.
*/
if (man_needs_linkcheck(mdp)) {
man_linkcheck_timer, (void *)mdp,
}
break;
case DL_ERROR_ACK:
switch (cprim) {
case DL_ATTACH_REQ:
case DL_BIND_REQ:
case DL_DISABMULTI_REQ:
case DL_ENABMULTI_REQ:
case DL_PROMISCON_REQ:
case DL_PROMISCOFF_REQ:
case DL_SET_PHYS_ADDR_REQ:
break;
/*
* ignore error TBD (better comment)
*/
case DL_UNBIND_REQ:
case DL_DETACH_REQ:
break;
}
("\tdl_errno %d dl_unix_errno %d cprim %s",
break;
case DL_UDERROR_IND:
("\tdl_errno %d unix_errno %d",
break;
case DL_INFO_ACK:
break;
default:
/*
* We should not get here.
*/
prim);
break;
}
if (need_dl_reset) {
}
mdp->md_dlpierrors++;
if (mdp->md_lc_timer_id != 0) {
mdp->md_lc_timer_id = 0;
}
(man_is_on_domain ||
/*
* Autoswitching is disabled for instance 0
* on the SC as we expect the domain to
* initiate the path switching.
*/
} else {
man_reset_dlpi, (void *)mdp,
}
}
} /* End while (getq()) */
exit:
return (0);
}
static int
{
/*
* Not ready for linkcheck.
*/
return (0);
/*
* Linkchecking needs to be done on IP streams. For domain, all
* driver instances need checking, for SC only instance 1 needs it.
*/
return (1);
/*
* Linkcheck not need on this link.
*/
return (0);
}
/*
* The following routines process work requests posted to man_iwork_q
* from the non-STREAMS half of the driver (see man_bwork.c). The work
* requires access to the inner perimeter lock of the driver. This
* lock is acquired by man_uwsrv, who calls man_iwork to process the
* man_iwork_q->
*/
/*
* The man_bwork has posted some work for us to do inside the
* perimeter. This mainly involves updating lower multiplexor data
* structures (non-blocking type stuff). So, we can hold the man_lock
* until we are done processing all work items. Note that some of these
* routines in turn submit work back to the bgthread, which they can do
* since we hold the man_lock.
*/
static void
{
int wp_finished;
(void *)man_iwork_q->q_work));
while (man_iwork_q->q_work) {
wp_finished = TRUE;
case MAN_WORK_DRATTACH:
(void) man_do_dr_attach(wp);
break;
case MAN_WORK_DRSWITCH:
/*
* Return status to man_dr_detach immediately. If
* no error submitting SWITCH request, man_iswitch
* or man_bclose will cv_signal man_dr_detach on
* completion of SWITCH work request.
*/
if (man_do_dr_switch(wp) == 0)
wp_finished = FALSE;
break;
case MAN_WORK_DRDETACH:
break;
case MAN_WORK_SWITCH:
if (man_iswitch(wp))
wp_finished = FALSE;
break;
case MAN_WORK_KSTAT_UPDATE:
break;
default:
break;
}
/*
* If we've completed the work request, delete, or
* cv_signal waiter.
*/
if (wp_finished) {
else
}
}
}
/*
* man_dr_detach has submitted a request to DRSWITCH a path.
* He is in cv_wait_sig(wp->mw_cv). We forward the work request on to
* man_bwork as a switch request. It should end up back at
* man_iwork, who will cv_signal(wp->mw_cv) man_dr_detach.
*
* Called holding inner perimeter lock.
* man_lock is held to synchronize access to pathgroup list(man_pg).
*/
static int
{
int status = 0;
goto exit;
}
goto exit;
}
goto exit;
}
/*
* Check to see if detaching device is active. If so, activate
* an alternate.
*/
goto exit;
}
mpath.mip_man_ppa = 0;
/*
* DR thread is sleeping on wp->mw_cv. We change the work
* request from DRSWITCH to SWITCH and submit it to
* for processing by man_bwork (via man_pg_cmd). At
* completion the SWITCH work request is processed by
* man_iswitch() or man_bclose and the DR thread will
* be cv_signal'd.
*/
goto exit;
} else {
/*
* Tell man_dr_detach that detaching device is not currently
* in use.
*/
}
exit:
if (status) {
/*
* ENODEV is a noop, not really an error.
*/
}
return (status);
}
/*
* man_dr_attach has submitted a request to DRATTACH a path,
* add that path to the path list.
*
* Called holding perimeter lock.
*/
static int
{
int status = 0;
goto exit;
}
goto exit;
}
/*
* Extract SC ethernet address from IOSRAM.
*/
/*
* man_dr_attach passes the new device info in a_sf_dev.
*/
exit:
return (status);
}
/*
* man_dr_detach has submitted a request to DRDETACH a path.
* He is in cv_wait_sig(wp->mw_cv). We remove the path and
* cv_signal(wp->mw_cv) man_dr_detach.
*
* Called holding perimeter lock.
*/
static void
{
int i;
int found;
int status = 0;
goto exit;
}
goto exit;
}
goto exit;
}
/*
* We should have switched detaching path if it was active.
*/
goto exit;
}
/*
* Submit an ASSIGN command, minus the detaching device.
*/
goto exit;
}
mpath.mip_man_ppa = 0;
i = 0;
i++;
} else {
}
}
if (found) {
/*
* Need to include SCs ethernet address in command.
*/
}
/*
* Hand back status to man_dr_detach request.
*/
exit:
}
/*
* The background thread has configured new lower multiplexor streams for
* the given destinations. Update the appropriate destination data structures
* inside the inner perimeter. We must take care to deal with destinations
* whose upper stream has closed or detached from lower streams.
*
* Returns
* 0 Done with work request.
* 1 Reused work request.
*/
static int
{
int i;
}
/*
* Update destination structures as appropriate.
*/
/*
* Check to see if lower stream we just switch is still
* around.
*/
continue;
/*
* Switch failed for some reason. Clear
* PLUMBING flag and retry switch again later.
*/
continue;
}
/*
* Swap new info, for old. We return the old info to
* man_bwork to close things up below.
*/
/*
* save the wq from the destination passed(tdp).
*/
mdp->md_switch_id = 0;
mdp->md_switches++;
mdp->md_dlpierrors = 0;
/*
* Resync lower w/ upper dlpi state. This will start link
*/
man_reset_dlpi((void *)mdp);
}
if (switch_ok) {
}
} else {
/*
* Never got switch-to destinations open, free them.
*/
}
/*
* Clear pathgroup switching flag and update path flags.
*/
/*
* Switch succeeded, mark path we switched from as failed, and
* device we switch to as active and clear its failed flag (if set).
* Sync up kstats.
*/
if (switch_ok) {
} else
}
/*
* Decrement manp reference count and hand back work request if
* needed.
*/
manp->man_refcnt--;
if (switch_ok) {
}
return (switch_ok);
}
/*
* Find the destination in the upper stream that we just switched.
*/
{
/*
* Check if upper stream closed, or detached.
*/
continue;
break;
/*
* Upper stream detached and reattached while we were
* switching.
*/
break;
}
}
return (mdp);
}
/*
* bg_thread cant complete the switch for some reason. (Re)start the
* linkcheck timer again.
*/
static void
{
/*
* If we have not yet initialized link, or the upper stream is
* DL_IDLE, restart the linktimer.
*/
}
}
/*
* Arrange to replay all of ms_dl_mp on the new lower stream to get it
* in sync with the upper stream. Note that this includes setting the
* physical address.
*
* Called from qtimeout with inner perimeter lock.
*/
static void
{
mdp->md_lc_timer_id = 0;
return;
}
goto fail;
/*
* Send down an unbind and detach request, just to clean things
* out, we ignore ERROR_ACKs for unbind and detach in man_lrsrv.
*/
tmp = man_alloc_ubreq_dreq();
goto fail;
}
return;
fail:
while (rmp) {
}
/*
* If low on memory, try again later. I Could use qbufcall, but that
* could fail and I would have to try and recover from that w/
* qtimeout anyway.
*/
}
/*
* Once we receive acknowledgement that DL_ATTACH_REQ was successful,
* we can send down the DL_* related IOCTLs (e.g. DL_IOC_HDR). If we
* try and send them downstream w/o waiting, the ioctl's get processed before
* the ATTACH_REQ and they are rejected. TBD - could just do the lower
* dlpi state change in lock step. TBD
*/
static int
{
goto exit;
status = 0;
goto exit;
}
exit:
return (status);
}
static mblk_t *
{
goto exit;
goto exit;
}
exit:
return (ubreq);
}
static mblk_t *
{
goto nomem;
} else {
}
}
return (listp);
while (listp) {
}
return (NULL);
}
static mblk_t *
{
goto exit;
exit:
ether_sprintf(eap)));
return (mp);
}
/*
* A new path in a pathgroup has become active for the first time. Setup
* the lower destinations in preparation for man_pg_activate to call
* man_autoswitch.
*/
static void
{
continue;
/*
* TBD - Take out
* ASSERT(mdp->md_device.mdev_state == MDEV_UNASSIGNED);
* ASSERT(mdp->md_state == MAN_DSTATE_NOTPRESENT);
*/
}
}
}
static int
{
int close_cnt = 0;
int status = 0;
goto exit;
}
/*
* Count up number of destinations we need to close.
*/
continue;
close_cnt++;
}
if (close_cnt == 0)
goto exit;
goto exit;
}
continue;
/*
* Clean up optimized destination pointer if we are
* closing it.
*/
if (mdp->md_lc_timer_id != 0) {
mdp->md_lc_timer_id = 0;
}
}
}
mdp->md_dmp_count = 0;
}
exit:
return (status);
}
/*
* Returns TRUE if stream uses pathgroup, FALSE otherwise.
*/
static int
{
int status;
return (!status);
}
static int
{
int time = 0;
switch (timer) {
case MAN_TIMER_INIT:
if (attached)
else
break;
case MAN_TIMER_LINKCHECK:
if (attached) {
else
} else
break;
case MAN_TIMER_DLPIRESET:
if (attached)
else
break;
default:
break;
}
return (drv_usectohz(time));
}
/*
* Check the links for each active destination. Called inside inner
* perimeter via qtimeout. This timer only runs on the domain side of the
* driver. It should never run on the SC side.
*
* MAN_LINKCHECK_TIME seconds. If the link goes MAN_LINKSTALE, the we probe
* the link every MAN_LINKSTALE_TIME seconds, and fail the link after probing
* the link MAN_LINKSTALE_RETRIES times.
* The man_lock is held to synchronize access pathgroup list(man_pg).
*/
void
{
int newstate;
int oldstate;
/*
* Clear timeout id and check if someones waiting on us to
* complete a close.
*/
mdp->md_lc_timer_id = 0;
goto exit;
}
/*
* If the lower stream needs initializing, just go straight to
* switch code. As the linkcheck timer is started for all
* SAPs, do not send ping packets during the initialization.
*/
goto do_switch;
}
if (!man_needs_linkcheck(mdp)) {
"man_linkcheck_timer: unneeded linkcheck on mdp(0x%p)",
(void *)mdp);
return;
}
/*
* The above call to man_needs_linkcheck() validates
* mdp->md_msp and mdp->md_msp->ms_manp pointers.
*/
/*
* This is the most common case, when traffic is flowing.
*/
/*
* Clear the FAILED flag and update lru.
*/
manp->man_meta_ppa);
}
goto done;
}
/*
* If we're here, it means we have not seen any traffic
*/
switch (oldstate) {
case MAN_LINKINIT:
case MAN_LINKGOOD:
mdp->md_linkstales++;
break;
case MAN_LINKSTALE:
case MAN_LINKFAIL:
mdp->md_linkstales++;
if (mdp->md_linkstale_retries < 0) {
mdp->md_linkfails++;
/*
* Mark the destination as FAILED and
* update lru.
*/
if (oldstate != MAN_LINKFAIL) {
}
}
break;
default:
" state %d", oldstate);
break;
}
done:
}
/*
* Do any work required from state transitions above.
*/
if (newstate == MAN_LINKFAIL) {
if (!man_do_autoswitch(mdp)) {
/*
* Stop linkcheck timer until switch completes.
*/
}
}
if (send_ping)
if (restart_timer)
exit:
}
/*
* Handle linkcheck initiated autoswitching.
* Called with man_lock held.
*/
static int
{
int status = 0;
/*
* Set flags and refcnt. Cleared in man_iswitch when SWITCH completes.
*/
return (EBUSY);
/*
* We're initializing, ask for a switch to our currently
* active device.
*/
} else {
manp->man_meta_ppa);
}
goto exit;
}
}
exit:
if (status != 0) {
/*
* man_iswitch not going to run, clean up.
*/
}
return (status);
}
/*
* Gather up all lower multiplexor streams that have this link open and
* try to switch them. Called from inner perimeter and holding man_lock.
*
* pg_id - Pathgroup to do switch for.
* st_devp - New device to switch to.
* wait_for_switch - whether or not to qwait for completion.
*/
static int
{
int sdp_cnt = 0;
int status = 0;
goto exit;
}
} else {
}
/*
* Set dests as PLUMBING, cancel timers and return array of dests
* that need a switch.
*/
if (status) {
goto exit;
}
/*
* If no streams are active, there are no streams to switch.
* Return ENODEV (see man_pg_activate).
*/
if (sdp_cnt == 0) {
goto exit;
}
/*
* Ask the bgthread to switch. See man_bwork.
*/
exit:
return (status);
}
/*
* If an alternate path exists for pathgroup, arrange for switch to
* happen. Note that we need to switch each of msp->dests[pg_id], for
* all on man_strup. We must:
*
* Cancel any timers
* Mark dests as PLUMBING
* Submit switch request to man_bwork_q->
*/
static int
{
int sdp_cnt = 0;
int status = 0;
/*
* Count up number of streams, there is one destination that needs
* switching per stream.
*/
sdp_cnt++;
}
if (sdp_cnt == 0)
goto exit;
goto exit;
}
/*
* Mark each destination as unusable.
*/
/*
* Mark destination as plumbing and store the
* address of sdp as a way to identify the
* SWITCH request when it comes back (see man_iswitch).
*/
/*
* Copy destination info.
*/
tdp++;
/*
* Cancel timers.
*/
if (mdp->md_lc_timer_id) {
(void) quntimeout(man_ctl_wq,
mdp->md_lc_timer_id = 0;
}
}
}
}
status = 0;
exit:
return (status);
}
/*
* The code below generates an ICMP echo packet and sends it to the
* broadcast address in the hopes that the other end will respond
* and the man_linkcheck_timer logic will see the traffic.
*
* This assumes ethernet-like media.
*/
/*
* Generate an ICMP packet. Called exclusive inner perimeter.
*
* mdp - destination to send packet to.
* sap - either ETHERTYPE_ARP or ETHERTYPE_IPV6
*/
static void
{
/* TBD - merge pinger and this routine. */
if (sap == ETHERTYPE_IPV6) {
mdp->md_icmpv6probes++;
} else {
mdp->md_icmpv4probes++;
}
/*
* Send the ICMP message
*/
return;
/*
* Send it out.
*/
}
}
static mblk_t *
{
int ipver;
int iph_hdr_len;
uchar_t i;
if (sap == ETHERTYPE_IPV6) {
iph_hdr_len = sizeof (ip6_t);
size = ICMP6_MINLEN;
} else {
iph_hdr_len = sizeof (ipha_t);
size = ICMPH_SIZE;
}
goto exit;
/*
* fill out the ICMP echo packet headers
*/
if (ipver == IPV4_VERSION) {
ipha->ipha_type_of_service = 0;
if (man_is_on_domain) {
if (man_get_iosram(&manc)) {
goto exit;
}
/*
* Domain generates ping packets for domain to
* SC network (dman0 <--> scman0).
*/
} else {
/*
* Note that ping packets are only generated
* by the SC across scman1 (SC to SC network).
*/
}
ipha->ipha_ident = 0;
ipha->ipha_hdr_checksum = 0;
} else {
/*
* IP version = 6, priority = 0, flow = 0
*/
if (man_is_on_domain) {
if (man_get_iosram(&manc)) {
goto exit;
}
/*
* Domain generates ping packets for domain to
* SC network (dman0 <--> scman0).
*/
} else {
/*
* Note that ping packets are only generated
* by the SC across scman1 (SC to SC network).
*/
}
}
/*
* IPv6 and IP are the same for ICMP as far as I'm concerned.
*/
if (ipver == IPV4_VERSION) {
icmph->icmph_code = 0;
} else {
icmph->icmph_code = 0;
}
for (i = 0; i < datalen; i++)
*datap++ = i;
if (ipver == IPV4_VERSION) {
} else {
}
/*
* TBD
* icp->icmp_time = ???;
*/
exit:
return (mp);
}
static mblk_t *
{
return (NULL);
}
return (NULL);
}
/*
* phys addr first - TBD
*/
return (mp);
}
/*
* The routines in this file are executed by the MAN background thread,
* which executes outside of the STREAMS framework (see man_str.c). It is
* allowed to do the things required to modify the STREAMS driver (things
* that are normally done from a user process). These routines do things like
* etc.
*
* The mechanism of communication between the STREAMS portion of the driver
* and the background thread portion are two work queues, man_bwork_q
* and man_iwork_q (background work q and streams work q). Work
* requests are placed on those queues when one half of the driver wants
* the other half to do some work for it.
*
* The MAN background thread executes the man_bwork routine. Its sole
* job is to process work requests placed on this work q. The MAN upper
* write service routine is responsible for processing work requests posted
* to the man_iwork_q->
*
* Both work queues are protected by the global mutex man_lock. The
* man_bwork is signaled via the condvar man_bwork_q->q_cv. The man_uwsrv
* routine is signaled by calling qenable (forcing man_uwsrv to run).
*/
/*
* man_bwork - Work thread for this device. It is responsible for
* performing operations which can't occur within the STREAMS framework.
*
* Locking:
* - Called holding no locks
* - Obtains the global mutex man_lock to remove work from
* man_bwork_q, and post work to man_iwork_q->
* - Note that we do not want to hold any locks when making
* any ldi_ calls.
*/
void
{
int done = 0;
int wp_finished;
"mn_work_thrd");
while (done == 0) {
/*
* While there is nothing to do, sit in cv_wait. If work
* request is made, requester will signal.
*/
}
wp_finished = TRUE;
case MAN_WORK_OPEN_CTL:
break;
case MAN_WORK_CLOSE_CTL:
break;
case MAN_WORK_CLOSE:
case MAN_WORK_CLOSE_STREAM:
break;
case MAN_WORK_SWITCH:
wp_finished = FALSE;
break;
case MAN_WORK_STOP: /* man_bwork_stop() */
done = 1;
break;
default:
break;
}
if (wp_finished) {
else
}
}
man_bwork_id = NULL;
thread_exit();
}
/*
* man_open_ctl - Open the control stream.
*
* returns - success - 0
* - failure - errno code
*
* Mutex Locking Notes:
* We need a way to keep the CLONE_OPEN qwaiters in man_open from
* checking the man_config variables after the ldi_open call below
* returns from man_open, leaving the inner perimeter. So, we use the
* man_lock to synchronize the threads in man_open_ctl and man_open. We
* hold man_lock across this call into man_open, which in general is a
* no-no. But, the STREAMs portion of the driver (other than open)
* doesn't use it. So, if ldi_open gets hijacked to run any part of
* the MAN streams driver, it wont end up recursively trying to acquire
* man_lock. Note that the non-CLONE_OPEN portion of man_open doesnt
* acquire it either, so again no recursive mutex.
*/
static int
{
int status = 0;
/*
* Get eri driver loaded and kstats initialized. Is there a better
* way to do this? - TBD.
*/
if (status) {
"man_open_ctl: ident alloc failed, error %d", status);
goto exit;
}
if (status) {
"man_open_ctl: eri open failed, error %d", status);
goto exit;
}
if (man_ctl_lh != NULL) {
goto exit;
}
if (status) {
"man_open_ctl: man control dev open failed, "
"error %d", status);
goto exit;
}
/*
* Update global config state. TBD - dont need lock here, since
* everyone is stuck in open until we finish. Only other modifier
* is man_deconfigure via _fini, which returns EBUSY if there is
* any open streams (other than control). Do need to signal qwaiters
* on error.
*/
man_ctl_lh = ctl_lh;
exit:
if (li)
(void *)man_ctl_lh, status));
return (status);
}
/*
* man_close_ctl - Close control stream, we are about to unload driver.
*
* Locking:
* - Called holding no locks.
*/
static void
{
man_ctl_lh = NULL;
}
}
/*
* Close the lower streams. Get all the timers canceled, close the lower
* stream and delete the dest array.
*
* Returns:
* 0 Closed all streams.
* 1 Couldn't close one or more streams, timers still running.
*
* Locking:
* - Called holding no locks.
*/
static void
{
int i;
}
}
/*
* We want to close down all lower streams. Need to wait until all
* timers and work related to these lower streams is quiesced.
*
* Returns 1 if lower streams are quiesced, 0 if we need to wait
* a bit longer.
*/
static void
{
int cnt;
int i;
for (i = 0; i < cnt; i++) {
if (mdp[i].md_lc_timer_id != 0) {
mdp[i].md_lc_timer_id = 0;
}
}
}
}
/*
* A failover is started at start of day, when the driver detects a
* link failure (see man_linkcheck_timer), or when DR detaches
* the IO board containing the current active link between SC and
* domain (see man_dr_detach, man_iwork, and man_do_dr_detach). A
* MAN_WORK_SWITCH work request containing all the lower streams that
* should be switched is posted on the man_bwork_q-> This work request is
* processed here. Once all lower streams have been switched to an
* alternate path, the MAN_WORK_SWITCH work request is passed back to
* man_iwork_q where it is processed within the inner perimeter of the
* STREAMS framework (see man_iswitch).
*
* Note that when the switch fails for whatever reason, we just hand
* back the lower streams untouched and let another failover happen.
* Hopefully we will sooner or later succeed at the failover.
*/
static void
{
int i;
int status = 0;
/*
* Make a temporary copy of dest array, updating device to the
* alternate and try to open all lower streams. bgthread can sleep.
*/
KM_SLEEP);
/*
* Before we switch to the new path, lets sync the kstats.
*/
} else
if (status != 0)
goto exit;
break;
}
/*
* Didn't plumb everyone, unplumb new lower stuff and return.
*/
int j;
for (j = 0; j <= i; j++)
man_unplumb(&tdp[j]);
goto exit;
}
/*
* If we cant set new path on the SSC, then fail the
* failover.
*/
man_unplumb(&tdp[i]);
goto exit;
}
exit:
if (status)
/*
* Hand processed switch request back to man_iwork for
* processing in man_iswitch.
*/
}
/*
* man_plumb - Configure a lower stream for this destination.
*
* Locking:
* - Called holding no locks.
*
* Returns:
* - success - 0
* - failure - error code of failure
*/
static int
{
int status;
int muxid;
/*
* Control stream should already be open.
*/
if (man_ctl_lh == NULL) {
goto exit;
}
if (status != 0) {
"man_plumb: ident alloc failed, error %d", status);
goto exit;
}
/*
* previously opens were done by a dev_t of makedev(clone_major,
*/
if (status) {
"man_plumb: eri open failed, error %d", status);
goto exit;
}
/*
* Link netdev under MAN.
*/
if (status) {
"man_plumb: ldi_ioctl(I_PLINK) failed, error %d", status);
goto exit;
}
/*
* If we can't find the linkrec then return an
* error. It will be automatically unplumbed on failure.
*/
exit:
if (li)
return (status);
}
/*
* man_unplumb - tear down the STREAMs framework for the lower multiplexor.
*
* mdp - destination struct of interest
*
* returns - success - 0
* - failure - return error from ldi_ioctl
*/
static void
{
return;
/*
* I_PUNLINK causes the multiplexor resources to be freed.
*/
if (status) {
" errno %d\n", status);
}
/*
* Delete linkrec if it exists.
*/
}
/*
* The routines below deal with paths and pathgroups. These data structures
* are used to track the physical devices connecting the domain and SSC.
* These devices make up the lower streams of the MAN multiplexor. The
* routines all expect the man_lock to be held.
*
* A pathgroup consists of all paths that connect a particular domain and the
* SSC. The concept of a pathgroup id (pg_id) is used to uniquely identify
* a pathgroup. For Domains, there is just one pathgroup, that connecting
* the domain to the SSC (pg_id == 0). On the SSC, there is one pathgroup per
* domain. The pg_id field corresponds to the domain tags A-R. A pg_id of
* 0 means domain tag A, a pg_id of 1 means domain B, etc.
*
* The path data structure identifies one path between the SSC and a domain.
* It describes the information for the path: the major and minor number of
* the physical device; kstat pointers; and ethernet address of the
* other end of the path.
*
* The pathgroups are anchored at man_pg_head and are protected by the
* by the inner perimeter. The routines are only called by the STREAMs
* portion of the driver.
*/
/*
* Update man instance pathgroup info. Exclusive inner perimeter assures
* this code is single threaded. man_refcnt assures man_t wont detach
* while we are playing with man_pg stuff.
*
* Returns 0 on success, errno on failure.
*/
int
{
int status = 0;
goto exit;
}
goto exit;
}
case MI_PATH_ASSIGN:
break;
case MI_PATH_ADD:
break;
case MI_PATH_UNASSIGN:
break;
case MI_PATH_ACTIVATE:
break;
case MI_PATH_READ:
break;
default:
break;
}
exit:
return (status);
}
/*
* Assign paths to a pathgroup. If pathgroup doesn't exist, create it.
* If path doesn't exist, create it. If ethernet address of existing
* pathgroup different, change it. If an existing path is not in the new
* list, remove it. If anything changed, send PATH_UPDATE request to
* man_iwork to update all man_dest_t's.
*
* mplpp - man pathgroup list point to point.
*/
static int
{
int cnt;
int i;
int status = 0;
if (cnt == 0) {
goto exit;
}
/*
* Assure the devices to be assigned are not assigned to some other
* pathgroup.
*/
for (i = 0; i < cnt; i++) {
continue;
/*
* Already assigned to some other man instance
* or pathgroup.
*/
goto exit;
}
}
/*
* Find pathgroup, or allocate new one if it doesnt exist and
* add it to list at mplpp. Result is that mpg points to
* pathgroup to modify.
*/
if (status)
goto exit;
goto exit;
}
/*
* Create list of new paths to add to pathgroup.
*/
for (i = 0; i < cnt; i++) {
continue; /* Already exists in this pathgroup */
goto exit;
}
if (status) {
goto exit;
}
}
/*
* man_dr_attach passes only the path which is being DRd in.
* So just add the path and don't worry about removing paths.
*/
goto exit;
/*
* Check if any paths we want to remove are ACTIVE. If not,
* do a second pass and remove them.
*/
int in_new_list;
in_new_list = FALSE;
for (i = 0; i < cnt; i++) {
in_new_list = TRUE;
break;
}
}
if (!in_new_list) {
if (first_pass) {
goto exit;
}
} else {
}
}
}
if (first_pass == TRUE) {
first_pass = FALSE;
goto again;
}
exit:
if (status == 0) {
if (add_paths)
} else {
}
}
return (status);
}
/*
* Remove all paths from a pathgroup (domain shutdown). If there is an
* active path in the group, shut down all destinations referencing it
* first.
*/
static int
{
int status = 0;
/*
* Check for existence of pathgroup.
*/
goto exit;
if (status)
goto exit;
}
/*
* Free all the paths for this pathgroup.
*/
}
/*
* Remove this pathgroup from the list, and free it.
*/
goto free_pg;
}
break;
}
exit:
return (status);
}
/*
* Set a new active path. This is done via man_ioctl so we are
* exclusive in the inner perimeter.
*/
static int
{
int status = 0;
goto exit;
}
goto exit;
}
goto exit;
}
goto exit;
}
goto exit;
}
/*
* This is the first time a path has been activated for
* this pathgroup. Initialize all upper streams dest
* structure for this pathgroup so autoswitch will find
* them.
*/
goto exit;
}
/*
* Path already active, nothing to do.
*/
goto exit;
/*
* Try to autoswitch to requested device. Set flags and refcnt.
* Cleared in man_iswitch when SWITCH completes.
*/
manp->man_refcnt++;
/*
* Switch to path specified.
*/
if (status != 0) {
/*
* man_iswitch not going to run, clean up.
*/
manp->man_refcnt--;
/*
* Device not plumbed isn't really an error. Change
* active device setting here, since man_iswitch isn't
* going to be run to do it.
*/
status = 0;
}
}
exit:
return (status);
}
static int
{
int cnt;
int status = 0;
goto exit;
}
cnt = 0;
break;
cnt++;
}
/*
* TBD - What should errno be if user buffer too small ?
*/
}
exit:
return (status);
}
/*
* return existing pathgroup, or create it. TBD - Need to update
* all of destinations if we added a pathgroup. Also, need to update
* all of man_strup if we add a path.
*
* mplpp - man pathgroup list point to pointer.
* mpgp - returns newly created man pathgroup.
* mip - info to fill in mpgp.
*/
static int
{
int status = 0;
" addresss not set!");
goto exit;
}
goto exit;
}
} else {
}
exit:
return (status);
}
/*
* Return pointer to pathgroup containing mdevp, null otherwise. Also,
* if a path pointer is passed in, set it to matching path in pathgroup.
*
* Called holding man_lock.
*/
static man_pg_t *
{
return (mpg);
}
}
}
return (NULL);
}
/*
* Return pointer to pathgroup assigned to destination, null if not found.
*
* Called holding man_lock.
*/
static man_pg_t *
{
return (mpg);
}
return (NULL);
}
static man_path_t *
{
return (mp);
}
return (NULL);
}
static man_path_t *
{
return (mp);
return (NULL);
}
/*
* Try and find an alternate path.
*/
static man_path_t *
{
/*
* Find a non-failed path, or the lru failed path and switch to it.
*/
continue;
goto exit;
else
}
}
/*
* Nowhere to switch to.
*/
goto exit;
exit:
return (np);
}
/*
* Assumes caller has verified existence.
*/
static void
{
goto exit;
}
break;
}
exit:
}
/*
* Insert path into list, ascending order by ppa.
*/
static void
{
return;
}
return;
}
break;
}
} else {
}
}
/*
* Merge npp into lpp, ascending order by ppa. Assumes no
* duplicates in either list.
*/
static void
{
}
}
static int
{
int status = 0;
/*
* Create named kstats for accounting purposes.
*/
goto exit;
}
exit:
return (status);
}
/*
 * NOTE(review): extraction fragment -- an anonymous static void function;
 * everything but the braces is missing.
 */
static void
{
}
/*
* man_work_alloc - allocate and initialize a work request structure
*
* type - type of request to allocate
* returns - success - ptr to an initialized work structure
* - failure - NULL
*/
/* NOTE(review): extraction fragment -- return type line, name and
 * allocation body are missing; only the exit skeleton survives. */
{
goto exit;
exit:
return (wp);
}
/*
* man_work_free - deallocate a work request structure
*
* wp - ptr to work structure to be freed
*/
/* NOTE(review): extraction fragment -- name, parameter and body missing. */
void
{
}
/*
* Post work to a work queue. The man_bwork sleeps on
* man_bwork_q->q_cv, and work requesters may sleep on mw_cv.
* The man_lock is used to protect both cv's.
*/
/* NOTE(review): extraction fragment -- name, parameters, enqueue and
 * cv_signal/qenable statements are missing from this chunk. */
void
{
if (lp) {
} else {
}
/*
* cv_signal for man_bwork_q, qenable for man_iwork_q
*/
if (q == man_bwork_q) {
} else { /* q == man_iwork_q */
if (man_ctl_wq != NULL)
}
}
/* <<<<<<<<<<<<<<<<<<<<<<< NDD SUPPORT FUNCTIONS >>>>>>>>>>>>>>>>>>> */
/*
*/
/*
* Register each element of the parameter array with the
* named dispatch handler. Each element is loaded using
* nd_load()
*
* cnt - the number of elements present in the parameter array
*/
/*
 * NOTE(review): extraction fragment -- the signature, the nd_load calls
 * and their failure conditions are missing; on any registration failure
 * the surviving code frees the whole man_ndlist table and exits.
 */
static int
{
int i;
switch (man_param_display[i]) {
case MAN_NDD_GETABLE:
break;
case MAN_NDD_SETABLE:
break;
default:
continue;
}
(void) man_nd_free(&man_ndlist);
goto exit;
}
}
(void) man_nd_free(&man_ndlist);
goto exit;
}
(void) man_nd_free(&man_ndlist);
goto exit;
}
(void) man_nd_free(&man_ndlist);
goto exit;
}
exit:
return (status);
}
/*
 * NOTE(review): extraction fragment -- an anonymous static void with an
 * if/else whose condition and branches are missing.
 */
static void
{
else
}
/*
 * NOTE(review): extraction fragment -- an ndd report callback that
 * mi_mpprintf's an Interface/Destination/Active Path table; the
 * signature, loop headers and most print calls are missing.
 */
/*ARGSUSED*/
static int
{
int i;
int pad_end;
"==========================================");
for (i = 0; i < 2; i++) {
continue;
(void) mi_mpprintf(mp,
"Interface\tDestination\t\tActive Path\tAlternate Paths");
"----------------------------------------");
manp->man_meta_ppa);
if (man_is_on_domain) {
} else {
if (i == 0) {
&mpg->mpg_dst_eaddr));
pad_end = 0;
pad);
} else {
(void) mi_mpprintf_nr(mp,
"Other SSC\t");
}
}
}
}
return (0);
}
/*
 * NOTE(review): extraction fragment -- helper that prints the active and
 * alternate paths for a pathgroup; signature and print calls missing.
 */
static void
{
/*
* Active path
*/
} else {
}
/*
* Alternate Paths.
*/
if (plist)
}
}
/*
* NDD request to set active path. Calling context is man_ioctl, so we are
* exclusive in the inner perimeter.
*
*/
/*
 * NOTE(review): extraction fragment -- parses a "<meta_ppa> <pg_id>
 * <phys_ppa>"-style ndd value (the in-place '\0' splits survive), but the
 * signature, parse calls and switch request are missing.
 */
/* ARGSUSED3 */
static int
{
int meta_ppa;
int phys_ppa;
int pg_id;
int status = 0;
goto exit;
}
*pg_idp++ = '\0';
goto exit;
}
*phys_ppap++ = '\0';
goto exit;
}
goto exit;
}
goto exit;
}
exit:
return (status);
}
/*
* Dump out the contents of the IOSRAM handoff structure. Note that if
* anything changes here, you must make sure that the sysinit script
* stays in sync with this output.
*/
/*
 * NOTE(review): extraction fragment -- signature, IOSRAM read and print
 * statements are missing; only runs when man_is_on_domain is set.
 */
/* ARGSUSED */
static int
{
char *ipaddr;
int i;
int status;
if (!man_is_on_domain)
return (0);
return (status);
}
} else {
}
for (i = 0; i < MAN_MAX_EXPANDERS; i++) {
}
}
return (0);
}
/*
 * NOTE(review): extraction fragment -- formats an address into the static
 * buffer b[18] via a byte pointer into `in`; the name, parameter and
 * formatting call are missing.  Returns a pointer to static storage, so
 * the result is overwritten by the next call.
 */
static char *
{
static char b[18];
unsigned char *p;
p = (unsigned char *)&in;
return (b);
}
/*
* parameter value. cp points to the required parameter.
*/
/* NOTE(review): extraction fragment -- the first line of the comment above
 * and the whole function body (signature, mi_mpprintf of the value) are
 * missing; this is the ndd "get" callback. */
/* ARGSUSED */
static int
{
return (0);
}
/*
* Sets the man parameter to the value in the param_register using
* nd_load().
*/
/* NOTE(review): extraction fragment -- signature and the numeric
 * conversion/range check that can return EINVAL are missing. */
/* ARGSUSED */
static int
{
char *end;
return (EINVAL);
}
return (0);
}
/*
* Free the Named Dispatch Table by calling man_nd_free
*/
/* NOTE(review): extraction fragment -- name and the nd_free call missing. */
static void
{
if (man_ndlist != NULL)
}
/*
* Free the table pointed to by 'ndp'
*/
/* NOTE(review): extraction fragment -- signature and free loop missing. */
static void
{
}
}
/*
* man_kstat_update - update the statistics for a meta-interface.
*
* ksp - kstats struct
* rw - flag indicating whether stats are to be read or written.
*
* returns 0
*
* The destination specific kstat information is protected by the
* perimeter lock, so we submit a work request to get the stats
* updated (see man_do_kstats()), and then collect the results
* when cv_signal'd. Note that we are doing cv_timedwait_sig()
* as a precautionary measure only.
*/
/*
 * NOTE(review): extraction fragment -- the signature, work-request
 * submission and cv_timedwait_sig call are missing; the refcnt
 * hold/release bracketing and the KSTAT_READ/WRITE copy loops survive.
 * On wait timeout the code deliberately returns stale stats (status = 0).
 */
static int
{
int status = 0;
int i;
"KSTAT_READ"));
manp->man_refcnt++;
/*
* If the driver has been configured, get kstats updated by inner
* perimeter prior to retrieving.
*/
if (man_config_state == MAN_CONFIGURED) {
} else {
ASSERT(wait_status <= 0);
if (wait_status == 0)
else {
"timedout, returning stale stats."));
status = 0;
}
}
if (status)
goto exit;
}
if (rw == KSTAT_READ) {
for (i = 0; i < MAN_NUMSTATS; i++) {
}
} else {
for (i = 0; i < MAN_NUMSTATS; i++) {
}
}
exit:
manp->man_refcnt--;
return (status);
}
/*
* Sum destination kstats for all active paths for a given instance of the
* MAN driver. Called with perimeter lock.
*/
/* NOTE(review): extraction fragment -- signature and summing loop missing. */
static void
{
/*
* Sync mp_last_knp for each path associated with the MAN instance.
*/
/*
* We just want to update the destination statistics here.
*/
}
}
}
/*
* Sum device kstats for all active paths for a given instance of the
* MAN driver. Called with man_lock.
*/
/* NOTE(review): extraction fragment -- signature and loop bodies missing. */
static void
{
}
}
}
/*
* Update the device kstats.
* As man_kstat_update() is called with kstat_chain_lock held,
* we can safely update the statistics from the underlying driver here.
*/
/* NOTE(review): extraction fragment -- signature, kstat lookup and the
 * success branch are missing; only the no-data debug message survives. */
static void
{
int instance;
} else {
("man_update_dev_kstats: no kstat data found for %s(%d,%d)",
}
}
/*
 * NOTE(review): extraction fragment -- walks man_kstat_info over
 * MAN_NUMSTATS entries, skipping entries without MK_NOT_PHYSICAL; the
 * signature and per-stat work are missing.
 */
static void
{
int i;
int flags;
char *statname;
continue;
}
for (i = 0; i < MAN_NUMSTATS; i++) {
if (!(flags & MK_NOT_PHYSICAL))
continue;
}
}
}
/*
* Initialize MAN named kstats in the space provided.
*/
/* NOTE(review): extraction fragment -- signature and the kstat_named_init
 * call (which uses man_kstat_info[i].mk_type) are missing. */
static void
{
int i;
for (i = 0; i < num_stats; i++) {
man_kstat_info[i].mk_type);
}
}
/*
* man_kstat_byname - get a kernel stat value from its structure
*
* ksp - kstat_t structure to play with
* s - string to match names with
*
* returns - success - 1 (found)
* - failure - 0 (not found)
*/
/* NOTE(review): extraction fragment -- signature, the name-compare loop
 * condition and the copy-out are missing; when not found the result is
 * set to a reasonable default per the surviving comment. */
static int
{
int found = 0;
knp++) {
found++;
}
}
} else {
}
/*
* if getting a value but couldn't find the namestring, result = 0.
*/
if (!found) {
/*
* a reasonable default
*/
}
return (found);
}
/*
*
* Accumulate MAN driver kstats from the incremental values of the underlying
* physical interfaces.
*
* Parameters:
* sum_knp - The named kstat area to put cumulative value,
* NULL if we just want to sync next two params.
* phys_ksp - Physical interface kstat_t pointer. Contains
* more current counts.
* phys_last_knp - counts from the last time we were called for this
* physical interface. Note that the name kstats
* pointed to are actually in MAN format, but they
* hold the mirrored physical devices last read
* kstats.
* Basic algorithm is:
*
* for each named kstat variable {
* sum_knp[i] += (phys_ksp->ksp_data[i] - phys_last_knp[i]);
* phys_last_knp[i] = phys_ksp->ksp_data[i];
* }
*
*/
/*
 * NOTE(review): extraction fragment -- the signature, the delta
 * arithmetic and most assignments are missing; the surviving skeleton
 * shows the lookup-by-name/alias, the 32-bit wrap handling and the
 * MK_NOT_PHYSICAL / MK_NOT_COUNTER special cases.
 */
static void
{
char *physname;
char *physalias;
char *statname;
int i;
(void *)phys_last_knp));
/*
* Now for each entry in man_kstat_info, sum the named kstat.
* Note that all MAN specific kstats will end up !found.
*/
for (i = 0; i < MAN_NUMSTATS; i++) {
int found = 0;
int flags = 0;
delta64 = 0;
/*
* Update MAN private kstats.
*/
if (flags & MK_NOT_PHYSICAL) {
continue;
}
continue; /* phys_ksp doesn't have this stat */
}
/*
* first try it by the "official" name
*/
if (phys_ksp) {
&phys_kn_entry)) {
found = 1;
physalias, &phys_kn_entry))) {
found = 1;
}
}
if (!found) {
/*
* clear up the "last" value, no change to the sum
*/
continue;
}
/*
* at this point, we should have the good underlying
* kstat value stored in phys_kn_entry
*/
if (flags & MK_NOT_COUNTER) {
/*
* it isn't a counter, so store the value and
* move on (e.g. ifspeed)
*/
continue;
}
switch (phys_kn_entry.data_type) {
case KSTAT_DATA_UINT32:
/*
* this handles 32-bit wrapping
*/
/*
* we've wrapped!
*/
}
break;
default:
/*
* must be a 64-bit value, we ignore 64-bit
* wraps, since they shouldn't ever happen
* within the life of a machine (if we assume
* machines don't stay up for more than a few
* hundred years without a reboot...)
*/
}
/*
* now we need to save the value
*/
case KSTAT_DATA_UINT32:
/* trunk down to 32 bits, possibly lossy */
break;
default:
break;
}
}
}
}
#if defined(DEBUG)
/*
 * Bit-name table used by the DEBUG print routines below; entry i names
 * flag bit (1 << (i - 1)), with index 0 reserved for "NONE".
 */
static char *_ms_flags[] = {
"NONE",
"FAST", /* 0x1 */
"RAW", /* 0x2 */
"ALLPHYS", /* 0x4 */
"ALLMULTI", /* 0x8 */
"ALLSAP", /* 0x10 */
"CKSUM", /* 0x20 */
"MULTI", /* 0x40 */
"SERLPBK", /* 0x80 */
"MACLPBK", /* 0x100 */
"CLOSING", /* 0x200 */
"CLOSE_DONE", /* 0x400 */
"CONTROL" /* 0x800 */
};
/*
 * NOTE(review): extraction fragment -- per the HEAD declarations this is
 * man_print_msp(manstr_t *); the flag-bit decode loop survives but the
 * signature and printfs are missing.
 */
static void
{
int i;
return;
buf[0] = '\0';
prbuf[0] = '\0';
if ((flags >> i) & 0x1) {
}
}
}
/* Bit-name table for man_dest_t dlpi/connect state, decoded below. */
static char *_md_state[] = {
"NOTPRESENT", /* 0x0 */
"INITIALIZING", /* 0x1 */
"READY", /* 0x2 */
"PLUMBING", /* 0x4 */
"CLOSING" /* 0x8 */
};
/*
 * NOTE(review): extraction fragment -- per the HEAD declarations this is
 * man_print_mdp(man_dest_t *); the state-bit decode survives but the
 * signature and printfs are missing.
 */
static void
{
int i;
buf[0] = '\0';
prbuf[0] = '\0';
return;
mdp->md_lastrcvcnt);
/*
* Print out state as text.
*/
if (state == 0) {
} else {
if ((state >> i) & 0x1) {
}
}
}
}
/*
 * NOTE(review): extraction fragment -- per the HEAD declarations this is
 * man_print_man(man_t *); signature and print calls missing.
 */
static void
{
buf[0] = '\0';
prbuf[0] = '\0';
return;
manp->man_meta_ppa);
} else {
}
}
/* Bit-name table for man_dev_t state, decoded by the routine below. */
static char *_mdev_state[] = {
"UNASSIGNED ",
"ASSIGNED",
"ACTIVE",
"FAILED"
};
/*
 * NOTE(review): extraction fragment -- per the HEAD declarations this is
 * man_print_dev(man_dev_t *); the mdev_major check and _mdev_state bit
 * decode survive, but the signature and printfs are missing.
 */
static void
{
int i;
buf[0] = '\0';
prbuf[0] = '\0';
return;
if (mdevp->mdev_major == 0) {
} else
goto number;
buf[0] = '\0';
prbuf[0] = '\0';
if (state == 0) {
} else {
for (i = 0; i < A_CNT(_mdev_state); i++) {
if ((state >> i) & 0x1) {
}
}
}
}
/* Command-name table for mi_path_t requests, used by the print routine. */
static char *_mip_cmd[] = {
"MI_PATH_READ",
"MI_PATH_ASSIGN",
"MI_PATH_ACTIVATE",
"MI_PATH_DEACTIVATE",
"MI_PATH_UNASSIGN"
};
/*
 * NOTE(review): extraction fragment -- likely man_print_mip(mi_path_t *)
 * per the HEAD declarations; body is missing.
 */
static void
{
return;
}
/*
 * NOTE(review): extraction fragment -- likely man_print_mtp(mi_time_t *)
 * per the HEAD declarations; body is missing.
 */
static void
{
return;
}
/*
 * NOTE(review): extraction fragment -- likely man_print_mpg(man_pg_t *)
 * per the HEAD declarations; body is missing.
 */
static void
{
return;
}
/* Name table for man_work_t waiter flags, used by the print routine. */
static char *_mw_flags[] = {
"NOWAITER", /* 0x0 */
"CVWAITER", /* 0x1 */
"QWAITER", /* 0x2 */
"DONE" /* 0x3 */
};
/*
 * NOTE(review): extraction fragment -- a DEBUG print routine with a loop
 * index (plausibly man_print_work, which would decode _mw_flags above --
 * TODO confirm); body is missing.
 */
static void
{
int i;
return;
}
}
/*
 * NOTE(review): extraction fragment -- another DEBUG print routine
 * (plausibly man_print_path per the HEAD declarations); body is missing.
 */
static void
{
return;
}
/*
 * NOTE(review): extraction fragment -- a DEBUG-only allocation wrapper
 * returning void *; name, parameters and the allocation call are missing.
 */
void *
{
void *tmp;
return (tmp);
}
/*
 * NOTE(review): extraction fragment -- a DEBUG-only free wrapper paired
 * with the allocator above; name, parameters and body are missing.
 */
void
{
}
#endif /* DEBUG */