strsubr.c revision 8aa5c3092bce9155bf46c01a22270bfe4b0382cb
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/sysmacros.h>
#include <sys/priocntl.h>
#include <sys/sunldi_impl.h>
#include <sys/isa_defs.h>
#include <sys/multidata.h>
#include <sys/netstack.h>
/*
* WARNING:
* The variables and routines in this file are private, belonging
* to the STREAMS subsystem. These should not be used by modules
* or drivers. Compatibility will not be guaranteed.
*/
/*
* Id value used to distinguish between different multiplexor links.
*/
#define STREAMS_LOPRI MINCLSYSPRI
typedef struct str_stat {
} str_stat_t;
static str_stat_t str_statistics = {
{ "sqenables", KSTAT_DATA_UINT64 },
{ "stenables", KSTAT_DATA_UINT64 },
{ "syncqservice", KSTAT_DATA_UINT64 },
{ "freebs", KSTAT_DATA_UINT64 },
{ "qwr_outer", KSTAT_DATA_UINT64 },
{ "rservice", KSTAT_DATA_UINT64 },
{ "strwaits", KSTAT_DATA_UINT64 },
{ "taskqfails", KSTAT_DATA_UINT64 },
{ "bufcalls", KSTAT_DATA_UINT64 },
{ "qhelps", KSTAT_DATA_UINT64 },
{ "qremoved", KSTAT_DATA_UINT64 },
{ "sqremoved", KSTAT_DATA_UINT64 },
{ "bcwaits", KSTAT_DATA_UINT64 },
{ "sqtoomany", KSTAT_DATA_UINT64 },
};
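/*
 * Illustrative sketch only: the members of str_stat_t were elided from this
 * extract. Judging by the initializer above, it plausibly holds one
 * kstat_named_t per counter, in the same order as the initializer:
 */
#if 0	/* sketch, not compiled */
typedef struct str_stat {
	kstat_named_t	sqenables;
	kstat_named_t	stenables;
	kstat_named_t	syncqservice;
	kstat_named_t	freebs;
	kstat_named_t	qwr_outer;
	kstat_named_t	rservice;
	kstat_named_t	strwaits;
	kstat_named_t	taskqfails;
	kstat_named_t	bufcalls;
	kstat_named_t	qhelps;
	kstat_named_t	qremoved;
	kstat_named_t	sqremoved;
	kstat_named_t	bcwaits;
	kstat_named_t	sqtoomany;
} str_stat_t;
#endif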
/*
* qrunflag was used previously to control background scheduling of queues. It
* is not used anymore, but kept here in case some module still wants to access
* it via qready() and setqsched macros.
*/
char qrunflag; /* Unused */
/*
* Most of the streams scheduling is done via task queues. Task queues may fail
* for non-sleep dispatches, so there are two backup threads servicing failed
 * requests for queues and syncqs. Both of these threads also service failed
 * freebs dispatch requests. Queues are put in the list specified by `qhead'
* and `qtail' pointers, syncqs use `sqhead' and `sqtail' pointers and freebs
* requests are put into `freebs_list' which has no tail pointer. All three
* lists are protected by a single `service_queue' lock and use
* `services_to_run' condition variable for signaling background threads. Use of
* a single lock should not be a problem because it is only used under heavy
* loads when task queues start to fail and at that time it may be a good idea
* to throttle scheduling requests.
*
* NOTE: queues and syncqs should be scheduled by two separate threads because
* queue servicing may be blocked waiting for a syncq which may be also
* scheduled for background execution. This may create a deadlock when only one
* thread is used for both.
*/
/*
 * List of queues scheduled for background processing due to lack of resources
 * in the task queues. Protected by the service_queue lock.
*/
/*
* Same list for syncqs
*/
/*
* Backup threads for servicing queues and syncqs
*/
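/*
 * Illustrative sketch only: the declarations described by the three comments
 * above were elided from this extract. The names below are taken from the
 * big comment above; the exact types and the backup thread handles are
 * assumptions.
 */
#if 0	/* sketch, not compiled */
static queue_t	*qhead, *qtail;		/* queues needing background service */
static syncq_t	*sqhead, *sqtail;	/* syncqs needing background service */
static mblk_t	*freebs_list;		/* deferred freebs requests, no tail */
static kmutex_t	service_queue;		/* protects all three lists */
static kcondvar_t services_to_run;	/* wakes the background threads */
#endif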
/*
* Bufcalls related variables.
*/
/*
 * run_queues is no longer used, but is kept in case some third-party
 * module or driver still references it.
*/
int run_queues = 0;
/*
* sq_max_size is the depth of the syncq (in number of messages) before
* qfill_syncq() starts QFULL'ing destination queues. As its primary
 * consumer - IP - is no longer D_MTPERMOD, but there may be other
 * modules/drivers that still depend on this behavior, so
 * choose a large number as the default value.
*/
int sq_max_size = 10000;
/*
* the number of ciputctrl structures per syncq and stream we create when
* needed.
*/
int n_ciputctrl;
int max_n_ciputctrl = 16;
/*
* if n_ciputctrl is < min_n_ciputctrl don't even create ciputctrl_cache.
*/
int min_n_ciputctrl = 2;
/*
* Per-driver/module syncqs
* ========================
*
 * A list of perdm structures is kept, with new entries being added (and new
 * syncqs allocated) when a streamtab is encountered that has not been seen
 * before.
* The reason for this mechanism is that some modules and drivers share a
* common streamtab and it is necessary for those modules and drivers to also
* share a common PERMOD syncq.
*
* perdm_list --> dm_str == streamtab_1
* dm_sq == syncq_1
* dm_ref
* dm_next --> dm_str == streamtab_2
* dm_sq == syncq_2
* dm_ref
* dm_next --> ... NULL
*
 * Each module or driver using a PERMOD syncq holds
 * a reference to the perdm structure and hence shares the syncq.
* References are held in the fmodsw_impl_t structure for each STREAMS module
* or the dev_impl array (indexed by device major number) for each driver.
*
* perdm_list -> [dm_ref == 1] -> [dm_ref == 2] -> [dm_ref == 1] -> NULL
* ^ ^ ^ ^
* | ______________/ | |
* | / | |
* dev_impl: ...|x|y|... module A module B
*
 * The dm_ref field is maintained as a reference count;
 * when it falls to zero, the perdm structure is removed from the list and
* the syncq is freed (see rele_dm()).
*/
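/*
 * Illustrative sketch only: the real perdm_t is presumably declared in a
 * STREAMS header and is not part of this extract. Field names are taken
 * from the diagram above; the types are assumptions.
 */
#if 0	/* sketch, not compiled */
typedef struct perdm {
	struct perdm		*dm_next;	/* next entry on perdm_list */
	syncq_t			*dm_sq;		/* shared PERMOD syncq */
	struct streamtab	*dm_str;	/* key: the shared streamtab */
	uint_t			dm_ref;		/* modules/drivers sharing it */
} perdm_t;
#endif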
static krwlock_t perdm_rwlock;
static void runservice(queue_t *);
static void streams_bufcall_service(void);
static void streams_qbkgrnd_service(void);
static void streams_sqbkgrnd_service(void);
static void free_syncq(syncq_t *);
static void enable_svc(queue_t *);
static void runbufcalls(void);
static void wait_q_syncq(queue_t *);
static void backenable_insertedq(queue_t *);
static void queue_service(queue_t *);
static void stream_service(stdata_t *);
static void syncq_service(syncq_t *);
static void qwriter_outer_service(syncq_t *);
#ifdef DEBUG
static int qprocsareon(queue_t *);
#endif
static void sq_run_events(syncq_t *);
static int propagate_syncq(queue_t *);
static void sqlist_free(sqlist_t *);
struct kmem_cache *stream_head_cache;
struct kmem_cache *queue_cache;
struct kmem_cache *syncq_cache;
struct kmem_cache *qband_cache;
struct kmem_cache *linkinfo_cache;
static linkinfo_t *linkinfo_list;
/* global esballoc throttling queue */
static esb_queue_t system_esbq;
/*
* esballoc tunable parameters.
*/
/*
* routines to handle esballoc queuing.
*/
static void esballoc_process_queue(esb_queue_t *);
static void esballoc_enqueue_mblk(mblk_t *);
static void esballoc_timer(void *);
static void esballoc_mblk_free(mblk_t *);
/*
* Qinit structure and Module_info structures
* for passthru read and write queues
*/
static void link_rempassthru(queue_t *);
struct module_info passthru_info = {
0,
"passthru",
0,
};
struct qinit passthru_rinit = {
(int (*)())putnext,
NULL,
NULL,
NULL,
NULL,
};
struct qinit passthru_winit = {
(int (*)()) pass_wput,
NULL,
NULL,
NULL,
NULL,
};
/*
* Special form of assertion: verify that X implies Y i.e. when X is true Y
* should also be true.
*/
/*
* Logical equivalence. Verify that both X and Y are either TRUE or FALSE.
*/
/*
*/
}
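/*
 * The bodies of the two assertion macros described above were elided from
 * this extract; a minimal sketch of what such assertions typically look
 * like (macro names assumed):
 */
#if 0	/* sketch, not compiled */
#define	IMPLY(X, Y)	ASSERT(!(X) || (Y))
#define	EQUIV(X, Y)	{ IMPLY(X, Y); IMPLY(Y, X); }
#endif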
/*
 * Enqueue a list element `el' at the end of a list denoted by `head' and `tail'
* using a `link' field.
*/
else \
}
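/*
 * A minimal sketch of such an enqueue macro (the actual definition was
 * elided from this extract; the shape below is an assumption):
 */
#if 0	/* sketch, not compiled */
#define	ENQUEUE(el, head, tail, link) {		\
	(el)->link = NULL;			\
	if ((head) == NULL)			\
		(head) = (el);			\
	else					\
		(tail)->link = (el);		\
	(tail) = (el);				\
}
#endif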
/*
* Dequeue the first element of the list denoted by `head' and `tail' pointers
* using a `link' field and put result into `el'.
*/
} \
}
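/*
 * A minimal sketch of the corresponding dequeue macro (definition elided
 * from this extract; name and shape assumed):
 */
#if 0	/* sketch, not compiled */
#define	DQ(el, head, tail, link) {		\
	(el) = (head);				\
	if ((el) != NULL) {			\
		(head) = (el)->link;		\
		if ((head) == NULL)		\
			(tail) = NULL;		\
		(el)->link = NULL;		\
	}					\
}
#endif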
/*
* Remove `el' from the list using `chase' and `curr' pointers and return result
* in `succeed'.
*/
succeed = 0; \
succeed = 1; \
else \
} \
}
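/*
 * A minimal sketch of the removal macro described above (definition elided
 * from this extract; name and shape assumed): walk the list with `chase'
 * trailing `curr' until `el' is found, unlink it, and record whether it was
 * found in `succeed'.
 */
#if 0	/* sketch, not compiled */
#define	RMQ(el, head, tail, link, chase, curr, succeed) {	\
	succeed = 0;						\
	chase = NULL;						\
	for (curr = (head); curr != NULL; curr = curr->link) {	\
		if (curr == (el)) {				\
			succeed = 1;				\
			break;					\
		}						\
		chase = curr;					\
	}							\
	if (succeed) {						\
		if (chase == NULL)				\
			(head) = curr->link;			\
		else						\
			chase->link = curr->link;		\
		if ((tail) == (el))				\
			(tail) = chase;				\
		(el)->link = NULL;				\
	}							\
}
#endif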
/* Handling of delayed messages on the inner syncq. */
/*
* DEBUG versions should use function versions (to simplify tracing) and
* non-DEBUG kernels should use macro versions.
*/
/*
* Put a queue on the syncq list of queues.
* Assumes SQLOCK held.
*/
{ \
/* The queue should not be linked anywhere */ \
/* Head and tail may only be NULL simultaneously */ \
/* Queue may only be enqueued on its syncq */ \
/* Check the correctness of SQ_MESSAGES flag */ \
/* \
* Sanity check of priority field: empty queue should \
* have zero priority \
* and nqueues equal to zero. \
*/ \
/* Sanity check of sq_nqueues field */ \
} else { \
/* \
* Put this queue in priority order: higher \
* priority gets closer to the head. \
*/ \
\
} \
} else { \
} \
} \
sq->sq_nqueues++; \
} \
}
/*
* Remove a queue from the syncq list
* Assumes SQLOCK held.
*/
{ \
/* Check that the queue is actually in the list */ \
/* First queue on list, make head q_sqnext */ \
} else { \
/* Make prev->next == next */ \
} \
/* Last queue on list, make tail sqprev */ \
} else { \
/* Make next->prev == prev */ \
} \
/* clear out references on this queue */ \
/* If there is nothing queued, clear SQ_MESSAGES */ \
} else { \
} \
sq->sq_nqueues--; \
}
/* Hide the definition from the header file. */
#ifdef SQPUT_MP
#endif
/*
* Put a message on the queue syncq.
* Assumes QLOCK held.
*/
{ \
qp->q_syncqmsgs++; \
} else { \
} \
}
#define SQ_PUTCOUNT_SETFAST_LOCKED(sq) { \
int i; \
for (i = 0; i <= nlocks; i++) { \
} \
} \
}
#define SQ_PUTCOUNT_CLRFAST_LOCKED(sq) { \
int i; \
for (i = 0; i <= nlocks; i++) { \
} \
} \
}
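/*
 * The loop bodies of the two macros above were elided from this extract.
 * A minimal sketch, assuming they set (or clear) the SQ_FASTPUT bit in each
 * ciputctrl counter while its lock is held (field names assumed):
 */
#if 0	/* sketch, not compiled */
#define	SQ_PUTCOUNT_SETFAST_SKETCH(sq) {			\
	int i;							\
	int nlocks = (sq)->sq_nciputctrl;			\
	ciputctrl_t *cip = (sq)->sq_ciputctrl;			\
	for (i = 0; i <= nlocks; i++) {				\
		ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock));	\
		cip[i].ciputctrl_count |= SQ_FASTPUT;		\
	}							\
}
#endif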
/*
* Run service procedures for all queues in the stream head.
*/
#define STR_SERVICE(stp, q) { \
stp->sd_nqueues--; \
queue_service(q); \
} \
}
/*
* constructor/destructor routines for the stream head cache
*/
/* ARGSUSED */
static int
{
return (0);
}
/* ARGSUSED */
static void
{
}
/*
* constructor/destructor routines for the queue cache
*/
/* ARGSUSED */
static int
{
sq->sq_svcflags = 0;
sq->sq_servcount = 0;
sq->sq_needexcl = 0;
sq->sq_nqueues = 0;
return (0);
}
/* ARGSUSED */
static void
{
}
/*
* constructor/destructor routines for the syncq cache
*/
/* ARGSUSED */
static int
{
return (0);
}
/* ARGSUSED */
static void
{
}
/* ARGSUSED */
static int
{
int i;
for (i = 0; i < n_ciputctrl; i++) {
}
return (0);
}
/* ARGSUSED */
static void
{
int i;
for (i = 0; i < n_ciputctrl; i++) {
}
}
/*
* Init routine run from main at boot time.
*/
void
strinit(void)
{
sizeof (stdata_t), 0,
n_ciputctrl = ncpus;
if (n_ciputctrl >= min_n_ciputctrl) {
sizeof (ciputctrl_t) * n_ciputctrl,
sizeof (ciputctrl_t), ciputctrl_constructor,
}
if (streams_taskq == NULL)
panic("strinit: no memory for streams taskq!");
/*
* Create STREAMS kstats.
*/
"net", KSTAT_TYPE_NAMED,
sizeof (str_statistics) / sizeof (kstat_named_t),
}
/*
* TPI support routine initialisation.
*/
tpi_init();
/*
* Handle to have autopush and persistent link information per
* zone.
* Note: uses shutdown hook instead of destroy hook so that the
 * persistent links can be torn down before the destroy hooks are run.
*/
}
void
{
/* Have to hold sd_lock to prevent siglist from changing */
}
/*
* Send the "sevent" set of signals to a process.
* This might send more than one signal if the process is registered
* for multiple events. The caller should pass in an sevent that only
* includes the events for which the process has registered.
*/
static void
{
}
}
}
else
}
}
}
}
}
}
if (sevent != 0) {
}
}
/*
 * Send signals to all processes and process groups
 * registered on the given signal list that want a signal for at
* least one of the specified events.
*
* Must be called with exclusive access to siglist (caller holding sd_lock).
*
 * The siglist is only changed by the ioctl code while holding
 * sd_lock, and the ioctl code maintains a PID_HOLD on the pid structure
* while it is in the siglist.
*
* For performance reasons (MP scalability) the code drops pidlock
* when sending signals to a single process.
* When sending to a process group the code holds
* pidlock to prevent the membership in the process group from changing
* while walking the p_pglink list.
*/
void
{
int sevent;
if (sevent == 0)
continue;
/* pid was released but still on event list */
continue;
}
/*
* XXX This unfortunately still generates
* a signal when a fd is closed but
* the proc is active.
*/
continue;
}
} else {
/*
* Send to process group. Hold pidlock across
* calls to dosendsig().
*/
}
}
}
}
/*
* Attach a stream device or module.
* qp is a read queue; the new queue goes in so its next
* read ptr is the argument, and the write queue corresponding
* to the argument points to this queue. Return 0 on success,
* or a non-zero errno on failure.
*/
int
{
int error;
int sflag;
/*
* stash away a pointer to the module structure so we can
* unref it in qdetach.
*/
} else {
/* create perdm_t if needed */
sflag = 0;
}
/* setq might sleep in allocator - avoid holding locks. */
/*
* Before calling the module's open routine, set up the q_next
* pointer for inserting a module in the middle of a stream.
*
* Note that we can always set _QINSERTING and set up q_next
* pointer for both inserting and pushing a module. Then there
* is no need for the is_insert parameter. In insertq(), called
* by qprocson(), assume that q_next of the new module always points
* to the correct queue and use it for insertion. Everything should
* work out fine. But in the first release of _I_INSERT, we
* distinguish between inserting and pushing to make sure that
* pushing a module follows the same code path as before.
*/
if (is_insert) {
}
/*
* If there is an outer perimeter get exclusive access during
* the open procedure. Bump up the reference count on the queue.
*/
if (error != 0)
goto failed;
return (0);
return (error);
}
/*
* Handle second open of stream. For modules, set the
* last argument to MODOPEN and do not pass any open flags.
* Ignore dummydev since this is not the first open.
*/
int
{
int error;
return (error);
}
/*
* successful open should have done qprocson()
*/
return (0);
}
/*
* Detach a stream module or device.
* If clmode == 1 then the module or driver was opened and its
* close routine must be called. If clmode == 0, the module
* or driver was never opened or the open failed, and so its close
* should not be called.
*/
void
{
if (clmode) {
/*
* Make sure that all the messages on the write side syncq are
* processed and nothing is left. Since we are closing, no new
* messages may appear there.
*/
if (is_remove) {
}
/*
* Check that qprocsoff() was actually called.
*/
} else {
}
/*
* Allow any threads blocked in entersq to proceed and discover
* the QWCLOSE is set.
* Note: This assumes that all users of entersq check QWCLOSE.
* Currently runservice is the only entersq that can happen
* after removeq has finished.
* Removeq will have discarded all messages destined to the closing
* pair of queues from the syncq.
* NOTE: Calling a function inside an assert is unconventional.
* However, it does not cause any problem since flush_syncq() does
* not change any state except when it returns non-zero i.e.
* when the assert will trigger.
*/
/* release any fmodsw_impl_t structure held on behalf of the queue */
/* freeq removes us from the outer perimeter if any */
}
/* Prevent service procedures from being called */
void
{
}
/* allow service procedures to be called again */
void
{
}
/*
* Only reset QENAB if the queue was removed from the runlist.
* A queue goes through 3 stages:
* It is on the service list and QENAB is set.
* It is removed from the service list but QENAB is still set.
* QENAB gets changed to QINSERVICE.
* QINSERVICE is reset (when the service procedure is done)
* Thus we can not reset QENAB unless we actually removed it from the service
* queue.
*/
void
{
int removed;
if (removed) {
}
}
}
/*
 * Wait for any pending service processing to complete.
 * The removal of queues from the runlist is not atomic with the
 * clearing of the QENABLED flag and setting the INSERVICE flag.
 * Consequently it is possible for remove_runlist in strclose
* to not find the queue on the runlist but for it to be QENABLED
* and not yet INSERVICE -> hence wait_svc needs to check QENABLED
* as well as INSERVICE.
*/
void
{
/*
*/
}
/*
 * Wait until the syncqs associated with the queue
 * disappear from the background processing list.
 * This only needs to be done for non-PERMOD perimeters since
 * for PERMOD perimeters the syncq may be shared and will only be freed
 * when the last reference to it is released (see rele_dm()).
 * If for PERMOD perimeters the queue was on the syncq list, removeq()
 * should call propagate_syncq() or drain_syncq() for it. Both of these
 * functions remove the queue from its syncq list, so sqthread will not
* try to access the queue.
*/
/*
* Disable rsq and wsq and wait for any background processing of
* syncq to complete.
*/
}
}
/*
* Put ioctl data from userland buffer `arg' into the mblk chain `bp'.
* `flag' must always contain either K_TO_K or U_TO_K; STR_NOSIG may
* also be set, and is passed through to allocb_cred_wait().
*
* Returns errno on failure, zero on success.
*/
int
{
int error = 0;
} else {
}
/*
* strdoioctl validates ioc_count, so if this assert fails it
* cannot be due to user error.
*/
NULL) {
return (error);
}
if (error != 0) {
return (error);
}
return (0);
}
/*
* Copy ioctl data to user-land. Return non-zero errno on failure,
* 0 for success.
*/
int
{
size_t n;
int error;
else {
}
if (error)
return (error);
}
return (0);
}
/*
* Allocate a linkinfo entry given the write queue of the
* bottom module of the top stream and the write queue of the
* stream head of the bottom stream.
*/
{
return (linkp);
}
/*
* Free a linkinfo entry.
*/
void
{
else
}
/*
* Check for a potential linking cycle.
* Return 1 if a link will result in a cycle,
* and 0 otherwise.
*/
int
{
int i;
/*
*/
return (0);
}
for (;;) {
if (!MUX_DIDVISIT(np)) {
return (1);
return (0);
continue;
}
} else {
return (0);
else {
continue;
}
}
/*
* If ep->me_nodep is a FIFO (me_nodep == NULL),
* ignore the edge and move on. ep->me_nodep gets
* set to NULL in mux_addedge() if it is a FIFO.
*
*/
continue;
}
}
}
/*
* Find linkinfo entry corresponding to the parameters.
*/
{
return (linkp);
}
}
} else {
while (mep) {
break;
}
if (!mep) {
return (NULL);
}
return (linkp);
}
}
}
return (NULL);
}
/*
* Given a queue ptr, follow the chain of q_next pointers until you reach the
* last queue on the chain and return it.
*/
queue_t *
{
while (_SAMESTR(q))
q = q->q_next;
return (q);
}
/*
* wait for the syncq count to drop to zero.
* sq could be either outer or inner.
*/
static void
{
while (count != 0) {
}
}
/*
* Wait while there are any messages for the queue in its syncq.
*/
static void
wait_q_syncq(queue_t *q)
{
}
}
}
int
int lhlink)
{
int error = 0;
netstack_t *ns;
/*
* Test for invalid upper stream
*/
return (ENXIO);
}
return (EINVAL);
}
return (EINVAL);
}
return (EINVAL);
}
return (EBADF);
}
return (EINVAL);
}
return (ENXIO);
}
/*
* Test for invalid lower stream.
* The check for the v_type != VFIFO and having a major
* number not >= devcnt is done to avoid problems with
* adding mux_node entry past the end of mux_nodes[].
* For FIFO's we don't add an entry so this isn't a
* problem.
*/
return (EINVAL);
}
/*
* STRPLUMB protects plumbing changes and should be set before
* link_addpassthru()/link_rempassthru() are called, so it is set here
* and cleared in the end of mlink when passthru queue is removed.
* Setting of STRPLUMB prevents reopens of the stream while passthru
* queue is in-place (it is not a proper module and doesn't have open
* entry point).
*
* STPLEX prevents any threads from entering the stream from above. It
* can't be set before the call to link_addpassthru() because putnext
* from below may cause stream head I/O routines to be called and these
* routines assert that STPLEX is not set. After link_addpassthru()
* nothing may come from below since the pass queue syncq is blocked.
* Note also that STPLEX should be cleared before the call to
 * link_rempassthru() since when messages start flowing to the stream
* head (e.g. because of message propagation from the pass queue) stream
* head I/O routines may be called with STPLEX flag set.
*
* When STPLEX is set, nothing may come into the stream from above and
* it is safe to do a setq which will change stream head. So, the
* correct sequence of actions is:
*
* 1) Set STRPLUMB
* 2) Call link_addpassthru()
* 3) Set STPLEX
* 4) Call setq and update the stream state
* 5) Clear STPLEX
* 6) Call link_rempassthru()
* 7) Clear STRPLUMB
*
* The same sequence applies to munlink() code.
*/
/*
 * Add passthru queue below lower mux. This will block
 * messages flowing up from the lower stream.
*/
/*
* There may be messages in the streamhead's syncq due to messages
* that arrived before link_addpassthru() was done. To avoid
 * background processing of the syncq happening simultaneously with
 * setq processing, we disable the streamhead syncq and wait until the
 * existing background thread finishes working on it.
*/
/* setq might sleep in allocator - avoid holding locks. */
/* Note: we are holding muxifier here. */
/* create perdm_t if needed */
B_TRUE);
/*
* XXX Remove any "odd" messages from the queue.
* Keep only M_DATA, M_PROTO, M_PCPROTO.
*/
if (error != 0) {
/*
* Restore the stream head queue and then remove
* the passq. Turn off STPLEX before we turn on
* the stream by removing the passq.
*/
B_TRUE);
/* Wakeup anyone waiting for STRPLUMB to clear. */
return (error);
}
/*
* if we've made it here the linkage is all set up so we should also
* set up the layered driver linkages
*/
} else {
}
/*
* Mark the upper stream as having dependent links
* so that strclose can clean it up.
*/
}
/*
* Wake up any other processes that may have been
* waiting on the lower stream. These will all
* error out.
*/
/* The passthru module is removed so we may release STRPLUMB */
return (0);
}
int
{
int ret;
return (ret);
}
/*
* Unlink a multiplexor link. Stp is the controlling stream for the
* link, and linkp points to the link's entry in the linkinfo list.
* The muxifier lock must be held on entry and is dropped on exit.
*
 * NOTE: Currently it is assumed that the mux will process all the messages
 * sitting on its queue before ACKing the UNLINK. It is the responsibility
* of the mux to handle all the messages that arrive before UNLINK.
* If the mux has to send down messages on its lower stream before
* ACKing I_UNLINK, then it *should* know to handle messages even
* after the UNLINK is acked (actually it should be able to handle till we
* re-block the read side of the pass queue here). If the mux does not
* open up the lower stream, any messages that arrive during UNLINK
* will be put in the stream head. In the case of lower stream opening
* up, some messages might land in the stream head depending on when
* the message arrived and when the read side of the pass queue was
* re-blocked.
*/
int
{
int error = 0;
/*
*/
/*
 * Add passthru queue below lower mux. This will block
 * messages flowing up from the lower stream.
*/
else
/*
* If there was an error and this is not called via strclose,
* return to the user. Otherwise, pretend there was no error
* and close the link.
*/
if (error) {
"unlink ioctl, closing anyway (%d)\n", error);
} else {
return (error);
}
}
/*
* We go ahead and drop muxifier here--it's a nasty global lock that
 * can slow others down. It's okay to do so since attempts to mlink() this
* stream will be stopped because STPLEX is still set in the stdata
* structure, and munlink() is stopped because mux_rmvedge() and
* lbfree() have removed it from mux_nodes[] and linkinfo_list,
* respectively. Note that we defer the closef() of fpdown until
* after we drop muxifier since strclose() can call munlinkall().
*/
/*
* Get rid of outstanding service procedure runs, before we make
* it a stream head, since a stream head doesn't have any service
* procedure.
*/
/*
* Since we don't disable the syncq for QPERMOD, we wait for whatever
* is queued up to be finished. mux should take care that nothing is
 * sent down to this queue. We should do it now as we're going to block
* passyncq if it was unblocked.
*/
}
}
/*
* Messages could be flowing from underneath. We will
* block the read side of the passq. This would be
* sufficient for QPAIR and QPERQ muxes to ensure
* that no data is flowing up into this queue
* and hence no thread active in this instance of
 * lower mux. But for QPERMOD and QMTOUTPERIM there
 * could be messages on the inner and outer perimeter
 * syncqs respectively. We will wait for them to drain.
 * Because passq is blocked, messages end up in the syncq,
 * and qfill_syncq could possibly end up setting QFULL,
 * which will access the rq->q_flag. Hence, we have to
* acquire the QLOCK in setq.
*
* XXX Messages can also flow from top into this
* queue though the unlink is over (Ex. some instance
* in putnext() called from top that has still not
* accessed this queue. And also putq(lowerq) ?).
* Solution : How about blocking the l_qtop queue ?
* Do we really care about such pure D_MP muxes ?
*/
/*
* We have to just wait for the outer sq_count
 * to drop to zero. As this does not prevent new
* messages to enter the outer perimeter, this
* is subject to starvation.
*
 * NOTE: Because of blocksq above, messages could
* be in the inner syncq only because of some
* thread holding the outer perimeter exclusively.
* Hence it would be sufficient to wait for the
* exclusive holder of the outer perimeter to drain
* the inner and outer syncqs. But we will not depend
* on this feature and hence check the inner syncqs
* separately.
*/
}
/*
* There could be messages destined for
* this queue. Let the exclusive holder
* drain it.
*/
wait_syncq(sq);
}
/*
* We haven't taken care of QPERMOD case yet. QPERMOD is a special
* case as we don't disable its syncq or remove it off the syncq
* service list.
*/
}
}
/*
 * flush_syncq changes state only when there are messages to
 * free, i.e. when it returns a non-zero value.
*/
/*
 * Nobody else should know about this queue now.
* If the mux did not process the messages before
* acking the I_UNLINK, free them now.
*/
/*
* Convert the mux lower queue into a stream head queue.
* Turn off STPLEX before we turn on the stream by removing the passq.
*/
enable_svc(rq);
/*
* Now it is a proper stream, so STPLEX is cleared. But STRPLUMB still
* needs to be set to prevent reopen() of the stream - such reopen may
* try to call non-existent pass queue open routine and panic.
*/
/* clean up the layered driver linkages */
} else {
}
/*
* Now all plumbing changes are finished and STRPLUMB is no
* longer needed.
*/
return (0);
}
/*
* Unlink all multiplexor links for which stp is the controlling stream.
* Return 0, or a non-zero errno on failure.
*/
int
{
int error = 0;
/*
* munlink() releases the muxifier lock.
*/
return (error);
}
return (0);
}
/*
* A multiplexor link has been made. Add an
* edge to the directed graph.
*/
void
{
} else {
}
/*
* Save the dev_t for the purposes of str_stack_shutdown.
* str_stack_shutdown assumes that the device allows reopen, since
* this dev_t is the one after any cloning by xx_open().
* Would prefer finding the dev_t from before any cloning,
* but specfs doesn't retain that.
*/
else
}
/*
* A multiplexor link has been removed. Remove the
* edge in the directed graph.
*/
void
{
while (ep) {
if (pep)
else
return;
}
}
ASSERT(0); /* should not reach here */
}
/*
* Translate the device flags (from conf.h) to the corresponding
* qflag and sq_flag (type) values.
*/
int
{
goto bad;
/* Inner perimeter presence and scope */
switch (devflag & D_MTINNER_MASK) {
case D_MP:
break;
break;
break;
case D_MTPERMOD|D_MP:
break;
default:
goto bad;
}
/* Outer perimeter */
if (devflag & D_MTOUTPERIM) {
switch (devflag & D_MTINNER_MASK) {
case D_MP:
break;
default:
goto bad;
}
qflag |= QMTOUTPERIM;
}
/* Inner perimeter modifiers */
if (devflag & D_MTINNER_MOD) {
switch (devflag & D_MTINNER_MASK) {
case D_MP:
goto bad;
default:
break;
}
if (devflag & D_MTPUTSHARED)
if (devflag & _D_MTOCSHARED) {
/*
* The code in putnext assumes that it has the
* highest concurrency by not checking sq_count.
* Thus _D_MTOCSHARED can only be supported when
* D_MTPUTSHARED is set.
*/
if (!(devflag & D_MTPUTSHARED))
goto bad;
}
if (devflag & _D_MTCBSHARED) {
/*
* The code in putnext assumes that it has the
* highest concurrency by not checking sq_count.
* Thus _D_MTCBSHARED can only be supported when
* D_MTPUTSHARED is set.
*/
if (!(devflag & D_MTPUTSHARED))
goto bad;
}
if (devflag & _D_MTSVCSHARED) {
/*
* The code in putnext assumes that it has the
* highest concurrency by not checking sq_count.
* Thus _D_MTSVCSHARED can only be supported when
* D_MTPUTSHARED is set. Also _D_MTSVCSHARED is
* supported only for QPERMOD.
*/
goto bad;
}
}
/* Default outer perimeter concurrency */
/* Outer perimeter modifiers */
if (devflag & D_MTOCEXCL) {
if (!(devflag & D_MTOUTPERIM)) {
/* No outer perimeter */
goto bad;
}
}
/* Synchronous Streams extended qinit structure */
/*
* Private flag used by a transport module to indicate
* to sockfs that it supports direct-access mode without
* having to go through STREAMS or the transport can use
* sodirect_t sharing to bypass STREAMS for receive-side
* M_DATA processing.
*/
/* Reject unless the module is fully-MT (no perimeter) */
goto bad;
if (devflag & _D_SODIRECT)
qflag |= _QSODIRECT;
}
return (0);
bad:
"stropen: bad MT flags (0x%x) in driver '%s'",
(int)(qflag & D_MTSAFETY_MASK),
return (EINVAL);
}
/*
* Set the interface values for a pair of queues (qinit structure,
* packet sizes, water marks).
* setq assumes that the caller does not have a claim (entersq or claimq)
* on the queue.
*/
void
{
/* Remove old syncqs */
}
free_syncq(sq);
}
}
}
}
/*
* Create syncqs based on qflag and sqtype. Set the SQ_TYPES_IN_FLAGS
* bits in sq_flag based on the sqtype.
*/
/*
* We are making sq_svcflags zero,
* resetting SQ_DISABLED in case it was set by
* wait_svc() in the munlink path.
*
*/
sq->sq_svcflags = 0;
/*
* We need to acquire the lock here for the mlink and munlink case,
* where canputnext, backenable, etc can access the q_flag.
*/
if (lock_needed) {
} else {
}
/* Allocate a separate syncq for the write side */
}
/*
* Assert that we do have an inner perimeter syncq and that it
* does not have an outer perimeter associated with it.
*/
}
if (qflag & QMTOUTPERIM) {
}
/*
* Initialize struio() types.
*/
}
perdm_t *
{
perdm_t *p;
return (p);
}
}
} else {
}
p->dm_ref++;
free_syncq(sq);
return (p);
}
}
return (dmp);
}
void
{
perdm_t *p;
return;
}
if (p == dmp)
break;
/*
* Wait for any background processing that relies on the
* syncq to complete before it is freed.
*/
wait_sq_svc(p->dm_sq);
free_syncq(p->dm_sq);
}
/*
* Make a protocol message given control and data buffers.
* n.b., this can block; be careful of what locks you hold when calling it.
*
* If sd_maxblk is less than *iosize this routine can fail part way through
* (due to an allocation failure). In this case on return *iosize will contain
* the amount that was consumed. Otherwise *iosize will not be modified
* i.e. it will contain the amount that was consumed.
*/
int
{
int error;
/* Create control part, if any */
if (error)
return (error);
}
/* Create data part, if any */
if (*iosize >= 0) {
if (error) {
return (error);
}
}
} else {
}
return (0);
}
/*
* Make the control part of a protocol message given a control buffer.
* n.b., this can block; be careful of what locks you hold when calling it.
*/
int
{
unsigned char msgtype;
int error = 0;
/*
* Create control part of message, if any.
*/
int ctlcount;
int allocsz;
else
/*
 * Give modules a better chance to reuse M_PROTO/M_PCPROTO
 * blocks by increasing the size to something more usable.
*/
/*
* Range checking has already been done; simply try
* to allocate a message block for the ctl part.
*/
return (EAGAIN);
return (error);
}
return (EFAULT);
}
}
return (0);
}
/*
* Make a protocol message given data buffers.
* n.b., this can block; be careful of what locks you hold when calling it.
*
* If sd_maxblk is less than *iosize this routine can fail part way through
* (due to an allocation failure). In this case on return *iosize will contain
* the amount that was consumed. Otherwise *iosize will not be modified
* i.e. it will contain the amount that was consumed.
*/
int
{
int error = 0;
if (count < 0)
return (0);
/*
* Create data part of message, if any.
*/
do {
return (error);
} else {
return (0);
}
}
}
if (flag & STRUIO_POSTPONE) {
/*
* Setup the stream uio portion of the
* dblk for subsequent use by struioget().
*/
dp->db_cksumstart = 0;
dp->db_cksumstuff = 0;
} else {
if (size != 0) {
uiop);
if (error != 0) {
return (error);
}
}
return (ECOMM);
}
}
}
else
} while (count > 0);
return (0);
}
/*
* Wait for a buffer to become available. Return non-zero errno
* if not able to wait, 0 if buffer is probably there.
*/
int
{
return (ENOSR);
}
return (EINTR);
}
return (0);
}
/*
* This function waits for a read or write event to happen on a stream.
* The timeout is in ms with -1 meaning infinite.
* The flag values work as follows:
* READWAIT Check for read side errors, send M_READ
* GETWAIT Check for read side errors, no M_READ
* WRITEWAIT Check for write side errors.
* NOINTR Do not return error if nonblocking or timeout.
* STR_NOERROR Ignore all errors except STPLEX.
* STR_PEEK Pass through the strgeterr().
*/
int
int *done)
{
int error;
} else {
}
if (flag & STR_NOERROR)
/*
* A strwakeq() is pending, no need to sleep.
*/
*done = 0;
return (0);
}
else
error = 0;
*done = 1;
return (error);
}
/*
* Check for errors before going to sleep since the
* caller might not have checked this while holding
* sd_lock.
*/
if (error != 0) {
*done = 1;
return (error);
}
}
/*
* If any module downstream has requested read notification
* by setting SNDMREAD flag using M_SETOPTS, send a message
* down stream.
*/
*done = 1;
return (error);
}
/*
* Send the number of bytes requested by the
* read as the argument to M_READ.
*/
/*
* If any data arrived due to inline processing
* of putnext(), don't sleep.
*/
*done = 0;
return (0);
}
}
"strwaitq sleeps (2):%p, %X, %lX, %X, %p",
if (rval > 0) {
/* EMPTY */
"strwaitq awakes(2):%X, %X, %X, %X, %X",
} else if (rval == 0) {
"strwaitq interrupt #2:%p, %X, %lX, %X, %p",
else
error = 0;
*done = 1;
return (error);
} else {
/* timeout */
"strwaitq timeout:%p, %X, %lX, %X, %p",
*done = 1;
return (ETIME);
else
return (0);
}
/*
* If the caller implements delayed errors (i.e. queued after data)
* we can not check for errors here since data as well as an
* error might have arrived at the stream head. We return to
* have the caller check the read queue before checking for errors.
*/
if (error != 0) {
*done = 1;
return (error);
}
}
*done = 0;
return (0);
}
/*
* Perform job control discipline access checks.
* Return 0 for success and the errno for failure.
*/
int
{
return (0);
for (;;) {
/*
* If this is not the calling process's controlling terminal
* or if the calling process is already in the foreground
* then allow access.
*/
mutex_exit(&p->p_splock);
mutex_exit(&p->p_lock);
return (0);
}
/*
* Check to see if controlling terminal has been deallocated.
*/
mutex_exit(&p->p_splock);
mutex_exit(&p->p_lock);
return (EIO);
}
mutex_exit(&p->p_splock);
mutex_exit(&p->p_lock);
return (0);
}
mutex_exit(&p->p_lock);
return (EIO);
}
mutex_exit(&p->p_lock);
mutex_enter(&p->p_lock);
} else { /* mode == JCWRITE or JCSETP */
mutex_exit(&p->p_lock);
return (0);
}
if (p->p_detached) {
mutex_exit(&p->p_lock);
return (EIO);
}
mutex_exit(&p->p_lock);
mutex_enter(&p->p_lock);
}
/*
* We call cv_wait_sig_swap() to cause the appropriate
* action for the jobcontrol signal to take place.
* If the signal is being caught, we will take the
* EINTR error return. Otherwise, the default action
* of causing the process to stop will take place.
* In this case, we rely on the periodic cv_broadcast() on
* &lbolt_cv to wake us up to loop around and test again.
* We can't get here if the signal is ignored or
* if the current thread is blocking the signal.
*/
mutex_exit(&p->p_lock);
return (EINTR);
}
mutex_exit(&p->p_lock);
mutex_enter(&p->p_lock);
}
}
/*
* Return size of message of block type (bp->b_datap->db_type)
*/
{
unsigned char type;
break;
}
return (count);
}
/*
* Allocate a stream head.
*/
struct stdata *
{
stp->sd_struiodnak = 0;
stp->sd_rput_opt = 0;
stp->sd_wput_opt = 0;
stp->sd_read_opt = 0;
stp->sd_nciputctrl = 0;
stp->sd_nqueues = 0;
stp->sd_svcflags = 0;
stp->sd_copyflag = 0;
return (stp);
}
/*
* Free a stream head.
*/
void
{
}
stp->sd_nciputctrl, 0);
stp->sd_nciputctrl = 0;
}
}
/*
* Allocate a pair of queues and a syncq for the pair
*/
queue_t *
allocq(void)
{
qp->q_draining = 0;
qp->q_syncqmsgs = 0;
qp->q_sqtstamp = 0;
wqp->q_draining = 0;
wqp->q_syncqmsgs = 0;
wqp->q_sqtstamp = 0;
sq->sq_rmqcount = 0;
sq->sq_callbflags = 0;
sq->sq_cancelid = 0;
sq->sq_nciputctrl = 0;
sq->sq_needexcl = 0;
sq->sq_svcflags = 0;
return (qp);
}
/*
* Free a pair of queues and the "attached" syncq.
* Discard any messages left on the syncq(s), remove the syncq(s) from the
* outer perimeter, and free the syncq(s) if they are not the "attached" syncq.
*/
void
{
/*
* If a previously dispatched taskq job is scheduled to run
* sync_service() or a service routine is scheduled for the
* queues about to be freed, wait here until all service is
* done on the queue and all associated queues and syncqs.
*/
/*
* Flush the queues before q_next is set to NULL This is needed
* in order to backenable any downstream queue before we go away.
* Note: we are already removed from the stream so that the
* backenabling will not cause any messages to be delivered to our
* put procedures.
*/
/* Tidy up - removeq only does a half-remove from stream */
}
/*
* Free any syncqs that are outside what allocq returned.
*/
sq->sq_nciputctrl, 0);
sq->sq_nciputctrl = 0;
}
/* NOTE: Uncomment the assert below once bugid 1159635 is fixed. */
/* ASSERT((qp->q_flag & QWANTW) == 0 && (wqp->q_flag & QWANTW) == 0); */
while (qbp) {
}
while (qbp) {
}
}
/*
* Allocate a qband structure.
*/
qband_t *
allocband(void)
{
return (NULL);
qbp->qb_mblkcnt = 0;
return (qbp);
}
/*
* Free a qband structure.
*/
void
{
}
/*
* Just like putnextctl(9F), except that allocb_wait() is used.
*
* Consolidation Private, and of course only callable from the stream head or
* routines that may block.
*/
int
{
int error;
return (0);
return (1);
}
/*
* run any possible bufcalls.
*/
void
runbufcalls(void)
{
int nevent;
/*
* count how many events are on the list
* now so we can check to avoid looping
* in low memory situations
*/
nevent = 0;
nevent++;
/*
* get estimate of available memory from kmem_avail().
* awake all bufcall functions waiting for
* memory whose request could be satisfied
* by 'count' memory and let 'em fight for it.
*/
count = kmem_avail();
--nevent;
} else {
/*
* too big, try again later - note
* that nevent was decremented above
* so we won't retry this one on this
* iteration of the loop
*/
}
}
}
}
}
/*
* actually run queue's service routine.
*/
static void
runservice(queue_t *q)
{
"runservice starts:%p", q);
"runservice ends:(%p)", q);
mutex_enter(QLOCK(q));
mutex_exit(QLOCK(q));
goto again;
}
q->q_flag &= ~QINSERVICE;
/*
* Wakeup thread waiting for the service procedure
* to be run (strclose and qdetach).
*/
cv_broadcast(&q->q_wait);
mutex_exit(QLOCK(q));
}
/*
* Background processing of bufcalls.
*/
void
streams_bufcall_service(void)
{
"streams_bufcall_service");
for (;;) {
runbufcalls();
}
/* Wait for memory to become available */
}
/* Wait for new work to arrive */
}
}
}
/*
* Background processing of streams background tasks which failed
* taskq_dispatch.
*/
static void
streams_qbkgrnd_service(void)
{
queue_t *q;
"streams_bkgrnd_service");
for (;;) {
/*
* Wait for work to arrive.
*/
}
/*
* Handle all pending freebs requests to free memory.
*/
while (freebs_list != NULL) {
}
/*
* Run pending queues.
*/
queue_service(q);
}
}
}
/*
* Background processing of streams background tasks which failed
* taskq_dispatch.
*/
static void
streams_sqbkgrnd_service(void)
{
"streams_sqbkgrnd_service");
for (;;) {
/*
* Wait for work to arrive.
*/
}
/*
* Run pending syncqs.
*/
}
}
}
/*
* Disable the syncq and wait for background syncq processing to complete.
 * If the syncq was placed on the background service list, remove it from the
 * list.
*/
void
{
int removed;
if (removed) {
sq->sq_servcount = 0;
goto done;
}
}
while (sq->sq_servcount != 0) {
}
done:
}
/*
* Put a syncq on the list of syncq's to be serviced by the sqthread.
* Add the argument to the end of the sqhead list and set the flag
* indicating this syncq has been enabled. If it has already been
* enabled, don't do anything.
* This routine assumes that SQLOCK is held.
* NOTE that the lock order is to have the SQLOCK first,
* so if the service_syncq lock is held, we need to release it
 * before acquiring the SQLOCK (mostly relevant for the background
 * thread, and this seems to be common among the STREAMS global locks).
 * Note that the sq_svcflags are protected by the SQLOCK.
*/
void
{
/*
* This is probably not important except for where I believe it
* is being called. At that point, it should be held (and it
* is a pain to release it just for this routine, so don't do
* it).
*/
/*
* Do not put on list if background thread is scheduled or
* syncq is disabled.
*/
return;
/*
* Check whether we should enable sq at all.
* Non PERMOD syncqs may be drained by at most one thread.
* PERMOD syncqs may be drained by several threads but we limit the
* total amount to the lesser of
* Number of queues on the squeue and
* Number of CPUs.
*/
if (sq->sq_servcount != 0) {
return;
}
}
/* Attempt a taskq dispatch */
sq->sq_servcount++;
return;
}
/*
* This taskq dispatch failed, but a previous one may have succeeded.
* Don't try to schedule on the background thread whilst there is
* outstanding taskq processing.
*/
if (sq->sq_servcount != 0)
return;
/*
* System is low on resources and can't perform a non-sleeping
* dispatch. Schedule the syncq for a background thread and mark the
* syncq to avoid any further taskq dispatch attempts.
*/
}
/*
* Note: fifo_close() depends on the mblk_t on the queue being freed
* asynchronously. The asynchronous freeing of messages breaks the
* recursive call chain of fifo_close() while there are I_SENDFD type of
 * messages referring to other file pointers on the queue. Then when
* closing pipes it can avoid stack overflow in case of daisy-chained
* pipes, and also avoid deadlock in case of fifonode_t pairs (which
* share the same fifolock_t).
*/
void
{
/*
* Check data sanity. The dblock should have non-empty free function.
 * It is better to panic here than later when the dblock is freed
* asynchronously when the context is lost.
*/
panic("freebs_enqueue: dblock %p has a NULL free callback",
(void *)dbp);
}
/* queue the new mblk on the esballoc queue */
} else {
}
/* If we're the first thread to reach the threshold, process */
}
static void
{
do {
/*
* Detach the message chain for processing.
*/
/*
* Process the message chain.
*/
}
/*
* taskq callback routine to free esballoced mblk's
*/
static void
{
}
}
static void
{
TQ_NOSLEEP) == NULL) {
/*
* System is low on resources and can't perform a non-sleeping
* dispatch. Schedule for a background thread.
*/
}
}
static void
esballoc_timer(void *arg)
{
}
static void
{
}
}
void
esballoc_queue_init(void)
{
system_esbq.eq_len = 0;
system_esbq.eq_flags = 0;
}
/*
* Set the QBACK or QB_BACK flag in the given queue for
* the given priority band.
*/
void
{
int i;
if (pri != 0) {
while (*qbpp)
"setqback: can't allocate qband\n");
return;
}
q->q_nband++;
}
}
i = pri;
while (--i)
} else {
}
}
int
{
return (EFAULT);
} else {
}
return (0);
}
int
{
return (EFAULT);
} else {
}
return (0);
}
/*
* strsignal_nolock() posts a signal to the process(es) at the stream head.
* It assumes that the stream head lock is already held, whereas strsignal()
* acquires the lock first. This routine was created because a few callers
* release the stream head lock before calling only to re-acquire it after
* it returns.
*/
void
{
switch (sig) {
case SIGPOLL:
break;
default:
}
break;
}
}
void
{
switch (sig) {
case SIGPOLL:
break;
default:
}
break;
}
}
void
{
}
/*
* Backenable the first queue upstream from `q' with a service procedure.
*/
void
{
/*
* our presence might not prevent other modules in our own
* have a claim on the queue (some drivers do a getq on somebody
* else's queue - they know that the queue itself is not going away
* but the framework has to guarantee q_next in that stream.)
*/
claimstr(q);
/* find nearest back queue with service proc */
}
if (nq) {
/*
* backenable can be called either with no locks held
* or with the stream frozen (the latter occurs when a module
* calls rmvq with the stream frozen.) If the stream is frozen
* by the caller the caller will hold all qlocks in the stream.
* Note that a frozen stream doesn't freeze a mated stream,
* so we explicitly check for that.
*/
}
#ifdef DEBUG
else {
}
#endif
}
releasestr(q);
}
/*
* Return the appropriate errno when one of flags_to_check is set
* in sd_flags. Uses the exported error routines if they are set.
 * Will return 0 if no error is set (or if the exported error routines
* do not return an error).
*
* If there is both a read and write error to check we prefer the read error.
* Also, give preference to recorded errno's over the error functions.
* The flags that are handled are:
* STPLEX return EINVAL
* STRDERR return sd_rerror (and clear if STRDERRNONPERSIST)
* STWRERR return sd_werror (and clear if STWRERRNONPERSIST)
* STRHUP return sd_werror
*
* If the caller indicates that the operation is a peek a nonpersistent error
* is not cleared.
*/
int
{
int error = 0;
/*
* Read errors are non-persistent i.e. discarded once
* returned to a non-peeking caller,
*/
}
int clearerr = 0;
&clearerr);
if (clearerr) {
}
}
/*
* Write errors are non-persistent i.e. discarded once
* returned to a non-peeking caller,
*/
}
int clearerr = 0;
&clearerr);
if (clearerr) {
}
}
/* sd_werror set when STRHUP */
}
return (error);
}
/*
* for twisted streams also
*/
int
{
int waited = 1;
int error = 0;
while (waited) {
waited = 0;
return (EAGAIN);
}
waited = 1;
return (EINTR);
}
}
return (EAGAIN);
}
waited = 1;
return (EINTR);
}
}
if (error != 0) {
return (error);
}
}
}
} else {
return (EAGAIN);
}
return (EINTR);
}
if (error != 0) {
return (error);
}
}
}
}
return (0);
}
/*
* Complete the plumbing operation associated with stream `stp'.
*/
void
{
}
/*
 * This describes how the STREAMS framework handles synchronization
 * during open/push and close/pop.
 * The key interfaces for open and close are qprocson and qprocsoff,
 * respectively. While the close case in general is harder, both open
 * and close have significant similarities.
*
* During close the STREAMS framework has to both ensure that there
* are no stale references to the queue pair (and syncq) that
* are being closed and also provide the guarantees that are documented
* in qprocsoff(9F).
* If there are stale references to the queue that is closing it can
* result in kernel memory corruption or kernel panics.
*
 * The module/driver is required to ensure that it
 * does not have any stale references to the closing queues once its close
 * routine returns. This includes:
 * - Cancelling any pending timeout and bufcall callbacks
 * associated with the queues. For timeout and bufcall callbacks the
 * driver/module has to ensure that any callbacks that
 * are in progress have completed.
 * - Ensuring that any
 * esballoc free functions do not refer to a queue that has closed.
* (Note that in general the close routine can not wait for the esballoc'ed
* messages to be freed since that can cause a deadlock.)
* - Cancelling any interrupts that refer to the closing queues and
* also ensuring that there are no interrupts in progress that will
* refer to the closing queues once the close routine returns.
* - For multiplexors removing any driver global state that refers to
* the closing queue and also ensuring that there are no threads in
* the multiplexor that has picked up a queue pointer but not yet
* finished using it.
*
 * A module/driver may only reference q_next
 * in its open, close, put, or service procedures or in a
 * qtimeout/qbufcall callback executing on behalf of a queue in the same
 * stream. Thus it can not reference the q_next pointer in an interrupt
 * routine or a timeout, bufcall or esballoc callback routine. Likewise
 * it can not reference q_next of a different queue e.g. in a mux that
 * passes messages between streams. Whenever it can not directly access the
 * q_next field it must use the *next* versions e.g. canputnext instead of
* canput(q->q_next) and putnextctl instead of putctl(q->q_next, ...).
*
 * In addition to the above module/driver requirements,
* the STREAMS framework has to avoid stale references to q_next for all
* the framework internal cases which include (but are not limited to):
* - Threads in canput/canputnext/backenable and elsewhere that are
* walking q_next.
* - Messages on a syncq that have a reference to the queue through b_queue.
* - Messages on an outer perimeter (syncq) that have a reference to the
* queue through b_queue.
* - Threads that use q_nfsrv (e.g. canput) to find a queue.
* Note that only canput and bcanput use q_nfsrv without any locking.
*
* The STREAMS framework providing the qprocsoff(9F) guarantees means that
* after qprocsoff returns, the framework has to ensure that no threads can
* enter the put or service routines for the closing read or write-side queue.
* In addition to preventing "direct" entry into the put procedures
* the framework also has to prevent messages being drained from
* the syncq or the outer perimeter.
 * XXX Note that currently qdetach relies on D_MTOCEXCL as the only
* mechanism to prevent qwriter(PERIM_OUTER) from running after
 * qprocsoff has returned; without it a qwriter callback could
 * get called when the queue is closing.
*
*
* The framework aspects of the above "contract" is implemented by
* qprocsoff, removeq, and strlock:
* - qprocsoff (disable_svc) sets QWCLOSE to prevent runservice from
* entering the service procedures.
* - strlock acquires the sd_lock and sd_reflock to prevent putnext,
* canputnext, backenable etc from dereferencing the q_next that will
* soon change.
* - strlock waits for sd_refcnt to be zero to wait for e.g. any canputnext
* or other q_next walker that uses claimstr/releasestr to finish.
* - optionally for every syncq in the stream strlock acquires all the
* sq_lock's and waits for all sq_counts to drop to a value that indicates
 * that no thread executes in the put or service procedures and that no
 * thread is draining into the module/driver. This ensures that no
 * open, close, put, service, or qtimeout/qbufcall callback procedure is
 * currently executing, hence no such thread can end up with the old stale
 * q_next value and no canput/backenable can have the old stale
 * q_nfsrv/q_next values.
* - qdetach (wait_svc) makes sure that any scheduled or running threads
* have either finished or observed the QWCLOSE flag and gone away.
*/
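/*
 * A minimal sketch, for illustration only, of the close-side obligations
 * described above for a hypothetical module "xx" that has a qtimeout and a
 * qbufcall outstanding. The state structure and its fields are assumptions,
 * not part of this file; only the DDI calls shown are standard.
 */
#if 0	/* sketch, not compiled */
static int
xx_close(queue_t *q, int flag, cred_t *crp)
{
	struct xxstate *xp = q->q_ptr;

	qprocsoff(q);			/* no new put/service entries */
	if (xp->xx_tid != 0)
		(void) quntimeout(q, xp->xx_tid);	/* cancel qtimeout */
	if (xp->xx_bid != 0)
		qunbufcall(q, xp->xx_bid);		/* cancel qbufcall */
	q->q_ptr = WR(q)->q_ptr = NULL;	/* no stale references remain */
	kmem_free(xp, sizeof (*xp));
	return (0);
}
#endif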
/*
* Get all the locks necessary to change q_next.
*
* Wait for sd_refcnt to reach 0 and, if sqlist is present, wait for the
* sq_count of each syncq in the list to drop to sq_rmqcount, indicating that
 * the only threads inside the syncq are threads currently calling removeq().
* Since threads calling removeq() are in the process of removing their queues
* from the stream, we do not need to worry about them accessing a stale q_next
* pointer and thus we do not need to wait for them to exit (in fact, waiting
* for them can cause deadlock).
*
* This routine is subject to starvation since it does not set any flag to
 * prevent threads from entering a module in the stream (i.e. sq_count can
* increase on some syncq while it is waiting on some other syncq.)
*
* Assumes that only one thread attempts to call strlock for a given
* stream. If this is not the case the two threads would deadlock.
* This assumption is guaranteed since strlock is only called by insertq
* and removeq and streams plumbing changes are single-threaded for
* a given stream using the STWOPEN, STRCLOSE, and STRPLUMB flags.
*
* For pipes, it is not difficult to atomically designate a pair of streams
* to be mated. Once mated atomically by the framework the twisted pair remain
* configured that way until dismantled atomically by the framework.
* When plumbing takes place on a twisted stream it is necessary to ensure that
* this operation is done exclusively on the twisted stream since two such
* operations, each initiated on different ends of the pipe will deadlock
* waiting for each other to complete.
*
* On entry, no locks should be held.
* The locks acquired and held by strlock depends on a few factors.
* - If sqlist is non-NULL all the syncq locks in the sqlist will be acquired
* and held on exit and all sq_count are at an acceptable level.
* - In all cases, sd_lock and sd_reflock are acquired and held on exit with
* sd_refcnt being zero.
*/
static void
{
/*
* Wait for any claimstr to go away.
*/
/*
* Note that the selection of locking order is not
 * important, just that they are always acquired in
* the same order. To assure this, we choose this
* order based on the value of the pointer, and since
* the pointer will not change for the life of this
* pair, we will always grab the locks in the same
* order (and hence, prevent deadlocks).
*/
} else {
}
goto retry;
}
goto retry;
}
} else {
}
}
}
return;
continue;
/* Failed - drop all locks that we have acquired so far */
} else {
}
}
/*
* The wait loop below may starve when there are many threads
* claiming the syncq. This is especially a problem with permod
* syncqs (IP). To lessen the impact of the problem we increment
* sq_needexcl and clear fastbits so that putnexts will slow
* down and call sqenable instead of draining right away.
*/
sq->sq_needexcl++;
}
sq->sq_needexcl--;
if (sq->sq_needexcl == 0)
goto retry;
}
}
/*
* Drop all the locks that strlock acquired.
*/
static void
{
} else {
}
return;
}
}
/*
 * When the module has a service procedure, we need to check whether the next
 * module that has a service procedure is in flow control in order to trigger
* the backenable.
*/
static void
{
claimstr(q);
backenable(q, 0);
}
releasestr(q);
}
/*
* Given two read queues, insert a new single one after another.
*
* This routine acquires all the necessary locks in order to change
* q_next and related pointer using strlock().
* It depends on the stream head ensuring that there are no concurrent
* insertq or removeq on the same stream. The stream head ensures this
* using the flags STWOPEN, STRCLOSE, and STRPLUMB.
*
* Note that no syncq locks are held during the q_next change. This is
* applied to all streams since, unlike removeq, there is no problem of stale
 * pointers when adding a module to the stream. Thus drivers/modules that do a
 * canput(rq->q_next) would never get a closed/freed queue pointer even if we
 * applied this optimization to all streams.
*/
void
{
} else {
}
/* Do we have a FIFO? */
} else {
}
/*
* set_nfsrv_ptr() needs to know if this is an insertion or not,
* so only reset this flag after calling it.
*/
if (have_fifo) {
} else {
}
/* The QEND flag might have to be updated for the upstream guy */
/*
* If this was a module insertion, bump the push count.
*/
stp->sd_pushcnt++;
/* check if the write Q needs backenable */
/* check if the read Q needs backenable */
}
/*
* Given a read queue, unlink it from any neighbors.
*
* This routine acquires all the necessary locks in order to
* change q_next and related pointers and also guard against
* stale references (e.g. through q_next) to the queue that
 * is being removed. It also plays part of the role in ensuring
 * that the module's put and service procedures are not called
 * after qprocsoff returns.
*
* Removeq depends on the stream head ensuring that there are
* no concurrent insertq or removeq on the same stream. The
* stream head ensures this using the flags STWOPEN, STRCLOSE and
* STRPLUMB.
*
* The set of locks needed to remove the queue is different in
* different cases:
*
* Acquire sd_lock, sd_reflock, and all the syncq locks in the stream after
* waiting for the syncq reference count to drop to 0 indicating that no
* non-close threads are present anywhere in the stream. This ensures that any
 * module/driver can reference q_next in its open, close, put, or service
 * procedures.
*
* The sq_rmqcount counter tracks the number of threads inside removeq().
* strlock() ensures that there is either no threads executing inside perimeter
* or there is only a thread calling qprocsoff().
*
* strlock() compares the value of sq_count with the number of threads inside
* removeq() and waits until sq_count is equal to sq_rmqcount. We need to wakeup
* any threads waiting in strlock() when the sq_rmqcount increases.
*/
void
{
int moved;
/*
* For queues using Synchronous streams, we must wait for all threads in
* rwnext() to drain out before proceeding.
*/
/* First, we need wakeup any threads blocked in rwnext() */
}
}
}
}
}
}
sq->sq_rmqcount++;
}
/* Do we have a FIFO? */
} else {
}
/* The QEND flag might have to be updated for the upstream guy */
/*
* Move any messages destined for the put procedures to the next
* syncq in line. Otherwise free them.
*/
moved = 0;
/*
* Quick check to see whether there are any messages or events.
*/
if (wqp->q_syncqmsgs != 0 ||
/*
* If this was a module removal, decrement the push count.
*/
if (!isdriver)
stp->sd_pushcnt--;
/*
* Make sure any messages that were propagated are drained.
* Also clear any QFULL bit caused by messages that were propagated.
*/
/*
* For the driver calling qprocsoff, propagate_syncq
* frees all the messages instead of putting it in
* the stream head
*/
}
/*
* We come here for any pop of a module except for the
* case of driver being removed. We don't call emptysq
* if we did not move any messages. This will avoid holding
* PERMOD syncq locks in emptysq
*/
if (moved > 0)
}
sq->sq_rmqcount--;
}
/*
* Prevent further entry by setting a flag (like SQ_FROZEN, SQ_BLOCKED or
* SQ_WRITER) on a syncq.
* If maxcnt is not -1 it assumes that caller has "maxcnt" claim(s) on the
* sync queue and waits until sq_count reaches maxcnt.
*
* if maxcnt is -1 there's no need to grab sq_putlocks since the caller
* does not care about putnext threads that are in the middle of calling put
* entry points.
*
* This routine is used for both inner and outer syncqs.
*/
static void
{
/*
* Wait for SQ_FROZEN/SQ_BLOCKED to be reset.
* SQ_FROZEN will be set if there is a frozen stream that has a
* queue which also refers to this "shared" syncq.
* SQ_BLOCKED will be set if there is "off" queue which also
* refers to this "shared" syncq.
*/
if (maxcnt != -1) {
}
sq->sq_needexcl++;
if (maxcnt != -1) {
}
if (maxcnt != -1) {
}
}
sq->sq_needexcl--;
if (maxcnt != -1) {
if (sq->sq_needexcl == 0) {
}
} else if (sq->sq_needexcl == 0) {
}
}
/*
* Reset a flag that was set with blocksq.
*
* Can not use this routine to reset SQ_WRITER.
*
* If "isouter" is set then the syncq is assumed to be an outer perimeter
* and drain_syncq is not called. Instead we rely on the qwriter_outer thread
* to handle the queued qwriter operations.
*
* no need to grab sq_putlocks here. See comment in strsubr.h that explains when
* sq_putlocks are used.
*/
static void
{
if (flags & SQ_WANTWAKEUP) {
flags &= ~SQ_WANTWAKEUP;
}
if (!isouter) {
/* drain_syncq drops SQLOCK */
return;
}
}
}
}
/*
* Reset a flag that was set with blocksq.
* Does not drain the syncq. Use emptysq() for that.
* Returns 1 if SQ_QUEUED is set. Otherwise 0.
*
* no need to grab sq_putlocks here. See comment in strsubr.h that explains when
* sq_putlocks are used.
*/
static int
{
if (flags & SQ_WANTWAKEUP) {
flags &= ~SQ_WANTWAKEUP;
}
return (1);
return (0);
}
/*
* Empty all the messages on a syncq.
*
* no need to grab sq_putlocks here. See comment in strsubr.h that explains when
* sq_putlocks are used.
*/
static void
{
/*
* To prevent potential recursive invocation of drain_syncq we
* do not call drain_syncq if count is non-zero.
*/
/* drain_syncq() drops SQLOCK */
return;
} else
}
}
/*
* Ordered insert while removing duplicates.
*/
static void
{
return;
break;
}
}
*prev_sqlpp = new_sqlp;
}
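/*
 * Illustrative sketch (hypothetical types, not the real sqlist code): an
 * ordered insert into a singly linked list that silently drops duplicates,
 * which is the shape of the operation described above. Ordering here is by
 * pointer value, purely as an example key.
 */
typedef struct model_sql {
        struct model_sql *sql_next;
        void            *sql_sq;        /* the syncq this node refers to */
} model_sql_t;

static void
model_sqlist_insert(model_sql_t **headp, model_sql_t *new_sql)
{
        model_sql_t **prevpp = headp;
        model_sql_t *cur;

        for (cur = *prevpp; cur != NULL; prevpp = &cur->sql_next,
            cur = cur->sql_next) {
                if (cur->sql_sq == new_sql->sql_sq)
                        return;         /* duplicate: do not insert */
                if ((uintptr_t)cur->sql_sq > (uintptr_t)new_sql->sql_sq)
                        break;          /* found the insertion point */
        }
        new_sql->sql_next = cur;
        *prevpp = new_sql;
}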
/*
* Walk the write side queues until we hit either the driver
* or a twist in the stream (_SAMESTR will return false in both
* these cases) then turn around and walk the read side queues
* back up to the stream head.
*/
static void
{
while (q != NULL) {
if (_SAMESTR(q))
q = q->q_next;
else if (!(q->q_flag & QREADR))
q = _RD(q);
else
q = NULL;
}
}
/*
* Allocate and build a list of all syncqs in a stream and the syncq(s)
* associated with the "q" parameter. The resulting list is sorted in a
* canonical order and is free of duplicates.
* Assumes the passed queue is a _RD(q).
*/
static sqlist_t *
{
/*
*/
if (do_twist)
return (sqlist);
}
static sqlist_t *
{
/*
* Allocate 2 syncql_t's for each pushed module. Note that
* the sqlist_t structure already has 4 syncql_t's built in:
*/
sizeof (sqlist_t);
sqlist->sqlist_index = 0;
return (sqlist);
}
/*
* Free the list created by sqlist_alloc()
*/
static void
{
}
/*
* Prevent any new entries into any syncq in this stream.
* Used by freezestr.
*/
void
{
q = _RD(q);
/*
* Get a sorted list with all the duplicates removed containing
* all the syncqs referenced by this stream.
*/
}
/*
* Release the block on new entries into this stream
*/
void
strunblock(queue_t *q)
{
int drain_needed;
q = _RD(q);
/*
* Get a sorted list with all the duplicates removed containing
* all the syncqs referenced by this stream.
* Have to drop the SQ_FROZEN flag on all the syncqs before
* starting to drain them; otherwise the draining might
* cause a freezestr in some module on the stream (which
* would deadlock.)
*/
drain_needed = 0;
if (drain_needed) {
}
}
#ifdef DEBUG
static int
{
return (0);
}
int
{
return (count != 0);
}
/*
* Check if anyone has frozen this stream with freezestr
*/
int
{
}
#endif /* DEBUG */
/*
* Enter a queue.
* Obsoleted interface. Should not be used.
*/
void
{
}
void
{
}
/*
* Enter a perimeter. c_inner and c_outer specify which concurrency bits
* to check.
* Wait if SQ_QUEUED is set to preserve ordering between messages and qwriter
* calls and the running of open, close and service procedures.
*
* if c_inner bit is set no need to grab sq_putlocks since we don't care
* if other threads have entered or are entering put entry point.
*
* if the c_inner bit is set it might have been possible to use
* sq_putlocks/sq_putcounts instead of SQLOCK/sq_count (e.g. to optimize
* qwait()), but we wouldn't know which counter to decrement. Currently the
* counter is selected by the current cpu_seqid and the current CPU can
* change at any moment. XXX
* in the future we might use curthread id bits to select the counter and this
* would stay constant across routine calls.
*/
void
{
/*
* Increment ref count to keep closes out of this queue.
*/
/* Make sure all putcounts now use slowlock. */
sq->sq_needexcl++;
waitflags |= SQ_MESSAGES;
}
/*
* Wait until we can enter the inner perimeter.
* If we want exclusive access we wait until sq_count is 0.
* We have to do this before entering the outer perimeter.
*/
}
}
}
sq->sq_needexcl--;
if (sq->sq_needexcl == 0) {
}
}
/* Check if we need to enter the outer perimeter */
/*
* We have to enter the outer perimeter exclusively before
* we can increment sq_count to avoid deadlock. This implies
* that we have to re-check sq_flags and sq_count.
*
* is it possible to have c_inner set when c_outer is not set?
*/
}
/*
* there should be no need to recheck sq_putcounts
* because outer_enter() has already waited for them to clear
* after setting SQ_WRITER.
*/
#ifdef DEBUG
/*
* SUMCHECK_SQ_PUTCOUNTS should return the sum instead
* of doing an ASSERT internally. Others should do
* something like
* ASSERT(SUMCHECK_SQ_PUTCOUNTS(sq) == 0);
* without the need to #ifdef DEBUG it.
*/
SUMCHECK_SQ_PUTCOUNTS(sq, 0);
#endif
}
}
/* Exclusive entry */
}
}
}
/*
* Leave a syncq. Announce to the framework that closes may proceed.
* c_inner and c_outer specify which concurrency bits
* to check.
*
* Must never be called from a driver or module put entry point.
*
* no need to grab sq_putlocks here. See comment in strsubr.h that explains when
* sq_putlocks are used.
*/
void
{
#ifdef DEBUG
#endif
/*
* decrement ref count, drain the syncq if possible, and wake up
* any waiting close.
*/
if (flags & SQ_WANTWAKEUP) {
flags &= ~SQ_WANTWAKEUP;
}
if (flags & SQ_WANTEXWAKEUP) {
flags &= ~SQ_WANTEXWAKEUP;
}
/*
* The syncq needs to be drained. "Exit" the syncq
* before calling drain_syncq.
*/
/* Check if we need to exit the outer perimeter */
/* XXX will this ever be true? */
return;
}
}
/* Check if we need to exit the outer perimeter */
}
/*
* Prevent q_next from changing in this stream by incrementing sq_count.
*
* no need to grab sq_putlocks here. See comment in strsubr.h that explains when
* sq_putlocks are used.
*/
void
{
}
/*
* Undo claimq.
*
* no need to grab sq_putlocks here. See comment in strsubr.h that explains when
* sq_putlocks are used.
*/
void
{
if (flags & SQ_WANTWAKEUP) {
flags &= ~SQ_WANTWAKEUP;
}
/*
* To prevent potential recursive invocation of
* drain_syncq we do not call drain_syncq if count is
* non-zero.
*/
return;
} else
}
}
}
/*
* Prevent q_next from changing in this stream by incrementing sd_refcnt.
*/
void
{
}
/*
* Undo claimstr.
*/
void
{
}
static syncq_t *
new_syncq(void)
{
}
static void
{
sq->sq_nciputctrl, 0);
}
sq->sq_nciputctrl = 0;
sq->sq_rmqcount = 0;
sq->sq_callbflags = 0;
sq->sq_cancelid = 0;
sq->sq_needexcl = 0;
sq->sq_svcflags = 0;
sq->sq_nqueues = 0;
sq->sq_servcount = 0;
}
/* Outer perimeter code */
/*
* The outer syncq uses the fields and flags in the syncq slightly
* differently from the inner syncqs.
* sq_count Incremented when there are pending or running
* writers at the outer perimeter to prevent the set of
* inner syncqs that belong to the outer perimeter from
* changing.
*
* SQ_BLOCKED Set to prevent traversing of sq_next,sq_prev while
* inner syncqs are added to or removed from the
* outer perimeter.
*
* SQ_WRITER A thread is currently traversing all the inner syncqs
* setting the SQ_WRITER flag.
*/
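/*
 * Illustrative sketch (hypothetical, simplified types): the division of
 * labor described above. Readers take a cheap per-inner-syncq claim, while
 * a writer must first mark every inner syncq and then wait for all claims
 * to drain. The writer's exit path (clearing the flag and waking readers)
 * is omitted; only standard kernel mutex/condvar primitives are assumed.
 */
#define MODEL_SQ_WRITER 0x1

typedef struct model_isq {
        struct model_isq *isq_next;     /* next inner syncq on the outer list */
        kmutex_t        isq_lock;
        kcondvar_t      isq_wait;
        uint_t          isq_count;      /* models sq_count */
        uint_t          isq_flags;
} model_isq_t;

/* Read-side entry: a claim on a single inner syncq (cf. entersq/put). */
static void
model_reader_enter(model_isq_t *isq)
{
        mutex_enter(&isq->isq_lock);
        while (isq->isq_flags & MODEL_SQ_WRITER)
                cv_wait(&isq->isq_wait, &isq->isq_lock);
        isq->isq_count++;
        mutex_exit(&isq->isq_lock);
}

static void
model_reader_exit(model_isq_t *isq)
{
        mutex_enter(&isq->isq_lock);
        if (--isq->isq_count == 0)
                cv_broadcast(&isq->isq_wait);   /* let a waiting writer re-test */
        mutex_exit(&isq->isq_lock);
}

/* Write-side entry: mark every inner syncq, then wait for claims to drain. */
static void
model_writer_enter(model_isq_t *inners)
{
        model_isq_t *isq;

        for (isq = inners; isq != NULL; isq = isq->isq_next) {
                mutex_enter(&isq->isq_lock);
                isq->isq_flags |= MODEL_SQ_WRITER;
                mutex_exit(&isq->isq_lock);
        }
        for (isq = inners; isq != NULL; isq = isq->isq_next) {
                mutex_enter(&isq->isq_lock);
                while (isq->isq_count != 0)
                        cv_wait(&isq->isq_wait, &isq->isq_lock);
                mutex_exit(&isq->isq_lock);
        }
}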
/*
* Get write access at the outer perimeter.
* Note that read access is done by entersq, putnext, and put by simply
* incrementing sq_count in the inner syncq.
*
* Waits until "flags" is no longer set in the outer to prevent multiple
* threads from having write access at the same time. SQ_WRITER has to be part
* of "flags".
*
* Increases sq_count on the outer syncq to keep away outer_insert/remove
* until the outer_exit is finished.
*
* outer_enter is vulnerable to starvation since it does not prevent new
* threads from entering the inner syncqs while it is waiting for sq_count to
* go to zero.
*/
void
{
int wait_needed;
}
wait_needed = 0;
/*
* Set SQ_WRITER on all the inner syncqs while holding
* the SQLOCK on the outer syncq. This ensures that the changing
* of SQ_WRITER is atomic under the outer SQLOCK.
*/
if (count != 0)
wait_needed = 1;
}
/*
* Get everybody out of the syncqs sequentially.
* Note that we don't actually need to acquire the PUTLOCKS, since
* we have already cleared the fastbit, and set QWRITER. By
* definition, the count can not increase since putnext will
* take the slowlock path (and the purpose of acquiring the
* putlocks was to make sure it didn't increase while we were
* waiting).
*
* Note that we still acquire the PUTLOCKS to be safe.
*/
if (wait_needed) {
while (count != 0) {
}
}
/*
* Verify that none of the flags got set while we
* were waiting for the sq_counts to drop.
* If this happens we exit and retry entering the
* outer perimeter.
*/
goto retry;
}
}
}
/*
* Drop the write access at the outer perimeter.
* Read access is dropped implicitly (by putnext, put, and leavesq) by
* decrementing sq_count.
*/
void
{
int drain_needed;
/*
* Atomically (from the perspective of threads calling become_writer)
* drop the write access at the outer perimeter by holding
* SQLOCK(outer) across all the dropsq calls and the resetting of
* SQ_WRITER.
* This defines a locking order between the outer perimeter
* SQLOCK and the inner perimeter SQLOCKs.
*/
}
/*
* sq_onext is stable since sq_count has not yet been decreased.
* Reset the SQ_WRITER flags in all syncqs.
* After dropping SQ_WRITER on the outer syncq we empty all the
* inner syncqs.
*/
drain_needed = 0;
if (drain_needed) {
}
if (flags & SQ_WANTWAKEUP) {
flags &= ~SQ_WANTWAKEUP;
}
}
/*
* Add another syncq to an outer perimeter.
* Block out all other access to the outer perimeter while it is being
* changed using blocksq.
* Assumes that the caller has *not* done an outer_enter.
*
* Vulnerable to starvation in blocksq.
*/
static void
{
/* Get exclusive access to the outer perimeter list */
}
/*
* Remove a syncq from an outer perimeter.
* Block out all other access to the outer perimeter while it is being
* changed using blocksq.
* Assumes that the caller has *not* done an outer_enter.
*
* Vulnerable to starvation in blocksq.
*/
static void
{
/* Get exclusive access to the outer perimeter list */
}
/*
* Queue a deferred qwriter(OUTER) callback for this outer perimeter.
* If this is the first callback for this outer perimeter then add
* this outer perimeter to the list of outer perimeters that
* the qwriter_outer_thread will process.
*
* Increments sq_count in the outer syncq to prevent the membership
* of the outer perimeter (in terms of inner syncqs) to change while
* the callback is pending.
*/
static void
{
/* First message. */
(void) taskq_dispatch(streams_taskq,
} else {
}
}
/*
* Try and upgrade to write access at the outer perimeter. If this can
* not be done without blocking then queue the callback to be done
* by the qwriter_outer_thread.
*
* This routine can only be called from put or service procedures plus
* asynchronous callback routines that have properly entered the
* queue (with entersq). Thus qwriter(OUTER) assumes the caller has one claim
* on the syncq associated with q.
*/
void
{
int failed;
panic("qwriter(PERIM_OUTER): no outer perimeter");
/*
* If some thread is traversing sq_next, or if we are blocked by
* outer_insert or outer_remove, or if we already have queued
* callbacks, then queue this callback for later processing.
*
* Also queue the qwriter for an interrupt thread in order
* to reduce the time spent running at high IPL and to make it
* easy to identify that there are pending events.
*/
/*
* Queue the become_writer request.
* The queueing is atomic under SQLOCK(outer) in order
* to synchronize with outer_exit.
* queue_writer will drop the outer SQLOCK
*/
if (flags & SQ_BLOCKED) {
/* Must set SQ_WRITER on inner perimeter */
} else {
/*
* The outer could have been SQ_BLOCKED thus
* SQ_WRITER might not be set on the inner.
*/
}
}
return;
}
/*
* We are half-way to exclusive access to the outer perimeter.
* Prevent any outer_enter, qwriter(OUTER), or outer_insert/remove
* while the inner syncqs are traversed.
*/
/*
* Check if we can run the function immediately. Mark all
* syncqs with the writer flag to prevent new entries into
* put and service procedures.
*
* Set SQ_WRITER on all the inner syncqs while holding
* the SQLOCK on the outer syncq. This ensures that the changing
* of SQ_WRITER is atomic under the outer SQLOCK.
*/
failed = 0;
failed = 1;
}
if (failed) {
/*
* Some other thread has a read claim on the outer perimeter.
* Queue the callback for deferred processing.
*
* queue_writer will set SQ_QUEUED before we drop SQ_WRITER
* so that other qwriter(OUTER) calls will queue their
* callbacks as well. queue_writer increments sq_count so we
* decrement to compensate for our increment.
*
* Dropping SQ_WRITER enables the writer thread to work
* on this outer perimeter.
*/
/* queue_writer dropped the lock */
if (flags & SQ_WANTWAKEUP) {
flags &= ~SQ_WANTWAKEUP;
}
return;
} else {
}
/* Can run it immediately */
}
/*
* Dequeue all writer callbacks from the outer perimeter and run them.
*/
static void
{
queue_t *q;
void (*func)();
/*
* queues cannot be placed on the queuelist on the outer
* perimeter.
*/
}
/*
* Drop the message if the queue is closing.
* Make sure that the queue is "claimed" when the callback
* is run in order to satisfy various ASSERTs.
*/
} else {
claimq(q);
releaseq(q);
}
}
}
/*
* The list of messages on the inner syncq is effectively hashed
* by destination queue. These destination queues are doubly
* linked lists (hopefully) in priority order. Messages are then
* chained via b_next/b_prev in the mblk, with (similar to putq()) the first message
* having a NULL b_prev and the last message having a NULL b_next.
*
* Events, such as qwriter callbacks, are put onto a list in FIFO
* order referenced by sq_evhead, and sq_evtail. This is a singly
* linked list, and messages here MUST be processed in the order queued.
*/
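/*
 * Illustrative sketch (hypothetical types): the shape of the bookkeeping
 * described above -- per-destination-queue message chains hanging off a
 * doubly linked list of queues, plus a separate singly linked FIFO event
 * list with head and tail pointers.
 */
typedef struct model_msg {
        struct model_msg *m_next;       /* models b_next */
        struct model_msg *m_prev;       /* models b_prev */
} model_msg_t;

typedef struct model_q {
        struct model_q  *q_sqnext;      /* next queue with pending messages */
        struct model_q  *q_sqprev;      /* previous queue on the syncq list */
        model_msg_t     *q_msghead;     /* first message (m_prev == NULL) */
        model_msg_t     *q_msgtail;     /* last message (m_next == NULL) */
} model_q_t;

typedef struct model_ev {
        struct model_ev *ev_next;       /* singly linked, FIFO order */
} model_ev_t;

/* FIFO append, as the sq_evhead/sq_evtail description above implies. */
static void
model_ev_enqueue(model_ev_t **evheadp, model_ev_t **evtailp, model_ev_t *ev)
{
        ev->ev_next = NULL;
        if (*evheadp == NULL)
                *evheadp = ev;          /* list was empty */
        else
                (*evtailp)->ev_next = ev;
        *evtailp = ev;
}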
/*
* Run the events on the syncq event list (sq_evhead).
* Assumes there is only one claim on the syncq, it is
* already exclusive (SQ_EXCL set), and the SQLOCK held.
* Messages here are processed in order, with the SQ_EXCL bit
* held all the way through till the last message is processed.
*/
void
{
void (*func)();
/*
* We need to process all of the events on this list. It
* is possible that new events will be added while we are
* away processing a callback, so on every loop, we start
* back at the beginning of the list.
*/
/*
* We have to reaccess sq_evhead since there is a
* possibility of a new entry while we were running
* the callback.
*/
/*
* Messages from the event queue must be taken off in
* FIFO order.
*/
/* Deleting last */
}
/*
* re-read the flags, since they could have changed.
*/
}
if (flags & SQ_WANTWAKEUP) {
flags &= ~SQ_WANTWAKEUP;
}
if (flags & SQ_WANTEXWAKEUP) {
flags &= ~SQ_WANTEXWAKEUP;
}
}
/*
* Put messages on the event list.
* If we can go exclusive now, do so and process the event list, otherwise
* let the last claim service this list (or wake the sqthread).
* This procedure assumes SQLOCK is held. To run the event list, it
* must be called with no claims.
*/
static void
{
/*
* This is a callback. Add it to the list of callbacks
* and see about upgrading.
*/
} else {
}
/*
* We have set SQ_EVENTS, so threads will have to
* unwind out of the perimeter, and new entries will
* not grab a putlock. But we still need to know
* how many threads have already made a claim to the
* syncq, so grab the putlocks, and sum the counts.
* If there are no claims on the syncq, we can upgrade
* to exclusive, and run the event list.
* NOTE: We hold the SQLOCK, so we can just grab the
* putlocks.
*/
/*
* We have no claim, so we need to check that there
* are no other claims before we can upgrade.
*/
/*
* There are currently no claims on
* the syncq by this thread (at least on this entry). The thread that has
* the claim should drain the syncq.
*/
if (count > 0) {
/*
* Can't upgrade - other threads inside.
*/
return;
}
/*
* Need to set SQ_EXCL and make a claim on the syncq.
*/
/* Process the events list */
/*
* Release our claim...
*/
/*
* And release SQ_EXCL.
* We don't need to acquire the putlocks to release
* SQ_EXCL, since we are exclusive, and hold the SQLOCK.
*/
/*
* sq_run_events should have released SQ_EXCL
*/
/*
* If anything happened while we were running the
* events (or was there before), we need to process
* them now. We shouldn't be exclusive since we
* released the perimeter above (plus, we asserted
* for it).
*/
else
}
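/*
 * Illustrative sketch (hypothetical fields): the upgrade test described in
 * the comments above. Once SQ_EVENTS is set no new fast-path claims can be
 * made, so the existing claims are the per-CPU put counts plus sq_count;
 * the caller may upgrade to exclusive only if that total is zero. The
 * fixed-size putcount array is a simplification.
 */
typedef struct model_claims {
        uint_t  mc_count;               /* slow-path claims (cf. sq_count) */
        uint_t  mc_putcounts[4];        /* per-CPU fast-path claims */
} model_claims_t;

static int
model_can_upgrade(const model_claims_t *mc)
{
        uint_t total = mc->mc_count;
        uint_t i;

        for (i = 0; i < 4; i++)
                total += mc->mc_putcounts[i];
        return (total == 0);            /* non-zero: other threads hold claims */
}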
/*
* Perform delayed processing. The caller has to make sure that it is safe
* to enter the syncq (e.g. by checking that none of the SQ_STAYAWAY bits are
* set.)
*
* Assume that the caller has NO claims on the syncq. However, a claim
* on the syncq does not indicate that a thread is draining the syncq.
* There may be more claims on the syncq than there are threads draining
* (i.e. #_threads_draining <= sq_count)
*
* drain_syncq has to terminate when one of the SQ_STAYAWAY bits gets set
* in order to preserve qwriter(OUTER) ordering constraints.
*
* sq_putcount only needs to be checked when dispatching the queued
* writer call for CIPUT sync queue, but this is handled in sq_run_events.
*/
void
{
"drain_syncq start:%p", sq);
/*
* Drop SQ_SERVICE flag.
*/
if (bg_service)
/*
* If SQ_EXCL is set, someone else is processing this syncq - let him
* finish the job.
*/
if (bg_service) {
sq->sq_servcount--;
}
return;
}
/*
* This routine can be called by a background thread if
* it was scheduled by a hi-priority thread. So, if there are
* no messages queued, return (remember, we have the SQLOCK,
* and it cannot change until we release it). Wake up any waiters as well.
*/
if (flags & SQ_WANTWAKEUP) {
flags &= ~SQ_WANTWAKEUP;
}
if (flags & SQ_WANTEXWAKEUP) {
flags &= ~SQ_WANTEXWAKEUP;
}
if (bg_service) {
sq->sq_servcount--;
}
return;
}
/*
* If this is not a concurrent put perimeter, we need to
* become exclusive to drain. Also, if not CIPUT, we would
* not have acquired a putlock, so we don't need to check
* the putcounts. If not entering with a claim, we test
* for sq_count == 0.
*/
if (bg_service) {
sq->sq_servcount--;
}
return;
}
}
/*
* This is where we make a claim to the syncq.
* This can either be done by incrementing a putlock, or
* the sq_count. But since we already have the SQLOCK
* here, we just bump the sq_count.
*
* Note that after we make a claim, we need to let the code
* fall through to the end of this routine to clean itself
* up. A return in the while loop will put the syncq in a
* very bad state.
*/
/*
* If we are told to stayaway or went exclusive,
* we are done.
*/
if (flags & (SQ_STAYAWAY)) {
break;
}
/*
* If there are events to run, do so.
* We have one claim to the syncq, so if there are
* more than one, other threads are running.
*/
if (count > 1) {
/* Can't upgrade - other threads inside */
break;
}
/*
* we have the only claim, run the events,
* sq_run_events will clear the SQ_EXCL flag.
*/
/*
* If this is a CIPUT perimeter, we need
* to drop the SQ_EXCL flag so we can properly
* continue draining the syncq.
*/
}
/*
* And go back to the beginning just in case
* anything changed while we were away.
*/
continue;
}
/*
* Find the queue that is not draining.
*
* q_draining is protected by QLOCK which we do not hold.
* But if it was set, then a thread was draining, and if it gets
* cleared, then it was because the thread has successfully
* drained the syncq, or a GOAWAY state occurred. For the GOAWAY
* state to happen, a thread needs the SQLOCK which we hold, and
* if there was such a flag, we would have already seen it.
*/
;
break;
/*
* We have a queue to work on, and we hold the
* SQLOCK and one claim, call qdrain_syncq.
* This means we need to release the SQLOCK and
* aquire the QLOCK (OK since we have a claim).
* Note that qdrain_syncq will actually dequeue
* this queue from the sq_head list when it is
* convinced all the work is done and release
* the QLOCK before returning.
*/
/* The queue is drained */
/*
* NOTE: After this point qp should not be used since it may be
* closed.
*/
}
/*
* sq->sq_head cannot change because we hold the
* sqlock. However, a thread CAN decide that it is no longer
* going to drain that queue. However, this should be due to
* a GOAWAY state, and we should see that here.
*
* This loop is not very efficient. One solution may be adding a second
* pointer to the "draining" queue, but it is difficult to do when
* queues are inserted in the middle due to priority ordering. Another
* possibility is to yank the queue out of the sq list and put it onto
* the "draining list" and then put it back if it can't be drained.
*/
/* Drop SQ_EXCL for non-CIPUT perimeters */
/* Wake up any waiters. */
if (flags & SQ_WANTWAKEUP) {
flags &= ~SQ_WANTWAKEUP;
}
if (flags & SQ_WANTEXWAKEUP) {
flags &= ~SQ_WANTEXWAKEUP;
}
/* Release our claim. */
if (bg_service) {
sq->sq_servcount--;
}
"drain_syncq end:%p", sq);
}
/*
*
* qdrain_syncq can be called (currently) from only one of two places:
* drain_syncq
* putnext (or some variation of it).
* and eventually
* qwait(_sig)
*
* If called from drain_syncq, we found it in the list
* of queues needing service, so there is work to be done (or it
* wouldn't be on the list).
*
* If called from some putnext variation, it was because the
* perimeter is open, but messages are blocking a putnext and
* there is not a thread working on it. Now a thread could start
* working on it while we are getting ready to do so ourselves, but
* the thread would set the q_draining flag, and we can spin out.
*
* As for qwait(_sig), I think I shall let it continue to call
* drain_syncq directly (after all, it will get here eventually).
*
* qdrain_syncq has to terminate when:
* - one of the SQ_STAYAWAY bits gets set to preserve qwriter(OUTER) ordering
* - SQ_EVENTS gets set to preserve qwriter(INNER) ordering
*
* ASSUMES:
* One claim
* QLOCK held
* SQLOCK not held
* Will release QLOCK before returning
*/
void
{
#ifdef DEBUG
#endif
"drain_syncq start:%p", sq);
/*
* For non-CIPUT perimeters, we should be called with the
* exclusive bit set already. For CIPUT perimeters we
* will be doing a concurrent drain, so it better not be set.
*/
/*
* All outer pointers are set, or none of them are
*/
#ifdef DEBUG
/*
* This is OK without the putlocks, because we have one
* claim either from the sq_count, or a putcount. We could
* get an erroneous value from other counts, but ours won't
* change, so one way or another, we will have at least a
* value of one.
*/
#endif /* DEBUG */
/*
* The first thing to do here, is find out if a thread is already
* draining this queue or the queue is closing. If so, we are done,
* just return. Also, if there are no messages, we are done as well.
* Note that we check the q_sqhead since there is a window of
* opportunity for us to enter here because Q_SQQUEUED was set, but is
* not anymore.
*/
mutex_exit(QLOCK(q));
return;
}
/*
* If the perimeter is exclusive, there is nothing we can
* do right now, go away.
* Note that there is nothing to prevent this case from changing
* right after this check, but the spin-out will catch it.
*/
/* Tell other threads that we are draining this queue */
/*
* Because we can enter this routine just because
* a putnext is blocked, we need to spin out if
* the perimeter wants to go exclusive as well
* as just blocked. We need to spin out also if
* events are queued on the syncq.
* Don't check for SQ_EXCL, because non-CIPUT
* perimeters would set it, and it can't become
* exclusive while we hold a claim.
*/
break;
}
#ifdef DEBUG
/*
* Since we are in qdrain_syncq, we already know the queue,
* but for sanity, we want to check this against the qp that
* was passed in by bp->b_queue.
*/
/*
* We would have the following check in the DEBUG code:
*
* if (bp->b_prev != NULL) {
* ASSERT(bp->b_prev == (void (*)())q->q_qinfo->qi_putp);
* }
*
* This can't be done, however, since IP modifies qinfo
* structure at run-time (switching between IPv4 qinfo and IPv6
* qinfo), invalidating the check.
* So the assignment to func is left here, but the ASSERT itself
* is removed until the whole issue is resolved.
*/
#endif
ASSERT(q->q_syncqmsgs > 0);
mutex_exit(QLOCK(q));
mutex_enter(QLOCK(q));
/*
* We should decrement q_syncqmsgs only after executing the
* put procedure to avoid a possible race with putnext().
* In putnext(), even though it sees that Q_SQQUEUED is set, there is
* an optimization which allows putnext to call the put
* procedure directly if (q_syncqmsgs == 0) and thus
* a message reordering could otherwise occur.
*/
q->q_syncqmsgs--;
/*
* Clear QFULL in the next service procedure queue if
* this is the last message destined to that queue.
*
* It would make better sense to have some sort of
* tunable for the low water mark, but these semantics
* are not yet defined. So, alas, we use a constant.
*/
do_clr = (q->q_syncqmsgs == 0);
mutex_exit(QLOCK(q));
if (do_clr)
clr_qfull(q);
mutex_enter(QLOCK(q));
/*
* Always clear SQ_EXCL when CIPUT in order to handle
* qwriter(INNER).
*/
/*
* The putp() can call qwriter and get exclusive access
* IFF this is the only claim. So, we need to test for
* this possibility so we can acquire the mutex and clear
* the bit.
*/
}
}
/*
* We should either have no queues on the syncq, or we were
* told to goaway by a waiter (which we will wake up at the
* end of this function).
*/
/*
* Remove the q from the syncq list if all the messages are
* drained.
*/
if (q->q_sqflags & Q_SQQUEUED)
/*
* Since the queue is removed from the list, reset its priority.
*/
q->q_spri = 0;
}
/*
* Remember, the q_draining flag is used to let another
* thread know that there is a thread currently draining
* the messages for a queue. Since we are now done with
* this queue (even if there may be messages still there),
* we need to clear this flag so some thread will work
* on it if needed.
*/
ASSERT(q->q_draining);
q->q_draining = 0;
/* called with a claim, so OK to drop all locks. */
mutex_exit(QLOCK(q));
"drain_syncq end:%p", sq);
}
/* END OF QDRAIN_SYNCQ */
/*
* This is the mate to qdrain_syncq, except that it is putting the
* message onto the queue instead of draining it. Since the
* message is destined for the queue that is selected, there is
* no need to identify the function because the message is
* intended for the put routine for the queue. But this
* routine will do it anyway just in case (but only for debug kernels).
*
* After the message is enqueued on the syncq, it calls putnext_tail()
* which will schedule a background thread to actually process the message.
*
* Assumes that there is a claim on the syncq (sq->sq_count > 0) and
* SQLOCK(sq) and QLOCK(q) are not held.
*/
void
{
mutex_enter(QLOCK(q));
#ifdef DEBUG
/*
* This is used for debug in the qfill_syncq/qdrain_syncq case
* to trace the queue that the message is intended for. Note
* that the original use was to identify the queue and function
* to call on the drain. In the new syncq, we have the context
* of the queue that we are draining, so call its putproc and
* don't rely on the saved values. But for debug this is still
* useful information.
*/
#endif
/*
* Enqueue the message on the list.
* SQPUT_MP() accesses q_syncqmsgs. We are already holding QLOCK to
* protect it. So it is OK to acquire SQLOCK after SQPUT_MP().
*/
/*
* And queue on syncq for scheduling, if not already queued.
* Note that we need the SQLOCK for this, and for testing flags
* at the end to see if we will drain. So grab it now, and
* release it before we call qdrain_syncq or return.
*/
if (!(q->q_sqflags & Q_SQQUEUED)) {
}
#ifdef DEBUG
else {
/*
* All of these conditions MUST be true!
*/
} else {
}
ASSERT(q->q_syncqmsgs != 0);
}
#endif
mutex_exit(QLOCK(q));
/*
* SQLOCK is still held, so sq_count can be safely decremented.
*/
putnext_tail(sq, q, 0);
/* Should not reference sq or q after this point. */
}
/* End of qfill_syncq */
/*
* Remove all messages from a syncq (if qp is NULL) or remove all messages
* that would be put into qp by drain_syncq.
* Used when deleting the syncq (qp == NULL) or when detaching
* a queue (qp != NULL).
* Return non-zero if one or more messages were freed.
*
* no need to grab sq_putlocks here. See comment in strsubr.h that explains when
* sq_putlocks are used.
*
* NOTE: This function assumes that it is called from the close() context and
* that all the queues in the syncq are going away. For this reason it doesn't
* acquire QLOCK for modifying the q_sqhead/q_sqtail fields. This assumption is
* currently valid, but it is useful to rethink this function to behave properly
* in other cases.
*/
int
{
queue_t *q;
int ret = 0;
/*
* Before we leave, we need to make sure there are no
* events listed for this queue. All events for this queue
* will just be freed.
*/
/* Delete this message */
/*
* Update sq_evtail if the last element
* is removed.
*/
}
} else
ret++;
} else {
}
}
}
/*
* Walk sq_head and:
* - match qp if qp is set, remove its messages
* - all if qp is not set
*/
while (q != NULL) {
/*
* Yank the messages as a list off the queue
*/
/*
* We do not have QLOCK(q) here (which is safe due to
* assumptions mentioned above). To obtain the lock we
* need to release SQLOCK which may allow lots of things
* to change upon us. This place requires more analysis.
*/
/*
* Free each of the messages.
*/
ret++;
}
/*
* Now remove the queue from the syncq.
*/
q->q_spri = 0;
q->q_syncqmsgs = 0;
/*
* If qp was specified, we are done with it and are
* going to drop SQLOCK(sq) and return. We wakeup syncq
* waiters while we still have the SQLOCK.
*/
}
/* Drop SQLOCK across clr_qfull */
/*
* We avoid doing the test that drain_syncq does and
* unconditionally clear qfull for every flushed
* message. Since flush_syncq is only called during
* close this should not be a problem.
*/
clr_qfull(q);
return (ret);
} else {
/*
* The head was removed by SQRM_Q above.
* reread the new head and flush it.
*/
}
} else {
q = q->q_sqnext;
}
}
}
return (ret);
}
/*
* Propagate all messages from a syncq to the next syncq that are associated
* with the specified queue. If the queue is attached to a driver or if the
* messages have been added due to a qwriter(PERIM_INNER), free the messages.
*
* Assumes that the stream is strlock()'ed. We don't come here if there
* are no messages to propagate.
*
* NOTE : If the queue is attached to a driver, all the messages are freed
* as there is no point in propagating the messages from the driver syncq
* to the closing stream head which will in turn get freed later.
*/
static int
{
int moved = 0;
#ifdef DEBUG
void (*func)();
#endif
/* debug macro */
/*
* As entersq() does not increment the sq_count for
* the write side, check sq_count for non-QPERQ
* perimeters alone.
*/
/*
* propagate_syncq() can be called because of either messages on the
* queue syncq or because of events on the queue syncq. Do the actual
* message propagation if there are any messages.
*/
if (qp->q_syncqmsgs) {
if (!isdriver) {
/* debug macro */
#ifdef DEBUG
#endif
}
qp->q_syncqmsgs = 0;
/*
* Walk the list of messages, and free them if this is a driver,
* otherwise reset the b_prev and b_queue value to the new putp.
* Afterward, we will just add the head to the end of the next
* syncq, and point the tail to the end of this one.
*/
if (isdriver) {
continue;
}
/* Change the q values for this message */
#ifdef DEBUG
#endif
moved++;
}
/*
* Attach list of messages to the end of the new queue (if there
* is a list of messages).
*/
} else {
}
/*
* When messages are moved from high priority queue to
* another queue, the destination queue priority is
* upgraded.
*/
}
}
/*
* Before we leave, we need to make sure there are no
* events listed for this queue. All events for this queue
* will just be freed.
*/
/* Delete this message */
/*
* Update sq_evtail if the last element
* is removed.
*/
}
} else
} else {
}
}
}
/* Wake up any waiter before leaving. */
if (flags & SQ_WANTWAKEUP) {
flags &= ~SQ_WANTWAKEUP;
}
return (moved);
}
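/*
 * Illustrative sketch (hypothetical types): splicing one b_next-style chain
 * onto the tail of another, which is the final "attach list of messages to
 * the end of the new queue" step described above.
 */
typedef struct model_mblk {
        struct model_mblk *b_next;
} model_mblk_t;

static void
model_append_chain(model_mblk_t **headp, model_mblk_t **tailp,
    model_mblk_t *list_head, model_mblk_t *list_tail)
{
        if (list_head == NULL)
                return;                         /* nothing to move */
        if (*headp == NULL)
                *headp = list_head;             /* destination was empty */
        else
                (*tailp)->b_next = list_head;   /* append after the old tail */
        *tailp = list_tail;
}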
/*
* Try and upgrade to exclusive access at the inner perimeter. If this can
* not be done without blocking then request will be queued on the syncq
* and drain_syncq will run it later.
*
* This routine can only be called from put or service procedures plus
* asynchronous callback routines that have properly entered the
* queue (with entersq). Thus qwriter_inner assumes the caller has one claim
* on the syncq associated with q.
*/
void
{
if (count == 1) {
/*
* Can upgrade. This case also handles nested qwriter calls
* (when the qwriter callback function calls qwriter). In that
* case SQ_EXCL is already set.
*/
/*
* SQ_EXCL is left set here; leavesq, putnext, and drain_syncq will reset
* it, so it stays set until putnext, leavesq, or drain_syncq drops it.
* That way we handle nested qwriter(INNER) without dropping
* SQ_EXCL until the outermost qwriter callback routine is
* done.
*/
return;
}
}
/*
* Synchronous callback support functions
*/
/*
* Allocate a callback parameter structure.
* Assumes that caller initializes the flags and the id.
* Acquires SQLOCK(sq) if non-NULL is returned.
*/
{
/*
* Only try tryhard allocation if the caller is ready to panic.
* Otherwise just fail.
*/
else
return (NULL);
}
return (cbp);
}
void
{
callbparams_t **pp, *p;
if (p == cbp) {
return;
}
}
(void) (STRLOG(0, 0, 0, SL_CONSOLE,
"callbparams_free: not found\n"));
}
void
{
callbparams_t **pp, *p;
return;
}
}
(void) (STRLOG(0, 0, 0, SL_CONSOLE,
"callbparams_free_id: not found\n"));
}
/*
* Callback wrapper function used by once-only callbacks that can be
* cancelled (qtimeout and qbufcall)
* Contains inline version of entersq(sq, SQ_CALLBACK) that can be
* cancelled by the qun* functions.
*/
void
qcallbwrapper(void *arg)
{
sq->sq_needexcl++;
waitflags |= SQ_MESSAGES;
}
/* Can not handle exclusive entry at outer perimeter */
/* timeout has been cancelled */
sq->sq_needexcl--;
if (sq->sq_needexcl == 0) {
}
}
return;
}
}
}
}
sq->sq_needexcl--;
if (sq->sq_needexcl == 0) {
}
}
/*
* We drop the lock only for leavesq to re-acquire it.
* A possible optimization is to inline leavesq.
*/
}
/*
* no need to grab sq_putlocks here. See comment in strsubr.h that
* explains when sq_putlocks are used.
*
* sq_count (or one of the sq_putcounts) has already been
* decremented by the caller, and if SQ_QUEUED, we need to call
* drain_syncq (the global syncq drain).
* If putnext_tail is called with the SQ_EXCL bit set, we are in
* one of two states: a non-CIPUT perimeter, where we need to clear
* it, or we went exclusive in the put procedure. In any case,
* we want to clear the bit now, and it is probably easier to do
* this at the beginning of this function (remember, we hold
* the SQLOCK). Lastly, if there are other messages queued
* on the syncq (and not for our destination), enable the syncq
* for background work.
*/
/* ARGSUSED */
void
{
/* Clear SQ_EXCL if set in passflags */
}
if (flags & SQ_WANTWAKEUP) {
flags &= ~SQ_WANTWAKEUP;
}
if (flags & SQ_WANTEXWAKEUP) {
flags &= ~SQ_WANTEXWAKEUP;
}
/*
* We have cleared SQ_EXCL if we were asked to, and started
* the wakeup process for waiters. If there are no writers
* then we need to drain the syncq if we were told to, or
* enable the background thread to do it.
*/
/* drain_syncq will take care of events in the list */
return;
}
}
/* Drop the SQLOCK on exit */
}
void
{
mutex_enter(QLOCK(q));
if (!O_SAMESTR(q))
else
mutex_exit(QLOCK(q));
q = _OTHERQ(q);
mutex_enter(QLOCK(q));
if (!O_SAMESTR(q))
else
mutex_exit(QLOCK(q));
}
/*
* Set QFULL in next service procedure queue (that cares) if not already
* set and if there are already more messages on the syncq than
* sq_max_size. If sq_max_size is 0, no flow control will be asserted on
* any syncq.
*
* The fq here is the next queue with a service procedure. This is where
* we would fail canputnext, so this is where we need to set QFULL.
* In the case when fq != q we need to take QLOCK(fq) to set QFULL flag.
*
* We already have QLOCK at this point. To avoid cross-locks with
* freezestr() which grabs all QLOCKs and with strlock() which grabs both
* SQLOCK and sd_reflock, we need to drop respective locks first.
*/
void
{
(q->q_syncqmsgs > sq_max_size)) {
} else {
mutex_exit(QLOCK(q));
mutex_enter(QLOCK(q));
}
}
}
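/*
 * Illustrative sketch: the flow-control predicate described above, written
 * out as a helper. The parameter names are stand-ins for the QFULL flag
 * test, q_syncqmsgs, and sq_max_size; an sq_max_size of zero disables
 * syncq-based flow control entirely.
 */
static int
model_should_set_qfull(int already_full, int syncqmsgs, int max_size)
{
        return (!already_full && max_size != 0 && syncqmsgs > max_size);
}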
void
{
q = q->q_nfsrv;
/* Fast check if there is any work to do before getting the lock. */
return;
}
/*
* Do not reset QFULL (and backenable) if the q_count is the reason
* for QFULL being set.
*/
mutex_enter(QLOCK(q));
/*
* If the queue is empty, i.e. q_mblkcnt is zero, the queue cannot be full,
* hence clear QFULL.
* Likewise, if both q_count and q_mblkcnt are less than the hiwat mark,
* clear QFULL.
*/
/*
* A little more confusing, how about this way:
* if someone wants to write,
* AND
* both counts are less than the lowat mark
* OR
* the lowat mark is zero
* THEN
* backenable
*/
mutex_exit(QLOCK(q));
backenable(oq, 0);
} else
mutex_exit(QLOCK(q));
} else
mutex_exit(QLOCK(q));
}
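/*
 * Illustrative sketch: the backenable decision spelled out as a predicate.
 * The parameter names (wantw, count, mblkcnt, lowat) are stand-ins for the
 * QWANTW flag and the q_count/q_mblkcnt/q_lowat fields discussed above.
 */
static int
model_should_backenable(int wantw, uint_t count, uint_t mblkcnt, uint_t lowat)
{
        /*
         * Backenable only if a writer is waiting AND either both counts
         * have dropped below the low-water mark or no low-water mark is set.
         */
        return (wantw && ((count < lowat && mblkcnt < lowat) || lowat == 0));
}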
/*
* Set the forward service procedure pointer.
*
* Called at insert-time to cache a queue's next forward service procedure in
* q_nfsrv; used by canput() and canputnext(). If the queue to be inserted
* has a service procedure then q_nfsrv points to itself. If the queue to be
* inserted does not have a service procedure, then q_nfsrv points to the next
* queue forward that has a service procedure. If the queue is at the logical
* end of the stream (driver for write side, stream head for the read side)
* and does not have a service procedure, then q_nfsrv also points to itself.
*/
void
{
/*
* Insert the driver, initialize the driver and stream head.
* _I_INSERT does not allow inserting a driver. Make sure
* that it is not an insertion.
*/
else
} else {
/*
* set up read side q_nfsrv pointer. This MUST be done
* before setting the write side, because the setting of
* the write side for a fifo may depend on it.
*
* Suppose we have a fifo that only has pipemod pushed.
* pipemod has no read or write service procedures, so
* nfsrv for both pipemod queues points to prev_rq (the
* stream read head). Now push bufmod (which has only a
* read service procedure). Doing the write side first,
* wnew->q_nfsrv is set to pipemod's writeq nfsrv, which
* is WRONG; the next queue forward from wnew with a
* service procedure will be rnew, not the stream read head.
* Since the downstream queue (which in the case of a fifo
* is the read queue rnew) can affect upstream queues, it
* needs to be done first. Setting up the read side first
* sets nfsrv for both pipemod queues to rnew and then
* when the write side is set up, wnew->q_nfsrv will also
* point to rnew.
*/
/*
* use _OTHERQ() because, if this is a pipe, next
* module may have been pushed from other end and
* q_next could be a read queue.
*/
}
} else
/* set up write side q_nfsrv pointer */
/*
* For insertion, need to update nfsrv of the modules
* above which do not have a service routine.
*/
}
}
} else {
/*
* This is a fifo, so wnew's nfsrv is the same as rnew's.
*/
else
}
}
}
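/*
 * Illustrative sketch (hypothetical types): computing a "next forward
 * service procedure" pointer of the kind described above. Each queue caches
 * either itself (if it has a service procedure) or the first queue
 * downstream that does; at the logical end of the stream it points to
 * itself.
 */
typedef struct model_queue {
        struct model_queue *mq_next;    /* downstream neighbour */
        struct model_queue *mq_nfsrv;   /* cached next-with-service pointer */
        int             mq_has_srvp;    /* non-zero if a service proc exists */
} model_queue_t;

static void
model_set_nfsrv(model_queue_t *q)
{
        model_queue_t *nq;

        if (q->mq_has_srvp) {
                q->mq_nfsrv = q;        /* has its own service procedure */
                return;
        }
        /* Otherwise point at the first downstream queue that has one. */
        for (nq = q->mq_next; nq != NULL; nq = nq->mq_next) {
                if (nq->mq_has_srvp) {
                        q->mq_nfsrv = nq;
                        return;
                }
        }
        q->mq_nfsrv = q;                /* end of stream: points to itself */
}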
/*
* Reset the forward service procedure pointer; called at remove-time.
*/
void
{
/* Reset the write side q_nfsrv pointer for _I_REMOVE */
}
}
/* reset the read side q_nfsrv pointer */
/* Note that rqp->q_next cannot be NULL */
}
}
}
}
/*
* This routine should be called after all stream geometry changes to update
* the stream head cached struio() rd/wr queue pointers. Note: it must be
* called with the streamlock()ed.
*
* Note: only enables Synchronous STREAMS for a side of a Stream which has
* an explicit synchronous barrier module queue. That is, a queue that
* has specified a struio() type.
*/
static void
{
/*
* Not a stream head, but a mux, so no Synchronous STREAMS.
*/
return;
}
/*
* Scan the write queue(s) while synchronous
* until we find a qinfo uio type specified.
*/
while (wrq) {
wrq = 0;
break;
}
break;
wrq = 0;
break;
}
}
/*
* Scan the read queue(s) while synchronous
* until we find a qinfo uio type specified.
*/
while (wrq) {
wrq = 0;
break;
}
break;
wrq = 0;
break;
}
}
}
/*
* pass_wput unblocks the passthru queues, so that
* messages can arrive at the mux's lower read queue before
* the I_LINK/I_UNLINK is acked/nacked.
*/
static void
{
}
/*
* Create a new queue and block it and then insert it
* below the stream head on the lower stream.
* This prevents any messages from arriving during the setq
* as well as while the mux is processing the I_LINK/I_UNLINK.
* The blocked passq is unblocked once the I_LINK/I_UNLINK has
* been acked or nacked or if a message is generated and sent
* down the mux's write put procedure.
* See pass_wput().
*
* After the new queue is inserted, all messages coming from below are
* blocked. The call to strlock will ensure that all activity in the stream head
* read queue syncq is stopped (sq_count drops to zero).
*/
static queue_t *
{
/* setq might sleep in allocator - avoid holding locks. */
/*
* Use strlock() to wait for the stream head sq_count to drop to zero
* since we are going to change q_ptr in the stream head. Note that
* insertq() doesn't wait for any syncq counts to drop to zero.
*/
sqlist.sqlist_index = 0;
return (passq);
}
/*
* Let messages flow up into the mux by removing
* the passq.
*/
static void
{
}
/*
* Wait for the condition variable pointed to by `cvp' to be signaled,
* or for `tim' milliseconds to elapse, whichever comes first. If `tim'
* is negative, then there is no time limit. If `nosigs' is non-zero,
* then the wait will be non-interruptible.
*
* Returns >0 if signaled, 0 if interrupted, or -1 upon timeout.
*/
{
if (tim < 0) {
if (nosigs) {
ret = 1;
} else {
}
} else if (tim > 0) {
/*
* convert milliseconds to clock ticks
*/
if (nosigs) {
} else {
}
} else {
ret = -1;
}
return (ret);
}
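/*
 * Illustrative sketch, assuming only the standard DDI condition-variable
 * interfaces (cv_wait, cv_wait_sig, cv_timedwait, cv_timedwait_sig,
 * drv_usectohz, ddi_get_lbolt): a simplified model of the wait described
 * above, with the documented return convention (>0 signaled, 0 interrupted,
 * -1 timeout). It is not the function itself.
 */
static int
model_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs)
{
        clock_t ret;

        if (tim < 0) {
                /* No time limit. */
                if (nosigs) {
                        cv_wait(cvp, mp);
                        ret = 1;
                } else {
                        ret = cv_wait_sig(cvp, mp);     /* 0 if interrupted */
                }
        } else if (tim > 0) {
                /* Convert milliseconds to an absolute tick deadline. */
                clock_t deadline = ddi_get_lbolt() + drv_usectohz(tim * 1000);

                if (nosigs)
                        ret = cv_timedwait(cvp, mp, deadline);
                else
                        ret = cv_timedwait_sig(cvp, mp, deadline);
                /* Both timed variants return -1 on timeout. */
        } else {
                ret = -1;               /* zero timeout: report a timeout */
        }
        return ((int)ret);
}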
/*
* Wait until the stream head can determine if it is at the mark but
* don't wait forever to prevent a race condition between the "mark" state
* in the stream head and any mark state in the caller/user of this routine.
*
* This is used by sockets and for a socket it would be incorrect
* to return a failure for SIOCATMARK when there is no data in the receive
* queue and the marked urgent data is traveling up the stream.
*
* This routine waits until the mark is known by waiting for one of these
* three events:
* The stream head read queue becoming non-empty (including an EOF)
* The STRATMARK flag being set. (Due to a MSGMARKNEXT message.)
* The STRNOTATMARK flag being set (which indicates that the transport
* has sent a MSGNOTMARKNEXT message to indicate that it is not at
* the mark).
*
* The routine returns 1 if the stream is at the mark; 0 if it can
* be determined that the stream is not at the mark.
* If the wait times out and it can't determine
* whether or not the stream might be at the mark the routine will return -1.
*
* Note: This routine should only be used when a mark is pending i.e.,
* in the socket case the SIGURG has been posted.
* Note2: This can not wakeup just because synchronous streams indicate
* that data is available since it is not possible to use the synchronous
* streams interfaces to determine the b_flag value for the data queued below
* the stream head.
*/
int
{
int mark;
/* Wait for 100 milliseconds for any state change. */
return (-1);
}
}
mark = 1;
mark = 1;
else
mark = 0;
return (mark);
}
/*
* Set a read side error. If persist is set change the socket error
* to persistent. If errfunc is set install the function as the exported
* error handler.
*/
void
{
else
if (persist) {
} else {
}
}
}
/*
* Set a write side error. If persist is set change the socket error
* to persistent.
*/
void
{
else
if (persist) {
} else {
}
}
}
/*
* Make the stream return 0 (EOF) when all data has been read.
* No effect on write side.
*/
void
{
if (!eof) {
return;
}
}
}
void
{
}
void
{
else
else
if (flags & SH_CONSOL_DATA)
else
if (flags & SH_SIGALLDATA)
else
if (flags & SH_IGN_ZEROLEN)
else
}
void
{
if (flags & SH_SIGPIPE)
else
if (flags & SH_RECHECK_ERR)
else
}
void
{
}
/* Used within framework when the queue is already locked */
void
{
return;
/*
* Do not place on run queue if already enabled or closing.
*/
return;
/*
* Mark the queue enabled and place it on the run list if it is not already
* being serviced. If it is being serviced, the runservice() function will
* detect that QENAB is set and call the service procedure before clearing
* the QINSERVICE flag.
*/
if (q->q_flag & QINSERVICE)
return;
/* Record the time of qenable */
/*
* Put the queue in the stp list and schedule it for background
* processing if it is not already scheduled or if the stream head does not
* intend to process it in the foreground later by setting the
* STRS_WILLSERVICE flag.
*/
/*
* If there is already something on the list, the stp flags should show
* intention to drain it.
*/
stp->sd_nqueues++;
/*
* If no one will drain this stream we are the first producer and
* need to schedule it for background thread.
*/
/*
* No one will service this stream later, so we have to
* schedule it now.
*/
/*
* Task queue failed so fail over to the backup
* servicing thread.
*/
/*
* It is safe to clear STRS_SCHEDULED flag because it
* was set by this thread above.
*/
/*
* Failover scheduling is protected by service_queue
* lock.
*/
/*
*/
qhead = q;
else
qtail = q;
/*
* Clear stp queue list.
*/
stp->sd_nqueues = 0;
/*
* Wakeup background queue processing thread.
*/
}
}
}
static void
queue_service(queue_t *q)
{
/*
* The queue in the list should have
* QENAB flag set and should not have
* QINSERVICE flag set. QINSERVICE is
* set when the queue is dequeued and
* qenable_locked doesn't enqueue a
* queue with QINSERVICE set.
*/
mutex_enter(QLOCK(q));
q->q_flag |= QINSERVICE;
mutex_exit(QLOCK(q));
runservice(q);
}
static void
{
/* if we came here from the background thread, clear the flag */
/* let drain_syncq know that it's being called in the background */
}
static void
{
/*
* Note that SQ_WRITER is used on the outer perimeter
* to signal that a qwriter(OUTER) is either waiting to
* run or that it is actually running a function.
*/
/*
* All inner syncq are empty and have SQ_WRITER set
* to block entering the outer perimeter.
*
* We do not need to explicitly call write_now since
* outer_exit does it for us.
*/
}
static void
{
}
dbp->db_struioflag = 0;
}
/*
* Background processing of the stream queue list.
*/
static void
{
queue_t *q;
STR_SERVICE(stp, q);
}
/*
* Foreground processing of the stream queue list.
*/
void
{
queue_t *q;
/*
* We are going to drain this stream queue list, so qenable_locked will
* not schedule it until we finish.
*/
STR_SERVICE(stp, q);
/*
*/
if (q != NULL)
queue_service(q);
}
}
void
{
}
/*
* Replace the cred currently in the mblk with a different one.
*/
void
{
}
}
int
{
int rc = 0;
/* Associate values for M_DATA type */
} else {
} else {
rc = -1;
}
}
return (rc);
}
void
{
if (*flags & HCK_PARTIALCKSUM) {
}
} else {
/* get hardware checksum attribute */
}
}
}
/*
* Checksum buffer *bp for len bytes with psum partial checksum,
* or 0 if none, and return the 16 bit partial checksum.
*/
unsigned
{
extern unsigned int ip_ocsum();
/*
* Bp is 16 bit aligned and len is a multiple of the 16 bit word size.
*/
}
/*
* Bp isn't 16 bit aligned.
*/
unsigned int tsum;
#ifdef _LITTLE_ENDIAN
#else
#endif
len--;
bp++;
if (len & 1) {
#ifdef _LITTLE_ENDIAN
#else
#endif
}
} else {
/*
* Bp is 16 bit aligned.
*/
if (odd) {
#ifdef _LITTLE_ENDIAN
#else
#endif
}
}
/*
* Normalize psum to 16 bits before returning the new partial
* checksum. The max psum value before normalization is 0x3FDFE.
*/
}
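/*
 * Illustrative sketch: a plain 16-bit one's-complement partial checksum
 * over a byte buffer -- the operation described above -- written without
 * the alignment and odd-byte optimizations of the real routine. Byte order
 * handling is simplified to big-endian (network) order.
 */
static unsigned
model_partial_cksum(const unsigned char *bp, int len, unsigned psum)
{
        unsigned sum = psum;

        /* Add each 16-bit word. */
        while (len > 1) {
                sum += (unsigned)(bp[0] << 8) | bp[1];
                bp += 2;
                len -= 2;
        }
        if (len == 1)
                sum += (unsigned)(bp[0] << 8);  /* pad the trailing odd byte */

        /* Fold the carries until the result fits in 16 bits. */
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (sum);
}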
{
} else {
}
return (rc);
}
void
{
}
}
mblk_t *
{
return (NULL);
}
}
return (nmp);
}
/* NOTE: Do not add code after this point. */
/*
* replacement for QLOCK macro for those that can't use it.
*/
kmutex_t *
{
return (&(q)->q_lock);
}
/*
*/
void
runqueues(void)
{
}
void
queuerun(void)
{
}
/*
* Initialize the STR stack instance, which tracks autopush and persistent
* links.
*/
/* ARGSUSED */
static void *
{
int i;
/*
* set up autopush
*/
/*
* set up mux_node structures.
*/
return (ss);
}
/*
* Note: run at zone shutdown and not destroy so that the PLINKs are
* gone by the time other cleanup happens from the destroy callbacks.
*/
static void
{
int i;
/* Undo all the I_PLINKs for this zone */
int ret;
int rval;
continue;
if (ret != 0) {
continue;
}
if (ret != 0) {
continue;
}
if (ret) {
continue;
}
/* Close layered handles */
}
}
/*
* Free the structure; str_stack_shutdown did the other cleanup work.
*/
/* ARGSUSED */
static void
{
}