/* synch.c, revision 09ce0d4acf1a79c720d7e54b60e87cbfa0f1b2d6 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include "lint.h"
#include "thr_uberdata.h"
#include <sys/rtpriocntl.h>
#include <atomic.h>
#if defined(THREAD_DEBUG)
#define INCR32(x) (((x) != UINT32_MAX)? (x)++ : 0)
#define INCR(x) ((x)++)
#define DECR(x) ((x)--)
#define MAXINCR(m, x) ((m < ++x)? (m = x) : 0)
#else
#define INCR32(x)
#define INCR(x)
#define DECR(x)
#define MAXINCR(m, x)
#endif
/*
* This mutex is initialized to be held by lwp#1.
* It is used to block a thread that has returned from a mutex_lock()
* of a LOCK_PRIO_INHERIT mutex with an unrecoverable error.
 */
mutex_t	stall_mutex = DEFAULTMUTEX;
static int shared_mutex_held(mutex_t *);
static int mutex_queuelock_adaptive(mutex_t *);
static void mutex_wakeup_all(mutex_t *);
/*
* Lock statistics support functions.
*/
void
record_begin_hold(tdb_mutex_stats_t *msp)
{
	tdb_incr(msp->mutex_lock);
	msp->mutex_begin_hold = gethrtime();
}
hrtime_t
record_hold_time(tdb_mutex_stats_t *msp)
{
	hrtime_t now = gethrtime();
	if (msp->mutex_begin_hold)
		msp->mutex_hold_time += now - msp->mutex_begin_hold;
	msp->mutex_begin_hold = 0;
	return (now);
}
/*
* Called once at library initialization.
*/
void
mutex_setup(void)
{
	if (set_lock_byte(&stall_mutex.mutex_lockw))
		thr_panic("mutex_setup() cannot acquire stall_mutex");
	stall_mutex.mutex_owner = (uintptr_t)curthread;
}
/*
* The default spin count of 1000 is experimentally determined.
* On sun4u machines with any number of processors it could be raised
* to 10,000 but that (experimentally) makes almost no difference.
* The environment variable:
* _THREAD_ADAPTIVE_SPIN=count
* can be used to override and set the count in the range [0 .. 1,000,000].
*/
int thread_adaptive_spin = 1000;
int thread_queue_verify = 0;
static int ncpus;
/*
* Distinguish spinning for queue locks from spinning for regular locks.
* We try harder to acquire queue locks by spinning.
* The environment variable:
* _THREAD_QUEUE_SPIN=count
* can be used to override and set the count in the range [0 .. 1,000,000].
*/
int thread_queue_spin = 10000;
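/*
 * Illustrative sketch only, not the libc startup code (the environment
 * parsing happens elsewhere at library initialization): how an override
 * such as _THREAD_ADAPTIVE_SPIN=count could be validated against the
 * documented range [0 .. 1,000,000].  The helper name is hypothetical.
 */
static int
clamp_spin_sketch(long requested, int current)
{
	if (requested < 0)
		return (current);	/* reject nonsense; keep the default */
	if (requested > 1000000)
		return (1000000);	/* clamp to the documented maximum */
	return ((int)requested);
}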
#define ALL_ATTRIBUTES \
	(LOCK_RECURSIVE | LOCK_ERRORCHECK | \
	LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT | \
	LOCK_ROBUST)
/*
* 'type' can be one of USYNC_THREAD, USYNC_PROCESS, or USYNC_PROCESS_ROBUST,
 * augmented by zero or more of the flags:
* LOCK_RECURSIVE
* LOCK_ERRORCHECK
* LOCK_PRIO_INHERIT
* LOCK_PRIO_PROTECT
* LOCK_ROBUST
*/
/* ARGSUSED2 */
int
{
int error = 0;
int ceil;
if (basetype == USYNC_PROCESS_ROBUST) {
/*
* USYNC_PROCESS_ROBUST is a deprecated historical type.
* We change it into (USYNC_PROCESS | LOCK_ROBUST) but
* retain the USYNC_PROCESS_ROBUST flag so we can return
* ELOCKUNMAPPED when necessary (only USYNC_PROCESS_ROBUST
* mutexes will ever draw ELOCKUNMAPPED).
*/
}
if (type & LOCK_PRIO_PROTECT)
== (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT) ||
((type & LOCK_PRIO_PROTECT) &&
} else if (type & LOCK_ROBUST) {
/*
* Callers of mutex_init() with the LOCK_ROBUST attribute
* are required to pass an initially all-zero mutex.
* Multiple calls to mutex_init() are allowed; all but
* the first return EBUSY. A call to mutex_init() is
* allowed to make an inconsistent robust lock consistent
* (for historical usage, even though the proper interface
* for this is mutex_consistent()). Note that we use
* atomic_or_16() to set the LOCK_INITED flag so as
* not to disturb surrounding bits (LOCK_OWNERDEAD, etc).
*/
} else if (mutex_consistent(mp) != 0) {
}
/* register a process robust mutex with the kernel */
if (basetype == USYNC_PROCESS)
} else {
}
}
/*
* This should be at the beginning of the function,
* but for the sake of old broken applications that
* do not have proper alignment for their mutexes
* (and don't check the return code from mutex_init),
* we put it here, after initializing the mutex regardless.
*/
if (error == 0 &&
curthread->ul_misaligned == 0)
return (error);
}
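/*
 * Caller's-eye sketch, not part of libc proper: initializing a
 * process-shared robust mutex with the attribute flags listed above.
 * The wrapper name below is hypothetical and exists only for
 * illustration; mutex_init(3C) is the documented interface.
 */
static int
init_shared_robust_mutex_sketch(mutex_t *mp)
{
	/* USYNC_PROCESS: shared across processes; LOCK_ROBUST: owner death is recoverable */
	return (mutex_init(mp, USYNC_PROCESS | LOCK_ROBUST, NULL));
}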
/*
* Delete mp from list of ceiling mutexes owned by curthread.
* Return 1 if the head of the chain was updated.
*/
int
{
}
}
return (0);
}
/*
* Add mp to the list of ceiling mutexes owned by curthread.
* Return ENOMEM if no memory could be allocated.
*/
int
{
return (ENOMEM);
return (0);
}
/*
* Helper function for _ceil_prio_inherit() and _ceil_prio_waive(), below.
*/
static void
{
}
/*
* Inherit priority from ceiling.
* This changes the effective priority, not the assigned priority.
*/
void
_ceil_prio_inherit(int prio)
{
}
/*
* Waive inherited ceiling priority. Inherit from head of owned ceiling locks
* if holding at least one ceiling lock. If no ceiling locks are held at this
* point, disinherit completely, reverting back to assigned priority.
*/
void
_ceil_prio_waive(void)
{
int prio;
} else {
}
}
/*
* Clear the lock byte. Retain the waiters byte and the spinners byte.
* Return the old value of the lock word.
*/
static uint32_t
{
do {
return (old);
}
/*
* Same as clear_lockbyte(), but operates on mutex_lockword64.
* The mutex_ownerpid field is cleared along with the lock byte.
*/
static uint64_t
{
do {
old = *lockword64;
return (old);
}
/*
* Similar to set_lock_byte(), which only tries to set the lock byte.
* Here, we attempt to set the lock byte AND the mutex_ownerpid, keeping
* the remaining bytes constant. This atomic operation is required for the
* correctness of process-shared robust locks, otherwise there would be
 * a window of vulnerability in which the lock byte had been set but the
* mutex_ownerpid had not yet been set. If the process were to die in
* this window of vulnerability (due to some other thread calling exit()
* or the process receiving a fatal signal), the mutex would be left locked
* but without a process-ID to determine which process was holding the lock.
* The kernel would then be unable to mark the robust mutex as LOCK_OWNERDEAD
* when the process died. For all other cases of process-shared locks, this
* operation is just a convenience, for the sake of common code.
*
* This operation requires process-shared robust locks to be properly
* aligned on an 8-byte boundary, at least on sparc machines, lest the
* operation incur an alignment fault. This is automatic when locks
* are declared properly using the mutex_t or pthread_mutex_t data types
* and the application does not allocate dynamic memory on less than an
* 8-byte boundary. See the 'horrible hack' comments below for cases
* dealing with such broken applications.
*/
static int
{
return (LOCKCLEAR);
return (LOCKSET);
}
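/*
 * Hedged sketch of the technique described above, not the libc
 * implementation: a single 64-bit compare-and-swap publishes the lock
 * byte and the owner pid together, so a dying process can never be
 * observed with the byte set but no pid recorded.  The SK_* layout
 * constants are assumptions made only for this illustration.
 */
#define	SK_LOCKBYTE64	((uint64_t)0xff << 56)	/* assumed lock-byte position */
#define	SK_PIDMASK64	((uint64_t)0xffffffff)	/* assumed pid field position */

static int
set_lock_byte64_sketch(volatile uint64_t *lockword64, pid_t pid)
{
	uint64_t old;
	uint64_t new;

	do {
		old = *lockword64;
		if (old & SK_LOCKBYTE64)
			return (1);	/* already held; nothing to do */
		new = (old & ~SK_PIDMASK64) | (uint32_t)pid | SK_LOCKBYTE64;
	} while (atomic_cas_64(lockword64, old, new) != old);
	return (0);			/* lock byte and pid now set atomically */
}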
/*
* Increment the spinners count in the mutex lock word.
* Return 0 on success. Return -1 if the count would overflow.
*/
static int
{
do {
return (-1);
return (0);
}
/*
* Decrement the spinners count in the mutex lock word.
* Return the new value of the lock word.
*/
static uint32_t
{
do {
if (new & SPINNERMASK)
return (new);
}
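/*
 * Hedged sketch of the bounded spinner count manipulated above
 * (illustration only): the count lives in a byte of the lock word and
 * saturates rather than wrapping.  The SK_SPINNER* layout constants
 * are assumptions for this sketch, not libc's actual definitions.
 */
#define	SK_SPINNERSHIFT	16
#define	SK_SPINNERMASK	(0xffU << SK_SPINNERSHIFT)

static int
spinners_incr_sketch(volatile uint32_t *lockword)
{
	uint32_t old;
	uint32_t new;

	do {
		old = *lockword;
		if ((old & SK_SPINNERMASK) == SK_SPINNERMASK)
			return (-1);	/* count would overflow; don't spin */
		new = old + (1U << SK_SPINNERSHIFT);
	} while (atomic_cas_32(lockword, old, new) != old);
	return (0);
}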
/*
* Non-preemptive spin locks. Used by queue_lock().
* No lock statistics are gathered for these locks.
* No DTrace probes are provided for these locks.
*/
void
{
return;
}
/*
* Spin for a while, attempting to acquire the lock.
*/
if (mutex_queuelock_adaptive(mp) == 0 ||
return;
}
/*
 * Try harder if we were previously at a no-preemption level.
*/
if (mutex_queuelock_adaptive(mp) == 0 ||
return;
}
}
/*
* Give up and block in the kernel for the mutex.
*/
}
void
{
mp->mutex_owner = 0;
(void) ___lwp_mutex_wakeup(mp, 0);
}
}
/*
* Allocate the sleep queue hash table.
*/
void
queue_alloc(void)
{
void *data;
int i;
/*
* No locks are needed; we call here only when single-threaded.
*/
== MAP_FAILED)
thr_panic("cannot allocate thread queue_head table");
#if defined(THREAD_DEBUG)
#endif
}
}
#if defined(THREAD_DEBUG)
/*
* Debugging: verify correctness of a sleep queue.
*/
void
{
char qtype;
void *wchan;
cnt++;
}
if (!thread_queue_verify)
return;
/* real expensive stuff, only for _THREAD_QUEUE_VERIFY */
cnt++;
}
}
}
#else /* THREAD_DEBUG */
#endif /* THREAD_DEBUG */
/*
* Acquire a queue head.
*/
{
/*
* It is possible that we could be called while still single-threaded.
* If so, we call queue_alloc() to allocate the queue_head[] array.
*/
queue_alloc();
}
break;
/* the default queue root is available; use it */
}
return (qp);
}
/*
* Release a queue head.
*/
void
{
}
/*
* For rwlock queueing, we must queue writers ahead of readers of the
* same priority. We do this by making writers appear to have a half
* point higher priority for purposes of priority comparisons below.
*/
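/*
 * Hedged illustration of the "half point" trick described above (not
 * the libc data layout): doubling the priority and adding one for
 * writers yields an integer key that orders writers ahead of readers
 * of the same priority while preserving the overall priority order.
 */
static int
effective_qpri_sketch(int pri, int is_writer)
{
	return (2 * pri + (is_writer != 0));
}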
void
{
/* use the thread's queue root for the linkage */
qrp->qr_rtcount = 0;
}
/*
* LIFO queue ordering is unfair and can lead to starvation,
* but it gives better performance for heavily contended locks.
* We use thread_queue_fifo (range is 0..8) to determine
* the frequency of FIFO vs LIFO queuing:
* 0 : every 256th time (almost always LIFO)
* 1 : every 128th time
* 2 : every 64th time
* 3 : every 32nd time
* 4 : every 16th time (the default value, mostly LIFO)
* 5 : every 8th time
* 6 : every 4th time
* 7 : every 2nd time
* 8 : every time (never LIFO, always FIFO)
* Note that there is always some degree of FIFO ordering.
* This breaks live lock conditions that occur in applications
* that are written assuming (incorrectly) that threads acquire
* locks fairly, that is, in roughly round-robin order.
* In any event, the queue is maintained in kernel priority order.
*
* If force_fifo is non-zero, fifo queueing is forced.
* SUSV3 requires this for semaphores.
*/
/*
*/
} else if (force_fifo |
/*
* Enqueue after the last thread whose priority is greater
* than or equal to the priority of the thread being queued.
* Attempt first to go directly onto the tail of the queue.
*/
else {
break;
}
} else {
/*
* Enqueue before the first thread whose priority is less
* than or equal to the priority of the thread being queued.
* Hopefully we can go directly onto the head of the queue.
*/
break;
}
ulwp->ul_pilocks) {
qrp->qr_rtcount++;
}
}
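/*
 * Hedged sketch of the FIFO/LIFO frequency table above, not the exact
 * libc expression: with thread_queue_fifo in [0..8], FIFO ordering is
 * chosen once every (256 >> thread_queue_fifo) enqueues, so 8 means
 * always FIFO and 0 means only every 256th time.
 */
static int
choose_fifo_sketch(uint32_t enqueue_count, uint32_t queue_fifo_setting)
{
	return ((enqueue_count & ((256U >> queue_fifo_setting) - 1)) == 0);
}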
/*
* Helper function for queue_slot() and queue_slot_rt().
* Try to find a non-suspended thread on the queue.
*/
static ulwp_t **
{
int priority = -1;
int tpri;
continue;
if (!rt)
break;
}
}
return (foundpp);
}
/*
* For real-time, we search the entire queue because the dispatch
* (kernel) priorities may have changed since enqueueing.
*/
static ulwp_t **
{
int tpri;
}
}
/*
* Try not to return a suspended thread.
* This mimics the old libthread's behavior.
*/
}
return (foundpp);
}
ulwp_t **
{
int rt;
*more = 0;
return (NULL); /* no lwps on the queue */
}
*more = 0;
}
*more = 1;
if (rt) /* real-time queue */
/*
* Try not to return a suspended thread.
* This mimics the old libthread's behavior.
*/
return (ulwpp);
}
/*
* The common case; just pick the first thread on the queue.
*/
}
/*
* Common code for unlinking an lwp from a user-level sleep queue.
*/
void
{
if (ulwp->ul_rtqueued) {
ulwp->ul_rtqueued = 0;
qrp->qr_rtcount--;
}
/*
* We can't continue to use the unlinked thread's
* queue root for the linkage.
*/
/* switch to using the last thread's queue root */
if (qr_next)
if (qr_prev)
else
} else {
/* empty queue root; just delete from the hash list */
if (qr_next)
if (qr_prev)
else
}
}
}
ulwp_t *
{
return (NULL);
return (ulwp);
}
/*
* Return a pointer to the highest priority thread sleeping on wchan.
*/
ulwp_t *
{
int more;
return (NULL);
return (*ulwpp);
}
int
{
int found = 0;
/* find self on the sleep queue */
found = 1;
break;
}
}
}
if (!found)
thr_panic("dequeue_self(): curthread not found on queue");
}
/*
* Called from call_user_handler() and _thrp_suspend() to take
* ourself off of our sleep queue so we can grab locks.
*/
void
unsleep_self(void)
{
/*
* Calling enter_critical()/exit_critical() here would lead
* to recursion. Just manipulate self->ul_critical directly.
*/
self->ul_critical++;
/*
* We may have been moved from a CV queue to a
* mutex queue while we were attempting queue_lock().
* If so, just loop around and try again.
* dequeue_self() clears self->ul_sleepq.
*/
(void) dequeue_self(qp);
}
self->ul_critical--;
}
/*
 * Common code for calling the ___lwp_mutex_timedlock() system call.
* Returns with mutex_owner and mutex_ownerpid set correctly.
*/
static int
{
int acquired;
int error;
}
if (msp) {
begin_sleep = gethrtime();
}
/* defer signals until the assignment of mp->mutex_owner */
for (;;) {
/*
* A return value of EOWNERDEAD or ELOCKUNMAPPED
* means we successfully acquired the lock.
*/
acquired = 0;
break;
}
if (mtype & USYNC_PROCESS) {
/*
* Defend against forkall(). We may be the child,
* in which case we don't actually own the mutex.
*/
acquired = 1;
break;
}
} else {
acquired = 1;
break;
}
}
if (msp)
if (acquired) {
} else {
}
return (error);
}
/*
* Common code for calling the ___lwp_mutex_trylock() system call.
* Returns with mutex_owner and mutex_ownerpid set correctly.
*/
int
{
int error;
int acquired;
for (;;) {
/*
* A return value of EOWNERDEAD or ELOCKUNMAPPED
* means we successfully acquired the lock.
*/
acquired = 0;
break;
}
if (mtype & USYNC_PROCESS) {
/*
* Defend against forkall(). We may be the child,
* in which case we don't actually own the mutex.
*/
acquired = 1;
break;
}
} else {
acquired = 1;
break;
}
}
if (acquired) {
}
return (error);
}
volatile sc_shared_t *
setup_schedctl(void)
{
volatile sc_shared_t *scp;
}
/*
* Unless the call to setup_schedctl() is surrounded
* by enter_critical()/exit_critical(), the address
* we are returning could be invalid due to a forkall()
* having occurred in another thread.
*/
return (scp);
}
/*
* Interfaces from libsched, incorporated into libc.
* libsched.so.1 is now a filter library onto libc.
*/
schedctl_init(void)
{
}
void
schedctl_exit(void)
{
}
/*
* Contract private interface for java.
* Set up the schedctl data if it doesn't exist yet.
* Return a pointer to the pointer to the schedctl data.
*/
volatile sc_shared_t *volatile *
_thr_schedctl(void)
{
volatile sc_shared_t *volatile *ptr;
return (NULL);
(void) setup_schedctl();
return (ptr);
}
/*
* Block signals and attempt to block preemption.
* no_preempt()/preempt() must be used in pairs but can be nested.
*/
void
{
volatile sc_shared_t *scp;
if (self->ul_preempt++ == 0) {
/*
* Save the pre-existing preempt value.
*/
}
}
}
/*
* Undo the effects of no_preempt().
*/
void
{
volatile sc_shared_t *scp;
if (--self->ul_preempt == 0) {
/*
* Restore the pre-existing preempt value.
*/
yield();
/*
* Shouldn't happen. This is either
* a race condition or the thread
* just entered the real-time class.
*/
yield();
}
}
}
}
}
/*
* If a call to preempt() would cause the current thread to yield or to
* take deferred actions in exit_critical(), then unpark the specified
* lwp so it can run while we delay. Return the original lwpid if the
* unpark was not performed, else return zero. The tests are a repeat
* of some of the tests in preempt(), above. This is a statistical
* optimization solely for cond_sleep_queue(), below.
*/
static lwpid_t
{
(void) __lwp_unpark(lwpid);
lwpid = 0;
}
return (lwpid);
}
/*
* Spin for a while (if 'tryhard' is true), trying to grab the lock.
* If this fails, return EBUSY and let the caller deal with it.
* If this succeeds, return 0 with mutex_owner set to curthread.
*/
static int
{
volatile sc_shared_t *scp;
int count = 0;
int max_count;
return (EBUSY);
/* short-cut, not definitive (see below) */
goto done;
}
/*
* Make one attempt to acquire the lock before
* incurring the overhead of the spin loop.
*/
if (set_lock_byte(lockp) == 0) {
error = 0;
goto done;
}
if (!tryhard)
goto done;
if (ncpus == 0)
if (max_count == 0)
goto done;
/*
* This spin loop is unfair to lwps that have already dropped into
* the kernel to sleep. They will starve on a highly-contended mutex.
* This is just too bad. The adaptive spin algorithm is intended
* to allow programs with highly-contended locks (that is, broken
* programs) to execute with reasonable speed despite their contention.
* Being fair would reduce the speed of such programs and well-written
* programs will not suffer in any case.
*/
goto done;
error = 0;
break;
}
break;
SMT_PAUSE();
/*
* Stop spinning if the mutex owner is not running on
* a processor; it will not drop the lock any time soon
* and we would just be wasting time to keep spinning.
*
* Note that we are looking at another thread (ulwp_t)
* without ensuring that the other thread does not exit.
* The scheme relies on ulwp_t structures never being
* deallocated by the library (the library employs a free
* list of ulwp_t structs that are reused when new threads
* are created) and on schedctl shared memory never being
* deallocated once created via __schedctl().
*
* Thus, the worst that can happen when the spinning thread
* looks at the owner's schedctl data is that it is looking
* at some other thread's schedctl data. This almost never
* happens and is benign when it does.
*/
break;
}
/*
* We haven't yet acquired the lock, the lock
* is free, and there are no other spinners.
* Make one final attempt to acquire the lock.
*
* This isn't strictly necessary since mutex_lock_queue()
* (the next action this thread will take if it doesn't
* acquire the lock here) makes one attempt to acquire
* the lock before putting the thread to sleep.
*
* If the next action for this thread (on failure here)
* were not to call mutex_lock_queue(), this would be
* necessary for correctness, to avoid ending up with an
* unheld mutex with waiters but no one to wake them up.
*/
if (set_lock_byte(lockp) == 0) {
error = 0;
}
count++;
}
done:
/*
* We shouldn't own the mutex.
* Just clear the lock; everyone has already been waked up.
*/
*ownerp = 0;
}
if (error) {
if (count) {
}
}
} else {
if (count) {
}
error = EOWNERDEAD;
}
}
return (error);
}
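/*
 * Hedged outline of the adaptive strategy described above, not the
 * actual mutex_trylock_adaptive() body: try once, spin a bounded
 * number of times while the lock looks busy, then make one final
 * attempt before telling the caller to block in the kernel.  It
 * assumes set_lock_byte() takes the lock-byte address and that
 * SMT_PAUSE() is the pause primitive, as in the surrounding code.
 */
static int
adaptive_try_sketch(volatile uint8_t *lockp, int max_spin)
{
	int count;

	if (set_lock_byte(lockp) == 0)
		return (0);			/* got it on the first try */
	for (count = 0; count < max_spin; count++) {
		SMT_PAUSE();			/* be polite to the sibling strand */
		if (*lockp == 0 && set_lock_byte(lockp) == 0)
			return (0);		/* lock came free and we took it */
	}
	/* one last attempt, for the reason given in the comments above */
	if (*lockp == 0 && set_lock_byte(lockp) == 0)
		return (0);
	return (EBUSY);				/* caller should queue or block */
}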
/*
* Same as mutex_trylock_adaptive(), except specifically for queue locks.
* The owner field is not set here; the caller (spin_lock_set()) sets it.
*/
static int
{
volatile sc_shared_t *scp;
if (count == 0)
return (EBUSY);
while (--count >= 0) {
return (0);
SMT_PAUSE();
break;
}
return (EBUSY);
}
/*
* Like mutex_trylock_adaptive(), but for process-shared mutexes.
* Spin for a while (if 'tryhard' is true), trying to grab the lock.
* If this fails, return EBUSY and let the caller deal with it.
* If this succeeds, return 0 with mutex_owner set to curthread
* and mutex_ownerpid set to the current pid.
*/
static int
{
int count = 0;
int max_count;
/* horrible hack, necessary only on 32-bit sparc */
int fix_alignment_problem =
#endif
if (shared_mutex_held(mp))
return (EBUSY);
/* short-cut, not definitive (see below) */
goto done;
}
/*
* Make one attempt to acquire the lock before
* incurring the overhead of the spin loop.
*/
/* horrible hack, necessary only on 32-bit sparc */
if (fix_alignment_problem) {
error = 0;
goto done;
}
} else
#endif
/* mp->mutex_ownerpid was set by set_lock_byte64() */
error = 0;
goto done;
}
if (!tryhard)
goto done;
if (ncpus == 0)
if (max_count == 0)
goto done;
/*
* This is a process-shared mutex.
* We cannot know if the owner is running on a processor.
* We just spin and hope that it is on a processor.
*/
goto done;
/* horrible hack, necessary only on 32-bit sparc */
if (fix_alignment_problem) {
if ((*lockp & LOCKMASK64) == 0 &&
error = 0;
break;
}
} else
#endif
if ((*lockp & LOCKMASK64) == 0 &&
/* mp->mutex_ownerpid was set by set_lock_byte64() */
error = 0;
break;
}
break;
SMT_PAUSE();
}
/*
* We haven't yet acquired the lock, the lock
* is free, and there are no other spinners.
* Make one final attempt to acquire the lock.
*
* This isn't strictly necessary since mutex_lock_kernel()
* (the next action this thread will take if it doesn't
* acquire the lock here) makes one attempt to acquire
* the lock before putting the thread to sleep.
*
* If the next action for this thread (on failure here)
* were not to call mutex_lock_kernel(), this would be
* necessary for correctness, to avoid ending up with an
* unheld mutex with waiters but no one to wake them up.
*/
/* horrible hack, necessary only on 32-bit sparc */
if (fix_alignment_problem) {
error = 0;
}
} else
#endif
/* mp->mutex_ownerpid was set by set_lock_byte64() */
error = 0;
}
count++;
}
done:
/*
* We shouldn't own the mutex.
* Just clear the lock; everyone has already been waked up.
*/
mp->mutex_owner = 0;
/* mp->mutex_ownerpid is cleared by clear_lockbyte64() */
}
if (error) {
if (count) {
}
}
} else {
if (count) {
}
error = EOWNERDEAD;
else
error = EOWNERDEAD;
}
}
return (error);
}
/*
* Mutex wakeup code for releasing a USYNC_THREAD mutex.
* Returns the lwpid of the thread that was dequeued, if any.
* The caller of mutex_wakeup() must call __lwp_unpark(lwpid)
* to wake up the specified lwp.
*/
static lwpid_t
{
int more;
/*
* Dequeue a waiter from the sleep queue. Don't touch the mutex
* waiters bit if no one was found on the queue because the mutex
* might have been deallocated or reallocated for another purpose.
*/
}
return (lwpid);
}
/*
* Mutex wakeup code for releasing all waiters on a USYNC_THREAD mutex.
*/
static void
{
int nlwpid = 0;
/*
* Walk the list of waiters and prepare to wake up all of them.
* The waiters flag has already been cleared from the mutex.
*
* We keep track of lwpids that are to be unparked in lwpid[].
* __lwp_unpark_all() is called to unpark all of them after
* they have been removed from the sleep queue and the sleep
* queue lock has been dropped. If we run out of space in our
* on-stack buffer, we need to allocate more but we can't call
* lmalloc() because we are holding a queue lock when the overflow
* occurs and lmalloc() acquires a lock. We can't use alloca()
* either because the application may have allocated a small
* stack and we don't want to overrun the stack. So we call
* alloc_lwpids() to allocate a bigger buffer using the mmap()
* system call directly since that path acquires no locks.
*/
for (;;) {
break;
}
if (nlwpid == 0) {
} else {
mp->mutex_waiters = 0;
if (nlwpid == 1)
(void) __lwp_unpark(lwpid[0]);
else
}
}
/*
* Release a process-private mutex.
* As an optimization, if there are waiters but there are also spinners
* attempting to acquire the mutex, then don't bother waking up a waiter;
* one of the spinners will acquire the mutex soon and it would be a waste
* of resources to wake up some thread just to have it spin for a while
* and then possibly go back to sleep. See mutex_trylock_adaptive().
*/
static lwpid_t
{
mp->mutex_owner = 0;
if ((old_lockword & WAITERMASK) &&
if (release_all)
else
if (lwpid == 0)
}
return (lwpid);
}
/*
* Like mutex_unlock_queue(), but for process-shared mutexes.
*/
static void
{
mp->mutex_owner = 0;
/* horrible hack, necessary only on 32-bit sparc */
mp->mutex_ownerpid = 0;
if ((old_lockword & WAITERMASK) &&
}
return;
}
#endif
/* mp->mutex_ownerpid is cleared by clear_lockbyte64() */
if ((old_lockword64 & WAITERMASK64) &&
}
}
void
stall(void)
{
for (;;)
}
/*
* Acquire a USYNC_THREAD mutex via user-level sleep queues.
* We failed set_lock_byte(&mp->mutex_lockw) before coming here.
* If successful, returns with mutex_owner set correctly.
*/
int
{
int error = 0;
}
if (msp) {
begin_sleep = gethrtime();
}
/*
* Put ourself on the sleep queue, and while we are
* unable to grab the lock, go park in the kernel.
* Take ourself off the sleep queue after we acquire the lock.
*/
for (;;) {
break;
}
/*
* __lwp_park() will return the residual time in tsp
* if we are unparked before the timeout expires.
*/
set_parking_flag(self, 0);
/*
* We could have taken a signal or suspended ourself.
* If we did, then we removed ourself from the queue.
* Someone else may have removed us from the queue
* as a consequence of mutex_unlock(). We may have
* gotten a timeout from __lwp_park(). Or we may still
* be on the queue and this is just a spurious wakeup.
*/
if (error) {
break;
error = 0;
}
break;
}
}
if (error) {
break;
}
error = 0;
}
}
/*
* We shouldn't own the mutex.
* Just clear the lock; everyone has already been waked up.
*/
mp->mutex_owner = 0;
}
if (msp)
if (error) {
} else {
error = EOWNERDEAD;
}
}
return (error);
}
static int
{
if (mtype & LOCK_RECURSIVE) {
return (EAGAIN);
}
mp->mutex_rcount++;
return (0);
}
if (try == MUTEX_LOCK) {
return (EDEADLK);
}
return (EBUSY);
}
/*
* Register this USYNC_PROCESS|LOCK_ROBUST mutex with the kernel so
* it can apply LOCK_OWNERDEAD|LOCK_UNMAPPED if it becomes necessary.
* We use tdb_hash_lock here and in the synch object tracking code in
* the tdb_agent.c file. There is no conflict between these two usages.
*/
void
{
}
}
/*
* First search the registered table with no locks held.
* This is safe because the table never shrinks
* and we can only get a false negative.
*/
return;
}
/*
* The lock was not found.
* Repeat the operation with tdb_hash_lock held.
*/
return;
}
/* remember the first invalid entry, if any */
}
/*
* The lock has never been registered.
* Add it to the table and register it now.
*/
/*
* Reuse the invalid entry we found above.
* The linkages are still correct.
*/
} else {
/*
* Allocate a new entry and add it to
* the hash table and to the global list.
*/
}
(void) ___lwp_mutex_register(mp);
}
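/*
 * Hedged sketch of the two-pass lookup used above (illustration only;
 * the struct and function names are hypothetical): because the table
 * never shrinks, an unlocked scan can give a false negative but never
 * a false positive, so only a miss requires taking tdb_hash_lock and
 * rescanning before inserting.
 */
typedef struct reg_sketch {
	struct reg_sketch *next;
	void *addr;			/* NULL means the entry was invalidated */
} reg_sketch_t;

static int
is_registered_sketch(reg_sketch_t *head, void *addr)
{
	reg_sketch_t *p;

	for (p = head; p != NULL; p = p->next)
		if (p->addr == addr)
			return (1);
	return (0);	/* possibly unregistered; recheck under the lock */
}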
/*
* This is called from mmap(), munmap() and shmdt() to unregister
* all robust locks contained in the mapping that is going away.
* We don't delete the entries in the hash table, since the hash table
* is constrained never to shrink; we just invalidate the addresses.
*/
void
{
/*
* Round up len to a multiple of pagesize.
*/
if (pagesize == 0) /* do this once */
/*
* Do this by traversing the global list, not the hash table.
* The hash table is large (32K buckets) and sparsely populated.
* The global list contains all of the registered entries.
*/
}
}
/*
* This is called in the child of fork()/forkall() to start over
* with a clean slate. (Each process must register its own locks.)
* No locks are needed because all other threads are suspended or gone.
*/
void
unregister_all_locks(void)
{
/*
* Do this first, before calling lfree().
* lfree() may call munmap(), which calls unregister_locks().
*/
/*
* As above, do this by traversing the global list, not the hash table.
*/
}
}
/*
* Returns with mutex_owner set correctly.
*/
int
{
int error = 0;
int myprio;
try &= ~MUTEX_NOCEIL;
if (!self->ul_schedctl_called)
(void) setup_schedctl();
return (EPERM);
}
return (EINVAL);
}
return (error);
}
}
== (USYNC_PROCESS | LOCK_ROBUST))
if (mtype & LOCK_PRIO_INHERIT) {
/* go straight to the kernel */
else /* MUTEX_LOCK */
/*
* The kernel never sets or clears the lock byte
* for LOCK_PRIO_INHERIT mutexes.
* Set it here for consistency.
*/
switch (error) {
case 0:
self->ul_pilocks++;
break;
case EOWNERDEAD:
case ELOCKUNMAPPED:
self->ul_pilocks++;
/* FALLTHROUGH */
case ENOTRECOVERABLE:
break;
case EDEADLK:
/*
* Note: mutex_timedlock() never returns EINTR.
*/
} else { /* simulate a deadlock */
stall();
}
break;
}
} else if (mtype & USYNC_PROCESS) {
} else { /* USYNC_THREAD */
}
switch (error) {
case 0:
case EOWNERDEAD:
case ELOCKUNMAPPED:
if (mtype & LOCK_ROBUST)
if (msp)
break;
default:
(void) _ceil_mylist_del(mp);
}
if (msp)
}
}
break;
}
return (error);
}
int
{
/*
* We know that USYNC_PROCESS is set in mtype and that
* zero, one, or both of the flags LOCK_RECURSIVE and
* LOCK_ERRORCHECK are set, and that no other flags are set.
*/
/* horrible hack, necessary only on 32-bit sparc */
self->ul_misaligned) {
return (0);
}
} else
#endif
/* mp->mutex_ownerpid was set by set_lock_byte64() */
return (0);
}
if (try == MUTEX_LOCK) {
return (0);
}
}
return (EBUSY);
}
static int
{
/*
* Optimize the case of USYNC_THREAD, including
* the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
* no error detection, no lock statistics,
* and the process has only a single thread.
* (Most likely a traditional single-threaded application.)
*/
/*
* Only one thread exists so we don't need an atomic operation.
* We do, however, need to protect against signals.
*/
if (mp->mutex_lockw == 0) {
return (0);
}
/*
* We have reached a deadlock, probably because the
* process is executing non-async-signal-safe code in
* a signal handler and is attempting to acquire a lock
* that it already owns. This is not surprising, given
 * bad programming practices over the years that have
* resulted in applications calling printf() and such
* in their signal handlers. Unless the user has told
* us that the signal handlers are safe by setting:
* export _THREAD_ASYNC_SAFE=1
* we return EDEADLK rather than actually deadlocking.
*/
return (EDEADLK);
}
}
/*
* Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
* no error detection, and no lock statistics.
* Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
*/
(gflags->uf_trs_ted |
if (mtype & USYNC_PROCESS)
return (0);
}
return (0);
}
/* else do it the long way */
}
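/*
 * Hedged sketch of the single-threaded fast path described above
 * (illustration only): with exactly one thread no atomic operation is
 * needed, but signals must be deferred so a handler cannot run between
 * the test and the store.  The signal deferral itself is libc-internal
 * and is represented here only by comments.
 */
static int
single_thread_trylock_sketch(mutex_t *mp, uintptr_t self)
{
	/* signals are assumed to be deferred at this point */
	if (mp->mutex_lockw == 0) {
		mp->mutex_lockw = LOCKSET;	/* plain store: only one thread exists */
		mp->mutex_owner = self;
		/* re-enable signals here */
		return (0);
	}
	/* re-enable signals here */
	return (EBUSY);		/* held by this thread: recursion or EDEADLK above */
}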
int
{
}
int
{
int error;
return (error);
}
int
{
int error;
return (error);
}
int
{
/*
* Optimize the case of USYNC_THREAD, including
* the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
* no error detection, no lock statistics,
* and the process has only a single thread.
* (Most likely a traditional single-threaded application.)
*/
/*
* Only one thread exists so we don't need an atomic operation.
* We do, however, need to protect against signals.
*/
if (mp->mutex_lockw == 0) {
return (0);
}
return (EBUSY);
}
/*
* Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
* no error detection, and no lock statistics.
* Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
*/
(gflags->uf_trs_ted |
if (mtype & USYNC_PROCESS)
return (0);
}
}
return (EBUSY);
}
/* else do it the long way */
}
int
{
int error = 0;
int release_all;
!mutex_held(mp))
return (EPERM);
mp->mutex_rcount--;
return (0);
}
(void) record_hold_time(msp);
}
if (mtype & LOCK_PRIO_INHERIT) {
mp->mutex_owner = 0;
/* mp->mutex_ownerpid is cleared by ___lwp_mutex_unlock() */
self->ul_pilocks--;
} else if (mtype & USYNC_PROCESS) {
} else { /* USYNC_THREAD */
(void) __lwp_unpark(lwpid);
}
}
if (mtype & LOCK_ROBUST)
return (error);
}
int
{
short el;
/*
* Optimize the case of USYNC_THREAD, including
* the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
* no error detection, no lock statistics,
* and the process has only a single thread.
* (Most likely a traditional single-threaded application.)
*/
if (mtype) {
/*
* At this point we know that one or both of the
* flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set.
*/
return (EPERM);
mp->mutex_rcount--;
return (0);
}
}
/*
* Only one thread exists so we don't need an atomic operation.
* Also, there can be no waiters.
*/
mp->mutex_owner = 0;
mp->mutex_lockword = 0;
return (0);
}
/*
* Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
* no error detection, and no lock statistics.
* Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
*/
(void) __lwp_unpark(lwpid);
}
return (0);
}
if (el) /* error detection or lock statistics */
goto slow_unlock;
/*
* At this point we know that one or both of the
* flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set.
*/
return (EPERM);
mp->mutex_rcount--;
return (0);
}
goto fast_unlock;
}
if ((mtype &
/*
* At this point we know that zero, one, or both of the
* flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set and
* that the USYNC_PROCESS flag is set.
*/
return (EPERM);
mp->mutex_rcount--;
return (0);
}
mutex_unlock_process(mp, 0);
return (0);
}
}
/* else do it the long way */
return (mutex_unlock_internal(mp, 0));
}
/*
 * All internal locking in libc goes through these lmutex_ functions
 * to protect critical regions.
* We replicate a bit of code from mutex_lock() and mutex_unlock()
* to make these functions faster since we know that the mutex type
* of all internal locks is USYNC_THREAD. We also know that internal
* locking can never fail, so we panic if it does.
*/
void
{
/*
* Optimize the case of no lock statistics and only a single thread.
* (Most likely a traditional single-threaded application.)
*/
/*
* Only one thread exists; the mutex must be free.
*/
} else {
if (!self->ul_schedctl_called)
(void) setup_schedctl();
}
if (msp)
}
}
void
{
/*
* Optimize the case of no lock statistics and only a single thread.
* (Most likely a traditional single-threaded application.)
*/
/*
* Only one thread exists so there can be no waiters.
*/
mp->mutex_owner = 0;
mp->mutex_lockword = 0;
} else {
if (msp)
(void) record_hold_time(msp);
(void) __lwp_unpark(lwpid);
}
}
}
/*
* For specialized code in libc, like the asynchronous i/o code,
* the following sig_*() locking primitives are used in order
* to make the code asynchronous signal safe. Signals are
* deferred while locks acquired by these functions are held.
*/
void
{
(void) mutex_lock(mp);
}
void
{
(void) mutex_unlock(mp);
}
int
{
int error;
return (error);
}
/*
* sig_cond_wait() is a cancellation point.
*/
int
{
int error;
/* take the deferred signal here */
}
return (error);
}
/*
* sig_cond_reltimedwait() is a cancellation point.
*/
int
{
int error;
/* take the deferred signal here */
}
return (error);
}
/*
 * For specialized code in libc, like the stdio code,
* the following cancel_safe_*() locking primitives are used in
* order to make the code cancellation-safe. Cancellation is
* deferred while locks acquired by these functions are held.
*/
void
{
(void) mutex_lock(mp);
}
int
{
int error;
return (error);
}
void
{
(void) mutex_unlock(mp);
/*
* Decrement the count of locks held by cancel_safe_mutex_lock().
* If we are then in a position to terminate cleanly and
* if there is a pending cancellation and cancellation
* is not disabled and we received EINTR from a recent
* system call then perform the cancellation action now.
*/
if (--self->ul_libc_locks == 0 &&
}
static int
{
/*
* The 'volatile' is necessary to make sure the compiler doesn't
* reorder the tests of the various components of the mutex.
* They must be tested in this order:
* mutex_lockw
* mutex_owner
* mutex_ownerpid
* This relies on the fact that everywhere mutex_lockw is cleared,
* mutex_owner and mutex_ownerpid are cleared before mutex_lockw
* is cleared, and that everywhere mutex_lockw is set, mutex_owner
* and mutex_ownerpid are set after mutex_lockw is set, and that
* mutex_lockw is set or cleared with a memory barrier.
*/
}
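/*
 * Hedged sketch of the ordered test explained above (not necessarily
 * the exact libc expression): the volatile-qualified pointer forces
 * the three loads to be performed, in source order, on every call.
 */
static int
shared_held_sketch(mutex_t *mparg, uintptr_t self, pid_t mypid)
{
	volatile mutex_t *mp = (volatile mutex_t *)mparg;

	return (mp->mutex_lockw != 0 &&
	    mp->mutex_owner == self &&
	    mp->mutex_ownerpid == (uint32_t)mypid);
}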
int
{
return (shared_mutex_held(mparg));
}
int
{
return (0);
}
int
{
/*
* Do this only for an inconsistent, initialized robust lock
* that we hold. For all other cases, return EINVAL.
*/
if (mutex_held(mp) &&
mp->mutex_rcount = 0;
return (0);
}
return (EINVAL);
}
/*
* Spin locks are separate from ordinary mutexes,
* but we use the same data structure for them.
*/
int
{
if (pshared == PTHREAD_PROCESS_SHARED)
else
/*
* This should be at the beginning of the function,
* but for the sake of old broken applications that
* do not have proper alignment for their mutexes
* (and don't check the return code from pthread_spin_init),
* we put it here, after initializing the mutex regardless.
*/
curthread->ul_misaligned == 0)
return (EINVAL);
return (0);
}
int
{
return (0);
}
int
{
int error = 0;
else {
}
return (error);
}
int
{
int count = 0;
/*
* We don't care whether the owner is running on a processor.
* We just spin because that's what this interface requires.
*/
for (;;) {
if (*lockp == 0) { /* lock byte appears to be clear */
if (set_lock_byte(lockp) == 0)
break;
}
count++;
SMT_PAUSE();
}
if (count) {
}
return (0);
}
int
{
mp->mutex_owner = 0;
mp->mutex_ownerpid = 0;
return (0);
}
/*
 * Find or allocate an entry for 'lock' in this thread's array of held locks.
 */
static mutex_t **
{
else {
nlocks = 1;
}
return (lockptr);
}
if (remembered != NULL) {
*remembered = lock;
return (remembered);
}
/*
* No entry available. Allocate more space, converting
* the single entry into an array of entries if necessary.
*/
/*
* Initial allocation of the array.
* Convert the single entry into an array.
*/
/*
* The single entry becomes the first entry in the array.
*/
/*
* Return the next available entry in the array.
*/
return (lockptr);
}
/*
* Reallocate the array, double the size each time.
*/
/*
* Return the next available entry in the newly allocated array.
*/
return (lockptr);
}
/*
* Insert 'lock' into our list of held locks.
* Currently only used for LOCK_ROBUST mutexes.
*/
void
{
(void) find_lock_entry(lock);
}
/*
* Remove 'lock' from our list of held locks.
* Currently only used for LOCK_ROBUST mutexes.
*/
void
{
}
/*
* Free the array of held locks.
*/
void
{
ulwp->ul_heldlockcnt = 0;
}
/*
* Mark all held LOCK_ROBUST mutexes LOCK_OWNERDEAD.
* Called from _thrp_exit() to deal with abandoned locks.
*/
void
heldlock_exit(void)
{
else {
nlocks = 1;
}
/*
* The kernel takes care of transitioning held
* LOCK_PRIO_INHERIT mutexes to LOCK_OWNERDEAD.
* We avoid that case here.
*/
mutex_held(mp) &&
LOCK_ROBUST) {
mp->mutex_rcount = 0;
}
}
}
/* ARGSUSED2 */
int
{
return (EINVAL);
/*
* This should be at the beginning of the function,
* but for the sake of old broken applications that
* do not have proper alignment for their condvars
* (and don't check the return code from cond_init),
* we put it here, after initializing the condvar regardless.
*/
curthread->ul_misaligned == 0)
return (EINVAL);
return (0);
}
/*
* cond_sleep_queue(): utility function for cond_wait_queue().
*
* Go to sleep on a condvar sleep queue, expect to be waked up
* by someone calling cond_signal() or cond_broadcast() or due
* to receiving a UNIX signal or being cancelled, or just simply
 * due to a spurious wakeup (like someone calling forkall()).
*
* The associated mutex is *not* reacquired before returning.
* That must be done by the caller of cond_sleep_queue().
*/
static int
{
int signalled;
int error;
int cv_wake;
int release_all;
/*
* Put ourself on the CV sleep queue, unlock the mutex, then
* park ourself and unpark a candidate lwp to grab the mutex.
* We must go onto the CV sleep queue before dropping the
* mutex in order to guarantee atomicity of the operation.
*/
self->ul_signalled = 0;
}
for (;;) {
if (lwpid != 0) {
}
/*
* We may have a deferred signal present,
* in which case we should return EINTR.
* Also, we may have received a SIGCANCEL; if so
* and we are cancelable we should return EINTR.
* We force an immediate EINTR return from
* __lwp_park() by turning our parking flag off.
*/
set_parking_flag(self, 0);
/*
* __lwp_park() will return the residual time in tsp
* if we are unparked before the timeout expires.
*/
set_parking_flag(self, 0);
lwpid = 0; /* unpark the other lwp only once */
/*
* We were waked up by cond_signal(), cond_broadcast(),
* by an interrupt or timeout (EINTR or ETIME),
* or we may just have gotten a spurious wakeup.
*/
if (!cv_wake)
break;
/*
* We are on either the condvar sleep queue or the
* mutex sleep queue. Break out of the sleep if we
* were interrupted or we timed out (EINTR or ETIME).
* Else this is a spurious wakeup; continue the loop.
*/
if (error) {
break;
}
if (error) {
break;
}
/*
* Else a spurious wakeup on the condvar queue.
* __lwp_park() has already adjusted the timeout.
*/
} else {
thr_panic("cond_sleep_queue(): thread not on queue");
}
if (!cv_wake)
}
self->ul_cv_wake = 0;
self->ul_signalled = 0;
if (!cv_wake)
/*
* If we were concurrently cond_signal()d and any of:
* received a UNIX signal, were cancelled, or got a timeout,
* then perform another cond_signal() to avoid consuming it.
*/
(void) cond_signal(cvp);
return (error);
}
static void
{
}
int
{
int error;
int merror;
/*
* The old thread library was programmed to defer signals
* while in cond_wait() so that the associated mutex would
* be guaranteed to be held when the application signal
* handler was invoked.
*
* We do not behave this way by default; the state of the
* associated mutex in the signal handler is undefined.
*
* To accommodate applications that depend on the old
* behavior, the _THREAD_COND_WAIT_DEFER environment
* variable can be set to 1 and we will behave in the
* old way with respect to cond_wait().
*/
if (self->ul_cond_wait_defer)
/*
* Reacquire the mutex.
*/
/*
* Take any deferred signal now, after we have reacquired the mutex.
*/
if (self->ul_cond_wait_defer)
return (error);
}
/*
* cond_sleep_kernel(): utility function for cond_wait_kernel().
* See the comment ahead of cond_sleep_queue(), above.
*/
static int
{
int error;
mp->mutex_owner = 0;
/* mp->mutex_ownerpid is cleared by ___lwp_cond_wait() */
if (mtype & LOCK_PRIO_INHERIT) {
self->ul_pilocks--;
}
/*
* ___lwp_cond_wait() returns immediately with EINTR if
* set_parking_flag(self,0) is called on this lwp before it
* goes to sleep in the kernel. sigacthandler() calls this
* when a deferred signal is noted. This assures that we don't
* get stuck in ___lwp_cond_wait() with all signals blocked
* due to taking a deferred signal before going to sleep.
*/
set_parking_flag(self, 0);
set_parking_flag(self, 0);
return (error);
}
int
{
int error;
int merror;
/*
* See the large comment in cond_wait_queue(), above.
*/
if (self->ul_cond_wait_defer)
/*
* Override the return code from ___lwp_cond_wait()
* with any non-zero return code from mutex_lock().
* This addresses robust lock failures in particular;
* the caller must see the EOWNERDEAD or ENOTRECOVERABLE
* errors in order to take corrective action.
*/
/*
* Take any deferred signal now, after we have reacquired the mutex.
*/
if (self->ul_cond_wait_defer)
return (error);
}
/*
* Common code for cond_wait() and cond_timedwait()
*/
int
{
hrtime_t begin_sleep = 0;
int error = 0;
/*
* The SUSV3 Posix spec for pthread_cond_timedwait() states:
* Except in the case of [ETIMEDOUT], all these error checks
* shall act as if they were performed immediately at the
* beginning of processing for the function and shall cause
* an error return, in effect, prior to modifying the state
* of the mutex specified by mutex or the condition variable
* specified by cond.
 * Therefore, we must return EINVAL now if the timeout is invalid.
*/
return (EINVAL);
}
if (csp) {
if (tsp)
else
}
if (msp)
else if (csp)
begin_sleep = gethrtime();
if (self->ul_error_detection) {
if (!mutex_held(mp))
if (!(mtype & USYNC_PROCESS))
"condvar process-shared, "
"mutex process-private");
} else {
if (mtype & USYNC_PROCESS)
"condvar process-private, "
"mutex process-shared");
}
}
/*
* We deal with recursive mutexes by completely
* dropping the lock and restoring the recursion
* count after waking up. This is arguably wrong,
* but it obeys the principle of least astonishment.
*/
mp->mutex_rcount = 0;
if ((mtype &
else
if (csp) {
else {
}
}
return (error);
}
/*
* cond_wait() is a cancellation point but __cond_wait() is not.
* Internally, libc calls the non-cancellation version.
* Other libraries need to use pthread_setcancelstate(), as appropriate,
* since __cond_wait() is not exported from libc.
*/
int
{
!mutex_held(mp))
return (EPERM);
/*
* Optimize the common case of USYNC_THREAD plus
* no error detection, no lock statistics, and no event tracing.
*/
/*
* Else do it the long way.
*/
}
int
{
int error;
_cancelon();
_canceloff();
else
return (error);
}
/*
* pthread_cond_wait() is a cancellation point.
*/
int
{
int error;
}
/*
* cond_timedwait() is a cancellation point but __cond_timedwait() is not.
*/
int
{
int error;
!mutex_held(mp))
return (EPERM);
/*
* Don't return ETIME if we didn't really get a timeout.
* This can happen if we return because someone resets
* the system clock. Just return zero in this case,
* giving a spurious wakeup but not a timeout.
*/
error = 0;
}
return (error);
}
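/*
 * Hedged sketch of the clock-reset guard described above (illustration
 * only; the helper name and hrtime deadline parameter are devices for
 * this sketch): if the absolute deadline, expressed on the
 * high-resolution clock, still lies in the future when ETIME comes
 * back, the wakeup is treated as spurious rather than as a timeout.
 */
static int
filter_early_etime_sketch(int error, hrtime_t deadline)
{
	if (error == ETIME && gethrtime() < deadline)
		return (0);	/* clock was reset; not a real timeout */
	return (error);
}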
int
{
int error;
_cancelon();
_canceloff();
else
return (error);
}
/*
* pthread_cond_timedwait() is a cancellation point.
*/
int
{
int error;
error = 0;
return (error);
}
/*
* cond_reltimedwait() is a cancellation point but __cond_reltimedwait() is not.
*/
int
{
!mutex_held(mp))
return (EPERM);
}
int
{
int error;
_cancelon();
_canceloff();
else
return (error);
}
int
{
int error;
error = 0;
return (error);
}
int
{
int error = 0;
int more;
if (csp)
return (error);
/*
* Move someone from the condvar sleep queue to the mutex sleep
* queue for the mutex that he will acquire on being waked up.
* We can do this only if we own the mutex he will acquire.
* If we do not own the mutex, or if his ul_cv_wake flag
* is set, just dequeue and unpark him.
*/
return (error);
}
/*
* Inform the thread that he was the recipient of a cond_signal().
* This lets him deal with cond_signal() and, concurrently,
* one or more of a cancellation, a UNIX signal, or a timeout.
* These latter conditions must not consume a cond_signal().
*/
/*
* Dequeue the waiter but leave his ul_sleepq non-NULL
* while we move him to the mutex queue so that he can
* deal properly with spurious wakeups.
*/
/* just wake him up */
(void) __lwp_unpark(lwpid);
} else {
/* move him to the mutex queue */
}
return (error);
}
/*
* Utility function called by mutex_wakeup_all(), cond_broadcast(),
* and rw_queue_release() to (re)allocate a big buffer to hold the
* lwpids of all the threads to be set running after they are removed
* from their sleep queues. Since we are holding a queue lock, we
* cannot call any function that might acquire a lock. mmap(), munmap(),
* lwp_unpark_all() are simple system calls and are safe in this regard.
*/
lwpid_t *
{
/*
* Allocate NEWLWPS ids on the first overflow.
* Double the allocation each time after that.
*/
int nlwpid = *nlwpid_ptr;
int maxlwps = *maxlwps_ptr;
int first_allocation;
int newlwps;
void *vaddr;
if (vaddr == MAP_FAILED) {
/*
* Let's hope this never happens.
* If it does, then we have a terrible
* thundering herd on our hands.
*/
*nlwpid_ptr = 0;
} else {
if (!first_allocation)
*maxlwps_ptr = newlwps;
}
return (lwpid);
}
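/*
 * Hedged sketch of the growth policy described above (illustration
 * only; the sizes and helper name are assumptions): the first overflow
 * maps a fresh buffer, later overflows double it, and mmap()/munmap()
 * are used precisely because they acquire no user-level locks.  The
 * very first buffer is on the caller's stack, so it must not be
 * munmap()ed.
 */
static lwpid_t *
grow_lwpid_buf_sketch(lwpid_t *oldbuf, int nlwpid, int *maxlwps, int first)
{
	int newmax = first ? 128 : 2 * *maxlwps;	/* 128 is an assumed first size */
	lwpid_t *newbuf;
	int i;

	newbuf = mmap(NULL, newmax * sizeof (lwpid_t), PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, (off_t)0);
	if (newbuf == MAP_FAILED)
		return (NULL);		/* caller must cope with what it already has */
	for (i = 0; i < nlwpid; i++)	/* preserve the ids gathered so far */
		newbuf[i] = oldbuf[i];
	if (!first)			/* the old buffer was itself mmap()ed */
		(void) munmap((void *)oldbuf, *maxlwps * sizeof (lwpid_t));
	*maxlwps = newmax;
	return (newbuf);
}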
int
{
int error = 0;
int nlwpid = 0;
if (csp)
return (error);
/*
* Move everyone from the condvar sleep queue to the mutex sleep
* queue for the mutex that they will acquire on being waked up.
* We can do this only if we own the mutex they will acquire.
* If we do not own the mutex, or if their ul_cv_wake flag
* is set, just dequeue and unpark them.
*
* We keep track of lwpids that are to be unparked in lwpid[].
* __lwp_unpark_all() is called to unpark all of them after
* they have been removed from the sleep queue and the sleep
* queue lock has been dropped. If we run out of space in our
* on-stack buffer, we need to allocate more but we can't call
* lmalloc() because we are holding a queue lock when the overflow
* occurs and lmalloc() acquires a lock. We can't use alloca()
* either because the application may have allocated a small
* stack and we don't want to overrun the stack. So we call
* alloc_lwpids() to allocate a bigger buffer using the mmap()
* system call directly since that path acquires no locks.
*/
cvp->cond_waiters_user = 0;
for (;;) {
break;
/* just wake him up */
} else {
/* move him to the mutex queue */
}
}
}
if (nlwpid == 0) {
} else {
if (nlwpid == 1)
(void) __lwp_unpark(lwpid[0]);
else
}
return (error);
}
int
{
cvp->cond_magic = 0;
return (0);
}
#if defined(THREAD_DEBUG)
void
{
}
/* protected by link_lock */
/*
* Record spin lock statistics.
* Called by a thread exiting itself in thrp_exit().
* Also called via atexit() from the thread calling
* exit() to do all the other threads as well.
*/
void
{
ulwp->ul_spin_lock_spin = 0;
ulwp->ul_spin_lock_spin2 = 0;
ulwp->ul_spin_lock_sleep = 0;
ulwp->ul_spin_lock_wakeup = 0;
}
/*
* atexit function: dump the queue statistics to stderr.
*/
#include <stdio.h>
void
dump_queue_statistics(void)
{
int qn;
uint64_t spin_lock_total = 0;
return;
return;
if (qp->qh_lockcount == 0)
continue;
return;
}
return;
if (qp->qh_lockcount == 0)
continue;
return;
}
}
#endif