/* synch.c, revision e8031f0a8ed0e45c6d8847c5e09424e66fd34a4b */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include "lint.h"
#include "thr_uberdata.h"
/*
* This mutex is initialized to be held by lwp#1.
* It is used to block a thread that has returned from a mutex_lock()
* of a PTHREAD_PRIO_INHERIT mutex with an unrecoverable error.
*/
mutex_t	stall_mutex = DEFAULTMUTEX;
static int shared_mutex_held(mutex_t *);
/*
* Lock statistics support functions.
*/
void
{
}
{
if (msp->mutex_begin_hold)
msp->mutex_begin_hold = 0;
return (now);
}
/*
* Called once at library initialization.
*/
void
mutex_setup(void)
{
thr_panic("mutex_setup() cannot acquire stall_mutex");
}
/*
* The default spin counts of 1000 and 500 are experimentally determined.
* On sun4u machines with any number of processors they could be raised
* to 10,000 but that (experimentally) makes almost no difference.
* The environment variables:
* _THREAD_ADAPTIVE_SPIN=count
* _THREAD_RELEASE_SPIN=count
* can be used to override and set the counts in the range [0 .. 1,000,000].
*/
int thread_adaptive_spin = 1000;
int thread_release_spin = 500;
int thread_queue_verify = 0;
static int ncpus;
/*
* Distinguish spinning for queue locks from spinning for regular locks.
* The environment variable:
* _THREAD_QUEUE_SPIN=count
* can be used to override and set the count in the range [0 .. 1,000,000].
* There is no release spin concept for queue locks.
*/
int thread_queue_spin = 1000;
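/*
 * Editor's note: the sketch below is not part of synch.c; it is a minimal
 * illustration of how a _THREAD_*_SPIN-style environment variable could be
 * read and clamped to the documented range [0 .. 1,000,000].  The helper
 * name and the use of getenv()/strtol() are assumptions.
 */
#include <stdlib.h>

static int
example_spin_count_from_env(const char *name, int default_count)
{
	const char *val = getenv(name);
	long count;

	if (val == NULL || *val == '\0')
		return (default_count);
	count = strtol(val, NULL, 10);
	if (count < 0)
		count = 0;
	else if (count > 1000000)
		count = 1000000;
	return ((int)count);
}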
/*
* Use the otherwise-unused 'mutex_ownerpid' field of a USYNC_THREAD
* mutex to be a count of adaptive spins in progress.
*/
#define mutex_spinners mutex_ownerpid
void
{
}
/*
* 'type' can be one of USYNC_THREAD or USYNC_PROCESS, possibly
* augmented by the flags LOCK_RECURSIVE and/or LOCK_ERRORCHECK,
* or it can be USYNC_PROCESS_ROBUST with no extra flags.
*/
/* ARGSUSED2 */
int
{
int error;
case USYNC_THREAD:
case USYNC_PROCESS:
error = 0;
break;
case USYNC_PROCESS_ROBUST:
else
break;
default:
break;
}
if (error == 0)
return (error);
}
/*
* Delete mp from list of ceil mutexes owned by curthread.
* Return 1 if the head of the chain was updated.
*/
int
{
}
/*
* Add mp to head of list of ceil mutexes owned by curthread.
* Return ENOMEM if no memory could be allocated.
*/
int
{
return (ENOMEM);
return (0);
}
/*
* Inherit priority from ceiling. The inheritance impacts the effective
* priority, not the assigned priority. See _thread_setschedparam_main().
*/
void
_ceil_prio_inherit(int ceil)
{
struct sched_param param;
/*
* Panic since unclear what error code to return.
* If we do return the error codes returned by above
* called routine, update the man page...
*/
thr_panic("_thread_setschedparam_main() fails");
}
}
/*
* Waive inherited ceiling priority. Inherit from head of owned ceiling locks
* if holding at least one ceiling lock. If no ceiling locks are held at this
* point, disinherit completely, reverting back to assigned priority.
*/
void
_ceil_prio_waive(void)
{
struct sched_param param;
/*
* No ceil locks held. Zero the epri, revert back to ul_pri.
* Since thread's hash lock is not held, one cannot just
* read ul_pri here...do it in the called routine...
*/
thr_panic("_thread_setschedparam_main() fails");
} else {
/*
* Set priority to that of the mutex at the head
* of the ceilmutex chain.
*/
thr_panic("_thread_setschedparam_main() fails");
}
}
/*
* Non-preemptive spin locks. Used by queue_lock().
* No lock statistics are gathered for these locks.
*/
void
{
return;
}
/*
* Spin for a while, attempting to acquire the lock.
*/
if (mutex_queuelock_adaptive(mp) == 0 ||
return;
}
/*
* Try harder if we were previously at a no preemption level.
*/
if (mutex_queuelock_adaptive(mp) == 0 ||
return;
}
}
/*
* Give up and block in the kernel for the mutex.
*/
}
void
{
mp->mutex_owner = 0;
(void) ___lwp_mutex_wakeup(mp);
}
}
/*
* Allocate the sleep queue hash table.
*/
void
queue_alloc(void)
{
void *data;
int i;
/*
* No locks are needed; we call here only when single-threaded.
*/
== MAP_FAILED)
thr_panic("cannot allocate thread queue_head table");
for (i = 0; i < 2 * QHASHSIZE; i++)
}
#if defined(THREAD_DEBUG)
/*
* Debugging: verify correctness of a sleep queue.
*/
void
{
char qtype;
void *wchan;
if (!thread_queue_verify)
return;
/* real expensive stuff, only for _THREAD_QUEUE_VERIFY */
}
}
#else /* THREAD_DEBUG */
#endif /* THREAD_DEBUG */
/*
* Acquire a queue head.
*/
{
/*
* It is possible that we could be called while still single-threaded.
* If so, we call queue_alloc() to allocate the queue_head[] array.
*/
queue_alloc();
}
/*
* At once per nanosecond, qh_lockcount will wrap after about 584 years.
* Were we to care about this, we could peg the value at UINT64_MAX.
*/
qp->qh_lockcount++;
return (qp);
}
/*
* Release a queue head.
*/
void
{
}
/*
* For rwlock queueing, we must queue writers ahead of readers of the
* same priority. We do this by making writers appear to have a half
* point higher priority for purposes of priority comparisons below.
*/
void
{
int do_fifo;
/*
* LIFO queue ordering is unfair and can lead to starvation,
* but it gives better performance for heavily contended locks.
* We use thread_queue_fifo (range is 0..8) to determine
* the frequency of FIFO vs LIFO queuing:
* 0 : every 256th time (almost always LIFO)
* 1 : every 128th time
* 2 : every 64th time
* 3 : every 32nd time
* 4 : every 16th time (the default value, mostly LIFO)
* 5 : every 8th time
* 6 : every 4th time
* 7 : every 2nd time
* 8 : every time (never LIFO, always FIFO)
* Note that there is always some degree of FIFO ordering.
* This breaks live lock conditions that occur in applications
* that are written assuming (incorrectly) that threads acquire
* locks fairly, that is, in roughly round-robin order.
* In any event, the queue is maintained in priority order.
*
* If we are given the FIFOQ flag in qtype, fifo queueing is forced.
* SUSV3 requires this for semaphores.
*/
do_fifo = (force_fifo ||
/*
*/
} else if (do_fifo) {
/*
* Enqueue after the last thread whose priority is greater
* than or equal to the priority of the thread being queued.
* Attempt first to go directly onto the tail of the queue.
*/
else {
break;
}
} else {
/*
* Enqueue before the first thread whose priority is less
* than or equal to the priority of the thread being queued.
* Hopefully we can go directly onto the head of the queue.
*/
break;
}
}
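/*
 * Editor's note: illustrative sketch only, not the enqueue code itself.
 * It shows one way to realize the FIFO-frequency table described above:
 * with a setting in the range 0..8, queue FIFO on every 2^(8 - setting)-th
 * enqueue, so 8 means every time and 0 means every 256th time.  The
 * function and parameter names are assumptions.
 */
static int
example_do_fifo(unsigned int *enqueue_counter, int fifo_setting,
	int force_fifo)
{
	unsigned int period = 1U << (8 - fifo_setting); /* 256 down to 1 */

	if (force_fifo)
		return (1);
	return ((++*enqueue_counter & (period - 1)) == 0);
}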
/*
* Return a pointer to the queue slot of the
* highest priority thread on the queue.
* On return, prevp, if not NULL, will contain a pointer
* to the thread's predecessor on the queue
*/
static ulwp_t **
{
/*
* Find a waiter on the sleep queue.
*/
break;
/*
* Try not to return a suspended thread.
* This mimics the old libthread's behavior.
*/
}
}
}
}
*more = 0;
return (NULL);
}
return (ulwpp);
/*
* Scan the remainder of the queue for another waiter.
*/
*more = 1;
return (ulwpp);
}
*more = 1;
return (ulwpp);
}
}
*more = 0;
return (ulwpp);
}
ulwp_t *
{
return (NULL);
/*
* Dequeue the waiter.
*/
return (ulwp);
}
/*
* Return a pointer to the highest priority thread sleeping on wchan.
*/
ulwp_t *
{
return (NULL);
return (*ulwpp);
}
{
int found = 0;
int more = 0;
/* find self on the sleep queue */
/* dequeue ourself */
self->ul_cv_wake = 0;
found = 1;
break;
}
more = 1;
}
if (!found)
thr_panic("dequeue_self(): curthread not found on queue");
if (more)
return (1);
/* scan the remainder of the queue for another waiter */
return (1);
}
return (0);
}
/*
* Called from call_user_handler() and _thrp_suspend() to take
* ourself off of our sleep queue so we can grab locks.
*/
void
unsleep_self(void)
{
/*
* Calling enter_critical()/exit_critical() here would lead
* to recursion. Just manipulate self->ul_critical directly.
*/
self->ul_critical++;
/*
* We may have been moved from a CV queue to a
* mutex queue while we were attempting queue_lock().
* If so, just loop around and try again.
* dequeue_self() clears self->ul_sleepq.
*/
}
self->ul_critical--;
}
/*
* Common code for calling the ___lwp_mutex_timedlock() system call.
* Returns with mutex_owner and mutex_ownerpid set correctly.
*/
int
{
int error;
}
if (msp) {
begin_sleep = gethrtime();
}
for (;;) {
break;
}
/*
* Defend against forkall(). We may be the child,
* in which case we don't actually own the mutex.
*/
0, 0);
break;
}
} else {
break;
}
}
if (msp)
return (error);
}
/*
* Common code for calling the ___lwp_mutex_trylock() system call.
* Returns with mutex_owner and mutex_ownerpid set correctly.
*/
int
{
int error;
for (;;) {
error);
}
break;
}
/*
* Defend against forkall(). We may be the child,
* in which case we don't actually own the mutex.
*/
0, 0);
break;
}
} else {
break;
}
}
return (error);
}
volatile sc_shared_t *
setup_schedctl(void)
{
volatile sc_shared_t *scp;
}
/*
* Unless the call to setup_schedctl() is surrounded
* by enter_critical()/exit_critical(), the address
* we are returning could be invalid due to a forkall()
* having occurred in another thread.
*/
return (scp);
}
/*
* Interfaces from libsched, incorporated into libc.
* libsched.so.1 is now a filter library onto libc.
*/
_schedctl_init(void)
{
}
void
_schedctl_exit(void)
{
}
/*
* Contract private interface for java.
* Set up the schedctl data if it doesn't exist yet.
* Return a pointer to the pointer to the schedctl data.
*/
volatile sc_shared_t *volatile *
_thr_schedctl(void)
{
volatile sc_shared_t *volatile *ptr;
return (NULL);
(void) setup_schedctl();
return (ptr);
}
/*
* Block signals and attempt to block preemption.
* no_preempt()/preempt() must be used in pairs but can be nested.
*/
void
{
volatile sc_shared_t *scp;
if (self->ul_preempt++ == 0) {
/*
* Save the pre-existing preempt value.
*/
}
}
}
/*
* Undo the effects of no_preempt().
*/
void
{
volatile sc_shared_t *scp;
if (--self->ul_preempt == 0) {
/*
* Restore the pre-existing preempt value.
*/
lwp_yield();
/*
* Shouldn't happen. This is either
* a race condition or the thread
* just entered the real-time class.
*/
lwp_yield();
}
}
}
}
}
/*
* If a call to preempt() would cause the current thread to yield or to
* take deferred actions in exit_critical(), then unpark the specified
* lwp so it can run while we delay. Return the original lwpid if the
* unpark was not performed, else return zero. The tests are a repeat
* of some of the tests in preempt(), above. This is a statistical
* optimization solely for cond_sleep_queue(), below.
*/
static lwpid_t
{
(void) __lwp_unpark(lwpid);
lwpid = 0;
}
return (lwpid);
}
/*
* Spin for a while, trying to grab the lock. We know that we
* failed set_lock_byte(&mp->mutex_lockw) once before coming here.
* If this fails, return EBUSY and let the caller deal with it.
* If this succeeds, return 0 with mutex_owner set to curthread.
*/
int
{
volatile sc_shared_t *scp;
return (EBUSY);
/*
* This spin loop is unfair to lwps that have already dropped into
* the kernel to sleep. They will starve on a highly-contended mutex.
* This is just too bad. The adaptive spin algorithm is intended
* to allow programs with highly-contended locks (that is, broken
* programs) to execute with reasonable speed despite their contention.
* Being fair would reduce the speed of such programs and well-written
* programs will not suffer in any case.
*/
return (0);
}
SMT_PAUSE();
/*
* Stop spinning if the mutex owner is not running on
* a processor; it will not drop the lock any time soon
* and we would just be wasting time to keep spinning.
*
* Note that we are looking at another thread (ulwp_t)
* without ensuring that the other thread does not exit.
* The scheme relies on ulwp_t structures never being
* deallocated by the library (the library employs a free
* list of ulwp_t structs that are reused when new threads
* are created) and on schedctl shared memory never being
* deallocated once created via __schedctl().
*
* Thus, the worst that can happen when the spinning thread
* looks at the owner's schedctl data is that it is looking
* at some other thread's schedctl data. This almost never
* happens and is benign when it does.
*/
break;
}
return (EBUSY);
}
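/*
 * Editor's note: illustrative sketch only.  It mirrors the shape of the
 * adaptive spin described above: keep trying to grab the lock byte for a
 * bounded number of iterations, but give up early once the owner is known
 * not to be running on a processor.  The real code uses set_lock_byte()
 * and the owner's schedctl data; here C11 atomics and a caller-supplied
 * owner_running() predicate stand in for them.
 */
#include <errno.h>
#include <stdatomic.h>

static int
example_trylock_adaptive(atomic_flag *lock, int count,
	int (*owner_running)(void))
{
	while (--count >= 0) {
		if (!atomic_flag_test_and_set_explicit(lock,
		    memory_order_acquire))
			return (0);	/* we acquired the lock */
		if (!owner_running())
			break;		/* owner is off-cpu; stop spinning */
	}
	return (EBUSY);
}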
/*
* Same as mutex_trylock_adaptive(), except specifically for queue locks.
* The owner field is not set here; the caller (spin_lock_set()) sets it.
*/
int
{
volatile sc_shared_t *scp;
if (count == 0)
return (EBUSY);
while (--count >= 0) {
return (0);
SMT_PAUSE();
break;
}
return (EBUSY);
}
/*
* Like mutex_trylock_adaptive(), but for process-shared mutexes.
* Spin for a while, trying to grab the lock. We know that we
* failed set_lock_byte(&mp->mutex_lockw) once before coming here.
* If this fails, return EBUSY and let the caller deal with it.
* If this succeeds, return 0 with mutex_owner set to curthread
* and mutex_ownerpid set to the current pid.
*/
int
{
int count;
if (count == 0)
return (EBUSY);
/*
* This is a process-shared mutex.
* We cannot know if the owner is running on a processor.
* We just spin and hope that it is on a processor.
*/
while (--count >= 0) {
if (*lockp == 0) {
if (set_lock_byte(lockp) == 0) {
0, 0);
return (0);
}
SMT_PAUSE();
continue;
}
/*
* The owner of the lock changed; start the count over again.
* This may be too aggressive; it needs testing.
*/
}
return (EBUSY);
}
/*
* Mutex wakeup code for releasing a USYNC_THREAD mutex.
* Returns the lwpid of the thread that was dequeued, if any.
* The caller of mutex_wakeup() must call __lwp_unpark(lwpid)
* to wake up the specified lwp.
*/
{
int more;
/*
* Dequeue a waiter from the sleep queue. Don't touch the mutex
* waiters bit if no one was found on the queue because the mutex
* might have been deallocated or reallocated for another purpose.
*/
}
return (lwpid);
}
/*
* Spin for a while, testing to see if the lock has been grabbed.
* If this fails, call mutex_wakeup() to release a waiter.
*/
{
int count;
/*
* We use the swap primitive to clear the lock, but we must
* atomically retain the waiters bit for the remainder of this
* code to work. We first check to see if the waiters bit is
* set and if so clear the lock by swapping in a word containing
* only the waiters bit. This could produce a false positive test
* for whether there are waiters that need to be waked up, but
* this just causes an extra call to mutex_wakeup() to do nothing.
* The opposite case is more delicate: If there are no waiters,
* we swap in a zero lock byte and a zero waiters bit. The result
* of the swap could indicate that there really was a waiter so in
* this case we go directly to mutex_wakeup() without performing
* any of the adaptive code because the waiter bit has been cleared
* and the adaptive code is unreliable in this case.
*/
mp->mutex_owner = 0;
return (0);
} else {
mp->mutex_owner = 0;
/*
* We spin here fewer times than mutex_trylock_adaptive().
* We are trying to balance two conflicting goals:
* 1. Avoid waking up anyone if a spinning thread
* grabs the lock.
* 2. Wake up a sleeping thread promptly to get on
* with useful work.
* We don't spin at all if there is no acquiring spinner;
* (mp->mutex_spinners is non-zero if there are spinners).
*/
/*
* There is a waiter that we will have to wake
* up unless someone else grabs the lock while
* we are busy spinning. Like the spin loop in
* mutex_trylock_adaptive(), this spin loop is
* unfair to lwps that have already dropped into
* the kernel to sleep. They will starve on a
* highly-contended mutex. Too bad.
*/
if (*lockp != 0) { /* somebody grabbed the lock */
return (0);
}
SMT_PAUSE();
}
/*
* No one grabbed the lock.
* Wake up some lwp that is waiting for it.
*/
mp->mutex_waiters = 0;
}
if (lwpid == 0)
return (lwpid);
}
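/*
 * Editor's note: illustrative sketch only, with an assumed lockword layout
 * (low byte is the lock, one high bit is the waiters flag).  It shows the
 * swap discipline described in the comment above: if waiters are present,
 * swap in a word containing only the waiters bit; otherwise swap in zero.
 * Either way, the swapped-out value tells the caller whether a wakeup may
 * be needed.  The real code may still spin briefly in the retained-waiters
 * case before waking anyone.
 */
#include <stdatomic.h>
#include <stdint.h>

#define	EXAMPLE_WAITER_BIT	0x00010000U

static int			/* returns 1 if a waiter may need waking */
example_clear_lockword(_Atomic uint32_t *lockword)
{
	uint32_t old;

	if (atomic_load(lockword) & EXAMPLE_WAITER_BIT) {
		/* clear the lock byte but retain the waiters bit */
		old = atomic_exchange(lockword, EXAMPLE_WAITER_BIT);
	} else {
		/* clear everything; the old value may still show a waiter */
		old = atomic_exchange(lockword, 0);
	}
	return ((old & EXAMPLE_WAITER_BIT) != 0);
}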
/*
* Like mutex_unlock_queue(), but for process-shared mutexes.
* We tested the waiters field before calling here and it was non-zero.
*/
void
{
int count;
/*
* See the comments in mutex_unlock_queue(), above.
*/
mp->mutex_owner = 0;
mp->mutex_ownerpid = 0;
if (count == 0) {
/* clear lock, test waiter */
/* no waiters now */
return;
}
} else {
/* clear lock, retain waiter */
while (--count >= 0) {
if (*lockp != 0) {
/* somebody grabbed the lock */
return;
}
SMT_PAUSE();
}
/*
* We must clear the waiters field before going
* to the kernel, else it could remain set forever.
*/
mp->mutex_waiters = 0;
}
(void) ___lwp_mutex_wakeup(mp);
}
/*
* Return the real priority of a thread.
*/
int
{
}
void
stall(void)
{
for (;;)
}
/*
* Acquire a USYNC_THREAD mutex via user-level sleep queues.
* We failed set_lock_byte(&mp->mutex_lockw) before coming here.
* Returns with mutex_owner set correctly.
*/
int
{
int error = 0;
}
if (msp) {
begin_sleep = gethrtime();
}
/*
* Put ourself on the sleep queue, and while we are
* unable to grab the lock, go park in the kernel.
* Take ourself off the sleep queue after we acquire the lock.
*/
for (;;) {
break;
}
/*
* __lwp_park() will return the residual time in tsp
* if we are unparked before the timeout expires.
*/
error = 0;
set_parking_flag(self, 0);
/*
* We could have taken a signal or suspended ourself.
* If we did, then we removed ourself from the queue.
* Someone else may have removed us from the queue
* as a consequence of mutex_unlock(). We may have
* gotten a timeout from __lwp_park(). Or we may still
* be on the queue and this is just a spurious wakeup.
*/
if (error) {
error);
break;
}
0, 0);
break;
}
}
if (error) {
break;
}
}
if (msp)
return (error);
}
/*
* Returns with mutex_owner set correctly.
*/
int
{
int error = 0;
if (!self->ul_schedctl_called)
(void) setup_schedctl();
if (mtype & LOCK_RECURSIVE) {
} else {
mp->mutex_rcount++;
1, 0);
return (0);
}
return (EBUSY);
} else {
return (EDEADLK);
}
}
if (mtype &
int myprio;
if (mtype & PTHREAD_PRIO_PROTECT) {
EINVAL);
return (EINVAL);
}
error);
return (error);
}
}
if (mtype & PTHREAD_PRIO_INHERIT) {
/* go straight to the kernel */
else /* MUTEX_LOCK */
/*
* The kernel never sets or clears the lock byte
* for PTHREAD_PRIO_INHERIT mutexes.
* Set it here for debugging consistency.
*/
switch (error) {
case 0:
case EOWNERDEAD:
break;
}
} else if (mtype & USYNC_PROCESS_ROBUST) {
/* go straight to the kernel */
else /* MUTEX_LOCK */
} else { /* PTHREAD_PRIO_PROTECT */
/*
* Try once at user level before going to the kernel.
* If this is a process shared mutex then protect
* against forkall() while setting mp->mutex_ownerpid.
*/
mutex__acquire, mp, 0, 0);
} else {
}
} else {
mutex__acquire, mp, 0, 0);
} else {
}
}
}
if (error) {
if (mtype & PTHREAD_PRIO_INHERIT) {
switch (error) {
case EOWNERDEAD:
case ENOTRECOVERABLE:
if (mtype & PTHREAD_MUTEX_ROBUST_NP)
break;
if (error == EOWNERDEAD) {
/*
* We own the mutex; unlock it.
* It becomes ENOTRECOVERABLE.
* All waiters are waked up.
*/
mp->mutex_owner = 0;
mp->mutex_ownerpid = 0;
mutex__release, mp, 0);
(void) ___lwp_mutex_unlock(mp);
}
/* FALLTHROUGH */
case EDEADLK:
if (try == MUTEX_LOCK)
stall();
break;
}
}
if ((mtype & PTHREAD_PRIO_PROTECT) &&
error != EOWNERDEAD) {
(void) _ceil_mylist_del(mp);
}
}
} else if (mtype & USYNC_PROCESS) {
/*
* This is a process shared mutex. Protect against
* forkall() while setting mp->mutex_ownerpid.
*/
} else {
/* try a little harder */
}
} else { /* USYNC_THREAD */
/* try once */
} else {
/* try a little harder if we don't own the mutex */
}
}
switch (error) {
case EOWNERDEAD:
case ELOCKUNMAPPED:
/* FALLTHROUGH */
case 0:
if (msp)
break;
default:
if (msp)
}
}
break;
}
return (error);
}
int
{
/*
* We know that USYNC_PROCESS is set in mtype and that
* zero, one, or both of the flags LOCK_RECURSIVE and
* LOCK_ERRORCHECK are set, and that no other flags are set.
*/
return (0);
}
if (mtype & LOCK_RECURSIVE) {
return (EAGAIN);
mp->mutex_rcount++;
return (0);
}
if (try == MUTEX_LOCK) {
return (EDEADLK);
}
return (EBUSY);
}
/* try a little harder if we don't own the mutex */
return (0);
if (try == MUTEX_LOCK)
}
return (EBUSY);
}
static int
{
int error = 0;
return (error);
}
int
{
int mtype;
/*
* Optimize the case of USYNC_THREAD, including
* the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
* no error detection, no lock statistics,
* and the process has only a single thread.
* (Most likely a traditional single-threaded application.)
*/
/*
* Only one thread exists so we don't need an atomic operation.
*/
if (mp->mutex_lockw == 0) {
return (0);
}
/*
* LOCK_RECURSIVE, LOCK_ERRORCHECK, or both.
*/
if (mtype & LOCK_RECURSIVE) {
return (EAGAIN);
mp->mutex_rcount++;
1, 0);
return (0);
}
return (EDEADLK); /* LOCK_ERRORCHECK */
}
/*
* We have reached a deadlock, probably because the
* process is executing non-async-signal-safe code in
* a signal handler and is attempting to acquire a lock
* that it already owns. This is not surprising, given
* bad programming practices over the years that have
* resulted in applications calling printf() and such
* in their signal handlers. Unless the user has told
* us that the signal handlers are safe by setting:
* export _THREAD_ASYNC_SAFE=1
* we return EDEADLK rather than actually deadlocking.
*/
return (EDEADLK);
}
}
/*
* Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
* no error detection, and no lock statistics.
* Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
*/
(gflags->uf_trs_ted |
if (mtype & USYNC_PROCESS)
return (0);
}
if (mtype & LOCK_RECURSIVE) {
return (EAGAIN);
mp->mutex_rcount++;
1, 0);
return (0);
}
return (EDEADLK); /* LOCK_ERRORCHECK */
}
}
/* else do it the long way */
}
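/*
 * Editor's note: illustrative sketch only.  It shows the shape of the
 * single-thread fast path described above: while the process has exactly
 * one thread, an uncontended lock byte can be tested and set with plain
 * loads and stores, since nothing can race with us.  The single_threaded
 * flag stands in for libc's own bookkeeping; a multithreaded caller would
 * fall back to an atomic test-and-set such as set_lock_byte().
 */
#include <errno.h>
#include <stdint.h>

static int
example_fast_trylock(uint8_t *lock_byte, int single_threaded)
{
	if (!single_threaded)
		return (EBUSY);		/* caller must take the atomic path */
	if (*lock_byte != 0)
		return (EBUSY);		/* already held */
	*lock_byte = 1;			/* plain store; no atomic needed */
	return (0);
}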
int
{
}
int
{
int error;
return (error);
}
int
{
int error;
return (error);
}
static int
{
mutex_trylock_adaptive(mp) != 0) {
}
return (EBUSY);
}
return (0);
}
int
{
int mtype;
/*
* Optimize the case of USYNC_THREAD, including
* the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
* no error detection, no lock statistics,
* and the process has only a single thread.
* (Most likely a traditional single-threaded application.)
*/
/*
* Only one thread exists so we don't need an atomic operation.
*/
if (mp->mutex_lockw == 0) {
return (0);
}
if (mtype & LOCK_RECURSIVE) {
return (EAGAIN);
mp->mutex_rcount++;
1, 0);
return (0);
}
return (EDEADLK); /* LOCK_ERRORCHECK */
}
return (EBUSY);
}
/*
* Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
* no error detection, and no lock statistics.
* Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
*/
(gflags->uf_trs_ted |
if (mtype & USYNC_PROCESS)
return (0);
}
if (mtype & LOCK_RECURSIVE) {
return (EAGAIN);
mp->mutex_rcount++;
1, 0);
return (0);
}
return (EBUSY); /* LOCK_ERRORCHECK */
}
}
/* else do it the long way */
}
int
{
int error;
return (EPERM);
mp->mutex_rcount--;
return (0);
}
(void) record_hold_time(msp);
if (mtype &
mp->mutex_owner = 0;
mp->mutex_ownerpid = 0;
if (mtype & PTHREAD_PRIO_INHERIT) {
} else if (mtype & USYNC_PROCESS_ROBUST) {
} else {
(void) ___lwp_mutex_wakeup(mp);
error = 0;
}
if (mtype & PTHREAD_PRIO_PROTECT) {
if (_ceil_mylist_del(mp))
}
} else if (mtype & USYNC_PROCESS) {
else {
mp->mutex_owner = 0;
mp->mutex_ownerpid = 0;
(void) ___lwp_mutex_wakeup(mp);
}
}
error = 0;
} else { /* USYNC_THREAD */
(void) __lwp_unpark(lwpid);
}
error = 0;
}
return (error);
}
int
{
int mtype;
short el;
/*
* Optimize the case of USYNC_THREAD, including
* the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
* no error detection, no lock statistics,
* and the process has only a single thread.
* (Most likely a traditional single-threaded application.)
*/
if (mtype) {
/*
* At this point we know that one or both of the
* flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set.
*/
return (EPERM);
mp->mutex_rcount--;
return (0);
}
}
/*
* Only one thread exists so we don't need an atomic operation.
* Also, there can be no waiters.
*/
mp->mutex_owner = 0;
mp->mutex_lockword = 0;
return (0);
}
/*
* Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
* no error detection, and no lock statistics.
* Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
*/
/* no waiter exists right now */
mp->mutex_owner = 0;
WAITERMASK) {
/* a waiter suddenly appeared */
(void) __lwp_unpark(lwpid);
}
(void) __lwp_unpark(lwpid);
}
return (0);
}
if (el) /* error detection or lock statistics */
goto slow_unlock;
/*
* At this point we know that one or both of the
* flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set.
*/
return (EPERM);
mp->mutex_rcount--;
return (0);
}
goto fast_unlock;
}
if ((mtype &
/*
* At this point we know that zero, one, or both of the
* flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set and
* that the USYNC_PROCESS flag is set.
*/
return (EPERM);
mp->mutex_rcount--;
return (0);
}
else {
mp->mutex_owner = 0;
mp->mutex_ownerpid = 0;
WAITERMASK) {
(void) ___lwp_mutex_wakeup(mp);
}
}
return (0);
}
}
/* else do it the long way */
return (mutex_unlock_internal(mp));
}
/*
* Internally to the library, almost all mutex lock/unlock actions
* go through these lmutex_ functions, to protect critical regions.
* We replicate a bit of code from __mutex_lock() and __mutex_unlock()
* to make these functions faster since we know that the mutex type
* of all internal locks is USYNC_THREAD. We also know that internal
* locking can never fail, so we panic if it does.
*/
void
{
/*
* Optimize the case of no lock statistics and only a single thread.
* (Most likely a traditional single-threaded application.)
*/
/*
* Only one thread exists; the mutex must be free.
*/
} else {
if (!self->ul_schedctl_called)
(void) setup_schedctl();
} else if (mutex_trylock_adaptive(mp) != 0) {
}
if (msp)
}
}
void
{
/*
* Optimize the case of no lock statistics and only a single thread.
* (Most likely a traditional single-threaded application.)
*/
/*
* Only one thread exists so there can be no waiters.
*/
mp->mutex_owner = 0;
mp->mutex_lockword = 0;
} else {
if (msp)
(void) record_hold_time(msp);
(void) __lwp_unpark(lwpid);
}
}
}
static int
{
/*
* There is an inherent data race in the current ownership design.
* The mutex_owner and mutex_ownerpid fields cannot be set or tested
* atomically as a pair. The original implementation tested each
* field just once. This was exposed to trivial false positives in
* the case of multiple multithreaded processes with thread addresses
* in common. To close the window to an acceptable level we now use a
* sequence of five tests: pid-thr-pid-thr-pid. This ensures that any
* single interruption will still leave one uninterrupted sequence of
* pid-thr-pid tests intact.
*
* It is assumed that all updates are always ordered thr-pid and that
* we have TSO hardware.
*/
return (0);
return (0);
return (0);
return (0);
return (0);
return (1);
}
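/*
 * Editor's note: illustrative sketch only.  It spells out the
 * pid-thr-pid-thr-pid sequence described above for a lock whose owner is
 * recorded as a (thread address, pid) pair that cannot be read atomically
 * as a unit.  The example_owner_t layout is an assumption; the point is
 * only the interleaved order of the five tests.
 */
#include <sys/types.h>

typedef struct {
	volatile void	*owner;		/* owning thread; updated before pid */
	volatile pid_t	ownerpid;	/* owning process */
} example_owner_t;

static int
example_shared_lock_held(const example_owner_t *op, const void *self,
	pid_t mypid)
{
	if (op->ownerpid != mypid)
		return (0);
	if (op->owner != self)
		return (0);
	if (op->ownerpid != mypid)
		return (0);
	if (op->owner != self)
		return (0);
	if (op->ownerpid != mypid)
		return (0);
	return (1);
}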
/*
* Some crufty old programs define their own version of _mutex_held()
* to be simply return(1). This breaks internal libc logic, so we
* define a private version for exclusive use by libc, mutex_is_held(),
* and also a new public function, __mutex_held(), to be used in new
* code to circumvent these crufty old programs.
*/
int
{
return (shared_mutex_held(mp));
}
int
{
mp->mutex_magic = 0;
return (0);
}
/*
* Spin locks are separate from ordinary mutexes,
* but we use the same data structure for them.
*/
int
{
if (pshared == PTHREAD_PROCESS_SHARED)
else
return (0);
}
int
{
return (0);
}
int
{
int error = 0;
else {
}
return (error);
}
int
{
/*
* We don't care whether the owner is running on a processor.
* We just spin because that's what this interface requires.
*/
for (;;) {
if (*lockp == 0) { /* lock byte appears to be clear */
if (_pthread_spin_trylock(lock) == 0)
return (0);
}
SMT_PAUSE();
}
}
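/*
 * Editor's note: usage sketch only.  It shows the standard POSIX spin
 * lock calls implemented in this file: initialize, lock, unlock, and
 * destroy.  Error handling is omitted for brevity.
 */
#include <pthread.h>

static int example_counter;

static void
example_spin_usage(void)
{
	pthread_spinlock_t lock;

	(void) pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	(void) pthread_spin_lock(&lock);
	example_counter++;		/* the protected critical region */
	(void) pthread_spin_unlock(&lock);
	(void) pthread_spin_destroy(&lock);
}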
int
{
mp->mutex_owner = 0;
mp->mutex_ownerpid = 0;
return (0);
}
/* ARGSUSED2 */
int
{
return (EINVAL);
return (0);
}
/*
* cond_sleep_queue(): utility function for cond_wait_queue().
*
* Go to sleep on a condvar sleep queue, expect to be waked up
* by someone calling cond_signal() or cond_broadcast() or due
* to receiving a UNIX signal or being cancelled, or just simply
* due to a spurious wakeup (like someone calling forkall()).
*
* The associated mutex is *not* reacquired before returning.
* That must be done by the caller of cond_sleep_queue().
*/
int
{
int signalled;
int error;
/*
* Put ourself on the CV sleep queue, unlock the mutex, then
* park ourself and unpark a candidate lwp to grab the mutex.
* We must go onto the CV sleep queue before dropping the
* mutex in order to guarantee atomicity of the operation.
*/
self->ul_signalled = 0;
for (;;) {
if (lwpid != 0) {
}
/*
* We may have a deferred signal present,
* in which case we should return EINTR.
* Also, we may have received a SIGCANCEL; if so
* and we are cancelable we should return EINTR.
* We force an immediate EINTR return from
* __lwp_park() by turning our parking flag off.
*/
set_parking_flag(self, 0);
/*
* __lwp_park() will return the residual time in tsp
* if we are unparked before the timeout expires.
*/
set_parking_flag(self, 0);
lwpid = 0; /* unpark the other lwp only once */
/*
* We were waked up by cond_signal(), cond_broadcast(),
* by an interrupt or timeout (EINTR or ETIME),
* or we may just have gotten a spurious wakeup.
*/
break;
/*
* We are on either the condvar sleep queue or the
* mutex sleep queue. If we are on the mutex sleep
* queue, continue sleeping. If we are on the condvar
* sleep queue, break out of the sleep if we were
* interrupted or we timed out (EINTR or ETIME).
* Else this is a spurious wakeup; continue the loop.
*/
if (error) {
break;
}
/*
* Else a spurious wakeup on the condvar queue.
* __lwp_park() has already adjusted the timeout.
*/
} else {
thr_panic("cond_sleep_queue(): thread not on queue");
}
}
self->ul_signalled = 0;
/*
* If we were concurrently cond_signal()d and any of:
* received a UNIX signal, were cancelled, or got a timeout,
* then perform another cond_signal() to avoid consuming it.
*/
(void) cond_signal_internal(cvp);
return (error);
}
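/*
 * Editor's note: usage sketch only.  Because a waiter can be waked
 * spuriously (as the comments above note), callers of the condition
 * variable interfaces must re-test their predicate in a loop.  The
 * example_ready flag and its mutex/condvar pair are assumptions made for
 * the sketch.
 */
#include <pthread.h>

static pthread_mutex_t	example_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	example_cv = PTHREAD_COND_INITIALIZER;
static int		example_ready;

static void
example_wait_for_ready(void)
{
	(void) pthread_mutex_lock(&example_lock);
	while (!example_ready)		/* tolerate spurious wakeups */
		(void) pthread_cond_wait(&example_cv, &example_lock);
	(void) pthread_mutex_unlock(&example_lock);
}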
int
{
int error;
/*
* The old thread library was programmed to defer signals
* while in cond_wait() so that the associated mutex would
* be guaranteed to be held when the application signal
* handler was invoked.
*
* We do not behave this way by default; the state of the
* associated mutex in the signal handler is undefined.
*
* To accommodate applications that depend on the old
* behavior, the _THREAD_COND_WAIT_DEFER environment
* variable can be set to 1 and we will behave in the
* old way with respect to cond_wait().
*/
if (self->ul_cond_wait_defer)
/*
* Reacquire the mutex.
*/
} else if (mutex_trylock_adaptive(mp) != 0) {
}
if (msp)
/*
* Take any deferred signal now, after we have reacquired the mutex.
*/
if (self->ul_cond_wait_defer)
return (error);
}
/*
* cond_sleep_kernel(): utility function for cond_wait_kernel().
* See the comment ahead of cond_sleep_queue(), above.
*/
int
{
int error;
if (mtype & PTHREAD_PRIO_PROTECT) {
if (_ceil_mylist_del(mp))
}
mp->mutex_owner = 0;
mp->mutex_ownerpid = 0;
if (mtype & PTHREAD_PRIO_INHERIT)
/*
* ___lwp_cond_wait() returns immediately with EINTR if
* set_parking_flag(self,0) is called on this lwp before it
* goes to sleep in the kernel. sigacthandler() calls this
* when a deferred signal is noted. This assures that we don't
* get stuck in ___lwp_cond_wait() with all signals blocked
* due to taking a deferred signal before going to sleep.
*/
set_parking_flag(self, 0);
set_parking_flag(self, 0);
return (error);
}
int
{
int error;
int merror;
/*
* See the large comment in cond_wait_queue(), above.
*/
if (self->ul_cond_wait_defer)
/*
* Override the return code from ___lwp_cond_wait()
* with any non-zero return code from mutex_lock().
* This addresses robust lock failures in particular;
* the caller must see the EOWNERDEAD or ENOTRECOVERABLE
* errors in order to take corrective action.
*/
/*
* Take any deferred signal now, after we have reacquired the mutex.
*/
if (self->ul_cond_wait_defer)
return (error);
}
/*
* Common code for _cond_wait() and _cond_timedwait()
*/
int
{
hrtime_t begin_sleep = 0;
int error = 0;
/*
* The SUSV3 Posix spec for pthread_cond_timedwait() states:
* Except in the case of [ETIMEDOUT], all these error checks
* shall act as if they were performed immediately at the
* beginning of processing for the function and shall cause
* an error return, in effect, prior to modifying the state
* of the mutex specified by mutex or the condition variable
* specified by cond.
* Therefore, we must return EINVAL now if the timeout is invalid.
*/
return (EINVAL);
}
if (csp) {
if (tsp)
else
}
if (msp)
else if (csp)
begin_sleep = gethrtime();
if (self->ul_error_detection) {
if (!mutex_is_held(mp))
"condvar process-shared, "
"mutex process-private");
} else {
"condvar process-private, "
"mutex process-shared");
}
}
/*
* We deal with recursive mutexes by completely
* dropping the lock and restoring the recursion
* count after waking up. This is arguably wrong,
* but it obeys the principle of least astonishment.
*/
mp->mutex_rcount = 0;
else
if (csp) {
else {
}
}
return (error);
}
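/*
 * Editor's note: illustrative sketch only.  Per the SUSv3 requirement
 * quoted above, an invalid timeout must produce EINVAL before the mutex
 * or condvar state is touched, so a check of roughly this shape runs
 * first.  The nanosecond bound is written as a literal here; libc uses
 * its own NANOSEC constant.
 */
#include <errno.h>
#include <time.h>

static int
example_check_timeout(const struct timespec *tsp)
{
	if (tsp != NULL &&
	    (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000L))
		return (EINVAL);
	return (0);			/* NULL means no timeout: valid */
}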
/*
* cond_wait() is a cancellation point but _cond_wait() is not.
* System libraries call the non-cancellation version.
* It is expected that only applications call the cancellation version.
*/
int
{
/*
* Optimize the common case of USYNC_THREAD plus
* no error detection, no lock statistics, and no event tracing.
*/
/*
* Else do it the long way.
*/
}
int
{
int error;
_cancelon();
_canceloff();
else
return (error);
}
int
{
int error;
}
/*
* cond_timedwait() is a cancellation point but _cond_timedwait() is not.
* System libraries call the non-cancellation version.
* It is expected that only applications call the cancellation version.
*/
int
{
int error;
/*
* Don't return ETIME if we didn't really get a timeout.
* This can happen if we return because someone resets
* the system clock. Just return zero in this case,
* giving a spurious wakeup but not a timeout.
*/
error = 0;
}
return (error);
}
int
{
int error;
_cancelon();
_canceloff();
else
return (error);
}
int
{
int error;
error = 0;
return (error);
}
/*
* cond_reltimedwait() is a cancellation point but _cond_reltimedwait()
* is not. System libraries call the non-cancellation version.
* It is expected that only applications call the cancellation version.
*/
int
{
}
int
{
int error;
_cancelon();
_canceloff();
else
return (error);
}
int
const timespec_t *reltime)
{
int error;
error = 0;
return (error);
}
int
{
int error = 0;
if (csp)
return (error);
/*
* Move someone from the condvar sleep queue to the mutex sleep
* queue for the mutex that he will acquire on being waked up.
* We can do this only if we own the mutex he will acquire.
* If we do not own the mutex, or if his ul_cv_wake flag
* is set, just dequeue and unpark him.
*/
break;
/*
* Try not to dequeue a suspended thread.
* This mimics the old libthread's behavior.
*/
}
}
}
}
cvp->cond_waiters_user = 0;
return (error);
}
/*
* Scan the remainder of the CV queue for another waiter.
*/
} else {
break;
}
cvp->cond_waiters_user = 0;
/*
* Inform the thread that he was the recipient of a cond_signal().
* This lets him deal with cond_signal() and, concurrently,
* one or more of a cancellation, a UNIX signal, or a timeout.
* These latter conditions must not consume a cond_signal().
*/
/*
* Dequeue the waiter but leave his ul_sleepq non-NULL
* while we move him to the mutex queue so that he can
* deal properly with spurious wakeups.
*/
ulwp->ul_cv_wake = 0;
(void) __lwp_unpark(lwpid);
} else {
}
return (error);
}
int
{
int error = 0;
int nlwpid = 0;
if (csp)
return (error);
/*
* Move everyone from the condvar sleep queue to the mutex sleep
* queue for the mutex that they will acquire on being waked up.
* We can do this only if we own the mutex they will acquire.
* If we do not own the mutex, or if their ul_cv_wake flag
* is set, just dequeue and unpark them.
*
* We keep track of lwpids that are to be unparked in lwpid[].
* __lwp_unpark_all() is called to unpark all of them after
* they have been removed from the sleep queue and the sleep
* queue lock has been dropped. If we run out of space in our
* on-stack buffer, we need to allocate more but we can't call
* lmalloc() because we are holding a queue lock when the overflow
* occurs and lmalloc() acquires a lock. We can't use alloca()
* either because the application may have allocated a small stack
* and we don't want to overrun the stack. So we use the mmap()
* system call directly since that path acquires no locks.
*/
cvp->cond_waiters_user = 0;
continue;
}
ulwp->ul_cv_wake = 0;
/*
* Allocate NEWLWPS ids on the first overflow.
* Double the allocation each time after that.
*/
2 * maxlwps;
if (vaddr == MAP_FAILED) {
/*
* Let's hope this never happens.
* If it does, then we have a terrible
* thundering herd on our hands.
*/
nlwpid = 0;
} else {
(void) _private_munmap(lwpid,
}
}
} else {
}
}
}
if (nlwpid) {
if (nlwpid == 1)
(void) __lwp_unpark(lwpid[0]);
else
}
return (error);
}
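/*
 * Editor's note: illustrative sketch only.  It shows the buffer-growing
 * discipline described above: when the on-stack id buffer overflows,
 * allocate a larger one directly with mmap() (which takes no user-level
 * locks), copy the ids across, and release any previously mmap()ed
 * buffer.  The names and the doubling policy parameter are assumptions.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>

static lwpid_t *
example_grow_lwpid_buffer(lwpid_t *old, int nlwpid, int *maxlwpsp,
	lwpid_t *onstack_buffer)
{
	int newmax = *maxlwpsp * 2;
	lwpid_t *newbuf = mmap(NULL, newmax * sizeof (lwpid_t),
	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);

	if (newbuf == MAP_FAILED)
		return (NULL);		/* caller must cope (wake now) */
	(void) memcpy(newbuf, old, nlwpid * sizeof (lwpid_t));
	if (old != onstack_buffer)	/* free a previously mmap()ed buffer */
		(void) munmap(old, *maxlwpsp * sizeof (lwpid_t));
	*maxlwpsp = newmax;
	return (newbuf);
}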
int
{
cvp->cond_magic = 0;
return (0);
}
#if defined(THREAD_DEBUG)
void
{
}
#endif
/* protected by link_lock */
/*
* Record spin lock statistics.
* Called by a thread exiting itself in thrp_exit().
* Also called via atexit() from the thread calling
* exit() to do all the other threads as well.
*/
void
{
ulwp->ul_spin_lock_spin = 0;
ulwp->ul_spin_lock_spin2 = 0;
ulwp->ul_spin_lock_sleep = 0;
ulwp->ul_spin_lock_wakeup = 0;
}
/*
* atexit function: dump the queue statistics to stderr.
*/
#if !defined(__lint)
#endif
#include <stdio.h>
void
dump_queue_statistics(void)
{
int qn;
uint64_t spin_lock_total = 0;
return;
return;
if (qp->qh_lockcount == 0)
continue;
return;
}
return;
if (qp->qh_lockcount == 0)
continue;
return;
}
}