/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
#include "lint.h"
#include "thr_uberdata.h"
#include <sys/sdt.h>

#define	TRY_FLAG		0x10
#define	READ_LOCK		0
#define	WRITE_LOCK		1
#define	READ_LOCK_TRY		(READ_LOCK | TRY_FLAG)
#define	WRITE_LOCK_TRY		(WRITE_LOCK | TRY_FLAG)

#define	NLOCKS	4	/* initial number of readlock_t structs allocated */
/*
 * Find/allocate an entry for rwlp in our array of rwlocks held for reading.
 * We must be deferring signals for this to be safe.
 * Else if we are returning an entry with ul_rdlockcnt == 0,
 * it could be reassigned behind our back in a signal handler.
 */
static readlock_t *
rwl_entry(rwlock_t *rwlp)
{
	ulwp_t *self = curthread;
	readlock_t *remembered = NULL;
	readlock_t *readlockp;
	uint_t nlocks;

	/* we must be deferring signals */
	ASSERT((self->ul_critical + self->ul_sigdefer) != 0);

	if ((nlocks = self->ul_rdlockcnt) != 0)
		readlockp = self->ul_readlock.array;
	else {
		nlocks = 1;
		readlockp = &self->ul_readlock.single;
	}

	for (; nlocks; nlocks--, readlockp++) {
		if (readlockp->rd_rwlock == rwlp)
			return (readlockp);
		if (readlockp->rd_count == 0 && remembered == NULL)
			remembered = readlockp;
	}
	if (remembered != NULL) {
		remembered->rd_rwlock = rwlp;
		return (remembered);
	}

	/*
	 * No entry available.  Allocate more space, converting the single
	 * readlock_t entry into an array of readlock_t entries if necessary.
	 */
	if ((nlocks = self->ul_rdlockcnt) == 0) {
		/*
		 * Initial allocation of the readlock_t array.
		 * Convert the single entry into an array.
		 */
		self->ul_rdlockcnt = nlocks = NLOCKS;
		readlockp = lmalloc(nlocks * sizeof (readlock_t));
		/*
		 * The single readlock_t becomes the first entry in the array.
		 */
		*readlockp = self->ul_readlock.single;
		self->ul_readlock.single.rd_count = 0;
		self->ul_readlock.array = readlockp;
		/*
		 * Return the next available entry in the array.
		 */
		(++readlockp)->rd_rwlock = rwlp;
		return (readlockp);
	}
	/*
	 * Reallocate the array, double the size each time.
	 */
	readlockp = lmalloc(nlocks * 2 * sizeof (readlock_t));
	(void) memcpy(readlockp, self->ul_readlock.array,
	    nlocks * sizeof (readlock_t));
	lfree(self->ul_readlock.array, nlocks * sizeof (readlock_t));
	self->ul_readlock.array = readlockp;
	self->ul_rdlockcnt *= 2;
	/*
	 * Return the next available entry in the newly allocated array.
	 */
	(readlockp += nlocks)->rd_rwlock = rwlp;
	return (readlockp);
}
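/*
 * Illustration (not part of the implementation): how the per-thread cache
 * of read-lock entries grows, assuming the initial array size NLOCKS of 4
 * defined above.  A hypothetical thread acquiring read locks on five
 * distinct rwlocks drives every branch of rwl_entry():
 *
 *	rwlock_t a, b, c, d, e;
 *	(void) rw_rdlock(&a);	// uses ul_readlock.single
 *	(void) rw_rdlock(&b);	// converts single into a 4-entry array
 *	(void) rw_rdlock(&c);	// reuses a free entry in the array
 *	(void) rw_rdlock(&d);	// fills the last entry
 *	(void) rw_rdlock(&e);	// array full: doubled to 8 entries
 *
 * Entries whose rd_count has dropped to zero are remembered during the
 * linear search and reused before any new space is allocated.
 */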
/*
* Free the array of rwlocks held for reading.
*/
void
rwl_free(ulwp_t *ulwp)
{
	uint_t nlocks;

	if ((nlocks = ulwp->ul_rdlockcnt) != 0)
		lfree(ulwp->ul_readlock.array, nlocks * sizeof (readlock_t));
	ulwp->ul_rdlockcnt = 0;
	ulwp->ul_readlock.single.rd_rwlock = NULL;
	ulwp->ul_readlock.single.rd_count = 0;
}
/*
* Check if a reader version of the lock is held by the current thread.
*/
int
rw_read_held(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	readlock_t *readlockp;
	uint_t nlocks;
	int rval = 0;

	no_preempt(self);

	readers = *rwstate;
	if (!(readers & URW_WRITE_LOCKED) &&
	    (readers & URW_READERS_MASK) != 0) {
		/*
		 * The lock is held for reading by some thread.
		 * Search our array of rwlocks held for reading for a match.
		 */
		if ((nlocks = self->ul_rdlockcnt) != 0)
			readlockp = self->ul_readlock.array;
		else {
			nlocks = 1;
			readlockp = &self->ul_readlock.single;
		}
		for (; nlocks; nlocks--, readlockp++) {
			if (readlockp->rd_rwlock == rwlp &&
			    readlockp->rd_count != 0) {
				rval = 1;
				break;
			}
		}
	}

	preempt(self);
	return (rval);
}
/*
* Check if a writer version of the lock is held by the current thread.
*/
int
rw_write_held(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	int rval;

	no_preempt(self);

	readers = *rwstate;
	rval = ((readers & URW_WRITE_LOCKED) &&
	    rwlp->rwlock_owner == (uintptr_t)self &&
	    (rwlp->rwlock_type == USYNC_THREAD ||
	    rwlp->rwlock_ownerpid == self->ul_uberdata->pid));

	preempt(self);
	return (rval);
}
/* ARGSUSED2 */
int
rwlock_init(rwlock_t *rwlp, int type, void *arg)
{
	ulwp_t *self = curthread;

	if (type != USYNC_THREAD && type != USYNC_PROCESS)
		return (EINVAL);
	/*
	 * Once reinitialized, we can no longer be holding a read or write lock.
	 * We can do nothing about other threads that are holding read locks.
	 */
	sigoff(self);
	rwl_entry(rwlp)->rd_count = 0;
	sigon(self);
	(void) memset(rwlp, 0, sizeof (*rwlp));
	rwlp->rwlock_type = (uint16_t)type;
	rwlp->rwlock_magic = RWL_MAGIC;
	rwlp->mutex.mutex_type = (uint8_t)type;
	rwlp->mutex.mutex_flag = LOCK_INITED;
	rwlp->mutex.mutex_magic = MUTEX_MAGIC;

	/*
	 * This should be at the beginning of the function,
	 * but for the sake of old broken applications that
	 * do not have proper alignment for their rwlocks
	 * (and don't check the return code from rwlock_init),
	 * we put it here, after initializing the rwlock regardless.
	 */
	if (((uintptr_t)rwlp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    self->ul_misaligned == 0)
		return (EINVAL);

	return (0);
}
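/*
 * For reference, this is how an application reaches the USYNC_PROCESS
 * path above through the standard POSIX wrappers (illustrative only;
 * nothing beyond standard pthread interfaces is assumed):
 *
 *	#include <pthread.h>
 *
 *	pthread_rwlock_t *rwp = ...;	// placed in MAP_SHARED memory
 *	pthread_rwlockattr_t attr;
 *
 *	(void) pthread_rwlockattr_init(&attr);
 *	(void) pthread_rwlockattr_setpshared(&attr,
 *	    PTHREAD_PROCESS_SHARED);
 *	(void) pthread_rwlock_init(rwp, &attr);
 *
 * PTHREAD_PROCESS_SHARED arrives here as type USYNC_PROCESS;
 * the default is USYNC_THREAD (process-private).
 */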
int
rwlock_destroy(rwlock_t *rwlp)
{
	ulwp_t *self = curthread;

	/*
	 * Once destroyed, we can no longer be holding a read or write lock.
	 * We can do nothing about other threads that are holding read locks.
	 */
	sigoff(self);
	rwl_entry(rwlp)->rd_count = 0;
	sigon(self);
	rwlp->rwlock_magic = 0;
	tdb_sync_obj_deregister(rwlp);
	return (0);
}
/*
* The following four functions:
* read_lock_try()
* read_unlock_try()
* write_lock_try()
* write_unlock_try()
* lie at the heart of the fast-path code for rwlocks,
* both process-private and process-shared.
*
* They are called once without recourse to any other locking primitives.
* If they succeed, we are done and the fast-path code was successful.
* If they fail, we have to deal with lock queues, either to enqueue
* ourself and sleep or to dequeue and wake up someone else (slow paths).
*
* Unless 'ignore_waiters_flag' is true (a condition that applies only
* when read_lock_try() or write_lock_try() is called from code that
* is already in the slow path and has already acquired the queue lock),
* these functions will always fail if the waiters flag, URW_HAS_WAITERS,
* is set in the 'rwstate' word. Thus, setting the waiters flag on the
* rwlock and acquiring the queue lock guarantees exclusive access to
* the rwlock (and is the only way to guarantee exclusive access).
*/
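/*
 * A minimal sketch of the fast path for the reader case (illustrative
 * only; it compresses read_lock_try() below into a single expression).
 * The key point is that the 'rwstate' word encodes the reader count in
 * its low-order bits (URW_READERS_MASK) alongside the URW_WRITE_LOCKED
 * and URW_HAS_WAITERS flags, so one compare-and-swap can atomically
 * verify "no writer, no waiters" and increment the reader count:
 *
 *	uint32_t readers = *rwstate;
 *	if ((readers & (URW_HAS_WAITERS | URW_WRITE_LOCKED)) == 0 &&
 *	    atomic_cas_32(rwstate, readers, readers + 1) == readers) {
 *		// read lock acquired; no queue lock was ever taken
 *	}
 *
 * The unlock fast paths are the mirror image: they fail only if
 * URW_HAS_WAITERS is set, in which case the slow path must run so
 * that sleeping threads are not stranded.
 */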
/*
* Attempt to acquire a readers lock. Return true on success.
*/
static int
read_lock_try(rwlock_t *rwlp, int ignore_waiters_flag)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t mask = ignore_waiters_flag?
	    URW_WRITE_LOCKED : (URW_HAS_WAITERS | URW_WRITE_LOCKED);
	uint32_t readers;
	ulwp_t *self = curthread;

	no_preempt(self);
	while (((readers = *rwstate) & mask) == 0) {
		if (atomic_cas_32(rwstate, readers, readers + 1) == readers) {
			preempt(self);
			return (1);
		}
	}
	preempt(self);
	return (0);
}
/*
* Attempt to release a reader lock. Return true on success.
*/
static int
read_unlock_try(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;

	no_preempt(self);
	while (((readers = *rwstate) & URW_HAS_WAITERS) == 0) {
		if (atomic_cas_32(rwstate, readers, readers - 1) == readers) {
			preempt(self);
			return (1);
		}
	}
	preempt(self);
	return (0);
}
/*
* Attempt to acquire a writer lock. Return true on success.
*/
static int
write_lock_try(rwlock_t *rwlp, int ignore_waiters_flag)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t mask = ignore_waiters_flag?
	    (URW_WRITE_LOCKED | URW_READERS_MASK) :
	    (URW_HAS_WAITERS | URW_WRITE_LOCKED | URW_READERS_MASK);
	uint32_t readers;
	ulwp_t *self = curthread;

	no_preempt(self);
	while (((readers = *rwstate) & mask) == 0) {
		if (atomic_cas_32(rwstate, readers, readers | URW_WRITE_LOCKED)
		    == readers) {
			preempt(self);
			return (1);
		}
	}
	preempt(self);
	return (0);
}
/*
* Attempt to release a writer lock. Return true on success.
*/
static int
write_unlock_try(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;

	no_preempt(self);
	while (((readers = *rwstate) & URW_HAS_WAITERS) == 0) {
		if (atomic_cas_32(rwstate, readers, 0) == readers) {
			preempt(self);
			return (1);
		}
	}
	preempt(self);
	return (0);
}
/*
* Release a process-private rwlock and wake up any thread(s) sleeping on it.
* This is called when a thread releases a lock that appears to have waiters.
*/
static void
rw_queue_release(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	queue_head_t *qp;
	uint32_t readers;
	uint32_t writer;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;
	int nlwpid = 0;
	int more;
	int maxlwps = MAXLWPS;
	lwpid_t buffer[MAXLWPS];
	lwpid_t *lwpid = buffer;

	qp = queue_lock(rwlp, MX);

	/*
	 * Here is where we actually drop the lock,
	 * but we retain the URW_HAS_WAITERS flag, if it is already set.
	 */
	readers = *rwstate;
	if (readers & URW_WRITE_LOCKED)	/* drop the writer lock */
		atomic_and_32(rwstate, ~URW_WRITE_LOCKED);
	else				/* drop the readers lock */
		atomic_dec_32(rwstate);
	if (!(*rwstate & URW_HAS_WAITERS)) {	/* no waiters */
		queue_unlock(qp);
		return;
	}

	/*
	 * The presence of the URW_HAS_WAITERS flag causes all rwlock
	 * code to go through the slow path, acquiring queue_lock(qp).
	 * Therefore, the rest of this code is safe because we are
	 * holding the queue lock and the URW_HAS_WAITERS flag is set.
	 */

	readers = 0;		/* count of readers to be waked */
	writer = 0;		/* no current writer */

	/*
	 * Examine the queue of waiters in priority order and prepare
	 * to wake up as many readers as we encounter before encountering
	 * a writer.  If the highest priority thread on the queue is a
	 * writer, stop there and wake it up.
	 *
	 * We keep track of lwpids that are to be unparked in lwpid[].
	 * __lwp_unpark_all() is called to unpark all of them after
	 * they have been removed from the sleep queue and the sleep
	 * queue lock has been dropped.  If we run out of space in our
	 * on-stack buffer, we need to allocate more but we can't call
	 * lmalloc() because we are holding a queue lock when the overflow
	 * occurs and lmalloc() acquires a lock.  We can't use alloca()
	 * either because the application may have allocated a small
	 * stack and we don't want to overrun the stack.  So we call
	 * alloc_lwpids() to allocate a bigger buffer using the mmap()
	 * system call directly since that path acquires no locks.
	 */
	while ((ulwpp = queue_slot(qp, &prev, &more)) != NULL) {
		ulwp = *ulwpp;
		ASSERT(ulwp->ul_wchan == rwlp);
		if (ulwp->ul_writer) {
			if (writer != 0 || readers != 0)
				break;
			/* one writer to wake */
			writer++;
		} else {
			if (writer != 0)
				break;
			/* at least one reader to wake */
			readers++;
			if (nlwpid == maxlwps)
				lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
		}
		queue_unlink(qp, ulwpp, prev);
		ulwp->ul_sleepq = NULL;
		ulwp->ul_wchan = NULL;
		if (writer) {
			/*
			 * Hand off the lock to the writer we will be waking.
			 */
			atomic_or_32(rwstate, URW_WRITE_LOCKED);
			rwlp->rwlock_owner = (uintptr_t)ulwp;
		}
		lwpid[nlwpid++] = ulwp->ul_lwpid;
	}

	/*
	 * This modification of rwstate must be done last.
	 * The presence of the URW_HAS_WAITERS flag causes all rwlock
	 * code to go through the slow path, acquiring queue_lock(qp).
	 * Otherwise the read_lock_try() and write_lock_try() fast paths
	 * are effective.
	 */
	if (ulwpp == NULL)
		atomic_and_32(rwstate, ~URW_HAS_WAITERS);

	if (nlwpid == 0) {
		queue_unlock(qp);
	} else {
		ulwp_t *self = curthread;

		no_preempt(self);
		queue_unlock(qp);
		if (nlwpid == 1)
			(void) __lwp_unpark(lwpid[0]);
		else
			(void) __lwp_unpark_all(lwpid, nlwpid);
		preempt(self);
	}
	if (lwpid != buffer)
		(void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t));
}
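/*
 * A worked example of the wakeup policy above (illustrative only):
 * suppose the sleep queue holds, in priority order, readers R1 and R2
 * followed by writer W1 and reader R3.  This release unparks R1 and R2
 * together (they can share the lock), leaves URW_HAS_WAITERS set, and
 * stops at W1.  If instead W1 had been at the head of the queue, only
 * W1 would be unparked, and the lock would be handed off to it directly
 * by setting URW_WRITE_LOCKED and rwlock_owner before it ever runs.
 */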
/*
* Common code for rdlock, timedrdlock, wrlock, timedwrlock, tryrdlock,
* and trywrlock for process-shared (USYNC_PROCESS) rwlocks.
*
* Note: if the lock appears to be contended we call __lwp_rwlock_rdlock()
* or __lwp_rwlock_wrlock() holding the mutex. These return with the mutex
* released, and if they need to sleep will release the mutex first. In the
* event of a spurious wakeup, these will return EAGAIN (because it is much
* easier for us to re-acquire the mutex here).
*/
int
shared_rwlock_lock(rwlock_t *rwlp, timespec_t *tsp, int rd_wr)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	mutex_t *mp = &rwlp->mutex;
	int try_flag;
	int error;

	try_flag = (rd_wr & TRY_FLAG);
	rd_wr &= ~TRY_FLAG;
	ASSERT(rd_wr == READ_LOCK || rd_wr == WRITE_LOCK);

	if (!try_flag) {
		DTRACE_PROBE2(plockstat, rw__block, rwlp, rd_wr);
	}

	do {
		if (try_flag && (*rwstate & URW_WRITE_LOCKED)) {
			error = EBUSY;
			break;
		}
		if ((error = mutex_lock(mp)) != 0)
			break;
		if (rd_wr == READ_LOCK) {
			if (read_lock_try(rwlp, 0)) {
				(void) mutex_unlock(mp);
				break;
			}
		} else {
			if (write_lock_try(rwlp, 0)) {
				(void) mutex_unlock(mp);
				break;
			}
		}
		atomic_or_32(rwstate, URW_HAS_WAITERS);
		/*
		 * The calls to __lwp_rwlock_*() below will release the mutex,
		 * so we need a dtrace probe here.  The owner field of the
		 * mutex is cleared in the kernel when the mutex is released,
		 * so we should not clear it here.
		 */
		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
		/*
		 * The waiters bit may be inaccurate.
		 * Only the kernel knows for sure.
		 */
		if (rd_wr == READ_LOCK) {
			if (try_flag)
				error = __lwp_rwlock_tryrdlock(rwlp);
			else
				error = __lwp_rwlock_rdlock(rwlp, tsp);
		} else {
			if (try_flag)
				error = __lwp_rwlock_trywrlock(rwlp);
			else
				error = __lwp_rwlock_wrlock(rwlp, tsp);
		}
	} while (error == EAGAIN);

	if (!try_flag) {
		DTRACE_PROBE3(plockstat, rw__blocked, rwlp, rd_wr, error == 0);
	}

	return (error);
}
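/*
 * Context for the USYNC_PROCESS path (illustrative only; assumes just
 * standard POSIX interfaces): a rwlock placed in MAP_SHARED memory is
 * contended by threads in multiple processes, so blocking must go
 * through the kernel rather than through this process's sleep queues:
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	(void) ftruncate(fd, sizeof (pthread_rwlock_t));
 *	pthread_rwlock_t *rwp = mmap(NULL, sizeof (*rwp),
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	// initialized once with PTHREAD_PROCESS_SHARED; then any
 *	// process mapping the segment may call pthread_rwlock_rdlock()
 *
 * The name "/example" and the fd handling above are hypothetical.
 */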
/*
* Common code for rdlock, timedrdlock, wrlock, timedwrlock, tryrdlock,
* and trywrlock for process-private (USYNC_THREAD) rwlocks.
*/
int
rwlock_lock(rwlock_t *rwlp, timespec_t *tsp, int rd_wr)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	queue_head_t *qp;
	ulwp_t *ulwp;
	int try_flag;
	int ignore_waiters_flag;
	int error = 0;

	try_flag = (rd_wr & TRY_FLAG);
	rd_wr &= ~TRY_FLAG;
	ASSERT(rd_wr == READ_LOCK || rd_wr == WRITE_LOCK);

	if (!try_flag) {
		DTRACE_PROBE2(plockstat, rw__block, rwlp, rd_wr);
	}

	qp = queue_lock(rwlp, MX);
	/* initial attempt to acquire the lock fails if there are waiters */
	ignore_waiters_flag = 0;
	while (error == 0) {
		if (rd_wr == READ_LOCK) {
			if (read_lock_try(rwlp, ignore_waiters_flag))
				break;
		} else {
			if (write_lock_try(rwlp, ignore_waiters_flag))
				break;
		}
		/* subsequent attempts do not fail due to waiters */
		ignore_waiters_flag = 1;
		atomic_or_32(rwstate, URW_HAS_WAITERS);
		readers = *rwstate;
		if ((readers & URW_WRITE_LOCKED) ||
		    (rd_wr == WRITE_LOCK &&
		    (readers & URW_READERS_MASK) != 0))
			/* EMPTY */;	/* somebody holds the lock */
		else if ((ulwp = queue_waiter(qp)) == NULL) {
			atomic_and_32(rwstate, ~URW_HAS_WAITERS);
			ignore_waiters_flag = 0;
			continue;	/* no queued waiters, start over */
		} else {
			/*
			 * Do a priority check on the queued waiter (the
			 * highest priority thread on the queue) to see
			 * if we should defer to him or just grab the lock.
			 */
			int our_pri = real_priority(self);
			int his_pri = real_priority(ulwp);

			if (rd_wr == WRITE_LOCK) {
				/*
				 * We defer to a queued thread that has
				 * a higher priority than ours.
				 */
				if (his_pri <= our_pri) {
					/*
					 * Don't defer, just grab the lock.
					 */
					continue;
				}
			} else {
				/*
				 * We defer to a queued thread that has
				 * a higher priority than ours or that
				 * is a writer whose priority equals ours.
				 */
				if (his_pri < our_pri ||
				    (his_pri == our_pri &&
				    !ulwp->ul_writer)) {
					/*
					 * Don't defer, just grab the lock.
					 */
					continue;
				}
			}
		}
		/*
		 * We are about to block.
		 * If we're doing a trylock, return EBUSY instead.
		 */
		if (try_flag) {
			error = EBUSY;
			break;
		}
		/*
		 * Enqueue writers ahead of readers.
		 */
		self->ul_writer = rd_wr;	/* *must* be 0 or 1 */
		enqueue(qp, self, 0);
		set_parking_flag(self, 1);
		queue_unlock(qp);
		if ((error = __lwp_park(tsp, 0)) == EINTR)
			error = 0;
		set_parking_flag(self, 0);
		qp = queue_lock(rwlp, MX);
		if (self->ul_sleepq && dequeue_self(qp) == 0) {
			atomic_and_32(rwstate, ~URW_HAS_WAITERS);
			ignore_waiters_flag = 0;
		}
		self->ul_writer = 0;
		if (rd_wr == WRITE_LOCK &&
		    (*rwstate & URW_WRITE_LOCKED) &&
		    rwlp->rwlock_owner == (uintptr_t)self) {
			/*
			 * We acquired the lock by hand-off
			 * from the previous owner,
			 * even if __lwp_park() reported a timeout.
			 */
			error = 0;	/* timedlock did not fail */
			break;
		}
	}

	/*
	 * Make one final check to see if there are any threads left
	 * on the rwlock queue.  Clear the URW_HAS_WAITERS flag if not.
	 */
	if (queue_waiter(qp) == NULL)
		atomic_and_32(rwstate, ~URW_HAS_WAITERS);

	queue_unlock(qp);

	if (!try_flag) {
		DTRACE_PROBE3(plockstat, rw__blocked, rwlp, rd_wr, error == 0);
	}

	return (error);
}
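/*
 * A concrete reading of the priority check above (illustrative only):
 * if a reader at priority 10 finds the lock free but a writer at
 * priority 20 is queued, the reader defers, leaves URW_HAS_WAITERS set,
 * and parks, so the writer cannot be starved by a stream of
 * late-arriving readers.  If the queued thread is another reader of
 * equal priority, the caller just grabs the lock, since both could
 * have held it concurrently anyway.
 */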
int
rw_rdlock_impl(rwlock_t *rwlp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	readlock_t *readlockp;
	int error;

	/*
	 * If we already hold a readers lock on this rwlock,
	 * just increment our reference count and return.
	 */
	sigoff(self);
	readlockp = rwl_entry(rwlp);
	if (readlockp->rd_count != 0) {
		if (readlockp->rd_count == READ_LOCK_MAX) {
			sigon(self);
			error = EAGAIN;
			goto out;
		}
		sigon(self);
		error = 0;
		goto out;
	}
	sigon(self);

	/*
	 * If we hold the writer lock, bail out.
	 */
	if (rw_write_held(rwlp)) {
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_rdlock",
			    "calling thread owns the writer lock");
		error = EDEADLK;
		goto out;
	}

	if (read_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, tsp, READ_LOCK);
	else						/* user-level */
		error = rwlock_lock(rwlp, tsp, READ_LOCK);
out:
	if (error == 0) {
		sigoff(self);
		rwl_entry(rwlp)->rd_count++;
		sigon(self);
		if (rwsp)
			tdb_incr(rwsp->rw_rdlock);
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, READ_LOCK);
	} else {
		DTRACE_PROBE3(plockstat, rw__error, rwlp, READ_LOCK, error);
	}

	return (error);
}
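/*
 * Example of the reference-count behavior (illustrative only): a single
 * thread may acquire the same rwlock for reading more than once, and
 * must unlock it the same number of times:
 *
 *	rwlock_t lock;			// initialized elsewhere
 *	(void) rw_rdlock(&lock);	// rd_count 0 -> 1, lock acquired
 *	(void) rw_rdlock(&lock);	// rd_count 1 -> 2, rwstate untouched
 *	(void) rw_unlock(&lock);	// rd_count 2 -> 1, lock still held
 *	(void) rw_unlock(&lock);	// rd_count 1 -> 0, lock released
 */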
int
rw_rdlock(rwlock_t *rwlp)
{
	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	return (rw_rdlock_impl(rwlp, NULL));
}

void
lrw_rdlock(rwlock_t *rwlp)
{
	enter_critical(curthread);
	(void) rw_rdlock_impl(rwlp, NULL);
}
int
pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlp,
    const struct timespec *abstime)
{
	timespec_t tslocal;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal);
	error = rw_rdlock_impl((rwlock_t *)rwlp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}

int
pthread_rwlock_reltimedrdlock_np(pthread_rwlock_t *rwlp,
    const struct timespec *reltime)
{
	timespec_t tslocal = *reltime;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	error = rw_rdlock_impl((rwlock_t *)rwlp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}
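/*
 * Typical use of the timed variants (illustrative only; assumes only
 * standard POSIX interfaces):
 *
 *	struct timespec abstime;
 *
 *	(void) clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 5;	// give up after five seconds
 *	if (pthread_rwlock_timedrdlock(&rwlock, &abstime) == ETIMEDOUT) {
 *		// back off, or report the stall
 *	}
 *
 * The absolute time is converted to a relative timeout above because
 * the underlying parking primitives take relative timeouts, and the
 * kernel's ETIME is mapped to the POSIX ETIMEDOUT.
 */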
int
rw_wrlock_impl(rwlock_t *rwlp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	int error;

	/*
	 * If we hold a readers lock on this rwlock, bail out.
	 */
	if (rw_read_held(rwlp)) {
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_wrlock",
			    "calling thread owns the readers lock");
		error = EDEADLK;
		goto out;
	}

	/*
	 * If we hold the writer lock, bail out.
	 */
	if (rw_write_held(rwlp)) {
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_wrlock",
			    "calling thread owns the writer lock");
		error = EDEADLK;
		goto out;
	}

	if (write_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, tsp, WRITE_LOCK);
	else						/* user-level */
		error = rwlock_lock(rwlp, tsp, WRITE_LOCK);
out:
	if (error == 0) {
		rwlp->rwlock_owner = (uintptr_t)self;
		if (rwlp->rwlock_type == USYNC_PROCESS)
			rwlp->rwlock_ownerpid = udp->pid;
		if (rwsp) {
			tdb_incr(rwsp->rw_wrlock);
			rwsp->rw_wrlock_begin_hold = gethrtime();
		}
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, WRITE_LOCK);
	} else {
		DTRACE_PROBE3(plockstat, rw__error, rwlp, WRITE_LOCK, error);
	}
	return (error);
}
int
rw_wrlock(rwlock_t *rwlp)
{
	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	return (rw_wrlock_impl(rwlp, NULL));
}

void
lrw_wrlock(rwlock_t *rwlp)
{
	enter_critical(curthread);
	(void) rw_wrlock_impl(rwlp, NULL);
}
int
pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlp,
    const struct timespec *abstime)
{
	timespec_t tslocal;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal);
	error = rw_wrlock_impl((rwlock_t *)rwlp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}

int
pthread_rwlock_reltimedwrlock_np(pthread_rwlock_t *rwlp,
    const struct timespec *reltime)
{
	timespec_t tslocal = *reltime;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	error = rw_wrlock_impl((rwlock_t *)rwlp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}
int
rw_tryrdlock(rwlock_t *rwlp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	readlock_t *readlockp;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);

	if (rwsp)
		tdb_incr(rwsp->rw_rdlock_try);

	/*
	 * If we already hold a readers lock on this rwlock,
	 * just increment our reference count and return.
	 */
	sigoff(self);
	readlockp = rwl_entry(rwlp);
	if (readlockp->rd_count != 0) {
		if (readlockp->rd_count == READ_LOCK_MAX) {
			sigon(self);
			error = EAGAIN;
			goto out;
		}
		sigon(self);
		error = 0;
		goto out;
	}
	sigon(self);

	if (read_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, NULL, READ_LOCK_TRY);
	else						/* user-level */
		error = rwlock_lock(rwlp, NULL, READ_LOCK_TRY);
out:
	if (error == 0) {
		sigoff(self);
		rwl_entry(rwlp)->rd_count++;
		sigon(self);
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, READ_LOCK);
	} else {
		if (rwsp)
			tdb_incr(rwsp->rw_rdlock_try_fail);
		if (error != EBUSY) {
			DTRACE_PROBE3(plockstat, rw__error, rwlp, READ_LOCK,
			    error);
		}
	}
	return (error);
}
int
rw_trywrlock(rwlock_t *rwlp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);

	if (rwsp)
		tdb_incr(rwsp->rw_wrlock_try);

	if (write_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, NULL, WRITE_LOCK_TRY);
	else						/* user-level */
		error = rwlock_lock(rwlp, NULL, WRITE_LOCK_TRY);

	if (error == 0) {
		rwlp->rwlock_owner = (uintptr_t)self;
		if (rwlp->rwlock_type == USYNC_PROCESS)
			rwlp->rwlock_ownerpid = udp->pid;
		if (rwsp)
			rwsp->rw_wrlock_begin_hold = gethrtime();
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, WRITE_LOCK);
	} else {
		if (rwsp)
			tdb_incr(rwsp->rw_wrlock_try_fail);
		if (error != EBUSY) {
			DTRACE_PROBE3(plockstat, rw__error, rwlp, WRITE_LOCK,
			    error);
		}
	}
	return (error);
}
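/*
 * Typical trylock pattern (illustrative only): EBUSY is the expected
 * failure mode and is deliberately excluded from the DTrace error
 * probe above:
 *
 *	while (pthread_rwlock_trywrlock(&rwlock) == EBUSY) {
 *		// do other useful work, then try again
 *	}
 */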
int
rw_unlock(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_rwlock_stats_t *rwsp;
	int rd_wr;

	readers = *rwstate;
	if (readers & URW_WRITE_LOCKED) {
		rd_wr = WRITE_LOCK;
		readers = 0;
	} else {
		rd_wr = READ_LOCK;
		readers &= URW_READERS_MASK;
	}

	if (rd_wr == WRITE_LOCK) {
		/*
		 * Since the writer lock is held, we'd better be
		 * holding it, else we cannot legitimately be here.
		 */
		if (!rw_write_held(rwlp)) {
			if (self->ul_error_detection)
				rwlock_error(rwlp, "rwlock_unlock",
				    "writer lock held, "
				    "but not by the calling thread");
			return (EPERM);
		}
		if ((rwsp = RWLOCK_STATS(rwlp, udp)) != NULL) {
			if (rwsp->rw_wrlock_begin_hold)
				rwsp->rw_wrlock_hold_time +=
				    gethrtime() - rwsp->rw_wrlock_begin_hold;
			rwsp->rw_wrlock_begin_hold = 0;
		}
		rwlp->rwlock_owner = 0;
		rwlp->rwlock_ownerpid = 0;
	} else if (readers > 0) {
		/*
		 * A readers lock is held; if we don't hold one, bail out.
		 */
		readlock_t *readlockp;

		sigoff(self);
		readlockp = rwl_entry(rwlp);
		if (readlockp->rd_count == 0) {
			sigon(self);
			if (self->ul_error_detection)
				rwlock_error(rwlp, "rwlock_unlock",
				    "readers lock held, "
				    "but not by the calling thread");
			return (EPERM);
		}
		/*
		 * If we hold more than one readers lock on this rwlock,
		 * just decrement our reference count and return.
		 */
		if (--readlockp->rd_count != 0) {
			sigon(self);
			goto out;
		}
		sigon(self);
	} else {
		/*
		 * This is a usage error.
		 * No thread should release an unowned lock.
		 */
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_unlock", "lock not owned");
		return (EPERM);
	}

	if (rd_wr == WRITE_LOCK && write_unlock_try(rwlp)) {
		/* EMPTY */;
	} else if (rd_wr == READ_LOCK && read_unlock_try(rwlp)) {
		/* EMPTY */;
	} else if (rwlp->rwlock_type == USYNC_PROCESS) {
		(void) mutex_lock(&rwlp->mutex);
		(void) __lwp_rwlock_unlock(rwlp);
		(void) mutex_unlock(&rwlp->mutex);
	} else {
		rw_queue_release(rwlp);
	}
out:
	DTRACE_PROBE2(plockstat, rw__release, rwlp, rd_wr);
	return (0);
}
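/*
 * rw_unlock() is the single release path for both lock modes; it infers
 * from 'rwstate' whether a reader or a writer is releasing.  Releasing
 * a lock the calling thread does not own fails (illustrative only):
 *
 *	rwlock_t lock;			// initialized, never acquired
 *	int error = rw_unlock(&lock);	// returns EPERM
 *
 * With error detection enabled, a diagnostic is also reported through
 * rwlock_error() before the EPERM return.
 */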
void
lrw_unlock(rwlock_t *rwlp)
{
	(void) rw_unlock(rwlp);
	exit_critical(curthread);
}