Lines Matching refs:mp (references to the local variable mp, a mutex_t pointer; each match below is prefixed with its line number in the source file)

128 mutex_init(mutex_t *mp, int type, void *arg)
168 if (!(mp->mutex_flag & LOCK_INITED)) {
169 mp->mutex_type = (uint8_t)type;
170 atomic_or_16(&mp->mutex_flag, LOCK_INITED);
171 mp->mutex_magic = MUTEX_MAGIC;
172 } else if (type != mp->mutex_type ||
173 ((type & LOCK_PRIO_PROTECT) && mp->mutex_ceiling != ceil)) {
175 } else if (mutex_consistent(mp) != 0) {
180 register_lock(mp);
182 (void) memset(mp, 0, sizeof (*mp));
183 mp->mutex_type = (uint8_t)type;
184 mp->mutex_flag = LOCK_INITED;
185 mp->mutex_magic = MUTEX_MAGIC;
189 mp->mutex_ceiling = ceil;
200 ((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
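
The matches above fall inside mutex_init(): first-time initialization stamps mutex_type, LOCK_INITED, and MUTEX_MAGIC; reinitializing a robust lock is allowed only when the type and ceiling agree or mutex_consistent() succeeds, after which the lock is registered with the kernel via register_lock(). A minimal sketch, using only the public POSIX API (not libc internals), of what reaches this code:

    #include <pthread.h>

    /*
     * Hedged sketch: initializing a process-shared robust mutex drives the
     * LOCK_INITED/mutex_type bookkeeping and register_lock() shown above.
     */
    int
    init_robust_shared(pthread_mutex_t *m)
    {
        pthread_mutexattr_t a;
        int error;

        if ((error = pthread_mutexattr_init(&a)) != 0)
            return (error);
        (void) pthread_mutexattr_setpshared(&a, PTHREAD_PROCESS_SHARED);
        (void) pthread_mutexattr_setrobust(&a, PTHREAD_MUTEX_ROBUST);
        error = pthread_mutex_init(m, &a);
        (void) pthread_mutexattr_destroy(&a);
        return (error);
    }
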
208 * Delete mp from list of ceiling mutexes owned by curthread.
212 _ceil_mylist_del(mutex_t *mp)
221 if (mcp->mxchain_mx == mp) {
231 * Add mp to the list of ceiling mutexes owned by curthread.
235 _ceil_mylist_add(mutex_t *mp)
242 mcp->mxchain_mx = mp;
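
_ceil_mylist_add() and _ceil_mylist_del() maintain the per-thread chain (mxchain) of priority-ceiling mutexes the current thread owns, so its priority can be restored as each one is released. A hedged sketch of creating such a LOCK_PRIO_PROTECT mutex with the public API:

    #include <pthread.h>

    /* Locking a PTHREAD_PRIO_PROTECT mutex is what links it onto the
     * owner's mxchain via _ceil_mylist_add(). */
    int
    init_ceiling(pthread_mutex_t *m, int ceiling)
    {
        pthread_mutexattr_t a;
        int error;

        if ((error = pthread_mutexattr_init(&a)) != 0)
            return (error);
        (void) pthread_mutexattr_setprotocol(&a, PTHREAD_PRIO_PROTECT);
        (void) pthread_mutexattr_setprioceiling(&a, ceiling);
        error = pthread_mutex_init(m, &a);
        (void) pthread_mutexattr_destroy(&a);
        return (error);
    }
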
414 spin_lock_set(mutex_t *mp)
419 if (set_lock_byte(&mp->mutex_lockw) == 0) {
420 mp->mutex_owner = (uintptr_t)self;
427 if (mutex_queuelock_adaptive(mp) == 0 ||
428 set_lock_byte(&mp->mutex_lockw) == 0) {
429 mp->mutex_owner = (uintptr_t)self;
437 if (mutex_queuelock_adaptive(mp) == 0 ||
438 set_lock_byte(&mp->mutex_lockw) == 0) {
439 mp->mutex_owner = (uintptr_t)self;
447 (void) ___lwp_mutex_timedlock(mp, NULL, self);
451 spin_lock_clear(mutex_t *mp)
455 mp->mutex_owner = 0;
456 if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK) {
457 (void) ___lwp_mutex_wakeup(mp, 0);
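
spin_lock_set() first tries an atomic test-and-set of the lock byte, retries briefly via the queue lock, and finally blocks in the kernel (___lwp_mutex_timedlock()); spin_lock_clear() swaps the whole lockword to zero and wakes waiters if any WAITERMASK bits were set in the old value. The acquire/release shape, re-expressed as a toy with C11 atomics (illustrative only, not the libc code):

    #include <stdatomic.h>

    typedef struct {
        atomic_uchar lockw;        /* stands in for mutex_lockw */
    } toy_spin_t;

    static void
    toy_spin_lock(toy_spin_t *sp)
    {
        while (atomic_exchange(&sp->lockw, 1) != 0)
            continue;        /* pure spin; libc queues, then blocks */
    }

    static void
    toy_spin_unlock(toy_spin_t *sp)
    {
        /* The real code swaps the full 32-bit word and inspects the
         * returned waiter bits to decide whether to call wakeup. */
        atomic_store(&sp->lockw, 0);
    }
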
982 mutex_lock_kernel(mutex_t *mp, timespec_t *tsp, tdb_mutex_stats_t *msp)
986 int mtype = mp->mutex_type;
992 self->ul_wchan = mp;
995 self->ul_td_evbuf.eventdata = mp;
1003 DTRACE_PROBE1(plockstat, mutex__block, mp);
1010 if ((error = ___lwp_mutex_timedlock(mp, tsp, self)) != 0 &&
1022 if (mp->mutex_ownerpid == udp->pid) {
1040 ASSERT(mp->mutex_owner == (uintptr_t)self);
1041 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
1042 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
1044 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
1045 DTRACE_PROBE2(plockstat, mutex__error, mp, error);
1056 mutex_trylock_kernel(mutex_t *mp)
1060 int mtype = mp->mutex_type;
1069 if ((error = ___lwp_mutex_trylock(mp, self)) != 0 &&
1081 if (mp->mutex_ownerpid == udp->pid) {
1094 ASSERT(mp->mutex_owner == (uintptr_t)self);
1095 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
1097 DTRACE_PROBE2(plockstat, mutex__error, mp, error);
1246 mutex_trylock_adaptive(mutex_t *mp, int tryhard)
1252 volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw;
1253 volatile uint64_t *ownerp = (volatile uint64_t *)&mp->mutex_owner;
1259 ASSERT(!(mp->mutex_type & USYNC_PROCESS));
1261 if (MUTEX_OWNED(mp, self))
1267 if (mp->mutex_flag & LOCK_NOTRECOVERABLE) {
1268 ASSERT(mp->mutex_type & LOCK_ROBUST);
1301 if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1)
1303 DTRACE_PROBE1(plockstat, mutex__spin, mp);
1336 new_lockword = spinners_decr(&mp->mutex_lockword);
1361 if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
1362 ASSERT(mp->mutex_type & LOCK_ROBUST);
1368 (void) clear_lockbyte(&mp->mutex_lockword);
1376 DTRACE_PROBE3(plockstat, mutex__spun, mp, 0, count);
1379 DTRACE_PROBE2(plockstat, mutex__error, mp, error);
1383 DTRACE_PROBE3(plockstat, mutex__spun, mp, 1, count);
1385 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
1386 if (mp->mutex_flag & LOCK_OWNERDEAD) {
1387 ASSERT(mp->mutex_type & LOCK_ROBUST);
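
mutex_trylock_adaptive() spins on the lock byte only while the owner is running on a CPU, with spinners_incr()/spinners_decr() bounding how many threads spin at once; even on success it must re-check LOCK_NOTRECOVERABLE and back the acquisition out. A toy sketch of the adaptive idea, assuming a hypothetical owner_running() predicate (libc instead inspects the owner's ulwp_t):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define TOY_SPIN_MAX 1000        /* stand-in for the tunable spin count */

    static bool
    toy_adaptive_trylock(atomic_uchar *lockw, bool (*owner_running)(void))
    {
        for (int count = 0; count < TOY_SPIN_MAX; count++) {
            if (atomic_load(lockw) == 0 &&
                atomic_exchange(lockw, 1) == 0)
                return (true);        /* acquired while spinning */
            if (!owner_running())
                break;        /* owner off CPU: spinning is pointless */
        }
        return (false);        /* caller falls back to queueing/blocking */
    }
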
1400 mutex_queuelock_adaptive(mutex_t *mp)
1408 ASSERT(mp->mutex_type == USYNC_THREAD);
1413 lockp = (volatile uint8_t *)&mp->mutex_lockw;
1414 ownerp = (volatile uint64_t *)&mp->mutex_owner;
1436 mutex_trylock_process(mutex_t *mp, int tryhard)
1441 volatile uint64_t *lockp = (volatile uint64_t *)&mp->mutex_lockword64;
1450 (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
1451 self->ul_misaligned && !(mp->mutex_type & LOCK_ROBUST));
1454 ASSERT(mp->mutex_type & USYNC_PROCESS);
1456 if (shared_mutex_held(mp))
1462 if (mp->mutex_flag & LOCK_NOTRECOVERABLE) {
1463 ASSERT(mp->mutex_type & LOCK_ROBUST);
1475 if (set_lock_byte(&mp->mutex_lockw) == 0) {
1476 mp->mutex_ownerpid = udp->pid;
1477 mp->mutex_owner = (uintptr_t)self;
1484 mp->mutex_owner = (uintptr_t)self;
1485 /* mp->mutex_ownerpid was set by set_lock_byte64() */
1504 if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1)
1506 DTRACE_PROBE1(plockstat, mutex__spin, mp);
1512 set_lock_byte(&mp->mutex_lockw) == 0) {
1513 mp->mutex_ownerpid = udp->pid;
1514 mp->mutex_owner = (uintptr_t)self;
1522 mp->mutex_owner = (uintptr_t)self;
1523 /* mp->mutex_ownerpid was set by set_lock_byte64() */
1531 new_lockword = spinners_decr(&mp->mutex_lockword);
1551 if (set_lock_byte(&mp->mutex_lockw) == 0) {
1552 mp->mutex_ownerpid = udp->pid;
1553 mp->mutex_owner = (uintptr_t)self;
1559 mp->mutex_owner = (uintptr_t)self;
1560 /* mp->mutex_ownerpid was set by set_lock_byte64() */
1567 if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
1568 ASSERT(mp->mutex_type & LOCK_ROBUST);
1573 mp->mutex_owner = 0;
1574 /* mp->mutex_ownerpid is cleared by clear_lockbyte64() */
1575 (void) clear_lockbyte64(&mp->mutex_lockword64);
1583 DTRACE_PROBE3(plockstat, mutex__spun, mp, 0, count);
1586 DTRACE_PROBE2(plockstat, mutex__error, mp, error);
1590 DTRACE_PROBE3(plockstat, mutex__spun, mp, 1, count);
1592 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
1593 if (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED)) {
1594 ASSERT(mp->mutex_type & LOCK_ROBUST);
1595 if (mp->mutex_flag & LOCK_OWNERDEAD)
1597 else if (mp->mutex_type & USYNC_PROCESS_ROBUST)
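
For USYNC_PROCESS locks the lock byte and mutex_ownerpid share one 64-bit lockword, so set_lock_byte64()/clear_lockbyte64() can update both in a single atomic; a misaligned mutex (the _LONG_LONG_ALIGNMENT test above) falls back to the 32-bit lock byte plus a separate pid store. A toy illustration of the one-atomic-covers-both-fields idea (the real mutex_lockword64 layout differs):

    #include <stdatomic.h>
    #include <stdint.h>

    static int
    toy_lock64(_Atomic uint64_t *lw64, uint32_t pid)
    {
        uint64_t expected = 0;
        uint64_t locked = ((uint64_t)1 << 56) | pid;    /* lock byte + pid */

        return (atomic_compare_exchange_strong(lw64, &expected, locked) ?
            0 : -1);        /* -1: already held; caller spins or blocks */
    }
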
1614 mutex_wakeup(mutex_t *mp)
1626 qp = queue_lock(mp, MX);
1629 mp->mutex_waiters = more;
1639 mutex_wakeup_all(mutex_t *mp)
1665 qp = queue_lock(mp, MX);
1670 ASSERT(ulwp->ul_wchan == mp);
1682 mp->mutex_waiters = 0;
1705 mutex_unlock_queue(mutex_t *mp, int release_all)
1711 DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
1713 mp->mutex_owner = 0;
1714 old_lockword = clear_lockbyte(&mp->mutex_lockword);
1719 mutex_wakeup_all(mp);
1721 lwpid = mutex_wakeup(mp);
1733 mutex_unlock_process(mutex_t *mp, int release_all)
1738 DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
1740 mp->mutex_owner = 0;
1743 if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
1744 self->ul_misaligned && !(mp->mutex_type & LOCK_ROBUST)) {
1746 mp->mutex_ownerpid = 0;
1747 old_lockword = clear_lockbyte(&mp->mutex_lockword);
1751 (void) ___lwp_mutex_wakeup(mp, release_all);
1758 /* mp->mutex_ownerpid is cleared by clear_lockbyte64() */
1759 old_lockword64 = clear_lockbyte64(&mp->mutex_lockword64);
1763 (void) ___lwp_mutex_wakeup(mp, release_all);
1778 * We failed set_lock_byte(&mp->mutex_lockw) before coming here.
1782 mutex_lock_queue(ulwp_t *self, tdb_mutex_stats_t *msp, mutex_t *mp,
1792 self->ul_wchan = mp;
1794 self->ul_td_evbuf.eventdata = mp;
1802 DTRACE_PROBE1(plockstat, mutex__block, mp);
1810 qp = queue_lock(mp, MX);
1812 mp->mutex_waiters = 1;
1814 if (set_lock_byte(&mp->mutex_lockw) == 0) {
1815 mp->mutex_owner = (uintptr_t)self;
1816 mp->mutex_waiters = dequeue_self(qp);
1835 qp = queue_lock(mp, MX);
1838 mp->mutex_waiters = queue_waiter(qp)? 1 : 0;
1843 if (set_lock_byte(&mp->mutex_lockw) == 0) {
1844 mp->mutex_owner = (uintptr_t)self;
1848 mp->mutex_waiters = 1;
1852 self->ul_wchan == mp);
1855 mp->mutex_waiters = dequeue_self(qp);
1867 if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
1868 ASSERT(mp->mutex_type & LOCK_ROBUST);
1873 mp->mutex_owner = 0;
1874 (void) clear_lockbyte(&mp->mutex_lockword);
1884 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
1885 DTRACE_PROBE2(plockstat, mutex__error, mp, error);
1887 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
1888 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
1889 if (mp->mutex_flag & LOCK_OWNERDEAD) {
1890 ASSERT(mp->mutex_type & LOCK_ROBUST);
1899 mutex_recursion(mutex_t *mp, int mtype, int try)
1901 ASSERT(mutex_held(mp));
1906 if (mp->mutex_rcount == RECURSION_MAX) {
1907 DTRACE_PROBE2(plockstat, mutex__error, mp, EAGAIN);
1910 mp->mutex_rcount++;
1911 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1, 0);
1915 DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
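
mutex_recursion() handles the nested-acquire cases: a LOCK_RECURSIVE mutex bumps mutex_rcount, failing with EAGAIN only at RECURSION_MAX, while a plain LOCK_ERRORCHECK mutex returns EDEADLK instead. The observable behavior through the public API:

    #include <pthread.h>

    void
    nested_lock_demo(void)
    {
        pthread_mutexattr_t a;
        pthread_mutex_t m;

        (void) pthread_mutexattr_init(&a);
        (void) pthread_mutexattr_settype(&a, PTHREAD_MUTEX_RECURSIVE);
        (void) pthread_mutex_init(&m, &a);
        (void) pthread_mutex_lock(&m);
        (void) pthread_mutex_lock(&m);        /* ok: rcount becomes 1 */
        (void) pthread_mutex_unlock(&m);      /* rcount back to 0 */
        (void) pthread_mutex_unlock(&m);      /* actually releases */
        (void) pthread_mutex_destroy(&m);
        (void) pthread_mutexattr_destroy(&a);
    }
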
1928 register_lock(mutex_t *mp)
1931 uint_t hash = LOCK_HASH(mp);
1954 if (rlp->robust_lock == mp) /* already registered */
1968 if (rlp->robust_lock == mp) { /* already registered */
1986 rlp->robust_lock = mp;
1994 rlp->robust_lock = mp;
2004 (void) ___lwp_mutex_register(mp, &rlp->robust_lock);
2044 mutex_lock_internal(mutex_t *mp, timespec_t *tsp, int try)
2048 int mtype = mp->mutex_type;
2049 tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
2064 if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && mutex_held(mp))
2065 return (mutex_recursion(mp, mtype, try));
2068 tsp == NULL && mutex_held(mp))
2069 lock_error(mp, "mutex_lock", NULL, NULL);
2074 DTRACE_PROBE2(plockstat, mutex__error, mp, EPERM);
2077 ceil = mp->mutex_ceiling;
2080 DTRACE_PROBE2(plockstat, mutex__error, mp, EINVAL);
2083 if ((error = _ceil_mylist_add(mp)) != 0) {
2084 DTRACE_PROBE2(plockstat, mutex__error, mp, error);
2093 register_lock(mp);
2098 error = mutex_trylock_kernel(mp);
2100 error = mutex_lock_kernel(mp, tsp, msp);
2109 mp->mutex_lockw = LOCKSET;
2114 mp->mutex_lockw = LOCKSET;
2138 error = mutex_trylock_process(mp, try == MUTEX_LOCK);
2140 error = mutex_lock_kernel(mp, tsp, msp);
2142 error = mutex_trylock_adaptive(mp, try == MUTEX_LOCK);
2144 error = mutex_lock_queue(self, msp, mp, tsp);
2152 remember_lock(mp);
2158 (void) _ceil_mylist_del(mp);
2177 fast_process_lock(mutex_t *mp, timespec_t *tsp, int mtype, int try)
2191 if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
2193 if (set_lock_byte(&mp->mutex_lockw) == 0) {
2194 mp->mutex_ownerpid = udp->pid;
2195 mp->mutex_owner = (uintptr_t)self;
2197 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2202 if (set_lock_byte64(&mp->mutex_lockword64, udp->pid) == 0) {
2203 mp->mutex_owner = (uintptr_t)self;
2204 /* mp->mutex_ownerpid was set by set_lock_byte64() */
2206 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2211 if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && shared_mutex_held(mp))
2212 return (mutex_recursion(mp, mtype, try));
2215 if (mutex_trylock_process(mp, 1) == 0)
2217 return (mutex_lock_kernel(mp, tsp, NULL));
2228 mutex_lock_impl(mutex_t *mp, timespec_t *tsp)
2231 int mtype = mp->mutex_type;
2234 if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
2236 lock_error(mp, "mutex_lock", NULL, "mutex is misaligned");
2251 if (mp->mutex_lockw == 0) {
2253 mp->mutex_lockw = LOCKSET;
2254 mp->mutex_owner = (uintptr_t)self;
2256 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2259 if (mtype && MUTEX_OWNER(mp) == self)
2260 return (mutex_recursion(mp, mtype, MUTEX_LOCK));
2274 MUTEX_OWNER(mp) == self && !self->ul_async_safe) {
2275 DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
2289 return (fast_process_lock(mp, tsp, mtype, MUTEX_LOCK));
2291 if (set_lock_byte(&mp->mutex_lockw) == 0) {
2292 mp->mutex_owner = (uintptr_t)self;
2294 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2298 if (mtype && MUTEX_OWNER(mp) == self)
2299 return (mutex_recursion(mp, mtype, MUTEX_LOCK));
2300 if (mutex_trylock_adaptive(mp, 1) != 0)
2301 return (mutex_lock_queue(self, NULL, mp, tsp));
2306 return (mutex_lock_internal(mp, tsp, MUTEX_LOCK));
2312 mutex_lock(mutex_t *mp)
2315 return (mutex_lock_impl(mp, NULL));
2319 mutex_enter(mutex_t *mp)
2322 int attr = mp->mutex_type & ALL_ATTRIBUTES;
2329 mutex_panic(mp, "mutex_enter: bad mutex type");
2331 ret = mutex_lock(mp);
2333 mutex_panic(mp, "recursive mutex_enter");
2335 mutex_panic(mp, "excessive recursive mutex_enter");
2337 mutex_panic(mp, "unknown mutex_enter failure");
2342 pthread_mutex_timedlock(pthread_mutex_t *_RESTRICT_KYWD mp,
2350 error = mutex_lock_impl((mutex_t *)mp, &tslocal);
2357 pthread_mutex_reltimedlock_np(pthread_mutex_t *_RESTRICT_KYWD mp,
2365 error = mutex_lock_impl((mutex_t *)mp, &tslocal);
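
pthread_mutex_timedlock() hands an absolute CLOCK_REALTIME deadline down to mutex_lock_impl() as tsp; pthread_mutex_reltimedlock_np() is the non-portable variant taking a relative timeout. A hedged usage sketch of the standard entry point:

    #include <pthread.h>
    #include <time.h>

    int
    lock_with_timeout(pthread_mutex_t *m, time_t seconds)
    {
        struct timespec deadline;

        (void) clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += seconds;
        return (pthread_mutex_timedlock(m, &deadline));  /* ETIMEDOUT on expiry */
    }
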
2373 mutex_trylock(mutex_t *mp)
2377 int mtype = mp->mutex_type;
2395 if (mp->mutex_lockw == 0) {
2397 mp->mutex_lockw = LOCKSET;
2398 mp->mutex_owner = (uintptr_t)self;
2400 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2403 if (mtype && MUTEX_OWNER(mp) == self)
2404 return (mutex_recursion(mp, mtype, MUTEX_TRY));
2417 return (fast_process_lock(mp, NULL, mtype, MUTEX_TRY));
2419 if (set_lock_byte(&mp->mutex_lockw) == 0) {
2420 mp->mutex_owner = (uintptr_t)self;
2422 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2426 if (mtype && MUTEX_OWNER(mp) == self)
2427 return (mutex_recursion(mp, mtype, MUTEX_TRY));
2436 return (mutex_lock_internal(mp, NULL, MUTEX_TRY));
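
mutex_trylock() runs the same fast paths as mutex_lock() but never blocks; EBUSY means the lock was held and no state changed. A small usage sketch:

    #include <pthread.h>

    /* Try the fast path; skip the work rather than block on EBUSY. */
    int
    try_or_skip(pthread_mutex_t *m, void (*locked_work)(void))
    {
        int error = pthread_mutex_trylock(m);

        if (error == 0) {
            locked_work();
            (void) pthread_mutex_unlock(m);
        }
        return (error);        /* EBUSY: lock held, nothing changed */
    }
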
2440 mutex_unlock_internal(mutex_t *mp, int retain_robust_flags)
2444 int mtype = mp->mutex_type;
2451 !mutex_held(mp))
2454 if (self->ul_error_detection && !mutex_held(mp))
2455 lock_error(mp, "mutex_unlock", NULL, NULL);
2457 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
2458 mp->mutex_rcount--;
2459 DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
2463 if ((msp = MUTEX_STATS(mp, udp)) != NULL)
2467 (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) {
2469 mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED);
2470 mp->mutex_flag |= LOCK_NOTRECOVERABLE;
2472 release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0);
2476 mp->mutex_owner = 0;
2477 /* mp->mutex_ownerpid is cleared by ___lwp_mutex_unlock() */
2478 DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
2479 mp->mutex_lockw = LOCKCLEAR;
2481 error = ___lwp_mutex_unlock(mp);
2484 mutex_unlock_process(mp, release_all);
2486 if ((lwpid = mutex_unlock_queue(mp, release_all)) != 0) {
2493 forget_lock(mp);
2495 if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp))
2504 mutex_unlock(mutex_t *mp)
2507 int mtype = mp->mutex_type;
2526 if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self))
2528 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
2529 mp->mutex_rcount--;
2530 DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
2539 mp->mutex_owner = 0;
2540 mp->mutex_lockword = 0;
2542 DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
2554 if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) {
2567 if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self))
2569 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
2570 mp->mutex_rcount--;
2571 DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
2583 if ((mtype & LOCK_ERRORCHECK) && !shared_mutex_held(mp))
2585 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
2586 mp->mutex_rcount--;
2587 DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
2590 mutex_unlock_process(mp, 0);
2597 return (mutex_unlock_internal(mp, 0));
2601 mutex_exit(mutex_t *mp)
2604 int attr = mp->mutex_type & ALL_ATTRIBUTES;
2608 mutex_panic(mp, "mutex_exit: bad mutex type");
2610 ret = mutex_unlock(mp);
2612 mutex_panic(mp, "mutex_exit: not owner");
2614 mutex_panic(mp, "unknown mutex_exit failure");
2628 lmutex_lock(mutex_t *mp)
2633 ASSERT(mp->mutex_type == USYNC_THREAD);
2644 ASSERT(mp->mutex_lockw == 0);
2645 mp->mutex_lockw = LOCKSET;
2646 mp->mutex_owner = (uintptr_t)self;
2647 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2649 tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
2654 if (set_lock_byte(&mp->mutex_lockw) == 0) {
2655 mp->mutex_owner = (uintptr_t)self;
2656 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2657 } else if (mutex_trylock_adaptive(mp, 1) != 0) {
2658 (void) mutex_lock_queue(self, msp, mp, NULL);
2667 lmutex_unlock(mutex_t *mp)
2672 ASSERT(mp->mutex_type == USYNC_THREAD);
2682 mp->mutex_owner = 0;
2683 mp->mutex_lockword = 0;
2684 DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
2686 tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
2691 if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) {
2706 sig_mutex_lock(mutex_t *mp)
2711 (void) mutex_lock(mp);
2715 sig_mutex_unlock(mutex_t *mp)
2719 (void) mutex_unlock(mp);
2724 sig_mutex_trylock(mutex_t *mp)
2730 if ((error = mutex_trylock(mp)) != 0)
2739 sig_cond_wait(cond_t *cv, mutex_t *mp)
2745 error = __cond_wait(cv, mp);
2747 sig_mutex_unlock(mp);
2749 sig_mutex_lock(mp);
2759 sig_cond_reltimedwait(cond_t *cv, mutex_t *mp, const timespec_t *ts)
2765 error = __cond_reltimedwait(cv, mp, ts);
2767 sig_mutex_unlock(mp);
2769 sig_mutex_lock(mp);
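
The sig_mutex_* and sig_cond_* wrappers acquire locks with signals deferred, so a signal handler cannot interrupt a critical section and deadlock on the same lock (libc appears to use a cheap per-thread deferral flag rather than a mask syscall). The same discipline expressed with the portable API, as an illustrative sketch only:

    #include <pthread.h>
    #include <signal.h>

    static void
    blocked_mutex_lock(pthread_mutex_t *m, sigset_t *oset)
    {
        sigset_t all;

        (void) sigfillset(&all);
        (void) pthread_sigmask(SIG_BLOCK, &all, oset);   /* defer signals */
        (void) pthread_mutex_lock(m);
    }

    static void
    blocked_mutex_unlock(pthread_mutex_t *m, const sigset_t *oset)
    {
        (void) pthread_mutex_unlock(m);
        (void) pthread_sigmask(SIG_SETMASK, oset, NULL); /* take any pending */
    }
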
2782 cancel_safe_mutex_lock(mutex_t *mp)
2784 (void) mutex_lock(mp);
2789 cancel_safe_mutex_trylock(mutex_t *mp)
2793 if ((error = mutex_trylock(mp)) == 0)
2799 cancel_safe_mutex_unlock(mutex_t *mp)
2805 (void) mutex_unlock(mp);
2837 volatile mutex_t *mp = (volatile mutex_t *)mparg;
2841 return (MUTEX_OWNED(mp, self) && mp->mutex_ownerpid == udp->pid);
2848 volatile mutex_t *mp = (volatile mutex_t *)mparg;
2852 return (MUTEX_OWNED(mp, curthread));
2858 mutex_destroy(mutex_t *mp)
2860 if (mp->mutex_type & USYNC_PROCESS)
2861 forget_lock(mp);
2862 (void) memset(mp, 0, sizeof (*mp));
2863 tdb_sync_obj_deregister(mp);
2870 mutex_consistent(mutex_t *mp)
2876 if (mutex_held(mp) &&
2877 (mp->mutex_type & LOCK_ROBUST) &&
2878 (mp->mutex_flag & LOCK_INITED) &&
2879 (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) {
2880 mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED);
2881 mp->mutex_rcount = 0;
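
mutex_consistent() is the implementation behind pthread_mutex_consistent(): valid only on a held robust mutex marked LOCK_OWNERDEAD or LOCK_UNMAPPED, it clears those flags and resets the recursion count. Typical recovery through the public API:

    #include <errno.h>
    #include <pthread.h>

    int
    lock_robust(pthread_mutex_t *m)
    {
        int error = pthread_mutex_lock(m);

        if (error == EOWNERDEAD) {
            /* repair the shared state the dead owner left behind, then: */
            error = pthread_mutex_consistent(m);
        }
        return (error);   /* ENOTRECOVERABLE if a holder skipped recovery */
    }
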
2895 mutex_t *mp = (mutex_t *)lock;
2897 (void) memset(mp, 0, sizeof (*mp));
2899 mp->mutex_type = USYNC_PROCESS;
2901 mp->mutex_type = USYNC_THREAD;
2902 mp->mutex_flag = LOCK_INITED;
2903 mp->mutex_magic = MUTEX_MAGIC;
2912 if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
2929 mutex_t *mp = (mutex_t *)lock;
2934 if (set_lock_byte(&mp->mutex_lockw) != 0)
2937 mp->mutex_owner = (uintptr_t)self;
2938 if (mp->mutex_type == USYNC_PROCESS)
2939 mp->mutex_ownerpid = self->ul_uberdata->pid;
2940 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
2949 mutex_t *mp = (mutex_t *)lock;
2951 volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw;
2956 DTRACE_PROBE1(plockstat, mutex__spin, mp);
2973 mp->mutex_owner = (uintptr_t)self;
2974 if (mp->mutex_type == USYNC_PROCESS)
2975 mp->mutex_ownerpid = self->ul_uberdata->pid;
2978 DTRACE_PROBE3(plockstat, mutex__spun, mp, 1, count);
2980 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
2987 mutex_t *mp = (mutex_t *)lock;
2991 mp->mutex_owner = 0;
2992 mp->mutex_ownerpid = 0;
2993 DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
2994 (void) atomic_swap_32(&mp->mutex_lockword, 0);
3112 mutex_t *mp;
3127 if ((mp = *lockptr) != NULL &&
3128 mutex_held(mp) &&
3129 (mp->mutex_type & (LOCK_ROBUST | LOCK_PRIO_INHERIT)) ==
3131 mp->mutex_rcount = 0;
3132 if (!(mp->mutex_flag & LOCK_UNMAPPED))
3133 mp->mutex_flag |= LOCK_OWNERDEAD;
3134 (void) mutex_unlock_internal(mp, 1);
3178 cond_sleep_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
3199 self->ul_cvmutex = mp;
3202 if (mp->mutex_flag & LOCK_OWNERDEAD) {
3203 mp->mutex_flag &= ~LOCK_OWNERDEAD;
3204 mp->mutex_flag |= LOCK_NOTRECOVERABLE;
3206 release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0);
3207 lwpid = mutex_unlock_queue(mp, release_all);
3240 mqp = queue_lock(mp, MX);
3251 mp->mutex_waiters = dequeue_self(mqp);
3295 cond_wait_check_alignment(cond_t *cvp, mutex_t *mp)
3297 if ((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1))
3298 lock_error(mp, "cond_wait", cvp, "mutex is misaligned");
3300 lock_error(mp, "cond_wait", cvp, "condvar is misaligned");
3304 cond_wait_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
3311 cond_wait_check_alignment(cvp, mp);
3330 error = cond_sleep_queue(cvp, mp, tsp);
3335 if ((merror = mutex_lock_impl(mp, NULL)) != 0)
3352 cond_sleep_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
3354 int mtype = mp->mutex_type;
3358 if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp))
3364 mp->mutex_owner = 0;
3365 /* mp->mutex_ownerpid is cleared by ___lwp_cond_wait() */
3367 mp->mutex_lockw = LOCKCLEAR;
3382 error = ___lwp_cond_wait(cvp, mp, tsp, 1);
3391 cond_wait_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
3398 cond_wait_check_alignment(cvp, mp);
3406 error = cond_sleep_kernel(cvp, mp, tsp);
3415 if ((merror = mutex_lock_impl(mp, NULL)) != 0)
3431 cond_wait_common(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
3433 int mtype = mp->mutex_type;
3438 tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
3476 if (!mutex_held(mp))
3477 lock_error(mp, "cond_wait", cvp, NULL);
3478 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0)
3479 lock_error(mp, "recursive mutex in cond_wait",
3483 lock_error(mp, "cond_wait", cvp,
3488 lock_error(mp, "cond_wait", cvp,
3500 rcount = mp->mutex_rcount;
3501 mp->mutex_rcount = 0;
3505 error = cond_wait_kernel(cvp, mp, tsp);
3507 error = cond_wait_queue(cvp, mp, tsp);
3508 mp->mutex_rcount = rcount;
3530 __cond_wait(cond_t *cvp, mutex_t *mp)
3536 if ((mp->mutex_type & (LOCK_ERRORCHECK | LOCK_ROBUST)) &&
3537 !mutex_held(mp))
3545 (cvp->cond_type | mp->mutex_type | gflags->uf_trs_ted |
3548 return (cond_wait_queue(cvp, mp, NULL));
3553 return (cond_wait_common(cvp, mp, NULL));
3558 cond_wait(cond_t *cvp, mutex_t *mp)
3563 error = __cond_wait(cvp, mp);
3576 pthread_mutex_t *_RESTRICT_KYWD mp)
3580 error = cond_wait((cond_t *)cvp, (mutex_t *)mp);
3588 __cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
3594 if ((mp->mutex_type & (LOCK_ERRORCHECK | LOCK_ROBUST)) &&
3595 !mutex_held(mp))
3601 error = cond_wait_common(cvp, mp, &reltime);
3617 cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
3622 error = __cond_timedwait(cvp, mp, abstime);
3635 pthread_mutex_t *_RESTRICT_KYWD mp,
3640 error = cond_timedwait((cond_t *)cvp, (mutex_t *)mp, abstime);
3652 __cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
3656 if ((mp->mutex_type & (LOCK_ERRORCHECK | LOCK_ROBUST)) &&
3657 !mutex_held(mp))
3660 return (cond_wait_common(cvp, mp, &tslocal));
3664 cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
3669 error = __cond_reltimedwait(cvp, mp, reltime);
3679 pthread_mutex_t *_RESTRICT_KYWD mp,
3684 error = cond_reltimedwait((cond_t *)cvp, (mutex_t *)mp, reltime);
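
All of these wait paths, queue-based and kernel-based alike, drop the mutex, sleep, and reacquire the mutex before returning, and may wake spuriously; that is why the predicate must be re-checked in a loop. A hedged usage sketch with hypothetical names qm/qcv/qlen:

    #include <pthread.h>

    static pthread_mutex_t qm = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t qcv = PTHREAD_COND_INITIALIZER;
    static int qlen;

    void
    consume_one(void)
    {
        (void) pthread_mutex_lock(&qm);
        while (qlen == 0)
            (void) pthread_cond_wait(&qcv, &qm);  /* drops and reacquires qm */
        qlen--;
        (void) pthread_mutex_unlock(&qm);
    }
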
3704 mutex_t *mp;
3750 mp = ulwp->ul_cvmutex; /* the mutex he will acquire */
3752 ASSERT(mp != NULL);
3754 if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
3765 mqp = queue_lock(mp, MX);
3767 mp->mutex_waiters = 1;
3834 mutex_t *mp;
3879 mp = ulwp->ul_cvmutex; /* his mutex */
3881 ASSERT(mp != NULL);
3882 if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
3891 if (mp != mp_cache) {
3892 mp_cache = mp;
3895 mqp = queue_lock(mp, MX);
3898 mp->mutex_waiters = 1;
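
The signal/broadcast matches above test MUTEX_OWNED(mp, self): when the signalling thread holds the associated mutex, a waiter can be moved straight onto the mutex's sleep queue (mp->mutex_waiters = 1) instead of being woken only to block again. A companion sketch to the wait loop earlier, reusing its hypothetical qm/qcv/qlen:

    void
    produce_one(void)
    {
        (void) pthread_mutex_lock(&qm);
        qlen++;
        (void) pthread_cond_signal(&qcv);  /* signalled while holding qm */
        (void) pthread_mutex_unlock(&qm);
    }
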