Lines Matching refs:lock

56         mutex           lock;
93 { ASSERT(find_thread(NULL) == (r)->lock.holder); }
124 # define RECURSIVE_LOCK_HOLDER(recursiveLock) ((recursiveLock)->lock.holder)
134 extern void recursive_lock_init(recursive_lock *lock, const char *name);
136 extern void recursive_lock_init_etc(recursive_lock *lock, const char *name,
138 extern void recursive_lock_destroy(recursive_lock *lock);
139 extern status_t recursive_lock_lock(recursive_lock *lock);
140 extern status_t recursive_lock_trylock(recursive_lock *lock);
141 extern void recursive_lock_unlock(recursive_lock *lock);
142 extern int32 recursive_lock_get_recursion(recursive_lock *lock);
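
A recursive_lock wraps a mutex (the "mutex lock;" member at line 56) together with a recursion count, so the thread that already holds it may lock it again without deadlocking. A minimal usage sketch built only from the functions declared above; sLock, do_work() and example() are placeholder names, not from the header:

    static recursive_lock sLock;

    static void
    do_work(void)
    {
        recursive_lock_lock(&sLock);    // re-entry by the holding thread
        // ... work on the guarded state ...
        recursive_lock_unlock(&sLock);  // drops the recursion count by one
    }

    static void
    example(void)
    {
        recursive_lock_init(&sLock, "example lock");
        recursive_lock_lock(&sLock);
        do_work();                      // nested acquisition, no deadlock
        recursive_lock_unlock(&sLock);
        recursive_lock_destroy(&sLock);
    }
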
144 extern void rw_lock_init(rw_lock* lock, const char* name);
146 extern void rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags);
147 extern void rw_lock_destroy(rw_lock* lock);
148 extern status_t rw_lock_write_lock(rw_lock* lock);
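
rw_lock usage in a nutshell: the write side is declared above, while the read side is the inline fast path further down. A sketch assuming rw_lock_init() has already run; sListLock and the comments are placeholders:

    static rw_lock sListLock;

    static void
    reader(void)
    {
        rw_lock_read_lock(&sListLock);      // shared with other readers
        // ... read the guarded structure ...
        rw_lock_read_unlock(&sListLock);
    }

    static void
    writer(void)
    {
        rw_lock_write_lock(&sListLock);     // exclusive access
        // ... modify the guarded structure ...
        rw_lock_write_unlock(&sListLock);
    }
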
150 extern void mutex_init(mutex* lock, const char* name);
152 extern void mutex_init_etc(mutex* lock, const char* name, uint32 flags);
153 extern void mutex_destroy(mutex* lock);
156 // for the lock happens atomically. I.e. if "from" guards the object "to" belongs
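
The comment fragment at line 156 belongs to mutex_switch_lock(mutex* from, mutex* to), whose declaration on the intervening unmatched lines is not itself shown: it unlocks "from" and starts waiting on "to" as one atomic step, which makes the classic lookup-then-lock handover safe. A sketch of that pattern; lookup_object(), sListLock and the other names are placeholders:

    mutex_lock(&sListLock);
    Object* object = lookup_object(id);     // found while holding sListLock
    // Since sListLock guards the object, atomically trading it for the
    // object's own lock leaves no window in which the object could be
    // destroyed unobserved.
    status_t status = mutex_switch_lock(&sListLock, &object->lock);
    if (status == B_OK) {
        // ... use the object ...
        mutex_unlock(&object->lock);
    }
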
166 extern status_t _rw_lock_read_lock(rw_lock* lock);
167 extern status_t _rw_lock_read_lock_with_timeout(rw_lock* lock,
169 extern void _rw_lock_read_unlock(rw_lock* lock, bool threadsLocked);
170 extern void _rw_lock_write_unlock(rw_lock* lock, bool threadsLocked);
172 extern status_t _mutex_lock(mutex* lock, bool threadsLocked);
173 extern void _mutex_unlock(mutex* lock, bool threadsLocked);
174 extern status_t _mutex_trylock(mutex* lock);
175 extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,
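
The underscore-prefixed functions above are the blocking slow paths; the public inline functions below attempt a single atomic operation on lock->count and call into them only on contention (the threadsLocked flag tells the slow path whether the caller already holds the global thread lock). The shape of that fast path, as a self-contained sketch that substitutes C11 atomics for the kernel's atomic_add():

    #include <stdatomic.h>

    typedef int status_t;
    #define B_OK 0

    typedef struct {
        atomic_int count;               // 0 = free, < 0 = held/contended
    } fake_mutex;                       // stand-in for the kernel mutex

    static status_t
    slow_path_lock(fake_mutex* lock)    // stands in for _mutex_lock();
    {                                   // the real one blocks the thread
        (void)lock;
        return B_OK;
    }

    static inline status_t
    fast_path_lock(fake_mutex* lock)
    {
        // One atomic decrement; the returned old value says who won.
        if (atomic_fetch_add(&lock->count, -1) < 0)
            return slow_path_lock(lock);    // contended: go block
        return B_OK;                        // uncontended: locked
    }
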
180 rw_lock_read_lock(rw_lock* lock)
183 return rw_lock_write_lock(lock);
185 int32 oldCount = atomic_add(&lock->count, 1);
187 return _rw_lock_read_lock(lock);
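
How the rw_lock_read_lock() fragments fit together: with KDEBUG_RW_LOCK_DEBUG set, every read lock degrades to a write lock to flush out misuse (line 183); otherwise a reader announces itself with one atomic increment and takes the slow path only when the old count signals writer activity. A hedged reconstruction of the elided lines; the threshold constant RW_LOCK_WRITER_COUNT_BASE is an assumption about this header, not a matched line:

    static inline status_t
    rw_lock_read_lock(rw_lock* lock)
    {
    #if KDEBUG_RW_LOCK_DEBUG
        return rw_lock_write_lock(lock);
    #else
        int32 oldCount = atomic_add(&lock->count, 1);
        if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)  // assumed constant:
            return _rw_lock_read_lock(lock);        // writer active/waiting
        return B_OK;
    #endif
    }
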
194 rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
198 return mutex_lock_with_timeout(lock, timeoutFlags, timeout);
200 int32 oldCount = atomic_add(&lock->count, 1);
202 return _rw_lock_read_lock_with_timeout(lock, timeoutFlags, timeout);
209 rw_lock_read_unlock(rw_lock* lock)
212 rw_lock_write_unlock(lock);
214 int32 oldCount = atomic_add(&lock->count, -1);
216 _rw_lock_read_unlock(lock, false);
222 rw_lock_write_unlock(rw_lock* lock)
224 _rw_lock_write_unlock(lock, false);
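
Unlocking is asymmetric: rw_lock_read_unlock() decrements the count and enters _rw_lock_read_unlock() only when the old value shows a writer that may need waking, while rw_lock_write_unlock() always delegates to _rw_lock_write_unlock(), since giving up exclusivity must always examine the waiter queue. A trace sketch under the same writer-count-base assumption as above:

    count == 1                              // one reader, no writer
    atomic_add(&count, -1) -> 1             // below the writer base:
                                            // fast path, nothing to wake
    count == WRITER_BASE + 1                // one reader, one queued writer
    atomic_add(&count, -1) -> WRITER_BASE + 1   // writer bits set: slow
                                                // path wakes the writer
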
229 mutex_lock(mutex* lock)
232 return _mutex_lock(lock, false);
234 if (atomic_add(&lock->count, -1) < 0)
235 return _mutex_lock(lock, false);
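
The mutex fast path rides on the sign of count: 0 is free, and every locker decrements it atomically. An old value of 0 means the decrementing thread now owns the mutex; anything negative means it lost the race and must block in _mutex_lock(). (In KDEBUG builds, line 232, the inline shortcut is skipped entirely.) A worked sequence, assuming count starts at 0:

    count == 0                          // unlocked
    A: atomic_add(&count, -1) -> 0      // old value 0: A owns it, count == -1
    B: atomic_add(&count, -1) -> -1     // old value < 0: B blocks in
                                        // _mutex_lock(), count == -2
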
242 mutex_lock_threads_locked(mutex* lock)
245 return _mutex_lock(lock, true);
247 if (atomic_add(&lock->count, -1) < 0)
248 return _mutex_lock(lock, true);
255 mutex_trylock(mutex* lock)
258 return _mutex_trylock(lock);
260 if (atomic_test_and_set(&lock->count, -1, 0) != 0)
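
mutex_trylock() swaps the unconditional decrement for a compare-and-swap: atomic_test_and_set(&lock->count, -1, 0) stores -1 only if the current value is 0 and returns the old value, so a nonzero result means the mutex was already held and the call fails without blocking. A usage sketch; B_WOULD_BLOCK as the failure code is an assumption here:

    if (mutex_trylock(&sLock) != B_OK) {
        // Assumed failure code: B_WOULD_BLOCK. Someone else holds the
        // mutex; do something else rather than blocking.
        return;
    }
    // ... critical section ...
    mutex_unlock(&sLock);
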
268 mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
271 return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
273 if (atomic_add(&lock->count, -1) < 0)
274 return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
281 mutex_unlock(mutex* lock)
284 if (atomic_add(&lock->count, 1) < -1)
286 _mutex_unlock(lock, false);
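
Unlock mirrors the lock fast path: the old value of the increment reveals waiters. -1 means the holder was alone and the inline path is done; anything below -1 means blocked threads exist and _mutex_unlock() must wake one. Continuing the trace from above:

    count == -2                         // A holds the mutex, B is blocked
    A: atomic_add(&count, 1) -> -2      // old value < -1: waiters exist,
                                        // _mutex_unlock() hands off to B
    B resumes inside _mutex_lock()      // B now owns it, count == -1
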
291 mutex_transfer_lock(mutex* lock, thread_id thread)
294 lock->holder = thread;
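
mutex_transfer_lock() hands a held mutex to another thread without an unlock/lock pair: the holder field is simply rewritten, so the target thread becomes the one expected to unlock (the holder bookkeeping shown here is a KDEBUG-level feature of this header). A sketch; workerThread and how it is spawned are placeholders:

    mutex_lock(&sLock);
    // Hand the still-locked mutex to the worker; from here on only the
    // worker may legally release it.
    mutex_transfer_lock(&sLock, workerThread);

    // ... later, in the worker thread:
    mutex_unlock(&sLock);
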