Lines Matching refs:lock

53         mutex           lock;
90 { ASSERT(find_thread(NULL) == (r)->lock.holder); }
121 # define RECURSIVE_LOCK_HOLDER(recursiveLock) ((recursiveLock)->lock.holder)
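Lines 90 and 121 are the KDEBUG ownership helpers: in checked builds the embedded mutex records its holding thread, so callers can assert that they own a recursive lock. A one-line sketch (sCacheLock is an invented lock variable; find_thread(NULL) returns the current thread's id):

    // Fires in checked builds if the current thread does not hold the lock.
    ASSERT(find_thread(NULL) == RECURSIVE_LOCK_HOLDER(&sCacheLock));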
131 extern void recursive_lock_init(recursive_lock *lock, const char *name);
133 extern void recursive_lock_init_etc(recursive_lock *lock, const char *name,
135 extern void recursive_lock_destroy(recursive_lock *lock);
136 extern status_t recursive_lock_lock(recursive_lock *lock);
137 extern status_t recursive_lock_trylock(recursive_lock *lock);
138 extern void recursive_lock_unlock(recursive_lock *lock);
139 extern int32 recursive_lock_get_recursion(recursive_lock *lock);
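Lines 131-139 are the complete recursive_lock API: the holding thread may re-acquire the lock, and recursive_lock_get_recursion() reports the nesting depth. A minimal usage sketch, assuming kernel-internal code that includes this header (the lock variable and its name string are invented for illustration):

    static recursive_lock sCacheLock;

    recursive_lock_init(&sCacheLock, "cache lock");

    // The first lock takes the underlying mutex; the second merely bumps
    // the recursion count, because the holder is already this thread.
    recursive_lock_lock(&sCacheLock);
    recursive_lock_lock(&sCacheLock);
    ASSERT(recursive_lock_get_recursion(&sCacheLock) == 2);

    // Every lock needs a matching unlock; the mutex is released only
    // when the recursion count drops back to zero.
    recursive_lock_unlock(&sCacheLock);
    recursive_lock_unlock(&sCacheLock);

    recursive_lock_destroy(&sCacheLock);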
141 extern void rw_lock_init(rw_lock* lock, const char* name);
143 extern void rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags);
144 extern void rw_lock_destroy(rw_lock* lock);
145 extern status_t rw_lock_write_lock(rw_lock* lock);
147 extern void mutex_init(mutex* lock, const char* name);
149 extern void mutex_init_etc(mutex* lock, const char* name, uint32 flags);
150 extern void mutex_destroy(mutex* lock);
153 // for the lock is done atomically. I.e. if "from" guards the object "to" belongs
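Line 153 is the middle of the comment documenting the lock-switching primitive this header declares, mutex_switch_lock(mutex* from, mutex* to): releasing "from" and starting to wait on "to" happen as one atomic step, so "from" can safely guard the lifetime of the object "to" belongs to. A hand-over-hand sketch (the list/item structures and the find_item() helper are hypothetical):

    mutex_lock(&list->lock);
    Item* item = find_item(list, key);    // hypothetical lookup

    // Atomically release the list lock and block on the item lock.
    // Because there is no window between the two steps, the item cannot
    // be torn down while we wait, provided its destroyer holds list->lock.
    mutex_switch_lock(&list->lock, &item->lock);

    /* ... use the item ... */
    mutex_unlock(&item->lock);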
163 extern status_t _rw_lock_read_lock(rw_lock* lock);
164 extern status_t _rw_lock_read_lock_with_timeout(rw_lock* lock,
166 extern void _rw_lock_read_unlock(rw_lock* lock, bool threadsLocked);
167 extern void _rw_lock_write_unlock(rw_lock* lock, bool threadsLocked);
169 extern status_t _mutex_lock(mutex* lock, bool threadsLocked);
170 extern void _mutex_unlock(mutex* lock, bool threadsLocked);
171 extern status_t _mutex_trylock(mutex* lock);
172 extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,
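The underscore-prefixed functions declared at lines 163-172 are the contended slow paths; the static inlines that follow try a lock-free fast path (a single atomic operation on the count field) and call into them only when that fails. The threadsLocked flag appears to tell the slow path whether the caller already holds the global thread spinlock, so that it is not acquired twice.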
177 rw_lock_read_lock(rw_lock* lock)
180 return rw_lock_write_lock(lock);
182 int32 oldCount = atomic_add(&lock->count, 1);
184 return _rw_lock_read_lock(lock);
191 rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
195 return mutex_lock_with_timeout(lock, timeoutFlags, timeout);
197 int32 oldCount = atomic_add(&lock->count, 1);
199 return _rw_lock_read_lock_with_timeout(lock, timeoutFlags, timeout);
206 rw_lock_read_unlock(rw_lock* lock)
209 rw_lock_write_unlock(lock);
211 int32 oldCount = atomic_add(&lock->count, -1);
213 _rw_lock_read_unlock(lock, false);
219 rw_lock_write_unlock(rw_lock* lock)
221 _rw_lock_write_unlock(lock, false);
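Together, lines 177-221 form the reader/writer surface. A usage sketch, assuming kernel-internal code (the table and its guard are invented names):

    static rw_lock sTableLock;

    rw_lock_init(&sTableLock, "table lock");

    // Readers may overlap; the uncontended path above is one atomic_add.
    rw_lock_read_lock(&sTableLock);
    /* ... look up an entry ... */
    rw_lock_read_unlock(&sTableLock);

    // A writer excludes both readers and other writers.
    rw_lock_write_lock(&sTableLock);
    /* ... modify the table ... */
    rw_lock_write_unlock(&sTableLock);

    rw_lock_destroy(&sTableLock);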
226 mutex_lock(mutex* lock)
229 return _mutex_lock(lock, false);
231 if (atomic_add(&lock->count, -1) < 0)
232 return _mutex_lock(lock, false);
239 mutex_lock_threads_locked(mutex* lock)
242 return _mutex_lock(lock, true);
244 if (atomic_add(&lock->count, -1) < 0)
245 return _mutex_lock(lock, true);
252 mutex_trylock(mutex* lock)
255 return _mutex_trylock(lock);
257 if (atomic_test_and_set(&lock->count, -1, 0) != 0)
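When the atomic test-and-set cannot move count from 0 to -1, the mutex is already held and mutex_trylock fails without blocking (returning B_WOULD_BLOCK in Haiku's implementation). The usual non-blocking pattern, sketched with an invented lock variable:

    if (mutex_trylock(&sStatsLock) != B_OK)
        return;    // contended: skip the update rather than block

    /* ... critical section ... */
    mutex_unlock(&sStatsLock);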
265 mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
268 return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
270 if (atomic_add(&lock->count, -1) < 0)
271 return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
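The timeout variant takes the standard timeout flags from OS.h (e.g. B_RELATIVE_TIMEOUT or B_ABSOLUTE_TIMEOUT) plus a bigtime_t in microseconds; when the wait expires, the slow path is expected to return B_TIMED_OUT. A hedged sketch:

    status_t status = mutex_lock_with_timeout(&sDeviceLock,
        B_RELATIVE_TIMEOUT, 1000000);    // give up after one second
    if (status == B_TIMED_OUT) {
        /* ... recover without the lock ... */
    } else if (status == B_OK) {
        /* ... critical section ... */
        mutex_unlock(&sDeviceLock);
    }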
278 mutex_unlock(mutex* lock)
281 if (atomic_add(&lock->count, 1) < -1)
283 _mutex_unlock(lock, false);
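For completeness, the plain lock/unlock pair around a critical section (sDriverLock is an invented example):

    static mutex sDriverLock;

    mutex_init(&sDriverLock, "driver lock");

    mutex_lock(&sDriverLock);
    /* ... critical section; in KDEBUG builds the holder field tracks
       the owning thread so ownership assertions can fire ... */
    mutex_unlock(&sDriverLock);

    mutex_destroy(&sDriverLock);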
288 mutex_transfer_lock(mutex* lock, thread_id thread)
291 lock->holder = thread;
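mutex_transfer_lock() hands a held mutex to another thread without releasing it: only the recorded holder changes (meaningful in KDEBUG builds, where the holder field exists), and from then on only the target thread may legally call mutex_unlock(). A sketch with an invented worker thread id:

    mutex_lock(&sJobLock);
    /* ... stage work the worker thread will consume ... */

    // Hand ownership to the worker; this thread must no longer unlock.
    mutex_transfer_lock(&sJobLock, workerThread);    // workerThread: thread_id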