Lines Matching refs:lock

169 void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
183 slow_enter (obj, lock, THREAD) ;
186 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
189 markOop dhw = lock->displaced_header();
192 // Recursive stack-lock.
211 if (mark == (markOop) lock) {
227 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
234 lock->set_displaced_header(mark);
235 if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
242 assert(lock != mark->locker(), "must not re-lock the same lock");
243 assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
244 lock->set_displaced_header(NULL);
251 lock->set_displaced_header (NULL) ;
256 // The object header will never be displaced to this lock,
258 // must be non-zero to avoid looking like a re-entrant lock,
260 lock->set_displaced_header(markOopDesc::unused_mark());
268 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
269 fast_exit (object, lock, THREAD) ;
273 // Class Loader support to workaround deadlocks on the class loader lock objects
275 // complete_exit()/reenter() are used to wait on a nested lock
276 // i.e. to give up an outer lock completely and then re-enter
277 // Used when holding nested locks - lock acquisition order: lock1 then lock2
282 // 5) lock lock2
489 // Avoid live-lock
505 // thread hold the associated inflation lock. The following code simply restricts
507 // on the inflation lock, 1 thread holding the inflation lock and using
680 // even the current thread owns the lock. The reason
744 // Be aware that this method could revoke bias of the lock object.
745 // This method queries the ownership of the lock handle specified by 'h_obj'.
746 // If the current thread owns the lock, it returns owner_self. If no
747 // thread owns the lock, it returns owner_none. Otherwise, it will return
949 // A large MAXPRIVATE value reduces both list lock contention
1047 // block in hand. This avoids some lock traffic and redundant
1116 // a global gOmInUseList under the global list lock so these
1221 // CASE: inflation in progress - inflating over a stack-lock.
1247 // This reduces coherency traffic and lock contention on the global free list.
1280 // (b) a displaced header associated with the stack-lock, or (c) a displaced
1284 // decides to release the lock while the value is 0, the unlock will fail
1288 // drop the lock (restoring the header from the basiclock to the object)
1296 // The owner can't die or unwind past the lock while our INFLATING
1366 // live-lock -- "Inflated" is an absorbing state.
1406 // global list lock. deflate_idle_monitors() acquires the global
1407 // list lock to scan for non-busy monitors to the global free list.
1409 // downside would have been the additional cost of acquiring the global list lock
1513 // And in case the vm thread is acquiring a lock during a safepoint
1668 // the list of extant blocks without taking a lock.