Searched defs:lock (Results 201 - 210 of 210) sorted by relevance

/openjdk7/hotspot/src/share/vm/c1/
c1_GraphBuilder.cpp
2038 append_with_bci(new MonitorEnter(x, state()->lock(x), state_before), bci);
2926 // Similarly with locks. The first lock slot in the osr buffer is the nth lock
2927 // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
2928 // in the interpreter frame (the method lock if a sync method)
2993 // lock synchronized method
2995 state->lock(NULL);
3557 void GraphBuilder::inline_sync_entry(Value lock, BlockBegin* sync_handler) { argument
3558 assert(lock !
3579 fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler) argument
3780 Value lock; local
[all...]
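
The comments at lines 2926-2928 describe how lock slots are reversed when the interpreter frame is packed into the OSR buffer: the first OSR slot holds the last interpreter lock, and the last OSR slot holds the 0th interpreter lock (the method lock for a synchronized method). A minimal illustrative sketch of that index reversal, using plain arrays rather than HotSpot's frame and buffer types:

#include <cstdio>

int main() {
  const int n = 4;                          // number of held locks (hypothetical)
  long interp_locks[n] = {10, 11, 12, 13};  // 0th entry = method lock for a sync method
  long osr_buffer[n];

  // First OSR slot receives the last interpreter lock, last OSR slot receives
  // the 0th interpreter lock, i.e. the order is reversed.
  for (int i = 0; i < n; i++) {
    osr_buffer[i] = interp_locks[n - 1 - i];
  }

  for (int i = 0; i < n; i++) {
    std::printf("osr slot %d holds interpreter lock %d (value %ld)\n",
                i, n - 1 - i, osr_buffer[i]);
  }
  return 0;
}
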
c1_LIRGenerator.cpp
638 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) { argument
641 CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
642 __ load_stack_address_monitor(monitor_no, lock);
643 // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
644 __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
648 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) { argument
651 LIR_Opr hdr = lock;
652 lock = new_hdr;
653 CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
654 __ load_stack_address_monitor(monitor_no, lock);
2633 LIR_Opr lock = new_register(T_INT); local
2640 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL); local
[all...]
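
Lines 638-654 show the shape of the monitor protocol: resolve the stack address of the monitor slot, then attempt lock_object/unlock_object with a MonitorEnterStub/MonitorExitStub slow path behind it. A hedged sketch of that shape; Monitor, Frame and fast_lock below are invented stand-ins, not the HotSpot API:

#include <cstdio>

struct Monitor { void* displaced_header; void* object; };

struct Frame {
  Monitor monitors[8];
  // Analogue of load_stack_address_monitor: find the slot for monitor_no.
  Monitor* stack_address_of_monitor(int monitor_no) { return &monitors[monitor_no]; }
};

// Hypothetical fast path: returns false when the slow path would be needed.
bool fast_lock(Monitor* slot, void* obj) {
  slot->object = obj;                // record which object this slot covers
  slot->displaced_header = nullptr;  // placeholder for the displaced mark word
  return true;                       // pretend the CAS on the mark word succeeded
}

void monitor_enter(Frame& f, void* obj, int monitor_no) {
  Monitor* slot = f.stack_address_of_monitor(monitor_no);
  if (!fast_lock(slot, obj)) {
    std::puts("take slow path (MonitorEnterStub equivalent)");
  }
}

int main() {
  Frame f{};
  int dummy_obj;
  monitor_enter(f, &dummy_obj, 0);
  std::puts("locked via fast path (sketch)");
  return 0;
}
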
c1_LIR.hpp
1752 LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info) argument
1756 , _lock(lock)
2136 void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub);
2137 void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);
/openjdk7/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/
concurrentMarkSweepGeneration.hpp
94 Mutex* lock() const { return _lock; } function in class:VALUE_OBJ_CLASS_SPEC
107 bool par_isMarked(HeapWord* addr) const; // do not lock checks
178 Mutex _par_lock; // an advisory lock used in case of parallel access
222 // lock and pop
228 // lock and push
890 Mutex* bitMapLock() const { return _markBitMap.lock(); }
1731 Mutex* _freelistLock; // Free list lock (in space)
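
The CMS hits show a bitmap that exposes its own mutex through an accessor (lock() at line 94, bitMapLock() at line 890) so callers can take the lock around bitmap updates. A small sketch of the same accessor pattern, using std::mutex and std::lock_guard as stand-ins for HotSpot's Mutex and MutexLockerEx:

#include <cstddef>
#include <mutex>
#include <vector>

class MarkBitMap {
  std::mutex _lock;
  std::vector<bool> _bits;
public:
  explicit MarkBitMap(std::size_t n) : _bits(n, false) {}
  std::mutex* lock() { return &_lock; }               // accessor; the caller locks
  void mark(std::size_t i) { _bits[i] = true; }       // assumes the lock is held
  bool is_marked(std::size_t i) const { return _bits[i]; }
};

int main() {
  MarkBitMap bm(128);
  {
    std::lock_guard<std::mutex> x(*bm.lock());        // analogous to MutexLockerEx
    bm.mark(7);
  }
  return bm.is_marked(7) ? 0 : 1;
}
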
/openjdk7/langtools/src/share/classes/com/sun/tools/javac/tree/
JCTree.java
1004 public JCExpression lock; field in class:JCTree.JCSynchronized
1006 protected JCSynchronized(JCExpression lock, JCBlock body) { argument
1007 this.lock = lock;
1014 public JCExpression getExpression() { return lock; }
2134 JCSynchronized Synchronized(JCExpression lock, JCBlock body); argument
/openjdk7/hotspot/src/share/vm/gc_implementation/g1/
g1CollectedHeap.cpp
4395 "Should only be true while someone holds the lock.");
4404 // The lock is already held, and this is recursive.
4911 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
5892 Mutex* lock = (par) ? ParGCRareEvent_lock : NULL; local
5893 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
5969 // a lock). So we can only verify that [bottom(),pre_dummy_top()]
6184 // first before we take the lock.
6209 "the heap lock should already be held by or for this thread");
6538 // the secondary free list we have to take the lock before
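
Lines 5892-5893 show a conditional-locking idiom: the mutex pointer is NULL when the code is not running in parallel, and the scoped locker treats a NULL mutex as a no-op. A sketch of that idiom under stand-in names (MaybeLocker and par_rare_event_lock are hypothetical, standing in for MutexLockerEx and ParGCRareEvent_lock):

#include <cstdio>
#include <mutex>

class MaybeLocker {
  std::mutex* _m;
public:
  explicit MaybeLocker(std::mutex* m) : _m(m) { if (_m) _m->lock(); }
  ~MaybeLocker() { if (_m) _m->unlock(); }
};

std::mutex par_rare_event_lock;   // stand-in for ParGCRareEvent_lock

void record_rare_event(bool par) {
  std::mutex* lock = par ? &par_rare_event_lock : nullptr;
  MaybeLocker x(lock);            // no-op when not running in parallel
  std::puts("event recorded");
}

int main() {
  record_rare_event(false);
  record_rare_event(true);
  return 0;
}
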
/openjdk7/hotspot/src/share/vm/opto/
graphKit.cpp
2872 // Create the counters for this fast lock.
2880 LockNode *lock = new (C) LockNode(C, tf); local
2882 lock->init_req( TypeFunc::Control, control() );
2883 lock->init_req( TypeFunc::Memory , mem );
2884 lock->init_req( TypeFunc::I_O , top() ) ; // does no i/o
2885 lock->init_req( TypeFunc::FramePtr, frameptr() );
2886 lock->init_req( TypeFunc::ReturnAdr, top() );
2888 lock->init_req(TypeFunc::Parms + 0, obj);
2889 lock->init_req(TypeFunc::Parms + 1, box);
2890 lock
[all...]
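
Lines 2880-2890 wire up a LockNode: the call-like fixed inputs (control, memory, i/o, frame pointer, return address) are set first, then the two parameters, the locked object and the box holding the displaced header. A sketch of that init_req wiring pattern with a generic Node type; the Slot enum below is a stand-in for the TypeFunc slot constants, not the real ones:

#include <cassert>
#include <cstddef>
#include <vector>

struct Node {
  std::vector<Node*> in;
  explicit Node(std::size_t req) : in(req, nullptr) {}
  void init_req(std::size_t idx, Node* n) { assert(in[idx] == nullptr); in[idx] = n; }
};

// Hypothetical slot layout mirroring TypeFunc::Control .. TypeFunc::Parms.
enum Slot { Control = 0, Memory, I_O, FramePtr, ReturnAdr, Parms };

int main() {
  Node ctrl(0), mem(0), fp(0), top(0), obj(0), box(0);
  Node lock(Parms + 2);                 // two parameters: object and box
  lock.init_req(Control,   &ctrl);
  lock.init_req(Memory,    &mem);
  lock.init_req(I_O,       &top);       // does no i/o
  lock.init_req(FramePtr,  &fp);
  lock.init_req(ReturnAdr, &top);
  lock.init_req(Parms + 0, &obj);       // object being locked
  lock.init_req(Parms + 1, &box);       // box (stack slot) for the displaced header
  return lock.in[Parms + 1] == &box ? 0 : 1;
}
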
/openjdk7/hotspot/src/cpu/x86/vm/
assembler_x86.cpp
1586 void Assembler::lock() { function in class:Assembler
5551 // See whether the lock is currently biased toward our thread and
5642 lock();
5679 lock();
5716 lock();
5722 // Fall through to the normal CAS-based lock, because no matter what
6152 // See whether the lock is currently biased toward our thread and
6217 lock();
6246 lock();
6275 lock();
[all...]
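
Assembler::lock() (line 1586) emits the x86 LOCK prefix, and the surrounding hits show biased-locking checks that fall through to a CAS-based lock. A hedged sketch of that CAS fallback using std::atomic; the mark-word encoding here is invented. On x86 the compare-exchange compiles to a lock-prefixed cmpxchg, the same prefix the hand-written assembly obtains by calling lock() first:

#include <atomic>
#include <cstdio>

std::atomic<long> mark_word{0};     // 0 = unlocked (hypothetical encoding)

bool cas_lock(long locked_value) {
  long expected = 0;
  // Comparable in spirit to: lock(); cmpxchgptr(locked_value, mark_word)
  return mark_word.compare_exchange_strong(expected, locked_value);
}

int main() {
  if (cas_lock(0x42)) {
    std::puts("acquired via CAS fast path");
  } else {
    std::puts("contended: would go to the slow path");
  }
  return 0;
}
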
/openjdk7/hotspot/src/os/bsd/vm/
os_bsd.cpp
1130 bool lock = os::Bsd::is_BsdThreads() && !os::Bsd::is_floating_stack(); local
1131 if (lock) {
1149 if (lock) os::Bsd::createThread_lock()->unlock();
1167 if (lock) {
2288 // XXX: Do we need a lock around this as per Linux?
3169 // don't need lock (actually we can skip locking even it can be called
4815 // Initialize lock used to serialize thread creation (see os::create_thread)
5643 // Paranoia to ensure our locked and lock-free paths interact
5708 // Paranoia to ensure our locked and lock-free paths interact
5748 // Note that we signal() _after dropping the lock for "immortal" Events.
[all...]
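
Lines 1130-1167 serialize thread creation with createThread_lock() only when the platform threading library requires it (BsdThreads without floating stacks). A sketch of that conditional serialization with std::thread/std::mutex stand-ins; needs_serialized_creation is a hypothetical probe for that condition:

#include <cstdio>
#include <mutex>
#include <thread>

std::mutex create_thread_lock;                      // stand-in for createThread_lock()

bool needs_serialized_creation() { return false; }  // hypothetical probe

void create_thread() {
  bool lock = needs_serialized_creation();
  if (lock) create_thread_lock.lock();

  std::thread t([] { std::puts("child running"); });
  t.join();

  if (lock) create_thread_lock.unlock();
}

int main() {
  create_thread();
  return 0;
}
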
/openjdk7/hotspot/src/os/linux/vm/
os_linux.cpp
955 bool lock = os::Linux::is_LinuxThreads() && !os::Linux::is_floating_stack(); local
956 if (lock) {
972 if (lock) os::Linux::createThread_lock()->unlock();
988 if (lock) {
3097 // don't need lock (actually we can skip locking even it can be called
4616 // Initialize lock used to serialize thread creation (see os::create_thread)
5382 // Paranoia to ensure our locked and lock-free paths interact
5447 // Paranoia to ensure our locked and lock-free paths interact
5487 // Note that we signal() _after dropping the lock for "immortal" Events.
5576 // since we are doing a lock
[all...]

Completed in 185 milliseconds
