/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
0N/A
#include "precompiled.hpp"
#include "opto/locknode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
0N/A
2607N/A//=============================================================================
0N/Aconst RegMask &BoxLockNode::in_RegMask(uint i) const {
0N/A return _inmask;
2607N/A}
0N/A
2607N/Aconst RegMask &BoxLockNode::out_RegMask() const {
0N/A return *Matcher::idealreg2regmask[Op_RegP];
2081N/A}
2081N/A
2081N/Auint BoxLockNode::size_of() const { return sizeof(*this); }
2081N/A
2081N/ABoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ),
0N/A _slot(slot), _is_eliminated(false) {
0N/A init_class_id(Class_BoxLock);
0N/A init_flags(Flag_rematerialize);
0N/A OptoReg::Name reg = OptoReg::stack2reg(_slot);
0N/A _inmask.Insert(reg);
0N/A}
0N/A
0N/A//-----------------------------hash--------------------------------------------
0N/Auint BoxLockNode::hash() const {
0N/A if (EliminateNestedLocks)
0N/A return NO_HASH; // Each locked region has own BoxLock node
0N/A return Node::hash() + _slot + (_is_eliminated ? Compile::current()->fixed_slots() : 0);
0N/A}
0N/A
0N/A//------------------------------cmp--------------------------------------------
0N/Auint BoxLockNode::cmp( const Node &n ) const {
0N/A if (EliminateNestedLocks)
0N/A return (&n == this); // Always fail except on self
0N/A const BoxLockNode &bn = (const BoxLockNode &)n;
0N/A return bn._slot == _slot && bn._is_eliminated == _is_eliminated;
0N/A}
0N/A
0N/ABoxLockNode* BoxLockNode::box_node(Node* box) {
0N/A // Chase down the BoxNode after RA which may spill box nodes.
0N/A while (!box->is_BoxLock()) {
0N/A // if (box_node->is_SpillCopy()) {
0N/A // Node *m = box_node->in(1);
0N/A // if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_StoreP) {
0N/A // box_node = m->in(m->as_Mach()->operand_index(2));
0N/A // continue;
0N/A // }
0N/A // }
0N/A assert(box->is_SpillCopy() || box->is_Phi(), "Bad spill of Lock.");
2607N/A // Only BoxLock nodes with the same stack slot are merged.
0N/A // So it is enough to trace one path to find the slot value.
2607N/A box = box->in(1);
2607N/A }
2607N/A return box->as_BoxLock();
0N/A}
0N/A
0N/AOptoReg::Name BoxLockNode::reg(Node* box) {
0N/A return box_node(box)->in_RegMask(0).find_first_elem();
0N/A}
0N/A
// Is BoxLock node used for one simple lock region (same box and obj)?
// Returns false as soon as any Lock/Unlock using this box references a
// different object than 'obj'.  If 'unique_lock' is non-NULL and exactly
// one distinct LockNode uses this box, that lock is written through it.
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj) {
  LockNode* lock = NULL;        // First (and possibly only) Lock seen
  bool has_one_lock = false;    // Stays false forever once a second distinct Lock is seen
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    assert(!n->is_Phi(), "should not merge BoxLock nodes");
    if (n->is_AbstractLock()) {
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check lock's box since box could be referenced by Lock's debug info.
      if (alock->box_node() == this) {
        if (alock->obj_node()->eqv_uncast(obj)) {
          // Track whether a single unique LockNode covers this region.
          if ((unique_lock != NULL) && alock->is_Lock()) {
            if (lock == NULL) {
              lock = alock->as_Lock();
              has_one_lock = true;
            } else if (lock != alock->as_Lock()) {
              has_one_lock = false;
            }
          }
        } else {
          return false; // Different objects
        }
      }
    }
  }
#ifdef ASSERT
  // Verify that FastLock and Safepoint reference only this lock region.
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
      assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj),"");
    }
    // Don't check monitor info in safepoints since the referenced object could
    // be different from the locked object. It could be Phi node of different
    // cast nodes which point to this locked object.
    // We assume that no other objects could be referenced in monitor info
    // associated with this BoxLock node because all associated locks and
    // unlocks are reference only this one object.
  }
#endif
  // Report the unique lock only when exactly one distinct Lock was found.
  if (unique_lock != NULL && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}
0N/A
0N/A//=============================================================================
2607N/A//-----------------------------hash--------------------------------------------
2607N/Auint FastLockNode::hash() const { return NO_HASH; }
0N/A
0N/A//------------------------------cmp--------------------------------------------
0N/Auint FastLockNode::cmp( const Node &n ) const {
0N/A return (&n == this); // Always fail except on self
0N/A}
0N/A
2607N/A//=============================================================================
2607N/A//-----------------------------hash--------------------------------------------
0N/Auint FastUnlockNode::hash() const { return NO_HASH; }
0N/A
0N/A//------------------------------cmp--------------------------------------------
0N/Auint FastUnlockNode::cmp( const Node &n ) const {
0N/A return (&n == this); // Always fail except on self
2081N/A}
0N/A
0N/A//
0N/A// Create a counter which counts the number of times this lock is acquired
0N/A//
0N/Avoid FastLockNode::create_lock_counter(JVMState* state) {
0N/A BiasedLockingNamedCounter* blnc = (BiasedLockingNamedCounter*)
0N/A OptoRuntime::new_named_counter(state, NamedCounter::BiasedLockingCounter);
0N/A _counters = blnc->counters();
0N/A}
0N/A
0N/A//=============================================================================
0N/A//------------------------------do_monitor_enter-------------------------------
0N/Avoid Parse::do_monitor_enter() {
2607N/A kill_dead_locals();
0N/A
0N/A // Null check; get casted pointer.
0N/A Node* obj = null_check(peek());
0N/A // Check for locking null object
0N/A if (stopped()) return;
0N/A
0N/A // the monitor object is not part of debug info expression stack
3057N/A pop();
3057N/A
3057N/A // Insert a FastLockNode which takes as arguments the current thread pointer,
3057N/A // the obj pointer & the address of the stack slot pair used for the lock.
3057N/A shared_lock(obj);
3057N/A}
3057N/A
3057N/A//------------------------------do_monitor_exit--------------------------------
3057N/Avoid Parse::do_monitor_exit() {
3057N/A kill_dead_locals();
3057N/A
3057N/A pop(); // Pop oop to unlock
3057N/A // Because monitors are guaranteed paired (else we bail out), we know
3057N/A // the matching Lock for this Unlock. Hence we know there is no need
3057N/A // for a null check on Unlock.
3057N/A shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
3057N/A}
3057N/A