// macro.cpp revision 3058
/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "libadt/vectset.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/locknode.hpp"
#include "opto/loopnode.hpp"
#include "opto/memnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"
//
// Replace any references to "oldref" in inputs to "use" with "newref".
// Returns the number of replacements made.
//
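// In pseudocode the helper does roughly the following (a sketch; the exact
// Node accessors are the ones used throughout this file):
//
//   for (uint j = 0; j < use->len(); j++)
//     if (use->in(j) == oldref) { rewire slot j to newref; nreplacements++; }
//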
int nreplacements = 0;
if (j < req)
else
break;
}
}
return nreplacements;
}
// Copy debug information and adjust JVMState information
// Clone old SafePointScalarObjectNodes, adjusting their field contents.
if (old_unique != C->unique()) {
}
}
}
}
}
Node* PhaseMacroExpand::opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path) {
if (mask != 0) {
} else {
}
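// In effect the branch built here tests ((word & mask) == bits) when a mask
// is given, or uses "word" itself as the comparison when mask == 0; the equal
// (test-passes) case is treated as the fast path below. (Sketch of intent,
// not the exact node shapes.)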
// Fast path taken.
// Fast path not-taken, i.e. slow path
if (return_fast_path) {
return fast_taken;
} else {
return slow_taken;
}
}
//--------------------copy_predefined_input_for_runtime_call--------------------
void PhaseMacroExpand::copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call) {
// Set fixed predefined input arguments
}
//------------------------------make_slow_call---------------------------------
CallNode* PhaseMacroExpand::make_slow_call(CallNode *oldcall, const TypeFunc* slow_call_type, address slow_call, const char* leaf_name, Node* slow_path, Node* parm0, Node* parm1) {
// Slow-path call
? (CallNode*)new (C, size) CallLeafNode ( slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM )
: (CallNode*)new (C, size) CallStaticJavaNode( slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), oldcall->jvms()->bci(), TypeRawPtr::BOTTOM );
// Slow path call has no side-effects, uses few values
return call;
}
{
// For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
else {
}
}
}
break;
}
if (pn->_is_io_use)
else
break;
if (pn->_is_io_use)
else
break;
break;
default:
assert(false, "unexpected projection from allocation node.");
}
}
}
// Eliminate a card mark sequence. p2x is a ConvP2XNode
if (!UseG1GC) {
// The load is checking if the card has been written so
// replace it with zero to fold the test.
continue;
}
}
} else {
// There could be only one user, a URShift node, in the Object.clone() intrinsic,
// but the new allocation is passed to the arraycopy stub and it cannot
// be scalar replaced. So we don't check that case.
// Remove G1 post barrier.
// Search for the CastP2X->Xor->URShift->Cmp path which
// checks whether the store was done to a region different from the value's region.
// Replace the Cmp with #0 (false) to collapse the G1 post barrier.
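// At the source level the collapsed guard corresponds roughly to:
//   if ((((uintptr_t)store_addr ^ (uintptr_t)new_val) >> HeapRegion::LogOfHRGrainBytes) != 0)
//     ... G1 post barrier ...
// (illustrative; here the check is matched and folded as ideal graph nodes).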
xorx = u;
break;
}
}
"missing region check in G1 post barrier");
// Remove G1 pre barrier.
// Search "if (marking != 0)" check and set it to "false".
// There is no G1 pre barrier if previous stored value is NULL
// (for example, after initialization).
int ind = 1;
ind = 2;
}
}
}
}
}
// Now CastP2X can be removed since it is used only on a dead path
// which is currently still alive until IGVN optimizes it.
}
}
// Search for a memory operation for the specified memory slice.
static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc, PhaseGVN *phase) {
while (true) {
return mem; // hit one of our sentinels
} else if (mem->is_MergeMem()) {
// we can safely skip over safepoints, calls, locks and membars because we
// already know that the object is safe to eliminate.
return in;
}
} else {
assert(false, "unexpected projection");
}
// Array element references have the same alias_idx
// but different offsets and different instance_ids.
return mem;
} else {
}
} else if (mem->is_ClearArray()) {
// Cannot bypass initialization of the instance
// we are looking for.
// We are looking for stored value, return Initialize node
// or memory edge from Allocate node.
return init;
else
}
// Otherwise skip it (the call updated 'mem' value).
assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
return NULL;
}
} else {
return mem;
}
}
}
//
// Given a Memory Phi, compute a value Phi containing the values from stores
// on the input paths.
// Note: this function is recursive; its depth is limited by the "level" argument.
// Returns the computed Phi, or NULL if it cannot compute it.
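// For example (informal): a memory Phi merging a path that stores x into the
// field with a path on which the field is untouched since the allocation
// yields a value Phi on the same region merging x with the type's zero value;
// nested memory Phis recurse, bounded by "level".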
Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, Node *alloc, Node_Stack *value_phis, int level) {
// Check if an appropriate value phi already exists.
return phi;
}
}
// Check if an appropriate new value phi already exists.
return new_phi;
if (level <= 0) {
return NULL; // Give up: phi tree too deep
}
// create a new Phi for the value
} else {
// hit a sentinel, return appropriate 0 value
continue;
}
if (val->is_Initialize()) {
}
return NULL; // can't find a value on this path
}
return NULL;
}
assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
return NULL;
} else {
#ifdef ASSERT
assert(false, "unknown node on this path");
#endif
return NULL; // unknown node on this path
}
}
}
// Set Phi's inputs
} else {
}
}
return phi;
}
// Search for the last value stored into the object's field.
Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc) {
while (!done) {
return NULL; // found a loop, give up
}
done = true; // hit a sentinel, return appropriate 0 value
} else if (mem->is_Initialize()) {
done = true; // Something went wrong.
done = true;
}
done = true;
// try to find a phi's unique input
continue;
} else if (unique_input == NULL) {
unique_input = n;
} else if (unique_input != n) {
unique_input = top;
break;
}
}
mem = unique_input;
} else {
done = true;
}
} else {
assert(false, "unexpected node");
}
}
// hit a sentinel, return appropriate 0 value
// attempt to produce a Phi reflecting the values on the input paths of the Phi
return phi;
} else {
// Kill all new Phis
while(value_phis.is_nonempty()) {
value_phis.pop();
}
}
}
}
// Something went wrong.
return NULL;
}
// Check the possibility of scalar replacement.
bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
// Scan the uses of the allocation to check for anything that would
// prevent us from eliminating it.
bool can_eliminate = true;
// All users were eliminated.
} else if (!res->is_CheckCastPP()) {
can_eliminate = false;
} else {
can_eliminate = false;
} else if (res_type->isa_aryptr()) {
if (length < 0) {
can_eliminate = false;
}
}
}
j < jmax && can_eliminate; j++) {
can_eliminate = false;
break;
}
k < kmax && can_eliminate; k++) {
DEBUG_ONLY(disq_node = n;)
if (n->is_Load() || n->is_LoadStore()) {
} else {
}
can_eliminate = false;
}
}
} else if (use->is_SafePoint()) {
// Object is passed as argument.
can_eliminate = false;
}
can_eliminate = false;
} else {
}
} else {
}
} else {
} else {
}
}
can_eliminate = false;
}
}
}
#ifndef PRODUCT
if (PrintEliminateAllocations) {
if (can_eliminate) {
else
} else {
else
#ifdef ASSERT
}
#endif /*ASSERT*/
}
}
#endif
return can_eliminate;
}
// Do scalar replacement.
bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
int nfields = 0;
int array_base;
int element_size;
}
if (res_type->isa_instptr()) {
// find the fields of the class which will be needed for safepoint debug information
} else {
// find the array's elements which will be needed for safepoint debug information
}
}
//
// Process the safepoint uses
//
while (safepoints.length() > 0) {
#ifdef ASSERT
#endif
// Scan object's fields adding an input to the safepoint for each field.
for (int j = 0; j < nfields; j++) {
} else {
}
const Type *field_type;
// The next code is taken from Parse::do_get_xxx().
// This can happen if the constant oop is non-perm.
// Do not "join" in the previous type; it doesn't add value,
// and may yield a vacuous result if the field is of interface type.
} else {
}
if (UseCompressedOops) {
}
} else {
}
// We weren't able to find a value for this field,
// give up on eliminating this allocation.
// Remove any extra entries we added to the safepoint.
for (int k = 0; k < j; k++) {
}
// rollback processed safepoints
while (safepoints_done.length() > 0) {
// remove any extra entries we added to the safepoint
for (int k = 0; k < nfields; k++) {
}
// Now make a pass over the debug information replacing any references
// to SafePointScalarObjectNode with the allocated object.
}
}
}
}
#ifndef PRODUCT
if (PrintEliminateAllocations) {
} else { // Array's element
}
else
}
#endif
return false;
}
// Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
// to be able to scalar replace the allocation.
if (field_val->is_EncodeP()) {
} else {
field_val = transform_later(new (C, 2) DecodeNNode(field_val, field_val->bottom_type()->make_ptr()));
}
}
}
// Now make a pass over the debug information replacing any references
// to the allocated object with "sobj"
}
}
}
return true;
}
// Process users of eliminated allocation.
if (n->is_Store()) {
#ifdef ASSERT
// Verify that there are no dependent MemBarVolatile nodes;
// they should be removed during IGVN, see MemBarNode::Ideal().
p < pmax; p++) {
"MemBarVolatile should be eliminated for non-escaping object");
}
#endif
} else {
}
}
} else {
}
}
}
//
// Process other users of allocation's projections
//
if (use->is_Initialize()) {
// Eliminate Initialize node.
}
#ifdef ASSERT
if (mem->is_MergeMem()) {
} else {
}
#endif
}
// raw memory addresses used only by the initialization
} else {
assert(false, "only Initialize or AddP expected");
}
}
}
if (_fallthroughcatchproj != NULL) {
}
if (_memproj_fallthrough != NULL) {
}
if (_memproj_catchall != NULL) {
}
if (_ioproj_fallthrough != NULL) {
}
if (_ioproj_catchall != NULL) {
}
if (_catchallcatchproj != NULL) {
}
}
return false;
}
return false;
}
return false;
}
while (p != NULL) {
p = p->caller();
}
}
#ifndef PRODUCT
if (PrintEliminateAllocations) {
if (alloc->is_AllocateArray())
else
}
#endif
return true;
}
//---------------------------set_eden_pointers-------------------------
if (UseTLAB) { // Private allocation: load from TLS
} else { // Shared allocation: load from globals
}
}
Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
return value;
}
Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) {
return mem;
}
//=============================================================================
//
// A L L O C A T I O N
//
// Allocation attempts to be fast in the case of frequent small objects.
// It breaks down like this:
//
// 1) Size in doublewords is computed. This is a constant for objects and
// variable for most arrays. Doubleword units are used to avoid size
// overflow of huge doubleword arrays. We need doublewords in the end for
// rounding.
//
// 2) Size is checked for being 'too large'. Too-large allocations will go
// the slow path into the VM. The slow path can throw any required
// exceptions, and does all the special checks for very large arrays. The
// size test can constant-fold away for objects. For objects with
// finalizers it constant-folds the other way: you always go slow with
// finalizers.
//
// 3) If NOT using TLABs, this is the contended loop-back point.
// Load-Locked the heap top. If using TLABs normal-load the heap top.
//
// 4) Check that heap top + size*8 < max.  If we fail, go the slow route.
// NOTE: "top+size*8" cannot wrap the 4Gig line! Here's why: for largish
// "size*8" we always enter the VM, where "largish" is a constant picked small
// enough that there's always space between the eden max and 4Gig (old space is
// there so it's quite large) and large enough that the cost of entering the VM
// is dwarfed by the cost to initialize the space.
//
// 5) If NOT using TLABs, Store-Conditional the adjusted heap top back
// down. If contended, repeat at step 3. If using TLABs normal-store
// adjusted heap top back down; there is no contention.
//
// 6) If !ZeroTLAB then Bulk-clear the object/array.  Fill in klass & mark
// fields.
//
// 7) Merge with the slow-path; cast the raw memory pointer to the correct
// oop flavor.
//
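//
// A rough pseudocode sketch of the fast path described by the steps above
// (illustrative only; the code below builds the equivalent ideal graph nodes,
// and the helper names here are made up):
//
//   size = round_up_to_doublewords(object_or_array_size);
//   if (size >= FastAllocateSizeLimit || needs_finalizer) goto slow_path;
//   do {
//     top     = load(heap_top);                  // load-locked if not using TLABs
//     new_top = top + size;
//     if (new_top > heap_end) goto slow_path;    // need a GC
//   } while (!store_conditional(heap_top, new_top));  // plain store, no loop, with TLABs
//   clear_body_if_needed(top, size);             // skipped when ZeroTLAB pre-zeroes
//   install_mark_and_klass(top);
//   result = merge_with_slow_path_and_cast_to_oop(top);
//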
//=============================================================================
// FastAllocateSizeLimit value is in DOUBLEWORDS.
// Allocations bigger than this always go the slow route.
// This value must be small enough that allocation attempts that need to
// trigger exceptions go the slow route. Also, it must be small enough so
// that heap_top + size_in_bytes does not wrap around the 4Gig limit.
//=============================================================================
//
// %%% Here is an old comment from parseHelper.cpp; is it outdated?
// The allocator will coalesce int->oop copies away. See comment in
// coalesce.cpp about how this works. It depends critically on the exact
// code shape produced here, so if you are changing this code shape
// make sure the GC info for the heap-top is correct in and around the
// slow-path call.
//
)
{
if (storestore != NULL) {
// Break this link that is no longer useful and confuses register allocation
}
// We need a Region and corresponding Phi's to merge the slow-path and fast-path results.
// they will not be used if "always_slow" is set
// The initial slow comparison is a size check, the comparison
// we want to do is a BoolTest::gt
bool always_slow = false;
if (tv >= 0) {
} else {
}
if (C->env()->dtrace_alloc_probes() ||
(UseConcMarkSweepGC && CMSIncrementalMode))) {
// Force slow-path allocation
always_slow = true;
}
// generate the initial test if necessary
if (initial_slow_test != NULL ) {
// Now make the initial failure test. Usually a too-big test but
// might be a TRUE for finalizers or a fancy class check for
// newInstance0.
// Plug the failing-too-big test into the slow-path region
} else { // No initial test, just fall into next case
toobig_false = ctrl;
}
// generate the fast allocation code unless we know that the initial test will always go slow
if (!always_slow) {
// Fast path modifies only raw memory.
if (mem->is_MergeMem()) {
}
// Load Eden::end. Loop invariant and hoisted.
//
// Note: We set the control input on "eden_end" and "old_eden_top" when using
// a TLAB to work around a bug where these values were being moved across
// a safepoint. These are not oops, so they cannot be included in the oop
// map, but they can be changed by a GC. The proper way to fix this would
// be to set the raw memory state when generating a SafepointNode. However
// this will require extensive changes to the loop optimization in order to
// prevent a degradation of the optimization.
// See comment in memnode.hpp, around line 227 in class LoadPNode.
// allocate the Region and Phi nodes for the result
// We need a Region for the loop-back contended case.
if (UseTLAB) {
} else {
// Now handle the passing-too-big test. We fall into the contended
// loop-back merge point.
}
// Load(-locked) the heap top.
// See note above concerning the control input when using a TLAB
? new (C, 3) LoadPNode (ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM)
// Add to heap top to get a new heap top
// Check for needing a GC; compare against heap end
IfNode *needgc_iff = new (C, 2) IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN);
// Plug the failing-heap-space-need-gc test into the slow-path region
if (initial_slow_test) {
// This completes all paths into the slow merge point
} else { // No initial slow path needed!
// Just fall from the need-GC path straight into the VM call.
}
// No need for a GC. Setup for the Store-Conditional
// Grab regular I/O before optional prefetch may change it.
// Slow-path does no I/O so just set it to the original I/O.
// Name successful fast-path variables
// Store (-conditional) the modified eden top back down.
// StorePConditional produces flags for a test PLUS a modified raw
// memory state.
if (UseTLAB) {
} else {
// If not using TLABs, check to see if there was contention.
IfNode *contention_iff = new (C, 2) IfNode (needgc_false, contention_check, PROB_MIN, COUNT_UNKNOWN);
// If contention, loopback and try again.
// Fast-path succeeded with no contention!
// Bump total allocated bytes for this thread
#ifdef _LP64
#else
#endif
0, new_alloc_bytes, T_LONG);
}
// If initialization is performed by an array copy, any required
// MemBarStoreStore was already added. If the object does not
// escape no need for a MemBarStoreStore. Otherwise we need a
// MemBarStoreStore so that stores that initialize this object
// can't be reordered with a subsequent store that makes this
// object accessible by other threads.
// No InitializeNode or no stores captured by zeroing
// elimination. Simply add the MemBarStoreStore after object
// initialization.
} else {
// Add the MemBarStoreStore after the InitializeNode so that
// all stores performing the initialization that were moved
// before the InitializeNode happen before the storestore
// barrier.
// The MemBarStoreStore depends on control and memory coming
// from the InitializeNode
// All nodes that depended on the InitializeNode for control
// and memory must now depend on the MemBarNode that itself
// depends on the InitializeNode
}
}
if (C->env()->dtrace_extended_probes()) {
// Slow-path call
"dtrace_object_alloc",
TypeRawPtr::BOTTOM);
// Get base of thread-local storage area
}
// Plug in the successful fast-path into the result merge point
} else {
slow_region = ctrl;
}
// Generate slow-path call
}
// Copy debug information and adjust JVMState information, then replace
// allocate node with the call
if (!always_slow) {
} else {
// Hook i_o projection to avoid its elimination during allocation
// replacement (when only a slow call is generated).
}
// Identify the output projections from the allocate node and
// adjust any references to them.
// The control and io projections look like:
//
// v---Proj(ctrl) <-----+ v---CatchProj(ctrl)
// Allocate Catch
// ^---Proj(io) <-------+ ^---CatchProj(io)
//
// We are interested in the CatchProj nodes.
//
// An allocate node has separate memory projections for the uses on
// the control and i_o paths. Replace the control memory projection with
// result_phi_rawmem (unless we are only generating a slow call when
// both memory projections are combined)
// back up iterator
--i;
}
}
// Now change uses of _memproj_catchall to use _memproj_fallthrough and delete
// _memproj_catchall so we end up with a call that has only 1 memory projection.
if (_memproj_catchall != NULL ) {
if (_memproj_fallthrough == NULL) {
}
// back up iterator
--i;
}
}
// An allocate node has separate i_o projections for the uses on the control
// and i_o paths. Always replace the control i_o projection with the result i_o;
// otherwise the incoming i_o becomes dead when only a slow call is generated
// (this is different from the memory projections, where both projections are
// combined in that case).
if (_ioproj_fallthrough != NULL) {
// back up iterator
--i;
}
}
// Now change uses of _ioproj_catchall to use _ioproj_fallthrough and delete
// _ioproj_catchall so we end up with a call that has only 1 i_o projection.
if (_ioproj_catchall != NULL ) {
if (_ioproj_fallthrough == NULL) {
}
// back up iterator
--i;
}
}
// if we generated only a slow call, we are done
if (always_slow) {
// Now we can unhook i_o.
} else {
// Case of new array with negative size known during compilation.
// The AllocateArrayNode::Ideal() optimization disconnects the unreachable
// following code since the call to the runtime will throw an exception.
// As a result there will be no users of i_o after the call.
// Leave i_o attached to this call to avoid problems in the preceding graph.
}
return;
}
if (_fallthroughcatchproj != NULL) {
} else {
}
// no uses of the allocation result
slow_result = top();
} else {
}
// Plug slow-path into result merge point
// This completes all paths into the result merge point
}
// Helper for PhaseMacroExpand::expand_allocate_common.
// Initializes the newly-allocated storage.
Node*
Node* size_in_bytes) {
// Store the klass & mark bits
// For now only enable fast locking for non-array types
mark_node = make_load(control, rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
} else {
}
rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_OBJECT);
// Array length
rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
// conservatively small header size:
if (k->is_array_klass()) // we know the exact header size in most cases:
}
// Clear the object body, if necessary.
// The init has somehow disappeared; be cautious and clear everything.
//
// This can happen if a node is allocated but an uncommon trap occurs
// immediately. In this case, the Initialize gets associated with the
// trap, and may be placed in a different (outer) loop, if the Allocate
// is in a loop. If (this is rare) the inner loop gets unrolled, then
// there can be two Allocates to one Initialize. The answer in all these
// edge cases is safety first. It is always safe to clear immediately
// within an Allocate, and then (maybe or maybe not) clear some more later.
if (!ZeroTLAB)
&_igvn);
} else {
if (!init->is_complete()) {
// Try to win by zeroing only what the init does not store.
// We can also try to do some peephole optimizations,
// such as combining some adjacent subword stores.
}
// We have no more use for this link, since the AllocateNode goes away:
// (If we keep the link, it just confuses the register allocator,
// who thinks he sees a real use of the address by the membar.)
}
return rawmem;
}
// Generate prefetch instructions for next allocations.
// Generate prefetch allocation with watermark check.
// As an allocation hits the watermark, we will prefetch starting
// at a "distance" away from watermark.
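// Conceptually (a sketch; the flag names are the usual allocation-prefetch
// flags): keep a per-thread allocation watermark; when new_eden_top crosses
// it, prefetch the next AllocatePrefetchLines cache lines starting
// AllocatePrefetchDistance bytes beyond the watermark, then advance the
// watermark past the prefetched range.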
TypeRawPtr::BOTTOM );
// I/O is used for Prefetch
// check against new_eden_top
// true node, add prefetchdistance
// adding prefetches
}
i_o = pf_phi_abio;
// Insert a prefetch for each allocation.
// This code is used for SPARC with BIS.
TypeRawPtr::BOTTOM );
// Generate several prefetch instructions.
// Next cache address.
// Prefetch
}
} else if( AllocatePrefetchStyle > 0 ) {
// Insert a prefetch for each allocation only on the fast-path
// Generate several prefetch instructions.
// Do not let it float too high, since if eden_top == eden_end,
// both might be null.
if( i == 0 ) { // Set control for first prefetch, next follows it
}
}
}
return i_o;
}
}
k->is_type_array_klass()) {
// Don't zero the type array during slow allocation in the VM since
// it will be initialized later by an arraycopy in compiled code.
} else {
}
}
//-------------------mark_eliminated_box----------------------------------
//
// During EA obj may point to several objects, but after a few ideal graph
// transformations (CCP) it may point to only one non-escaping object
// (but still be used through a phi), and the corresponding locks and unlocks
// will be marked for elimination. Later obj could be replaced with a new node
// (a new phi) which does not have escape information. And later, after some
// graph reshaping, other locks and unlocks (which were not marked for
// elimination before) are connected to this new obj (phi) but they still will
// not be marked for elimination since the new obj has no escape information.
// Mark all associated (same box and obj) lock and unlock nodes for
// elimination if some of them marked already.
return;
if (oldbox->is_BoxLock() &&
// Box is used only in one lock region. Mark this box as eliminated.
// Check lock's box since box could be referenced by Lock's debug info.
// Mark all related locks and unlocks as eliminated.
alock->set_non_esc_obj();
}
}
}
return;
}
// Create new "eliminated" BoxLock node and use it in monitor debug info
// instead of oldbox for the same object.
// Note: BoxLock node is marked eliminated only here and it is used
// to indicate that all associated lock and unlock nodes are marked
// for elimination.
newbox->set_eliminated();
// Replace old box node with new box for all users of the same object.
bool next_edge = true;
if (u->is_AbstractLock()) {
// Replace Box and mark all related locks and unlocks as eliminated.
alock->set_non_esc_obj();
next_edge = false;
}
}
next_edge = false;
}
// Replace old box in monitor debug info.
// Loop over monitors
_igvn.hash_delete(u);
next_edge = false;
}
}
}
}
if (next_edge) i++;
}
}
//-----------------------mark_eliminated_locking_nodes-----------------------
if (EliminateNestedLocks) {
return;
// Only Lock node has JVMState needed here.
// Mark eliminated related nested locks and unlocks.
// Note: BoxLock node is marked eliminated only here
// and it is used to indicate that all associated lock
// and unlock nodes are marked for elimination.
if (u->is_AbstractLock()) {
alock = u->as_AbstractLock();
// Verify that this Box is referenced only by related locks.
// Mark all related locks and unlocks.
alock->set_nested();
}
}
}
}
return;
}
// Process locks for non escaping object
} // EliminateNestedLocks
// Look for all locks of this object and mark them and
// corresponding BoxLock nodes as eliminated.
if (o->is_AbstractLock() &&
alock = o->as_AbstractLock();
// Replace old box node with new eliminated box for all users
// of the same object and mark related locks as eliminated.
}
}
}
}
// eliminate the node without expanding it.
//
// Note: The membars associated with the lock/unlock are currently not
// eliminated. This should be investigated as a future enhancement.
//
if (!alock->is_eliminated()) {
return false;
}
#ifdef ASSERT
if (!alock->is_coarsened()) {
// Check that new "eliminated" BoxLock node is created.
}
#endif
while (p != NULL) {
p = p->caller();
}
}
#ifndef PRODUCT
if (PrintEliminateLocks) {
} else {
}
}
#endif
// There are 2 projections from the lock. The lock node will
// be deleted when its last use is subsumed below.
_fallthroughproj != NULL &&
// The input to a Lock is merged memory, so extract its RawMem input
// (unless the MergeMem has been optimized away.)
// Search for MemBarAcquireLock node and delete it also.
// Delete the FastLock node also if this Lock node is its unique user
// (a loop peeling may clone a Lock node).
}
}
// Search for MemBarReleaseLock node and delete it also.
}
return true;
}
//------------------------------expand_lock_node----------------------
// Make the merge point
if (UseOptoBiasInlining) {
/*
* See the full description in MacroAssembler::biased_locking_enter().
*
* if( (mark_word & biased_lock_mask) == biased_lock_pattern ) {
* // The object is biased.
* proto_node = klass->prototype_header;
* o_node = thread | proto_node;
* x_node = o_node ^ mark_word;
* if( (x_node & ~age_mask) == 0 ) { // Biased to the current thread ?
* // Done.
* } else {
* if( (x_node & biased_lock_mask) != 0 ) {
* // The klass's prototype header is no longer biased.
* cas(&mark_word, mark_word, proto_node)
* goto cas_lock;
* } else {
* // The klass's prototype header is still biased.
* if( (x_node & epoch_mask) != 0 ) { // Expired epoch?
* old = mark_word;
* new = o_node;
* } else {
* // Different thread or anonymous biased.
* old = mark_word & (epoch_mask | age_mask | biased_lock_mask);
* new = thread | old;
* }
* // Try to rebias.
* if( cas(&mark_word, old, new) == 0 ) {
* // Done.
* } else {
* goto slow_path; // Failed.
* }
* }
* }
* } else {
* // The object is not biased.
* cas_lock:
* if( FastLock(obj) == 0 ) {
* // Done.
* } else {
* slow_path:
* OptoRuntime::complete_monitor_locking_Java(obj);
* }
* }
*/
// create a Phi for the memory state
// First, check mark word for the biased lock pattern.
Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
// Get fast path - mark word has the biased lock pattern.
markOopDesc::biased_lock_pattern, true);
// fast_lock_region->in(1) is set to slow path.
// Now check that the lock is biased to the current thread and has
// the same epoch and bias as Klass::_prototype_header.
// Special-case a fresh allocation to avoid building nodes:
if (klass_node == NULL) {
klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
#ifdef _LP64
} else
#endif
}
Node *proto_node = make_load(ctrl, mem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeX_X, TypeX_X->basic_type());
// Get slow path - mark word does NOT match the value.
(~markOopDesc::age_mask_in_place), 0);
// region->in(3) is set to fast path - the object is biased to the current thread.
// Mark word does NOT match the value (thread | Klass::_prototype_header).
// First, check biased pattern.
// Get fast path - _prototype_header has the same biased lock pattern.
markOopDesc::biased_lock_mask_in_place, 0, true);
// fast_lock_region->in(2) - the prototype header is no longer biased
// and we have to revoke the bias on this object.
// We are going to try to reset the mark of this object to the prototype
// value and fall through to the CAS-based locking scheme.
// Second, check epoch bits.
// Get slow path - mark word does NOT match epoch bits.
// The epoch of the current bias is not valid, attempt to rebias the object
// toward the current thread.
// rebiased_region->in(1) is set to fast path.
// The epoch of the current bias is still valid but we know
// nothing about the owner; it might be set or it might be clear.
// Try to acquire the bias of the object using an atomic operation.
// If this fails we will go in to the runtime to revoke the object's bias.
// Get slow path - Failed to CAS.
// region->in(4) is set to fast path - the object is rebiased to the current thread.
// Failed to CAS.
// Call CAS-based locking scheme (FastLock node).
// Get slow path - FastLock failed to lock the object.
// region->in(2) is set to fast path - the object is locked to the current thread.
// Reset lock's memory edge.
} else {
// create a Phi for the memory state
// Optimize test; set region slot 2
}
// Make slow path call
CallNode *call = make_slow_call( (CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(), OptoRuntime::complete_monitor_locking_Java(), NULL, slow_path, obj, box );
// Slow path can only throw asynchronous exceptions, which are always
// de-opted. So the compiler thinks the slow-call can never throw an
// exception. If it DOES throw an exception we would need the debug
// info removed first (since if it throws there is no monitor).
// Capture slow path
// disconnect fall-through projection from call and create a new one
// hook up users of fall-through projection to region
// region inputs are now complete
}
//------------------------------expand_unlock_node----------------------
// No need for a null check on unlock
// Make the merge point
if (UseOptoBiasInlining) {
// Check for biased locking unlock case, which is a no-op.
// See the full description in MacroAssembler::biased_locking_exit().
// create a Phi for the memory state
Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
} else {
// create a Phi for the memory state
}
// Optimize test; set region slot 2
CallNode *call = make_slow_call( (CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), "complete_monitor_unlocking_C", slow_path, obj, box );
// No exceptions for unlocking
// Capture slow path
// disconnect fall-through projection from call and create a new one
// hook up users of fall-through projection to region
// region inputs are now complete
}
//---------------------------eliminate_macro_nodes----------------------
// Eliminate scalar replaced allocations and associated locks.
void PhaseMacroExpand::eliminate_macro_nodes() {
if (C->macro_count() == 0)
return;
// First, attempt to eliminate locks
int cnt = C->macro_count();
for (int i=0; i < cnt; i++) {
Node *n = C->macro_node(i);
if (n->is_AbstractLock()) { // Lock and Unlock nodes
// Before elimination mark all associated (same box and obj)
// lock and unlock nodes.
}
}
bool progress = true;
while (progress) {
progress = false;
for (int i = C->macro_count(); i > 0; i--) {
bool success = false;
if (n->is_AbstractLock()) {
}
}
}
// Next, attempt to eliminate allocations
progress = true;
while (progress) {
progress = false;
for (int i = C->macro_count(); i > 0; i--) {
bool success = false;
switch (n->class_id()) {
case Node::Class_Allocate:
case Node::Class_AllocateArray:
break;
case Node::Class_Lock:
case Node::Class_Unlock:
break;
default:
n->Opcode() == Op_Opaque1 ||
}
}
}
}
//------------------------------expand_macro_nodes----------------------
// Returns true if a failure occurred.
bool PhaseMacroExpand::expand_macro_nodes() {
// Last attempt to eliminate macro nodes.
// Make sure expansion will not cause node limit to be exceeded.
// Worst case is a macro node gets expanded into about 50 nodes.
// Allow 50% more for optimization.
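// For example, with 100 macro nodes the headroom requested is roughly
// 100 * 50 * 1.5 = 7500 nodes; if that would push the compilation over the
// node limit, expansion bails out here (illustrative arithmetic only).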
return true;
// Eliminate Opaque and LoopLimit nodes. Do it after all loop optimizations.
bool progress = true;
while (progress) {
progress = false;
for (int i = C->macro_count(); i > 0; i--) {
bool success = false;
if (n->Opcode() == Op_LoopLimit) {
// Remove it from macro list and put on IGVN worklist to optimize.
C->remove_macro_node(n);
success = true;
success = true;
}
}
}
// expand "macro" nodes
// nodes are removed from the macro list as they are processed
while (C->macro_count() > 0) {
int macro_count = C->macro_count();
// node is unreachable, so don't try to expand it
C->remove_macro_node(n);
continue;
}
switch (n->class_id()) {
case Node::Class_Allocate:
expand_allocate(n->as_Allocate());
break;
case Node::Class_AllocateArray:
break;
case Node::Class_Lock:
expand_lock_node(n->as_Lock());
break;
case Node::Class_Unlock:
expand_unlock_node(n->as_Unlock());
break;
default:
assert(false, "unknown node type in macro list");
}
if (C->failing()) return true;
}
_igvn.set_delay_transform(false);
if (C->failing()) return true;
return false;
}