/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  _collecting(true),
_verify(false),
_compile(C),
_node_map(C->comp_arena()) {
// Add unknown java object.
// Add ConP(#NULL) and ConN(#NULL) nodes.
if (UseCompressedOops) {
}
}
bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
  int cnt = C->macro_count();
  for( int i=0; i < cnt; i++ ) {
    Node *n = C->macro_node(i);
if ( n->is_Allocate() )
return true;
if( n->is_Lock() ) {
return true;
}
}
return false;
}
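// A hypothetical call-site sketch (not shown in this fragment): EA is
// assumed to be gated on this check so the compiler only pays for the
// analysis when it can help, e.g.
//
//   if (DoEscapeAnalysis && ConnectionGraph::has_candidates(C)) {
//     ConnectionGraph::do_analysis(C, igvn);
//   }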
// Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
// to create space for them in ConnectionGraph::_nodes[].
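// A minimal sketch of that setup, assuming PhaseTransform::zerocon():
//
//   Node* oop_null  = igvn->zerocon(T_OBJECT);     // ConP(#NULL)
//   Node* noop_null = igvn->zerocon(T_NARROWOOP);  // ConN(#NULL)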
// Perform escape analysis
if (congraph->compute_escape()) {
// There are non escaping objects.
C->set_congraph(congraph);
}
// Cleanup.
}
// Worklists used by EA.
// 1. Populate Connection Graph (CG) with PointsTo nodes.
// Initialize worklist
}
// Create PointsTo nodes and add them to Connection Graph. Called
// only once per ideal node since ideal_nodes is a Unique_Node_List.
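// A sketch of the worklist discipline assumed here: Unique_Node_List
// pairs a list with a membership set, so a repeated push is a no-op:
//
//   Unique_Node_List ideal_nodes;
//   ideal_nodes.push(n);   // appended
//   ideal_nodes.push(n);   // ignored, n is already a member
//
// which is what guarantees the "only once per ideal node" property.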
if (ptn->is_JavaObject()) {
// Only results of allocations and java static calls are interesting.
if ((n->is_Allocate() || n->is_CallStaticJava()) &&
}
}
}
if (n->is_MergeMem()) {
// Collect all MergeMem nodes to add memory slices for
// scalar replaceable objects in split_unique_types().
} else if (OptimizePtrCompare && n->is_Cmp() &&
// Collect compare pointers nodes.
} else if (n->is_MemBarStoreStore()) {
// Collect all MemBarStoreStore nodes so that depending on the
// escape status of the associated Allocate node some of them
// may be eliminated.
#ifdef ASSERT
} else if(n->is_AddP()) {
// Collect address nodes for graph verification.
addp_worklist.append(n);
#endif
}
ideal_nodes.push(m);
}
}
if (non_escaped_worklist.length() == 0) {
_collecting = false;
return false; // Nothing to do.
}
// Add final simple edges to graph.
while(delayed_worklist.size() > 0) {
Node* n = delayed_worklist.pop();
add_final_edges(n);
}
#ifdef ASSERT
if (VerifyConnectionGraph) {
// Verify that no new simple edges could be created and all
// local vars have edges.
_verify = true;
}
}
_verify = false;
}
#endif
// 2. Finish Graph construction by propagating references to all
// java objects through graph.
// All objects escaped or we hit the time or iteration limits.
_collecting = false;
return false;
}
// 3. Adjust scalar_replaceable state of non-escaping objects and push
// scalar replaceable allocations on alloc_worklist for processing
// in split_unique_types().
ptn->scalar_replaceable()) {
if (ptn->scalar_replaceable()) {
}
}
}
#ifdef ASSERT
if (VerifyConnectionGraph) {
// Verify that graph is complete - no new edges could be added or needed.
}
assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
null_obj->edge_count() == 0 &&
!null_obj->arraycopy_src() &&
#endif
_collecting = false;
} // TracePhase t3("connectionGraph")
// 4. Optimize ideal graph based on EA information.
if (has_non_escaping_obj) {
}
#ifndef PRODUCT
if (PrintEscapeAnalysis) {
}
#endif
#ifdef ASSERT
if (VerifyConnectionGraph) {
}
}
#endif
// 5. Separate memory graph for scalar replaceable allocations.
// Now use the escape information to create unique types for
// scalar replaceable objects.
if (C->failing()) return false;
#ifdef ASSERT
C->method()->print_short_name();
if(!EliminateAllocations) {
} else if(!has_scalar_replaceable_candidates) {
} else if(C->AliasLevel() < 3) {
}
#endif
}
return has_non_escaping_obj;
}
// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
// ThreadLocal has RawPtr type.
#ifdef ASSERT
} else {
}
#endif
}
}
// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
return; // No need to redefine PointsTo node during first iteration.
if (n->is_Call()) {
// Arguments to allocation and locking don't escape.
if (n->is_AbstractLock()) {
// Put Lock and Unlock nodes on IGVN worklist to process them during
// first IGVN optimization when escape information is still available.
} else if (n->is_Allocate()) {
add_call_node(n->as_Call());
} else {
if (n->is_CallStaticJava()) {
return; // Skip uncommon traps
}
// Don't mark as processed since call's arguments have to be processed.
delayed_worklist->push(n);
// Check if a call returns an object.
if (n->as_Call()->returns_pointer() &&
add_call_node(n->as_Call());
}
}
return;
}
// Put this check here to process call arguments since some call nodes
// point to phantom_obj.
return; // Skip predefined nodes.
switch (opcode) {
case Op_AddP: {
// Field nodes are created for all field types. They are used in
// adjust_scalar_replaceable_state() and split_unique_types().
// Note, non-oop fields will have only base edges in Connection
// Graph because such fields are not used for oop loads and stores.
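// An illustrative Java sketch of that distinction (hypothetical class):
//
//   class A { long x; Point p; }
//
// Both A.x and A.p get Field nodes here, but only A.p participates in
// oop loads and stores; A.x keeps just its base edge.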
} else {
}
break;
}
case Op_CastX2P: {
break;
}
case Op_CastPP:
case Op_CheckCastPP:
case Op_EncodeP:
case Op_DecodeN: {
break;
}
case Op_CMoveP: {
// Do not add edges during the first iteration because some inputs
// may not be defined yet.
delayed_worklist->push(n);
break;
}
case Op_ConP:
case Op_ConN: {
// assume all oop constants globally escape except for null
} else {
}
add_java_object(n, es);
break;
}
case Op_CreateEx: {
// assume that all exception objects globally escape
break;
}
case Op_LoadKlass:
case Op_LoadNKlass: {
// Unknown class is loaded
break;
}
case Op_LoadP:
case Op_LoadN:
case Op_LoadPLocked: {
break;
}
case Op_Parm: {
break;
}
case Op_PartialSubtypeCheck: {
// Produces Null or notNull and is used only in CmpP so
// phantom_obj could be used.
break;
}
case Op_Phi: {
// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
// ThreadLocal has RawPtr type.
// Do not add edges during the first iteration because some inputs
// may not be defined yet.
delayed_worklist->push(n);
}
break;
}
case Op_Proj: {
// we are only interested in the oop result projection from a call
n->in(0), delayed_worklist);
}
break;
}
case Op_Rethrow: // Exception object escapes
case Op_Return: {
// Treat Return value as LocalVar with GlobalEscape escape state.
}
break;
}
case Op_GetAndSetP:
case Op_GetAndSetN: {
// fallthrough
}
case Op_StoreP:
case Op_StoreN:
case Op_StorePConditional:
case Op_CompareAndSwapP:
case Op_CompareAndSwapN: {
break; // skip dead nodes
}
if (adr_type->isa_oopptr() ||
#ifdef ASSERT
// Verify a raw address for a store captured by Initialize node.
}
#endif
} else {
// Ignore copying the displaced header to the BoxNode (OSR compilation).
if (adr->is_BoxLock())
break;
// Stored value escapes in unsafe access.
// Pointer stores in G1 barriers look like unsafe access.
// Ignore such stores to be able to scalar-replace non-escaping
// allocations.
PtrQueue::byte_offset_of_buf())) {
break; // G1 pre-barrier: previous oop value store.
}
PtrQueue::byte_offset_of_buf())) {
break; // G1 post-barrier: card address store.
}
}
}
}
}
break;
}
#ifdef ASSERT
n->dump(1);
assert(false, "not unsafe or G1 barrier raw StoreP");
#endif
}
break;
}
case Op_AryEq:
case Op_StrComp:
case Op_StrEquals:
case Op_StrIndexOf: {
break;
}
case Op_ThreadLocal: {
break;
}
default:
; // Do nothing for nodes not related to EA.
}
return;
}
#ifdef ASSERT
#define ELSE_FAIL(name)                                  \
      /* Should not be called for a non-pointer type. */ \
      n->dump(1);                                        \
      assert(false, name);                               \
      break;
#else
#define ELSE_FAIL(name) \
      break;
#endif
// Add final simple edges to graph.
#ifdef ASSERT
return; // This method does not change graph for JavaObject.
#endif
if (n->is_Call()) {
process_call_arguments(n->as_Call());
return;
}
"node should be registered already");
switch (opcode) {
case Op_AddP: {
break;
}
case Op_CastPP:
case Op_CheckCastPP:
case Op_EncodeP:
case Op_DecodeN: {
break;
}
case Op_CMoveP: {
continue; // ignore NULL
continue; // ignore top or inputs which loop back to this node
}
break;
}
case Op_LoadP:
case Op_LoadN:
case Op_LoadPLocked: {
// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
// ThreadLocal has RawPtr type.
break;
}
ELSE_FAIL("Op_LoadP");
}
case Op_Phi: {
// Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
// ThreadLocal has RawPtr type.
continue; // ignore NULL
continue; // ignore top or inputs which loop back to this node
}
break;
}
ELSE_FAIL("Op_Phi");
}
case Op_Proj: {
// we are only interested in the oop result projection from a call
break;
}
ELSE_FAIL("Op_Proj");
}
case Op_Rethrow: // Exception object escapes
case Op_Return: {
// Treat Return value as LocalVar with GlobalEscape escape state.
break;
}
ELSE_FAIL("Op_Return");
}
case Op_StoreP:
case Op_StoreN:
case Op_StorePConditional:
case Op_CompareAndSwapP:
case Op_CompareAndSwapN:
case Op_GetAndSetP:
case Op_GetAndSetN: {
#ifdef ASSERT
n->dump(1);
break;
}
#endif
}
if (adr_type->isa_oopptr() ||
// Point Address to Value
break;
// Stored value escapes in unsafe access.
// Add edge to object for unsafe access with offset.
}
break;
}
ELSE_FAIL("Op_StoreP");
}
case Op_AryEq:
case Op_StrComp:
case Op_StrEquals:
case Op_StrIndexOf: {
// char[] arrays passed to string intrinsics do not escape but
// they are not scalar replaceable. Adjust escape state for them.
// Start from in(2) edge since in(1) is memory edge.
}
}
}
break;
}
default: {
// This method should be called only for EA-specific nodes which may
// miss some edges when they were created.
#ifdef ASSERT
n->dump(1);
#endif
guarantee(false, "unknown node");
}
}
return;
}
if (call->is_Allocate()) {
bool scalar_replaceable = true;
if (call->is_AllocateArray()) {
} else {
// Not scalar replaceable if the length is not constant or too big.
scalar_replaceable = false;
}
}
} else { // Allocate instance
}
}
ptn->set_scalar_replaceable(false);
}
} else if (call->is_CallStaticJava()) {
// Call nodes could be different types:
//
// 1. CallDynamicJavaNode (what happened during call is unknown):
//
// - mapped to GlobalEscape JavaObject node if oop is returned;
//
// - all oop arguments are escaping globally;
//
// 2. CallStaticJavaNode (execute bytecode analysis if possible):
//
// - the same as CallDynamicJavaNode if can't do bytecode analysis;
//
// - mapped to GlobalEscape JavaObject node if unknown oop is returned;
// - mapped to NoEscape JavaObject node if non-escaping object allocated
// during call is returned;
// - mapped to ArgEscape LocalVar node pointing to object arguments
// which are returned and do not escape during the call;
//
// - oop arguments escaping status is defined by bytecode analysis;
//
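// An illustrative Java sketch of that mapping (hypothetical methods):
//
//   static Point id(Point p)  { return p; }           // argument returned and
//                                                     // not escaping: ArgEscape LocalVar
//   static Point make()       { return new Point(); } // non-escaping allocation
//                                                     // returned: NoEscape JavaObject
//   Point q = someInterface.get();                    // dynamic call, unknown oop
//                                                     // returned: GlobalEscape
//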
// For a static call, we know exactly what method is being called.
// Use bytecode estimator to record whether the call's return value escapes.
// Returns a newly allocated unescaped object.
} else {
if (call_analyzer->is_return_allocated()) {
// Returns a newly allocated unescaped object, simply
// update dependency information.
// Mark it as NoEscape so that objects referenced by
// its fields will be marked as NoEscape at least.
} else {
// Determine whether any arguments are returned.
bool ret_arg = false;
ret_arg = true;
break;
}
}
if (ret_arg) {
} else {
// Returns unknown object.
}
}
}
} else {
// Another type of call, assume the worst case:
// returned value is unknown and globally escapes.
}
}
bool is_arraycopy = false;
#ifdef ASSERT
case Op_Allocate:
case Op_AllocateArray:
case Op_Lock:
case Op_Unlock:
assert(false, "should be done already");
break;
#endif
case Op_CallLeafNoFP:
// fall through
case Op_CallLeaf: {
// Stub calls, objects do not escape but they are not scalar replaceable.
// Adjust escape state for outgoing arguments.
bool src_has_oops = false;
continue;
//
// The inline_native_clone() case when the arraycopy stub is called
// after the allocation before Initialize and CheckCastPP nodes.
// Or normal arraycopy for object arrays case.
//
// Set AddP's base (Allocate) as not scalar replaceable since
// a pointer to the base (with offset) is passed as an argument.
//
}
}
//
// src or dst could be j.l.Object when the other is a basic type array:
//
// arraycopy(char[],0,Object*,0,size);
// arraycopy(Object*,0,char[],0,size);
//
// Don't add edges in such cases.
//
#ifdef ASSERT
if (!(is_arraycopy ||
))) {
}
#endif
// Always process arraycopy's destination object since
// we need to add all possible edges to references in
// source object.
continue;
}
if (arg_is_arraycopy_dest) {
}
// Special arraycopy edge:
// A destination object's field can't have the source object
// as base since the objects' escape states are not related.
// Only escape state of destination object's fields affects
// escape state of fields in source object.
}
}
}
}
break;
}
case Op_CallStaticJava: {
// For a static call, we know exactly what method is being called.
// Use bytecode estimator to record the call's escape effects
#ifdef ASSERT
#endif
// fall-through if not a Java method or no analyzer information
if (call_analyzer != NULL) {
call_analyzer->is_arg_returned(k)) {
// The call returns arguments.
}
}
if (!call_analyzer->is_arg_stack(k)) {
// The argument globally escapes
} else {
if (!call_analyzer->is_arg_local(k)) {
// The argument itself doesn't escape, but any fields might
}
}
}
}
// The call returns arguments.
if (!call_analyzer->is_return_local()) {
// The call also returns an unknown object.
}
}
break;
}
}
default: {
// Fall-through here if not a Java method or no analyzer information
// or some other type of call, assume the worst case: all arguments
// globally escape.
}
}
}
}
}
}
// Finish Graph construction.
// Normally only 1-3 passes needed to build Connection Graph depending
// on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
// Set limit to 20 to catch the situation when something went wrong and
// bail out of Escape Analysis.
// Also limit build time to 30 sec (60 in debug VM).
#define CG_BUILD_ITER_LIMIT 20
#ifdef ASSERT
#define CG_BUILD_TIME_LIMIT 60.0  // longer limit in the debug VM, per the comment above
#else
#define CG_BUILD_TIME_LIMIT 30.0
#endif
// Propagate GlobalEscape and ArgEscape escape states and check that
// we still have non-escaping objects. The method pushes on _worklist
// Field nodes which reference phantom_object.
return false; // Nothing to do.
}
// Now propagate references to all JavaObject nodes.
int iterations = 0;
do {
while ((new_edges > 0) &&
(iterations++ < CG_BUILD_ITER_LIMIT) &&
new_edges = 0;
// Propagate references to phantom_object for nodes pushed on _worklist
// by find_non_escaped_objects() and find_field_value().
}
if (new_edges > 0) {
// Update escape states on each iteration if graph was updated.
return false; // Nothing to do.
}
}
}
if ((iterations < CG_BUILD_ITER_LIMIT) &&
// Find fields which have unknown value.
if (field->edge_count() == 0) {
// This code may have added new edges to phantom_object.
// Need another cycle to propagate references to phantom_object.
}
}
} else {
new_edges = 0; // Bailout
}
} while (new_edges > 0);
// Bailout if passed limits.
if ((iterations >= CG_BUILD_ITER_LIMIT) ||
}
assert(ExitEscapeAnalysisOnTimeout, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
// Possible infinite build_connection_graph loop,
// bailout (no changes to ideal graph were made).
return false;
}
#ifdef ASSERT
if (Verbose && PrintEscapeAnalysis) {
}
#endif
// Find fields initialized by NULL for non-escaping Allocations.
// Adding references to NULL object does not change escape states
// since it does not escape. Also no fields are added to NULL object.
add_java_object_edges(null_obj, false);
}
}
if (n->is_Allocate()) {
// The object allocated by this Allocate node will never be
// seen by another thread. Mark it so that when it is
// expanded no MemBarStoreStore is added.
}
}
return true; // Finished graph construction.
}
// Propagate GlobalEscape and ArgEscape escape states to all nodes
// and check that we still have non-escaping java objects.
// First, put all nodes with GlobalEscape and ArgEscape states on worklist.
}
}
// Set escape states to referenced nodes (edges list).
while (escape_worklist.length() > 0) {
// GlobalEscape or ArgEscape state of field means it has unknown value.
// New edge was added
}
}
PointsToNode* e = i.get();
if (e->is_Arraycopy()) {
// Propagate only fields escape state through arraycopy edge.
if (e->fields_escape_state() < field_es) {
escape_worklist.push(e);
}
// fields_escape_state is also set to 'es' if it is less than 'es'.
if (e->escape_state() < es) {
set_escape_state(e, es);
escape_worklist.push(e);
}
} else {
// Propagate field escape state.
bool es_changed = false;
if (e->fields_escape_state() < field_es) {
es_changed = true;
}
if ((e->escape_state() < field_es) &&
// Change escape state of referenced fields.
set_escape_state(e, field_es);
es_changed = true;
} else if (e->escape_state() < es) {
set_escape_state(e, es);
es_changed = true;
}
if (es_changed) {
escape_worklist.push(e);
}
}
}
}
// Remove escaped objects from non_escaped list.
}
// Find fields in non-escaped allocations which have unknown value.
}
}
return (non_escaped_worklist.length() > 0);
}
// Add all references to JavaObject node by walking over all uses.
int new_edges = 0;
if (populate_worklist) {
// Populate _worklist by uses of jobj's uses.
if (use->is_Arraycopy())
continue;
// Put on worklist all field's uses (loads) and
// related field nodes (same base and offset).
}
}
}
// Add reference from jobj to field and from field to jobj (field's base).
new_edges++;
}
continue;
}
if (use->is_Arraycopy()) {
continue;
// Added edge from Arraycopy node to arraycopy's source java object
new_edges++;
}
// and stop here.
continue;
}
continue; // No new edge added, there was such edge already.
new_edges++;
if (use->is_LocalVar()) {
if (use->arraycopy_dst()) {
PointsToNode* e = i.get();
if (e->is_Arraycopy()) {
continue;
// Add edge from arraycopy's destination java object to Arraycopy node.
new_edges++;
}
}
}
}
} else {
// Added a new edge to the values stored in the field.
// Put on worklist all field's uses (loads) and
// related field nodes (same base and offset).
}
}
return new_edges;
}
// Put on worklist all related field nodes.
// Loop over all bases of this field and push on worklist Field nodes
// with the same offset and base (since they may reference the same field).
// Check if the base was the source object of an arraycopy and go over the
// arraycopy's destination objects since values stored to a field of the source
// object are accessible by uses (loads) of fields of the destination objects.
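// An illustrative Java sketch (hypothetical arrays 'a' and 'b'):
//
//   a[0] = o;                          // store to a field of the source
//   System.arraycopy(a, 0, b, 0, n);
//   Object x = b[0];                   // load from the destination sees 'o'
//
// so loads from b's fields must be revisited when a's field gets a new value.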
if (base->arraycopy_src()) {
if (arycp->is_Arraycopy()) {
// Look for the same arraycopy reference.
}
}
}
}
}
}
}
// Put on worklist all related field nodes.
if (base->is_LocalVar()) {
PointsToNode* f = j.get();
f = PointsToNode::get_use_node(f);
continue;
add_to_worklist(f);
}
}
}
} else {
if (// Skip phantom_object since it is only used to indicate that
// this field's content globally escapes.
(base != phantom_obj) &&
// NULL object node does not have fields.
PointsToNode* f = i.get();
// Skip arraycopy edge since store to destination object field
// does not update value in source object field.
if (f->is_Arraycopy()) {
continue;
}
continue;
add_to_worklist(f);
}
}
}
}
}
// Find fields which have unknown value.
// Escaped fields should have init value already.
int new_edges = 0;
if (base->is_JavaObject()) {
// Skip Allocate's fields which will be processed later.
return 0;
}
}
// New edge was added
new_edges++;
}
return new_edges;
}
// Find fields initializing values for allocations.
int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
int new_edges = 0;
if (init_val == phantom_obj) {
// Do nothing for Allocate nodes since their fields' values are "known".
if (alloc->is_Allocate())
return 0;
#ifdef ASSERT
}
#endif
// Non-escaped allocations returned from Java or runtime calls have
// unknown values in fields.
// New edge was added
new_edges++;
}
}
}
return new_edges;
}
// Do nothing for Call nodes since their fields' values are unknown.
if (!alloc->is_Allocate())
return 0;
bool visited_bottom_offset = false;
// Check if an oop field's initializing value is recorded and add
// a corresponding NULL as the field's value if it is not recorded.
// Connection Graph does not record a default initialization by NULL
// captured by Initialize node.
//
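// An illustrative Java sketch (hypothetical class):
//
//   Point p = new Point();   // p.next is never stored to explicitly
//   if (p.next == q) ...     // the compare must still see the default NULL
//
// The default NULL initialization of p.next is not captured by the
// Initialize node, so the NULL edge is added here explicitly.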
continue; // Not oop field
if (!visited_bottom_offset) {
// OffsetBot is used to reference array's element,
// always add reference to NULL to all Field nodes since we don't
// know which element is referenced.
// New edge was added
new_edges++;
visited_bottom_offset = true;
}
}
} else {
// Check only oop fields.
if (adr_type->isa_rawptr()) {
#ifdef ASSERT
// Raw pointers are used for initializing stores so skip it
// since it should be recorded already
#endif
continue;
}
// StoreP::memory_type() == T_ADDRESS
// Make sure initializing store has the same type as this AddP.
// This AddP may reference a non-existing field because it is on a
// dead branch of a bimorphic call which is not eliminated yet.
#ifdef ASSERT
if (VerifyConnectionGraph) {
// Verify that AddP already points to all objects the value points to.
if (val->is_JavaObject()) {
missed_obj = val;
}
} else {
}
if (obj->is_JavaObject()) {
missed_obj = obj;
break;
}
}
}
}
if (missed_obj != NULL) {
missed_obj->dump();
}
}
#endif
} else {
// There could be initializing stores which follow the allocation.
// For example, a volatile field store is not collected
// by the Initialize node.
//
// Need to check for dependent loads to separate such stores from
// stores which follow loads. For now, add initial value NULL so
// that compare pointers optimization works correctly.
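// An illustrative Java sketch (hypothetical volatile field):
//
//   volatile Point next;
//   Point p = new Point();
//   p.next = q;   // this volatile store follows the allocation but is
//                 // not captured by the Initialize node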
}
}
// A field's initializing value was not recorded. Add NULL.
// New edge was added
new_edges++;
}
}
}
}
}
return new_edges;
}
// Adjust scalar_replaceable state after Connection Graph is built.
// Search for non-escaping objects which are not scalar replaceable
// and mark them to propagate the state to referenced objects.
// 1. An object is not scalar replaceable if the field into which it is
// stored has unknown offset (stored into unknown element of an array).
//
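// For example (an illustrative Java sketch):
//
//   Object[] arr = ...;
//   arr[i] = new Point();   // unknown index 'i' => unknown field offset,
//                           // so the new Point is not scalar replaceable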
jobj->set_scalar_replaceable(false);
return;
}
}
// 2. An object is not scalar replaceable if it is merged with other objects.
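// For example (an illustrative Java sketch):
//
//   Point p = flag ? new Point() : new Point(); // a Phi merges the two
//                                               // allocations; both are marked
//                                               // not scalar replaceable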
// Mark all objects.
jobj->set_scalar_replaceable(false);
ptn->set_scalar_replaceable(false);
}
}
if (!jobj->scalar_replaceable()) {
return;
}
}
// Non-escaping object node should point only to field nodes.
// 3. An object is not scalar replaceable if it has a field with unknown
// offset (array's element is accessed in loop).
jobj->set_scalar_replaceable(false);
return;
}
// 4. Currently an object is not scalar replaceable if a LoadStore node
// accesses its field since the field value is unknown after it.
//
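// For example, an atomic update of the object's field keeps it from being
// scalar replaced: Unsafe.compareAndSwapObject on the field lowers to a
// CompareAndSwapP node, which is a LoadStore (an illustrative case, not
// the only one).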
if (n->fast_out(i)->is_LoadStore()) {
jobj->set_scalar_replaceable(false);
return;
}
}
// 5. Or the address may point to more than one object. This may produce
// a false positive result (set not scalar replaceable)
// since the flow-insensitive escape analysis can't separate
// the case when stores overwrite the field's value from the case
// when stores happened on different control branches.
//
// Note: it will disable scalar replacement in some cases:
//
// Point p[] = new Point[1];
// p[0] = new Point(); // Will not be scalar replaced
//
// but it will save us from incorrect optimizations in cases like:
//
// Point p[] = new Point[1];
// if ( x ) p[0] = new Point(); // Will not be scalar replaced
//
// Don't take into account LocalVar nodes which
// may point to only one object which should also be
// this field's base by now.
// Mark all bases.
jobj->set_scalar_replaceable(false);
base->set_scalar_replaceable(false);
}
}
}
}
}
#ifdef ASSERT
// Verify that graph is complete - no new edges could be added.
int new_edges = 0;
}
// Verify that escape state is final.
(non_escaped_length == length) &&
// Verify fields information.
// Verify that field has all bases
if (ptn->is_JavaObject()) {
} else {
PointsToNode* e = i.get();
if (e->is_JavaObject()) {
}
}
}
// Verify that all fields have initializing values.
if (field->edge_count() == 0) {
}
}
}
}
}
}
}
}
}
#endif
// Optimize ideal graph.
if (EliminateLocks) {
// Mark locks before changing ideal graph.
for( int i=0; i < cnt; i++ ) {
Node *n = C->macro_node(i);
if (n->is_AbstractLock()) { // Lock and Unlock nodes
if (!alock->is_non_esc_obj()) {
// The lock could be marked eliminated by lock coarsening
// code during first IGVN before EA. Replace coarsened flag
alock->set_non_esc_obj();
}
}
}
}
}
if (OptimizePtrCompare) {
// Add ConI(#CC_GT) and ConI(#CC_EQ).
// Optimize objects compare.
while (ptr_cmp_worklist.length() != 0) {
#ifndef PRODUCT
if (PrintOptimizePtrCompare) {
tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
if (Verbose) {
n->dump(1);
}
}
#endif
}
}
// cleanup
}
// For MemBarStoreStore nodes added in library_call.cpp, check
// escape status of associated AllocateNode and optimize out
// MemBarStoreStore node if the allocated object never escapes.
while (storestore_worklist.length() != 0) {
if (not_global_escape(alloc)) {
}
}
}
// Optimize objects compare.
// Check simple cases first.
// Comparing the same not escaping object.
return _pcmp_eq;
}
// Comparing not escaping allocation.
return _pcmp_neq; // This includes nullness check.
}
}
}
// Comparing not escaping allocation.
return _pcmp_neq; // This includes nullness check.
}
}
}
// Klass or String constants compare. Need to be careful with
// compressed pointers - compare types of ConN and ConP instead of nodes.
return _pcmp_eq;
} else {
return _pcmp_neq;
}
}
return NULL; // Sets are not disjoint
}
// Sets are disjoint.
if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
// Check nullness of unknown object.
return NULL;
}
// Disjointness by itself is not sufficient since
// alias analysis is not complete for escaped objects.
// Disjoint sets are definitely unrelated only when
// at least one set has only not escaping allocations.
if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
if (ptn1->non_escaping_allocation()) {
return _pcmp_neq;
}
}
if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
if (ptn2->non_escaping_allocation()) {
return _pcmp_neq;
}
}
return NULL;
}
// Connection Graph construction functions.
return;
}
}
return;
}
}
return;
}
bool unsafe = false;
if (unsafe) {
}
}
return;
}
// Add edge from arraycopy node to source object.
src->set_arraycopy_src();
// Add edge from destination object to arraycopy node.
dst->set_arraycopy_dst();
}
// Check only oop fields.
if (!adr_type->isa_aryptr() ||
// OffsetBot is used to reference array's element. Ignore first AddP.
}
}
if (adr_type->isa_instptr()) {
} else {
// Check for unsafe oop field access
(*unsafe) = true;
break;
}
}
}
} else if (adr_type->isa_aryptr()) {
// Ignore array length load.
// Ignore first AddP.
} else {
}
// Allocation initialization, ThreadLocal field access, unsafe access
break;
}
}
}
}
}
// Returns the unique pointed-to java object or NULL.
// If the node was created after the escape computation we can't answer.
if (idx >= nodes_size()) {
return NULL;
}
if (ptn->is_JavaObject()) {
return ptn->as_JavaObject();
}
// Check all java objects it points to.
PointsToNode* e = i.get();
if (e->is_JavaObject()) {
jobj = e->as_JavaObject();
} else if (jobj != e) {
return NULL;
}
}
}
return jobj;
}
// Return true if this node points only to non-escaping allocations.
if (is_JavaObject()) {
Node* n = ideal_node();
if (n->is_Allocate() || n->is_CallStaticJava()) {
} else {
return false;
}
}
// Check all java objects it points to.
PointsToNode* e = i.get();
if (e->is_JavaObject()) {
Node* n = e->ideal_node();
!(n->is_Allocate() || n->is_CallStaticJava())) {
return false;
}
}
}
return true;
}
// Return true if we know the node does not escape globally.
// If the node was created after the escape computation we can't answer.
if (idx >= nodes_size()) {
return false;
}
// If we have already computed a value, return it.
return false;
if (ptn->is_JavaObject()) {
return true; // (es < PointsToNode::GlobalEscape);
}
// Check all java objects it points to.
return false;
}
return true;
}
// Helper functions
// Return true if this node points to the specified node or to nodes it points to.
if (is_JavaObject()) {
return (this == ptn);
}
return true;
}
return false;
}
// Return true if one node points to another.
if (this == ptn) {
return true;
} else if (ptn->is_JavaObject()) {
} else if (this->is_JavaObject()) {
}
for (int j = 0; j < ptn_count; j++) {
return true;
}
}
return false;
}
#ifdef ASSERT
// Return true if bases point to this java object.
return true;
}
return false;
}
#endif
// We are computing a raw address for a store captured by an Initialize
// node; compute an appropriate address type. AddP cases #3 and #5 (see below).
"offset must be a constant or it is initialization of array");
return offs;
}
}
//
// AddP cases for Base and Address inputs:
// case #1. Direct object's field reference:
// Allocate
// |
// Proj #5 ( oop result )
// |
// CheckCastPP (cast to instance type)
// | |
// AddP ( base == address )
//
// case #2. Indirect object's field reference:
// Phi
// |
// CastPP (cast to instance type)
// | |
// AddP ( base == address )
//
// case #3. Raw object's field reference for Initialize node:
// Allocate
// |
// Proj #5 ( oop result )
// top |
// \ |
// AddP ( base == top )
//
// case #4. Array's element reference:
// {CheckCastPP | CastPP}
// | | |
// | AddP ( array's element offset )
// | |
// AddP ( array's offset )
//
// case #5. Raw object's field reference for arraycopy stub call:
// The inline_native_clone() case when the arraycopy stub is called
// after the allocation before Initialize and CheckCastPP nodes.
// Allocate
// |
// Proj #5 ( oop result )
// | |
// AddP ( base == address )
//
// case #6. Constant Pool, ThreadLocal, CastX2P or
// Raw object's field reference:
// {ConP, ThreadLocal, CastX2P, raw Load}
// top |
// \ |
// AddP ( base == top )
//
// case #7. Klass's field reference.
// LoadKlass
// | |
// AddP ( base == address )
//
// case #8. narrow Klass's field reference.
// LoadNKlass
// |
// DecodeN
// | |
// AddP ( base == address )
//
// Case #6 (unsafe access) may have several chained AddP nodes.
}
}
return base;
}
//
// Find array's offset to push it on worklist first and
// as a result process the array's element offset first (pushed second)
// to avoid a CastPP for the array's offset.
// Otherwise the inserted CastPP (LocalVar) will point to what
// the AddP (Field) points to, which would be wrong since
// the algorithm expects the CastPP to have the same points-to set
// as the AddP's base CheckCastPP (LocalVar).
//
// ArrayAllocation
// |
// CheckCastPP
// |
// memProj (from ArrayAllocation CheckCastPP)
// | ||
// | || Int (element index)
// | || | ConI (log(element size))
// | || | /
// | || LShift
// | || /
// | AddP (array's element offset)
// | |
// | | ConI (array's offset: #12(32-bits) or #24(64-bits))
// | / /
// AddP (array's offset)
// |
//
return addp2;
}
return NULL;
}
//
// Adjust the type and inputs of an AddP which computes the
// address of a field of an instance
//
if (t == NULL) {
// We are computing a raw address for a store captured by an Initialize
// node; compute an appropriate address type (cases #3 and #5).
assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
}
"old type must be non-instance or match new type");
// The type 't' could be a subclass of 'base_t'.
// As a result t->offset() could be larger than base_t's size and it will
// cause a failure in add_offset() with narrow oops since the TypeOopPtr()
// constructor verifies correctness of the offset.
//
// It could happen on a subclass's branch (from the type profiling
// inlining) which was not eliminated during parsing since the exactness
// of the allocation type was not propagated to the subclass type check.
//
// Or the type 't' could be unrelated to 'base_t' at all.
// It could happen when the CHA type differs from the MDO type on a dead path
// (for example, from instanceof check) which is not collapsed during parsing.
//
// Do nothing for such AddP node and don't process its users since
// this code branch will go away.
//
if (!t->is_known_instance() &&
return false; // bail out
}
// Do NOT remove the next line: ensure a new alias index is allocated
// for the instance type. Note: C++ will not remove it since the call
// has side effects.
// record the allocation in the node map
// Set addp's Base and Address to 'base'.
// Skip AddP cases #3 and #5.
} else {
} else {
// AddP case #4 (adr is array's element offset AddP node)
#ifdef ASSERT
#endif
}
}
}
// Put on IGVN worklist since at least addp's type was changed above.
return true;
}
//
// Create a new version of orig_phi if necessary. Returns either the newly
// created phi or an existing phi. Sets new_created to indicate whether a new
// phi was created. Cache the last newly created phi in the node map.
//
PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
new_created = false;
// nothing to do if orig_phi is bottom memory or matches alias_idx
if (phi_alias_idx == alias_idx) {
return orig_phi;
}
// Have we recently created a Phi for this alias index?
return result;
}
// Previous check may fail when the same wide memory Phi was split into Phis
// for different memory slices. Search all Phis for this region.
}
}
}
if (C->do_escape_analysis() == true && !C->failing()) {
// Retry compilation without escape analysis.
// If this is the first failure, the sentinel string will "stick"
// to the Compile object, and the C2Compiler will see it and retry.
}
return NULL;
}
new_created = true;
return result;
}
//
// Return a new version of Memory Phi "orig_phi" with the inputs having the
// specified alias index.
//
PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
bool new_phi_created;
if (!new_phi_created) {
return result;
}
bool finished = false;
while(!finished) {
if (new_phi_created) {
// found a phi for which we created a new split, push current one on worklist and begin
// processing new one
idx = 1;
continue;
} else {
}
}
if (C->failing()) {
return NULL;
}
}
#ifdef ASSERT
// verify that the new Phi has an input for each input of the original
#endif
// Check if all new phi's inputs have specified alias index.
// Otherwise use old phi.
}
// we have finished processing a Phi, see if there are any more to do
if (!finished) {
}
}
return result;
}
//
// The next methods are derived from methods in MemNode.
//
Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
// TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
// means an array I have not precisely typed yet. Do not do any
// alias stuff with it any time soon.
// Update input if it is progress over what we have now
}
return mem;
}
//
// Move memory users to their memory slices.
//
// Move users first
if (use->is_MergeMem()) {
continue; // Nothing to do
}
// Replace previous general reference to mem node.
--imax;
--i;
// Don't move related membars.
continue;
}
alias_idx == general_idx) {
continue; // Nothing to do
}
// Move to general memory slice.
--i;
#ifdef ASSERT
// Don't move related cardmark.
continue;
}
// Memory nodes should have new memory input.
"Following memory nodes should have new memory input or be on the same memory slice");
// Phi nodes should be split and moved already.
} else {
assert(false, "should not be here");
#endif
}
}
}
//
// Search memory chain of "mem" to find a MemNode whose address
// is the specified alias index.
//
Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
return orig_mem;
break; // hit one of our sentinels
break; // Dead
break; // Found
break; // Do not skip store to general memory slice.
}
}
if (!is_instance)
continue; // don't search further for non-instance types
// skip over a call which does not affect this memory slice
break; // hit one of our sentinels
}
} else if (proj_in->is_Initialize()) {
// Stop if this is the initialization for the object instance which
// contains this memory slice, otherwise skip over it.
}
}
} else if (result->is_MergeMem()) {
// Didn't find instance memory, search through general slice recursively.
if (C->failing()) {
return NULL;
}
}
} else {
break;
}
} else if (result->is_ClearArray()) {
// Cannot bypass initialization of the instance
// we are looking for.
break;
}
// Otherwise skip it (the call updated 'result' value).
break;
}
}
}
if (!is_instance) {
// Push all non-instance Phis on the orig_phis worklist to update inputs
// during Phase 4 if needed.
} else if (C->get_alias_index(t) != alias_idx) {
// Create a new Phi with the specified alias index type.
}
}
// The result is either a MemNode, PhiNode or InitializeNode.
return result;
}
//
// Convert the types of unescaped objects to instance types where possible,
// propagate the new type information through the graph, and update memory
// edges and MergeMem inputs to reflect the new type.
//
// We start with allocations (and calls which may be allocations) on alloc_worklist.
// The processing is done in 4 phases:
//
// Phase 1: Process possible allocations from alloc_worklist. Create instance
// types for the CheckCastPP for allocations where possible.
// Propagate the new types through users as follows:
// casts and Phi: push users on alloc_worklist
// AddP: cast Base and Address inputs to the instance type
// push any AddP users on alloc_worklist and push any memnode
// users onto memnode_worklist.
// Phase 2: Process MemNode's from memnode_worklist. Compute new address type and
// search the Memory chain for a store with the appropriate
// address type. If a Phi is found, create a new version with
// the appropriate memory slices from each of the Phi inputs.
// For stores, process the users as follows:
// MemNode: push on memnode_worklist
// MergeMem: push on mergemem_worklist
// Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice
// moving the first node encountered of each instance type to the
// input corresponding to its alias index.
// appropriate memory slice.
// Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes.
//
// In the following example, the CheckCastPP nodes are the cast of allocation
// results and the allocation of node 29 is unescaped and eligible to be an
// instance type.
//
// We start with:
//
// 7 Parm #memory
// 10 ConI "12"
// 19 CheckCastPP "Foo"
// 20 AddP _ 19 19 10 Foo+12 alias_index=4
// 29 CheckCastPP "Foo"
// 30 AddP _ 29 29 10 Foo+12 alias_index=4
//
// 40 StoreP 25 7 20 ... alias_index=4
// 50 StoreP 35 40 30 ... alias_index=4
// 60 StoreP 45 50 20 ... alias_index=4
// 70 LoadP _ 60 30 ... alias_index=4
// 80 Phi 75 50 60 Memory alias_index=4
// 90 LoadP _ 80 30 ... alias_index=4
// 100 LoadP _ 80 20 ... alias_index=4
//
//
// Phase 1 creates an instance type for node 29 assigning it an instance id of 24
// and creating a new alias index for node 30. This gives:
//
// 7 Parm #memory
// 10 ConI "12"
// 19 CheckCastPP "Foo"
// 20 AddP _ 19 19 10 Foo+12 alias_index=4
// 29 CheckCastPP "Foo" iid=24
// 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
//
// 40 StoreP 25 7 20 ... alias_index=4
// 50 StoreP 35 40 30 ... alias_index=6
// 60 StoreP 45 50 20 ... alias_index=4
// 70 LoadP _ 60 30 ... alias_index=6
// 80 Phi 75 50 60 Memory alias_index=4
// 90 LoadP _ 80 30 ... alias_index=6
// 100 LoadP _ 80 20 ... alias_index=4
//
// In phase 2, new memory inputs are computed for the loads and stores,
// And a new version of the phi is created. In phase 4, the inputs to
// node 80 are updated and then the memory nodes are updated with the
// values computed in phase 2. This results in:
//
// 7 Parm #memory
// 10 ConI "12"
// 19 CheckCastPP "Foo"
// 20 AddP _ 19 19 10 Foo+12 alias_index=4
// 29 CheckCastPP "Foo" iid=24
// 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
//
// 40 StoreP 25 7 20 ... alias_index=4
// 50 StoreP 35 7 30 ... alias_index=6
// 60 StoreP 45 40 20 ... alias_index=4
// 70 LoadP _ 50 30 ... alias_index=6
// 80 Phi 75 40 60 Memory alias_index=4
// 120 Phi 75 50 50 Memory alias_index=6
// 90 LoadP _ 120 30 ... alias_index=6
// 100 LoadP _ 80 20 ... alias_index=4
//
// Phase 1: Process possible allocations from alloc_worklist.
// Create instance types for the CheckCastPP for allocations where possible.
//
// (Note: don't forget to change the order of the second AddP node on
// the alloc_worklist if the order of the worklist processing is changed,
// see the comment in find_second_addp().)
//
while (alloc_worklist.length() != 0) {
if (n->is_Call()) {
// copy escape information to call node
// We have an allocation or call which returns a Java object,
// see if it is unescaped.
continue;
// Find CheckCastPP for the allocate or for the return value of a call
n = alloc->result_cast();
if (n == NULL) { // No uses except Initialize node
if (alloc->is_Allocate()) {
// Set the scalar_replaceable flag for allocation
// so it could be eliminated if it has no uses.
}
continue;
}
if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
continue;
}
// The inline code for Object.clone() casts the allocation result to
// java.lang.Object and then to the actual type of the allocated
// object. Detect this case and use the second cast.
// Also detect j.l.reflect.Array.newInstance(jobject, jint) case when
// the allocation result is cast to java.lang.Object and then
// to the actual Array type.
&& (alloc->is_AllocateArray() ||
if (use->is_CheckCastPP()) {
break;
}
}
n = cast2;
} else {
// Non-scalar replaceable if the allocation type is unknown statically
// (reflection allocation); the object can't be restored during
// deoptimization without a precise type.
continue;
}
}
if (alloc->is_Allocate()) {
// Set the scalar_replaceable flag for allocation
// so it could be eliminated.
}
// in order for an object to be scalar-replaceable, it must be:
// - a direct allocation (not a call returning an object)
// - non-escaping
// - eligible to be a unique type
// - not determined to be ineligible by escape analysis
if (t == NULL)
continue; // not a TypeOopPtr
igvn->hash_delete(n);
n->raise_bottom_type(tinst);
igvn->hash_insert(n);
// First, put on the worklist all Field edges from Connection Graph
// which is more accurate than putting immediate users from the Ideal Graph.
"only AddP nodes are Field edges in CG");
}
}
}
// An allocation may have an Initialize which has raw stores. Scan
// the users of the raw allocation result and push AddP users
// on alloc_worklist.
}
}
}
}
} else if (n->is_AddP()) {
#ifdef ASSERT
#endif
return;
}
} else if (n->is_Phi() ||
n->is_CheckCastPP() ||
n->is_EncodeP() ||
n->is_DecodeN() ||
continue; // already processed
}
#ifdef ASSERT
#endif
return;
} else {
if (tn_type->isa_narrowoop()) {
} else {
}
if (tn_type->isa_narrowoop()) {
} else {
}
} else {
"unexpected type");
continue; // Skip dead path with different type
}
}
} else {
debug_only(n->dump();)
assert(false, "EA: unexpected node");
continue;
}
// push allocation's users on appropriate worklist
}
use->is_CheckCastPP() ||
use->is_EncodeP() ||
use->is_DecodeN() ||
#ifdef ASSERT
} else if (use->is_MergeMem()) {
assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
} else if (use->is_SafePoint()) {
// Look for MergeMem nodes for calls which reference unique allocation
// (through CheckCastPP nodes) even for debug info.
if (m->is_MergeMem()) {
}
} else {
n->dump();
assert(false, "EA: missing allocation reference path");
}
#endif
}
}
}
// New alias types were created in split_AddP().
// Phase 2: Process MemNode's from memnode_worklist. Compute new address type and
// compute new values for Memory inputs (the Memory inputs are not
// actually updated until phase 4.)
if (memnode_worklist.length() == 0)
return; // nothing to do
while (memnode_worklist.length() != 0) {
continue;
if (n->is_Phi() || n->is_ClearArray()) {
// we don't need to do anything, but the users must be pushed
} else if (n->is_MemBar()) { // Initialize, MemBar nodes
// we don't need to do anything, but the users must be pushed
if (n == NULL)
continue;
} else {
continue;
return;
}
// We delay the memory edge update since we need the old one in
// the MergeMem code below when instance memory slices are separated.
}
if (n->is_Load()) {
continue; // don't push users
} else if (n->is_LoadStore()) {
// get the memory projection
n = use;
break;
}
}
}
}
// push user on appropriate worklist
continue;
#ifdef ASSERT
} else if (use->is_MergeMem()) {
assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
} else {
if (!(op == Op_StoreCM ||
n->dump();
assert(false, "EA: missing memory path");
}
#endif
}
}
}
// Phase 3: Process MergeMem nodes from mergemem_worklist.
// Walk each memory slice moving the first node encountered of each
// instance type to the input corresponding to its alias index.
// Note: we don't want to use MergeMemStream here because we only want to
// scan inputs which exist at the start, not ones we add during processing.
// Note 2: MergeMem may already contain instance memory slices added
// during find_inst_mem() call when memory nodes were processed above.
continue;
// First, update mergemem by moving memory nodes to corresponding slices
// if their type became more precise since this mergemem was created.
if (idx == i) {
} else {
}
}
}
}
// Find any instance of the current type if we haven't already
// encountered a memory slice of the instance along the memory chain.
if (nmm->is_empty_memory(m)) {
return;
}
}
}
}
}
// Find the rest of the instances' values
// Didn't find instance memory, search through general slice recursively.
return;
}
}
}
}
// Phase 4: Update the inputs of non-instance memory Phis and
// the Memory input of memnodes
// First update the inputs of any non-instance Phi's from
// which we split out an instance Phi. Note we don't have
// to recursively process Phi's encountered on the input memory
// chains as is done in split_memory_phi() since they will
// also be processed here.
return;
}
}
}
}
// Update the memory inputs of MemNodes with the value we computed
// in Phase 2 and move stores' memory users to the corresponding memory slices.
// Disable memory split verification code until the fix for 6984348.
// Currently it produces false negative results since it does not cover all cases.
#if 0 // ifdef ASSERT
#endif
if (n->is_Mem()) {
#if 0 // ifdef ASSERT
}
#endif
if (!n->is_Load()) {
// Move memory users of a store first.
move_inst_mem(n, orig_phis);
}
// Now update memory input
igvn->hash_delete(n);
igvn->hash_insert(n);
} else {
}
}
#if 0 // ifdef ASSERT
// Verify that memory was split correctly
while (old_mems.is_nonempty()) {
}
#endif
}
#ifndef PRODUCT
static const char *node_type_names[] = {
"UnknownType",
"JavaObject",
"LocalVar",
"Field",
"Arraycopy"
};
static const char *esc_names[] = {
"UnknownEscape",
"NoEscape",
"ArgEscape",
"GlobalEscape"
};
if (print_state) {
}
if (is_Field()) {
if (f->is_oop())
if (f->offset() > 0)
PointsToNode* b = i.get();
}
}
PointsToNode* e = i.get();
tty->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
}
PointsToNode* u = i.get();
bool is_base = false;
if (PointsToNode::is_base_use(u)) {
is_base = true;
}
}
else
}
bool first = true;
for (int i = 0; i < ptnodes_length; i++) {
continue;
if (first) {
first = false;
}
// Print all locals and fields which reference this allocation
if (use->is_LocalVar()) {
} else if (Verbose) {
}
}
}
}
}
#endif