// escape.cpp revision 4040
0N/A/*
3677N/A * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
0N/A * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
0N/A *
0N/A * This code is free software; you can redistribute it and/or modify it
0N/A * under the terms of the GNU General Public License version 2 only, as
0N/A * published by the Free Software Foundation.
0N/A *
0N/A * This code is distributed in the hope that it will be useful, but WITHOUT
0N/A * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
0N/A * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
0N/A * version 2 for more details (a copy is included in the LICENSE file that
0N/A * accompanied this code).
0N/A *
0N/A * You should have received a copy of the GNU General Public License version
0N/A * 2 along with this work; if not, write to the Free Software Foundation,
0N/A * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
0N/A *
1472N/A * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
1472N/A * or visit www.oracle.com if you need additional information or have any
1472N/A * questions.
0N/A *
0N/A */
0N/A
1879N/A#include "precompiled.hpp"
1879N/A#include "ci/bcEscapeAnalyzer.hpp"
1879N/A#include "compiler/compileLog.hpp"
1879N/A#include "libadt/vectset.hpp"
1879N/A#include "memory/allocation.hpp"
1879N/A#include "opto/c2compiler.hpp"
1879N/A#include "opto/callnode.hpp"
1879N/A#include "opto/cfgnode.hpp"
0N/A#include "opto/compile.hpp"
0N/A#include "opto/escape.hpp"
0N/A#include "opto/phaseX.hpp"
0N/A#include "opto/rootnode.hpp"
0N/A
// Build the initial connection-graph state for compilation 'C'.
// Pre-creates the well-known PointsTo nodes used throughout the analysis:
// phantom_obj (the unknown, globally escaping object, keyed by C->top()) and
// null_obj (for ConP#NULL); with compressed oops, ConN#NULL is mapped to the
// same null_obj so both null constants share one PointsTo node.
ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  _collecting(true),   // Graph construction in progress until compute_escape() finishes.
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _node_map(C->comp_arena()) {
  // Add unknown java object (must be first so phantom_obj exists before any
  // other node is processed).
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  // Add ConP(#NULL) and ConN(#NULL) nodes. do_analysis() created the ideal
  // zerocon nodes before constructing this graph, hence the asserts below.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  if (UseCompressedOops) {
    // The narrow-oop null constant shares null_obj rather than getting its
    // own JavaObject node.
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
  _pcmp_neq = NULL; // Should be initialized
  _pcmp_eq = NULL;
}
0N/A
0N/Abool ConnectionGraph::has_candidates(Compile *C) {
0N/A // EA brings benefits only when the code has allocations and/or locks which
0N/A // are represented by ideal Macro nodes.
0N/A int cnt = C->macro_count();
0N/A for( int i=0; i < cnt; i++ ) {
0N/A Node *n = C->macro_node(i);
0N/A if ( n->is_Allocate() )
0N/A return true;
0N/A if( n->is_Lock() ) {
0N/A Node* obj = n->as_Lock()->obj_node()->uncast();
0N/A if( !(obj->is_Parm() || obj->is_Con()) )
0N/A return true;
0N/A }
0N/A }
0N/A return false;
0N/A}
0N/A
0N/Avoid ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
0N/A Compile::TracePhase t2("escapeAnalysis", &Phase::_t_escapeAnalysis, true);
0N/A ResourceMark rm;
0N/A
0N/A // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
0N/A // to create space for them in ConnectionGraph::_nodes[].
0N/A Node* oop_null = igvn->zerocon(T_OBJECT);
0N/A Node* noop_null = igvn->zerocon(T_NARROWOOP);
0N/A ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
0N/A // Perform escape analysis
0N/A if (congraph->compute_escape()) {
0N/A // There are non escaping objects.
0N/A C->set_congraph(congraph);
0N/A }
0N/A // Cleanup.
0N/A if (oop_null->outcnt() == 0)
0N/A igvn->hash_delete(oop_null);
0N/A if (noop_null->outcnt() == 0)
0N/A igvn->hash_delete(noop_null);
0N/A}
0N/A
// Main driver of escape analysis:
//   1. populate the connection graph with PointsTo nodes,
//   2. propagate references/escape states to a fixed point,
//   3. adjust scalar-replaceable state of non-escaping allocations,
//   4. optimize the ideal graph with the EA results,
//   5. split unique memory types for scalar replaceable allocations.
// Returns true iff at least one non-escaping object was found (callers use
// this to decide whether to keep the graph around).
bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<Node*> storestore_worklist;
  GrowableArray<PointsToNode*> ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_worklist;
  GrowableArray<FieldNode*> oop_fields_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase t3("connectionGraph", &Phase::_t_connectionGraph, true);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->unique(), NULL); // preallocate space
  // Initialize worklist
  if (C->root() != NULL) {
    ideal_nodes.push(C->root());
  }
  // Breadth-first walk of the ideal graph; ideal_nodes grows as users of
  // each visited node are pushed below.
  for( uint next = 0; next < ideal_nodes.size(); ++next ) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != NULL) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only allocations and java static calls results are interesting.
          non_escaped_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    if (n->is_MergeMem()) {
      // Collect all MergeMem nodes to add memory slices for
      // scalar replaceable objects in split_unique_types().
      _mergemem_worklist.append(n->as_MergeMem());
    } else if (OptimizePtrCompare && n->is_Cmp() &&
               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
      // Collect compare pointers nodes.
      ptr_cmp_worklist.append(n);
    } else if (n->is_MemBarStoreStore()) {
      // Collect all MemBarStoreStore nodes so that depending on the
      // escape status of the associated Allocate node some of them
      // may be eliminated.
      storestore_worklist.append(n);
#ifdef ASSERT
    } else if(n->is_AddP()) {
      // Collect address nodes for graph verification.
      addp_worklist.append(n);
#endif
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i); // Get user
      ideal_nodes.push(m);
    }
  }
  if (non_escaped_worklist.length() == 0) {
    _collecting = false;
    return false; // Nothing to do.
  }
  // Add final simple edges to graph. These could not be added during the
  // first pass because some inputs were not yet mapped to PointsTo nodes.
  while(delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }
  int ptnodes_length = ptnodes_worklist.length();

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars has edges.
    _verify = true;
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif

  // 2. Finish Graph construction by propagating references to all
  // java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    return false;
  }

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  // scalar replaceable allocations on alloc_worklist for processing
  // in split_unique_types().
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    if (ptn->escape_state() == PointsToNode::NoEscape &&
        ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn);
      // adjust_scalar_replaceable_state() may clear the flag; re-check.
      if (ptn->scalar_replaceable()) {
        alloc_worklist.append(ptn->ideal_node());
      }
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  if (has_scalar_replaceable_candidates &&
      C->AliasLevel() >= 3 && EliminateAllocations) {
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist);
    if (C->failing()) return false;
    C->print_method("After Escape Analysis", 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if(!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if(!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    } else if(C->AliasLevel() < 3) {
      tty->print(" since AliasLevel < 3 ===");
    }
    tty->cr();
#endif
  }
  return has_non_escaping_obj;
}
0N/A
0N/A// Utility function for nodes that load an object
0N/Avoid ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
0N/A // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
0N/A // ThreadLocal has RawPtr type.
0N/A const Type* t = _igvn->type(n);
0N/A if (t->make_ptr() != NULL) {
0N/A Node* adr = n->in(MemNode::Address);
0N/A#ifdef ASSERT
0N/A if (!adr->is_AddP()) {
0N/A assert(_igvn->type(adr)->isa_rawptr(), "sanity");
0N/A } else {
0N/A assert((ptnode_adr(adr->_idx) == NULL ||
0N/A ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
0N/A }
1703N/A#endif
0N/A add_local_var_and_edge(n, PointsToNode::NoEscape,
0N/A adr, delayed_worklist);
0N/A }
0N/A}
0N/A
0N/A// Populate Connection Graph with PointsTo nodes and create simple
0N/A// connection graph edges.
0N/Avoid ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
0N/A assert(!_verify, "this method sould not be called for verification");
0N/A PhaseGVN* igvn = _igvn;
0N/A uint n_idx = n->_idx;
0N/A PointsToNode* n_ptn = ptnode_adr(n_idx);
0N/A if (n_ptn != NULL)
0N/A return; // No need to redefine PointsTo node during first iteration.
0N/A
0N/A if (n->is_Call()) {
0N/A // Arguments to allocation and locking don't escape.
0N/A if (n->is_AbstractLock()) {
0N/A // Put Lock and Unlock nodes on IGVN worklist to process them during
0N/A // first IGVN optimization when escape information is still available.
0N/A record_for_optimizer(n);
0N/A } else if (n->is_Allocate()) {
0N/A add_call_node(n->as_Call());
0N/A record_for_optimizer(n);
0N/A } else {
1601N/A if (n->is_CallStaticJava()) {
0N/A const char* name = n->as_CallStaticJava()->_name;
0N/A if (name != NULL && strcmp(name, "uncommon_trap") == 0)
0N/A return; // Skip uncommon traps
0N/A }
0N/A // Don't mark as processed since call's arguments have to be processed.
0N/A delayed_worklist->push(n);
0N/A // Check if a call returns an object.
0N/A if (n->as_Call()->returns_pointer() &&
0N/A n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
0N/A add_call_node(n->as_Call());
0N/A }
0N/A }
0N/A return;
0N/A }
0N/A // Put this check here to process call arguments since some call nodes
0N/A // point to phantom_obj.
0N/A if (n_ptn == phantom_obj || n_ptn == null_obj)
0N/A return; // Skip predefined nodes.
0N/A
0N/A int opcode = n->Opcode();
0N/A switch (opcode) {
0N/A case Op_AddP: {
0N/A Node* base = get_addp_base(n);
1601N/A PointsToNode* ptn_base = ptnode_adr(base->_idx);
1601N/A // Field nodes are created for all field types. They are used in
1601N/A // adjust_scalar_replaceable_state() and split_unique_types().
0N/A // Note, non-oop fields will have only base edges in Connection
0N/A // Graph because such fields are not used for oop loads and stores.
0N/A int offset = address_offset(n, igvn);
0N/A add_field(n, PointsToNode::NoEscape, offset);
0N/A if (ptn_base == NULL) {
0N/A delayed_worklist->push(n); // Process it later.
1703N/A } else {
1703N/A n_ptn = ptnode_adr(n_idx);
1703N/A add_base(n_ptn->as_Field(), ptn_base);
1703N/A }
1703N/A break;
1703N/A }
1703N/A case Op_CastX2P: {
1703N/A map_ideal_node(n, phantom_obj);
1703N/A break;
1703N/A }
1703N/A case Op_CastPP:
1703N/A case Op_CheckCastPP:
1703N/A case Op_EncodeP:
1703N/A case Op_DecodeN: {
1703N/A add_local_var_and_edge(n, PointsToNode::NoEscape,
0N/A n->in(1), delayed_worklist);
0N/A break;
0N/A }
0N/A case Op_CMoveP: {
0N/A add_local_var(n, PointsToNode::NoEscape);
0N/A // Do not add edges during first iteration because some could be
0N/A // not defined yet.
0N/A delayed_worklist->push(n);
0N/A break;
0N/A }
0N/A case Op_ConP:
0N/A case Op_ConN: {
0N/A // assume all oop constants globally escape except for null
0N/A PointsToNode::EscapeState es;
0N/A if (igvn->type(n) == TypePtr::NULL_PTR ||
0N/A igvn->type(n) == TypeNarrowOop::NULL_PTR) {
0N/A es = PointsToNode::NoEscape;
0N/A } else {
0N/A es = PointsToNode::GlobalEscape;
0N/A }
0N/A add_java_object(n, es);
0N/A break;
0N/A }
0N/A case Op_CreateEx: {
0N/A // assume that all exception objects globally escape
0N/A add_java_object(n, PointsToNode::GlobalEscape);
0N/A break;
0N/A }
0N/A case Op_LoadKlass:
0N/A case Op_LoadNKlass: {
0N/A // Unknown class is loaded
0N/A map_ideal_node(n, phantom_obj);
0N/A break;
0N/A }
0N/A case Op_LoadP:
0N/A case Op_LoadN:
0N/A case Op_LoadPLocked: {
0N/A add_objload_to_connection_graph(n, delayed_worklist);
0N/A break;
0N/A }
0N/A case Op_Parm: {
0N/A map_ideal_node(n, phantom_obj);
0N/A break;
0N/A }
0N/A case Op_PartialSubtypeCheck: {
0N/A // Produces Null or notNull and is used in only in CmpP so
1601N/A // phantom_obj could be used.
1601N/A map_ideal_node(n, phantom_obj); // Result is unknown
1601N/A break;
1601N/A }
0N/A case Op_Phi: {
0N/A // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
0N/A // ThreadLocal has RawPtr type.
0N/A const Type* t = n->as_Phi()->type();
0N/A if (t->make_ptr() != NULL) {
0N/A add_local_var(n, PointsToNode::NoEscape);
3677N/A // Do not add edges during first iteration because some could be
3677N/A // not defined yet.
3677N/A delayed_worklist->push(n);
3677N/A }
3677N/A break;
3677N/A }
0N/A case Op_Proj: {
0N/A // we are only interested in the oop result projection from a call
0N/A if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
0N/A n->in(0)->as_Call()->returns_pointer()) {
0N/A add_local_var_and_edge(n, PointsToNode::NoEscape,
0N/A n->in(0), delayed_worklist);
0N/A }
0N/A break;
0N/A }
0N/A case Op_Rethrow: // Exception object escapes
0N/A case Op_Return: {
0N/A if (n->req() > TypeFunc::Parms &&
0N/A igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
0N/A // Treat Return value as LocalVar with GlobalEscape escape state.
0N/A add_local_var_and_edge(n, PointsToNode::GlobalEscape,
0N/A n->in(TypeFunc::Parms), delayed_worklist);
0N/A }
0N/A break;
0N/A }
0N/A case Op_GetAndSetP:
0N/A case Op_GetAndSetN: {
0N/A add_objload_to_connection_graph(n, delayed_worklist);
0N/A // fallthrough
0N/A }
0N/A case Op_StoreP:
0N/A case Op_StoreN:
0N/A case Op_StorePConditional:
0N/A case Op_CompareAndSwapP:
0N/A case Op_CompareAndSwapN: {
0N/A Node* adr = n->in(MemNode::Address);
0N/A const Type *adr_type = igvn->type(adr);
0N/A adr_type = adr_type->make_ptr();
0N/A if (adr_type->isa_oopptr() ||
0N/A (opcode == Op_StoreP || opcode == Op_StoreN) &&
0N/A (adr_type == TypeRawPtr::NOTNULL &&
0N/A adr->in(AddPNode::Address)->is_Proj() &&
0N/A adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
0N/A delayed_worklist->push(n); // Process it later.
0N/A#ifdef ASSERT
0N/A assert(adr->is_AddP(), "expecting an AddP");
0N/A if (adr_type == TypeRawPtr::NOTNULL) {
0N/A // Verify a raw address for a store captured by Initialize node.
0N/A int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
0N/A assert(offs != Type::OffsetBot, "offset must be a constant");
0N/A }
1601N/A#endif
0N/A } else {
0N/A // Ignore copy the displaced header to the BoxNode (OSR compilation).
0N/A if (adr->is_BoxLock())
0N/A break;
0N/A // Stored value escapes in unsafe access.
0N/A if ((opcode == Op_StoreP) && (adr_type == TypeRawPtr::BOTTOM)) {
0N/A // Pointer stores in G1 barriers looks like unsafe access.
0N/A // Ignore such stores to be able scalar replace non-escaping
0N/A // allocations.
0N/A if (UseG1GC && adr->is_AddP()) {
0N/A Node* base = get_addp_base(adr);
0N/A if (base->Opcode() == Op_LoadP &&
0N/A base->in(MemNode::Address)->is_AddP()) {
0N/A adr = base->in(MemNode::Address);
0N/A Node* tls = get_addp_base(adr);
0N/A if (tls->Opcode() == Op_ThreadLocal) {
0N/A int offs = (int)igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
0N/A if (offs == in_bytes(JavaThread::satb_mark_queue_offset() +
0N/A PtrQueue::byte_offset_of_buf())) {
0N/A break; // G1 pre barier previous oop value store.
0N/A }
0N/A if (offs == in_bytes(JavaThread::dirty_card_queue_offset() +
3932N/A PtrQueue::byte_offset_of_buf())) {
0N/A break; // G1 post barier card address store.
0N/A }
0N/A }
0N/A }
0N/A }
0N/A delayed_worklist->push(n); // Process unsafe access later.
0N/A break;
0N/A }
0N/A#ifdef ASSERT
0N/A n->dump(1);
0N/A assert(false, "not unsafe or G1 barrier raw StoreP");
0N/A#endif
0N/A }
0N/A break;
0N/A }
0N/A case Op_AryEq:
0N/A case Op_StrComp:
0N/A case Op_StrEquals:
0N/A case Op_StrIndexOf: {
0N/A add_local_var(n, PointsToNode::ArgEscape);
0N/A delayed_worklist->push(n); // Process it later.
0N/A break;
0N/A }
// NOTE(review): the original fetch of this file appended an error trailer
// ("Error! ... java.lang.NullPointerException") here; it was tooling residue,
// not source. The file is truncated mid-way through
// ConnectionGraph::add_node_to_connection_graph() at this point.