/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/regmask.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const TypePtr *tp, const TypePtr *adr_check, outputStream *st);

//=============================================================================
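// Report the concrete byte size of this node (used, for example, when the
// node is cloned).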
uint MemNode::size_of() const { return sizeof(*this); }
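// Compute the address type from the Address input's bottom type.  In debug
// builds the result is cross-checked against the _adr_type recorded when
// the node was constructed.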
const TypePtr *MemNode::adr_type() const {
  Node* adr = in(Address);
  const TypePtr* cross_check = NULL;
  DEBUG_ONLY(cross_check = _adr_type);
  return calculate_adr_type(adr->bottom_type(), cross_check);
}

#ifndef PRODUCT
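// Debug printing of this node's address type; also flags volatile slices.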
void MemNode::dump_spec(outputStream *st) const {
  if (in(Address) == NULL) return; // node is dead
#ifndef ASSERT
  // fake the missing field
  const TypePtr* _adr_type = NULL;
  if (in(Address) != NULL)
    _adr_type = in(Address)->bottom_type()->isa_ptr();
#endif
  dump_adr_type(this, _adr_type, st);

  Compile* C = Compile::current();
  if( C->alias_type(_adr_type)->is_volatile() )
    st->print(" Volatile!");
}

void MemNode::dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st) {
  st->print(" @");
  if (adr_type == NULL) {
    st->print("NULL");
  } else {
    adr_type->dump_on(st);
    Compile* C = Compile::current();
    Compile::AliasType* atp = NULL;
    if (C->have_alias_type(adr_type)) atp = C->alias_type(adr_type);
    if (atp == NULL)
      st->print(", idx=?\?;");
    else if (atp->index() == Compile::AliasIdxBot)
      st->print(", idx=Bot;");
    else if (atp->index() == Compile::AliasIdxTop)
      st->print(", idx=Top;");
    else if (atp->index() == Compile::AliasIdxRaw)
      st->print(", idx=Raw;");
    else {
      ciField* field = atp->field();
      if (field) {
        st->print(", name=");
        field->print_name_on(st);
      }
      st->print(", idx=%d;", atp->index());
    }
  }
}

extern void print_alias_types();

#endif
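// Walk up the memory chain of a known-instance field slice, stepping over
// memory projections of calls, initializations, membars, ClearArrays and
// MergeMems that provably cannot affect the memory addressed by 't_adr'.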
Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase) {
  const TypeOopPtr *tinst = t_adr->isa_oopptr();
  if (tinst == NULL || !tinst->is_known_instance_field())
    return mchain;  // don't try to optimize non-instance types
  uint instance_id = tinst->instance_id();
  Node *start_mem = phase->C->start()->proj_out(TypeFunc::Memory);
  Node *prev = NULL;
  Node *result = mchain;
  while (prev != result) {
    prev = result;
    if (result == start_mem)
      break;  // hit one of our sentinels
    // skip over a call which does not affect this memory slice
    if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
      Node *proj_in = result->in(0);
      if (proj_in->is_Allocate() && proj_in->_idx == instance_id) {
        break;  // hit one of our sentinels
      } else if (proj_in->is_Call()) {
        CallNode *call = proj_in->as_Call();
        if (!call->may_modify(t_adr, phase)) {
          result = call->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_Initialize()) {
        AllocateNode* alloc = proj_in->as_Initialize()->allocation();
        // Stop if this is the initialization for the object instance which
        // contains this memory slice, otherwise skip over it.
        if (alloc != NULL && alloc->_idx != instance_id) {
          result = proj_in->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_MemBar()) {
        result = proj_in->in(TypeFunc::Memory);
      } else {
        assert(false, "unexpected projection");
      }
    } else if (result->is_ClearArray()) {
      if (!ClearArrayNode::step_through(&result, instance_id, phase)) {
        // Cannot bypass initialization of the instance
        // we are looking for.
        break;
      }
      // Otherwise skip it (the call updated 'result' value).
    } else if (result->is_MergeMem()) {
      result = step_through_mergemem(phase, result->as_MergeMem(), t_adr, NULL, tty);
    }
  }
  return result;
}
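// Like optimize_simple_memory_chain(), but when iterative GVN is running a
// wide memory Phi may additionally be split so that the known instance gets
// its own narrow Phi (see PhiNode::split_out_instance()).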
Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase) {
  const TypeOopPtr *t_oop = t_adr->isa_oopptr();
  bool is_instance = (t_oop != NULL) && t_oop->is_known_instance_field();
  PhaseIterGVN *igvn = phase->is_IterGVN();
  Node *result = mchain;
  result = optimize_simple_memory_chain(result, t_adr, phase);
  if (is_instance && igvn != NULL && result->is_Phi()) {
    PhiNode *mphi = result->as_Phi();
    assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
    const TypePtr *t = mphi->adr_type();
    if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM ||
        t->isa_oopptr() && !t->is_oopptr()->is_known_instance() &&
        t->is_oopptr()->cast_to_exactness(true)
         ->is_oopptr()->cast_to_ptr_type(t_oop->ptr())
         ->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop) {
      // clone the Phi with our address type
      result = mphi->split_out_instance(t_adr, igvn);
    } else {
      assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
    }
  }
  return result;
}
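// Select the memory slice corresponding to 'tp' out of a MergeMem, checking
// in debug builds that 'adr_check' is consistent with the alias index in use.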
static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
  uint alias_idx = phase->C->get_alias_index(tp);
  Node *mem = mmem;
#ifdef ASSERT
  {
    // Check that current type is consistent with the alias index used during graph construction
    assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
    bool consistent = adr_check == NULL || adr_check->empty() ||
                      phase->C->must_alias(adr_check, alias_idx );
    // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
    if( !consistent && adr_check != NULL && !adr_check->empty() &&
        tp->isa_aryptr() && tp->offset() == Type::OffsetBot &&
        adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
        ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
          adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
          adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
      // don't assert if it is dead code.
      consistent = true;
    }
    if( !consistent ) {
      st->print("alias_idx==%d, adr_check==", alias_idx);
      if( adr_check == NULL ) {
        st->print("NULL");
      } else {
        adr_check->dump();
      }
      st->cr();
      print_alias_types();
      assert(consistent, "adr_check must match alias idx");
    }
  }
#endif
  // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
  // means an array I have not precisely typed yet.  Do not do any
  // alias stuff with it any time soon.
  const TypeOopPtr *toop = tp->isa_oopptr();
  if( tp->base() != Type::AnyPtr &&
      !(toop &&
        toop->klass() != NULL &&
        toop->klass()->is_java_lang_Object() &&
        toop->offset() == Type::OffsetBot) ) {
    // compress paths and change unreachable cycles to TOP
    // If not, we can update the input infinitely along a MergeMem cycle
    // Equivalent code in PhiNode::Ideal
    Node* m = phase->transform(mmem);
    // If transformed to a MergeMem, get the desired slice
    // Otherwise the returned node represents memory for every slice
    mem = (m->is_MergeMem())? m->as_MergeMem()->memory_at(alias_idx) : m;
    // Update input if it is progress over what we have now
  }
  return mem;
}

//--------------------------Ideal_common---------------------------------------
// Look for degenerate control and memory inputs.  Bypass MergeMem inputs.
// Unhook non-raw memories from complete (macro-expanded) initializations.
Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
  // If our control input is a dead region, kill all below the region
  Node *ctl = in(MemNode::Control);
  if (ctl && remove_dead_region(phase, can_reshape))
    return this;
  ctl = in(MemNode::Control);
  // Don't bother trying to transform a dead node
  if (ctl && ctl->is_top()) return NodeSentinel;

  PhaseIterGVN *igvn = phase->is_IterGVN();
  // Wait if control is on the worklist.
  if (ctl && can_reshape && igvn != NULL) {
    Node* bol = NULL;
    Node* cmp = NULL;
    if (ctl->in(0)->is_If()) {
      assert(ctl->is_IfTrue() || ctl->is_IfFalse(), "sanity");
      bol = ctl->in(0)->in(1);
      if (bol->is_Bool())
        cmp = ctl->in(0)->in(1)->in(1);
    }
    if (igvn->_worklist.member(ctl) ||
        (bol != NULL && igvn->_worklist.member(bol)) ||
        (cmp != NULL && igvn->_worklist.member(cmp)) ) {
      // This control path may be dead.
      // Delay this memory node transformation until the control is processed.
      phase->is_IterGVN()->_worklist.push(this);
      return NodeSentinel; // caller will return NULL
    }
  }
  // Ignore if memory is dead, or self-loop
  Node *mem = in(MemNode::Memory);
  if (phase->type( mem ) == Type::TOP) return NodeSentinel; // caller will return NULL
  assert(mem != this, "dead loop in MemNode::Ideal");

  if (can_reshape && igvn != NULL && igvn->_worklist.member(mem)) {
    // This memory slice may be dead.
    // Delay this mem node transformation until the memory is processed.
    phase->is_IterGVN()->_worklist.push(this);
    return NodeSentinel; // caller will return NULL
  }

  Node *address = in(MemNode::Address);
  const Type *t_adr = phase->type(address);
  if (t_adr == Type::TOP) return NodeSentinel; // caller will return NULL

  if (can_reshape && igvn != NULL &&
      (igvn->_worklist.member(address) ||
       igvn->_worklist.size() > 0 && (t_adr != adr_type())) ) {
    // The address's base and type may change when the address is processed.
    // Delay this mem node transformation until the address is processed.
    phase->is_IterGVN()->_worklist.push(this);
    return NodeSentinel; // caller will return NULL
  }

  // Do NOT remove or optimize the next lines: ensure a new alias index
  // is allocated for an oop pointer type before Escape Analysis.
  // Note: C++ will not remove it since the call has a side effect.
  if (t_adr->isa_oopptr()) {
    int alias_idx = phase->C->get_alias_index(t_adr->is_ptr());
  }

#ifdef ASSERT
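  // Print diagnostics when a non-raw address is unexpectedly built on a NULL
  // base, so the assert at the end of this block fails with more context.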
  Node* base = NULL;
  if (address->is_AddP())
    base = address->in(AddPNode::Base);
  if (base != NULL && phase->type(base)->higher_equal(TypePtr::NULL_PTR) &&
      !t_adr->isa_rawptr()) {
    // Note: raw address has TOP base and top->higher_equal(TypePtr::NULL_PTR) is true.
    Compile* C = phase->C;
    tty->cr();
    tty->print_cr("===== NULL+offs not RAW address =====");
    if (C->is_dead_node(this->_idx)) tty->print_cr("'this' is dead");
    if ((ctl != NULL) && C->is_dead_node(ctl->_idx)) tty->print_cr("'ctl' is dead");
    if (C->is_dead_node(mem->_idx)) tty->print_cr("'mem' is dead");
    if (C->is_dead_node(address->_idx)) tty->print_cr("'address' is dead");
    if (C->is_dead_node(base->_idx)) tty->print_cr("'base' is dead");
    tty->cr();
    base->dump(1);
    tty->cr();
    this->dump(2);
    tty->print("this->adr_type(): "); adr_type()->dump(); tty->cr();
    tty->print("phase->type(address): "); t_adr->dump(); tty->cr();
    tty->print("phase->type(base): "); phase->type(base)->dump(); tty->cr();
    tty->cr();
  }
  assert(base == NULL || t_adr->isa_rawptr() ||
         !phase->type(base)->higher_equal(TypePtr::NULL_PTR), "NULL+offs not RAW address?");
#endif

  // Avoid independent memory operations
  Node* old_mem = mem;

  // The code which unhooks non-raw memories from complete (macro-expanded)
  // initializations was removed. After macro-expansion all stores caught
  // by an Initialize node become raw stores and there is no information
  // about which memory slices they modify. So it is unsafe to move any memory
  // operation above these stores. Also in most cases hooked non-raw memories
  // were already unhooked by using information from detect_ptr_independence()
  // and find_previous_store().

  if (mem->is_MergeMem()) {
    MergeMemNode* mmem = mem->as_MergeMem();
    const TypePtr *tp = t_adr->is_ptr();

    mem = step_through_mergemem(phase, mmem, tp, adr_type(), tty);
  }

  if (mem != old_mem) {
    set_req(MemNode::Memory, mem);
    if (can_reshape && old_mem->outcnt() == 0) {
      igvn->_worklist.push(old_mem);
    }
    if (phase->type( mem ) == Type::TOP) return NodeSentinel;
    return this;
  }

  // let the subclass continue analyzing...
  return NULL;
}

// Helper function for proving some simple control dominations.
// Attempt to prove that all control inputs of 'dom' dominate 'sub'.
// Already assumes that 'dom' is available at 'sub', and that 'sub'
// is not a constant (dominated by the method's StartNode).
// Used by MemNode::find_previous_store to prove that the
// control input of a memory operation predates (dominates)
// an allocation it wants to look past.
bool MemNode::all_controls_dominate(Node* dom, Node* sub) {
  if (dom == NULL || dom->is_top() || sub == NULL || sub->is_top())
    return false; // Conservative answer for dead code

  // Check 'dom'. Skip Proj and CatchProj nodes.
  dom = dom->find_exact_control(dom);
  if (dom == NULL || dom->is_top())
    return false; // Conservative answer for dead code

  if (dom == sub) {
    // For the case when, for example, 'sub' is Initialize and the original
    // 'dom' is a Proj node of 'sub'.
    return false;
  }

  if (dom->is_Con() || dom->is_Start() || dom->is_Root() || dom == sub)
    return true;

  // 'dom' dominates 'sub' if its control edge and the control edges
  // of all its inputs dominate or are equal to sub's control edge.

  // Currently 'sub' is either an Allocate, Initialize or Start node,
  // or a Region for the check in LoadNode::Ideal().
  // 'sub' should have sub->in(0) != NULL.
  assert(sub->is_Allocate() || sub->is_Initialize() || sub->is_Start() ||
         sub->is_Region(), "expecting only these nodes");

  // Get control edge of 'sub'.
  Node* orig_sub = sub;
  sub = sub->find_exact_control(sub->in(0));
  if (sub == NULL || sub->is_top())
    return false; // Conservative answer for dead code

  assert(sub->is_CFG(), "expecting control");

  if (sub == dom)
    return true;

  if (sub->is_Start() || sub->is_Root())
    return false;

  {
    // Check all control edges of 'dom'.

    ResourceMark rm;
    Arena* arena = Thread::current()->resource_area();
    Node_List nlist(arena);
    Unique_Node_List dom_list(arena);

    dom_list.push(dom);
    bool only_dominating_controls = false;

    for (uint next = 0; next < dom_list.size(); next++) {
      Node* n = dom_list.at(next);
      if (n == orig_sub)
        return false; // One of dom's inputs is dominated by sub.
      if (!n->is_CFG() && n->pinned()) {
        // Check only the node's own control edge for pinned non-control nodes.
        n = n->find_exact_control(n->in(0));
        if (n == NULL || n->is_top())
          return false; // Conservative answer for dead code
        assert(n->is_CFG(), "expecting control");
        dom_list.push(n);
      } else if (n->is_Con() || n->is_Start() || n->is_Root()) {
        only_dominating_controls = true;
      } else if (n->is_CFG()) {
        if (n->dominates(sub, nlist))
          only_dominating_controls = true;
        else
          return false;
      } else {
        // First, the node's own control edge.
        Node* m = n->find_exact_control(n->in(0));
        if (m != NULL) {
          if (m->is_top())
            return false; // Conservative answer for dead code
          dom_list.push(m);
        }
        // Now, the rest of its edges.
        uint cnt = n->req();
        for (uint i = 1; i < cnt; i++) {
          m = n->find_exact_control(n->in(i));
          if (m == NULL || m->is_top())
            continue;
          dom_list.push(m);
        }
      }
    }
    return only_dominating_controls;
  }
}

//---------------------detect_ptr_independence---------------------------------
// Used by MemNode::find_previous_store to prove that two base
// pointers are never equal.
// The pointers are accompanied by their associated allocations,
// if any, which have been previously discovered by the caller.
bool MemNode::detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase) {
  // Attempt to prove that these two pointers cannot be aliased.
  // They may both manifestly be allocations, and they should differ.
  // Or, if they are not both allocations, they can be distinct constants.
  // Otherwise, one is an allocation and the other a pre-existing value.
  if (a1 == NULL && a2 == NULL) {         // neither an allocation
    return (p1 != p2) && p1->is_Con() && p2->is_Con();
  } else if (a1 != NULL && a2 != NULL) {  // both allocations
    return (a1 != a2);
  } else if (a1 != NULL) {                // one allocation a1
    // (Note: p2->is_Con implies p2->in(0)->is_Root, which dominates.)
    return all_controls_dominate(p2, a1);
  } else { //(a2 != NULL)                 // one allocation a2
    return all_controls_dominate(p1, a2);
  }
  return false;
}


// The logic for reordering loads and stores uses four steps:
// (a) Walk carefully past stores and initializations which we
//     can prove are independent of this load.
// (b) Observe that the next memory state makes an exact match
//     with self (load or store), and locate the relevant store.
// (c) Ensure that, if we were to wire self directly to the store,
//     the optimizer would fold it up somehow.
// (d) Do the rewiring, and return, depending on some other part of
//     the optimizer to fold up the load.
// This routine handles steps (a) and (b).  Steps (c) and (d) are
// specific to loads and stores, so they are handled by the callers.
// (Currently, only LoadNode::Ideal has steps (c), (d).  More later.)
//
Node* MemNode::find_previous_store(PhaseTransform* phase) {
  Node* ctrl = in(MemNode::Control);
  Node* adr = in(MemNode::Address);
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot)
    return NULL;            // cannot unalias unless there are precise offsets

  const TypeOopPtr *addr_t = adr->bottom_type()->isa_oopptr();

  intptr_t size_in_bytes = memory_size();

  Node* mem = in(MemNode::Memory);   // start searching here...

  int cnt = 50;             // Cycle limiter
  for (;;) {                // While we can dance past unrelated stores...
    if (--cnt < 0)  break;  // Caught in cycle or a complicated dance?

    if (mem->is_Store()) {
      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
      if (st_base == NULL)
        break;              // inscrutable pointer
      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success: The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue;         // (a) advance through independent store memory
        }
      }
      if (st_base != base &&
          detect_ptr_independence(base, alloc,
                                  st_base,
                                  AllocateNode::Ideal_allocation(st_base, phase),
                                  phase)) {
        // Success: The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue;           // (a) advance through independent store memory
      }

      // (b) At this point, if the bases or offsets do not agree, we lose,
      // since we have not managed to prove 'this' and 'mem' independent.
      if (st_base == base && st_offset == offset) {
        return mem;         // let caller handle steps (c), (d)
      }

    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();
      if (st_alloc == NULL)
        break;              // something degenerated
      bool known_identical = false;
      bool known_independent = false;
      if (alloc == st_alloc)
        known_identical = true;
      else if (alloc != NULL)
        known_independent = true;
      else if (all_controls_dominate(this, st_alloc))
        known_independent = true;

      if (known_independent) {
        // The bases are provably independent: Either they are
        // manifestly distinct allocations, or else the control
        // of this load dominates the store's allocation.
        int alias_idx = phase->C->get_alias_index(adr_type());
        if (alias_idx == Compile::AliasIdxRaw) {
          mem = st_alloc->in(TypeFunc::Memory);
        } else {
          mem = st_init->memory(alias_idx);
        }
        continue;           // (a) advance through independent store memory
      }

      // (b) at this point, if we are not looking at a store initializing
      // the same allocation we are loading from, we lose.
      if (known_identical) {
        // From caller, can_see_stored_value will consult find_captured_store.
        return mem;         // let caller handle steps (c), (d)
      }

    } else if (addr_t != NULL && addr_t->is_known_instance_field()) {
      // Can't use optimize_simple_memory_chain() since it needs PhaseGVN.
      if (mem->is_Proj() && mem->in(0)->is_Call()) {
        CallNode *call = mem->in(0)->as_Call();
        if (!call->may_modify(addr_t, phase)) {
          mem = call->in(TypeFunc::Memory);
          continue;         // (a) advance through independent call memory
        }
      } else if (mem->is_Proj() && mem->in(0)->is_MemBar()) {
        mem = mem->in(0)->in(TypeFunc::Memory);
        continue;           // (a) advance through independent MemBar memory
      } else if (mem->is_ClearArray()) {
        if (ClearArrayNode::step_through(&mem, (uint)addr_t->instance_id(), phase)) {
          // (the call updated 'mem' value)
          continue;         // (a) advance through independent allocation memory
        } else {
          // Cannot bypass initialization of the instance
          // we are looking for.
          return mem;
        }
      } else if (mem->is_MergeMem()) {
        int alias_idx = phase->C->get_alias_index(adr_type());
        mem = mem->as_MergeMem()->memory_at(alias_idx);
        continue;           // (a) advance through independent MergeMem memory
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return NULL;              // bail out
}

//----------------------calculate_adr_type-------------------------------------
// Helper function.  Notices when the given type of address hits top or bottom.
// Also, asserts a cross-check of the type against the expected address type.
const TypePtr* MemNode::calculate_adr_type(const Type* t, const TypePtr* cross_check) {
  if (t == Type::TOP) return NULL; // does not touch memory any more?
  #ifdef PRODUCT
  cross_check = NULL;
  #else
  if (!VerifyAliases || is_error_reported() || Node::in_dump()) cross_check = NULL;
  #endif
  const TypePtr* tp = t->isa_ptr();
  if (tp == NULL) {
    assert(cross_check == NULL || cross_check == TypePtr::BOTTOM, "expected memory type must be wide");
    return TypePtr::BOTTOM;  // touches lots of memory
  } else {
    #ifdef ASSERT
    // %%%% [phh] We don't check the alias index if cross_check is
    //            TypeRawPtr::BOTTOM.  Needs to be investigated.
    if (cross_check != NULL &&
        cross_check != TypePtr::BOTTOM &&
        cross_check != TypeRawPtr::BOTTOM) {
      // Recheck the alias index, to see if it has changed (due to a bug).
      Compile* C = Compile::current();
      assert(C->get_alias_index(cross_check) == C->get_alias_index(tp),
             "must stay in the original alias category");
      // The type of the address must be contained in the adr_type,
      // disregarding "null"-ness.
      // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
      const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();
      assert(cross_check->meet(tp_notnull) == cross_check,
             "real address must not escape from expected memory type");
    }
    #endif
    return tp;
  }
}

//------------------------adr_phi_is_loop_invariant----------------------------
// A helper function for Ideal_DU_postCCP to check if a Phi in a counted
// loop is loop invariant. Make a quick traversal of Phi and associated
// CastPP nodes, looking to see if they are a closed group within the loop.
bool MemNode::adr_phi_is_loop_invariant(Node* adr_phi, Node* cast) {
  // The idea is that the phi-nest must boil down to only CastPP nodes
  // with the same data. This implies that any path into the loop already
  // includes such a CastPP, and so the original cast, whatever its input,
  // must be covered by an equivalent cast, with an earlier control input.
  ResourceMark rm;

  // The loop entry input of the phi should be the unique dominating
  // node for every Phi/CastPP in the loop.
  Unique_Node_List closure;
  closure.push(adr_phi->in(LoopNode::EntryControl));

  // Add the phi node and the cast to the worklist.
  Unique_Node_List worklist;
  worklist.push(adr_phi);
  if( cast != NULL ){
    if( !cast->is_ConstraintCast() ) return false;
    worklist.push(cast);
  }

  // Begin recursive walk of phi nodes.
  while( worklist.size() ){
    // Take a node off the worklist
    Node *n = worklist.pop();
    if( !closure.member(n) ){
      // Add it to the closure.
      closure.push(n);
      // Make a sanity check to ensure we don't waste too much time here.
      if( closure.size() > 20) return false;
      // This node is OK if:
      //  - it is a cast of an identical value
      //  - or it is a phi node (then we add its inputs to the worklist)
      // Otherwise, the node is not OK, and we presume the cast is not invariant
      if( n->is_ConstraintCast() ){
        worklist.push(n->in(1));
      } else if( n->is_Phi() ) {
        for( uint i = 1; i < n->req(); i++ ) {
          worklist.push(n->in(i));
        }
      } else {
        return false;
      }
    }
  }

  // Quit when the worklist is empty, and we've found no offending nodes.
  return true;
}

//------------------------------Ideal_DU_postCCP-------------------------------
// Find any cast-away of null-ness and keep its control.  Null cast-aways are
// going away in this pass and we need to make this memory op depend on the
// gating null check.
Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
  return Ideal_common_DU_postCCP(ccp, this, in(MemNode::Address));
}

// I tried to leave the CastPP's in.  This makes the graph more accurate in
// some sense; we get to keep around the knowledge that an oop is not-null
// after some test.  Alas, the CastPP's interfere with GVN (some values are
// the regular oop, some are the CastPP of the oop, all merge at Phi's which
// cannot collapse, etc).  This cost us 10% on SpecJVM, even when I removed
// some of the more trivial cases in the optimizer.  Removing more useless
// Phi's started allowing Loads to illegally float above null checks.  I gave
// up on this approach.  CNC 10/20/2000
// This static method may be called by nodes other than MemNode (EncodePNode calls it).
// Only the control edge of the node 'n' might be updated.
Node *MemNode::Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ) {
  Node *skipped_cast = NULL;
  // Need a null check?  Regular static accesses do not because they are
  // from constant addresses.  Array ops are gated by the range check (which
  // always includes a NULL check).  Just check field ops.
  if( n->in(MemNode::Control) == NULL ) {
    // Scan upwards for the highest location we can place this memory op.
    while( true ) {
      switch( adr->Opcode() ) {

      case Op_AddP:             // No change to NULL-ness, so peek thru AddP's
        adr = adr->in(AddPNode::Base);
        continue;