/*
* Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
class CmpNode;
class CountedLoopEndNode;
class CountedLoopNode;
class IdealLoopTree;
class LoopNode;
class Node;
class PhaseIdealLoop;
class VectorSet;
struct small_cache;
//
// I D E A L I Z E D L O O P S
//
// Idealized loops are the set of loops I perform more interesting
// transformations on, beyond simple hoisting.
//------------------------------LoopNode---------------------------------------
// Simple loop header. Fall in path on left, loop-back path on right.
class LoopNode : public RegionNode {
  // Size is bigger to hold the flags.  However, the flags do not change
  // the semantics so it does not appear in the hash & cmp functions.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  short _loop_flags;
  // Names for flag bitfields
  enum { pre_post_main=0, inner_loop=8, partial_peel_loop=16, partial_peel_failed=32 };
  char _unswitch_count;
enum { _unswitch_max=3 };
public:
  // Names for edge indices
  enum { Self=0, EntryControl, LoopBackControl };

  int unswitch_max() { return _unswitch_max; }
  int unswitch_count() { return _unswitch_count; }
  void set_unswitch_count(int val) {
    assert(val <= unswitch_max(), "too many unswitches");
    _unswitch_count = val;
  }

  LoopNode( Node *entry, Node *backedge ) : RegionNode(3), _loop_flags(0), _unswitch_count(0) {
    init_class_id(Class_Loop);
    init_req(EntryControl, entry);
    init_req(LoopBackControl, backedge);
  }

  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
//------------------------------Counted Loops----------------------------------
// Counted loops are all trip-counted loops, with exactly 1 trip-counter exit
// path (and maybe some other exit paths). The trip-counter exit is always
// last in the loop. The trip-counter does not have to stride by a constant,
// but it does have to stride by a loop-invariant amount; the exit value is
// also loop invariant.
// CountedLoopNodes and CountedLoopEndNodes come in matched pairs. The
// CountedLoopNode has the incoming loop control and the loop-back-control
// which is always the IfTrue before the matching CountedLoopEndNode. The
// CountedLoopEndNode has an incoming control (possibly not the
// CountedLoopNode if there is control flow in the loop), the post-increment
// trip-counter value, and the limit. The trip-counter value is always of
// the form (Op old-trip-counter stride). The old-trip-counter is produced
// by a Phi connected to the CountedLoopNode. The stride is loop invariant.
// The Op is any commutable opcode, including Add, Mul, Xor. The
// CountedLoopEndNode also takes in the loop-invariant limit value.
// From a CountedLoopNode I can reach the matching CountedLoopEndNode via the
// loop-back control. From CountedLoopEndNodes I can reach CountedLoopNodes
// via the old-trip-counter from the Op node.
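//
// Illustrative sketch (not actual compiler output): a simple counted loop
// such as
//
//   for (int i = init; i < limit; i += stride) { body; }
//
// takes roughly this shape, with the trip-counter Op here being AddI:
//
//   CountedLoopNode(entry, loop-back IfTrue)
//     Phi(CountedLoopNode, init, AddI)   // old trip counter
//     AddI(Phi, stride)                  // post-increment trip counter
//     CmpI(AddI, limit)
//     Bool(CmpI)
//     CountedLoopEndNode(in-loop control, Bool)
//       IfTrue  --> loop-back control of the CountedLoopNode
//       IfFalse --> loop exit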
//------------------------------CountedLoopNode--------------------------------
// CountedLoopNodes head simple counted loops. CountedLoopNodes have as
// inputs the incoming loop-start control and the loop-back control, so they
// act like RegionNodes. They also take in the initial trip counter, the
// loop-invariant stride and the loop-invariant limit value. CountedLoopNodes
// produce a loop-body control and the trip counter value. Since
// CountedLoopNodes behave like RegionNodes I still have a standard CFG model.
class CountedLoopNode : public LoopNode {
  // Size is bigger to hold _main_idx.  However, _main_idx does not change
  // the semantics so it does not appear in the hash & cmp functions.
  virtual uint size_of() const { return sizeof(*this); }

  // For Pre- and Post-loops during debugging ONLY, this holds the index of
  // the Main CountedLoop.  Used to assert that we understand the graph shape.
  node_idx_t _main_idx;
// Known trip count calculated by policy_maximally_unroll
int _trip_count;
// Expected trip count from profile data
float _profile_trip_cnt;
// Log2 of original loop bodies in unrolled loop
int _unrolled_count_log2;
  // Node count prior to last unrolling - used to decide if
  // unroll,optimize,unroll,optimize,... is making progress
  int _node_count_before_unroll;
public:
  CountedLoopNode( Node *entry, Node *backedge )
    : LoopNode(entry, backedge), _trip_count(max_jint),
      _profile_trip_cnt(-1.0f),       // negative => trip count unknown from profile
      _unrolled_count_log2(0), _node_count_before_unroll(0) {
    init_class_id(Class_CountedLoop);
    // Initialize _trip_count to the largest possible value.
    // Will be reset (lower) if the loop's trip count is known.
  }
virtual int Opcode() const;
  Node *init_control() const { return in(EntryControl); }
  Node *back_control() const { return in(LoopBackControl); }
  CountedLoopEndNode *loopexit() const;
  Node *init_trip() const;
  int stride_con() const;
  bool stride_is_con() const;
// Match increment with optional truncation
static Node* match_incr_with_optional_truncation(Node* expr, Node** trunc1, Node** trunc2, const TypeInt** trunc_type);
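  // Illustrative example (assumed shapes, not a spec): for a 'short' loop
  // counter the post-increment appears truncated, e.g.
  //   expr == RShiftI(LShiftI(AddI(phi, stride), 16), 16)
  // On a match the underlying AddI is returned, *trunc1/*trunc2 receive the
  // truncation node(s), and *trunc_type the truncated integer range.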
// A 'main' loop has a pre-loop and a post-loop. The 'main' loop
// can run short a few iterations and may start a few iterations in.
// It will be RCE'd and unrolled and aligned.
// A following 'post' loop will run any remaining iterations. Used
// during Range Check Elimination, the 'post' loop will do any final
// iterations with full checks. Also used by Loop Unrolling, where
// the 'post' loop will do any epilog iterations needed. Basically,
// a 'post' loop can not profitably be further unrolled or RCE'd.
// A preceding 'pre' loop will run at least 1 iteration (to do peeling),
// it may do under-flow checks for RCE and may do alignment iterations
// so the following main loop 'knows' that it is striding down cache
// lines.
  // A 'main' loop that is ONLY unrolled or peeled, never RCE'd or
  // Aligned, may be missing its pre-loop.
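  //
  // Illustrative sketch (source-level view, not actual output): after RCE
  // and 4x unrolling, a loop like
  //   for (int i = 0; i < N; i++)  a[i] = 0;
  // behaves as if split into
  //   for (i = 0; i < pre_limit; i++)  a[i] = 0;  // 'pre': peel, underflow/alignment
  //   for (; i < main_limit; i += 4) { a[i] = 0; a[i+1] = 0; a[i+2] = 0; a[i+3] = 0; } // 'main': no range checks
  //   for (; i < N; i++)  a[i] = 0;               // 'post': epilog with full checks
  //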
  // Names for loop flags: the low bits of _loop_flags classify this loop
  // as Normal, Pre, Main or Post.
  enum { Normal=0, Pre=1, Main=2, Post=3, PreMainPostFlagsMask=3, Main_Has_No_Pre_Loop=4 };

  int is_normal_loop() const { return (_loop_flags&PreMainPostFlagsMask) == Normal; }
  int is_pre_loop   () const { return (_loop_flags&PreMainPostFlagsMask) == Pre;    }
  int is_main_loop  () const { return (_loop_flags&PreMainPostFlagsMask) == Main;   }
  int is_post_loop  () const { return (_loop_flags&PreMainPostFlagsMask) == Post;   }
  int is_main_no_pre_loop() const { return _loop_flags & Main_Has_No_Pre_Loop; }
  void set_main_no_pre_loop() { _loop_flags |= Main_Has_No_Pre_Loop; }

  int main_idx() const { return _main_idx; }

  void set_pre_loop  (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Pre ; _main_idx = main->_idx; }
  void set_main_loop (                     ) { assert(is_normal_loop(),""); _loop_flags |= Main;                         }
  void set_post_loop (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Post; _main_idx = main->_idx; }
  void set_normal_loop(                    ) { _loop_flags &= ~PreMainPostFlagsMask; }
  void set_trip_count(int tc) { _trip_count = tc; }
  int trip_count()            { return _trip_count; }

  void set_profile_trip_cnt(float ptc) { _profile_trip_cnt = ptc; }
  float profile_trip_cnt()             { return _profile_trip_cnt; }

  void double_unrolled_count() { _unrolled_count_log2++; }
  int  unrolled_count()        { return 1 << MIN2(_unrolled_count_log2, BitsPerInt-3); }

  void set_node_count_before_unroll(int ct) { _node_count_before_unroll = ct; }
  int  node_count_before_unroll()           { return _node_count_before_unroll; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
//------------------------------CountedLoopEndNode-----------------------------
// CountedLoopEndNodes end simple trip counted loops. They act much like
// IfNodes.
class CountedLoopEndNode : public IfNode {
public:
enum { TestControl, TestValue };
  CountedLoopEndNode( Node *control, Node *test, float prob, float cnt )
    : IfNode( control, test, prob, cnt) {
    init_class_id(Class_CountedLoopEnd);
  }
virtual int Opcode() const;
  Node *cmp_node() const { return (in(TestValue)->req() >= 2) ? in(TestValue)->in(1) : NULL; }
  Node *incr() const { Node *tmp = cmp_node(); return (tmp && tmp->req() == 3) ? tmp->in(1) : NULL; }
  Node *stride() const { Node *tmp = incr(); return (tmp && tmp->req() == 3) ? tmp->in(2) : NULL; }
  Node *phi() const { Node *tmp = incr(); return (tmp && tmp->req() == 3) ? tmp->in(1) : NULL; }
  Node *init_trip() const { Node *tmp = phi(); return (tmp && tmp->req() == 3) ? tmp->in(1) : NULL; }
  int stride_con() const;
  bool stride_is_con() const { Node *tmp = stride(); return (tmp != NULL && tmp->is_Con()); }
  CountedLoopNode *loopnode() const {
    Node *ln = phi()->in(0);
    assert( ln->Opcode() == Op_CountedLoop, "malformed loop" );
    return (CountedLoopNode*)ln; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
inline CountedLoopEndNode *CountedLoopNode::loopexit() const {
  Node *bc = back_control();
  if( bc == NULL ) return NULL;
  Node *le = bc->in(0);
  if( le->Opcode() != Op_CountedLoopEnd )
    return NULL;
  return (CountedLoopEndNode*)le;
}
inline Node *CountedLoopNode::init_trip() const { return loopexit() ? loopexit()->init_trip() : NULL; }
inline int CountedLoopNode::stride_con() const { return loopexit() ? loopexit()->stride_con() : 0; }
inline bool CountedLoopNode::stride_is_con() const { return loopexit() && loopexit()->stride_is_con(); }
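// Example use of the inline accessors above (sketch): given CountedLoopNode *cl,
//   if (cl->stride_is_con() && cl->stride_con() == 1) { ... }
// is safe even while the loop is only partially constructed, since each
// accessor bails out (NULL/0/false) when loopexit() cannot yet find the
// matching CountedLoopEndNode.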
// -----------------------------IdealLoopTree----------------------------------
class IdealLoopTree : public ResourceObj {
public:
  IdealLoopTree *_parent;       // Parent in loop tree
  IdealLoopTree *_next;         // Next sibling in loop tree
  IdealLoopTree *_child;        // First child in loop tree

  // The head-tail backedge defines the loop.
  // If tail is NULL then this loop has multiple backedges as part of the
  // same loop.  During cleanup I'll peel off the multiple backedges; merge
  // them at the loop bottom and flow 1 real backedge into the loop.
  Node *_head;                  // Head of loop
  Node *_tail;                  // Tail of loop
  inline Node *tail();          // Handle lazy update of _tail field
  PhaseIdealLoop* _phase;

  Node_List _body;              // Loop body for inner loops

  uint8 _nest;                  // Nesting depth
  uint8 _irreducible:1,         // True if irreducible
        _has_call:1,            // True if has call safepoint
        _has_sfpt:1,            // True if has non-call safepoint
        _rce_candidate:1;       // True if candidate for range check elimination

  Node_List* _required_safept;  // An inner loop cannot delete these safepts
  bool  _allow_optimizations;   // Allow loop optimizations

  IdealLoopTree( PhaseIdealLoop* phase, Node *head, Node *tail )
    : _parent(0), _next(0), _child(0),
      _head(head), _tail(tail),
      _phase(phase),
      _required_safept(NULL),
      _allow_optimizations(true),
      _nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0)
  { }
  // Is 'l' a member of 'this'?
  int is_member( const IdealLoopTree *l ) const; // Test for nested membership

  // Set loop nesting depth.  Accumulate has_call bits.
  int set_nest( uint depth );

  // Split out multiple fall-in edges from the loop header.  Move them to a
  // private RegionNode before the loop.  This becomes the loop landing pad.
  void split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt );

  // Split out the outermost loop from this shared header.
  void split_outer_loop( PhaseIdealLoop *phase );

  // Merge all the backedges from the shared header into a private Region.
  // Feed that region as the one backedge to this loop.
  void merge_many_backedges( PhaseIdealLoop *phase );

  // Split shared headers and insert loop landing pads.
  // Insert a LoopNode to replace the RegionNode.
  // Returns TRUE if loop tree is structurally changed.
  bool beautify_loops( PhaseIdealLoop *phase );

  // Perform iteration-splitting on inner loops.  Split iterations to avoid
  // range checks or one-shot null checks.
  void iteration_split( PhaseIdealLoop *phase, Node_List &old_new );

  // Driver for various flavors of iteration splitting
  void iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new );

  // Given dominators, try to find loops with calls that must always be
  // executed (call dominates loop tail).  These loops do not need non-call
  // safepoints (ncsfpt).
  void check_safepts( VectorSet &visited, Node_List &stack );

  // Allpaths backwards scan from loop tail, terminating each path at first
  // safepoint encountered.
  void allpaths_check_safepts( VectorSet &visited, Node_List &stack );

  // Convert to counted loops where possible
  void counted_loop( PhaseIdealLoop *phase );

  // Check for Node being a loop-breaking test
  Node *is_loop_exit( Node *iff ) const;

  // Returns true if ctrl is executed on every complete iteration
  bool dominates_backedge( Node *ctrl );

  // Remove simplistic dead code from loop body
void DCE_loop_body();
  // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
  // Replace with a 1-in-10 exit guess.
  void adjust_loop_exit_prob( PhaseIdealLoop *phase );

  // Return TRUE or FALSE if the loop should never be RCE'd or aligned.
  // Useful for unrolling loops with NO array accesses.
  bool policy_peel_only( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be unswitched -- clone
  // loop with an invariant test
  bool policy_unswitching( PhaseIdealLoop *phase ) const;

  // Micro-benchmark spamming.  Remove empty loops.
  bool policy_do_remove_empty_loop( PhaseIdealLoop *phase );

  // Return TRUE or FALSE if the loop should be peeled or not.  Peel if we can
  // make some loop-invariant test (usually a null-check) happen before the
  // loop.
  bool policy_peeling( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be maximally unrolled.  Stash any
  // known trip count in the counted loop node.
  bool policy_maximally_unroll( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
  // the loop is a CountedLoop and the body is small enough.
  bool policy_unroll( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be range-check-eliminated.
  // Gather a list of IF tests that are dominated by iteration splitting;
  // also gather the end of the first split and the start of the 2nd split.
  bool policy_range_check( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be cache-line aligned.
  // Gather the expression that does the alignment.  Note that only
  // one array base can be aligned in a loop (unless the VM guarantees
  // mutual alignment).  Note that if we vectorize short memory ops
  // into longer memory ops, we may want to increase alignment.
  bool policy_align( PhaseIdealLoop *phase ) const;

  // Compute loop trip count from profile data
  void compute_profile_trip_cnt( PhaseIdealLoop *phase );

  // Reassociate invariant expressions.
  void reassociate_invariants( PhaseIdealLoop *phase );
  // Reassociate invariant add and subtract expressions.
  Node* reassociate_add_sub( Node* n1, PhaseIdealLoop *phase );
  // Return nonzero index of invariant operand if invariant and variant
  // are combined with an Add or Sub.  Helper for reassociate_invariants.
  int is_invariant_addition( Node* n, PhaseIdealLoop *phase );

  // Return true if n is invariant
bool is_invariant(Node* n) const;
// Put loop body on igvn work list
  void record_for_igvn();

  bool is_counted() const { return _head->is_CountedLoop(); }
  bool is_inner()   const { return is_counted() && _child == NULL; }
  bool is_irreducible() const { return _irreducible; }
#ifndef PRODUCT
void dump_head( ) const; // Dump loop head only
void dump() const; // Dump this loop recursively
#endif
};
// -----------------------------PhaseIdealLoop---------------------------------
// Computes the mapping from Nodes to IdealLoopTrees. Organizes IdealLoopTrees into a
// loop tree. Drives the loop-based transformations on the ideal graph.
class PhaseIdealLoop : public PhaseTransform {
friend class IdealLoopTree;
friend class SuperWord;
  // Pre-computed def-use info
  PhaseIterGVN &_igvn;

  // Head of loop tree
  IdealLoopTree *_ltree_root;

  // Array of pre-order numbers, plus post-visited bit.
  // ZERO for not pre-visited.  EVEN for pre-visited but not post-visited.
  // ODD for post-visited.  Other bits are the pre-order number.
  uint *_preorders;
  uint _max_preorder;

  // Allocate _preorders[] array
  void allocate_preorders() {
    _max_preorder = C->unique()+8;
    _preorders = NEW_RESOURCE_ARRAY(uint, _max_preorder);
    memset(_preorders, 0, sizeof(uint) * _max_preorder);
  }
  // Reallocate the _preorders[] array to fit the current graph
  void reallocate_preorders() {
    if ( _max_preorder < C->unique() ) {
      _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, C->unique());
      _max_preorder = C->unique();
    }
    memset(_preorders, 0, sizeof(uint) * _max_preorder);
  }
// Check to grow _preorders[] array for the case when build_loop_tree_impl()
// adds new nodes.
  void check_grow_preorders( ) {
    if ( _max_preorder < C->unique() ) {
      uint newsize = _max_preorder<<1;  // double size of array
      _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, newsize);
      memset(&_preorders[_max_preorder],0,sizeof(uint)*(newsize-_max_preorder));
      _max_preorder = newsize;
    }
  }
  // Check for pre-visited.  Zero for NOT visited; non-zero for visited.
  int is_visited( Node *n ) const { return _preorders[n->_idx]; }
  // Pre-order numbers are written to the Nodes array as low-bit-set values.
  void set_preorder_visited( Node *n, int pre_order ) {
    assert( !is_visited( n ), "already set" );
    _preorders[n->_idx] = (pre_order<<1);
  };
  // Return pre-order number.
  int get_preorder( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]>>1; }

  // Check for being post-visited.
  // Should be previsited already (checked with assert(is_visited(n))).
  int is_postvisited( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]&1; }

  // Mark as post visited
  void set_postvisited( Node *n ) { assert( !is_postvisited( n ), "" ); _preorders[n->_idx] |= 1; }
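  // Example of the encoding (illustrative): pre-order number 5 is stored as
  // 5<<1 == 10 (even, so pre- but not post-visited); set_postvisited() turns
  // the slot into 11 (odd, post-visited); get_preorder() recovers 11>>1 == 5
  // in either state.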
  // Returns true if "n" is a data node, false if it's a control node.
  int has_ctrl( Node *n ) const { return ((intptr_t)_nodes[n->_idx]) & 1; }
  // clear out dead code after build_loop_late
  Node_List _deadlist;
// Support for faster execution of get_late_ctrl()/dom_lca()
// when a node has many uses and dominator depth is deep.
  Node_Array _dom_lca_tags;
  void init_dom_lca_tags();
  void clear_dom_lca_tags();
  // Inline wrapper for frequent cases:
  // 1) only one use
  // 2) a use is the same as the current LCA passed as 'n1'
  Node *dom_lca_for_get_late_ctrl( Node *lca, Node *n, Node *tag ) {
    assert( n->is_CFG(), "" );
    // Fast-path NULL lca
    if( lca != NULL && lca != n ) {
      assert( lca->is_CFG(), "" );
      // find LCA of all uses
      n = dom_lca_for_get_late_ctrl_internal( lca, n, tag );
    }
    return find_non_split_ctrl(n);
  }
  Node *dom_lca_for_get_late_ctrl_internal( Node *lca, Node *n, Node *tag );
  // true if CFG node d dominates CFG node n
  bool is_dominator( Node *d, Node *n );

  // Helper function for directing control inputs away from CFG split
  // points.
  Node *find_non_split_ctrl( Node *ctrl ) const {
    if (ctrl != NULL) {
      if (ctrl->is_MultiBranch()) {
        ctrl = ctrl->in(0);
      }
      assert(ctrl->is_CFG(), "CFG");
    }
    return ctrl;
  }
public:
  bool has_node( Node* n ) const { return _nodes[n->_idx] != NULL; }
  // check if transform created new nodes that need _ctrl recorded
  Node *get_late_ctrl( Node *n, Node *early );
  Node *get_early_ctrl( Node *n );
  void set_early_ctrl( Node *n );
  void set_subtree_ctrl( Node *root );
  void set_ctrl( Node *n, Node *ctrl ) {
    assert( !has_node(n) || has_ctrl(n), "" );
    _nodes.map( n->_idx, (Node*)((intptr_t)ctrl + 1) );
  }
  // Set control and update loop membership
  void set_ctrl_and_loop( Node* n, Node* ctrl ) {
    IdealLoopTree* old_loop = get_loop(get_ctrl(n));
    IdealLoopTree* new_loop = get_loop(ctrl);
    if (old_loop != new_loop) {
      if (old_loop->_child == NULL) old_loop->_body.yank(n);
      if (new_loop->_child == NULL) new_loop->_body.push(n);
    }
    set_ctrl(n, ctrl);
  }
// Control nodes can be replaced or subsumed. During this pass they
// get their replacement Node in slot 1. Instead of updating the block
// location of all Nodes in the subsumed block, we lazily do it. As we
// pull such a subsumed block out of the array, we write back the final
// correct block.
  Node *get_ctrl( Node *i ) {
    assert(has_node(i), "");
    Node *n = get_ctrl_no_update(i);
    _nodes.map( i->_idx, (Node*)((intptr_t)n + 1) );
    assert(has_node(i) && has_ctrl(i), "");
    return n;
  }
private:
  Node *get_ctrl_no_update( Node *i ) const {
    assert( has_ctrl(i), "" );
    Node *n = (Node*)(((intptr_t)_nodes[i->_idx]) & ~1);
    if (!n->in(0)) {
      // Skip dead CFG nodes
      do {
        n = (Node*)(((intptr_t)_nodes[n->_idx]) & ~1);
      } while (!n->in(0));
      n = find_non_split_ctrl(n);
    }
    return n;
  }
  // Check for loop being set
  // "n" must be a control node. Returns true if "n" is known to be in a loop.
  bool has_loop( Node *n ) const {
    assert(!has_node(n) || !has_ctrl(n), "");
    return has_node(n);
  }
  // Set loop
  void set_loop( Node *n, IdealLoopTree *loop ) {
    _nodes.map(n->_idx, (Node*)loop);
  }
  // Lazy-dazy update of 'get_ctrl' and 'idom_at' mechanisms.  Replace
  // the 'old_node' with 'new_node'.  Kill old-node.  Add a reference
  // from old_node to new_node to support the lazy update.  Reference
  // replaces loop reference, since that is not needed for a dead node.
public:
  void lazy_update( Node *old_node, Node *new_node ) {
    assert( old_node != new_node, "no cycles please" );
    //old_node->set_req( 1, new_node /*NO DU INFO*/ );
    // Nodes always have DU info now, so re-use the side array slot
    // for this node to provide the forwarding pointer.
    _nodes.map( old_node->_idx, (Node*)((intptr_t)new_node + 1) );
  }
  void lazy_replace( Node *old_node, Node *new_node ) {
    _igvn.hash_delete(old_node);
    _igvn.subsume_node(old_node, new_node);
    lazy_update(old_node, new_node);
  }
  void lazy_replace_proj( Node *old_node, Node *new_node ) {
    assert( old_node->req() == 1, "use this for Projs" );
    _igvn.hash_delete(old_node); // must be in hash table to lazy update
    old_node->add_req( NULL );
    lazy_replace( old_node, new_node );
  }
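  // Sketch of the lazy-update protocol in use (illustrative): after
  //   lazy_replace( old_iff, new_region );
  // nothing walks the nodes whose control was old_iff.  The side-array slot
  // of old_iff now holds a tagged forwarding pointer to new_region, and the
  // next get_ctrl(n) that reads the stale entry follows the chain in
  // get_ctrl_no_update() and writes back the final control node.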
private:
// Place 'n' in some loop nest, where 'n' is a CFG node
void build_loop_tree();
  int build_loop_tree_impl( Node *n, int pre_order );
  // Insert loop into the existing loop tree.  'innermost' is a leaf of the
  // loop tree, not the root.
  IdealLoopTree *sort( IdealLoopTree *loop, IdealLoopTree *innermost );
// Place Data nodes in some loop nest
void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack, const PhaseIdealLoop *verify_me );
void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack, const PhaseIdealLoop *verify_me );
  // Array of immediate dominance info for each CFG node indexed by node idx
private:
  uint _idom_size;
  Node **_idom;                  // Array of immediate dominators
  uint *_dom_depth;              // Used for fast LCA test
  GrowableArray<uint>* _dom_stk; // For recomputation of dom depth

  Node* idom_no_update(Node* d) const {
    assert(d->_idx < _idom_size, "oob");
    Node* n = _idom[d->_idx];
    assert(n != NULL,"Bad immediate dominator info.");
    while (n->in(0) == NULL) {  // Skip dead CFG nodes
      //n = n->in(1);
      n = (Node*)(((intptr_t)_nodes[n->_idx]) & ~1);
      assert(n != NULL,"Bad immediate dominator info.");
    }
    return n;
  }
  Node *idom(Node* d) const {
    uint didx = d->_idx;
    Node *n = idom_no_update(d);
    _idom[didx] = n;            // Lazily remove dead CFG nodes from table.
    return n;
  }
  uint dom_depth(Node* d) const {
    assert(d->_idx < _idom_size, "");
    return _dom_depth[d->_idx];
  }
  void set_idom(Node* d, Node* n, uint dom_depth);
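  // Sketch of how an LCA query uses this info (illustrative, not the exact
  // dom_lca_internal code): repeatedly step whichever node is deeper up its
  // immediate-dominator link until the two meet:
  //   while (n1 != n2) {
  //     if (dom_depth(n1) >= dom_depth(n2))  n1 = idom(n1);
  //     else                                 n2 = idom(n2);
  //   }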
  // Locally compute IDOM using dom_lca call
  Node *compute_idom( Node *region ) const;
  // Recompute dom_depth
  void recompute_dom_depth();

  // Is safept not required by an outer loop?
  bool is_deleteable_safept( Node* sfpt );
public:
// Dominators for the sea of nodes
void Dominators();
  Node *dom_lca( Node *n1, Node *n2 ) const {
    return find_non_split_ctrl(dom_lca_internal(n1, n2));
  }
  Node *dom_lca_internal( Node *n1, Node *n2 ) const;
  // Compute the Ideal Node to Loop mapping
  PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me, bool do_split_ifs );

  // True if the method has at least 1 irreducible loop
  bool _has_irreducible_loops;

  // Per-Node transform
  virtual Node *transform( Node *a_node );

  // Return a post-walked LoopNode
  CountedLoopNode *is_counted_loop( Node* x, IdealLoopTree*& loop );

  IdealLoopTree* get_loop( Node* n ) const {
    // Dead nodes have no loop, so return the top level loop instead
    if (!has_node(n)) return _ltree_root;
    assert(!has_ctrl(n), "");
    return (IdealLoopTree*)_nodes[n->_idx];
  }
  // Is 'n' a (nested) member of 'loop'?
  int is_member( const IdealLoopTree *loop, Node* n ) const {
    return loop->is_member(get_loop(n)); }
// This is the basic building block of the loop optimizations. It clones an
// entire loop body. It makes an old_new loop body mapping; with this
// mapping you can find the new-loop equivalent to an old-loop node. All
// new-loop nodes are exactly equal to their old-loop counterparts, all
// edges are the same. All exits from the old-loop now have a RegionNode
// that merges the equivalent new-loop path. This is true even for the
// normal "loop-exit" condition. All uses of loop-invariant old-loop values
// now come from (one or more) Phis that merge their new-loop equivalents.
// Parameter side_by_side_idom:
  //   When side_by_side_idom is NULL, the dominator tree is constructed for
// the clone loop to dominate the original. Used in construction of
// pre-main-post loop sequence.
// When nonnull, the clone and original are side-by-side, both are
// dominated by the passed in side_by_side_idom node. Used in
// construction of unswitched loops.
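  // Typical use (sketch): clone, then map any old-loop node to its copy
  // through the side array indexed by the old node's _idx:
  //   Node_List old_new;
  //   clone_loop( loop, old_new, dom_depth(loop->_head) );
  //   Node *new_head = old_new[loop->_head->_idx];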
  // If we got the effect of peeling, either by actually peeling or by
  // making a pre-loop which must execute at least once, we can remove
  // all loop-invariant dominated tests in the main body.
  void peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new );
  // Generate code to do a loop peel for the given loop (and body).
  // old_new is a temp array.
  void do_peeling( IdealLoopTree *loop, Node_List &old_new );
  // Add pre and post loops around the given loop.  These loops are used
  // during RCE, unrolling and aligning loops.
  void insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only );
  // If Node n lives in the back_ctrl block, we clone a private version of n
  // in preheader_ctrl block and return that, otherwise return n.
  Node *clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n );
  // Take steps to maximally unroll the loop.  Peel any odd iterations, then
  // unroll to do double iterations.  The next round of major loop transforms
  // will repeat till the doubled loop body does all remaining iterations in 1
  // pass.
  void do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new );

  // Unroll the loop body one step - make each trip do 2 iterations.
  void do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip );
  // Return true if exp is a constant times an induction var
  bool is_scaled_iv( Node* exp, Node* iv, int* p_scale );

  // Return true if exp is a scaled induction var plus (or minus) constant
  bool is_scaled_iv_plus_offset( Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0 );

  // Eliminate range-checks and other trip-counter vs loop-invariant tests.
  void do_range_check( IdealLoopTree *loop, Node_List &old_new );
  // Create a slow version of the loop by cloning the loop
  // and inserting an if to select fast-slow versions.
  ProjNode* create_slow_version_of_loop( IdealLoopTree *loop, Node_List &old_new );

  // Clone loop with an invariant test (that does not exit) and
  // insert a clone of the test that selects which version to
  // execute.
  void do_unswitching( IdealLoopTree *loop, Node_List &old_new );

  // Find candidate "if" for unswitching
  IfNode* find_unswitching_candidate( const IdealLoopTree *loop ) const;
// Range Check Elimination uses this function!
// Constrain the main loop iterations so the affine function:
// scale_con * I + offset < limit
// always holds true. That is, either increase the number of iterations in
// the pre-loop or the post-loop until the condition holds true in the main
// loop. Scale_con, offset and limit are all loop invariant.
void add_constraint( int stride_con, int scale_con, Node *offset, Node *limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit );
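  // Worked example (illustrative numbers): guarding a[2*i+3] against
  // a.length gives scale_con==2, offset==3, limit==a.length.  With a
  // positive stride, add_constraint trims *main_limit (and possibly raises
  // *pre_limit) so that 2*I + 3 < a.length holds for every I the main loop
  // executes; iterations that might fail the test are pushed into the pre-
  // or post-loop, which keep their full range checks.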
  // Partially peel loop up through last_peel node.
  bool partial_peel( IdealLoopTree *loop, Node_List &old_new );

  // Create a scheduled list of nodes control dependent on ctrl set.
  void scheduled_nodelist( IdealLoopTree *loop, VectorSet& ctrl, Node_List &sched );
  // Has a use in the vector set
  bool has_use_in_set( Node* n, VectorSet& vset );
  // Has use internal to the vector set (ie. not in a phi at the loop head)
  bool has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop );
  // clone "n" for uses that are outside of loop
  void clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist );
  // clone "n" for special uses that are in the not_peeled region
  void clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n,
                                          VectorSet& not_peel, Node_List& sink_list, Node_List& worklist );
// Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist
void insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp );
#ifdef ASSERT
// Validate the loop partition sets: peel and not_peel
bool is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list, VectorSet& not_peel );
  // Ensure that uses outside of loop are of the right form
  bool is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list,
                                 uint orig_exit_idx, uint clone_exit_idx );
#endif
  // Returns nonzero constant stride if-node is a possible iv test (otherwise returns zero.)
  int stride_of_possible_iv( Node* iff );
  bool is_possible_iv_test( Node* iff ) { return stride_of_possible_iv(iff) != 0; }

  // Return the (unique) control output node that's in the loop (if it exists.)
  Node* stay_in_loop( Node* n, IdealLoopTree *loop );

  // Insert a signed compare loop exit cloned from an unsigned compare.
  IfNode* insert_cmpi_loop_exit( IfNode* if_cmpu, IdealLoopTree *loop );
  void remove_cmpi_loop_exit( IfNode* if_cmp, IdealLoopTree *loop );

  // Utility to register node "n" with PhaseIdealLoop
  void register_node( Node* n, IdealLoopTree *loop, Node* pred, int ddepth );

  // Utility to create an if-projection
  ProjNode* proj_clone( ProjNode* p, IfNode* iff );

  // Force the iff control output to be the live_proj
  Node* short_circuit_if( IfNode* iff, ProjNode* live_proj );

  // Insert a region before an if projection
  RegionNode* insert_region_before_proj( ProjNode* proj );

  // Insert a new if before an if projection
ProjNode* insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj);
// "Nearly" because all Nodes have been cloned from the original in the loop,
// through the Phi recursively, and return a Bool.
  // Rework addressing expressions to get the most loop-invariant stuff
  // moved out.  We'd like to do all associative operators, but it's especially
  // important (common) to do address expressions.
  Node *remix_address_expressions( Node *n );

  // Attempt to use a conditional move instead of a phi/branch
  Node *conditional_move( Node *n );

  // Reorganize offset computations to lower register pressure.
  // Mostly prevent loop-fallout uses of the pre-incremented trip counter
  // (which are then alive with the post-incremented trip counter
  // forcing an extra register move)
  void reorg_offsets( IdealLoopTree *loop );

  // Check for aggressive application of 'split-if' optimization,
  // using basic block level info.
  void  split_if_with_blocks     ( VectorSet &visited, Node_Stack &nstack );
  Node *split_if_with_blocks_pre ( Node *n );
void split_if_with_blocks_post( Node *n );
  // Mark an IfNode as being dominated by a prior test,
  // without actually altering the CFG (and hence IDOM info).
  void dominated_by( Node *prevdom, Node *iff );

  // Split Node 'n' through merge point
  Node *split_thru_region( Node *n, Node *region );
  // Split Node 'n' through merge point if there is enough win.
  Node *split_thru_phi( Node *n, Node *region, int policy );
  // Found an If getting its condition-code input from a Phi in the
  // same block.  Split thru the Region.
  void do_split_if( Node *iff );
private:
  // Return a type based on condition control flow
  const TypeInt* filtered_type( Node *n, Node* n_ctrl );
  const TypeInt* filtered_type( Node *n ) { return filtered_type(n, NULL); }
  // Helpers for filtered type
  const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl );

  // Helper functions
  void register_new_node( Node *n, Node *blk );
Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache );
Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true );
void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true );
bool _created_loop_node;
public:
void set_created_loop_node() { _created_loop_node = true; }
bool created_loop_node() { return _created_loop_node; }
#ifndef PRODUCT
void dump( ) const;
void verify() const; // Major slow :-)
  IdealLoopTree *get_loop_idx( Node* n ) const {
    // Dead nodes have no loop, so return the top level loop instead
    return _nodes[n->_idx] ? (IdealLoopTree*)_nodes[n->_idx] : _ltree_root;
  }
// Print some stats
static void print_statistics();
static int _loop_invokes; // Count of PhaseIdealLoop invokes
static int _loop_work; // Sum of PhaseIdealLoop x _unique
#endif
};
// Handle lazy update of _tail field
inline Node *IdealLoopTree::tail() {
  Node *n = _tail;
  //while( !n->in(0) )  // Skip dead CFG nodes
  //  n = n->in(1);
  if (n->in(0) == NULL)
    n = _phase->get_ctrl(n);
  _tail = n;
  return n;
}
// Iterate over the loop tree using a preorder, left-to-right traversal.
//
// Example that visits all counted loops from within PhaseIdealLoop
//
// for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
// IdealLoopTree* lpt = iter.current();
// if (!lpt->is_counted()) continue;
//   ...
//  }
class LoopTreeIterator : public StackObj {
private:
  IdealLoopTree* _root;
  IdealLoopTree* _curnt;

public:
  LoopTreeIterator(IdealLoopTree* root) : _root(root), _curnt(root) {}

  bool done() { return _curnt == NULL; }       // Finished iterating?

  void next();                                 // Advance to next loop tree

  IdealLoopTree* current() { return _curnt; }  // Return current value of iterator.
};
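// next() advances in preorder (sketch, in terms of the fields above): descend
// to _curnt->_child when there is one; otherwise move to _curnt->_next; when
// a subtree is exhausted, climb _parent links until a sibling exists, and set
// _curnt to NULL once the walk returns to _root (so done() becomes true).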