/*
 * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/oopMap.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/coalesce.hpp"
#include "opto/connode.hpp"
#include "opto/indexSet.hpp"
#include "opto/machnode.hpp"
#include "opto/memnode.hpp"
#include "opto/opcodes.hpp"

#define EXACT_PRESSURE 1

//=============================================================================
//------------------------------IFG--------------------------------------------
PhaseIFG::PhaseIFG( Arena *arena ) : Phase(Interference_Graph), _arena(arena) {
}

//------------------------------init-------------------------------------------
void PhaseIFG::init( uint maxlrg ) {
  _maxlrg = maxlrg;
  _yanked = new (_arena) VectorSet(_arena);
  _is_square = false;
  // Make uninitialized adjacency lists
  _adjs = (IndexSet*)_arena->Amalloc(sizeof(IndexSet)*maxlrg);
  // Also make empty live range structures
  _lrgs = (LRG *)_arena->Amalloc( maxlrg * sizeof(LRG) );
  memset(_lrgs,0,sizeof(LRG)*maxlrg);
  // Init all to empty
  for( uint i = 0; i < maxlrg; i++ ) {
    _adjs[i].initialize(maxlrg);
    _lrgs[i].Set_All();
  }
}

//------------------------------add--------------------------------------------
// Add edge between vertices a & b.  These are sorted (triangular matrix),
// then the smaller number is inserted in the larger numbered array.
int PhaseIFG::add_edge( uint a, uint b ) {
  lrgs(a).invalid_degree();
  lrgs(b).invalid_degree();
  // Sort a and b, so that a is bigger
  assert( !_is_square, "only on triangular" );
  if( a < b ) { uint tmp = a; a = b; b = tmp; }
  return _adjs[a].insert( b );
}
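
// Illustrative note: in the triangular representation an edge is stored
// exactly once, in the adjacency list of the larger-numbered vertex.  For
// example, add_edge(3, 7) and add_edge(7, 3) both reduce to
// _adjs[7].insert(3), and test_edge(3, 7) swaps its arguments the same way
// before querying _adjs[7].member(3).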

//------------------------------add_vector-------------------------------------
// Add an edge between 'a' and everything in the vector.
void PhaseIFG::add_vector( uint a, IndexSet *vec ) {
  // IFG is triangular, so do the inserts where 'a' < 'b'.
  assert( !_is_square, "only on triangular" );
  IndexSet *adjs_a = &_adjs[a];
  if( !vec->count() ) return;

  IndexSetIterator elements(vec);
  uint neighbor;
  while ((neighbor = elements.next()) != 0) {
    add_edge( a, neighbor );
  }
}

//------------------------------test-------------------------------------------
// Is there an edge between a and b?
int PhaseIFG::test_edge( uint a, uint b ) const {
  // Sort a and b, so that a is larger
  assert( !_is_square, "only on triangular" );
  if( a < b ) { uint tmp = a; a = b; b = tmp; }
  return _adjs[a].member(b);
}

//------------------------------SquareUp---------------------------------------
// Convert triangular matrix to square matrix
void PhaseIFG::SquareUp() {
  assert( !_is_square, "only on triangular" );

  // Simple transpose
  for( uint i = 0; i < _maxlrg; i++ ) {
    IndexSetIterator elements(&_adjs[i]);
    uint datum;
    while ((datum = elements.next()) != 0) {
      _adjs[datum].insert( i );
    }
  }
  _is_square = true;
}

//------------------------------Compute_Effective_Degree-----------------------
// Compute effective degree in bulk
void PhaseIFG::Compute_Effective_Degree() {
  assert( _is_square, "only on square" );

  for( uint i = 0; i < _maxlrg; i++ )
    lrgs(i).set_degree(effective_degree(i));
}

//------------------------------test_edge_sq-----------------------------------
int PhaseIFG::test_edge_sq( uint a, uint b ) const {
  assert( _is_square, "only on square" );
  // Swap, so that 'a' has the lesser count.  Then binary search is on
  // the smaller of a's list and b's list.
  if( neighbor_cnt(a) > neighbor_cnt(b) ) { uint tmp = a; a = b; b = tmp; }
  //return _adjs[a].unordered_member(b);
  return _adjs[a].member(b);
}

//------------------------------Union------------------------------------------
// Union edges of B into A
void PhaseIFG::Union( uint a, uint b ) {
  assert( _is_square, "only on square" );
  IndexSet *A = &_adjs[a];
  IndexSetIterator b_elements(&_adjs[b]);
  uint datum;
  while ((datum = b_elements.next()) != 0) {
    if(A->insert(datum)) {
      _adjs[datum].insert(a);
      lrgs(a).invalid_degree();
      lrgs(datum).invalid_degree();
    }
  }
}

//------------------------------remove_node------------------------------------
// Yank a Node and all connected edges from the IFG.  Return a
// list of neighbors (edges) yanked.
IndexSet *PhaseIFG::remove_node( uint a ) {
  assert( _is_square, "only on square" );
  assert( !_yanked->test(a), "" );
  _yanked->set(a);

  // I remove the LRG from all neighbors.
  IndexSetIterator elements(&_adjs[a]);
  LRG &lrg_a = lrgs(a);
  uint datum;
  while ((datum = elements.next()) != 0) {
    _adjs[datum].remove(a);
    lrgs(datum).inc_degree( -lrg_a.compute_degree(lrgs(datum)) );
  }
  return neighbors(a);
}

//------------------------------re_insert--------------------------------------
// Re-insert a yanked Node.
void PhaseIFG::re_insert( uint a ) {
  assert( _is_square, "only on square" );
  assert( _yanked->test(a), "" );
  (*_yanked) >>= a;

  IndexSetIterator elements(&_adjs[a]);
  uint datum;
  while ((datum = elements.next()) != 0) {
    _adjs[datum].insert(a);
    lrgs(datum).invalid_degree();
  }
}

//------------------------------compute_degree---------------------------------
// Compute the degree between 2 live ranges.  If both live ranges are
// aligned-adjacent powers-of-2 then we use the MAX size.  If either is
// mis-aligned (or for Fat-Projections, not-adjacent) then we have to
// MULTIPLY the sizes.  Inspect Briggs' thesis on register pairs to see why
// this is so.
int LRG::compute_degree( LRG &l ) const {
  int tmp;
  int num_regs = _num_regs;
  int nregs = l.num_regs();
  tmp = (_fat_proj || l._fat_proj)      // either is a fat-proj?
    ? (num_regs * nregs)                // then use product
    : MAX2(num_regs,nregs);             // else use max
  return tmp;
}
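
// Worked example (illustrative): two aligned double-register live ranges
// contribute MAX2(2,2) = 2 to each other's degree, i.e. a register pair
// costs its neighbors no more than a single wide register would.  If either
// side is a fat-projection (a call kill covering many non-adjacent
// registers), the contribution is the product 2 * 2 = 4 instead of the max.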

//------------------------------effective_degree-------------------------------
// Compute effective degree for this live range.  If both live ranges are
// aligned-adjacent powers-of-2 then we use the MAX size.  If either is
// mis-aligned (or for Fat-Projections, not-adjacent) then we have to
// MULTIPLY the sizes.  Inspect Briggs' thesis on register pairs to see why
// this is so.
int PhaseIFG::effective_degree( uint lidx ) const {
  int eff = 0;
  int num_regs = lrgs(lidx).num_regs();
  int fat_proj = lrgs(lidx)._fat_proj;
  IndexSet *s = neighbors(lidx);
  IndexSetIterator elements(s);
  uint nidx;
  while((nidx = elements.next()) != 0) {
    LRG &lrgn = lrgs(nidx);
    int nregs = lrgn.num_regs();
    eff += (fat_proj || lrgn._fat_proj) // either is a fat-proj?
      ? (num_regs * nregs)              // then use product
      : MAX2(num_regs,nregs);           // else use max
  }
  return eff;
}


#ifndef PRODUCT
//------------------------------dump-------------------------------------------
void PhaseIFG::dump() const {
  tty->print_cr("-- Interference Graph --%s--",
                _is_square ? "square" : "triangular" );
  if( _is_square ) {
    for( uint i = 0; i < _maxlrg; i++ ) {
      tty->print( (*_yanked)[i] ? "XX " : "   ");
      tty->print("L%d: { ",i);
      IndexSetIterator elements(&_adjs[i]);
      uint datum;
      while ((datum = elements.next()) != 0) {
        tty->print("L%d ", datum);
      }
      tty->print_cr("}");

    }
    return;
  }

  // Triangular
  for( uint i = 0; i < _maxlrg; i++ ) {
    uint j;
    tty->print( (*_yanked)[i] ? "XX " : "   ");
    tty->print("L%d: { ",i);
    for( j = _maxlrg; j > i; j-- )
      if( test_edge(j - 1,i) ) {
        tty->print("L%d ",j - 1);
      }
    tty->print("| ");
    IndexSetIterator elements(&_adjs[i]);
    uint datum;
    while ((datum = elements.next()) != 0) {
      tty->print("L%d ", datum);
    }
    tty->print("}\n");
  }
  tty->print("\n");
}

//------------------------------stats------------------------------------------
void PhaseIFG::stats() const {
  ResourceMark rm;
  int *h_cnt = NEW_RESOURCE_ARRAY(int,_maxlrg*2);
  memset( h_cnt, 0, sizeof(int)*_maxlrg*2 );
  uint i;
  for( i = 0; i < _maxlrg; i++ ) {
    h_cnt[neighbor_cnt(i)]++;
  }
  tty->print_cr("--Histogram of counts--");
  for( i = 0; i < _maxlrg*2; i++ )
    if( h_cnt[i] )
      tty->print("%d/%d ",i,h_cnt[i]);
  tty->print_cr("");
}

//------------------------------verify-----------------------------------------
void PhaseIFG::verify( const PhaseChaitin *pc ) const {
  // IFG is square, sorted and no need for Find
  for( uint i = 0; i < _maxlrg; i++ ) {
    assert(!((*_yanked)[i]) || !neighbor_cnt(i), "Is removed completely" );
    IndexSet *set = &_adjs[i];
    IndexSetIterator elements(set);
    uint idx;
    uint last = 0;
    while ((idx = elements.next()) != 0) {
      assert( idx != i, "Must have empty diagonal");
      assert( pc->Find_const(idx) == idx, "Must not need Find" );
      assert( _adjs[idx].member(i), "IFG not square" );
      assert( !(*_yanked)[idx], "No yanked neighbors" );
      assert( last < idx, "not sorted increasing");
      last = idx;
    }
    assert( !lrgs(i)._degree_valid ||
            effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong" );
  }
}
#endif

//------------------------------interfere_with_live----------------------------
// Interfere this register with everything currently live.  Use the RegMasks
// to trim the set of possible interferences: only live ranges whose
// acceptable register masks actually overlap are recorded as interfering.
void PhaseChaitin::interfere_with_live( uint r, IndexSet *liveout ) {
  // Interfere with everything live.
  const RegMask &rm = lrgs(r).mask();
  // Check for interference by checking overlap of regmasks.
  // Only interfere if acceptable register masks overlap.
  IndexSetIterator elements(liveout);
  uint l;
  while( (l = elements.next()) != 0 )
    if( rm.overlap( lrgs(l).mask() ) )
      _ifg->add_edge( r, l );
}

//------------------------------build_ifg_virtual------------------------------
// Actually build the interference graph.  Uses virtual registers only, no
// physical register masks.  This allows me to be very aggressive when
// coalescing copies.  Some of this aggressiveness will have to be undone
// later, but I'd rather get all the copies I can now (since unremoved copies
// at this point can end up in bad places).  Copies that I re-insert later can
// be placed in low-frequency locations, where there is more opportunity to
// pick a good spot.
void PhaseChaitin::build_ifg_virtual( ) {

  // For all blocks (in any order) do...
  for( uint i=0; i<_cfg._num_blocks; i++ ) {
    Block *b = _cfg._blocks[i];
    IndexSet *liveout = _live->live(b);

    // The IFG is built by a single reverse pass over each basic block.
    // Starting with the known live-out set, we remove things that get
    // defined and add things that become live (essentially executing one
    // pass of a standard LIVE analysis).  Just before a Node defines a value
    // (and removes it from the live-ness set) that value is certainly live.
    // The defined value interferes with everything currently live.  The
    // value is then removed from the live-ness set and its inputs are
    // added to the live-ness set.
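    // Sketch of the per-instruction update performed below (illustrative):
    //   liveout = liveout - {def};   def interferes with liveout;
    //   liveout = liveout + {inputs of the instruction}
    // i.e. the standard backward LIVE transfer function, with interference
    // edges recorded at the point the DEF is processed.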
    for( uint j = b->end_idx() + 1; j > 1; j-- ) {
      Node *n = b->_nodes[j-1];

      // Get value being defined
      uint r = n2lidx(n);

      // Some special values do not allocate
      if( r ) {

        // Remove from live-out set
        liveout->remove(r);

        // Copies do not define a new value and so do not interfere.
        // Remove the copy's source from the liveout set before interfering.
        uint idx = n->is_Copy();
        if( idx ) liveout->remove( n2lidx(n->in(idx)) );

        // Interfere with everything live
        interfere_with_live( r, liveout );
      }

      // Make all inputs live
      if( !n->is_Phi() ) {      // Phi function uses come from prior block
        for( uint k = 1; k < n->req(); k++ )
          liveout->insert( n2lidx(n->in(k)) );
      }

      // 2-address instructions always have the defined value live
      // on entry to the instruction, even though it is being defined
      // by the instruction.  We pretend a virtual copy sits just prior
      // to the instruction and kills the src-def'd register.
      // In other words, for 2-address instructions the defined value
      // interferes with all inputs.
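      // Illustrative example (hypothetical two-address instruction): for
      //   ADD dst, src      // dst := dst + src
      // the new value of dst is treated as interfering with src, because src
      // is still live across the implicit copy that the two-address encoding
      // hides; only the tied input (the old dst) escapes the interference.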
      uint idx;
      if( n->is_Mach() && (idx = n->as_Mach()->two_adr()) ) {
        const MachNode *mach = n->as_Mach();
        // Sometimes my 2-address ADDs are commuted in a bad way.
        // We generally want the USE-DEF register to refer to the
        // loop-varying quantity, to avoid a copy.
        uint op = mach->ideal_Opcode();
        // Check that mach->num_opnds() == 3 to ensure instruction is
        // not subsuming constants, effectively excludes addI_cin_imm
        // Can NOT swap for instructions like addI_cin_imm since it
        // is adding zero to yhi + carry and the second ideal-input
        // points to the result of adding low-halves.
        // Checking req() and num_opnds() does NOT distinguish addI_cout from addI_cout_imm
        if( (op == Op_AddI && mach->req() == 3 && mach->num_opnds() == 3) &&
            n->in(1)->bottom_type()->base() == Type::Int &&
            // See if the ADD is involved in a tight data loop the wrong way
            n->in(2)->is_Phi() &&
            n->in(2)->in(2) == n ) {
          Node *tmp = n->in(1);
          n->set_req( 1, n->in(2) );
          n->set_req( 2, tmp );
        }
        // Defined value interferes with all inputs
        uint lidx = n2lidx(n->in(idx));
        for( uint k = 1; k < n->req(); k++ ) {
          uint kidx = n2lidx(n->in(k));
          if( kidx != lidx )
            _ifg->add_edge( r, kidx );
        }
      }
    } // End of forall instructions in block
  } // End of forall blocks
}

//------------------------------count_int_pressure-----------------------------
uint PhaseChaitin::count_int_pressure( IndexSet *liveout ) {
  IndexSetIterator elements(liveout);
  uint lidx;
  uint cnt = 0;
  while ((lidx = elements.next()) != 0) {
    if( lrgs(lidx).mask().is_UP() &&
        lrgs(lidx).mask_size() &&
        !lrgs(lidx)._is_float &&
        !lrgs(lidx)._is_vector &&
        lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) )
      cnt += lrgs(lidx).reg_pressure();
  }
  return cnt;
}

//------------------------------count_float_pressure---------------------------
uint PhaseChaitin::count_float_pressure( IndexSet *liveout ) {
  IndexSetIterator elements(liveout);
  uint lidx;
  uint cnt = 0;
  while ((lidx = elements.next()) != 0) {
    if( lrgs(lidx).mask().is_UP() &&
        lrgs(lidx).mask_size() &&
        (lrgs(lidx)._is_float || lrgs(lidx)._is_vector))
      cnt += lrgs(lidx).reg_pressure();
  }
  return cnt;
}
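
// Illustrative summary of the two counts above: a live range adds its
// reg_pressure() to float pressure when its mask is UP (still a register,
// not the stack), non-empty, and the value is a float or vector; it adds to
// int pressure when it is UP, non-empty, neither float nor vector, and its
// mask overlaps the RegI class (which excludes flags, SP and the like).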

//------------------------------lower_pressure---------------------------------
// Adjust register pressure down as a live range goes dead.  Capture the last
// hi-to-lo pressure transition.
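// Illustrative example (assuming FLOATPRESSURE is 14): if removing this live
// range drops pressure[1] from 15 to exactly 14, 'where' is recorded in
// hrp_index[1] as the last point at which float pressure fell back down to
// the number of available registers.  Int pressure is handled the same way
// against INTPRESSURE.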
static void lower_pressure( LRG *lrg, uint where, Block *b, uint *pressure, uint *hrp_index ) {
  if (lrg->mask().is_UP() && lrg->mask_size()) {
    if (lrg->_is_float || lrg->_is_vector) {
      pressure[1] -= lrg->reg_pressure();
      if( pressure[1] == (uint)FLOATPRESSURE ) {
        hrp_index[1] = where;
#ifdef EXACT_PRESSURE
        if( pressure[1] > b->_freg_pressure )
          b->_freg_pressure = pressure[1]+1;
#else
        b->_freg_pressure = (uint)FLOATPRESSURE+1;
#endif
      }
    } else if( lrg->mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
      pressure[0] -= lrg->reg_pressure();
      if( pressure[0] == (uint)INTPRESSURE ) {
        hrp_index[0] = where;
#ifdef EXACT_PRESSURE
        if( pressure[0] > b->_reg_pressure )
          b->_reg_pressure = pressure[0]+1;
#else
        b->_reg_pressure = (uint)INTPRESSURE+1;
#endif
      }
    }
  }
}

//------------------------------build_ifg_physical-----------------------------
// Build the interference graph using physical registers when available.
// That is, if 2 live ranges are simultaneously alive but their acceptable
// register sets do not overlap, then they do not interfere.
uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
  NOT_PRODUCT( Compile::TracePhase t3("buildIFG", &_t_buildIFGphysical, TimeCompiler); )

  uint spill_reg = LRG::SPILL_REG;
  uint must_spill = 0;

  // For all blocks (in any order) do...
  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
    Block *b = _cfg._blocks[i];
    // Clone (rather than smash in place) the liveout info, so it is alive
    // for the "collect_gc_info" phase later.
    IndexSet liveout(_live->live(b));
    uint last_inst = b->end_idx();
    // Compute first nonphi node index
    uint first_inst;
    for( first_inst = 1; first_inst < last_inst; first_inst++ )
      if( !b->_nodes[first_inst]->is_Phi() )
        break;

    // Spills could be inserted before a CreateEx node, which should be the
    // first instruction in the block after the Phis.  Move the CreateEx up.
    for( uint insidx = first_inst; insidx < last_inst; insidx++ ) {
      Node *ex = b->_nodes[insidx];
      if( ex->is_SpillCopy() ) continue;
      if( insidx > first_inst && ex->is_Mach() &&
          ex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
        // If the CreateEx isn't above all the MachSpillCopies
        // then move it to the top.
        b->_nodes.remove(insidx);
        b->_nodes.insert(first_inst, ex);
      }
      // Stop once a CreateEx or any other node is found
      break;
    }

    // Reset block's register pressure values for each ifg construction
    uint pressure[2], hrp_index[2];
    pressure[0] = pressure[1] = 0;
    hrp_index[0] = hrp_index[1] = last_inst+1;
    b->_reg_pressure = b->_freg_pressure = 0;
    // Liveout things are presumed live for the whole block.  We accumulate
    // 'area' accordingly.  If they get killed in the block, we'll subtract
    // the unused part of the block from the area.
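    // Illustrative example: in a block with frequency 2.0 and 10 instructions
    // after the Phis, a live range that is live-out and live across the whole
    // block accumulates 2.0 * 10 = 20.0 of area below; if it turns out to be
    // defined partway up the block, the remaining (unused) portion of that
    // cost is subtracted again when the DEF is reached.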
    int inst_count = last_inst - first_inst;
    double cost = (inst_count <= 0) ? 0.0 : b->_freq * double(inst_count);
    assert(!(cost < 0.0), "negative spill cost" );
    IndexSetIterator elements(&liveout);
    uint lidx;
    while ((lidx = elements.next()) != 0) {
      LRG &lrg = lrgs(lidx);
      lrg._area += cost;
      // Compute initial register pressure
      if (lrg.mask().is_UP() && lrg.mask_size()) {
        if (lrg._is_float || lrg._is_vector) {   // Count float pressure
          pressure[1] += lrg.reg_pressure();
#ifdef EXACT_PRESSURE
          if( pressure[1] > b->_freg_pressure )
            b->_freg_pressure = pressure[1];
#endif
          // Count int pressure, but do not count the SP, flags
        } else if( lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
          pressure[0] += lrg.reg_pressure();
#ifdef EXACT_PRESSURE
          if( pressure[0] > b->_reg_pressure )
            b->_reg_pressure = pressure[0];
#endif
        }
      }
    }
    assert( pressure[0] == count_int_pressure  (&liveout), "" );
    assert( pressure[1] == count_float_pressure(&liveout), "" );

    // The IFG is built by a single reverse pass over each basic block.
    // Starting with the known live-out set, we remove things that get
    // defined and add things that become live (essentially executing one
    // pass of a standard LIVE analysis).  Just before a Node defines a value
    // (and removes it from the live-ness set) that value is certainly live.
    // The defined value interferes with everything currently live.  The
    // value is then removed from the live-ness set and its inputs are added
    // to the live-ness set.
    uint j;
    for( j = last_inst + 1; j > 1; j-- ) {
      Node *n = b->_nodes[j - 1];

      // Get value being defined
      uint r = n2lidx(n);

      // Some special values do not allocate
      if( r ) {
        // A DEF normally costs block frequency; rematerialized values are
        // removed from the DEF site, so LOWER costs here.
        lrgs(r)._cost += n->rematerialize() ? 0 : b->_freq;

        // If it is not live, then this instruction is dead.  Probably caused
        // by spilling and rematerialization.  Who cares why, yank this baby.
        if( !liveout.member(r) && n->Opcode() != Op_SafePoint ) {
          Node *def = n->in(0);
          if( !n->is_Proj() ||
              // Could also be a flags-projection of a dead ADD or such.
              (n2lidx(def) && !liveout.member(n2lidx(def)) ) ) {
            b->_nodes.remove(j - 1);
            if( lrgs(r)._def == n ) lrgs(r)._def = 0;
            n->disconnect_inputs(NULL, C);
            _cfg._bbs.map(n->_idx,NULL);
            n->replace_by(C->top());
            // Since yanking a Node from block, high pressure moves up one
            hrp_index[0]--;
            hrp_index[1]--;
            continue;
          }

          // Fat-projections kill many registers which cannot be used to
          // hold live ranges.
          if( lrgs(r)._fat_proj ) {
            // Count the int-only registers
            RegMask itmp = lrgs(r).mask();
            itmp.AND(*Matcher::idealreg2regmask[Op_RegI]);
            int iregs = itmp.Size();
#ifdef EXACT_PRESSURE
            if( pressure[0]+iregs > b->_reg_pressure )
              b->_reg_pressure = pressure[0]+iregs;
#endif
            if( pressure[0] <= (uint)INTPRESSURE &&
                pressure[0]+iregs > (uint)INTPRESSURE ) {
#ifndef EXACT_PRESSURE
              b->_reg_pressure = (uint)INTPRESSURE+1;
#endif
              hrp_index[0] = j-1;
            }
            // Count the float-only registers
            RegMask ftmp = lrgs(r).mask();
            ftmp.AND(*Matcher::idealreg2regmask[Op_RegD]);
            int fregs = ftmp.Size();
#ifdef EXACT_PRESSURE
            if( pressure[1]+fregs > b->_freg_pressure )
              b->_freg_pressure = pressure[1]+fregs;
#endif
            if( pressure[1] <= (uint)FLOATPRESSURE &&
                pressure[1]+fregs > (uint)FLOATPRESSURE ) {
#ifndef EXACT_PRESSURE
              b->_freg_pressure = (uint)FLOATPRESSURE+1;
#endif
              hrp_index[1] = j-1;
            }
          }

        } else {                // Else it is live
          // A DEF also ends 'area' partway through the block.
          lrgs(r)._area -= cost;
          assert(!(lrgs(r)._area < 0.0), "negative spill area" );

          // Ensure a high score for immediate-use spill copies so they get a color
          if( n->is_SpillCopy()
              && lrgs(r).is_singledef() // MultiDef live range can still split
              && n->outcnt() == 1       // and use must be in this block
              && _cfg._bbs[n->unique_out()->_idx] == b ) {
            // All single-use MachSpillCopy(s) that immediately precede their
            // use must color early.  If a longer live range steals their
            // color, the spill copy will split and may push another spill copy
            // further away resulting in an infinite spill-split-retry cycle.
            // Assigning a zero area results in a high score() and a good
            // location in the simplify list.
            //

            Node *single_use = n->unique_out();
            assert( b->find_node(single_use) >= j, "Use must be later in block");
            // Use can be earlier in block if it is a Phi, but then I should be a MultiDef

            // Find first non SpillCopy 'm' that follows the current instruction
            // (j - 1) is index for current instruction 'n'
            Node *m = n;
            for( uint i = j; i <= last_inst && m->is_SpillCopy(); ++i ) { m = b->_nodes[i]; }
            if( m == single_use ) {
              lrgs(r)._area = 0.0;
            }
          }

          // Remove from live-out set
          if( liveout.remove(r) ) {
            // Adjust register pressure.
            // Capture last hi-to-lo pressure transition
            lower_pressure( &lrgs(r), j-1, b, pressure, hrp_index );
            assert( pressure[0] == count_int_pressure  (&liveout), "" );
            assert( pressure[1] == count_float_pressure(&liveout), "" );
          }

          // Copies do not define a new value and so do not interfere.
          // Remove the copy's source from the liveout set before interfering.
          uint idx = n->is_Copy();
          if( idx ) {
            uint x = n2lidx(n->in(idx));
            if( liveout.remove( x ) ) {
              lrgs(x)._area -= cost;
              // Adjust register pressure.
              lower_pressure( &lrgs(x), j-1, b, pressure, hrp_index );
              assert( pressure[0] == count_int_pressure  (&liveout), "" );
              assert( pressure[1] == count_float_pressure(&liveout), "" );
            }
          }
        } // End of if live or not

        // Interfere with everything live.  If the defined value must
        // go in a particular register, just remove that register from
        // all conflicting parties and avoid the interference.

        // Make exclusions for rematerializable defs.  Since rematerializable
        // DEFs are not bound but the live range is, some uses must be bound.
        // If we spill live range 'r', it can rematerialize at each use site
        // according to its bindings.
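        // Illustrative example (hypothetical register name): if the DEF of 'r'
        // is bound to the single register RAX, the loop below removes RAX from
        // the masks of everything currently live; the later
        // interfere_with_live() call then finds no mask overlap for those
        // neighbors and adds no edge.  A neighbor that thereby runs out of
        // registers is marked _must_spill and has its full mask restored so it
        // still collects interferences before it spills.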
        const RegMask &rmask = lrgs(r).mask();
        if( lrgs(r).is_bound() && !(n->rematerialize()) && rmask.is_NotEmpty() ) {
          // Check for common case
          int r_size = lrgs(r).num_regs();
          OptoReg::Name r_reg = (r_size == 1) ? rmask.find_first_elem() : OptoReg::Physical;
          // Smear odd bits
          IndexSetIterator elements(&liveout);
          uint l;
          while ((l = elements.next()) != 0) {
            LRG &lrg = lrgs(l);
            // If 'l' must spill already, do not further hack his bits.
            // He'll get some interferences and be forced to spill later.
            if( lrg._must_spill ) continue;
            // Remove bound register(s) from 'l's choices
            RegMask old = lrg.mask();
            uint old_size = lrg.mask_size();
            // Remove the bits from LRG 'r' from LRG 'l' so 'l' no
            // longer interferes with 'r'.  If 'l' requires aligned
            // adjacent pairs, subtract out bit pairs.
            assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
            if (lrg.num_regs() > 1 && !lrg._fat_proj) {
              RegMask r2mask = rmask;
              // Leave only aligned set of bits.
              r2mask.smear_to_sets(lrg.num_regs());
              // It includes vector case.
              lrg.SUBTRACT( r2mask );
              lrg.compute_set_mask_size();
            } else if( r_size != 1 ) { // fat proj
              lrg.SUBTRACT( rmask );
              lrg.compute_set_mask_size();
            } else {            // Common case: size 1 bound removal
              if( lrg.mask().Member(r_reg) ) {
                lrg.Remove(r_reg);
                lrg.set_mask_size(lrg.mask().is_AllStack() ? 65535:old_size-1);
              }
            }
            // If 'l' goes completely dry, it must spill.
            if( lrg.not_free() ) {
              // Give 'l' some kind of reasonable mask, so he picks up
              // interferences (and will spill later).
              lrg.set_mask( old );
              lrg.set_mask_size(old_size);
              must_spill++;
              lrg._must_spill = 1;
              lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
            }
          }
        } // End of if bound

        // Now interfere with everything that is live and has
        // compatible register sets.
        interfere_with_live(r,&liveout);

      } // End of if normal register-allocated value

      // Area remaining in the block
      inst_count--;
      cost = (inst_count <= 0) ? 0.0 : b->_freq * double(inst_count);

      // Make all inputs live
      if( !n->is_Phi() ) {      // Phi function uses come from prior block
        JVMState* jvms = n->jvms();
        uint debug_start = jvms ? jvms->debug_start() : 999999;
        // Start loop at 1 (skip control edge) for most Nodes.
        // SCMemProj's might be the sole use of a StoreLConditional.
        // While StoreLConditionals set memory (the SCMemProj use)
        // they also def flags; if that flag def is unused the
        // allocator sees a flag-setting instruction with no use of
        // the flags and assumes it's dead.  This keeps the (useless)
        // flag-setting behavior alive while also keeping the (useful)
        // memory update effect.
        for( uint k = ((n->Opcode() == Op_SCMemProj) ? 0:1); k < n->req(); k++ ) {
          Node *def = n->in(k);
          uint x = n2lidx(def);
          if( !x ) continue;
          LRG &lrg = lrgs(x);
          // No use-side cost for spilling debug info
          if( k < debug_start )
            // A USE costs twice block frequency (once for the Load, once
            // for a Load-delay).  Rematerialized uses only cost once.
            lrg._cost += (def->rematerialize() ? b->_freq : (b->_freq + b->_freq));
          // It is live now
          if( liveout.insert( x ) ) {
            // Newly live things assumed live from here to top of block
            lrg._area += cost;
            // Adjust register pressure
            if (lrg.mask().is_UP() && lrg.mask_size()) {
              if (lrg._is_float || lrg._is_vector) {
                pressure[1] += lrg.reg_pressure();
#ifdef EXACT_PRESSURE
                if( pressure[1] > b->_freg_pressure )
                  b->_freg_pressure = pressure[1];
#endif
              } else if( lrg.mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
                pressure[0] += lrg.reg_pressure();
#ifdef EXACT_PRESSURE
                if( pressure[0] > b->_reg_pressure )
                  b->_reg_pressure = pressure[0];
#endif
              }
            }
            assert( pressure[0] == count_int_pressure  (&liveout), "" );
            assert( pressure[1] == count_float_pressure(&liveout), "" );
          }
          assert(!(lrg._area < 0.0), "negative spill area" );
        }
      }
    } // End of reverse pass over all instructions in block

    // If we run off the top of the block with high pressure and
    // never see a hi-to-low pressure transition, just record that
    // the whole block is high pressure.
    if( pressure[0] > (uint)INTPRESSURE ) {
      hrp_index[0] = 0;
#ifdef EXACT_PRESSURE
      if( pressure[0] > b->_reg_pressure )
        b->_reg_pressure = pressure[0];
#else
      b->_reg_pressure = (uint)INTPRESSURE+1;
#endif
    }
    if( pressure[1] > (uint)FLOATPRESSURE ) {
      hrp_index[1] = 0;
#ifdef EXACT_PRESSURE
      if( pressure[1] > b->_freg_pressure )
        b->_freg_pressure = pressure[1];
#else
      b->_freg_pressure = (uint)FLOATPRESSURE+1;
#endif
    }

    // Compute high pressure indices; avoid landing in the middle of projnodes
    j = hrp_index[0];
    if( j < b->_nodes.size() && j < b->end_idx()+1 ) {
      Node *cur = b->_nodes[j];
      while( cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch() ) {
        j--;
        cur = b->_nodes[j];
      }
    }
    b->_ihrp_index = j;
    j = hrp_index[1];
    if( j < b->_nodes.size() && j < b->end_idx()+1 ) {
      Node *cur = b->_nodes[j];
      while( cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch() ) {
        j--;
        cur = b->_nodes[j];
      }
    }
    b->_fhrp_index = j;

#ifndef PRODUCT
    // Gather Register Pressure Statistics
    if( PrintOptoStatistics ) {
      if( b->_reg_pressure > (uint)INTPRESSURE || b->_freg_pressure > (uint)FLOATPRESSURE )
        _high_pressure++;
      else
        _low_pressure++;
    }
#endif
  } // End of for all blocks

  return must_spill;
}