/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/oopMap.hpp"
#include "opto/addnode.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/escape.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/output.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/stringopts.hpp"
#include "opto/vectornode.hpp"
#include "runtime/arguments.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "trace/tracing.hpp"
#ifdef TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_arm
# include "adfiles/ad_arm.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_ppc
# include "adfiles/ad_ppc.hpp"
#endif
// -------------------- Compile::mach_constant_base_node -----------------------
// Constant table base node singleton.
MachConstantBaseNode* Compile::mach_constant_base_node() {
if (_mach_constant_base_node == NULL) {
_mach_constant_base_node = new (C) MachConstantBaseNode();
}
return _mach_constant_base_node;
}
/// Support for intrinsics.
// Return the index at which m must be inserted (or already exists).
// The sort order is by the address of the ciMethod, with is_virtual as minor key.
#ifdef ASSERT
"compiler intrinsics list must stay sorted");
}
#endif
// Binary search sorted list, in decreasing intervals [lo, hi].
if (m < mid_m) {
} else if (m > mid_m) {
} else {
// look at minor sort key
if (is_virtual < mid_virt) {
} else if (is_virtual > mid_virt) {
} else {
return mid; // exact match
}
}
}
return lo; // inexact match
}
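// A minimal standalone sketch of the same search (not VM code): binary search
// over a sorted array keyed by a pointer-valued major key and a boolean minor
// key, returning the insertion index on a miss. ExampleEntry and the function
// name are hypothetical.
struct ExampleEntry { const void* method; bool is_virtual; };
static int example_insertion_index(const ExampleEntry* list, int len,
                                   const void* m, bool is_virtual) {
  int lo = 0, hi = len - 1;
  while (lo <= hi) {
    int mid = lo + (hi - lo) / 2;
    uintptr_t key     = (uintptr_t) m;
    uintptr_t mid_key = (uintptr_t) list[mid].method;
    if      (key < mid_key)  hi = mid - 1;
    else if (key > mid_key)  lo = mid + 1;
    else if ((int) is_virtual < (int) list[mid].is_virtual)  hi = mid - 1;  // minor key
    else if ((int) is_virtual > (int) list[mid].is_virtual)  lo = mid + 1;
    else return mid;                                                        // exact match
  }
  return lo;  // inexact match: index at which the entry must be inserted
}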
if (_intrinsics == NULL) {
}
// This code is stolen from ciObjectFactory::insert.
// Really, GrowableArray should have methods for
// insert_at, remove_at, and binary_search.
} else {
#ifdef ASSERT
assert(oldcg->method() != cg->method() || oldcg->is_virtual() != cg->is_virtual(), "don't register twice");
#endif
int pos;
}
}
}
if (_intrinsics != NULL) {
}
}
// Lazily create intrinsics for intrinsic IDs well-known in the runtime.
// Save it for next time:
return cg;
} else {
}
}
return NULL;
}
// Compile:: register_library_intrinsics and make_vm_intrinsic are defined
// in library_call.cpp.
#ifndef PRODUCT
// statistics gathering...
if (is_virtual) {
}
if ((flags & _intrinsic_worked) != 0) {
if (count == 1) {
changed = true; // first time
}
// increment the overall count also:
}
if (changed) {
// Something changed about the intrinsic's virtuality.
if ((flags & _intrinsic_virtual) != 0) {
// This is the first use of this intrinsic as a virtual call.
if (oflags != 0) {
// We already saw it as a non-virtual, so note both cases.
flags |= _intrinsic_both;
}
} else if ((oflags & _intrinsic_both) == 0) {
// This is the first use of this intrinsic as a non-virtual call.
flags |= _intrinsic_both;
}
}
}
// update the overall flags also:
return changed;
}
buf[0] = 0;
return &buf[1];
}
}
}
PRINT_STAT_LINE("total", total, format_flags(_intrinsic_hist_flags[vmIntrinsics::_none], flagsbuf));
}
}
// put this under its own <statistics> element.
}
}
#endif //PRODUCT
// Support for bundling info
return &_node_bundling_base[n->_idx];
}
return (_node_bundling_limit > n->_idx);
}
else
uses_found++;
}
}
if (is_in_table) {
// reinsert into table
}
i -= uses_found; // we deleted 1 or more copies of this edge
}
}
if (n == NULL) return true;
return false;
}
// Identify all nodes that are reachable from below, useful.
// Use a breadth-first pass that records state in a Unique_Node_List;
// recursive traversal is slower.
// Initialize worklist
// If 'top' is cached, declare it useful to preserve cached node
// Push all useful nodes onto the list, breadth-first
if (not_a_node(m)) continue;
}
}
}
// Update dead_node_list with any missing dead nodes using useful
// list. Consider all non-useful nodes to be useless i.e., dead nodes.
// If node with index node_idx is not in useful set,
// mark it as dead in dead node list.
}
}
}
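// A minimal standalone sketch (hypothetical ExampleNode, not the VM's node
// types): the useful-node identification above is a plain worklist walk --
// push the root, then for every popped node push each not-yet-visited input;
// any node index that never enters the visited set is treated as dead.
struct ExampleNode { ExampleNode* in[4]; int idx; };
static void example_mark_useful(ExampleNode* root, bool* visited,
                                ExampleNode** worklist, int capacity) {
  int size = 0;
  visited[root->idx] = true;
  worklist[size++] = root;
  for (int next = 0; next < size; next++) {        // breadth-first scan
    ExampleNode* n = worklist[next];
    for (int i = 0; i < 4; i++) {
      ExampleNode* m = n->in[i];
      if (m == NULL || visited[m->idx]) continue;  // skip absent or seen inputs
      visited[m->idx] = true;                      // each node is pushed once
      if (size < capacity) worklist[size++] = m;
    }
  }
}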
void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful) {
int shift = 0;
if (shift > 0) {
}
shift++;
}
}
}
// Disconnect all useless nodes by disconnecting those at the boundary.
// Use raw traversal of out edges since this code removes out edges
for (int j = 0; j < max; ++j) {
"If top is cached in Compile object it is in useful list");
// Only need to remove this out-edge to the useless node
n->raw_del_out(j);
--j;
--max;
}
}
record_for_igvn(n->unique_out());
}
}
// Remove useless macro and predicate opaq nodes
for (int i = C->macro_count()-1; i >= 0; i--) {
Node* n = C->macro_node(i);
}
}
// Remove useless expensive node
for (int i = C->expensive_count()-1; i >= 0; i--) {
Node* n = C->expensive_node(i);
}
}
// clean up the late inline lists
}
//------------------------------frame_size_in_words-----------------------------
// frame_slots in units of words
// shift is 0 in LP32 and 1 in LP64
return words;
}
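// Worked example, assuming the usual 32-bit stack slots: with 12 frame slots,
// LP64 (shift 1) gives 12 >> 1 = 6 words, while LP32 (shift 0) leaves the
// count unchanged at 12 words; an odd slot count on LP64 would indicate a
// misaligned frame.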
// ============================================================================
//------------------------------CompileWrapper---------------------------------
public:
~CompileWrapper();
};
// the Compile* pointer is stored in the current ciEnv:
_compile->begin_method();
}
CompileWrapper::~CompileWrapper() {
_compile->end_method();
}
//----------------------------print_compile_messages---------------------------
#ifndef PRODUCT
// Check if recompiling
if (!_subsume_loads && PrintOpto) {
// Recompiling without allowing machine instructions to subsume loads
}
// Recompiling without escape analysis
}
if (env()->break_at_compile()) {
// Open the debugger when compiling this method.
method()->print_short_name();
}
if( PrintOpto ) {
if (is_osr_compilation()) {
} else {
}
}
#endif
}
//-----------------------init_scratch_buffer_blob------------------------------
// Construct a temporary BufferBlob and cache it for this compile.
// If there is already a scratch buffer blob allocated and the
// constant section is big enough, use it. Otherwise free the
// current and allocate a new one.
// Use the current blob.
} else {
}
// Record the buffer blob for next time.
// Have we run out of code space?
if (scratch_buffer_blob() == NULL) {
// Let CompilerBroker disable further compilations.
record_failure("Not enough space for scratch buffer in CodeCache");
return;
}
}
// Initialize the relocation buffers
}
//-----------------------scratch_emit_size-------------------------------------
// Helper function that computes size by emitting code
// Start scratch_emit_size section.
set_in_scratch_emit_size(true);
// Emit into a trash buffer and count bytes emitted.
// This is a pretty expensive way to compute a size,
// but it works well enough if seldom used.
// All common fixed-size instructions are given a size
// method by the AD file.
// Note that the scratch buffer blob and locs memory are
// allocated at the beginning of the compile task, and
// may be shared by several calls to scratch_emit_size.
// The allocation of the scratch buffer blob is particularly
// expensive, since it has to grab the code cache lock.
// Do the emission.
if (is_branch) {
}
if (is_branch) // Restore label.
// End scratch_emit_size section.
set_in_scratch_emit_size(false);
return buf.insts_size();
}
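// A minimal standalone sketch of the measuring trick above (hypothetical
// emitter signature, not the VM's MacroAssembler API): emit into a throwaway
// buffer purely to observe how far the write cursor advances.
static unsigned example_measure_emitted_size(void (*emit)(unsigned char* buf,
                                                          unsigned* pos)) {
  unsigned char trash[256];   // scratch area; the bytes are never used
  unsigned pos = 0;
  emit(trash, &pos);          // run the emission only for its effect on pos
  return pos;                 // the size a real emission would produce
}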
// ============================================================================
//------------------------------Compile standard-------------------------------
// Compile a method. entry_bci is -1 for normal compilations and indicates
// the continuation bci for on stack replacement.
Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci, bool subsume_loads, bool do_escape_analysis )
_save_argument_registers(false),
_code_buffer("Compile::Fill_buffer"),
_orig_pc_slot(0),
_has_method_handle_invokes(false),
_java_calls(0),
_inner_loops(0),
_scratch_const_size(-1),
_in_scratch_emit_size(false),
_dead_node_count(0),
#ifndef PRODUCT
#endif
_inlining_progress(false),
_inlining_incrementally(false),
_print_inlining(0) {
C = this;
CompileWrapper cw(this);
#ifndef PRODUCT
if (TimeCompiler2) {
}
if (!print_opto_assembly) {
print_opto_assembly = true;
}
}
set_parsed_irreducible_loop(false);
#endif
if (ProfileTraps) {
// Make sure the method being compiled gets its own MDO,
// so we can at least track the decompile_count().
method()->ensure_method_data();
}
Init(::AliasLevel);
else
// Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
// Node list that Iterative GVN will start with
// GVN that will be run immediately on new nodes
_print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
}
{ // Scope for timing the parser
// Put top into the hash table ASAP.
// Set up tf(), start(), and find a CallGenerator.
if (is_osr_compilation()) {
initial_gvn()->set_type_bottom(s);
init_start(s);
} else {
// Normal case.
initial_gvn()->set_type_bottom(s);
init_start(s);
// With java.lang.ref.Reference.get() we must go through the
// intrinsic when G1 is enabled - even when get() is the root
// method of the compile - so that, if necessary, the value in
// the referent field of the reference object gets recorded by
// the pre-barrier code.
// Specifically, if G1 is enabled, the value in the referent
// field is recorded by the G1 SATB pre barrier. This will
// result in the referent being marked live and the reference
// object removed from the list of discovered references during
// reference processing.
}
}
}
if (failing()) return;
record_method_not_compilable_all_tiers("cannot parse method");
return;
}
record_method_not_compilable("method parse failed");
return;
}
// Accept return values, and transfer control we know not where.
// This is done by a special, unique ReturnNode bound to root.
}
if (kit.has_exceptions()) {
// Any exceptions that escape from this call must be rethrown
// to whatever caller is dynamically above us on the stack.
// This is done by a special, unique RethrowNode bound to root.
}
assert(IncrementalInline || (_late_inlines.length() == 0 && !has_mh_late_inlines()), "incremental inlining is off");
inline_string_calls(true);
}
if (failing()) return;
// Remove clutter produced by parsing.
if (!failing()) {
}
}
// Note: Large methods are capped off in do_one_bytecode().
if (failing()) return;
// After parsing, node notes are no longer automagic.
// They must be propagated by register_new_node_with_optimizer(),
// clone(), or the like.
for (;;) {
if (failing()) return;
if (successes == 0) break;
}
// Drain the list.
Finish_Warm();
#ifndef PRODUCT
if (_printer) {
_printer->print_inlining(this);
}
#endif
if (failing()) return;
// Now optimize
Optimize();
if (failing()) return;
#ifndef PRODUCT
if (PrintIdeal) {
// This output goes directly to the tty, not the compiler log.
// To enable tools to match it up with the compilation activity,
// be sure to tag this tty output with the compile ID.
is_osr_compilation() ? " compile_kind='osr'" :
"");
}
}
}
#endif
// Now that we know the size of all the monitors we can add a fixed slot
// for the original deopt pc.
_orig_pc_slot = fixed_slots();
// Now generate code
Code_Gen();
if (failing()) return;
// Check if we want to skip execution of all compiled code.
{
#ifndef PRODUCT
if (OptoNoExecute) {
return;
}
#endif
if (is_osr_compilation()) {
} else {
}
code_buffer(),
env()->comp_level(),
);
log()->code_cache_state();
}
}
//------------------------------Compile----------------------------------------
// Compile a runtime stub
const char *stub_name,
int is_fancy_jump,
bool pass_tls,
bool save_arg_registers,
bool return_pc )
_compile_id(0),
_orig_pc_slot(0),
_subsume_loads(true),
_do_escape_analysis(false),
_code_buffer("Compile::Fill_buffer"),
_has_method_handle_invokes(false),
_java_calls(0),
_inner_loops(0),
#ifndef PRODUCT
#endif
_dead_node_count(0),
_inlining_progress(false),
_inlining_incrementally(false),
_print_inlining(0) {
C = this;
#ifndef PRODUCT
set_parsed_irreducible_loop(false);
#endif
CompileWrapper cw(this);
Init(/*AliasLevel=*/ 0);
{
// The following is a dummy for the sake of GraphKit::gen_stub
}
Code_Gen();
if (failing()) return;
// Entry point will be accessed using compile->stub_entry_point();
if (code_buffer() == NULL) {
} else {
if (!failing()) {
// Make the NMethod
// For now we mark the frame as never safe for profile stackwalking
code_buffer(),
// _code_offsets.value(CodeOffsets::Frame_Complete),
}
}
}
#ifndef PRODUCT
}
}
#endif
}
//------------------------------Init-------------------------------------------
// Prepare for a single compilation
_unique = 0;
set_24_bit_selection_and_mode(Use24BitFP, false);
// Globally visible Nodes
// First set TOP to NULL to give safe behavior during creation of RootNode
// Now that you have a Root to point to, create the real TOP
// Create Debug Information Recorder to record scopes, oopmaps, etc.
_fixed_slots = 0;
set_has_split_ifs(false);
set_has_stringbuilder(false);
_trap_can_recompile = false; // no traps emitted yet
_major_progress = true; // start out assuming good things will happen
set_has_unsafe_access(false);
set_do_count_invocations(false);
set_do_method_data_update(false);
if (debug_info()->recording_non_safepoints()) {
}
// // -- Initialize types before each compile --
// // Update cached type information
// if( _method && _method->constants() )
// Type::update_loaded_types(_method, _method->constants());
// Init alias_type map.
{
}
// Initialize the first few types.
// Zero out the alias type cache.
// A NULL adr_type hits in the cache right away. Preload the right answer.
_intrinsics = NULL;
}
//---------------------------init_start----------------------------------------
// Install the StartNode on this compile object.
if (failing())
return; // already failing
}
}
return NULL;
}
//-------------------------------immutable_memory-------------------------------------
// Access immutable memory
if (_immutable_memory != NULL) {
return _immutable_memory;
}
_immutable_memory = p;
return _immutable_memory;
}
}
return NULL;
}
//----------------------set_cached_top_node------------------------------------
// Install the cached top node, and make sure Node::is_top works correctly.
// Calling Node::setup_is_top allows the nodes the chance to adjust
// their _out arrays.
}
#ifdef ASSERT
// Get useful node list by walking the graph.
}
// Return if CompileLog is NULL and PrintIdealNodeCount is false.
return;
}
// This is an expensive function. It is executed only when the user
// specifies VerifyIdealNodeCount option or otherwise knows the
// additional work that needs to be done to identify reachable nodes
// by walking the flow graph and find the missing ones using
// _dead_node_list.
// Get useful node list by walking the graph.
if (l_nodes != l_nodes_by_walk) {
}
for (int i = 0; i < last_idx; i++) {
if (useful_member_set.test(i)) {
if (_dead_node_list.test(i)) {
}
if (PrintIdealNodeCount) {
// Print the log message to tty
}
}
}
else if (! _dead_node_list.test(i)) {
}
if (PrintIdealNodeCount) {
// Print the log message to tty
}
}
}
}
}
}
#endif
#ifndef PRODUCT
}
}
#endif
///-------------------Managing Per-Node Debug & Profile Info-------------------
while (num_notes > 0) {
}
}
return false; // Do not push debug info onto constants.
#ifdef ASSERT
// Leave a bread crumb trail pointing to the original node:
}
#endif
if (node_note_array() == NULL)
return false; // Not collecting any notes now.
// This is a copy onto a pre-existing node, which may already have notes.
// If both nodes have notes, do not overwrite any pre-existing notes.
}
// The order of operations here ensures that dest notes will win...
}
//--------------------------allow_range_check_smearing-------------------------
// Gating condition for coalescing similar range checks.
// Sometimes we try 'speculatively' replacing a series of range checks by a
// single covering check that is at least as strong as any of them.
// If the optimization succeeds, the simplified (strengthened) range check
// will always succeed. If it fails, we will deopt, and then give up
// on the optimization.
// If this method has already thrown a range-check,
// assume it was because we already tried range smearing
// and it failed.
return !already_trapped;
}
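// Worked example of the smearing idea above (hypothetical Java snippet): for
// two checks 0 <= i < a.length and 0 <= i+2 < a.length in the same block, the
// single covering check (i >= 0 && i+2 < a.length) is at least as strong as
// either one; if that stronger check ever fails at runtime we deoptimize and
// later recompile without smearing.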
//------------------------------flatten_alias_type-----------------------------
// Known instance (scalarizable allocation) alias only with itself.
// Process weird unsafe references.
}
// Array pointers need some flattening
if( ta && is_known_inst ) {
}
// For arrays indexed by constant indices, we flatten the alias
// space to include all of the array body. Only the header, klass
// and array length can be accessed un-aliased.
// range is OK as-is.
} else { // Random constant offset into array body
}
}
// Arrays of fixed size alias with arrays of unknown size.
}
// Arrays of known objects become arrays of unknown objects.
}
}
// Arrays of bytes and of booleans both use 'bastore' and 'baload' so
// cannot be distinguished by bytecode alone.
}
// During the 2nd round of IterGVN, NotNull castings are removed.
// Make sure the Bottom and NotNull variants alias the same.
// Also, make sure exact and non-exact variants alias the same.
}
}
// Oop pointers need some flattening
// No constant oop pointers (such as Strings); they alias with
// unknown strings.
}
} else if( is_known_inst ) {
// During the 2nd round of IterGVN, NotNull castings are removed.
// Make sure the Bottom and NotNull variants alias the same.
// Also, make sure exact and non-exact variants alias the same.
}
// Canonicalize the holder of this field
// First handle header references such as a LoadKlassNode, even if the
// object's klass is unloaded at compile time (4965979).
if (!is_known_inst) { // Do it only for non-instance types
}
// Static fields are in the space above the normal instance
// fields in the java.lang.Class instance.
}
} else {
if( is_known_inst ) {
} else {
}
}
}
}
// Klass pointers to object array klasses need some flattening
if( tk ) {
// If we are referencing a field within a Klass, we need
// to assume the worst case of an Object. Both exact and
// inexact types must flatten to the same alias class so
// use NotNull as the PTR.
offset);
}
if( klass->is_obj_array_klass() ) {
if( !k || !k->is_loaded() ) // Only fails for some -Xcomp runs
}
// Check for precise loads from the primary supertype array and force them
// to the supertype cache alias index. Check for generic array loads from
// the primary supertype array and also force them to the supertype cache
// alias index. Since the same load can reach both, we need to merge
// these 2 disparate memories into the same alias class. Since the
// primary supertype array is read-only, there's no chance of confusion
// where we bypass an array load and an array store.
(offset >= primary_supers_offset &&
}
}
// Flatten all Raw pointers together.
// Flatten all to bottom for now
switch( _AliasLevel ) {
case 0:
break;
case 1: // Flatten to: oop, static, field or array
//case Type::AryPtr: tj = TypeAryPtr::RANGE; break;
default: ShouldNotReachHere();
}
break;
case 2: // No collapsing at level 2; keep all splits
case 3: // No collapsing at level 3; keep all splits
break;
default:
}
"For oops, klasses, raw offset must be constant; for arrays the offset is never known" );
// assert( tj->ptr() != TypePtr::Constant ||
// tj->base() == Type::RawPtr ||
// tj->base() == Type::KlassPtr, "No constant oop addresses" );
return tj;
}
_index = i;
_is_rewritable = true; // default
} else {
_general_index = 0;
}
}
//---------------------------------print_on------------------------------------
#ifndef PRODUCT
if (index() < 10)
}
}
}
void print_alias_types() {
}
}
#endif
//----------------------------probe_alias_cache--------------------------------
}
//-----------------------------grow_alias_types--------------------------------
}
//--------------------------------find_alias_type------------------------------
Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
if (_AliasLevel == 0)
return alias_type(AliasIdxBot);
}
// Handle special cases.
// Do it the slow way.
#ifdef ASSERT
// Scalarizable allocations have exact klass always.
}
#endif
for (int i = 0; i < num_alias_types(); i++) {
idx = i;
break;
}
}
if (idx == AliasIdxTop) {
// Grow the array if necessary.
// Add a new alias type.
idx = _num_alias_types++;
if (flat->isa_instptr()) {
}
if (flat->isa_klassptr()) {
}
// %%% (We would like to finalize JavaThread::threadObj_offset(),
// but the base pointer type is not distinctive enough to identify
// references into JavaThread.)
// Check for final fields.
// static field
ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
} else {
}
original_field == NULL ||
// Set field() and is_rewritable() attributes.
}
}
// Fill the cache for next time.
// Might as well try to fill the cache for the flattened version, too.
}
return alias_type(idx);
}
const TypeOopPtr* t;
else
return atp;
}
//------------------------------have_alias_type--------------------------------
return true;
}
// Handle special cases.
}
//-----------------------------must_alias--------------------------------------
// True if all values of the given address type are in the given alias category.
// the only remaining possible overlap is identity
"should not be testing for overlap with an unsafe pointer");
}
//------------------------------can_alias--------------------------------------
// True if any values of the given address type are in the given alias category.
// the only remaining possible overlap is identity
}
//---------------------------pop_warm_call-------------------------------------
return wci;
}
//----------------------------Inline_Warm--------------------------------------
// If there is room, try to inline some more warm call sites.
// %%% Do a graph index compaction pass when we think we're out of space?
if (!InlineWarmCalls) return 0;
int calls_made_hot = 0;
int amount_grown = 0;
// This one won't fit anyway. Get rid of it.
continue;
}
amount_grown += est_size;
}
if (calls_made_hot > 0) set_major_progress();
return calls_made_hot;
}
//----------------------------Finish_Warm--------------------------------------
if (!InlineWarmCalls) return;
if (failing()) return;
if (warm_calls() == NULL) return;
// Clean up loose ends, if we are out of space for inlining.
}
}
//---------------------cleanup_loop_predicates-----------------------
// Remove the opaque nodes that protect the predicates so that all unused
// checks and uncommon_traps will be eliminated from the ideal graph
if (predicate_count()==0) return;
for (int i = predicate_count(); i > 0; i--) {
}
}
// StringOpts and late inlining of string methods
{
// remove useless nodes to make the usage analysis simpler
}
{
}
// now inline anything that we skipped the first time around
if (!parse_time) {
}
while (_string_late_inlines.length() > 0) {
cg->do_late_inline();
if (failing()) return;
}
}
set_inlining_progress(false);
int i = 0;
_late_inlines_pos = i+1;
cg->do_late_inline();
if (failing()) return;
}
int j = 0;
for (; i < _late_inlines.length(); i++, j++) {
}
{
}
}
// Perform incremental inlining until bound on number of live nodes is reached
set_inlining_incrementally(true);
set_inlining_progress(true);
// PhaseIdealLoop is expensive so we only try it once we are
// out of loop and we only try it again if the previous helped
// got the number of nodes down significantly
if (failing()) return;
low_live_nodes = live_nodes();
_major_progress = true;
}
break;
}
}
if (failing()) return;
if (failing()) return;
}
if (_string_late_inlines.length() > 0) {
inline_string_calls(false);
if (failing()) return;
{
}
}
set_inlining_incrementally(false);
}
//------------------------------Optimize---------------------------------------
// Given a graph, optimize it.
#ifndef PRODUCT
if (env()->break_at_compile()) {
}
#endif
int loop_opts_cnt;
{
// Iterative Global Value Numbering, including ideal transforms
// Initialize IterGVN with types and values from parse-time GVN
{
}
if (failing()) return;
if (failing()) return;
// No more new expensive nodes will be added to the list from here
// so keep only the actual candidates for optimizations.
// Perform escape analysis
if (has_loops()) {
// Cleanup graph (remove dead nodes).
if (failing()) return;
}
if (failing()) return;
// Optimize out fields loads from scalar replaceable allocations.
if (failing()) return;
igvn.set_delay_transform(false);
if (failing()) return;
}
}
// Loop transforms on the ideal graph. Range Check Elimination,
// peeling, unrolling, etc.
// Set loop opts counter
{
if (failing()) return;
}
// Loop opts pass if partial peeling occurred in previous pass
if (failing()) return;
}
// Loop opts pass for loop-unrolling before CCP
if(major_progress() && (loop_opts_cnt > 0)) {
}
if (!failing()) {
// Verify that last round of loop opts produced a valid graph
}
}
if (failing()) return;
// Conditional Constant Propagation;
assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
{
ccp.do_transform();
}
assert( true, "Break here to ccp.dump_old2new_map()");
// Iterative Global Value Numbering, including ideal transforms
{
}
if (failing()) return;
// Loop transforms on the ideal graph. Range Check Elimination,
// peeling, unrolling, etc.
if(loop_opts_cnt > 0) {
debug_only( int cnt = 0; );
while(major_progress() && (loop_opts_cnt > 0)) {
if (failing()) return;
}
}
{
// Verify that all previous optimizations produced a valid graph
// at least to this point, even if no loop optimizations were done.
}
{
if (mex.expand_macro_nodes()) {
return;
}
}
} // (End scope of igvn; run destructor if necessary for asserts.)
// A method with only infinite loops has no edges entering loops from root
{
if (final_graph_reshaping()) {
return;
}
}
}
//------------------------------Code_Gen---------------------------------------
// Given a graph, generate code for it
if (failing()) return;
// Perform instruction selection. You might think we could reclaim Matcher
// memory PDQ, but actually the Matcher is used in generating spill code.
// Internals of the Matcher (including some VectorSets) must remain live
// for a while - thus I cannot reclaim Matcher memory lest a VectorSet usage
// set a bit in reclaimed memory.
// In debug mode can dump m._nodes.dump() for mapping of ideal to machine
// nodes. Mapping is only valid at the root of each matched subtree.
_matcher = &m;
{
m.match();
}
// In debug mode can dump m._nodes.dump() for mapping of ideal to machine
// nodes. Mapping is only valid at the root of each matched subtree.
// If you have too many nodes, or if matching has failed, bail out
check_node_count(0, "out of nodes matching instructions");
if (failing()) return;
// Build a proper-looking CFG
{
cfg.Dominators();
if (failing()) return;
if (failing()) return;
}
{
// Perform any platform dependent preallocation actions. This is used,
// for example, to avoid taking an implicit null pointer exception
// using the frame pointer on win95.
// Perform register allocation. After Chaitin, use-def chains are
// no longer accurate (at spill code) and so must be ignored.
// Node->LRG->reg mappings are still accurate.
// Bail out if the allocator builds too many nodes
if (failing()) return;
}
// Prior to register allocation we kept empty basic blocks in case the
// allocator needed a place to spill. After register allocation we
// are not adding any new instructions. If any basic block is empty, we
// can now safely remove it.
{
cfg.remove_empty();
if (do_freq_based_layout()) {
} else {
}
cfg.fixup_flow();
}
// Perform any platform dependent postallocation verifications.
// Apply peephole optimizations
if( OptoPeephole ) {
peep.do_transform();
}
// Convert Nodes to instruction bits in a buffer
{
// %%%% workspace merge brought two timers together for one job
Output();
}
// He's dead, Jim.
}
//------------------------------dump_asm---------------------------------------
// Dump formatted assembly
#ifndef PRODUCT
bool cut_short = false;
// For all blocks
_regalloc->dump_frame();
if (b->is_connector() && !Verbose) continue;
n = b->_nodes[0];
else
if (b->is_connector()) {
} else if (b->num_preds() == 2 && b->pred(1)->is_CatchProj() && b->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
}
// For all instructions
n = b->_nodes[j];
if (valid_bundle_info(n)) {
if (bundle->used_in_unconditional_delay()) {
delay = n;
continue;
}
if (bundle->starts_bundle())
starts_bundle = '+';
}
if (WizardMode) n->dump();
if( !n->is_Region() && // Don't print in the Assembly
!n->is_Phi() && // a few noisily useless nodes
!n->is_Proj() &&
!n->is_MachTemp() &&
!n->is_SafePointScalarObject() &&
!n->is_Catch() && // Would be nice to print exception table targets
!n->is_MergeMem() && // Not very interesting
!n->is_top() && // Debug info table constants
) {
else
starts_bundle = ' ';
}
// If we have an instruction with a delay slot, and have seen a delay,
// then back up and print it
starts_bundle = '+';
else
starts_bundle = ' ';
}
// Dump the exception table as well
// Print the exception table for this offset
}
}
else
} // End of per-block dump
}
#endif
//------------------------------Final_Reshape_Counts---------------------------
// This class defines counters to help identify when a method
_java_call_count(0), _inner_loop_count(0),
};
// Make sure the offset goes inside the instance layout.
// Note that OffsetBot and OffsetTop are very negative.
}
// Eliminate trivially redundant StoreCMs and accumulate their
// precedence edges.
// There are multiple users of the same address so it might be
// possible to eliminate some of the StoreCMs
bool done = false;
// Walk the chain of StoreCMs eliminating ones that match. As
// long as it's a chain of single users then the optimization is
// safe. Eliminating partially redundant StoreCMs would require
// cloning copies down the other paths.
// redundant StoreCM
// Hasn't been processed by this code yet.
} else {
// Already converted to precedence edge
// Accumulate any precedence edges
}
}
// Everything above this point has been processed.
done = true;
}
// Eliminate the previous StoreCM
} else {
}
}
}
}
//------------------------------final_graph_reshaping_impl----------------------
// Implement items 1-5 from final_graph_reshaping below.
if ( n->outcnt() == 0 ) return; // dead node
// Check for 2-input instruction with "last use" on right input.
// Swap to left input. Implements item (2).
// Check for commutative opcode
switch( nop ) {
// Move "last use" input to left by swapping inputs
break;
}
default:
break;
}
}
#ifdef ASSERT
if( n->is_Mem() ) {
// oop will be recorded in oop map if load crosses safepoint
"raw memory operations should have control edge");
}
#endif
// Count FPU ops and common calls, implements item (3)
switch( nop ) {
// Count all float operations that may use FPU
case Op_AddF:
case Op_SubF:
case Op_MulF:
case Op_DivF:
case Op_NegF:
case Op_ModF:
case Op_ConvI2F:
case Op_ConF:
case Op_CmpF:
case Op_CmpF3:
// case Op_ConvL2F: // longs are split into 32-bit halves
break;
case Op_ConvF2D:
case Op_ConvD2F:
break;
// Count all double operations that may use FPU
case Op_AddD:
case Op_SubD:
case Op_MulD:
case Op_DivD:
case Op_NegD:
case Op_ModD:
case Op_ConvI2D:
case Op_ConvD2I:
// case Op_ConvL2D: // handled by leaf call
// case Op_ConvD2L: // handled by leaf call
case Op_ConD:
case Op_CmpD:
case Op_CmpD3:
break;
case Op_Opaque1: // Remove Opaque Nodes before matching
case Op_Opaque2: // Remove Opaque Nodes before matching
break;
case Op_CallStaticJava:
case Op_CallJava:
case Op_CallDynamicJava:
case Op_CallRuntime:
case Op_CallLeaf:
case Op_CallLeafNoFP: {
// Count call sites where the FP mode bit would have to be flipped.
// Do not count uncommon runtime calls:
// uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
// _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
} else { // See if uncommon argument is shared
// Clone shared simple arguments to uncommon calls, item (1).
if( n->outcnt() > 1 &&
!n->is_Proj() &&
nop != Op_CreateEx &&
nop != Op_CheckCastPP &&
nop != Op_DecodeN &&
!n->is_Mem() ) {
}
}
break;
}
case Op_StoreD:
case Op_LoadD:
case Op_LoadD_unaligned:
goto handle_mem;
case Op_StoreF:
case Op_LoadF:
goto handle_mem;
case Op_StoreCM:
{
// Convert OopStore dependence into precedence edge
}
// fall through
case Op_StoreB:
case Op_StoreC:
case Op_StorePConditional:
case Op_StoreI:
case Op_StoreL:
case Op_StoreIConditional:
case Op_StoreLConditional:
case Op_CompareAndSwapI:
case Op_CompareAndSwapL:
case Op_CompareAndSwapP:
case Op_CompareAndSwapN:
case Op_GetAndAddI:
case Op_GetAndAddL:
case Op_GetAndSetI:
case Op_GetAndSetL:
case Op_GetAndSetP:
case Op_GetAndSetN:
case Op_StoreP:
case Op_StoreN:
case Op_LoadB:
case Op_LoadUB:
case Op_LoadUS:
case Op_LoadI:
case Op_LoadKlass:
case Op_LoadNKlass:
case Op_LoadL:
case Op_LoadL_unaligned:
case Op_LoadPLocked:
case Op_LoadP:
case Op_LoadN:
case Op_LoadRange:
case Op_LoadS: {
#ifdef ASSERT
if( VerifyOptoOopOffsets ) {
// Check to see if address types have grounded out somehow.
}
#endif
break;
}
case Op_AddP: { // Assert sane base pointers
"Base pointers must match" );
#ifdef _LP64
if (UseCompressedOops &&
// Use addressing with narrow klass to load with offset on x86.
// On sparc, loading a 32-bit constant and decoding it takes fewer
// instructions (4) than loading a 64-bit constant (7).
// Do this transformation here since IGVN will convert ConN back to ConP.
if (t->isa_oopptr()) {
// Look for existing ConN node of the same exact type.
m->bottom_type()->make_ptr() == t) {
nn = m;
break;
}
}
// Decode a narrow oop to match address
// [R12 + narrow_oop_reg<<3 + offset]
}
}
}
}
#endif
break;
}
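// The folding above relies on the compressed-oop decode being plain address
// arithmetic: oop = heap_base + ((uintptr_t) narrow_oop << shift), where the
// heap base lives in R12 on x86_64 and the shift is 3 for 8-byte object
// alignment. That is exactly the [R12 + narrow_oop_reg<<3 + offset] form
// mentioned above.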
#ifdef _LP64
case Op_CastPP:
const Type* t = n->bottom_type();
if (!Matcher::narrow_oop_use_complex_address()) {
//
// x86, ARM and friends can handle 2 adds in addressing mode
// and Matcher can fold a DecodeN node into address by using
// a narrow oop directly and do implicit NULL check in address:
//
// [R12 + narrow_oop_reg<<3 + offset]
// NullCheck narrow_oop_reg
//
// On other platforms (Sparc) we have to keep the new DecodeN node and
// use it to do implicit NULL check in address:
//
// decode_not_null narrow_oop_reg, base_reg
// [base_reg + offset]
// NullCheck base_reg
//
// Pin the new DecodeN node to the non-null path on these platforms (Sparc)
// to keep the information about which NULL check the new DecodeN node
// corresponds to, so it can be used as the value in implicit_null_check().
//
}
n->subsume_by(new_in1, this);
}
}
break;
case Op_CmpP:
// Do this transformation here to preserve CmpPNode::sub() and
// other TypePtr related Ideal optimizations (for example, ptr nullness).
if (!in1->is_DecodeN()) {
}
if (in2->is_DecodeN()) {
// Don't convert CmpP null check into CmpN if compressed
// oops implicit null check is not generated.
// This will allow to generate normal oop implicit null check.
//
// This transformation together with CastPP transformation above
// will generate code for implicit NULL checks for compressed oops.
//
// The original code after Optimize()
//
// LoadN memory, narrow_oop_reg
// decode narrow_oop_reg, base_reg
// CmpP base_reg, NULL
// CastPP base_reg // NotNull
// Load [base_reg + offset], val_reg
//
// after these transformations will be
//
// LoadN memory, narrow_oop_reg
// CmpN narrow_oop_reg, NULL
// decode_not_null narrow_oop_reg, base_reg
// Load [base_reg + offset], val_reg
//
// and the uncommon path (== NULL) will use narrow_oop_reg directly
// since narrow oops can be used in debug info now (see the code in
// final_graph_reshaping_walk()).
//
// At the end the code will be matched to
// on x86:
//
// Load_narrow_oop memory, narrow_oop_reg
// Load [R12 + narrow_oop_reg<<3 + offset], val_reg
// NullCheck narrow_oop_reg
//
// and on sparc:
//
// Load_narrow_oop memory, narrow_oop_reg
// decode_not_null narrow_oop_reg, base_reg
// Load [base_reg + offset], val_reg
// NullCheck base_reg
//
} else if (t->isa_oopptr()) {
}
}
n->subsume_by(cmpN, this);
}
}
}
}
break;
case Op_DecodeN:
// DecodeN could be pinned when it can't be fold into
// an address expression, see the code for Op_CastPP above.
break;
case Op_EncodeP: {
if (in1->is_DecodeN()) {
} else if (t->isa_oopptr()) {
}
}
}
break;
}
case Op_Proj: {
if (OptimizeStringConcat) {
if (p->_is_io_use) {
// Separate projections were used for the exception path which
// are normally removed by a late inline. If it wasn't inlined
// then they will hang around and should just be replaced with
// the original one.
// Replace with just one
break;
}
}
p->subsume_by(proj, this);
}
}
break;
}
case Op_Phi:
// The EncodeP optimization may create Phi with the same edges
// for all paths. It is not handled well by Register Allocator.
if (unique_in != m)
}
n->subsume_by(unique_in, this);
}
}
break;
#endif
case Op_ModI:
if (UseDivMod) {
// Check if a%b and a/b both exist
if (d) {
// Replace them with a fused divmod if supported
} else {
// replace a%b with a-((a/b)*b)
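// Worked example of the identity: with a = 17 and b = 5, a/b = 3,
// (a/b)*b = 15, and 17 - 15 = 2, which equals 17 % 5.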
n->subsume_by(sub, this);
}
}
}
break;
case Op_ModL:
if (UseDivMod) {
// Check if a%b and a/b both exist
if (d) {
// Replace them with a fused divmod if supported
} else {
// replace a%b with a-((a/b)*b)
n->subsume_by(sub, this);
}
}
}
break;
case Op_LoadVector:
case Op_StoreVector:
break;
case Op_PackB:
case Op_PackS:
case Op_PackI:
case Op_PackF:
case Op_PackL:
case Op_PackD:
// Replace many operand PackNodes with a binary tree for matching
n->subsume_by(btp, this);
}
break;
case Op_Loop:
case Op_CountedLoop:
if (n->as_Loop()->is_inner_loop()) {
}
break;
case Op_LShiftI:
case Op_RShiftI:
case Op_URShiftI:
case Op_LShiftL:
case Op_RShiftL:
case Op_URShiftL:
if (Matcher::need_masked_shift_count) {
// The cpu's shift instructions don't restrict the count to the
// lower 5/6 bits. We need to do the masking ourselves.
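// For example, a 32-bit shift x << count must behave like x << (count & 31)
// (and a 64-bit shift like x << (count & 63)), so on such CPUs an explicit
// AND of the count is inserted before the shift.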
}
} else {
}
}
}
}
break;
case Op_MemBarStoreStore:
// Break the link with AllocateNode: it is no longer useful and
// confuses register allocation.
}
break;
default:
break;
}
// Collect CFG split points
if (n->is_MultiBranch())
}
//------------------------------final_graph_reshaping_walk---------------------
// Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
// requires that the walk visits a node's inputs before visiting the node.
void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
uint i = 0;
while (true) {
if (i < cnt) {
// Place all non-visited non-null inputs onto stack
++i;
n = m;
i = 0;
}
} else {
// Now do post-visit work
final_graph_reshaping_impl( n, frc );
break; // finished
}
}
// Skip next transformation if compressed oops are not used.
return;
// Go over safepoints nodes to skip DecodeN nodes for debug edges.
// It could be done for an uncommon traps or any safepoints/calls
// if the DecodeN node is referenced only in a debug info.
n->as_CallStaticJava()->uncommon_trap_request() != 0);
if (in->is_DecodeN()) {
bool safe_to_skip = true;
if (!is_uncommon ) {
// Is it safe to skip?
if (!u->is_SafePoint() ||
safe_to_skip = false;
}
}
}
if (safe_to_skip) {
}
}
}
}
}
}
//------------------------------final_graph_reshaping--------------------------
// Final Graph Reshaping.
//
// (1) Clone simple inputs to uncommon calls, so they can be scheduled late
// and not commoned up and forced early. Must come after regular
// optimizations to avoid GVN undoing the cloning. Clone constant
// inputs to Loop Phis; these will be split by the allocator anyways.
// Remove Opaque nodes.
// (2) Move last-uses by commutative operations to the left input to encourage
// Intel update-in-place two-address operations and better register usage
// on RISCs. Must come after regular optimizations to avoid GVN Ideal
// calls canonicalizing them back.
// (3) Count the number of double-precision FP ops, single-precision FP ops
// and call sites. On Intel, we can get correct rounding either by
// forcing singles to memory (requires extra stores and loads after each
// FP bytecode) or we can set a rounding mode bit (requires setting and
// clearing the mode bit around call sites). The mode bit is only used
// if the relative frequency of single FP ops to calls is low enough.
// This is a key transform for SPEC mpeg_audio.
// (4) Detect infinite loops; blobs of code reachable from above but not
// below. Several of the Code_Gen algorithms fail on such code shapes,
// so we simply bail out. Happens a lot in ZKM.jar, but also happens
// from time to time in other code (such as -Xcomp finalizer loops, etc).
// Detection is by looking for IfNodes where only 1 projection is
// reachable from below or CatchNodes missing some targets.
// (5) Assert for insane oop offsets in debug mode.
// an infinite loop may have been eliminated by the optimizer,
// in which case the graph will be empty.
record_method_not_compilable("trivial infinite loop");
return true;
}
// Expensive nodes have their control input set to prevent the GVN
// from freely commoning them. There's no GVN beyond this point so
// no need to keep the control input. We want the expensive nodes to
// be freely moved to the least frequent code path by gcm.
for (int i = 0; i < expensive_count(); i++) {
}
// Visit everybody reachable!
// Allocate stack of size C->unique()/2 to avoid frequent realloc
// Check for unreachable (from below) code (i.e., infinite loops).
// Get number of CFG targets.
// Note that PCTables include exception targets after calls.
if (n->outcnt() != required_outcnt) {
// Check for a few special cases. Rethrow Nodes never take the
// 'fall-thru' path, so expected kids is 1 less.
required_outcnt--; // Rethrow always has 1 less kid
call->is_CallDynamicJava()) {
// Check for null receiver. In such case, the optimizer has
// detected that the virtual call will always result in a null
// pointer exception. The fall-through projection of this CatchNode
// will not be populated.
}
call->is_CallStaticJava()) {
// Check for negative array length. In such case, the optimizer has
// detected that the allocation attempt will always result in an
// exception. There is no fall-through projection of this CatchNode.
}
}
}
}
// Recheck with a better notion of 'required_outcnt'
if (n->outcnt() != required_outcnt) {
record_method_not_compilable("malformed control flow");
return true; // Not all targets reachable!
}
}
// Check that I actually visited all kids. Unreached kids
// must be infinite loops.
record_method_not_compilable("infinite loop");
return true; // Found unvisited kid; must be unreach
}
}
// If original bytecodes contained a mixture of floats and doubles
// check if the optimizer has made it homogenous, item (3).
frc.get_double_count() == 0 &&
set_24_bit_selection_and_mode( false, true );
}
// No infinite loops, no reason to bail out.
return false;
}
//-----------------------------too_many_traps----------------------------------
// Report if there are too many traps at the current method and bci.
int bci,
// Assume the trap has not occurred, or that it occurred only
// because of a transient condition during start-up in the interpreter.
return false;
}
// Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
// Also, if there are multiple reasons, or if there is no per-BCI record,
// assume the worst.
if (log())
return true;
} else {
}
}
// Less-accurate variant which does not require a method and bci.
ciMethodData* logmd) {
// Too many traps globally.
// Note that we use cumulative trap_count, not just md->trap_count.
if (log()) {
}
return true;
} else {
// The coast is clear.
return false;
}
}
//--------------------------too_many_recompiles--------------------------------
// Report if there are too many recompiles at the current method and bci.
// Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
// Is not eager to return true, since this will cause the compiler to use
// Action_none for a trap point, to avoid too many recompilations.
int bci,
// Assume the trap has not occurred, or that it occurred only
// because of a transient condition during start-up in the interpreter.
return false;
}
// Pick a cutoff point well within PerBytecodeRecompilationCutoff.
// The trap frequency measure we care about is the recompile count:
// Do not emit a trap here if it has already caused recompilations.
// Also, if there are multiple reasons, or if there is no per-BCI record,
// assume the worst.
if (log())
return true;
} else if (trap_count(reason) != 0
&& decompile_count() >= m_cutoff) {
// Too many recompiles globally, and we have seen this sort of trap.
// Use cumulative decompile_count, not just md->decompile_count.
if (log())
return true;
} else {
// The coast is clear.
return false;
}
}
#ifndef PRODUCT
//------------------------------verify_graph_edges---------------------------
// Walk the Graph and verify that there is a one-to-one correspondence
// between Use-Def edges and Def-Use edges in the graph.
if (VerifyGraphEdges) {
// Call recursive graph walk to check edges
if (no_dead_code) {
// Now make sure that no visited node is used by an unvisited node.
int dead_nodes = 0;  // counts the DU-reachable dead nodes found below
// At this point, we have found a dead node which is DU-reachable.
if (dead_nodes++ == 0)
}
}
}
}
}
#endif
// The Compile object keeps track of failure reasons separately from the ciEnv.
// This is required because there is not quite a 1-1 relation between the
// ciEnv and its compilation task and the Compile object. Note that one
// ciEnv might use two Compile objects, if C2Compiler::compile_method decides
// to backtrack and retry without subsuming loads. Other than this backtracking
// behavior, the Compile's failure reason is quietly copied up to the ciEnv
// by the logic in C2Compiler.
}
if (_failure_reason == NULL) {
// Record the first failure reason.
}
if (event.should_commit()) {
}
}
}
{
if (dolog) {
} else {
C = NULL;
}
_log->begin_head("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
}
}
if (_dolog) {
} else {
}
#ifdef ASSERT
if (PrintIdealNodeCount) {
}
if (VerifyIdealNodeCount) {
}
#endif
}
}
//=============================================================================
// Two Constant's are equal when the type and the value are equal.
// For floating point values we compare the bit pattern.
switch (type()) {
case T_LONG:
case T_OBJECT:
default: ShouldNotReachHere();
}
return false;
}
switch (t) {
// We use T_VOID as marker for jump-table entries (labels) which
// need an internal word relocation.
case T_VOID:
case T_ADDRESS:
}
return -1;
}
// sort descending
return 0;
}
// First, sort the array by frequencies.
#ifdef ASSERT
// Make sure all jump-table entries were sorted to the end of the
// array (they have a negative frequency).
bool found_void = false;
for (int i = 0; i < _constants.length(); i++) {
found_void = true; // jump-tables
else
}
#endif
int offset = 0;
for (int i = 0; i < _constants.length(); i++) {
// Align offset for type.
} else {
}
}
// Align size up to the next section start (which is insts; see
// CodeBuffer::align_at_start).
}
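// Worked example of the per-type alignment above: if a 4-byte T_FLOAT ends at
// offset 4 and the next constant is an 8-byte T_LONG, the offset is first
// rounded up to 8, so the long occupies [8, 16) and stays naturally aligned.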
for (int i = 0; i < _constants.length(); i++) {
case T_OBJECT: {
break;
}
case T_ADDRESS: {
break;
}
// We use T_VOID as marker for jump-table entries (labels) which
// need an internal word relocation.
case T_VOID: {
// Fill the jump-table with a dummy word. The real value is
// filled in later in fill_jump_table.
// Expand jump-table
}
break;
}
default: ShouldNotReachHere();
}
assert((constant_addr - _masm.code()->consts()->start()) == con.offset(), err_msg_res("must be: %d == %d", constant_addr - _masm.code()->consts()->start(), con.offset()));
}
}
return offset;
}
if (con.can_be_reused()) {
return;
}
}
}
return con;
}
switch (type) {
case T_OBJECT:
default: ShouldNotReachHere();
}
}
// We can use the node pointer here to identify the right jump-table
// as this method is called from Compile::Fill_buffer right before
// the MachNodes are emitted and the jump-table is filled (means the
// MachNode pointers do not change anymore).
Constant con(T_VOID, value, next_jump_table_freq(), false); // Labels of a jump-table cannot be reused.
return con;
}
void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const {
// If called from Compile::scratch_emit_size do nothing.
assert((uint) labels.length() == n->outcnt(), err_msg_res("must be equal: %d == %d", labels.length(), n->outcnt()));
// Since MachConstantNode::constant_offset() also contains
// table_base_offset() we need to subtract the table_base_offset()
// to get the plain offset into the constant table.
assert(*constant_addr == (((address) n) + i), err_msg_res("all jump-table entries must contain adjusted node pointer: " INTPTR_FORMAT " == " INTPTR_FORMAT, *constant_addr, (((address) n) + i)));
}
}
// Print inlining message for candidates that we couldn't inline
// for lack of space or non-constant receiver
for (int i = 0; i < _late_inlines.length(); i++) {
}
if (n->is_Call() && n->as_Call()->generator() != NULL && n->as_Call()->generator()->call_node() == n) {
}
if ( m == NULL ) continue;
}
}
for (int i = 0; i < _print_inlining_list->length(); i++) {
}
}
}
assert(n1->req() == n2->req(), err_msg_res("can't compare %s nodes: n1->req() = %d, n2->req() = %d", NodeClassNames[n1->Opcode()], n1->req(), n2->req()));
}
return 0;
}
}
if (!expensive_nodes_sorted()) {
}
}
return false;
}
}
return true;
}
if (_expensive_nodes->length() == 0) {
return false;
}
// Take this opportunity to remove dead nodes from the list
int j = 0;
for (int i = 0; i < _expensive_nodes->length(); i++) {
if (!n->is_unreachable(igvn)) {
_expensive_nodes->at_put(j, n);
j++;
}
}
_expensive_nodes->trunc_to(j);
// Then sort the list so that similar nodes are next to each other
// and check for at least two nodes of identical kind with same data
// inputs.
return true;
}
}
return false;
}
if (_expensive_nodes->length() == 0) {
return;
}
// Sort to bring similar nodes next to each other and clear the
// control input of nodes for which there's only a single copy.
int j = 0;
int identical = 0;
int i = 0;
assert(j <= i, "can't write beyond current index");
identical++;
continue;
}
if (identical > 0) {
identical = 0;
} else {
igvn.hash_delete(n);
igvn.hash_insert(n);
}
}
if (identical > 0) {
igvn.hash_delete(n);
igvn.hash_insert(n);
}
_expensive_nodes->trunc_to(j);
}
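// A minimal standalone sketch of the sort-then-scan pattern above, over plain
// ints instead of expensive nodes (names are hypothetical): after sorting,
// identical elements are adjacent, so one linear pass can keep every element
// that has at least one twin and drop the singletons.
static int example_keep_only_duplicated(int* vals, int len) {
  // assumes vals[] is already sorted in ascending order
  int j = 0;
  for (int i = 0; i < len; ) {
    int run = 1;
    while (i + run < len && vals[i + run] == vals[i]) run++;
    if (run >= 2) {                                  // element has a twin
      for (int k = 0; k < run; k++) vals[j++] = vals[i + k];
    }
    i += run;                                        // skip the whole run
  }
  return j;                                          // new logical length
}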
if (OptimizeExpensiveOps) {
_expensive_nodes->append(n);
} else {
// Clear control input and let IGVN optimize expensive nodes if
// OptimizeExpensiveOps is off.
}
}