/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_OPTO_PARSE_HPP
#define SHARE_VM_OPTO_PARSE_HPP
#include "ci/ciMethodData.hpp"
#include "ci/ciTypeFlow.hpp"
#include "compiler/methodLiveness.hpp"
#include "libadt/vectset.hpp"
#include "oops/generateOopMap.hpp"
#include "opto/graphKit.hpp"
#include "opto/subnode.hpp"
class BytecodeParseHistogram;
class InlineTree;
class Parse;
class SwitchRange;
//------------------------------InlineTree-------------------------------------
// One node of the inlining decision tree built during a C2 compilation:
// the root corresponds to the method being compiled, and children
// correspond to inlined call sites.
// NOTE(review): the "class InlineTree ... {" opening line is missing from
// this copy of the header, and several declarations below survive only as
// truncated parameter-list fragments.  This span cannot compile as-is and
// should be restored from the pristine header.
friend class VMStructs;
Compile* C; // cache of the Compile object owning this inline tree
// Call-site count / interpreter invocation count, scaled recursively.
// Always between 0.0 and 1.0. Represents the percentage of the method's
// total execution time used at this call site.
const float _site_invoke_ratio;
float compute_callee_frequency( int caller_bci ) const;
// NOTE(review): presumably the human-readable reason for the most recent
// inlining decision at this node — confirm against the original header.
const char* _msg;
protected:
// Constructor for a non-root node; links this subtree under caller_tree
// at the given caller bytecode index.
InlineTree(Compile* C,
const InlineTree* caller_tree,
int caller_bci,
float site_invoke_ratio,
int max_inline_level);
// NOTE(review): the following lines are orphaned parameter-list fragments;
// the declarations they belonged to (return type, name, leading
// parameters) have been lost and must be recovered.
int caller_bci);
int caller_bci,
bool& should_delay);
int caller_bci,
bool success) const;
public:
// Build the root of the inline tree for the method being compiled.
static InlineTree* build_inline_tree_root();
// For temporary (stack-allocated, stateless) ilts:
InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int max_inline_level);
// InlineTree enum
// NOTE(review): the enumerator list has been stripped from this copy.
enum InlineStyle {
};
// See if it is OK to inline.
// The receiver is the inline tree for the caller.
//
// The result is a temperature indication. If it is hot or cold,
// inlining is immediate or undesirable. Otherwise, the info block
// returned is newly allocated and may be enqueued.
//
// If the method is inlinable, a new inline subtree is created on the fly,
// and may be accessed by find_subtree_from_root.
// The call_method is the dest_method for a special or static invocation.
// The call_method is an optimized virtual method candidate otherwise.
WarmCallInfo* ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci, bool& should_delay);
// Information about inlined method
#ifndef PRODUCT
private:
public:
// Debug information collected during parse
#endif
};
//-----------------------------------------------------------------------------
//------------------------------Parse------------------------------------------
// Parse bytecodes, build a Graph
// NOTE(review): the "class Parse ... {" opening line is missing between the
// banner above and the "public:" below; this span cannot compile as-is.
public:
// Per-block information needed by the parser:
class Block {
private:
// Use init_node/init_graph to initialize Blocks.
// Block() : _live_locals((uintptr_t*)NULL,0) { ShouldNotReachHere(); }
public:
// Set up the block data structure itself.
// Set up the block's relations to other blocks.
// True after any predecessor flows control into this block
#ifdef ASSERT
// True after backedge predecessor flows control into this block
// NOTE(review): the signature line of the setter wrapping this assignment
// is missing; the two dangling closing braces terminate the lost scopes.
_has_merged_backedge = true;
}
}
#endif
// True when all non-exception predecessors have been parsed.
// NOTE(review): the accessor signature for this return statement is also
// missing (it indexes the block's successor array); orphaned braces follow.
return _successors[i];
}
}
// Type of the value at the given stack depth, per ciTypeFlow results.
const Type* stack_type_at(int i) const;
// Type of the value in the given local slot, per ciTypeFlow results.
const Type* local_type_at(int i) const;
// Invoked just before the parser begins consuming this block's bytecodes;
// records that the block has been (or is being) parsed.
void mark_parsed() { _is_parsed = true; }
// Hand out the control-flow input index ("path number") for the next
// incoming predecessor edge, and advance the count of parsed
// predecessors.  For historical reasons these indices are issued in
// descending order; the last one handed out is always PhiNode::Input
// (i.e., 1).  The path number distinguishes by which path control is
// entering the block.
int next_path_num() {
  const int pnum = pred_count() - _preds_parsed;
  _preds_parsed += 1;
  return pnum;
}
// Add a previously unaccounted predecessor to this block.
// This operates by increasing the size of the block's region
// and all its phi nodes (if any). The value returned is a
// path number ("pnum").
int add_new_path();
// Initialize me by recording the parser's map. My own map must be NULL.
// NOTE(review): the declaration this comment describes is missing; the
// closing brace below ends the nested Block class.
};
#ifndef PRODUCT
// BytecodeParseHistogram collects number of bytecodes parsed, nodes constructed, and transformations.
// NOTE(review): the "class BytecodeParseHistogram {" opening line is
// missing before the access specifier below; restore from the original.
private:
// NOTE(review): the enumerator list has been stripped from this copy.
enum BPHType {
};
static bool _initialized;
// Counters captured at the start of parsing one bytecode, presumably so
// that per-bytecode deltas can be attributed afterwards — TODO confirm
// against the original header.
int _initial_node_count;
int _initial_transforms;
int _initial_values;
// Initialization
static void reset();
// Return info being collected, select with global flag 'BytecodeParseInfo'
public:
static bool initialized();
// Record info when starting to parse one bytecode
// Record results of parsing one bytecode
void record_change();
// Profile printing
};
public:
// Record work done during parsing
#endif
private:
friend class Block;
// Variables which characterize this compilation as a whole:
// Variables which track Java semantics during bytecode parsing:
// NOTE(review): the member declarations these two banners introduced have
// been stripped from this copy of the header.
#ifndef PRODUCT
#endif
public:
// Constructor
public:
// Accessors.
// entry_bci() -- see osr_bci, etc.
// blocks() -- see rpo_at, start_block, etc.
// Derived accessors:
void set_parse_bci(int bci);
// Must this parse be aborted?
// NOTE(review): orphaned closing braces follow — the inline accessor
// bodies they terminated are missing from this copy.
}
}
// Can return NULL if the flow pass did not complete a block.
}
private:
// Create a JVMS & map for the initial state of this method.
// OSR helpers
// Functions for managing basic blocks:
void init_blocks();
void load_state_from(Block* b);
// Parse all the basic blocks.
void do_all_blocks();
// Parse the current basic block
void do_one_block();
// Raise an error if we get a bad ciTypeFlow CFG.
void handle_missing_successor(int bci);
// first actions (before BCI 0)
void do_method_entry();
// implementation of monitorenter/monitorexit
void do_monitor_enter();
void do_monitor_exit();
// Eagerly create phis throughout the state, to cope with back edges.
void ensure_phis_everywhere();
// Merge the current mapping into the basic block starting at bci
void merge( int target_bci);
// Same as plain merge, except that it allocates a new path number.
void merge_new_path( int target_bci);
// Merge the current mapping into an exception handler.
void merge_exception(int target_bci);
// Helper: Merge the current mapping into the given basic block
// Helper functions for merging individual cells.
// Helper to merge the current memory state into the given basic block
// Parse this bytecode, and alter the Parser's JVM->Node mapping
void do_one_bytecode();
// helper function to generate array store check
void array_store_check();
// Helper function to generate array load
// Helper function to generate array store
// Helper function to compute array addressing
// Pass current map to exits
// Register finalizers on return from Object.<init>
void call_register_finalizer();
// Insert a compiler safepoint into the graph
void add_safepoint();
// Insert a compiler safepoint into the graph, if there is a back-branch.
// NOTE(review): two orphaned closing braces follow — the inline method
// bodies they belonged to are missing from this copy.
}
}
// Note: Intrinsic generation routines may be found in library_call.cpp.
// Helper function to setup Ideal Call nodes
void do_call();
// Helper function to uncommon-trap or bailout for non-compilable call-sites
// Helper function to setup for type-profile based inlining
// Helper functions for type checking bytecodes:
void do_checkcast();
void do_instanceof();
// Helper functions for shifting & arithmetic
// Float/double remainder and long-to-float conversion helpers.
void modf();
void modd();
void l2f();
// Integer remainder (irem bytecode).
void do_irem();
// implementation of _get* and _put* bytecodes
// common code for making initial checks and forming addresses
// common code for actually performing the load or store
// loading from a constant field or the constant pool
// returns false if push failed (non-perm field constants only, not ldcs)
// implementation of object creation bytecodes
void do_new();
void do_anewarray();
void do_multianewarray();
// Subroutine bytecodes (jsr/ret).
void do_jsr();
void do_ret();
// Branch-profile helpers: predicted taken-frequency (count returned via
// cnt) and a check for branches that appear never taken.
float dynamic_branch_prediction(float &cnt);
bool seems_never_taken(float prob);
int repush_if_args();
friend class SwitchRange;
void do_tableswitch();
void do_lookupswitch();
// helper functions for methodData style profiling
void increment_and_test_invocation_counter(int limit);
// Compute the address of a cell inside a MethodData profile record,
// optionally indexed (idx) with a given element stride.
Node* method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
void increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
void profile_method_entry();
void profile_not_taken_branch(bool force_update = false);
void profile_generic_call();
void profile_ret(int target_bci);
void profile_null_checkcast();
void profile_switch_case(int table_index);
// helper function for call statistics
// Helper functions for handling normal and abnormal exits.
void build_exits();
// Fix up all exceptional control flow exiting a single bytecode.
void do_exceptions();
// Fix up all exiting control flow at the end of the parse.
void do_exits();
// Add Catch/CatchProjs
// The call is either a Java call or the VM's rethrow stub
// Handle all exceptions thrown by the inlined method.
// Also handles exceptions for individual bytecodes.
// Merge the given map into correct exceptional exit state.
// Assumes that there is no applicable local handler.
public:
#ifndef PRODUCT
// Handle PrintOpto, etc.
void show_parse_info();
void dump_map_adr_mem() const;
static void print_statistics(); // Print some performance counters
void dump();
#endif
};
#endif // SHARE_VM_OPTO_PARSE_HPP