parse.hpp revision 1472
/*
 * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

class BytecodeParseHistogram;
class InlineTree;
class Parse;
class SwitchRange;


//------------------------------InlineTree-------------------------------------
class InlineTree : public ResourceObj {
  Compile*    C;                  // cache
  JVMState*   _caller_jvms;       // state of caller
  ciMethod*   _method;            // method being called by the caller_jvms
  InlineTree* _caller_tree;
  uint        _count_inline_bcs;  // Accumulated count of inlined bytecodes
  // Call-site count / interpreter invocation count, scaled recursively.
  // Always between 0.0 and 1.0.  Represents the fraction of the method's
  // total execution time used at this call site.
  const float _site_invoke_ratio;
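  // Illustrative example (numbers are hypothetical): if a call site runs on
  // 40% of its caller's invocations, and the caller's own ratio is 0.5, the
  // recursively scaled value stored here would be 0.5 * 0.4 = 0.2.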
  const int   _site_depth_adjust;
  float compute_callee_frequency( int caller_bci ) const;

  GrowableArray<InlineTree*> _subtrees;
  friend class Compile;

protected:
  InlineTree(Compile* C,
             const InlineTree* caller_tree,
             ciMethod* callee_method,
             JVMState* caller_jvms,
             int caller_bci,
             float site_invoke_ratio,
             int site_depth_adjust);
  InlineTree* build_inline_tree_for_callee(ciMethod* callee_method,
                                           JVMState* caller_jvms,
                                           int caller_bci);
  const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result);
  const char* shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const;
  const char* shouldNotInline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const;
  void        print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const PRODUCT_RETURN;

  InlineTree* caller_tree()  const { return _caller_tree; }
  InlineTree* callee_at(int bci, ciMethod* m) const;
  int         inline_depth() const { return stack_depth() + _site_depth_adjust; }
  int         stack_depth()  const { return _caller_jvms ? _caller_jvms->depth() : 0; }

public:
  static InlineTree* build_inline_tree_root();
  static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found = false);

  // For temporary (stack-allocated, stateless) ilts:
  InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int site_depth_adjust);

  // InlineTree enum
  enum InlineStyle {
    Inline_do_not_inline            = 0, //
    Inline_cha_is_monomorphic       = 1, //
    Inline_type_profile_monomorphic = 2  //
  };

0N/A // See if it is OK to inline.
605N/A // The receiver is the inline tree for the caller.
0N/A //
0N/A // The result is a temperature indication. If it is hot or cold,
0N/A // inlining is immediate or undesirable. Otherwise, the info block
0N/A // returned is newly allocated and may be enqueued.
0N/A //
0N/A // If the method is inlinable, a new inline subtree is created on the fly,
0N/A // and may be accessed by find_subtree_from_root.
0N/A // The call_method is the dest_method for a special or static invocation.
0N/A // The call_method is an optimized virtual method candidate otherwise.
0N/A WarmCallInfo* ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci);
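
  // A minimal usage sketch (hypothetical caller-side code; 'ilt' is the
  // caller's InlineTree and the WarmCallInfo accessors shown are assumed,
  // not declared in this header):
  //
  //   WarmCallInfo* wci = ilt->ok_to_inline(callee, jvms, profile, wci_in);
  //   if      (wci->is_hot())  { /* inline the call site immediately */ }
  //   else if (wci->is_cold()) { /* leave the call site as a real call */ }
  //   else                     { /* enqueue wci and decide later */ }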

  // Information about inlined method
  JVMState* caller_jvms()       const { return _caller_jvms; }
  ciMethod* method()            const { return _method; }
  int       caller_bci()        const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
  uint      count_inline_bcs()  const { return _count_inline_bcs; }
  float     site_invoke_ratio() const { return _site_invoke_ratio; }

#ifndef PRODUCT
private:
  uint _count_inlines;          // Count of inlined methods
public:
  // Debug information collected during parse
  uint count_inlines()          const { return _count_inlines; }
#endif
  GrowableArray<InlineTree*> subtrees() { return _subtrees; }
};


//-----------------------------------------------------------------------------
//------------------------------Parse------------------------------------------
// Parse bytecodes, build a Graph
class Parse : public GraphKit {
 public:
  // Per-block information needed by the parser:
  class Block {
   private:
    ciTypeFlow::Block*   _flow;
    int                  _pred_count;    // how many predecessors in CFG?
    int                  _preds_parsed;  // how many of these have been parsed?
    uint                 _count;         // how many times executed?  Currently only set by _goto's
    bool                 _is_parsed;     // has this block been parsed yet?
    bool                 _is_handler;    // is this block an exception handler?
    SafePointNode*       _start_map;     // all values flowing into this block
    MethodLivenessResult _live_locals;   // lazily initialized liveness bitmap

    int                  _num_successors; // Includes only normal control flow.
    int                  _all_successors; // Includes exception paths also.
    Block**              _successors;

    // Use init_node/init_graph to initialize Blocks.
    // Block() : _live_locals((uintptr_t*)NULL,0) { ShouldNotReachHere(); }
    Block() : _live_locals(NULL,0) { ShouldNotReachHere(); }

   public:

    // Set up the block data structure itself.
    void init_node(Parse* outer, int po);
    // Set up the block's relations to other blocks.
    void init_graph(Parse* outer);

    ciTypeFlow::Block* flow() const    { return _flow; }
    int pred_count() const             { return _pred_count; }
    int preds_parsed() const           { return _preds_parsed; }
    bool is_parsed() const             { return _is_parsed; }
    bool is_handler() const            { return _is_handler; }
    void set_count( uint x )           { _count = x; }
    uint count() const                 { return _count; }

    SafePointNode* start_map() const   { assert(is_merged(), ""); return _start_map; }
    void set_start_map(SafePointNode* m) { assert(!is_merged(), ""); _start_map = m; }

    // True after any predecessor flows control into this block
    bool is_merged() const             { return _start_map != NULL; }

    // True when all non-exception predecessors have been parsed.
    bool is_ready() const              { return preds_parsed() == pred_count(); }

    int num_successors() const         { return _num_successors; }
    int all_successors() const         { return _all_successors; }
    Block* successor_at(int i) const {
      assert((uint)i < (uint)all_successors(), "");
      return _successors[i];
    }
    Block* successor_for_bci(int bci);

    int start() const                  { return flow()->start(); }
    int limit() const                  { return flow()->limit(); }
    int rpo() const                    { return flow()->rpo(); }
    int start_sp() const               { return flow()->stack_size(); }

    bool is_loop_head() const          { return flow()->is_loop_head(); }
    bool is_SEL_head() const           { return flow()->is_single_entry_loop_head(); }
    bool is_SEL_backedge(Block* pred) const { return is_SEL_head() && pred->rpo() >= rpo(); }
    bool is_invariant_local(uint i) const {
      const JVMState* jvms = start_map()->jvms();
      if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false;
      return flow()->is_invariant_local(i - jvms->locoff());
    }
    bool can_elide_SEL_phi(uint i) const { assert(is_SEL_head(), ""); return is_invariant_local(i); }

    const Type* peek(int off=0) const  { return stack_type_at(start_sp() - (off+1)); }

    const Type* stack_type_at(int i) const;
    const Type* local_type_at(int i) const;
    static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); }

    bool has_trap_at(int bci) const    { return flow()->has_trap() && flow()->trap_bci() == bci; }

    // Call this just before parsing a block.
    void mark_parsed() {
      assert(!_is_parsed, "must parse each block exactly once");
      _is_parsed = true;
    }

    // Return the phi/region input index for the "current" pred,
    // and bump the pred number.  For historical reasons these index
    // numbers are handed out in descending order.  The last index is
    // always PhiNode::Input (i.e., 1).  The value returned is known
    // as a "path number" because it distinguishes by which path we are
    // entering the block.
    int next_path_num() {
      assert(preds_parsed() < pred_count(), "too many preds?");
      return pred_count() - _preds_parsed++;
    }
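
    // Illustrative example (hypothetical values): for a block with
    // pred_count() == 3, successive calls to next_path_num() return 3, 2,
    // and finally 1 (PhiNode::Input), one path number per merged predecessor.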

    // Add a previously unaccounted predecessor to this block.
    // This operates by increasing the size of the block's region
    // and all its phi nodes (if any).  The value returned is a
    // path number ("pnum").
    int add_new_path();
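
    // Illustrative sketch (hypothetical caller code): when an extra edge
    // into an already-merged block is discovered, the caller grows the
    // region and phis and then wires the new inputs at the fresh index:
    //
    //   int pnum = target->add_new_path();  // region/phis gain one input
    //   // ... set control and value inputs at position pnum ...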

    // Initialize me by recording the parser's map.  My own map must be NULL.
    void record_state(Parse* outer);
  };

#ifndef PRODUCT
  // BytecodeParseHistogram collects number of bytecodes parsed, nodes constructed, and transformations.
  class BytecodeParseHistogram : public ResourceObj {
   private:
    enum BPHType {
      BPH_transforms,
      BPH_values
    };
    static bool _initialized;
    static uint _bytecodes_parsed [Bytecodes::number_of_codes];
    static uint _nodes_constructed[Bytecodes::number_of_codes];
    static uint _nodes_transformed[Bytecodes::number_of_codes];
    static uint _new_values       [Bytecodes::number_of_codes];

    Bytecodes::Code _initial_bytecode;
    int             _initial_node_count;
    int             _initial_transforms;
    int             _initial_values;

    Parse*   _parser;
    Compile* _compiler;

    // Initialization
    static void reset();

    // Return info being collected, select with global flag 'BytecodeParseInfo'
    int current_count(BPHType info_selector);

   public:
    BytecodeParseHistogram(Parse* p, Compile* c);
    static bool initialized();

    // Record info when starting to parse one bytecode
    void set_initial_state( Bytecodes::Code bc );
    // Record results of parsing one bytecode
    void record_change();

    // Profile printing
    static void print(float cutoff = 0.01F);  // cutoff in percent
  };

 public:
  // Record work done during parsing
  BytecodeParseHistogram* _parse_histogram;
  void set_parse_histogram(BytecodeParseHistogram* bph) { _parse_histogram = bph; }
  BytecodeParseHistogram* parse_histogram()             { return _parse_histogram; }
#endif

 private:
  friend class Block;

  // Variables which characterize this compilation as a whole:

  JVMState*     _caller;          // JVMS which carries incoming args & state.
  float         _expected_uses;   // expected number of calls to this code
  float         _prof_factor;     // discount applied to my profile counts
  int           _depth;           // Inline tree depth, for debug printouts
  const TypeFunc* _tf;            // My kind of function type
  int           _entry_bci;       // the osr bci or InvocationEntryBci

  ciTypeFlow*   _flow;            // Results of previous flow pass.
  Block*        _blocks;          // Array of basic-block structs.
  int           _block_count;     // Number of elements in _blocks.

  GraphKit      _exits;           // Record all normal returns and throws here.
  bool          _wrote_final;     // Did we write a final field?
  bool          _count_invocations;  // update and test invocation counter
  bool          _method_data_update; // update method data oop

  // Variables which track Java semantics during bytecode parsing:

  Block*            _block;       // block currently getting parsed
  ciBytecodeStream  _iter;        // stream of this method's bytecodes

  int           _blocks_merged;   // Progress meter: state merges from BB preds
  int           _blocks_parsed;   // Progress meter: BBs actually parsed

  const FastLockNode* _synch_lock; // FastLockNode for synchronized method

#ifndef PRODUCT
  int _max_switch_depth;          // Debugging SwitchRanges.
  int _est_switch_depth;          // Debugging SwitchRanges.
#endif

 public:
  // Constructor
  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);

  virtual Parse* is_Parse() const { return (Parse*)this; }

 public:
  // Accessors.
  JVMState*     caller()        const { return _caller; }
  float         expected_uses() const { return _expected_uses; }
  float         prof_factor()   const { return _prof_factor; }
  int           depth()         const { return _depth; }
  const TypeFunc* tf()          const { return _tf; }
  // entry_bci()  -- see osr_bci, etc.

  ciTypeFlow*   flow()          const { return _flow; }
  // blocks()     -- see rpo_at, start_block, etc.
  int           block_count()   const { return _block_count; }

  GraphKit&     exits()               { return _exits; }
  bool          wrote_final()   const { return _wrote_final; }
  void      set_wrote_final(bool z)   { _wrote_final = z; }
  bool          count_invocations()  const { return _count_invocations; }
  bool          method_data_update() const { return _method_data_update; }

  Block* block()    const { return _block; }
  ciBytecodeStream& iter() { return _iter; }
  Bytecodes::Code bc() const { return _iter.cur_bc(); }

  void set_block(Block* b) { _block = b; }

  // Derived accessors:
  bool is_normal_parse() const  { return _entry_bci == InvocationEntryBci; }
  bool is_osr_parse() const     { return _entry_bci != InvocationEntryBci; }
  int osr_bci() const           { assert(is_osr_parse(), ""); return _entry_bci; }

  void set_parse_bci(int bci);

  // Must this parse be aborted?
  bool failing()                { return C->failing(); }

  Block* rpo_at(int rpo) {
    assert(0 <= rpo && rpo < _block_count, "oob");
    return &_blocks[rpo];
  }
  Block* start_block() {
    return rpo_at(flow()->start_block()->rpo());
  }
  // Can return NULL if the flow pass did not complete a block.
  Block* successor_for_bci(int bci) {
    return block()->successor_for_bci(bci);
  }

 private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers
  Node* fetch_interpreter_state(int index, BasicType bt, Node* local_addrs, Node* local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode*& bad_type_exit);
  void  load_interpreter_state(Node* osr_buf);

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // first actions (before BCI 0)
  void do_method_entry();

  // implementation of monitorenter/monitorexit
  void do_monitor_enter();
  void do_monitor_exit();

  // Eagerly create phis throughout the state, to cope with back edges.
  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge(          int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path( int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode* ensure_phi(       int idx, bool nocreate = false);
  PhiNode* ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);
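
  // Illustrative flow (a hypothetical sketch, not quoted from the parser):
  // an unconditional branch bytecode typically ends with something like
  //
  //   profile_taken_branch(target_bci);
  //   maybe_add_safepoint(target_bci);  // back branch => possible loop head
  //   merge(target_bci);                // fold the current JVM state into the target block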

  // Parse this bytecode, and alter the Parser's JVM->Node mapping
  void do_one_bytecode();

  // Helper function to generate array store check
  void array_store_check();
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type* *result2=NULL);

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();

  // Insert a compiler safepoint into the graph, if there is a back-branch.
  void maybe_add_safepoint(int target_bci) {
    if (UseLoopSafepoints && target_bci <= bci()) {
      add_safepoint();
    }
  }

  // Return true if the parser should add a loop predicate
  bool should_add_predicate(int target_bci);
  // Insert a loop predicate into the graph
  void add_predicate();

  // Note:  Intrinsic generation routines may be found in library_call.cpp.

  // Helper function to setup Ideal Call nodes
  void do_call();

  // Helper function to uncommon-trap or bailout for non-compilable call-sites
  bool can_not_compile_call_site(ciMethod* dest_method, ciInstanceKlass* klass);

  // Helper function to identify inlining potential at call-site
  ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                              ciMethod* dest_method, const TypeOopPtr* receiver_type);

  // Helper function to setup for type-profile based inlining
  bool prepare_type_profile_inline(ciInstanceKlass* prof_klass, ciMethod* prof_method);

  // Helper functions for type checking bytecodes:
  void do_checkcast();
  void do_instanceof();

  // Helper functions for shifting & arithmetic
  void modf();
  void modd();
  void l2f();

  void do_irem();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true,  false); }
  void do_getfield () { do_field_access(true,  true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);
  bool static_field_ok_in_clinit(ciField* field, ciMethod* method);

  // common code for actually performing the load or store
  void do_get_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field);
  void do_put_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field);

  // loading from a constant field or the constant pool
  // returns false if push failed (non-perm field constants only, not ldcs)
  bool push_constant(ciConstant con, bool require_constant = false);

  // implementation of object creation bytecodes
  void do_new();
  void do_newarray(BasicType elemtype);
  void do_anewarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  float   dynamic_branch_prediction(float& cnt);
  float   branch_prediction(float& cnt, BoolTest::mask btest, int target_bci);
  bool    seems_never_taken(float prob);

  void    do_ifnull(BoolTest::mask btest, Node* c);
  void    do_if(BoolTest::mask btest, Node* c);
  int     repush_if_args();
  void    adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                              Block* path, Block* other_path);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask);
  Node*   jump_if_join(Node* iffalse, Node* iftrue);
  void    jump_if_true_fork (IfNode* ifNode, int dest_bci_if_true,  int prof_table_index);
  void    jump_if_false_fork(IfNode* ifNode, int dest_bci_if_false, int prof_table_index);
  void    jump_if_always_fork(int dest_bci_if_true, int prof_table_index);

  friend class SwitchRange;
  void    do_tableswitch();
  void    do_lookupswitch();
  void    jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool    create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);

  // helper functions for methodData style profiling
  void test_counter_against_threshold(Node* cnt, int limit);
  void increment_and_test_invocation_counter(int limit);
  void test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, int limit);
  Node* method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant);

  void profile_method_entry();
  void profile_taken_branch(int target_bci, bool force_update = false);
  void profile_not_taken_branch(bool force_update = false);
  void profile_call(Node* receiver);
  void profile_generic_call();
  void profile_receiver_type(Node* receiver);
  void profile_ret(int target_bci);
  void profile_null_checkcast();
  void profile_switch_case(int table_index);

  // helper function for call statistics
  void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;

  Node_Notes* make_node_notes(Node_Notes* caller_nn);

  // Helper functions for handling normal and abnormal exits.
  void build_exits();

  // Fix up all exceptional control flow exiting a single bytecode.
  void do_exceptions();

  // Fix up all exiting control flow at the end of the parse.
  void do_exits();

  // Add Catch/CatchProjs
  // The call is either a Java call or the VM's rethrow stub
  void catch_call_exceptions(ciExceptionHandlerStream&);

  // Handle all exceptions thrown by the inlined method.
  // Also handles exceptions for individual bytecodes.
  void catch_inline_exceptions(SafePointNode* ex_map);

  // Merge the given map into correct exceptional exit state.
  // Assumes that there is no applicable local handler.
  void throw_to_exit(SafePointNode* ex_map);

 public:
#ifndef PRODUCT
  // Handle PrintOpto, etc.
  void show_parse_info();
  void dump_map_adr_mem() const;
  static void print_statistics(); // Print some performance counters
  void dump();
  void dump_bci(int bci);
#endif
};