/*
 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_C1_C1_LINEARSCAN_HPP
#define SHARE_VM_C1_C1_LINEARSCAN_HPP

#include "c1/c1_FpuStackSim.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_IR.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"

class DebugInfoCache;
class FpuStackAllocator;
class IRScopeDebugInfo;
class Interval;
class IntervalWalker;
class LIRGenerator;
class LinearScan;
class MoveResolver;
class Range;

define_array(IntervalArray, Interval*)
define_stack(IntervalList, IntervalArray)

define_array(IntervalsArray, IntervalList*)
define_stack(IntervalsList, IntervalsArray)

define_array(OopMapArray, OopMap*)
define_stack(OopMapList, OopMapArray)

define_array(ScopeValueArray, ScopeValue*)

define_array(LIR_OpListArray, LIR_OpList*);
define_stack(LIR_OpListStack, LIR_OpListArray);


enum IntervalUseKind {
  // priority of use kinds must be ascending
  noUse = 0,
  loopEndMarker = 1,
  shouldHaveRegister = 2,
  mustHaveRegister = 3,

  firstValidKind = 1,
  lastValidKind = 3
};
define_array(UseKindArray, IntervalUseKind)
define_stack(UseKindStack, UseKindArray)


enum IntervalKind {
  fixedKind = 0,   // interval pre-colored by LIR_Generator
  anyKind   = 1,   // no register/memory allocated by LIR_Generator
  nofKinds,
  firstKind = fixedKind
};

// during linear scan an interval is in one of four states
enum IntervalState {
  unhandledState = 0, // unhandled state (not processed yet)
  activeState   = 1,  // live and assigned to a physical register
  inactiveState = 2,  // in a lifetime hole but assigned to a physical register
  handledState  = 3,  // spilled or no longer live
  invalidState = -1
};
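
// Illustrative lifecycle (a sketch, not normative): as the walker advances, an
// interval typically moves from unhandledState to activeState when its first
// range begins, oscillates between activeState and inactiveState across
// lifetime holes, and ends in handledState once its last range is passed or
// it has been spilled.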


enum IntervalSpillState {
  noDefinitionFound,  // starting state of calculation: no definition found yet
  oneDefinitionFound, // one definition has already been found.
                      // Note: two consecutive definitions are treated as one (e.g. a consecutive move and add caused by the two-operand LIR form)
                      // the position of this definition is stored in _definition_pos
  oneMoveInserted,    // one spill move has already been inserted.
  storeAtDefinition,  // the interval should be stored immediately after its definition because otherwise
                      // there would be multiple redundant stores
  startInMemory,      // the interval starts in memory (e.g. method parameter), so a store is never necessary
  noOptimization      // the interval has more than one definition (e.g. resulting from phi moves), so stores to memory are not optimized
};
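
// Illustrative progression (a sketch, not normative): a typical interval moves
// noDefinitionFound -> oneDefinitionFound at its defining op, then to
// oneMoveInserted when the first spill store is emitted; if further stores
// would be needed, change_spill_state() switches to storeAtDefinition so the
// value is stored once, right after its definition, instead of repeatedly.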


#define for_each_interval_kind(kind) \
  for (IntervalKind kind = firstKind; kind < nofKinds; kind = (IntervalKind)(kind + 1))

#define for_each_visitor_mode(mode) \
  for (LIR_OpVisitState::OprMode mode = LIR_OpVisitState::firstMode; mode < LIR_OpVisitState::numModes; mode = (LIR_OpVisitState::OprMode)(mode + 1))
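
// Usage sketch (illustrative only; process_list is a hypothetical helper):
//
//   for_each_interval_kind(kind) {
//     process_list(unhandled_first(kind));
//   }
//
// Both macros expand to a plain for-loop over the corresponding enum values.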


class LinearScan : public CompilationResourceObj {
  // declare classes used by LinearScan as friends because they
  // need a wide variety of functions declared here
  //
  // Only the small interface to the rest of the compiler is public
  friend class Interval;
  friend class IntervalWalker;
  friend class LinearScanWalker;
  friend class FpuStackAllocator;
  friend class MoveResolver;
  friend class LinearScanStatistic;
  friend class LinearScanTimers;
  friend class RegisterVerifier;

 public:
  enum {
    any_reg = -1,
    nof_cpu_regs = pd_nof_cpu_regs_linearscan,
    nof_fpu_regs = pd_nof_fpu_regs_linearscan,
    nof_xmm_regs = pd_nof_xmm_regs_linearscan,
    nof_regs = nof_cpu_regs + nof_fpu_regs + nof_xmm_regs
  };

 private:
  Compilation*     _compilation;
  IR*              _ir;
  LIRGenerator*    _gen;
  FrameMap*        _frame_map;

  BlockList        _cached_blocks;     // cached list with all blocks in linear-scan order (only correct as long as the original list remains unchanged)
  int              _num_virtual_regs;  // number of virtual registers (without new registers introduced because of splitting intervals)
  bool             _has_fpu_registers; // true if this method uses any floating point registers (and so fpu stack allocation is necessary)
  int              _num_calls;         // total number of calls in this method
  int              _max_spills;        // number of stack slots used for intervals allocated to memory
  int              _unused_spill_slot; // unused spill slot for a single-word value because of alignment of a double-word value

  IntervalList     _intervals;         // mapping from register number to interval
  IntervalList*    _new_intervals_from_allocation; // list with all intervals created during allocation when an existing interval is split
  IntervalArray*   _sorted_intervals;  // intervals sorted by Interval::from()
  bool             _needs_full_resort; // set to true if an Interval::from() is changed and _sorted_intervals must be resorted

  LIR_OpArray      _lir_ops;           // mapping from LIR_Op id to LIR_Op node
  BlockBeginArray  _block_of_op;       // mapping from LIR_Op id to the BlockBegin containing this instruction
  BitMap           _has_info;          // bit set for each LIR_Op id that has a CodeEmitInfo
  BitMap           _has_call;          // bit set for each LIR_Op id that destroys all caller save registers
  BitMap2D         _interval_in_loop;  // bit set for each virtual register that is contained in each loop

  // cached debug info to prevent multiple creation of same object
  // TODO: cached scope values for registers could be static
  ScopeValueArray  _scope_value_cache;

  static ConstantOopWriteValue* _oop_null_scope_value;
  static ConstantIntValue*      _int_m1_scope_value;
  static ConstantIntValue*      _int_0_scope_value;
  static ConstantIntValue*      _int_1_scope_value;
  static ConstantIntValue*      _int_2_scope_value;

  // accessors
  IR*           ir() const          { return _ir; }
  Compilation*  compilation() const { return _compilation; }
  LIRGenerator* gen() const         { return _gen; }
  FrameMap*     frame_map() const   { return _frame_map; }

  // unified bailout support
  void          bailout(const char* msg) const { compilation()->bailout(msg); }
  bool          bailed_out() const             { return compilation()->bailed_out(); }

  // access to block list (sorted in linear scan order)
  int           block_count() const    { assert(_cached_blocks.length() == ir()->linear_scan_order()->length(), "invalid cached block list"); return _cached_blocks.length(); }
  BlockBegin*   block_at(int idx) const { assert(_cached_blocks.at(idx) == ir()->linear_scan_order()->at(idx), "invalid cached block list"); return _cached_blocks.at(idx); }

  int           num_virtual_regs() const { return _num_virtual_regs; }
  // size of live_in and live_out sets of BasicBlocks (BitMap needs rounded size for iteration)
  int           live_set_size() const    { return round_to(_num_virtual_regs, BitsPerWord); }
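  // (illustrative) with e.g. 70 virtual registers on a 64-bit VM this returns
  // round_to(70, 64) = 128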
  bool          has_fpu_registers() const { return _has_fpu_registers; }
  int           num_loops() const         { return ir()->num_loops(); }
  bool          is_interval_in_loop(int interval, int loop) const { return _interval_in_loop.at(interval, loop); }

  // handling of fpu stack allocation (platform dependent, needed for debug information generation)
#ifdef X86
  FpuStackAllocator* _fpu_stack_allocator;
  bool use_fpu_stack_allocation() const { return UseSSE < 2 && has_fpu_registers(); }
#else
  bool use_fpu_stack_allocation() const { return false; }
#endif


  // access to interval list
  int           interval_count() const         { return _intervals.length(); }
  Interval*     interval_at(int reg_num) const { return _intervals.at(reg_num); }

  IntervalList* new_intervals_from_allocation() const { return _new_intervals_from_allocation; }

  // access to LIR_Ops and Blocks indexed by op_id
  int          max_lir_op_id() const           { assert(_lir_ops.length() > 0, "no operations"); return (_lir_ops.length() - 1) << 1; }
  LIR_Op*      lir_op_with_id(int op_id) const { assert(op_id >= 0 && op_id <= max_lir_op_id() && op_id % 2 == 0, "op_id out of range or not even"); return _lir_ops.at(op_id >> 1); }
  BlockBegin*  block_of_op_with_id(int op_id) const { assert(_block_of_op.length() > 0 && op_id >= 0 && op_id <= max_lir_op_id() + 1, "op_id out of range"); return _block_of_op.at(op_id >> 1); }

  bool is_block_begin(int op_id) { return op_id == 0 || block_of_op_with_id(op_id) != block_of_op_with_id(op_id - 1); }
  bool covers_block_begin(int op_id_1, int op_id_2) { return block_of_op_with_id(op_id_1) != block_of_op_with_id(op_id_2); }

  bool has_call(int op_id) { assert(op_id % 2 == 0, "must be even"); return _has_call.at(op_id >> 1); }
  bool has_info(int op_id) { assert(op_id % 2 == 0, "must be even"); return _has_info.at(op_id >> 1); }
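  // Note: number_instructions() assigns only even op_ids (0, 2, 4, ...), which
  // is why the accessors above assert "op_id % 2 == 0" and index their arrays
  // with op_id >> 1; the unused odd positions leave room for moves inserted
  // between two existing operations during allocation and resolution.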


  // functions for converting LIR-Operands to register numbers
  static bool is_valid_reg_num(int reg_num) { return reg_num >= 0; }
  static int  reg_num(LIR_Opr opr);
  static int  reg_numHi(LIR_Opr opr);

  // functions for classification of intervals
  static bool is_precolored_interval(const Interval* i);
  static bool is_virtual_interval(const Interval* i);

  static bool is_precolored_cpu_interval(const Interval* i);
  static bool is_virtual_cpu_interval(const Interval* i);
  static bool is_precolored_fpu_interval(const Interval* i);
  static bool is_virtual_fpu_interval(const Interval* i);

  static bool is_in_fpu_register(const Interval* i);
  static bool is_oop_interval(const Interval* i);


  // General helper functions
  int         allocate_spill_slot(bool double_word);
  void        assign_spill_slot(Interval* it);
  void        propagate_spill_slots();

  Interval*   create_interval(int reg_num);
  void        append_interval(Interval* it);
  void        copy_register_flags(Interval* from, Interval* to);

  // platform dependent functions
  static bool is_processed_reg_num(int reg_num);
  static int  num_physical_regs(BasicType type);
  static bool requires_adjacent_regs(BasicType type);
  static bool is_caller_save(int assigned_reg);

  // spill move optimization: eliminate moves from register to stack if
  // stack slot is known to be correct
  void change_spill_definition_pos(Interval* interval, int def_pos);
  void change_spill_state(Interval* interval, int spill_pos);
  static bool must_store_at_definition(const Interval* i);
  void eliminate_spill_moves();

  // Phase 1: number all instructions in all blocks
  void number_instructions();

  // Phase 2: compute local live sets separately for each block
  // (sets live_gen and live_kill for each block)
  //
  // helper methods used by compute_local_live_sets()
  void set_live_gen_kill(Value value, LIR_Op* op, BitMap& live_gen, BitMap& live_kill);

  void compute_local_live_sets();

  // Phase 3: perform a backward dataflow analysis to compute global live sets
  // (sets live_in and live_out for each block)
  void compute_global_live_sets();


  // Phase 4: build intervals
  // (fills the list _intervals)
  //
  // helper methods used by build_intervals()
  void add_use (Value value, int from, int to, IntervalUseKind use_kind);

  void add_def (LIR_Opr opr, int def_pos,      IntervalUseKind use_kind);
  void add_use (LIR_Opr opr, int from, int to, IntervalUseKind use_kind);
  void add_temp(LIR_Opr opr, int temp_pos,     IntervalUseKind use_kind);

  void add_def (int reg_num, int def_pos,      IntervalUseKind use_kind, BasicType type);
  void add_use (int reg_num, int from, int to, IntervalUseKind use_kind, BasicType type);
  void add_temp(int reg_num, int temp_pos,     IntervalUseKind use_kind, BasicType type);

  // Add platform dependent kills for particular LIR ops.  Can be used
  // to add platform dependent behaviour for some operations.
  void pd_add_temps(LIR_Op* op);

  IntervalUseKind use_kind_of_output_operand(LIR_Op* op, LIR_Opr opr);
  IntervalUseKind use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr);
  void handle_method_arguments(LIR_Op* op);
  void handle_doubleword_moves(LIR_Op* op);
  void add_register_hints(LIR_Op* op);

  void build_intervals();


  // Phase 5: actual register allocation
  // (Uses LinearScanWalker)
  //
  // helper functions for building a sorted list of intervals
  NOT_PRODUCT(bool is_sorted(IntervalArray* intervals);)
  static int interval_cmp(Interval** a, Interval** b);
  void add_to_list(Interval** first, Interval** prev, Interval* interval);
  void create_unhandled_lists(Interval** list1, Interval** list2, bool (is_list1)(const Interval* i), bool (is_list2)(const Interval* i));

  void sort_intervals_before_allocation();
  void sort_intervals_after_allocation();
  void allocate_registers();


  // Phase 6: resolve data flow
  // (insert moves at edges between blocks if intervals have been split)
  //
  // helper functions for resolve_data_flow()
  Interval* split_child_at_op_id(Interval* interval, int op_id, LIR_OpVisitState::OprMode mode);
  Interval* interval_at_block_begin(BlockBegin* block, int reg_num);
  Interval* interval_at_block_end(BlockBegin* block, int reg_num);
  Interval* interval_at_op_id(int reg_num, int op_id);
  void resolve_collect_mappings(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);
  void resolve_find_insert_pos(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);
  void resolve_data_flow();

  void resolve_exception_entry(BlockBegin* block, int reg_num, MoveResolver &move_resolver);
  void resolve_exception_entry(BlockBegin* block, MoveResolver &move_resolver);
  void resolve_exception_edge(XHandler* handler, int throwing_op_id, int reg_num, Phi* phi, MoveResolver &move_resolver);
  void resolve_exception_edge(XHandler* handler, int throwing_op_id, MoveResolver &move_resolver);
  void resolve_exception_handlers();

  // Phase 7: assign register numbers back to LIR
  // (includes computation of debug information and oop maps)
  //
  // helper functions for assign_reg_num()
  VMReg vm_reg_for_interval(Interval* interval);
  VMReg vm_reg_for_operand(LIR_Opr opr);

  static LIR_Opr operand_for_interval(Interval* interval);
  static LIR_Opr calc_operand_for_interval(const Interval* interval);
  LIR_Opr        canonical_spill_opr(Interval* interval);

  LIR_Opr color_lir_opr(LIR_Opr opr, int id, LIR_OpVisitState::OprMode);

  // methods used for oop map computation
  IntervalWalker* init_compute_oop_maps();
  OopMap*         compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo* info, bool is_call_site);
  void            compute_oop_map(IntervalWalker* iw, const LIR_OpVisitState &visitor, LIR_Op* op);

  // methods used for debug information computation
  void init_compute_debug_info();

  MonitorValue*  location_for_monitor_index(int monitor_index);
  LocationValue* location_for_name(int name, Location::Type loc_type);
  void set_oop(OopMap* map, VMReg name) {
    if (map->legal_vm_reg_name(name)) {
      map->set_oop(name);
    } else {
      bailout("illegal oopMap register name");
    }
  }

  int append_scope_value_for_constant(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);
  int append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);
  int append_scope_value(int op_id, Value value, GrowableArray<ScopeValue*>* scope_values);

  IRScopeDebugInfo* compute_debug_info_for_scope(int op_id, IRScope* cur_scope, ValueStack* cur_state, ValueStack* innermost_state);
  void compute_debug_info(CodeEmitInfo* info, int op_id);

  void assign_reg_num(LIR_OpList* instructions, IntervalWalker* iw);
  void assign_reg_num();


  // Phase 8: fpu stack allocation
  // (Used only on x86 when fpu operands are present)
  void allocate_fpu_stack();


  // helper functions for printing state
#ifndef PRODUCT
  static void print_bitmap(BitMap& bitmap);
  void        print_intervals(const char* label);
  void        print_lir(int level, const char* label, bool hir_valid = true);
#endif

#ifdef ASSERT
  // verification functions for allocation
  // (check that all intervals have a correct register and that no registers are overwritten)
  void verify();
  void verify_intervals();
  void verify_no_oops_in_fixed_intervals();
  void verify_constants();
  void verify_registers();
#endif

 public:
  // creation
  LinearScan(IR* ir, LIRGenerator* gen, FrameMap* frame_map);

  // main entry function: perform linear scan register allocation
  void do_linear_scan();
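  // Expected phase order (a sketch inferred from the phase comments above,
  // not a verbatim copy of the implementation):
  //   number_instructions();                // Phase 1
  //   compute_local_live_sets();            // Phase 2
  //   compute_global_live_sets();           // Phase 3
  //   build_intervals();                    // Phase 4
  //   sort_intervals_before_allocation();
  //   allocate_registers();                 // Phase 5
  //   resolve_data_flow();                  // Phase 6
  //   resolve_exception_handlers();
  //   sort_intervals_after_allocation();
  //   eliminate_spill_moves();
  //   assign_reg_num();                     // Phase 7
  //   if (use_fpu_stack_allocation()) allocate_fpu_stack();  // Phase 8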

  // accessors used by Compilation
  int         max_spills() const { return _max_spills; }
  int         num_calls() const  { assert(_num_calls >= 0, "not set"); return _num_calls; }

  // entry functions for printing
#ifndef PRODUCT
  static void print_statistics();
  static void print_timers(double total);
#endif
};


// Helper class for ordering moves that are inserted at the same position in the LIR.
// When moves between registers are inserted, it is important that the moves are
// ordered such that no register is overwritten. So moves from register to stack
// are processed before moves from stack to register. When moves have circular
// dependencies, a temporary stack slot is used to break the cycle.
// The same logic is used in the LinearScanWalker and in LinearScan during
// resolve_data_flow, and is therefore factored out into a separate class.
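//
// Illustrative example (register names are hypothetical): the parallel mapping
//     r1 -> r2
//     r2 -> r1
// forms a cycle, so neither move can be emitted first without clobbering a
// source. The resolver breaks the cycle with a temporary stack slot:
//     r1   -> temp
//     r2   -> r1
//     temp -> r2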
class MoveResolver: public StackObj {
 private:
  LinearScan*         _allocator;

  LIR_List*           _insert_list;
  int                 _insert_idx;
  LIR_InsertionBuffer _insertion_buffer; // buffer where moves are inserted

  IntervalList        _mapping_from;
  LIR_OprList         _mapping_from_opr;
  IntervalList        _mapping_to;
  bool                _multiple_reads_allowed;
  int                 _register_blocked[LinearScan::nof_regs];

  int  register_blocked(int reg)                    { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); return _register_blocked[reg]; }
  void set_register_blocked(int reg, int direction) { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); assert(direction == 1 || direction == -1, "out of bounds"); _register_blocked[reg] += direction; }

  void block_registers(Interval* it);
  void unblock_registers(Interval* it);
  bool save_to_process_move(Interval* from, Interval* to);

  void create_insertion_buffer(LIR_List* list);
  void append_insertion_buffer();
  void insert_move(Interval* from_interval, Interval* to_interval);
  void insert_move(LIR_Opr from_opr, Interval* to_interval);

  DEBUG_ONLY(void verify_before_resolve();)
  void resolve_mappings();
 public:
  MoveResolver(LinearScan* allocator);

  DEBUG_ONLY(void check_empty();)
  void set_multiple_reads_allowed() { _multiple_reads_allowed = true; }
  void set_insert_position(LIR_List* insert_list, int insert_idx);
  void move_insert_position(LIR_List* insert_list, int insert_idx);
  void add_mapping(Interval* from, Interval* to);
  void add_mapping(LIR_Opr from, Interval* to);
  void resolve_and_append_moves();

  LinearScan* allocator() { return _allocator; }
  bool has_mappings()     { return _mapping_from.length() > 0; }
};
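
// Typical usage (an illustrative sketch; allocator, block, insert_idx,
// from_interval and to_interval are assumed to be in scope):
//
//   MoveResolver move_resolver(allocator);
//   move_resolver.set_insert_position(block->lir(), insert_idx);
//   move_resolver.add_mapping(from_interval, to_interval);
//   move_resolver.resolve_and_append_moves();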


class Range : public CompilationResourceObj {
  friend class Interval;

 private:
  static Range* _end;  // sentinel (from == to == max_jint)

  int    _from;        // from (inclusive)
  int    _to;          // to (exclusive)
  Range* _next;        // linear list of Ranges

  // used only by class Interval, so hide them
  bool intersects(Range* r) const { return intersects_at(r) != -1; }
  int  intersects_at(Range* r) const;

 public:
  Range(int from, int to, Range* next);

  static void initialize(Arena* arena);
  static Range* end() { return _end; }

  int from() const    { return _from; }
  int to() const      { return _to; }
  Range* next() const { return _next; }
  void set_from(int from)    { _from = from; }
  void set_to(int to)        { _to = to; }
  void set_next(Range* next) { _next = next; }

  // for testing
  void print(outputStream* out = tty) const PRODUCT_RETURN;
};


// Interval is an ordered list of disjoint ranges.

// For pre-colored double word LIR_Oprs, one interval is created for
// the low word register and one is created for the hi word register.
// On Intel, only one interval is created for FPU double registers. At
// all times assigned_reg contains the number of the physical
// register.

// For LIR_Oprs in virtual registers a single interval can represent
// single and double word values. When a physical register is
// assigned to the interval, assigned_reg contains the
// phys. reg. number and, for double word values, assigned_regHi the
// phys. reg. number of the hi word if there is any. For spilled
// intervals assigned_reg contains the stack index; assigned_regHi is
// always -1.

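// Illustrative example (the concrete register numbers are hypothetical): a
// double-word virtual interval allocated to an adjacent cpu register pair
// might have assigned_reg == 5 and assigned_regHi == 6; once spilled,
// assigned_reg holds the canonical stack slot index and assigned_regHi is -1.
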
class Interval : public CompilationResourceObj {
 private:
  static Interval* _end;    // sentinel (interval with only range Range::end())

  int        _reg_num;
  BasicType  _type;         // valid only for virtual registers
  Range*     _first;        // sorted list of Ranges
  intStack   _use_pos_and_kinds; // sorted list of use-positions and their corresponding use-kinds

  Range*         _current;  // interval iteration: the current Range
  Interval*      _next;     // interval iteration: sorted list of Intervals (ends with sentinel)
  IntervalState  _state;    // interval iteration: the set to which this interval currently belongs


  int        _assigned_reg;
  int        _assigned_regHi;

  int        _cached_to;    // cached value: to of last range (-1: not cached)
  LIR_Opr    _cached_opr;
  VMReg      _cached_vm_reg;

  Interval*  _split_parent;           // the original interval from which this interval was split off
  IntervalList _split_children;       // list of all intervals that are split off from this interval (only available for split parents)
  Interval*  _current_split_child;    // the split child that was active or inactive last (always stored in split parents)

  int        _canonical_spill_slot;   // the stack slot to which all split parts of this interval are spilled (always stored in split parents)
  bool       _insert_move_when_activated; // true if a move is inserted between _current_split_child and this interval when the interval becomes active for the first time
  IntervalSpillState _spill_state;    // for spill move optimization
  int        _spill_definition_pos;   // position where the interval is defined (if defined only once)
  Interval*  _register_hint;          // this interval should be in the same register as the hint interval

  int       calc_to();
  Interval* new_split_child();
 public:
  Interval(int reg_num);

  static void initialize(Arena* arena);
  static Interval* end() { return _end; }

  // accessors
  int       reg_num() const          { return _reg_num; }
  void      set_reg_num(int r)       { assert(_reg_num == -1, "cannot change reg_num"); _reg_num = r; }
  BasicType type() const             { assert(_reg_num == -1 || _reg_num >= LIR_OprDesc::vreg_base, "cannot access type for fixed interval"); return _type; }
  void      set_type(BasicType type) { assert(_reg_num < LIR_OprDesc::vreg_base || _type == T_ILLEGAL || _type == type, "overwriting existing type"); _type = type; }

  Range*    first() const            { return _first; }
  int       from() const             { return _first->from(); }
  int       to()                     { if (_cached_to == -1) _cached_to = calc_to(); assert(_cached_to == calc_to(), "invalid cached value"); return _cached_to; }
  int       num_use_positions() const { return _use_pos_and_kinds.length() / 2; }

  Interval* next() const             { return _next; }
  Interval** next_addr()             { return &_next; }
  void      set_next(Interval* next) { _next = next; }

  int       assigned_reg() const     { return _assigned_reg; }
  int       assigned_regHi() const   { return _assigned_regHi; }
  void      assign_reg(int reg)      { _assigned_reg = reg; _assigned_regHi = LinearScan::any_reg; }
  void      assign_reg(int reg, int regHi) { _assigned_reg = reg; _assigned_regHi = regHi; }

  Interval* register_hint(bool search_split_child = true) const; // calculation needed
  void      set_register_hint(Interval* i) { _register_hint = i; }

  int       state() const            { return _state; }
  void      set_state(IntervalState s) { _state = s; }

  // access to split parent and split children
  bool      is_split_parent() const  { return _split_parent == this; }
  bool      is_split_child() const   { return _split_parent != this; }
  Interval* split_parent() const     { assert(_split_parent->is_split_parent(), "must be"); return _split_parent; }
  Interval* split_child_at_op_id(int op_id, LIR_OpVisitState::OprMode mode);
  Interval* split_child_before_op_id(int op_id);
  bool      split_child_covers(int op_id, LIR_OpVisitState::OprMode mode);
  DEBUG_ONLY(void check_split_children();)

  // information stored in split parent, but available for all children
  int       canonical_spill_slot() const { return split_parent()->_canonical_spill_slot; }
  void      set_canonical_spill_slot(int slot) { assert(split_parent()->_canonical_spill_slot == -1, "overwriting existing value"); split_parent()->_canonical_spill_slot = slot; }
  Interval* current_split_child() const { return split_parent()->_current_split_child; }
  void      make_current_split_child()  { split_parent()->_current_split_child = this; }

  bool      insert_move_when_activated() const { return _insert_move_when_activated; }
  void      set_insert_move_when_activated(bool b) { _insert_move_when_activated = b; }

  // for spill optimization
  IntervalSpillState spill_state() const { return split_parent()->_spill_state; }
  int       spill_definition_pos() const { return split_parent()->_spill_definition_pos; }
  void      set_spill_state(IntervalSpillState state) { assert(state >= spill_state(), "state cannot decrease"); split_parent()->_spill_state = state; }
  void      set_spill_definition_pos(int pos) { assert(spill_definition_pos() == -1, "cannot set the position twice"); split_parent()->_spill_definition_pos = pos; }
  // returns true if this interval has a shadow copy on the stack that is always correct
  bool      always_in_memory() const { return split_parent()->_spill_state == storeAtDefinition || split_parent()->_spill_state == startInMemory; }

  // caching of values that take time to compute and are used multiple times
  LIR_Opr   cached_opr() const       { return _cached_opr; }
  VMReg     cached_vm_reg() const    { return _cached_vm_reg; }
  void      set_cached_opr(LIR_Opr opr) { _cached_opr = opr; }
  void      set_cached_vm_reg(VMReg reg) { _cached_vm_reg = reg; }

  // access to use positions
  int first_usage(IntervalUseKind min_use_kind) const;          // id of the first operation requiring this interval in a register
  int next_usage(IntervalUseKind min_use_kind, int from) const; // id of next usage seen from the given position
  int next_usage_exact(IntervalUseKind exact_use_kind, int from) const;
  int previous_usage(IntervalUseKind min_use_kind, int from) const;

  // manipulating intervals
  void      add_use_pos(int pos, IntervalUseKind use_kind);
  void      add_range(int from, int to);
  Interval* split(int split_pos);
  Interval* split_from_start(int split_pos);
  void      remove_first_use_pos() { _use_pos_and_kinds.truncate(_use_pos_and_kinds.length() - 2); }

  // test intersection
  bool      covers(int op_id, LIR_OpVisitState::OprMode mode) const;
  bool      has_hole_between(int from, int to);
  bool      intersects(Interval* i) const    { return _first->intersects(i->_first); }
  int       intersects_at(Interval* i) const { return _first->intersects_at(i->_first); }

  // range iteration
  void      rewind_range()           { _current = _first; }
  void      next_range()             { assert(this != _end, "not allowed on sentinel"); _current = _current->next(); }
  int       current_from() const     { return _current->from(); }
  int       current_to() const       { return _current->to(); }
  bool      current_at_end() const   { return _current == Range::end(); }
  bool      current_intersects(Interval* it)    { return _current->intersects(it->_current); }
  int       current_intersects_at(Interval* it) { return _current->intersects_at(it->_current); }

  // printing
  void print(outputStream* out = tty) const PRODUCT_RETURN;
};


class IntervalWalker : public CompilationResourceObj {
 protected:
  Compilation* _compilation;
  LinearScan*  _allocator;

  Interval*    _unhandled_first[nofKinds]; // sorted list of intervals, not live before the current position
  Interval*    _active_first   [nofKinds]; // sorted list of intervals, live at the current position
  Interval*    _inactive_first [nofKinds]; // sorted list of intervals, in a lifetime hole at the current position

  Interval*    _current;          // the current interval coming from the unhandled list
  int          _current_position; // the current position (intercept point through the intervals)
  IntervalKind _current_kind;     // whether the current interval is fixedKind or anyKind


  Compilation* compilation() const { return _compilation; }
  LinearScan*  allocator() const   { return _allocator; }

  // unified bailout support
  void bailout(const char* msg) const { compilation()->bailout(msg); }
  bool bailed_out() const             { return compilation()->bailed_out(); }

  void check_bounds(IntervalKind kind) { assert(kind >= fixedKind && kind <= anyKind, "invalid interval_kind"); }

  Interval** unhandled_first_addr(IntervalKind kind) { check_bounds(kind); return &_unhandled_first[kind]; }
  Interval** active_first_addr(IntervalKind kind)    { check_bounds(kind); return &_active_first[kind]; }
  Interval** inactive_first_addr(IntervalKind kind)  { check_bounds(kind); return &_inactive_first[kind]; }

  void append_unsorted(Interval** first, Interval* interval);
  void append_sorted(Interval** first, Interval* interval);
  void append_to_unhandled(Interval** list, Interval* interval);

  bool remove_from_list(Interval** list, Interval* i);
  void remove_from_list(Interval* i);

  void next_interval();
  Interval*    current() const      { return _current; }
  IntervalKind current_kind() const { return _current_kind; }

  void walk_to(IntervalState state, int from);

  // activate_current() is called when an unhandled interval becomes active (in current(), current_kind()).
  // Return false if current() should not be moved to the active interval list.
  // It is safe to append current to any interval list but the unhandled list.
  virtual bool activate_current() { return true; }

  // interval_moved() is called whenever an interval moves from one interval list to another.
  // In the implementation of this method it is prohibited to move the interval to any list.
  virtual void interval_moved(Interval* interval, IntervalKind kind, IntervalState from, IntervalState to);

 public:
  IntervalWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first);

  Interval* unhandled_first(IntervalKind kind) { check_bounds(kind); return _unhandled_first[kind]; }
  Interval* active_first(IntervalKind kind)    { check_bounds(kind); return _active_first[kind]; }
  Interval* inactive_first(IntervalKind kind)  { check_bounds(kind); return _inactive_first[kind]; }

  // active contains the intervals that are live after the lir_op
  void walk_to(int lir_op_id);
  // active contains the intervals that are live before the lir_op
  void walk_before(int lir_op_id) { walk_to(lir_op_id - 1); }
  // walk through all intervals
  void walk()                     { walk_to(max_jint); }

  int current_position()          { return _current_position; }
};


// The actual linear scan register allocator
class LinearScanWalker : public IntervalWalker {
  enum {
    any_reg = LinearScan::any_reg
  };

 private:
  int  _first_reg;      // the reg. number of the first phys. register
  int  _last_reg;       // the reg. number of the last phys. register
  int  _num_phys_regs;  // number of phys. registers required by the current interval
  bool _adjacent_regs;  // whether the lo/hi words of the phys. registers must be adjacent

  int           _use_pos[LinearScan::nof_regs];
  int           _block_pos[LinearScan::nof_regs];
  IntervalList* _spill_intervals[LinearScan::nof_regs];

  MoveResolver  _move_resolver;  // for ordering spill moves

  // accessors mapped to same functions in class LinearScan
  int         block_count() const     { return allocator()->block_count(); }
  BlockBegin* block_at(int idx) const { return allocator()->block_at(idx); }
  BlockBegin* block_of_op_with_id(int op_id) const { return allocator()->block_of_op_with_id(op_id); }

  void init_use_lists(bool only_process_use_pos);
  void exclude_from_use(int reg);
  void exclude_from_use(Interval* i);
  void set_use_pos(int reg, Interval* i, int use_pos, bool only_process_use_pos);
  void set_use_pos(Interval* i, int use_pos, bool only_process_use_pos);
  void set_block_pos(int reg, Interval* i, int block_pos);
  void set_block_pos(Interval* i, int block_pos);

  void free_exclude_active_fixed();
  void free_exclude_active_any();
  void free_collect_inactive_fixed(Interval* cur);
  void free_collect_inactive_any(Interval* cur);
  void free_collect_unhandled(IntervalKind kind, Interval* cur);
  void spill_exclude_active_fixed();
  void spill_block_unhandled_fixed(Interval* cur);
  void spill_block_inactive_fixed(Interval* cur);
  void spill_collect_active_any();
  void spill_collect_inactive_any(Interval* cur);

  void insert_move(int op_id, Interval* src_it, Interval* dst_it);
  int  find_optimal_split_pos(BlockBegin* min_block, BlockBegin* max_block, int max_split_pos);
  int  find_optimal_split_pos(Interval* it, int min_split_pos, int max_split_pos, bool do_loop_optimization);
  void split_before_usage(Interval* it, int min_split_pos, int max_split_pos);
  void split_for_spilling(Interval* it);
  void split_stack_interval(Interval* it);
  void split_when_partial_register_available(Interval* it, int register_available_until);
  void split_and_spill_interval(Interval* it);

  int  find_free_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split);
  int  find_free_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split);
  bool alloc_free_reg(Interval* cur);

  int  find_locked_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split);
  int  find_locked_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split);
  void split_and_spill_intersecting_intervals(int reg, int regHi);
  void alloc_locked_reg(Interval* cur);

  bool no_allocation_possible(Interval* cur);
  void update_phys_reg_range(bool requires_cpu_register);
  void init_vars_for_alloc(Interval* cur);
  bool pd_init_regs_for_alloc(Interval* cur);

  void combine_spilled_intervals(Interval* cur);
  bool is_move(LIR_Op* op, Interval* from, Interval* to);

  bool activate_current();

 public:
  LinearScanWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first);

  // must be called when all intervals are allocated
  void finish_allocation() { _move_resolver.resolve_and_append_moves(); }
};



/*
When a block has more than one predecessor, and all predecessors end with
the same sequence of move-instructions, these moves can be placed once
at the beginning of the block instead of multiple times in the predecessors.

Similarly, when a block has more than one successor, equal sequences of
moves at the beginning of the successors can be placed once at the end of
the block. But because the moves must be inserted before all branch
instructions, this works only when there is exactly one conditional branch
at the end of the block (because the moves must be inserted before all
branches, but after all compares).

This optimization affects all kinds of moves (reg->reg, reg->stack and
stack->reg). Because this optimization works best when a block contains only
a few moves, it has a huge impact on the number of blocks that end up
completely empty.
*/
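
// Illustrative example (block and operand names are hypothetical): if both
// predecessors B1 and B2 end with
//
//     move stack[4] -> r1
//     branch B3
//
// then the move is removed from the end of B1 and B2 and inserted once at the
// beginning of B3.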
class EdgeMoveOptimizer : public StackObj {
 private:
  // the class maintains a list with the LIR instruction lists of all
  // successors (or predecessors) and the current index into each of these lists
  LIR_OpListStack _edge_instructions;
  intStack        _edge_instructions_idx;

  void init_instructions();
  void append_instructions(LIR_OpList* instructions, int instructions_idx);
  LIR_Op* instruction_at(int edge);
  void remove_cur_instruction(int edge, bool decrement_index);

  bool operations_different(LIR_Op* op1, LIR_Op* op2);

  void optimize_moves_at_block_end(BlockBegin* cur);
  void optimize_moves_at_block_begin(BlockBegin* cur);

  EdgeMoveOptimizer();

 public:
  static void optimize(BlockList* code);
};



class ControlFlowOptimizer : public StackObj {
 private:
  BlockList _original_preds;

  enum {
    ShortLoopSize = 5
  };
  void reorder_short_loop(BlockList* code, BlockBegin* header_block, int header_idx);
  void reorder_short_loops(BlockList* code);

  bool can_delete_block(BlockBegin* cur);
  void substitute_branch_target(BlockBegin* cur, BlockBegin* target_from, BlockBegin* target_to);
  void delete_empty_blocks(BlockList* code);

  void delete_unnecessary_jumps(BlockList* code);
  void delete_jumps_to_return(BlockList* code);

  DEBUG_ONLY(void verify(BlockList* code);)

  ControlFlowOptimizer();
 public:
  static void optimize(BlockList* code);
};


#ifndef PRODUCT

// Helper class for collecting statistics of LinearScan
class LinearScanStatistic : public StackObj {
 public:
  enum Counter {
    // general counters
    counter_method,
    counter_fpu_method,
    counter_loop_method,
    counter_exception_method,
    counter_loop,
    counter_block,
    counter_loop_block,
    counter_exception_block,
    counter_interval,
    counter_fixed_interval,
    counter_range,
    counter_fixed_range,
    counter_use_pos,
    counter_fixed_use_pos,
    counter_spill_slots,
    blank_line_1,

    // counters for classes of lir instructions
    counter_instruction,
    counter_label,
    counter_entry,
    counter_return,
    counter_call,
    counter_move,
    counter_cmp,
    counter_cond_branch,
    counter_uncond_branch,
    counter_stub_branch,
    counter_alu,
    counter_alloc,
    counter_sync,
    counter_throw,
    counter_unwind,
    counter_typecheck,
    counter_fpu_stack,
    counter_misc_inst,
    counter_other_inst,
    blank_line_2,

    // counters for different types of moves
    counter_move_total,
    counter_move_reg_reg,
    counter_move_reg_stack,
    counter_move_stack_reg,
    counter_move_stack_stack,
    counter_move_reg_mem,
    counter_move_mem_reg,
    counter_move_const_any,

    number_of_counters,
    invalid_counter = -1
  };

 private:
  int _counters_sum[number_of_counters];
  int _counters_max[number_of_counters];

  void inc_counter(Counter idx, int value = 1) { _counters_sum[idx] += value; }

  const char* counter_name(int counter_idx);
  Counter     base_counter(int counter_idx);

  void sum_up(LinearScanStatistic &method_statistic);
  void collect(LinearScan* allocator);

 public:
  LinearScanStatistic();
  void print(const char* title);
  static void compute(LinearScan* allocator, LinearScanStatistic &global_statistic);
};


// Helper class for collecting the compilation time of LinearScan
class LinearScanTimers : public StackObj {
 public:
  enum Timer {
    timer_do_nothing,
    timer_number_instructions,
    timer_compute_local_live_sets,
    timer_compute_global_live_sets,
    timer_build_intervals,
    timer_sort_intervals_before,
    timer_allocate_registers,
    timer_resolve_data_flow,
    timer_sort_intervals_after,
    timer_eliminate_spill_moves,
    timer_assign_reg_num,
    timer_allocate_fpu_stack,
    timer_optimize_lir,

    number_of_timers
  };

 private:
  elapsedTimer _timers[number_of_timers];
  const char*  timer_name(int idx);

 public:
  LinearScanTimers();

  void begin_method();                    // called for each method when register allocation starts
  void end_method(LinearScan* allocator); // called for each method when register allocation has completed
  void print(double total_time);          // called before termination of the VM to print the global summary

  elapsedTimer* timer(int idx) { return &(_timers[idx]); }
};


#endif // ifndef PRODUCT


// Pick up platform-dependent implementation details
#ifdef TARGET_ARCH_x86
# include "c1_LinearScan_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "c1_LinearScan_sparc.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "c1_LinearScan_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "c1_LinearScan_ppc.hpp"
#endif


#endif // SHARE_VM_C1_C1_LINEARSCAN_HPP