// c1_LinearScan.hpp
class DebugInfoCache;
class FpuStackAllocator;
class IRScopeDebugInfo;
class IntervalWalker;
class LIRGenerator;
class LinearScan;
class MoveResolver;

enum IntervalUseKind {
enum IntervalKind {
enum IntervalState {
enum IntervalSpillState {
  // Note: two consecutive definitions are treated as one (e.g. consecutive move and add because of two-operand LIR form)
  storeAtDefinition, // the interval should be stored immediately after its definition because otherwise
                     // there would be multiple spill moves
  startInMemory,     // the interval starts in memory (e.g. method parameter), so a store is never necessary
  noOptimization     // the interval has more than one definition (e.g. resulting from phi moves), so stores to memory are not optimized
};
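// Illustrative sketch (not part of this file): how a spill state could be
// derived from what is known about an interval's definitions. The helper name
// and parameters below are hypothetical; the real logic is spread over
// LinearScan and Interval.
static IntervalSpillState sketch_pick_spill_state(int num_definitions, bool starts_in_memory) {
  if (starts_in_memory)     return startInMemory;     // e.g. incoming argument: value is already on the stack, never store
  if (num_definitions == 1) return storeAtDefinition; // a single store right after the definition covers all later spills
  return noOptimization;                              // several definitions (e.g. phi moves): keep every spill move
}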
for (LIR_OpVisitState::OprMode mode = LIR_OpVisitState::firstMode; mode < LIR_OpVisitState::numModes; mode = (LIR_OpVisitState::OprMode)(mode + 1))
friend class Interval;
friend class IntervalWalker;
friend class LinearScanWalker;
friend class FpuStackAllocator;
friend class MoveResolver;
friend class LinearScanStatistic;
friend class LinearScanTimers;
friend class RegisterVerifier;
BlockList _cached_blocks; // cached list of all blocks in linear-scan order (only valid as long as the original list is unchanged)
int _num_virtual_regs; // number of virtual registers (not counting registers introduced by splitting intervals)
bool _has_fpu_registers; // true if this method uses any floating point registers (and so fpu stack allocation is necessary)
int _unused_spill_slot; // unused spill slot for a single-word value because of alignment of a double-word value
IntervalList* _new_intervals_from_allocation; // list with all intervals created during allocation when an existing interval is split
BlockBeginArray _block_of_op; // mapping from LIR_Op id to the BlockBegin containing this instruction
int block_count() const { assert(_cached_blocks.length() == ir()->linear_scan_order()->length(), "invalid cached block list"); return _cached_blocks.length(); }
BlockBegin* block_at(int idx) const { assert(_cached_blocks.at(idx) == ir()->linear_scan_order()->at(idx), "invalid cached block list"); return _cached_blocks.at(idx); }
bool is_interval_in_loop(int interval, int loop) const { return _interval_in_loop.at(interval, loop); }
#ifdef X86
// on x86 the allocator may additionally need FPU stack allocation (see allocate_fpu_stack() and FpuStackAllocator)
#else
bool use_fpu_stack_allocation() const { return false; }
#endif
int max_lir_op_id() const { assert(_lir_ops.length() > 0, "no operations"); return (_lir_ops.length() - 1) << 1; }
LIR_Op* lir_op_with_id(int op_id) const { assert(op_id >= 0 && op_id <= max_lir_op_id() && op_id % 2 == 0, "op_id out of range or not even"); return _lir_ops.at(op_id >> 1); }
BlockBegin* block_of_op_with_id(int op_id) const { assert(_block_of_op.length() > 0 && op_id >= 0 && op_id <= max_lir_op_id() + 1, "op_id out of range"); return _block_of_op.at(op_id >> 1); }
bool is_block_begin(int op_id) { return op_id == 0 || block_of_op_with_id(op_id) != block_of_op_with_id(op_id - 1); }
bool covers_block_begin(int op_id_1, int op_id_2) { return block_of_op_with_id(op_id_1) != block_of_op_with_id(op_id_2); }
bool has_call(int op_id) { assert(op_id % 2 == 0, "must be even"); return _has_call.at(op_id >> 1); }
bool has_info(int op_id) { assert(op_id % 2 == 0, "must be even"); return _has_info.at(op_id >> 1); }
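// Minimal sketch (an assumption consistent with the helpers above): LIR
// operations are numbered with even op_ids, so an op_id maps to an index into
// _lir_ops by a right shift, and the odd op_ids in between remain free for
// moves inserted during allocation. The helper names are only illustrative.
static int sketch_op_id_to_index(int op_id) { return op_id >> 1; } // valid for even op_ids
static int sketch_index_to_op_id(int index) { return index << 1; } // 0, 2, 4, ...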
void propagate_spill_slots();
void eliminate_spill_moves();
void number_instructions();
void compute_local_live_sets();
void compute_global_live_sets();
void build_intervals();
void create_unhandled_lists(Interval** list1, Interval** list2, bool (is_list1)(const Interval* i), bool (is_list2)(const Interval* i));
void sort_intervals_before_allocation();
void sort_intervals_after_allocation();
void allocate_registers();
void resolve_collect_mappings(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);
void resolve_find_insert_pos(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);
void resolve_data_flow();
void resolve_exception_edge(XHandler* handler, int throwing_op_id, int reg_num, Phi* phi, MoveResolver &move_resolver);
void resolve_exception_handlers();
void init_compute_debug_info();
IRScopeDebugInfo* compute_debug_info_for_scope(int op_id, IRScope* cur_scope, ValueStack* cur_state, ValueStack* innermost_state);
void assign_reg_num();
void allocate_fpu_stack();
#ifndef PRODUCT
#ifdef ASSERT
void verify();
void verify_intervals();
void verify_constants();
void verify_registers();
void do_linear_scan();
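// Sketch of the phase ordering implied by the declarations above (an
// assumption about how do_linear_scan() chains the passes; the actual driver
// lives in the .cpp file):
//
//   number_instructions();               // give every LIR_Op an even op_id
//   compute_local_live_sets();           // live_gen / live_kill per block
//   compute_global_live_sets();          // iterative dataflow: live_in / live_out
//   build_intervals();                   // one interval per virtual register
//   sort_intervals_before_allocation();
//   allocate_registers();                // the linear-scan walk itself
//   resolve_data_flow();                 // insert moves at block boundaries
//   resolve_exception_handlers();
//   propagate_spill_slots();
//   sort_intervals_after_allocation();
//   eliminate_spill_moves();
//   assign_reg_num();                    // rewrite virtual registers in the LIR
//   allocate_fpu_stack();                // only if FPU registers are used (x86)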
#ifndef PRODUCT
static void print_statistics();
int _insert_idx;
bool _multiple_reads_allowed;
int register_blocked(int reg) { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); return _register_blocked[reg]; }
void set_register_blocked(int reg, int direction) { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); assert(direction == 1 || direction == -1, "must be 1 or -1"); _register_blocked[reg] += direction; }
void append_insertion_buffer();
void resolve_mappings();
void resolve_and_append_moves();
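// Standalone sketch (an assumption, not the real MoveResolver): what the
// per-register block counts above are for. A pending mapping blocks its source
// register, and a move is only emitted once its destination register is no
// longer blocked, i.e. no other pending mapping still reads it. The struct and
// member names here are made up for illustration.
struct SketchMoveResolver {
  enum { sketch_nof_regs = 32 };       // assumed register count, only for the sketch
  int _blocked[sketch_nof_regs];
  SketchMoveResolver()                { for (int i = 0; i < sketch_nof_regs; i++) _blocked[i] = 0; }
  void mapping_added(int src_reg)     { _blocked[src_reg]++; }     // src must stay intact until emitted
  bool can_emit_move_to(int dst_reg)  { return _blocked[dst_reg] == 0; }
  void mapping_emitted(int src_reg)   { _blocked[src_reg]--; }     // src value has been consumed
};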
friend class Interval;
int _reg_num;
int _assigned_reg;
int _assigned_regHi;
IntervalList _split_children; // list of all intervals split off from this interval (only available for split parents)
Interval* _current_split_child; // the split child that was active or inactive most recently (always stored in the split parent)
int _canonical_spill_slot; // the stack slot to which all split parts of this interval are spilled (always stored in the split parent)
bool _insert_move_when_activated; // true if a move is inserted between _current_split_child and this interval when this interval becomes active for the first time
int calc_to();
BasicType type() const { assert(_reg_num == -1 || _reg_num >= LIR_OprDesc::vreg_base, "cannot access type for fixed interval"); return _type; }
void set_type(BasicType type) { assert(_reg_num < LIR_OprDesc::vreg_base || _type == T_ILLEGAL || _type == type, "overwriting existing type"); _type = type; }
int to() { if (_cached_to == -1) _cached_to = calc_to(); assert(_cached_to == calc_to(), "invalid cached value"); return _cached_to; }
Interval* split_parent() const { assert(_split_parent->is_split_parent(), "must be"); return _split_parent; }
void set_canonical_spill_slot(int slot) { assert(split_parent()->_canonical_spill_slot == -1, "overwriting existing value"); split_parent()->_canonical_spill_slot = slot; }
void set_spill_state(IntervalSpillState state) { assert(state >= spill_state(), "state cannot decrease"); split_parent()->_spill_state = state; }
void set_spill_definition_pos(int pos) { assert(spill_definition_pos() == -1, "cannot set the position twice"); split_parent()->_spill_definition_pos = pos; }
bool always_in_memory() const { return split_parent()->_spill_state == storeAtDefinition || split_parent()->_spill_state == startInMemory; }
int first_usage(IntervalUseKind min_use_kind) const; // id of the first operation requiring this interval in a register
int next_usage(IntervalUseKind min_use_kind, int from) const; // id of next usage seen from the given position
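// Standalone sketch (an assumption about the delegation pattern used above):
// state that must be shared by all parts of a split interval, such as the
// canonical spill slot and the spill state, is stored only in the split parent
// and reached through it from every split child. Names are illustrative only.
struct SketchSplitInterval {
  SketchSplitInterval* _split_parent;   // points to itself for a split parent
  int                  _spill_slot;     // only meaningful in the split parent
  SketchSplitInterval() : _split_parent(this), _spill_slot(-1) {}
  SketchSplitInterval* split_parent()   { return _split_parent; }
  int  spill_slot()                     { return split_parent()->_spill_slot; }
  void set_spill_slot(int slot)         { split_parent()->_spill_slot = slot; }
};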
Interval* _unhandled_first[nofKinds]; // sorted list of intervals that are not live before the current position
Interval* _active_first   [nofKinds]; // sorted list of intervals that are live at the current position
Interval* _inactive_first [nofKinds]; // sorted list of intervals that are in a lifetime hole at the current position
void check_bounds(IntervalKind kind) { assert(kind >= fixedKind && kind <= anyKind, "invalid interval_kind"); }
Interval** unhandled_first_addr(IntervalKind kind) { check_bounds(kind); return &_unhandled_first[kind]; }
Interval** active_first_addr(IntervalKind kind) { check_bounds(kind); return &_active_first[kind]; }
Interval** inactive_first_addr(IntervalKind kind) { check_bounds(kind); return &_inactive_first[kind]; }
void next_interval();
// activate_current() is called when an unhandled interval (available via current() and current_kind()) becomes active.
virtual bool activate_current() { return true; }
virtual void interval_moved(Interval* interval, IntervalKind kind, IntervalState from, IntervalState to);
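// Standalone sketch (an assumption, simplified from what the walker tracks
// incrementally with the lists above): the state of an interval at a given
// position. The real transitions are performed list-to-list as the position
// advances, with interval_moved() reporting each change.
enum SketchWalkState { sketchUnhandled, sketchActive, sketchInactive, sketchHandled };
static SketchWalkState sketch_state_at(int from, int to, bool covered_at_position, int position) {
  if (position < from)  return sketchUnhandled;   // not yet reached
  if (position >= to)   return sketchHandled;     // live range is over
  return covered_at_position ? sketchActive       // inside one of the interval's ranges
                             : sketchInactive;    // inside a lifetime hole
}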
IntervalWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first);
BlockBegin* block_of_op_with_id(int op_id) const { return allocator()->block_of_op_with_id(op_id); }
void free_exclude_active_fixed();
void free_exclude_active_any();
void spill_exclude_active_fixed();
void spill_collect_active_any();
int find_optimal_split_pos(Interval* it, int min_split_pos, int max_split_pos, bool do_loop_optimization);
int find_free_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split);
int find_locked_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split);
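// Standalone sketch (an assumption, much simplified from find_free_reg()):
// among the candidate registers, pick the one that stays free the longest; if
// even that one is not free until the end of the interval, the caller has to
// split the interval where the register is needed by someone else.
static int sketch_pick_free_reg(const int* free_until_pos, int num_regs, int interval_to, bool* need_split) {
  int best = -1;
  for (int r = 0; r < num_regs; r++) {
    if (best == -1 || free_until_pos[r] > free_until_pos[best]) best = r;
  }
  if (best == -1 || free_until_pos[best] <= 0) return -1; // no register is free at all
  *need_split = free_until_pos[best] < interval_to;       // register only free for a prefix of the interval
  return best;
}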
bool activate_current();
LinearScanWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first);
void init_instructions();
#ifndef PRODUCT
enum Counter {
enum Timer {
void end_method(LinearScan* allocator); // called for each method after register allocation has completed
# include "incls/_c1_LinearScan_pd.hpp.incl"