Lines Matching refs:to

101 noOptimization // the interval has more than one definition (e.g. resulting from phi moves), so stores to memory are not optimized
116 // Only the small interface to the rest of the compiler is public
145 int _max_spills; // number of stack slots used for intervals allocated to memory
148 IntervalList _intervals; // mapping from register number to interval
151 bool _needs_full_resort; // set to true if an Interval::from() is changed and _sorted_intervals must be resorted
153 LIR_OpArray _lir_ops; // mapping from LIR_Op id to LIR_Op node
154 BlockBeginArray _block_of_op; // mapping from LIR_Op id to the BlockBegin containing this instruction
159 // cached debug info to prevent creating the same object multiple times
179 // access to block list (sorted in linear scan order)
199 // access to interval list
205 // access to LIR_Ops and Blocks indexed by op_id
217 // functions for converting LIR-Operands to register numbers
242 void copy_register_flags(Interval* from, Interval* to);
250 // spill move optimization: eliminate moves from register to stack if
251 // stack slot is known to be correct
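
A hedged illustration of this optimization (the field and function names below are hypothetical; the real pass inspects LIR move operations, which are omitted): a store from register to stack is redundant when the spill slot already holds the current value.

  // Illustrative only: an interval remembers whether its canonical spill
  // slot already contains the current value of the register.
  struct IntervalSketch {
    int  canonical_spill_slot;    // -1 while no slot is assigned
    bool spill_slot_up_to_date;   // set after the first store, cleared on redefinition
  };

  // A move "register -> stack" may be deleted when the target slot is the
  // canonical slot and that slot is known to be correct.
  bool can_eliminate_spill_store(const IntervalSketch& it, int target_slot) {
    return it.canonical_spill_slot == target_slot && it.spill_slot_up_to_date;
  }
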
268 // Phase 3: perform a backward dataflow analysis to compute global live sets
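
A hedged, self-contained sketch of such a pass (not the HotSpot code; Block, the bitset bound, and the gen/kill field names are assumptions): iterate backwards over the blocks until the live sets reach a fixed point.

  #include <vector>
  #include <bitset>

  const int MAX_VREGS = 1024;                 // assumed bound on virtual register numbers
  typedef std::bitset<MAX_VREGS> LiveSet;

  struct Block {
    std::vector<Block*> successors;
    LiveSet live_gen, live_kill;              // local sets: used-before-defined / defined
    LiveSet live_in, live_out;                // global sets computed below
  };

  void compute_global_live_sets(std::vector<Block*>& blocks) {  // in linear scan order
    bool changed;
    do {
      changed = false;
      for (int i = (int)blocks.size() - 1; i >= 0; i--) {       // backward over the blocks
        Block* b = blocks[i];
        LiveSet out;
        for (Block* sux : b->successors) out |= sux->live_in;   // live_out = union of successors' live_in
        LiveSet in = (out & ~b->live_kill) | b->live_gen;       // live_in = gen | (out - kill)
        if (in != b->live_in || out != b->live_out) {
          b->live_in = in; b->live_out = out; changed = true;
        }
      }
    } while (changed);
  }
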
277 void add_use (Value value, int from, int to, IntervalUseKind use_kind);
280 void add_use (LIR_Opr opr, int from, int to, IntervalUseKind use_kind);
284 void add_use (int reg_num, int from, int to, IntervalUseKind use_kind, BasicType type);
288 // to add platform-dependent behaviour for some operations.
332 // Phase 7: assign register numbers back to LIR
417 // ordered such that no register is overwritten. So moves from register to stack
418 // are processed prior to moves from stack to register. When moves have circular
419 // dependencies, a temporary stack slot is used to break the cycle.
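
A hedged, self-contained sketch of that strategy (the Move struct, the emit() stand-in, and the temp-slot encoding are invented here, not the real MoveResolver): emit every move whose destination is not read by another pending move; when only cycles remain, detour one source through the temporary stack slot.

  #include <vector>
  #include <cstdio>

  struct Move { int from; int to; };          // >= 0: register number, -1: temporary stack slot

  static void emit(const Move& m) {
    printf("move %d -> %d\n", m.from, m.to);  // stand-in for emitting a real move
  }

  void resolve(std::vector<Move> pending) {
    while (!pending.empty()) {
      bool progress = false;
      for (size_t i = 0; i < pending.size() && !progress; i++) {
        bool dest_still_read = false;         // safe iff no other pending move reads the destination
        for (size_t j = 0; j < pending.size(); j++)
          if (j != i && pending[j].from == pending[i].to) dest_still_read = true;
        if (!dest_still_read) {
          emit(pending[i]);
          pending.erase(pending.begin() + i);
          progress = true;
        }
      }
      if (!progress) {                        // only cycles remain: break one via the temp slot
        int victim = pending[0].from;
        printf("spill %d -> temp\n", victim);
        for (Move& m : pending)
          if (m.from == victim) m.from = -1;  // readers of the victim now read the temp slot
      }
    }
  }
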
441 bool save_to_process_move(Interval* from, Interval* to);
457 void add_mapping(Interval* from, Interval* to);
458 void add_mapping(LIR_Opr from, Interval* to);
470 static Range* _end; // sentinel (from == to == max_jint)
473 int _to; // to (exclusive)
481 Range(int from, int to, Range* next);
487 int to() const { return _to; }
490 void set_to(int to) { _to = to; }
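
Putting the declarations above together: a Range is a half-open [from, to) span in a singly linked, sorted list that ends with the shared _end sentinel. A minimal sketch of that shape (the method bodies and the covers() helper are assumptions):

  #include <climits>

  class Range {
   private:
    int    _from;         // from (inclusive)
    int    _to;           // to (exclusive)
    Range* _next;         // linked list of ranges, terminated by the sentinel

    static Range* _end;   // sentinel: from == to == INT_MAX (max_jint)

   public:
    Range(int from, int to, Range* next) : _from(from), _to(to), _next(next) {}

    int    from() const { return _from; }
    int    to()   const { return _to; }
    Range* next() const { return _next; }
    void   set_to(int to) { _to = to; }

    // An op_id lies in the range iff from() <= op_id < to().
    bool covers(int op_id) const { return _from <= op_id && op_id < _to; }
  };

  Range* Range::_end = new Range(INT_MAX, INT_MAX, nullptr);
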
508 // assigned to the interval, assigned_reg contains the
525 IntervalState _state; // interval iteration: the set this interval currently belongs to
531 int _cached_to; // cached value: to of last range (-1: not cached)
539 int _canonical_spill_slot; // the stack slot where all split parts of this interval are spilled to (always stored in split parents)
561 int to() { if (_cached_to == -1) _cached_to = calc_to(); assert(_cached_to == calc_to(), "invalid cached value"); return _cached_to; }
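
The accessor above memoizes calc_to() and re-verifies the cache in debug builds. A self-contained approximation of the walk it caches (RangeNode and the self-referential sentinel are stand-ins for this file's Range and its _end):

  #include <climits>

  struct RangeNode { int from; int to; RangeNode* next; };
  static RangeNode sentinel = { INT_MAX, INT_MAX, &sentinel };

  // The maximum 'to' of an interval is the 'to' of the last range in the
  // sorted list; precondition: first != &sentinel (the interval is non-empty).
  int calc_to(RangeNode* first) {
    RangeNode* r = first;
    while (r->next != &sentinel) r = r->next;
    return r->to;     // to() caches this result in _cached_to
  }
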
579 // access to split parent and split children
605 // caching of values that take time to compute and are used multiple times
611 // access to use positions
619 void add_range(int from, int to);
626 bool has_hole_between(int from, int to);
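
Reusing the RangeNode list from the calc_to() sketch above, a plausible hole test (assuming ranges are sorted, non-overlapping, and separated by gaps): [from, to) spans a hole exactly when no single range contains it completely.

  bool has_hole_between(RangeNode* first, int hole_from, int hole_to) {
    for (RangeNode* r = first; r != &sentinel; r = r->next) {
      if (r->from <= hole_from && hole_to <= r->to) {
        return false;                 // fully inside one range -> no hole
      }
    }
    return true;                      // crosses a gap between ranges
  }
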
634 int current_to() const { return _current->to(); }
686 // It is safe to append current to any interval list but the unhandled list.
689 // interval_moved() is called whenever an interval moves from one interval list to another.
690 // The implementation of this method must not move the interval to any list.
691 virtual void interval_moved(Interval* interval, IntervalKind kind, IntervalState from, IntervalState to);
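
A simplified, self-contained illustration of that contract (the enum values and class shapes below are stand-ins for the real ones in this file): the override may observe the transition but must not move the interval itself.

  #include <cstdio>

  enum IntervalState { unhandledState, activeState, inactiveState, handledState };
  enum IntervalKind  { fixedKind, anyKind };
  struct Interval    { int reg_num; };

  struct IntervalWalker {
    virtual ~IntervalWalker() {}
    // Observing the transition (logging, statistics) is fine; moving the
    // interval to any interval list from inside this hook is not.
    virtual void interval_moved(Interval* it, IntervalKind kind,
                                IntervalState from, IntervalState to) {
      printf("interval %d (kind %d): state %d -> %d\n",
             it->reg_num, (int)kind, (int)from, (int)to);
    }
  };
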
729 // accessors mapped to the same functions in class LinearScan
777 bool is_move(LIR_Op* op, Interval* from, Interval* to);
966 void print(double total_time); // called before VM termination to print a global summary