Lines Matching refs:at

287   assert(_intervals.at(reg_num) == NULL, "overwriting existing interval");
351 assert(false, "other states not allowed at this time");
364 // at the definition of the interval -> move write to memory out of loop
365 // by storing at definition of the interval
369 // reason to store the interval at the definition
377 // memory at the definition
390 assert(false, "other states not allowed at this time");
422 assert(temp->spill_definition_pos() <= temp->from() + 2, "only intervals defined once at their start-pos can be optimized");
424 TRACE_LINEAR_SCAN(4, tty->print_cr("interval %d (from %d to %d) must be stored at %d", temp->reg_num(), temp->from(), temp->to(), temp->spill_definition_pos()));
440 LIR_Op* op = instructions->at(j);
477 TRACE_LINEAR_SCAN(4, tty->print_cr("inserting move after definition of interval %d to stack slot %d at op_id %d", interval->reg_num(), interval->canonical_spill_slot() - LinearScan::nof_regs, op_id));
526 LIR_Op* op = instructions->at(j);
561 if (!live_kill.at(reg)) {
589 // Phi functions at the beginning of an exception handler are
590 // implicitly defined (= killed) at the beginning of the block.
600 assert(visitor.no_operands(instructions->at(0)), "first operation must always be a label");
602 LIR_Op* op = instructions->at(j);
625 if (!live_kill.at(reg)) {
627 TRACE_LINEAR_SCAN(4, tty->print_cr(" Setting live_gen for register %d at instruction %d", reg, op->id()));
636 // fixed intervals are never live at block boundaries, so
643 assert(live_kill.at(reg), "using fixed register that is not defined in this block");
647 assert(live_kill.at(reg), "using fixed register that is not defined in this block");
680 // fixed intervals are never live at block boundaries, so
713 // fixed intervals are never live at block boundaries, so
830 // check that fixed intervals are not live at block boundaries
831 // (live set must be empty at fixed intervals)
835 assert(block->live_in().at(j) == false, "live_in set of fixed register must be empty");
836 assert(block->live_out().at(j) == false, "live_out set of fixed register must be empty");
837 assert(block->live_gen().at(j) == false, "live_gen set of fixed register must be empty");
853 if (ir()->start()->live_in().at(i)) {
859 if (block->live_gen().at(i)) {
862 if (block->live_kill().at(i)) {
975 TRACE_LINEAR_SCAN(2, tty->print_cr("Warning: def of reg %d at %d occurs without use", reg_num, def_pos));
988 TRACE_LINEAR_SCAN(2, tty->print_cr("Warning: dead value %d at %d in live intervals", reg_num, def_pos));
1242 TRACE_LINEAR_SCAN(4, tty->print_cr("operation at op_id %d: added hint from interval %d to %d", move->id(), from->reg_num(), to->reg_num()));
1259 TRACE_LINEAR_SCAN(4, tty->print_cr("operation at op_id %d: added hint from interval %d to %d", cmove->id(), from->reg_num(), to->reg_num()));
1327 assert(block_from == instructions->at(0)->id(), "must be");
1328 assert(block_to == instructions->at(instructions->length() - 1)->id(), "must be");
1330 // Update intervals for registers live at the end of this block;
1334 assert(live.at(number), "should not stop here otherwise");
1354 assert(visitor.no_operands(instructions->at(0)), "first operation must always be a label");
1356 LIR_Op* op = instructions->at(j);
1400 // to a call site, the value would be in a register at the call otherwise)
1453 Interval* it = intervals->at(i);
1468 if (interval_at(i) == intervals->at(j)) {
1478 if (interval_at(i) == intervals->at(j)) {
1509 v = _sorted_intervals->at(i);
1547 if (unsorted_list->at(unsorted_idx) != NULL) {
1556 Interval* cur_interval = unsorted_list->at(unsorted_idx);
1568 for (j = sorted_idx - 1; j >= 0 && cur_from < sorted_list->at(j)->from(); j--) {
1569 sorted_list->at_put(j + 1, sorted_list->at(j));
1609 if (new_idx >= new_len || (old_idx < old_len && old_list->at(old_idx)->from() <= new_list->at(new_idx)->from())) {
1610 combined_list->at_put(old_idx + new_idx, old_list->at(old_idx));
1613 combined_list->at_put(old_idx + new_idx, new_list->at(new_idx));
1656 // (insert moves at edges between blocks if intervals have been split)
1706 assert(from_block->live_out().at(r) && to_block->live_in().at(r), "interval not live at this edge");
1721 TRACE_LINEAR_SCAN(4, tty->print_cr("inserting moves at end of from_block B%d", from_block->block_id()));
1734 TRACE_LINEAR_SCAN(4, tty->print_cr("inserting moves at beginning of to_block B%d", to_block->block_id()));
1736 assert(from_block->lir()->instructions_list()->at(0)->as_OpLabel() != NULL, "block does not start with a label");
1752 // insert necessary moves (spilling or reloading) at edges between blocks if interval has been split
1768 assert(instructions->at(0)->code() == lir_label, "block must start with label");
1778 if (!block_completed.at(pred->linear_scan_number()) && !block_completed.at(sux->linear_scan_number())) {
1782 // directly resolve between pred and sux (without looking at the empty block between)
1795 if (!block_completed.at(i)) {
1804 if (!already_resolved.at(to_block->linear_scan_number())) {
1838 // * the interval would be on the fpu stack at the beginning of the exception handler
1907 // interval at the throwing instruction must be searched using the operands
1920 // search split child at the throwing op_id
1927 // search split child at the throwing op_id
1989 assert(visitor.no_operands(ops->at(0)), "first operation must always be a label");
1991 LIR_Op* op = ops->at(j);
2173 // check if spill moves could have been appended at the end of this block, but
2178 if (block->live_out().at(opr->vreg_number())) {
2201 assert(op_id == -1 || !is_block_begin(op_id), "holes at begin of block may also result from control flow");
2222 ScopeValue* value = values->at(i);
2237 MonitorValue* value = values->at(i);
2285 assert_equal(d1->locals()->at(i), d2->locals()->at(i));
2295 assert_equal(d1->expressions()->at(i), d2->expressions()->at(i));
2305 assert_equal(d1->monitors()->at(i), d2->monitors()->at(i));
2339 assert(stack_end >= -Bytecodes::depth(code), "must have non-empty expression stack at if bytecode");
2366 TRACE_LINEAR_SCAN(3, tty->print_cr("creating oop map at op_id %d", op->id()));
2368 // walk before the current operation -> intervals that start at
2396 // start or end at the current operation are not included in the
2398 // moves, any intervals which end at this instruction are included
2403 // caller-save registers must not be included into oop-maps at calls
2404 assert(!is_call_site || assigned_reg >= nof_regs || !is_caller_save(assigned_reg), "interval is in a caller-save register at a call -> register will be overwritten");
2572 ScopeValue* sv = _scope_value_cache.at(cache_idx);
2590 ScopeValue* sv = _scope_value_cache.at(cache_idx);
2800 // and so the wrong operand would be returned (spill moves at block boundaries are not
2804 if (block->live_out().at(opr->vreg_number())) {
2815 assert(!has_call(op_id) || opr->is_stack() || !is_caller_save(reg_num(opr)), "can not have caller-save register operands at calls");
2905 TRACE_LINEAR_SCAN(3, tty->print_cr("creating debug information at op_id %d", op_id));
2930 LIR_Op* op = instructions->at(j);
3007 LIR_Op* op = instructions->at(j);
3114 if (b.at(i)) tty->print("%d ", i);
3274 LIR_Op* op = instructions->at(j);
3327 // oop-maps at calls do not contain registers, so check is not needed
3402 IntervalList* state_for_block(BlockBegin* block) { return _saved_states.at(block->block_id()); }
3439 LIR_Opr opr = args->at(n);
3457 BlockBegin* block = _work_list.at(0);
3474 if (input_state->at(i) != NULL) {
3475 tty->print(" %4d", input_state->at(i)->reg_num());
3514 if (input_state->at(i) != saved_state->at(i)) {
3517 if (saved_state->at(i) != NULL) {
3558 } else if (input_state->at(reg) != NULL) {
3568 if (input_state->at(reg) != interval) {
3582 LIR_Op* op = ops->at(i);
3610 // invalidate all caller save registers at calls
3708 assert(_mapping_from.at(i) == NULL || _mapping_from.at(i) != _mapping_from.at(j), "cannot read from same interval twice");
3715 assert(_mapping_to.at(i) != _mapping_to.at(j), "cannot write to same interval twice");
3724 Interval* it = _mapping_from.at(i);
3726 assert(!used_regs.at(it->assigned_reg()), "cannot read from same register twice");
3730 assert(!used_regs.at(it->assigned_regHi()), "cannot read from same register twice");
3739 Interval* it = _mapping_to.at(i);
3740 assert(!used_regs.at(it->assigned_reg()), "cannot write to same register twice");
3744 assert(!used_regs.at(it->assigned_regHi()), "cannot write to same register twice");
3751 Interval* it = _mapping_from.at(i);
3757 Interval* it = _mapping_to.at(i);
3758 assert(!used_regs.at(it->assigned_reg()) || it->assigned_reg() == _mapping_from.at(i)->assigned_reg(), "stack slots used in _mapping_from must be disjoint from _mapping_to");
3875 Interval* from_interval = _mapping_from.at(i);
3886 Interval* from_interval = _mapping_from.at(i);
3887 Interval* to_interval = _mapping_to.at(i);
3895 insert_move(_mapping_from_opr.at(i), to_interval);
3915 Interval* from_interval = _mapping_from.at(spill_candidate);
4111 Interval* i1 = _split_children.at(i);
4118 Interval* i2 = _split_children.at(j);
4125 assert(i2->from() < i1->from(), "intervals start at same op_id");
4149 Interval* cur = _register_hint->_split_children.at(i);
4179 Interval* cur = _split_children.at(i);
4183 _split_children.at_put(i, _split_children.at(0));
4195 Interval* tmp = _split_children.at(i);
4224 Interval* cur = parent->_split_children.at(i);
4248 Interval* cur = _split_children.at(i);
4263 if (_use_pos_and_kinds.at(i + 1) >= min_use_kind) {
4264 return _use_pos_and_kinds.at(i);
4274 if (_use_pos_and_kinds.at(i) >= from && _use_pos_and_kinds.at(i + 1) >= min_use_kind) {
4275 return _use_pos_and_kinds.at(i);
4285 if (_use_pos_and_kinds.at(i) >= from && _use_pos_and_kinds.at(i + 1) == exact_use_kind) {
4286 return _use_pos_and_kinds.at(i);
4297 if (_use_pos_and_kinds.at(i) > from) {
4300 if (_use_pos_and_kinds.at(i + 1) >= min_use_kind) {
4301 prev = _use_pos_and_kinds.at(i);
4316 assert(pos <= _use_pos_and_kinds.at(i), "already added a use-position with lower position");
4317 assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
4319 assert(_use_pos_and_kinds.at(i) < _use_pos_and_kinds.at(i - 2), "not sorted descending");
4327 if (len == 0 || _use_pos_and_kinds.at(len - 2) > pos) {
4330 } else if (_use_pos_and_kinds.at(len - 1) < use_kind) {
4331 assert(_use_pos_and_kinds.at(len - 2) == pos, "list not sorted correctly");
4339 assert(first() == Range::end() || to < first()->next()->from(), "not inserting at begin of interval");
4340 assert(from <= first()->to(), "not inserting at begin of interval");
4363 assert(is_split_parent(), "list must be initialized at first split");
4373 // split this interval at the specified position and return
4414 while (start_idx >= 0 && _use_pos_and_kinds.at(start_idx) < split_pos) {
4421 new_use_pos_and_kinds.append(_use_pos_and_kinds.at(i));
4434 assert(_use_pos_and_kinds.at(i) < split_pos, "must be");
4435 assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
4438 assert(result->_use_pos_and_kinds.at(i) >= split_pos, "must be");
4439 assert(result->_use_pos_and_kinds.at(i + 1) >= firstValidKind && result->_use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
4446 // split this interval at the specified position and return
4465 assert(_first->next() != Range::end(), "must not be at end");
4526 const char* SpillState2Name[] = { "no definition", "no spill store", "one spill store", "store at definition", "start in memory", "no optimization" };
4573 assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
4574 assert(prev < _use_pos_and_kinds.at(i), "use positions not sorted");
4576 out->print("%d %s ", _use_pos_and_kinds.at(i), UseKind2Name[_use_pos_and_kinds.at(i + 1)]);
4577 prev = _use_pos_and_kinds.at(i);
4605 // append interval at top of list
4729 // intervals may start at same position -> prefer fixed interval
4734 assert(any == Interval::end() || fixed == Interval::end() || any->from() != fixed->from() || kind == fixedKind, "if fixed and any-Interval start at same position, fixed must be processed first");
4979 assert(op_id > 0 && allocator()->block_of_op_with_id(op_id - 2) == op_block, "cannot insert move at block boundary");
4987 int index = (op_id - list->at(0)->id()) / 2;
4988 assert(list->at(index)->id() <= op_id, "error in calculation");
4990 while (list->at(index)->id() != op_id) {
4995 assert(list->at(index)->id() == op_id, "error in calculation");
4997 // insert new instruction before instruction at position index
5011 // Try to split at end of max_block. If this would be after
5023 // block with lower loop-depth found -> split at the end of this block
5045 // reason for using min_split_pos - 1: when the minimal split pos is exactly at the
5051 // when an interval ends at the end of the last block of the method
5053 // block at this op_id)
5067 TRACE_LINEAR_SCAN(4, tty->print_cr(" interval has hole just before max_split_pos, so splitting at max_split_pos"));
5078 TRACE_LINEAR_SCAN(4, tty->print_cr(" loop optimization: loop end found at pos %d", loop_end_pos));
5115 split an interval at the optimal position between min_split_pos and
5124 assert(it->from() < min_split_pos, "cannot split at start of interval");
5133 assert(optimal_split_pos > it->from(), "cannot split at start of interval");
5137 // -> no split at all necessary
5138 TRACE_LINEAR_SCAN(4, tty->print_cr(" no split necessary because optimal split position is at end of interval"));
5150 TRACE_LINEAR_SCAN(4, tty->print_cr(" splitting at position %d", optimal_split_pos));
5167 split an interval at the optimal position between min_split_pos and
5183 assert(max_split_pos < it->to(), "cannot split at end of interval");
5188 TRACE_LINEAR_SCAN(2, tty->print_cr(" spilling entire interval because split pos is at beginning of interval"));
5218 assert(optimal_split_pos < it->to(), "cannot split at end of interval");
5226 TRACE_LINEAR_SCAN(4, tty->print_cr(" splitting at position %d", optimal_split_pos));
5278 // at the optimal position before.
5306 // this register is at least free until reg_needed_until
5336 // this register is at least free until reg_needed_until
5363 assert(unhandled_first(fixedKind) == Interval::end(), "must not have unhandled fixed intervals because all fixed intervals have a use at position 0");
5390 // the register must be free at least until this position
5495 Interval* it = _spill_intervals[reg]->at(i);
5503 Interval* it = _spill_intervals[regHi]->at(i);
5521 assert(unhandled_first(fixedKind) == Interval::end(), "must not have unhandled fixed intervals because all fixed intervals have a use at position 0");
5532 tty->print("%d ", _spill_intervals[i]->at(j)->reg_num());
5539 // the register must be free at least until this position
5603 assert(need_split == false || split_pos > cur->from(), "splitting interval at from");
5713 assert(cur->first_usage(mustHaveRegister) == begin_pos, "must have use position at begin of interval because of move");
5714 assert(end_hint->first_usage(mustHaveRegister) == end_pos, "must have use position at end of interval because of move");
5717 // register_hint is not spilled at begin_pos, so it would not be beneficial to immediately spill cur
5723 // delete use positions to prevent the intervals from getting a register at the beginning
5739 // activating an interval that has a stack slot assigned -> split it at first use position
5805 BlockBegin* block = code->at(i);
5831 LIR_OpList* instructions = _edge_instructions.at(edge);
5832 int idx = _edge_instructions_idx.at(edge);
5835 return instructions->at(idx);
5843 LIR_OpList* instructions = _edge_instructions.at(edge);
5844 int idx = _edge_instructions_idx.at(edge);
5855 // at least one block is already empty -> no optimization possible
5889 TRACE_LINEAR_SCAN(4, tty->print_cr("optimizing moves at end of block B%d", block->block_id()));
5924 // ignore the unconditional branch at the end of the block
5942 // insert the instruction at the beginning of the current block
5945 // delete the instruction at the end of all predecessors
5954 TRACE_LINEAR_SCAN(4, tty->print_cr("optimizing moves at begin of block B%d", block->block_id()));
5971 LIR_Op* branch = cur_instructions->at(cur_instructions->length() - 2);
5980 // the instructions are inserted at the end of the block before these two branches
5986 LIR_Op* op = cur_instructions->at(i);
5998 assert(sux_instructions->at(0)->code() == lir_label, "block must start with label");
6008 // ignore the label at the beginning of the block
6025 // insert instruction at end of current block
6029 // delete the instructions at the beginning of all successors
6048 BlockBegin* osr_entry = code->at(0)->end()->as_Base()->osr_entry();
6051 assert(code->at(index) == osr_entry, "wrong index");
6065 while (i < max_end && code->at(i)->loop_depth() >= header_block->loop_depth()) {
6069 if (i == code->length() || code->at(i)->loop_depth() < header_block->loop_depth()) {
6071 BlockBegin* end_block = code->at(end_idx);
6081 code->at_put(j, code->at(j + 1));
6086 assert(code->at(end_idx)->is_set(BlockBegin::backward_branch_target_flag), "must be backward branch target");
6087 code->at(end_idx)->clear(BlockBegin::backward_branch_target_flag);
6088 code->at(header_idx)->set(BlockBegin::backward_branch_target_flag);
6095 BlockBegin* block = code->at(i);
6115 assert(instructions->at(0)->code() == lir_label, "first instruction must always be a label");
6134 assert(instructions->at(0)->code() == lir_label, "first instruction must always be a label");
6136 LIR_Op* op = instructions->at(i);
6158 BlockBegin* block = code->at(old_pos);
6181 BlockBegin* pred = _original_preds.at(j);
6189 code->at_put(new_pos, code->at(old_pos));
6203 BlockBegin* block = code->at(i);
6215 if (last_branch->block() == code->at(i + 1)) {
6217 TRACE_LINEAR_SCAN(3, tty->print_cr("Deleting unconditional branch at end of block B%d", block->block_id()));
6223 LIR_Op* prev_op = instructions->at(instructions->length() - 2);
6231 prev_op = instructions->at(j);
6239 if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
6241 TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
6265 BlockBegin* block = code->at(i);
6269 assert(cur_instructions->at(0)->code() == lir_label, "first instruction must always be a label");
6281 (return_converted.at(block->block_id()) && block->number_of_sux() == 1),
6314 BlockBegin* block = code->at(i);
6319 LIR_OpBranch* op_branch = instructions->at(j)->as_OpBranch();
6492 BlockBegin* cur = allocator->ir()->code()->at(i);
6505 LIR_Op* op = instructions->at(j);
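
Most of the matches above are calls to the bounds-checked at()/at_put() element accessors on the allocator's growable list types (IntervalList, LIR_OpList, and similar), rather than the word "at" in comments. As a rough orientation only, a minimal sketch of that accessor idiom follows; the class name and members are hypothetical and this is not HotSpot's actual GrowableArray implementation.

#include <cassert>

// Hypothetical sketch of a bounds-checked growable-array accessor idiom,
// in the spirit of the at()/at_put() calls matched above.
template <typename E>
class SimpleList {
 private:
  E*  _data;   // backing storage
  int _len;    // number of elements currently in use

 public:
  SimpleList(E* data, int len) : _data(data), _len(len) {}

  // Checked read: asserts on an out-of-range index in debug builds.
  E at(int i) const {
    assert(0 <= i && i < _len && "index out of bounds");
    return _data[i];
  }

  // Checked write, as used in lines like
  //   sorted_list->at_put(j + 1, sorted_list->at(j));
  void at_put(int i, const E& e) {
    assert(0 <= i && i < _len && "index out of bounds");
    _data[i] = e;
  }

  int length() const { return _len; }
};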