/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciCPCache.hpp"
#include "ci/ciInstance.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/bitMap.inline.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/heapRegion.hpp"
#endif
#ifdef ASSERT
#else
#endif
// TODO: ARM - Use some recognizable constant which still fits architectural constraints
#ifdef ARM
#else
#endif
// Initialize array sizes
_vreg_table.trunc_to(0);
}
//--------------------------------------------------------------
// PhiResolver
// Resolves cycles:
//
//   r1 := r2  becomes  temp := r1
//   r2 := r1           r1   := r2
//                      r2   := temp
//
// and orders moves:
//
//   r2 := r3  becomes  r1 := r2
//   r1 := r2           r2 := r3
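//
// Illustrative sketch (assumption: standalone C++ with plain ints standing in
// for registers, not this file's LIR operands; guarded by #if 0 so it is not
// compiled). It implements the strategy described above: emit a move only when
// nothing still needs the old value of its destination, and break a remaining
// cycle by saving one source in a temporary register.
#if 0
#include <cstddef>
#include <cstdio>
#include <vector>

struct ParallelMove { int dst; int src; };

static void resolve_parallel_moves(std::vector<ParallelMove> moves, int temp_reg) {
  while (!moves.empty()) {
    bool progress = false;
    for (size_t i = 0; i < moves.size(); i++) {
      bool dst_still_needed = false;
      for (const ParallelMove& m : moves) {
        if (m.src == moves[i].dst) { dst_still_needed = true; break; }
      }
      if (!dst_still_needed) {
        // Safe to emit: no pending move reads this destination any more.
        std::printf("move r%d := r%d\n", moves[i].dst, moves[i].src);
        moves.erase(moves.begin() + static_cast<long>(i));
        progress = true;
        break;
      }
    }
    if (!progress) {
      // Only cycles remain, e.g. r1 := r2, r2 := r1. Save one destination in
      // the temp and redirect its readers, which yields the sequence shown
      // above: temp := r1; r1 := r2; r2 := temp.
      int victim = moves[0].dst;
      std::printf("move r%d := r%d\n", temp_reg, victim);
      for (ParallelMove& m : moves) {
        if (m.src == victim) m.src = temp_reg;
      }
    }
  }
}
#endif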
{
// reinitialize the shared state arrays
}
}
}
}
// Traverse the assignment graph in depth-first order and generate moves in post order,
// i.e. for the two assignments b := c, a := b, start with node c:
//   Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
//   Generates moves in this order: move b to a, then move c to b
// For the cycle a := b, b := a, start with node a:
//   Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
//   Generates moves in this order: move b to temp, move a to b, then move temp to a
dest->set_visited();
}
} else if (!dest->start_node()) {
// cycle in graph detected
return;
} // else dest is a start node
dest->set_assigned();
dest->set_assigned();
}
}
}
PhiResolver::~PhiResolver() {
int i;
// resolve any cycles in moves from and to virtual registers
node->set_start_node();
}
}
// generate a move from a non-virtual register to an arbitrary destination
}
}
}
if (opr->is_virtual()) {
}
// Make sure that all virtual operands show up in the list when
// they are used as the source of a move.
}
} else {
}
return node;
}
// tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
}
//--------------------------------------------------------------
// LIRItem
assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
if (opr->is_virtual()) {
}
}
if (result()->is_illegal()) {
// update the items result
}
if (!result()->is_register()) {
if (result()->is_constant()) {
} else {
}
}
}
if (!_result->is_constant()) {
}
} else {
load_item();
}
}
if (r != reg) {
// moves between different types need an intervening spill slot
}
#endif
}
}
if (oc) {
return oc->constant_value();
}
return NULL;
}
}
}
}
}
}
//--------------------------------------------------------------
}
#ifndef PRODUCT
if (PrintIRWithLIR) {
}
#endif
// set up the list of LIR instructions
if (LIRTraceExecution &&
}
}
#ifndef PRODUCT
if (PrintIRWithLIR) {
}
#endif
// LIR_Opr for unpinned constants shouldn't be referenced by other
// blocks so clear them out after processing the block.
for (int i = 0; i < _unpinned_constants.length(); i++) {
}
// clear out any registers for other local constants
_constants.trunc_to(0);
}
}
}
//-------------------------LIRGenerator-----------------------------
// This is where the tree walk starts; instr must be a root.
}
// This is called for each node in the tree; the walk stops when a root is reached
// stop the walk when we encounter a root
assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
} else {
// assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
}
}
ValueStack* s = state;
for_each_state(s) {
assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
continue;
}
int index;
}
}
if (bci == SynchronizationEntryBCI) {
if (x->as_ExceptionObject() || x->as_Throw()) {
// all locals are dead on exit from the synthetic unlocker
} else {
assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
}
}
// Degenerate or breakpointed method.
bailout("Degenerate or breakpointed method");
} else {
}
} else {
// NULL out this local so that linear scan can assume that all non-NULL values are live.
s->invalidate_local(index);
}
}
}
}
}
return state_for(x, x->exception_state());
}
} else {
// no patching needed
}
}
if (index->is_constant()) {
} else {
}
}
void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
if (index->is_constant()) {
} else {
}
}
void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
}
switch(code) {
{
if (is_strictfp) {
} else {
}
}
break;
{
bool did_strength_reduce = false;
if (right->is_constant()) {
if (is_power_of_2(c)) {
// do not need tmp here
did_strength_reduce = true;
} else {
}
}
// we couldn't strength reduce so just emit the multiply
if (!did_strength_reduce) {
}
}
break;
// ldiv and lrem are implemented with a direct runtime call
{
if (is_strictfp) {
} else {
}
}
break;
default: ShouldNotReachHere();
}
}
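// Illustrative sketch (assumption: standalone C++ on plain integers, guarded
// by #if 0 and not part of this file's LIR API). It shows the strength
// reduction mentioned above: a multiply by a constant power of two is emitted
// as a left shift, so no real multiply and no temporary register are needed.
#if 0
#include <cstdint>

static bool is_power_of_2_sketch(int64_t c) {
  return c > 0 && (c & (c - 1)) == 0;
}

static int log2_sketch(int64_t c) {
  int shift = 0;
  while ((int64_t(1) << shift) < c) shift++;
  return shift;
}

static int64_t multiply_maybe_reduced(int64_t left, int64_t right_constant) {
  if (is_power_of_2_sketch(right_constant)) {
    // strength-reduced form: shift instead of multiply (cast avoids signed-shift UB)
    return (int64_t)((uint64_t)left << log2_sketch(right_constant));
  }
  return left * right_constant;   // could not strength reduce, emit the multiply
}
#endif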
void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
}
void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
}
void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
}
void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
}
switch(code) {
default: ShouldNotReachHere();
}
}
void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
}
switch(code) {
default: ShouldNotReachHere();
}
}
void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
if (!GenerateSynchronizationCode) return;
// for slow path, use debug info for state after successful locking
// for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
}
void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
if (!GenerateSynchronizationCode) return;
// setup registers
}
void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
// If klass is not loaded we do not know if the klass has finalizers:
Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;
// allocate space for instance
} else {
}
}
if (c) {
return (c->value() == 0);
}
return false;
}
if (c) {
return (c->value() >= 0);
}
return false;
}
return (ciArrayKlass*)type;
} else {
return NULL;
}
}
return NULL;
return NULL;
}
}
if (t == NULL) {
return NULL;
}
return NULL;
}
}
return t;
}
// first try to identify the likely type of the arrays involved
{
}
}
// the types exactly match so the type is fully known
is_exact = true;
}
is_exact = true;
}
}
}
// at least pass along a good guess
src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
}
// if a probable array type has been identified, figure out if any
// of the required checks for a fast case can be elided.
if (!src_objarray)
if (!dst_objarray)
if (!x->arg_needs_null_check(0))
if (!x->arg_needs_null_check(2))
if (expected_type != NULL) {
// look for expressions like min(v, a.length), which end up as
// x > y ? y : x  or  x >= y ? y : x
length_limit = ifop->y();
}
}
// try to skip null checks and range checks
if (length_limit != NULL &&
}
}
if (length_limit != NULL &&
}
}
// check from incoming constant values
if (positive_constant(src_pos))
if (positive_constant(dst_pos))
if (positive_constant(length))
// see if the range check can be elided, which might also imply
// that src or dst is non-null.
// it's the length of the source array
if (is_constant_zero(src_pos))
}
// it's the length of the destination array
if (is_constant_zero(dst_pos))
}
}
if (is_exact) {
}
}
}
if (expected_type != NULL) {
}
}
// src and dest positions are the same, or dst is zero so assume
// nonoverlapping copy.
}
// moving within a single array so no type checks are needed
}
}
}
return result;
}
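// Illustrative sketch (assumption: hypothetical flag names and a simplified
// constant-operand type, standalone C++ guarded by #if 0). It shows the shape
// of the check elision above: start with every runtime check required for the
// fast arraycopy path, then clear the checks that constant operands already
// prove unnecessary; the remaining bits are what the stub must still verify.
#if 0
#include <cstdint>

enum ArrayCopyCheckSketch : uint32_t {
  check_src_null = 1u << 0,
  check_dst_null = 1u << 1,
  check_src_pos  = 1u << 2,   // src_pos >= 0
  check_dst_pos  = 1u << 3,   // dst_pos >= 0
  check_length   = 1u << 4,   // length >= 0
  check_all      = (1u << 5) - 1
};

struct ConstOperandSketch { bool is_constant; int64_t value; };

static uint32_t elide_arraycopy_checks(ConstOperandSketch src_pos,
                                       ConstOperandSketch dst_pos,
                                       ConstOperandSketch length) {
  uint32_t flags = check_all;
  if (src_pos.is_constant && src_pos.value >= 0) flags &= ~check_src_pos;
  if (dst_pos.is_constant && dst_pos.value >= 0) flags &= ~check_dst_pos;
  if (length.is_constant  && length.value  >= 0) flags &= ~check_length;
  return flags;
}
#endif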
return opr;
}
if (!value->is_register()) {
// force into a register
value = r;
}
// create a spill location
// move from register to spill
return tmp;
}
if (if_instr->should_profile()) {
if (if_instr->is_swapped()) {
int t = taken_count_offset;
}
// MDO cells are intptr_t, so the data_reg width is arch-dependent.
// Use leal instead of add to avoid destroying condition codes on x86
}
}
// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated from Java it is rather rare that more than one
// value is on the stack when crossing from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double, ...) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At exit of a basic block, we fill the register(s) and spill slots.
// At entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.
// move current value to referenced phi function
// cur_val can be null without phi being null in conjunction with inlining
"these can be produced lazily");
}
}
}
// Moves all stack values into their PHI position
// a block with only one predecessor never has phi functions
int index;
}
}
}
}
}
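// Illustrative sketch (assumption: hypothetical Location type, standalone C++
// guarded by #if 0). It only models the convention stated in the "Phi
// technique" comment above: the topmost value of the expression stack crosses
// the block boundary in a register, every other live value crosses in the
// spill slot named by its phi index.
#if 0
#include <vector>

struct LocationSketch { bool in_register; int index; };

static std::vector<LocationSketch> phi_locations(int live_stack_values) {
  std::vector<LocationSketch> locs;
  for (int phi_index = 0; phi_index < live_stack_values; phi_index++) {
    bool is_tos = (phi_index == live_stack_values - 1);  // top of expression stack
    locs.push_back(LocationSketch{ is_tos, phi_index });
  }
  return locs;
}
#endif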
// add a little fudge factor for the bailout, since the bailout is
// only checked periodically. This gives a few extra registers to
// hand out before we really run out, which helps us keep from
// tripping over assertions.
bailout("out of virtual registers");
// wrap it around
}
}
_virtual_register_number += 1;
}
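// Illustrative sketch (assumption: hypothetical limit, base and margin values,
// standalone C++ guarded by #if 0). It mirrors the policy above: request the
// bailout a little before the real limit, because the bailout is only observed
// periodically, and wrap the counter so allocation can keep limping along
// until the bailout actually takes effect.
#if 0
struct VRegAllocatorSketch {
  int  next_vreg;
  int  vreg_max;    // hard limit of representable virtual registers
  int  vreg_base;   // first valid virtual register number
  bool bailed_out;

  int new_vreg() {
    const int fudge = 20;                 // a few spare registers to hand out
    if (next_vreg + fudge >= vreg_max) {
      bailed_out = true;                  // "out of virtual registers"
      if (next_vreg + 2 >= vreg_max) {
        next_vreg = vreg_base;            // wrap it around
      }
    }
    return next_vreg++;
  }
};
#endif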
// Try to lock using register in hint
}
// does an rlock and sets result
set_result(x, reg);
return reg;
}
// does an rlock and sets result
switch (type) {
case T_BYTE:
case T_BOOLEAN:
break;
default:
break;
}
set_result(x, reg);
return reg;
}
//---------------------------------------------------------------------
if (oc) {
return oc->constant_value();
}
return NULL;
}
assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
// no moves are created for phi functions at the begin of exception
// handlers, so assign operands manually here
exceptionOopOpr());
set_result(x, result);
}
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// phi functions are never visited directly
}
// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
if (x->state_before() != NULL) {
// Any constant with a ValueStack requires patching so emit the patch here
if (!x->is_pinned()) {
// unpinned constants are handled specially so that they can be
// put into registers when they are used multiple times within a
// block. After the block completes their operand will be
// cleared so that other blocks can't refer to that register.
set_result(x, load_constant(x));
} else {
}
if (res->is_constant()) {
} else {
set_result(x, res);
}
}
} else {
}
}
// operand_for_instruction has the side effect of setting the result
// so there's no need to do it here.
}
}
call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
}
} else {
}
set_no_result(x);
}
// Example: ref.get()
// Combination of LoadField and G1 pre-write barrier
// need to perform the null check on the reference object
if (x->needs_null_check()) {
}
// Register the value in the referent field with the pre-barrier
result /* pre_val */,
false /* do_load */,
false /* patch */,
NULL /* info */);
}
// Example: clazz.isInstance(object)
// TODO could try to substitute this node with an equivalent InstanceOf
// if clazz is known to be a constant Class. This will pick up newly found
// constants after HIR construction. I'll leave this to a future change.
// as a first cut, make a simple leaf call to runtime to stay platform independent.
// could follow the aastore example in a future change.
// need to perform null check on clazz
if (x->needs_null_check()) {
}
x->type(),
NULL); // NULL CodeEmitInfo results in a leaf call
}
// Example: object.getClass()
// need to perform the null check on the rcvr
if (x->needs_null_check()) {
}
}
// Example: Thread.currentThread()
__ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}
set_no_result(x);
}
//------------------------local access--------------------------------------
if (x->operand()->is_illegal()) {
Constant* c = x->as_Constant();
if (c != NULL) {
} else {
// allocate a virtual register for this local or phi
x->set_operand(rlock(x));
}
}
return x->operand();
}
if (opr->is_virtual()) {
}
return NULL;
}
}
return NULL;
}
if (_vreg_flags.size_in_bits() == 0) {
_vreg_flags = temp;
}
}
return false;
}
}
// Block local constant handling. This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers. Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.
}
for (int i = 0; i < _constants.length(); i++) {
switch (t) {
case T_INT:
case T_FLOAT:
break;
case T_LONG:
case T_DOUBLE:
break;
case T_OBJECT:
break;
}
return _reg_for_constants.at(i);
}
}
_constants.append(c);
return result;
}
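// Illustrative sketch (assumption: simplified value and register types,
// standalone C++ guarded by #if 0). Same idea as the block-local constant
// handling above: within one block, reuse the register that already holds an
// equal constant instead of materializing it again, and forget the cache when
// the block ends so other blocks cannot refer to those registers.
#if 0
#include <vector>

struct CachedConstantSketch { long value; int reg; };

struct BlockConstantCacheSketch {
  std::vector<CachedConstantSketch> constants;  // cleared at the end of each block
  int next_reg = 0;

  int reg_for_constant(long value) {
    for (const CachedConstantSketch& c : constants) {
      if (c.value == value) return c.reg;       // already loaded in this block
    }
    int reg = next_reg++;                       // stand-in for "load into a new vreg"
    constants.push_back(CachedConstantSketch{value, reg});
    return reg;
  }

  void block_do_epilog() { constants.clear(); } // other blocks must not reuse these
};
#endif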
// Various barriers
// Do the pre-write barrier, if any.
#ifndef SERIALGC
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
break;
#endif // SERIALGC
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableExtension:
// No pre barriers
break;
case BarrierSet::ModRef:
case BarrierSet::Other:
// No pre barriers
break;
default :
}
}
#ifndef SERIALGC
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
break;
#endif // SERIALGC
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableExtension:
break;
case BarrierSet::ModRef:
case BarrierSet::Other:
// No post barriers
break;
default :
}
}
////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC
// First we test whether marking is in progress.
} else {
"Assumption");
}
new LIR_Address(thrd,
// Read the marking-in-progress flag.
if (do_load) {
if (patch)
if (!addr_opr->is_address()) {
}
} else {
}
}
// If the "new_val" is a constant NULL, no barrier is necessary.
if (new_val->is_constant() &&
if (!new_val->is_register()) {
if (new_val->is_constant()) {
} else {
}
}
if (addr->is_address()) {
} else {
}
}
if (TwoOperandLIRForm ) {
LIR_OprDesc::illegalOpr());
} else {
LIR_OprDesc::illegalOpr());
}
if (!new_val->is_register()) {
}
}
#endif // SERIALGC
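// Illustrative sketch (assumption: hypothetical per-thread log layout,
// standalone C++ guarded by #if 0). It shows the shape of the SATB pre-write
// barrier emitted above: only while concurrent marking is in progress is the
// value about to be overwritten (pre_val) recorded, fast path into a
// thread-local log buffer, slow path into the runtime when the buffer is full.
#if 0
struct SATBThreadStateSketch {
  bool   marking_active;   // the marking-in-progress flag tested first
  void** log_top;          // next free slot; the queue fills downwards here
  void** log_start;        // lowest valid slot of the buffer
};

static void satb_slow_path_sketch(SATBThreadStateSketch*, void*) {
  // stand-in for the runtime call that records pre_val and refills the buffer
}

static void satb_pre_barrier_sketch(SATBThreadStateSketch* t, void* pre_val) {
  if (!t->marking_active) return;       // common case: no barrier work at all
  if (pre_val == nullptr) return;       // a null previous value needs no record
  if (t->log_top > t->log_start) {
    *--t->log_top = pre_val;            // fast path: append to the local log
  } else {
    satb_slow_path_sketch(t, pre_val);  // buffer full: hand off to the runtime
  }
}
#endif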
////////////////////////////////////////////////////////////////////////
if (addr->is_address()) {
// ptr cannot be an object because we use this barrier for array card marks
// and addr can point in the middle of an array.
} else {
}
}
#ifdef ARM
// TODO: ARM - move to platform-dependent code
if (VM_Version::supports_movw()) {
} else {
__ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
}
LIR_Address *card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
} else {
}
#else // ARM
if (TwoOperandLIRForm) {
} else {
}
} else {
T_BYTE));
}
#endif // ARM
}
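// Illustrative sketch (assumption: standalone C++ guarded by #if 0; the dirty
// value and the biasing of the base pointer are simplified). The code emitted
// above does the same job: shift the address of the updated field right by the
// card shift and store one byte into the card table, so the next GC knows the
// card covering this store must be rescanned.
#if 0
#include <cstdint>

static const int      card_shift_sketch = 9;   // 512-byte cards
static unsigned char* card_table_base_sketch;  // set up by the VM elsewhere

static void post_write_barrier_sketch(void* field_addr) {
  uintptr_t card = reinterpret_cast<uintptr_t>(field_addr) >> card_shift_sketch;
  card_table_base_sketch[card] = 0;             // 0 is the "dirty" value here
}
#endif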
//------------------------field access--------------------------------------
// Comment copied from templateTable_i486.cpp
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
// order. Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized with respect to each other.
// ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
// the read float up to before the read. It's OK for non-volatile memory refs
// that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
// that happen BEFORE the write float down to after the write. It's OK for
// non-volatile memory refs that happen after the volatile write to float up
// before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs). Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads. These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case. This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
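//
// Illustrative sketch (assumption: plain C++11 atomics and fences standing in
// for the LIR membars, guarded by #if 0). It places the barriers the comment
// above calls for: a release barrier before a volatile store, a full
// (store-load) barrier after it, and an acquire barrier after a volatile load.
#if 0
#include <atomic>

static int payload;                              // some ordinary memory
static std::atomic<int> volatile_field;          // stand-in for a Java volatile

static void volatile_store_sketch(int v) {
  payload = 42;                                  // earlier plain write
  std::atomic_thread_fence(std::memory_order_release);   // membar_release
  volatile_field.store(v, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);   // store-load barrier
}

static int volatile_load_sketch() {
  int v = volatile_field.load(std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_acquire);   // membar_acquire
  return payload + v;                            // later plain read stays after
}
#endif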
if (needs_patching) {
} else if (x->needs_null_check()) {
} else {
}
}
if (is_volatile || needs_patching) {
// load item if field is volatile (fewer special cases for volatiles)
// load item if field not initialized
// load item if field not constant
// because of code patching we cannot inline constants
} else {
}
} else {
}
set_no_result(x);
#ifndef PRODUCT
if (PrintNotLoaded && needs_patching) {
}
#endif
if (x->needs_null_check() &&
(needs_patching ||
// emit an explicit null check because the offset is too large
}
if (needs_patching) {
// we need to patch the offset in the instruction so don't allow
// generate_address to try to be smart about emitting the -1.
// Otherwise the patching code won't know how to find the
// instruction to patch.
} else {
}
__ membar_release();
}
if (is_oop) {
// Do the pre-write barrier, if any.
true /* do_load*/,
}
if (is_volatile && !needs_patching) {
} else {
}
if (is_oop) {
// Store to an object field, so mark the card of the object's header
}
}
}
if (needs_patching) {
} else if (x->needs_null_check()) {
} else {
}
}
#ifndef PRODUCT
if (PrintNotLoaded && needs_patching) {
}
#endif
if (x->needs_null_check() &&
(needs_patching ||
// emit an explicit null check because the offset is too large
}
if (needs_patching) {
// we need to patch the offset in the instruction so don't allow
// generate_address to try to be smart about emitting the -1.
// Otherwise the patching code won't know how to find the
// instruction to patch.
} else {
}
if (is_volatile && !needs_patching) {
} else {
}
__ membar_acquire();
}
}
//------------------------java.nio.Buffer.checkIndex------------------------
// int java.nio.Buffer.checkIndex(int)
// NOTE: by the time we are in checkIndex() we are guaranteed that
// the buffer is non-null (because checkIndex is package-private and
// only called from within other methods in the buffer).
if (GenerateRangeChecks) {
cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
} else {
}
} else {
// Just load the index into the result register
}
}
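// Illustrative sketch (assumption: plain C++ instead of cmp_mem_int/branch,
// guarded by #if 0). The single unsigned compare below is the trick used
// above: since limit is never negative, treating the index as unsigned lets
// one branch reject both negative indices and indices >= limit.
#if 0
#include <cstdint>

static int check_index_sketch(int32_t index, int32_t limit) {
  if (static_cast<uint32_t>(index) >= static_cast<uint32_t>(limit)) {
    return -1;          // here the generated code branches to the range-check stub
  }
  return index;         // in range: the intrinsic simply yields the index
}
#endif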
//------------------------array access--------------------------------------
if (x->needs_null_check()) {
} else {
}
}
__ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
}
bool needs_range_check = true;
if (use_length) {
if (needs_range_check) {
}
}
// let it be a constant
} else {
}
if (x->needs_null_check()) {
} else {
}
}
// emit array address setup early so it schedules better
if (GenerateRangeChecks && needs_range_check) {
if (use_length) {
// TODO: use a (modified) version of array_range_check that does not require a
// constant length to be loaded to a register
} else {
// The range check performs the null check, so clear it out for the load
}
}
}
if (x->can_trap()) {
}
}
// the result is the same as from the node we are casting
}
set_no_result(x);
#ifndef PRODUCT
if (PrintC1Statistics) {
}
#endif
// check if the instruction has an xhandler in any of the nested scopes
bool unwind = false;
// this throw is not inside an xhandler
unwind = true;
} else {
// get some idea of the throw type
bool type_is_exact = true;
if (throw_type == NULL) {
type_is_exact = false;
}
}
}
// do null check before moving exception oop into fixed register
// to avoid a fixed interval with an oop during the null check.
// Use a copy of the CodeEmitInfo because debug information is
// different for null_check and throw.
if (GenerateCompilerNullChecks &&
// if the exception object wasn't created using new then it might be null.
__ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
}
// we need to go through the exception lookup path to get JVMTI
// notification done
unwind = false;
}
// move exception oop into fixed register
if (unwind) {
} else {
}
}
assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
if (input_opr->is_single_fpu()) {
} else {
set_result(x, result);
}
}
if (x->has_index()) {
}
int log2_scale = 0;
if (x->has_index()) {
log2_scale = x->log2_scale();
}
#ifndef _LP64
} else {
}
#endif
if (index_op->is_constant()) {
} else {
#ifdef X86
#ifdef _LP64
}
#endif
#else
#ifdef _LP64
}
#endif
} else {
}
#endif
}
} else {
} else {
}
}
}
int log2_scale = 0;
if (x->has_index()) {
log2_scale = x->log2_scale();
}
if (x->has_index()) {
}
} else {
}
set_no_result(x);
#ifndef _LP64
} else {
}
#endif
if (log2_scale != 0) {
// temporary fix (platform dependent code without shift on Intel would be better)
#ifdef _LP64
} else {
#endif
// TODO: ARM also allows embedded shift in the address
#ifdef _LP64
}
#endif
}
#ifdef _LP64
}
#endif
}
#ifndef SERIALGC
// We might be reading the value of the referent field of a
// Reference object in order to attach it back to the live
// object graph. If G1 is enabled then we need to record
// the value that is being returned in an SATB log buffer.
//
// We need to generate code similar to the following...
//
// if (offset == java_lang_ref_Reference::referent_offset) {
// if (src != NULL) {
// if (klass(src)->reference_type() != REF_NONE) {
// pre_barrier(..., value, ...);
// }
// }
// }
if (off.is_constant()) {
// The constant offset is something other than referent_offset.
// We can skip generating/checking the remaining guards and
// skip generation of the code stub.
gen_pre_barrier = false;
} else {
// The constant offset is the same as referent_offset -
// we do not need to generate a runtime offset check.
gen_offset_check = false;
}
}
// We don't need to generate the stub if the source object is an array
gen_pre_barrier = false;
}
if (gen_pre_barrier) {
// We still need to continue with the checks.
if (src.is_constant()) {
if (src_con->is_null_object()) {
// The constant src object is null - We can skip
// generating the code stub.
gen_pre_barrier = false;
} else {
// Non-null constant source object. We still have to generate
// the slow stub - but we don't need to generate the runtime
// null object check.
gen_source_check = false;
}
}
}
if (gen_pre_barrier && !PatchALot) {
// Can the klass of object be statically determined to be
// a sub-class of Reference?
gen_type_check = false;
// Not Reference and not Object klass.
gen_pre_barrier = false;
}
}
}
if (gen_pre_barrier) {
// We may have to generate more than one runtime check here. Let's start with
// the offset check.
if (gen_offset_check) {
// if (offset != referent_offset) -> continue
// If offset is an int then we can do the comparison with the
// referent_offset constant; otherwise we need to move
// referent_offset into a temporary register and generate
// a reg-reg compare.
} else {
}
}
if (gen_source_check) {
// offset is a const and equals referent offset
// if (source == null) -> continue
}
if (gen_type_check) {
// We have determined that offset == referent_offset && src != null.
// if (src->_klass->_reference_type == REF_NONE) -> continue
LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(instanceKlass::reference_type_offset()), T_BYTE);
}
{
// We have determined that src->_klass->_reference_type != REF_NONE
// so register the value in the referent field with the pre-barrier.
value /* pre_val */,
false /* do_load */,
false /* patch */,
NULL /* info */);
}
}
}
#endif // SERIALGC
}
} else {
}
set_no_result(x);
}
// let it be a constant
} else {
}
set_no_result(x);
}
do_UnsafePrefetch(x, false);
}
do_UnsafePrefetch(x, true);
}
for (int i = 0; i < lng; i++) {
} else {
}
}
}
if (len > 0) {
// still in same range
} else {
// skip tests which explicitly dispatch to the default
if (sux != default_sux) {
}
}
}
}
return res;
}
// we expect the keys to be sorted by increasing value
if (len > 0) {
for (int i = 1; i < len; i++) {
// still in same range
} else {
// skip tests which explicitly dispatch to the default
}
}
}
}
return res;
}
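// Illustrative sketch (assumption: simplified key/successor representation,
// standalone C++ guarded by #if 0). It builds the same kind of range list as
// above: consecutive sorted keys that jump to the same successor are folded
// into one range, and keys that explicitly dispatch to the default successor
// produce no range at all.
#if 0
#include <cstddef>
#include <vector>

struct SwitchRangeSketch { int low; int high; int sux; };  // sux = successor block id

static std::vector<SwitchRangeSketch> create_lookup_ranges_sketch(
    const std::vector<int>& keys,   // sorted by increasing value
    const std::vector<int>& sux,    // successor for each key
    int default_sux) {
  std::vector<SwitchRangeSketch> res;
  for (size_t i = 0; i < keys.size(); i++) {
    if (sux[i] == default_sux) continue;        // explicit dispatch to the default
    if (!res.empty() && res.back().sux == sux[i] && res.back().high + 1 == keys[i]) {
      res.back().high = keys[i];                // still in same range
    } else {
      res.push_back(SwitchRangeSketch{keys[i], keys[i], sux[i]});
    }
  }
  return res;
}
#endif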
set_no_result(x);
if (x->is_safepoint()) {
}
// move values into phi locations
move_to_phi(x->state());
if (UseTableRanges) {
} else {
for (int i = 0; i < len; i++) {
}
}
}
set_no_result(x);
if (x->is_safepoint()) {
}
// move values into phi locations
move_to_phi(x->state());
if (UseTableRanges) {
} else {
for (int i = 0; i < len; i++) {
}
}
}
set_no_result(x);
// need to free up storage used for OSR entry point
}
if (x->is_safepoint()) {
// increment backedge counter if needed
}
// Gotos can be folded Ifs; handle this case.
if (x->should_profile()) {
int offset;
} else {
}
}
// emit phi-instruction moves after the safepoint since this simplifies
// describing the state at the safepoint.
move_to_phi(x->state());
}
// Emit moves from physical registers / stack slots to virtual registers
int java_index = 0;
// Types which are smaller than int are passed as int, so
// correct the type which is passed.
switch (t) {
case T_BYTE:
case T_BOOLEAN:
case T_SHORT:
case T_CHAR:
t = T_INT;
break;
}
// Assign new location to Local instruction for this local
#ifndef __SOFTFP__
// The java calling convention passes double as long and float as int.
#endif // __SOFTFP__
java_index += type2size[t];
}
call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
}
if (method()->is_synchronized()) {
} else {
}
CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
// receiver is guaranteed non-NULL so don't need CodeEmitInfo
}
}
// increment invocation counters if needed
CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
}
// all blocks with a successor must end with an unconditional jump
// to the successor even if they are consecutive
}
// construct our frame and model the production of incoming pointer
// to the OSR buffer.
}
void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
if (loc->is_register()) {
} else {
} else
} else {
}
}
}
if (x->has_receiver()) {
if (loc->is_register()) {
} else {
}
}
}
// Visits all arguments, returns appropriate items without loading them
if (x->has_receiver()) {
}
for (int i = 0; i < x->number_of_arguments(); i++) {
}
return argument_items;
}
// The invoke with receiver has the following phases:
// b) traverse all arguments -> item-array (invoke_visit_argument)
// c) push receiver on stack
// d) load each of the items and push on stack
// e) unlock receiver
// f) move receiver into receiver-register %o0
// g) lock result registers and emit call operation
//
// Before issuing a call, we must spill-save all values on the stack
// that are in caller-save registers. "spill-save" moves those values
// either into a free callee-save register or spills them if no free
// callee-save register is available.
//
// The problem is where to invoke spill-save.
// - if invoked between e) and f), we may lock callee save
// register in "spill-save" that destroys the receiver register
// before f) is executed
// - if we rearrange f) to be earlier, by loading %o0, it
// may destroy a value on the stack that is currently in %o0
// and is waiting to be spilled
// - if we keep the receiver locked while doing spill-save,
// we cannot spill it as it is spill-locked
//
// setup result register
}
if (x->has_receiver()) {
}
// emit invoke code
// JSR 292
// Preserve the SP over MethodHandle call sites.
if (is_method_handle_invoke) {
info->set_is_method_handle_invoke(true);
}
switch (x->code()) {
case Bytecodes::_invokestatic:
break;
case Bytecodes::_invokespecial:
case Bytecodes::_invokevirtual:
case Bytecodes::_invokeinterface:
// for final target we still produce an inline cache, in order
// to be able to call mixed mode
} else if (x->vtable_index() < 0) {
} else {
}
break;
case Bytecodes::_invokedynamic: {
break;
}
default:
break;
}
// JSR 292
// Restore the SP after MethodHandle call sites.
if (is_method_handle_invoke) {
}
// Force rounding of results from non-strictfp when in strictfp
// scope (or when we don't know the strictness of the callee, to
// be safe.)
if (!x->target_is_loaded() || !x->target_is_strictfp()) {
}
}
}
if (result_register->is_valid()) {
}
}
}
// Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval()
#ifdef ASSERT
{
assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
}
#endif
} else {
}
}
reg, new LIR_OprList());
}
#ifdef TRACE_HAVE_INTRINSICS
__ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
} else {
}
}
__ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_OBJECT), klass, info);
}
#endif
switch (x->id()) {
case vmIntrinsics::_intBitsToFloat :
case vmIntrinsics::_doubleToRawLongBits :
case vmIntrinsics::_longBitsToDouble :
case vmIntrinsics::_floatToRawIntBits : {
do_FPIntrinsics(x);
break;
}
#ifdef TRACE_HAVE_INTRINSICS
case vmIntrinsics::_counterTime:
break;
#endif
case vmIntrinsics::_currentTimeMillis:
break;
case vmIntrinsics::_nanoTime:
break;
// java.nio.Buffer.checkIndex
break;
case vmIntrinsics::_compareAndSwapInt:
do_CompareAndSwap(x, intType);
break;
case vmIntrinsics::_compareAndSwapLong:
break;
case vmIntrinsics::_Reference_get:
do_Reference_get(x);
break;
default: ShouldNotReachHere(); break;
}
}
// Need recv in a temporary register so it interferes with the other temporaries
// tmp is used to hold the counters on SPARC
}
}
// We can safely ignore accessors here, since c2 will inline them anyway,
// accessors are also always mature.
if (!x->inlinee()->is_accessor()) {
// Notify the runtime very infrequently only to take care of counter overflows
increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
}
}
int freq_log;
if (level == CompLevel_limited_profile) {
} else if (level == CompLevel_full_profile) {
} else {
}
// Increment the appropriate invocation/backedge counter and notify the runtime.
increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
}
if (level == CompLevel_limited_profile) {
} else if (level == CompLevel_full_profile) {
} else {
}
if (notify) {
// The bci for info can point to the cmp of an if; we want the if's bci
}
}
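// Illustrative sketch (assumption: a plain C++ counter instead of MDO/counter
// cells, guarded by #if 0). It shows the frequency scheme used above: every
// event bumps the counter, but the runtime is only notified when the low
// freq_log bits roll over to zero, i.e. once every 2^freq_log events, which
// keeps the notification overhead negligible.
#if 0
#include <cstdint>

static void notify_runtime_sketch() {
  // stand-in for the overflow/compile-policy hook invoked by the stub
}

static void increment_event_counter_sketch(int32_t* counter, int freq_log) {
  int32_t mask = (1 << freq_log) - 1;
  *counter += 1;                        // the cheap, always-executed part
  if ((*counter & mask) == 0) {
    notify_runtime_sketch();            // taken once every 2^freq_log events
  }
}
#endif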
if (x->pass_thread()) {
}
for (int i = 0; i < x->number_of_arguments(); i++) {
Value a = x->argument_at(i);
}
set_no_result(x);
} else {
}
}
LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
}
LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
}
// get a result register
}
// move the arguments into the correct location
if (loc->is_register()) {
} else {
// if (!can_store_as_constant(arg)) {
// LIR_Opr tmp = new_register(arg->type());
// __ move(arg, tmp);
// arg = tmp;
// }
} else {
}
}
}
if (info) {
} else {
}
}
return result;
}
// get a result register
}
// move the arguments into the correct location
if (loc->is_register()) {
} else {
} else {
}
}
}
if (info) {
} else {
}
}
return result;
}
switch(code) {
default : ShouldNotReachHere(); break;
}
}
}