// c1_LIRAssembler_sparc.cpp revision 1369
/*
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
# include "incls/_precompiled.incl"
# include "incls/_c1_LIRAssembler_sparc.cpp.incl"
//------------------------------------------------------------
if (opr->is_constant()) {
case T_INT: {
}
default:
return false;
}
}
return false;
}
case lir_null_check:
return true;
case lir_add:
case lir_ushr:
case lir_shr:
case lir_shl:
// integer shifts and adds are always one instruction
case lir_move: {
// this works around a problem where moves with the same src and dst
// end up in the delay slot and then the assembler swallows the mov
// since it has no effect and then it complains because the delay slot
// is empty. returning false stops the optimizer from putting this in
// the delay slot
return false;
}
// don't put moves involving oops into the delay slot since the VerifyOops code
// will make it much larger than a single instruction.
if (VerifyOops) {
return false;
}
return false;
}
if (dst->is_register()) {
return !PatchALot;
} else if (src->is_single_stack()) {
return true;
}
}
if (src->is_register()) {
return !PatchALot;
} else if (dst->is_single_stack()) {
return true;
}
}
if (dst->is_register() &&
return true;
}
return false;
}
default:
return false;
}
}
return FrameMap::O0_oop_opr;
}
return FrameMap::I0_oop_opr;
}
}
// Size in bytes of the fixed frame of this compiled activation.
// NOTE(review): the body (including the return statement) has been elided
// in this extract; as written, flowing off the end of an int-returning
// function is undefined behavior -- confirm against the full revision.
int LIR_Assembler::initial_frame_size_in_bytes() {
}
// inline cache check: the inline cached class is in G5_inline_cache_reg(G5);
// we fetch the class of the receiver (O0) and compare it with the cached class.
// If they do not match we jump to the slow case.
// Returns the code-buffer offset of the verified entry point.
int LIR_Assembler::check_icache() {
// NOTE(review): `offset` is returned but never declared or assigned in
// this extract -- the cache-check emission appears to have been elided;
// confirm against the full revision.
return offset;
}
void LIR_Assembler::osr_entry() {
// On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
//
// 1. Create a new compiled activation.
// 2. Initialize local variables in the compiled activation. The expression stack must be empty
// at the osr_bci; it is not initialized.
// 3. Jump to the continuation address in compiled code to resume execution.
// OSR entry point
// Create a frame for the compiled activation.
// OSR buffer is
//
// locals[nlocals-1..0]
// monitors[number_of_locks-1..0]
//
// locals is a direct copy of the interpreter frame so in the osr buffer
// so first slot in the local array is the last local from the interpreter
// and last slot is local[0] (receiver) from the interpreter
//
// Similarly with locks. The first lock slot in the osr buffer is the nth lock
// from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
// in the interpreter frame (the method lock if a sync method)
// Initialize monitors in the compiled activation.
// I0: pointer to osr buffer
//
// All other registers are dead at this point and the locals will be
// copied into place by code emitted in the IR.
// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
// the OSR buffer using 2 word entries: first the lock and then
// the oop.
for (int i = 0; i < number_of_locks; i++) {
#ifdef ASSERT
// verify the interpreter's monitor has a non-null object
{
Label L;
}
#endif // ASSERT
// Copy the lock field into the compiled activation.
}
}
}
// Optimized Library calls
// This is the fast version of java.lang.String.compare; it has no
// OSR entry and therefore we generate a slow version for OSRs.
// NOTE(review): all instruction-emission lines have been elided in this
// extract; only the three scoped phases and their commentary remain.
// Confirm against the full revision.
void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
{
// Phase 1: load both strings' fields.
// Get a pointer to the first character of string0 in tmp0 and get string0.count in str0
// Get a pointer to the first character of string1 in tmp1 and get string1.count in str1
// Also, get string0.count-string1.count in o7 and get the condition code set
// Note: some instructions have been hoisted for better instruction scheduling
// str1 may be null
}
{
// Phase 2:
// Compute the minimum of the string lengths, scale it and store it in limit
// If either string is empty (or both of them) the result is the difference in lengths
}
{
// Phase 3 (main comparison loop):
// Neither string is empty
// Shift base0 and base1 to the end of the arrays, negate limit
}
// If strings are equal up to min length, return the length difference.
// Otherwise, return the difference between the first mismatched chars.
}
// --------------------------------------------------------------------------------------------
if (!GenerateSynchronizationCode) return;
// compute pointer to BasicLock
}
else {
}
// unlock object
// _slow_case_stubs->append(slow_case);
// temporary fix: must be created after exceptionhandler, therefore as call stub
if (UseFastLocking) {
// try inlined fast unlocking first, revert to slow locking if it fails
// note: lock_reg points to the displaced header since the displaced header offset is 0!
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
} else {
// always do slow unlocking
// note: the slow unlocking code could be inlined here, however if we use
// slow unlocking, speed doesn't matter anyway and this solution is
// simpler and requires less duplicated code - additionally, the
// slow unlocking code is the same in either case which simplifies
// debugging
}
// done
}
// Emit the out-of-line exception handler for this method and return its
// start offset in the code buffer, or -1 after a bailout.
int LIR_Assembler::emit_exception_handler() {
// if the last instruction is a call (typically to do a throw which
// is coming at the end after block reordering) the return address
// must still point into the code area in order to avoid assertion
// failures when searching for the corresponding bci => add a nop
// (was bug 5/14/1999 - gri)
// generate code for exception handler
// NOTE(review): the nop emission and the start_a_stub() call that
// defines `handler_base` appear to have been elided in this extract;
// confirm against the full revision.
if (handler_base == NULL) {
// not enough space left for the handler
bailout("exception handler overflow");
return -1;
}
int offset = code_offset();
// NOTE(review): the handler body emission is elided here.
__ end_a_stub();
return offset;
}
// Emit the out-of-line deoptimization handler for this method and return
// its start offset in the code buffer, or -1 after a bailout.
int LIR_Assembler::emit_deopt_handler() {
// if the last instruction is a call (typically to do a throw which
// is coming at the end after block reordering) the return address
// must still point into the code area in order to avoid assertion
// failures when searching for the corresponding bci => add a nop
// (was bug 5/14/1999 - gri)
// generate code for deopt handler
// NOTE(review): the nop emission and the start_a_stub() call that
// defines `handler_base` appear to have been elided in this extract;
// confirm against the full revision.
if (handler_base == NULL) {
// not enough space left for the handler
bailout("deopt handler overflow");
return -1;
}
int offset = code_offset();
// NOTE(review): the handler body emission is elided here.
__ end_a_stub();
return offset;
}
if (o == NULL) {
} else {
}
}
// Allocate a new index in oop table to hold the oop once it's been patched
// NULL will be dynamically patched later and the patched value may be large. We must
}
int divisor = -1;
} else {
}
// convert division by a power of two into some shifts and logical operations
if (divisor == 2) {
} else {
}
return;
} else {
if (divisor == 2) {
} else {
}
return;
}
}
if (!VM_Version::v9_instructions_work()) {
// v9 doesn't require these nops
}
} else {
}
} else {
}
}
}
#ifdef ASSERT
#endif
case lir_cond_less: acond = (is_unordered ? Assembler::f_unorderedOrLess : Assembler::f_less); break;
case lir_cond_greater: acond = (is_unordered ? Assembler::f_unorderedOrGreater : Assembler::f_greater); break;
case lir_cond_lessEqual: acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual : Assembler::f_lessOrEqual); break;
case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
default : ShouldNotReachHere();
};
if (!VM_Version::v9_instructions_work()) {
}
} else {
default: ShouldNotReachHere();
};
// sparc has different condition codes for testing 32-bit
// vs. 64-bit values. We could always test xcc is we could
// guarantee that 32-bit loads always sign extended but that isn't
// true and since sign extension isn't free, it would impose a
// slight cost.
#ifdef _LP64
} else
#endif
}
// The peephole pass fills the delay slot
}
switch(code) {
#ifdef _LP64
#else
#endif
break;
}
}
break;
}
Label L;
// result must be 0 if value is NaN; test by comparing value to itself
if (!VM_Version::v9_instructions_work()) {
}
// move integer result from float register to int register
break;
}
#ifdef _LP64
#else
#endif
break;
}
break;
}
int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
break;
}
break;
}
default: ShouldNotReachHere();
}
}
// do nothing since all instructions are word aligned on sparc
}
// the peephole pass fills the delay slot
}
// the peephole pass fills the delay slot
}
} else {
// This will generate 2 instructions
// ld_ptr, set_hi, set
}
// the peephole pass fills the delay slot
}
}
}
// load with 32-bit displacement
int load_offset = code_offset();
switch(ld_type) {
case T_BOOLEAN: // fall through
case T_ADDRESS:// fall through
case T_ARRAY : // fall through
default : ShouldNotReachHere();
}
} else {
load_offset = code_offset();
switch(ld_type) {
case T_BOOLEAN: // fall through
case T_ADDRESS:// fall through
case T_ARRAY : // fall through
default : ShouldNotReachHere();
}
}
return load_offset;
}
// store with 32-bit displacement
void LIR_Assembler::store(Register value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
switch (type) {
case T_BOOLEAN: // fall through
case T_ADDRESS:// fall through
case T_ARRAY : // fall through
default : ShouldNotReachHere();
}
} else {
switch (type) {
case T_BOOLEAN: // fall through
case T_ADDRESS:// fall through
case T_ARRAY : //fall through
default : ShouldNotReachHere();
}
}
// Note: Do the store before verification as the code might be patched!
}
// load float with 32-bit displacement
void LIR_Assembler::load(Register s, int disp, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) {
switch(ld_type) {
case T_FLOAT : w = FloatRegisterImpl::S; break;
case T_DOUBLE: w = FloatRegisterImpl::D; break;
default : ShouldNotReachHere();
}
} else {
}
} else {
}
}
// store float with 32-bit displacement
void LIR_Assembler::store(FloatRegister value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
switch(type) {
case T_FLOAT : w = FloatRegisterImpl::S; break;
case T_DOUBLE: w = FloatRegisterImpl::D; break;
default : ShouldNotReachHere();
}
} else {
}
} else {
}
}
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool unaligned) {
int store_offset;
// for offsets larger than a simm13 we setup the offset in O7
} else {
store_offset = code_offset();
switch (type) {
case T_BOOLEAN: // fall through
case T_LONG :
#ifdef _LP64
} else {
}
#else
#endif
break;
case T_ADDRESS:// fall through
case T_ARRAY : // fall through
case T_DOUBLE:
{
// split unaligned stores
} else {
}
break;
}
default : ShouldNotReachHere();
}
}
return store_offset;
}
int store_offset = code_offset();
switch (type) {
case T_BOOLEAN: // fall through
case T_LONG :
#ifdef _LP64
#else
#endif
break;
case T_ADDRESS:// fall through
case T_ARRAY : // fall through
default : ShouldNotReachHere();
}
return store_offset;
}
int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool unaligned) {
int load_offset;
// for offsets larger than a simm13 we setup the offset in O7
} else {
load_offset = code_offset();
switch(type) {
case T_BOOLEAN: // fall through
case T_LONG :
if (!unaligned) {
#ifdef _LP64
#else
"must be sequential");
#endif
} else {
#ifdef _LP64
#else
} else {
}
#endif
}
break;
case T_ADDRESS:// fall through
case T_ARRAY : // fall through
case T_DOUBLE:
{
// split unaligned loads
} else {
}
break;
}
default : ShouldNotReachHere();
}
}
return load_offset;
}
int load_offset = code_offset();
switch(type) {
case T_BOOLEAN: // fall through
case T_ADDRESS:// fall through
case T_ARRAY : // fall through
case T_LONG :
#ifdef _LP64
#else
"must be sequential");
#endif
break;
default : ShouldNotReachHere();
}
return load_offset;
}
// Load a value of type `ld_type` from address `a` into integer register `d`.
// NOTE(review): the body has been elided in this extract -- presumably it
// dispatches to the base+displacement / base+index load helpers; confirm
// against the full revision.
void LIR_Assembler::load(const Address& a, Register d, BasicType ld_type, CodeEmitInfo *info, int offset) {
}
// Store integer register `value` of type `type` to address `dest`.
// NOTE(review): the body has been elided in this extract -- presumably it
// dispatches to the base+displacement / base+index store helpers; confirm
// against the full revision.
void LIR_Assembler::store(Register value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
}
// Load a float/double of type `ld_type` from address `a` into FP register `d`.
// NOTE(review): the body has been elided in this extract; confirm against
// the full revision.
void LIR_Assembler::load(const Address& a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info, int offset) {
}
// Store FP register `value` of type `type` to address `dest`.
// NOTE(review): the body has been elided in this extract; confirm against
// the full revision.
void LIR_Assembler::store(FloatRegister value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
}
}
}
}
// Store FP register `value` of type `type` to the LIR address `dest`.
// NOTE(review): the body has been elided in this extract; confirm against
// the full revision.
void LIR_Assembler::store(FloatRegister value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) {
}
switch (c->type()) {
case T_INT:
case T_FLOAT:
case T_ADDRESS: {
int value = c->as_jint_bits();
if (value == 0) {
} else {
}
break;
}
case T_OBJECT: {
break;
}
case T_LONG:
case T_DOUBLE: {
int value_lo = c->as_jint_lo_bits();
if (value_lo == 0) {
} else {
}
int value_hi = c->as_jint_hi_bits();
if (value_hi == 0) {
} else {
}
break;
}
default:
}
}
}
switch (c->type()) {
case T_INT:
case T_FLOAT:
case T_ADDRESS: {
int value = c->as_jint_bits();
if (value == 0) {
}
} else {
}
break;
}
case T_LONG:
case T_DOUBLE: {
int value_lo = c->as_jint_lo_bits();
if (value_lo == 0) {
} else {
}
int value_hi = c->as_jint_hi_bits();
if (value_hi == 0) {
} else {
}
break;
}
case T_OBJECT: {
} else {
}
// handle either reg+reg or reg+disp address
} else {
}
break;
}
default:
}
}
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
switch (c->type()) {
case T_INT:
case T_ADDRESS:
{
if (to_reg->is_single_cpu()) {
} else {
}
}
break;
case T_LONG:
{
if (to_reg->is_double_cpu()) {
#ifdef _LP64
#else
#endif
#ifdef _LP64
} else if (to_reg->is_single_cpu()) {
#endif
} else {
}
}
break;
case T_OBJECT:
{
if (patch_code == lir_patch_none) {
} else {
}
}
break;
case T_FLOAT:
{
if (const_addr == NULL) {
bailout("const section overflow");
break;
}
if (to_reg->is_single_fpu()) {
} else {
}
}
break;
case T_DOUBLE:
{
if (const_addr == NULL) {
bailout("const section overflow");
break;
}
if (to_reg->is_double_fpu()) {
} else {
#ifdef _LP64
#else
#endif
}
}
break;
default:
}
}
}
switch (type) {
case T_INT:
case T_FLOAT: {
break;
}
case T_OBJECT: {
break;
}
case T_LONG:
case T_DOUBLE: {
break;
}
default:
}
}
}
}
}
if (needs_patching) {
patch_code == lir_patch_none ||
}
if (needs_patching) {
} else {
}
}
} else {
}
// remember the offset of the load. The patching_epilog must be done
// before the call to add_debug_info, otherwise the PcDescs don't get
// entered in increasing order.
int offset = code_offset();
} else {
}
}
}
if (VM_Version::has_v9()) {
}
}
if (VM_Version::has_v9()) {
}
}
if (src->is_single_word()) {
} else if (src->is_double_word()) {
}
}
if (dest->is_single_word()) {
} else if (dest->is_double_word()) {
}
}
if (from_reg->is_double_fpu()) {
// double to double moves
} else {
// float to float moves
}
if (from_reg->is_double_cpu()) {
#ifdef _LP64
#else
"should both be long and not overlap");
// long to long moves
#endif
#ifdef _LP64
} else if (to_reg->is_double_cpu()) {
// int to int moves
#endif
} else {
// int to int moves
}
} else {
}
}
}
bool unaligned) {
}
if (needs_patching) {
patch_code == lir_patch_none ||
}
if (needs_patching) {
} else {
}
}
} else {
}
// remember the offset of the store. The patching_epilog must be done
// before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
// entered in increasing order.
int offset;
} else {
}
}
}
// the poll may need a register so just pick one that isn't the return register
#ifdef TIERED
// Must move the result to G1
// Must leave proper result in O0,O1 and G1 (TIERED only)
}
#endif // TIERED
}
} else {
}
return offset;
}
void LIR_Assembler::emit_static_call_stub() {
bailout("static call stub overflow");
return;
}
// must be set to -1 at code generation time
__ end_a_stub();
}
if (opr1->is_single_fpu()) {
} else if (opr1->is_double_fpu()) {
} else if (opr1->is_single_cpu()) {
if (opr2->is_constant()) {
case T_INT:
} else {
}
}
break;
case T_OBJECT:
} else {
}
}
break;
default:
break;
}
} else {
if (opr2->is_address()) {
} else {
}
}
} else if (opr1->is_double_cpu()) {
#ifdef _LP64
#else
#endif
} else if (opr2->is_register()) {
#ifdef _LP64
#else
}
#endif
} else {
}
} else if (opr1->is_address()) {
} else {
}
}
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
if (left->is_single_fpu()) {
__ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
} else if (left->is_double_fpu()) {
__ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
} else {
}
} else if (code == lir_cmp_l2i) {
#ifdef _LP64
#else
dst->as_register());
#endif
} else {
}
}
switch (condition) {
default: ShouldNotReachHere();
};
// load up first part of constant before branch
// and do the rest in the delay slot.
}
} else if (opr1->is_constant()) {
} else if (opr1->is_register()) {
} else {
}
} else {
// the sethi has been done above, so just put in the low 10 bits
}
} else {
// can't do anything useful in the delay slot
}
if (opr2->is_constant()) {
} else if (opr2->is_register()) {
} else {
}
}
// Emit code for a two-operand arithmetic LIR operation (lir_add, lir_sub,
// lir_mul, lir_div, ...). Dispatches first on whether `right` is a
// register, then on the kind of `dest`: FP register, double-word (long)
// CPU register, or single-word CPU register.
// NOTE(review): nearly all instruction-emission lines -- including the
// declaration of the FPU width variable `w` -- have been elided in this
// extract; only the dispatch skeleton remains. Confirm against the full
// revision.
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
if (right->is_register()) {
if (dest->is_float_kind()) {
// Choose single- vs double-precision encoding for the FP instruction.
if (right->is_single_fpu()) {
w = FloatRegisterImpl::S;
} else {
w = FloatRegisterImpl::D;
}
switch (code) {
case lir_mul: // fall through
case lir_div: // fall through
default: ShouldNotReachHere();
}
} else if (dest->is_double_cpu()) {
// Long (64-bit) integer arithmetic.
#ifdef _LP64
switch (code) {
case lir_add:
break;
case lir_sub:
break;
default: ShouldNotReachHere();
}
#else
// 32-bit VM: longs live in register pairs.
switch (code) {
case lir_add:
break;
case lir_sub:
break;
default: ShouldNotReachHere();
}
#endif
} else {
// 32-bit integer arithmetic with a register right operand.
switch (code) {
default: ShouldNotReachHere();
}
}
} else {
// Right operand is a constant.
if (dest->is_single_cpu()) {
switch (code) {
default: ShouldNotReachHere();
}
} else {
switch (code) {
default: ShouldNotReachHere();
}
}
}
}
// Pop the top of the (x87-style) FPU operand stack.
// Nothing to do on this target -- intentionally a no-op.
void LIR_Assembler::fpop() {
}
// Emit code for a math intrinsic (lir_sin/lir_tan/lir_cos/lir_sqrt/lir_abs).
// NOTE(review): the per-case emission code has been elided in this
// extract; only the dispatch skeleton remains. Confirm against the full
// revision.
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
switch (code) {
case lir_sin:
case lir_tan:
case lir_cos: {
// Trigonometric intrinsics share one path (emission elided here).
break;
}
case lir_sqrt: {
break;
}
case lir_abs: {
break;
}
default: {
break;
}
}
}
if (right->is_constant()) {
if (dest->is_single_cpu()) {
switch (code) {
default: ShouldNotReachHere();
}
} else {
int simm13 = (int)c;
switch (code) {
case lir_logic_and:
#ifndef _LP64
#endif
break;
case lir_logic_or:
#ifndef _LP64
#endif
break;
case lir_logic_xor:
#ifndef _LP64
#endif
break;
default: ShouldNotReachHere();
}
}
} else {
if (dest->is_single_cpu()) {
switch (code) {
case lir_logic_and: __ and3 (left->as_register(), right->as_register(), dest->as_register()); break;
case lir_logic_xor: __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break;
default: ShouldNotReachHere();
}
} else {
#ifdef _LP64
left->as_register_lo();
right->as_register_lo();
switch (code) {
default: ShouldNotReachHere();
}
#else
switch (code) {
case lir_logic_and:
break;
case lir_logic_or:
break;
case lir_logic_xor:
break;
default: ShouldNotReachHere();
}
#endif
}
}
}
int elem_size = type2aelembytes(t);
switch (elem_size) {
case 1 : return 0;
case 2 : return 1;
case 4 : return 2;
case 8 : return 3;
}
return -1;
}
// Emit code to raise the exception in `exceptionOop` (throwing PC in
// `exceptionPC`); `unwind` selects unwinding out of the method instead
// of dispatching to a local handler.
// NOTE(review): the emission lines have been elided in this extract;
// confirm against the full revision.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) {
if (unwind) {
// unwind path: forward the exception to the caller (emission elided).
} else {
// reuse the debug info from the safepoint poll for the throw op itself
}
}
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
// set up the arraycopy stub information
// always do stub if no type information is available. it's ok if
// the known type isn't loaded since the code sanity checks
// in debug mode and the type isn't required when we know the exact type
// also check that the type is an array type.
// We also, for now, always call the stub if the barrier set requires a
// write_ref_pre barrier (which the stub does, but none of the optimized
// cases currently does).
return;
}
// make sure src and dst are non-null and load array length
}
}
// test src_pos register
}
// test dst_pos register
}
// make sure length isn't negative
}
}
}
}
#ifdef ASSERT
// Sanity check the known type with the incoming class. For the
// primitive case the types must match exactly with src.klass and
// dst.klass each exactly matching the default type. For the
// object array case, if no type check is needed then either the
// dst type is exactly the expected type and the src type is a
// subtype which we can't check or src is the same array as dst
// but not necessarily exactly of type default_type.
if (basic_type != T_OBJECT) {
} else {
}
}
#endif
if (shift == 0) {
} else {
}
if (shift == 0) {
} else {
}
if (basic_type != T_OBJECT) {
if (shift == 0) {
} else {
}
} else {
// oop_arraycopy takes a length in number of elements, so don't scale it.
}
}
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
if (dest->is_single_cpu()) {
#ifdef _LP64
switch (code) {
default: ShouldNotReachHere();
}
} else
#endif
switch (code) {
default: ShouldNotReachHere();
}
} else {
#ifdef _LP64
switch (code) {
case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
default: ShouldNotReachHere();
}
#else
switch (code) {
case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
default: ShouldNotReachHere();
}
#endif
}
}
#ifdef _LP64
switch (code) {
default: ShouldNotReachHere();
}
return;
}
#endif
if (dest->is_single_cpu()) {
switch (code) {
default: ShouldNotReachHere();
}
} else if (dest->is_double_cpu()) {
switch (code) {
default: ShouldNotReachHere();
}
} else {
}
}
if (op->init_check()) {
}
op->header_size(),
op->object_size(),
}
if (UseSlowPath ||
} else {
}
}
if (code == lir_store_check) {
// get instance klass
load(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc), k_RInfo, T_OBJECT, NULL);
// perform the fast part of the checking logic
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
// we always need a stub for the failure case.
klass_RInfo = obj;
}
// We need two temporaries to perform this operation on SPARC,
// so to keep things simple we perform a redundant test here
// Object is null; update methodDataOop
bailout("out of memory building methodDataOop");
return;
}
int mdo_offset_bias = 0;
if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
// The offset is large so bias the mdo by the base of the slot so
// that the ld can use simm13s to reference the slots of the data
}
Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
}
// patching may screw with our temporaries on sparc,
// so let's do it before loading the class
if (k->is_loaded()) {
} else {
}
// get object class
// not a safepoint as obj null check happens earlier
if (op->fast_check()) {
} else {
bool need_slow_path = true;
if (k->is_loaded()) {
need_slow_path = false;
// perform the fast part of the checking logic
RegisterOrConstant(k->super_check_offset()));
} else {
// perform the fast part of the checking logic
}
if (need_slow_path) {
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
}
}
} else if (code == lir_instanceof) {
klass_RInfo = obj;
}
// patching may screw with our temporaries on sparc,
// so let's do it before loading the class
if (k->is_loaded()) {
} else {
}
// get object class
// not a safepoint as obj null check happens earlier
if (op->fast_check()) {
} else {
bool need_slow_path = true;
if (k->is_loaded()) {
need_slow_path = false;
// perform the fast part of the checking logic
dst);
} else {
// perform the fast part of the checking logic
RegisterOrConstant(-1),
dst);
}
if (need_slow_path) {
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
}
}
} else {
}
}
#ifdef _LP64
#else
// move high and low halves of long values into single registers
#endif
// perform the compare and swap operation
// generate condition code - if the swap succeeded, t2 ("new value" reg) was
// overwritten with the original value in "addr" and will be equal to t1.
#ifdef _LP64
} else
#endif
{
}
} else {
}
}
// Switch the FPU to 24-bit precision mode (an x86 concept).
// NOTE(review): empty body in this extract -- likely unused or
// unimplemented on this target; confirm against the full revision.
void LIR_Assembler::set_24bit_FPU() {
}
// Restore the FPU to its default precision/control state (an x86 concept).
// NOTE(review): empty body in this extract -- likely unused or
// unimplemented on this target; confirm against the full revision.
void LIR_Assembler::reset_FPU() {
}
// Emit a breakpoint/trap instruction for debugging.
// NOTE(review): the emission appears to have been elided in this extract;
// confirm against the full revision.
void LIR_Assembler::breakpoint() {
}
}
}
// compute pointer to BasicLock
} else {
}
}
// obj may not be an oop
if (UseFastLocking) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
// add debug info for NullPointerException only if one is possible
}
} else {
// always do slow locking
// note: the slow locking code could be inlined here, however if we use
// slow locking, speed doesn't matter anyway and this solution is
// simpler and requires less duplicated code - additionally, the
// slow locking code is the same in either case which simplifies
// debugging
}
} else {
if (UseFastLocking) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
} else {
// always do slow unlocking
// note: the slow unlocking code could be inlined here, however if we use
// slow unlocking, speed doesn't matter anyway and this solution is
// simpler and requires less duplicated code - additionally, the
// slow unlocking code is the same in either case which simplifies
// debugging
}
}
}
// Update counter for all call types
bailout("out of memory building methodDataOop");
return;
}
int mdo_offset_bias = 0;
data->size_in_bytes())) {
// The offset is large so bias the mdo by the base of the slot so
// that the ld can use simm13s to reference the slots of the data
}
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
// Perform additional virtual call profiling for invokevirtual and
// invokeinterface bytecodes
// We know the type that will be seen at this call site; we can
// statically update the methodDataOop rather than needing to do
// dynamic tests on the receiver type
// NOTE: we should probably put a lock around this search to
// avoid collisions by concurrent compilations
uint i;
for (i = 0; i < VirtualCallData::row_limit(); i++) {
return;
}
}
// Receiver type not found in profile data; select an empty slot
// Note that this is less efficient than it should be because it
// always does a write to the receiver part of the
// VirtualCallData rather than just the first time
for (i = 0; i < VirtualCallData::row_limit(); i++) {
return;
}
}
} else {
uint i;
for (i = 0; i < VirtualCallData::row_limit(); i++) {
// See if the receiver is receiver[n].
}
// Didn't find receiver; find next empty slot and fill it in
for (i = 0; i < VirtualCallData::row_limit(); i++) {
}
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
}
} else {
// Static call
}
}
// Align the current code position so backward-branch targets start on a
// favorable boundary.
// NOTE(review): the alignment emission appears to have been elided in
// this extract; confirm against the full revision.
void LIR_Assembler::align_backward_branch_target() {
}
// make sure we are expecting a delay
// this has the side effect of clearing the delay state
// so we can use _masm instead of _masm->delayed() to do the
// code generation.
// make sure we only emit one instruction
int offset = code_offset();
#ifdef ASSERT
}
"only one instruction can go in a delay slot");
#endif
// we may also be emitting the call info for the instruction
// which we are the delay slot of.
if (call_info) {
}
if (VerifyStackAtCalls) {
}
}
if (left->is_single_cpu()) {
} else if (left->is_single_fpu()) {
} else if (left->is_double_fpu()) {
} else {
#ifdef _LP64
#else
#endif
}
}
// Exchange FPU stack slot `i` with the top of stack (an x87 concept).
// NOTE(review): empty body in this extract -- likely unimplemented on
// this target; confirm against the full revision.
void LIR_Assembler::fxch(int i) {
}
// Push FPU stack slot `i` onto the top of stack (an x87 concept).
// NOTE(review): empty body in this extract -- likely unimplemented on
// this target; confirm against the full revision.
void LIR_Assembler::fld(int i) {
}
// Mark FPU stack slot `i` as free (an x87 concept).
// NOTE(review): empty body in this extract -- likely unimplemented on
// this target; confirm against the full revision.
void LIR_Assembler::ffree(int i) {
}
// if tmp is invalid, then the function being called doesn't destroy the thread
}
}
}
#ifdef ASSERT
__ verify_thread();
#endif // ASSERT
}
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
#ifdef _LP64
#endif
// (extended to allow indexed as well as constant displaced for JSR-166)
}
} else {
}
int null_check_offset = -1;
// G4 is high half, G5 is low half
if (VM_Version::v9_instructions_work()) {
// clear the top bits of G5, and scale up G4
// combine the two halves into the 64 bits of G4
} else {
}
} else {
} else {
}
}
if (VM_Version::v9_instructions_work()) {
} else {
}
} else {
} else {
}
// G4 is high half, G5 is low half
}
} else {
}
}
} else {
// use normal move for all other volatiles since they don't need
// special handling to remain atomic.
}
}
// Emit a full memory barrier.
void LIR_Assembler::membar() {
// only StoreLoad membars are ever explicitly needed on sparcs in TSO mode
// NOTE(review): the membar emission itself appears to have been elided
// in this extract; confirm against the full revision.
}
// Acquire barrier: under the SPARC TSO memory model loads are already
// ordered before subsequent memory operations, so no instruction is
// required -- this is deliberately a no-op.
void LIR_Assembler::membar_acquire() {
}
// Release barrier: under the SPARC TSO memory model stores are already
// ordered after prior memory operations, so no instruction is required --
// this is deliberately a no-op.
void LIR_Assembler::membar_release() {
}
// Macro to Pack two sequential registers containing 32 bit values
// into a single 64 bit register.
// rs and rs->successor() are packed into rd
// rd and rs may be the same register.
// Note: rs and rs->successor() are destroyed.
}
// Macro to unpack a 64 bit value in a register into
// two sequential registers.
// rd is unpacked into rd and rd->successor()
}
assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet");
}
}
case lir_cond_float_branch:
case lir_branch: {
// we'd like to be able to pull following instructions into
// this slot but we don't know enough to do it safely yet so
// only optimize block to block control flow.
// swap previous instruction into delay slot
#ifndef PRODUCT
if (LIRTracePeephole) {
}
#endif
continue;
}
}
if (!delay_op) {
}
break;
}
case lir_static_call:
case lir_virtual_call:
case lir_icvirtual_call:
case lir_optvirtual_call: {
// Only moves without info can be put into the delay slot.
// Also don't allow the setup of the receiver in the delay
// slot for vtable calls.
#ifndef PRODUCT
if (LIRTracePeephole) {
}
#endif
continue;
}
if (!delay_op) {
}
break;
}
}
}
}