/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
//------------------------------------------------------------
if (opr->is_constant()) {
case T_INT: {
}
default:
return false;
}
}
return false;
}
case lir_null_check:
return true;
case lir_add:
case lir_ushr:
case lir_shr:
case lir_shl:
// integer shifts and adds are always one instruction
case lir_move: {
// this works around a problem where moves with the same src and dst
// end up in the delay slot; the assembler then swallows the mov
// since it has no effect, and complains because the delay slot
// is empty. Returning false stops the optimizer from putting this in
// the delay slot.
return false;
}
// don't put moves involving oops into the delay slot since the VerifyOops code
// will make it much larger than a single instruction.
if (VerifyOops) {
return false;
}
return false;
}
if (UseCompressedOops) {
if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
}
if (dst->is_register()) {
return !PatchALot;
} else if (src->is_single_stack()) {
return true;
}
}
if (src->is_register()) {
return !PatchALot;
} else if (dst->is_single_stack()) {
return true;
}
}
if (dst->is_register() &&
return true;
}
return false;
}
default:
return false;
}
}
return FrameMap::O0_oop_opr;
}
}
}
// inline cache check: the inline cached class is in G5_inline_cache_reg(G5);
// we fetch the class of the receiver (O0) and compare it with the cached class.
// If they do not match we jump to slow case.
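// A minimal sketch of the check described above (an assumption, not
// necessarily the exact emission sequence; "ic_miss" is a hypothetical label):
//   __ load_klass(O0, G3_scratch);                   // class of the receiver
//   __ cmp(G3_scratch, G5_inline_cache_reg);         // compare with the cached class
//   __ brx(Assembler::notEqual, false, Assembler::pn, ic_miss);
//   __ delayed()->nop();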
return offset;
}
// On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
//
// 1. Create a new compiled activation.
// 2. Initialize local variables in the compiled activation. The expression stack must be empty
// at the osr_bci; it is not initialized.
// 3. Jump to the continuation address in compiled code to resume execution.
// OSR entry point
// Create a frame for the compiled activation.
// OSR buffer is
//
// locals[nlocals-1..0]
// monitors[number_of_locks-1..0]
//
// locals is a direct copy of the interpreter frame, so in the osr buffer
// the first slot in the local array is the last local from the interpreter
// and the last slot is local[0] (receiver) from the interpreter
//
// Similarly with locks. The first lock slot in the osr buffer is the nth lock
// from the interpreter frame; the nth lock slot in the osr buffer is the 0th lock
// in the interpreter frame (the method lock if a sync method)
// Initialize monitors in the compiled activation.
// I0: pointer to osr buffer
//
// All other registers are dead at this point and the locals will be
// copied into place by code emitted in the IR.
// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
// the OSR buffer using 2 word entries: first the lock and then
// the oop.
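// A rough sketch of one iteration of the copy loop below (an assumption:
// the exact offsets and temporaries may differ from the original code):
//   int slot_offset = monitor_offset - (i * 2 * BytesPerWord);    // 2-word entries
//   __ ld_ptr(OSR_buf, slot_offset + 0,            O7);           // the BasicLock
//   __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
//   __ ld_ptr(OSR_buf, slot_offset + BytesPerWord, O7);           // the oop
//   __ st_ptr(O7, frame_map()->address_for_monitor_object(i));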
for (int i = 0; i < number_of_locks; i++) {
#ifdef ASSERT
// verify the interpreter's monitor has a non-null object
{
Label L;
}
#endif // ASSERT
// Copy the lock field into the compiled activation.
}
}
}
// Optimized Library calls
// This is the fast version of java.lang.String.compare; it has no
// OSR entry and therefore we generate a slow version for OSRs
void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
{
// Get a pointer to the first character of string0 in tmp0
// and get string0.length() in str0
// Get a pointer to the first character of string1 in tmp1
// and get string1.length() in str1
// Also, get string0.length()-string1.length() in
// o7 and get the condition code set
// Note: some instructions have been hoisted for better instruction scheduling
if (java_lang_String::has_offset_field()) {
} else {
}
// str1 may be null
if (java_lang_String::has_offset_field()) {
} else {
}
}
{
// Compute the minimum of the string lengths, scale it and store it in limit
// If either string is empty (or both of them) the result is the difference in lengths
}
{
// Neither string is empty
// Shift base0 and base1 to the end of the arrays, negate limit
}
// If strings are equal up to min length, return the length difference.
// Otherwise, return the difference between the first mismatched chars.
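// A sketch of the compare loop implied by the comment above (an assumption,
// not the original instruction scheduling):
//   Label Lloop, Ldone;
//   __ bind(Lloop);
//   __ lduh(base0, limit, tmp0);              // next char of string0
//   __ lduh(base1, limit, tmp1);              // next char of string1
//   __ subcc(tmp0, tmp1, O7);                 // difference of the two chars
//   __ br(Assembler::notZero, false, Assembler::pn, Ldone);    // first mismatch wins
//   __ delayed()->addcc(limit, sizeof(jchar), limit);          // step the negated index
//   __ br(Assembler::notZero, true, Assembler::pt, Lloop);
//   __ delayed()->nop();
//   // fall through: equal up to min length, so the result is the length difference
//   __ bind(Ldone);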
}
// --------------------------------------------------------------------------------------------
if (!GenerateSynchronizationCode) return;
// compute pointer to BasicLock
}
else {
}
// unlock object
// _slow_case_stubs->append(slow_case);
// temporary fix: must be created after the exception handler, therefore as a call stub
if (UseFastLocking) {
// try inlined fast unlocking first, revert to slow locking if it fails
// note: lock_reg points to the displaced header since the displaced header offset is 0!
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
} else {
// always do slow unlocking
// note: the slow unlocking code could be inlined here, however if we use
// slow unlocking, speed doesn't matter anyway and this solution is
// simpler and requires less duplicated code - additionally, the
// slow unlocking code is the same in either case which simplifies
// debugging
}
// done
}
// if the last instruction is a call (typically to do a throw which
// is coming at the end after block reordering) the return address
// must still point into the code area in order to avoid assertion
// failures when searching for the corresponding bci => add a nop
// (was bug 5/14/1999 - gri)
__ nop();
// generate code for exception handler
if (handler_base == NULL) {
// not enough space left for the handler
bailout("exception handler overflow");
return -1;
}
__ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
__ end_a_stub();
return offset;
}
// Emit the code to remove the frame from the stack in the exception
// unwind path.
#ifndef PRODUCT
if (CommentedAssembly) {
}
#endif
// Fetch the exception from TLS and clear out exception related thread state
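// A sketch of what the fetch/clear typically looks like (an assumption):
//   __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
//   __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
//   __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));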
}
// Perform needed unlocking
if (method()->is_synchronized()) {
}
__ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
}
}
// dispatch to the unwind logic
// Emit the slow path assembly
}
return offset;
}
// if the last instruction is a call (typically to do a throw which
// is coming at the end after block reordering) the return address
// must still point into the code area in order to avoid assertion
// failures when searching for the corresponding bci => add a nop
// (was bug 5/14/1999 - gri)
__ nop();
// generate code for deopt handler
if (handler_base == NULL) {
// not enough space left for the handler
bailout("deopt handler overflow");
return -1;
}
__ end_a_stub();
return offset;
}
if (o == NULL) {
} else {
}
}
// Allocate a new index in oop table to hold the oop once it's been patched
// NULL will be dynamically patched later and the patched value may be large. We must
// therefore generate the sethi/add pair as a placeholder for the eventual value.
}
} else {
}
// convert division by a power of two into some shifts and logical operations
if (divisor == 2) {
} else {
}
return;
} else {
if (divisor == 2) {
} else {
}
return;
}
}
if (!VM_Version::v9_instructions_work()) {
// v9 doesn't require these nops
}
} else {
}
} else {
}
}
}
#ifdef ASSERT
#endif
case lir_cond_less: acond = (is_unordered ? Assembler::f_unorderedOrLess : Assembler::f_less); break;
case lir_cond_greater: acond = (is_unordered ? Assembler::f_unorderedOrGreater : Assembler::f_greater); break;
case lir_cond_lessEqual: acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual : Assembler::f_lessOrEqual); break;
case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
default : ShouldNotReachHere();
};
if (!VM_Version::v9_instructions_work()) {
}
} else {
default: ShouldNotReachHere();
};
// sparc has different condition codes for testing 32-bit
// vs. 64-bit values. We could always test xcc if we could
// guarantee that 32-bit loads always sign extended, but that isn't
// true and since sign extension isn't free, it would impose a
// slight cost.
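// For illustration (an assumption about the shape, not the original code):
// a 64-bit comparison branches on xcc while a 32-bit one uses icc, e.g.
//   __ bp(acond, false, Assembler::xcc, Assembler::pn, *(op->label()));  // T_LONG
//   __ br(acond, false, Assembler::pn, *(op->label()));                  // T_INT (icc)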
#ifdef _LP64
} else
#endif
}
// The peephole pass fills the delay slot
}
switch(code) {
#ifdef _LP64
#else
#endif
break;
}
}
break;
}
Label L;
// result must be 0 if value is NaN; test by comparing value to itself
if (!VM_Version::v9_instructions_work()) {
}
// move integer result from float register to int register
break;
}
#ifdef _LP64
#else
#endif
break;
}
break;
}
int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
break;
}
break;
}
default: ShouldNotReachHere();
}
}
// do nothing since all instructions are word aligned on sparc
}
// The peephole pass fills the delay slot, add_call_info is done in
// LIR_Assembler::emit_delay.
}
// The peephole pass fills the delay slot, add_call_info is done in
// LIR_Assembler::emit_delay.
}
} else {
// This will generate 2 instructions
// ld_ptr, set_hi, set
}
// the peephole pass fills the delay slot
}
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
int store_offset;
// for offsets larger than a simm13 we set up the offset in O7
} else {
}
store_offset = code_offset();
switch (type) {
case T_BOOLEAN: // fall through
case T_LONG :
#ifdef _LP64
} else {
}
#else
#endif
break;
case T_ADDRESS:
break;
case T_ARRAY : // fall through
case T_OBJECT:
{
if (UseCompressedOops && !wide) {
store_offset = code_offset();
} else {
}
break;
}
case T_DOUBLE:
{
// split unaligned stores
} else {
}
break;
}
default : ShouldNotReachHere();
}
}
return store_offset;
}
int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
}
switch (type) {
case T_BOOLEAN: // fall through
case T_LONG :
#ifdef _LP64
#else
#endif
break;
case T_ADDRESS:
break;
case T_ARRAY : // fall through
case T_OBJECT:
{
if (UseCompressedOops && !wide) {
store_offset = code_offset();
} else {
}
break;
}
default : ShouldNotReachHere();
}
return store_offset;
}
int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
int load_offset;
// for offsets larger than a simm13 we set up the offset in O7
} else {
load_offset = code_offset();
switch(type) {
case T_BOOLEAN: // fall through
case T_LONG :
if (!unaligned) {
#ifdef _LP64
#else
"must be sequential");
#endif
} else {
#ifdef _LP64
#else
} else {
}
#endif
}
break;
case T_ARRAY : // fall through
case T_OBJECT:
{
if (UseCompressedOops && !wide) {
} else {
}
break;
}
case T_DOUBLE:
{
// split unaligned loads
} else {
}
break;
}
default : ShouldNotReachHere();
}
}
}
return load_offset;
}
switch(type) {
case T_BOOLEAN: // fall through
case T_ARRAY : // fall through
case T_OBJECT:
{
if (UseCompressedOops && !wide) {
} else {
}
break;
}
case T_LONG :
#ifdef _LP64
#else
"must be sequential");
#endif
break;
default : ShouldNotReachHere();
}
}
return load_offset;
}
switch (c->type()) {
case T_INT:
case T_FLOAT: {
if (value == 0) {
} else {
}
break;
}
case T_ADDRESS: {
if (value == 0) {
} else {
}
break;
}
case T_OBJECT: {
break;
}
case T_LONG:
case T_DOUBLE: {
if (value_lo == 0) {
} else {
}
if (value_hi == 0) {
} else {
}
break;
}
default:
}
}
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
switch (c->type()) {
case T_INT:
case T_FLOAT:
case T_ADDRESS: {
if (value == 0) {
}
} else {
}
break;
}
case T_LONG:
case T_DOUBLE: {
if (value_lo == 0) {
} else {
}
if (value_hi == 0) {
} else {
}
break;
}
case T_OBJECT: {
} else {
}
// handle either reg+reg or reg+disp address
} else {
}
break;
}
default:
}
}
}
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
switch (c->type()) {
case T_INT:
case T_ADDRESS:
{
if (to_reg->is_single_cpu()) {
} else {
}
}
break;
case T_LONG:
{
if (to_reg->is_double_cpu()) {
#ifdef _LP64
#else
#endif
#ifdef _LP64
} else if (to_reg->is_single_cpu()) {
#endif
} else {
}
}
break;
case T_OBJECT:
{
if (patch_code == lir_patch_none) {
} else {
}
}
break;
case T_FLOAT:
{
if (const_addr == NULL) {
bailout("const section overflow");
break;
}
if (to_reg->is_single_fpu()) {
} else {
}
}
break;
case T_DOUBLE:
{
if (const_addr == NULL) {
bailout("const section overflow");
break;
}
if (to_reg->is_double_fpu()) {
} else {
#ifdef _LP64
#else
#endif
}
}
break;
default:
}
}
if (index->is_illegal()) {
} else {
}
}
switch (type) {
case T_INT:
case T_FLOAT: {
break;
}
case T_OBJECT: {
break;
}
case T_LONG:
case T_DOUBLE: {
break;
}
default:
}
}
}
}
}
if (needs_patching) {
patch_code == lir_patch_none ||
}
if (needs_patching) {
} else {
}
}
} else {
}
// remember the offset of the load. The patching_epilog must be done
// before the call to add_debug_info, otherwise the PcDescs don't get
// entered in increasing order.
} else {
}
}
}
if (VM_Version::has_v9()) {
}
}
if (VM_Version::has_v9()) {
}
}
if (src->is_single_word()) {
} else if (src->is_double_word()) {
}
}
if (dest->is_single_word()) {
} else if (dest->is_double_word()) {
}
}
if (from_reg->is_double_fpu()) {
// double to double moves
} else {
// float to float moves
}
if (from_reg->is_double_cpu()) {
#ifdef _LP64
#else
"should both be long and not overlap");
// long to long moves
#endif
#ifdef _LP64
} else if (to_reg->is_double_cpu()) {
// int to int moves
#endif
} else {
// int to int moves
}
} else {
}
}
}
}
if (needs_patching) {
patch_code == lir_patch_none ||
}
if (needs_patching) {
} else {
}
}
} else {
}
// remember the offset of the store. The patching_epilog must be done
// before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
// entered in increasing order.
int offset;
} else {
}
}
}
// the poll may need a register so just pick one that isn't the return register
// Must move the result to G1
// Must leave proper result in O0,O1 and G1 (TIERED only)
#ifdef ASSERT
// mangle it so any problems will show up
#endif
}
#endif // TIERED
}
} else {
}
return offset;
}
bailout("static call stub overflow");
return;
}
// must be set to -1 at code generation time
__ end_a_stub();
}
if (opr1->is_single_fpu()) {
} else if (opr1->is_double_fpu()) {
} else if (opr1->is_single_cpu()) {
if (opr2->is_constant()) {
case T_INT:
} else {
}
}
break;
case T_OBJECT:
} else {
}
}
break;
default:
break;
}
} else {
if (opr2->is_address()) {
} else {
}
}
} else if (opr1->is_double_cpu()) {
#ifdef _LP64
#else
#endif
} else if (opr2->is_register()) {
#ifdef _LP64
#else
}
#endif
} else {
}
} else if (opr1->is_address()) {
} else {
}
}
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
if (left->is_single_fpu()) {
__ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
} else if (left->is_double_fpu()) {
__ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
} else {
}
} else if (code == lir_cmp_l2i) {
#ifdef _LP64
#else
dst->as_register());
#endif
} else {
}
}
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
switch (condition) {
default: ShouldNotReachHere();
};
// load up first part of constant before branch
// and do the rest in the delay slot.
}
} else if (opr1->is_constant()) {
} else if (opr1->is_register()) {
} else {
}
#ifdef _LP64
} else
#endif
} else {
// the sethi has been done above, so just put in the low 10 bits
}
} else {
// can't do anything useful in the delay slot
}
if (opr2->is_constant()) {
} else if (opr2->is_register()) {
} else {
}
}
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
if (right->is_register()) {
if (dest->is_float_kind()) {
if (right->is_single_fpu()) {
w = FloatRegisterImpl::S;
} else {
w = FloatRegisterImpl::D;
}
switch (code) {
case lir_mul: // fall through
case lir_div: // fall through
default: ShouldNotReachHere();
}
} else if (dest->is_double_cpu()) {
#ifdef _LP64
switch (code) {
case lir_add:
break;
case lir_sub:
break;
default: ShouldNotReachHere();
}
#else
switch (code) {
case lir_add:
break;
case lir_sub:
break;
default: ShouldNotReachHere();
}
#endif
} else {
switch (code) {
default: ShouldNotReachHere();
}
}
} else {
if (dest->is_single_cpu()) {
switch (code) {
default: ShouldNotReachHere();
}
} else {
switch (code) {
default: ShouldNotReachHere();
}
}
}
}
// do nothing
}
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
switch (code) {
case lir_sin:
case lir_tan:
case lir_cos: {
break;
}
case lir_sqrt: {
break;
}
case lir_abs: {
break;
}
default: {
break;
}
}
}
if (right->is_constant()) {
if (dest->is_single_cpu()) {
switch (code) {
default: ShouldNotReachHere();
}
} else {
int simm13 = (int)c;
switch (code) {
case lir_logic_and:
#ifndef _LP64
#endif
break;
case lir_logic_or:
#ifndef _LP64
#endif
break;
case lir_logic_xor:
#ifndef _LP64
#endif
break;
default: ShouldNotReachHere();
}
}
} else {
if (dest->is_single_cpu()) {
switch (code) {
case lir_logic_and: __ and3 (left->as_register(), right->as_register(), dest->as_register()); break;
case lir_logic_xor: __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break;
default: ShouldNotReachHere();
}
} else {
#ifdef _LP64
left->as_register_lo();
right->as_register_lo();
switch (code) {
default: ShouldNotReachHere();
}
#else
switch (code) {
case lir_logic_and:
break;
case lir_logic_or:
break;
case lir_logic_xor:
break;
default: ShouldNotReachHere();
}
#endif
}
}
}
switch (elem_size) {
case 1 : return 0;
case 2 : return 1;
case 4 : return 2;
case 8 : return 3;
}
return -1;
}
// reuse the debug info from the safepoint poll for the throw op itself
}
}
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
#ifdef _LP64
// higher 32bits must be null
#endif
// set up the arraycopy stub information
// always do stub if no type information is available. it's ok if
// the known type isn't loaded since the code sanity checks
// in debug mode and the type isn't required when we know the exact type
// also check that the type is an array type.
} else {
#ifndef PRODUCT
if (PrintC1Statistics) {
}
#endif
}
if (copyfunc_addr != NULL) {
} else {
}
return;
}
// make sure src and dst are non-null and load array length
}
}
// test src_pos register
}
// test dst_pos register
}
// make sure length isn't negative
}
}
}
// We don't know the array types are compatible
if (basic_type != T_OBJECT) {
// Simple test for basic type arrays
if (UseCompressedOops) {
// We don't need to decode because we just need to compare
} else {
}
} else {
// For object arrays, if src is a sub class of dst then we can
// safely do the copy.
__ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);
// src is not a sub class of dst so we have to do a
// per-element check.
// Check that at least both of them are object arrays.
}
}
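// Sketch of the simple test mentioned above for primitive arrays
// (an assumption: register choices and the predict hint are illustrative only):
//   __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
//   __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
//   __ cmp(tmp, tmp2);
//   __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
//   __ delayed()->nop();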
if (shift == 0) {
} else {
}
if (shift == 0) {
} else {
}
#ifndef PRODUCT
if (PrintC1Statistics) {
}
#endif
#ifndef PRODUCT
if (PrintC1Statistics) {
}
#endif
} else {
}
}
}
#ifdef ASSERT
// Sanity check the known type with the incoming class. For the
// primitive case the types must match exactly with src.klass and
// dst.klass each exactly matching the default type. For the
// object array case, if no type check is needed then either the
// dst type is exactly the expected type and the src type is a
// subtype which we can't check or src is the same array as dst
// but not necessarily exactly of type default_type.
if (UseCompressedOops) {
// tmp holds the default type. It currently comes uncompressed after the
// load of a constant, so encode it.
// load the raw value of the dst klass, since we will be comparing
// uncompressed values directly.
if (basic_type != T_OBJECT) {
// load the raw value of the src klass.
} else {
}
} else {
if (basic_type != T_OBJECT) {
} else {
}
}
}
#endif
#ifndef PRODUCT
if (PrintC1Statistics) {
}
#endif
if (shift == 0) {
} else {
}
if (shift == 0) {
} else {
}
const char *name;
address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
// arraycopy stubs take a length in number of elements, so don't scale it.
}
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
if (dest->is_single_cpu()) {
#ifdef _LP64
switch (code) {
default: ShouldNotReachHere();
}
} else
#endif
switch (code) {
default: ShouldNotReachHere();
}
} else {
#ifdef _LP64
switch (code) {
case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
default: ShouldNotReachHere();
}
#else
switch (code) {
case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
default: ShouldNotReachHere();
}
#endif
}
}
#ifdef _LP64
switch (code) {
default: ShouldNotReachHere();
}
return;
}
#endif
if (dest->is_single_cpu()) {
switch (code) {
default: ShouldNotReachHere();
}
} else if (dest->is_double_cpu()) {
switch (code) {
default: ShouldNotReachHere();
}
} else {
}
}
if (op->init_check()) {
}
op->header_size(),
op->object_size(),
}
if (UseSlowPath ||
} else {
}
}
uint i;
for (i = 0; i < VirtualCallData::row_limit(); i++) {
// See if the receiver is receiver[n].
}
// Didn't find receiver; find next empty slot and fill it in
for (i = 0; i < VirtualCallData::row_limit(); i++) {
}
}
if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
// The offset is large so bias the mdo by the base of the slot so
// that the ld can use simm13s to reference the slots of the data
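// Roughly (an assumption; the exact code may differ):
//   mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
//   __ set(mdo_offset_bias, O7);
//   __ add(mdo, O7, mdo);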
}
}
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
// we always need a stub for the failure case.
klass_RInfo = obj;
}
int mdo_offset_bias = 0;
if (op->should_profile()) {
if (mdo_offset_bias > 0) {
}
Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
} else {
}
// patching may screw with our temporaries on sparc,
// so let's do it before loading the class
if (k->is_loaded()) {
} else {
}
// get object class
// not a safepoint as obj null check happens earlier
if (op->fast_check()) {
} else {
bool need_slow_path = true;
if (k->is_loaded()) {
need_slow_path = false;
// perform the fast part of the checking logic
RegisterOrConstant(k->super_check_offset()));
} else {
// perform the fast part of the checking logic
}
if (need_slow_path) {
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
// Fall through to success case
}
}
if (op->should_profile()) {
if (mdo_offset_bias > 0) {
}
// Jump over the failure case
// Cast failure case
if (mdo_offset_bias > 0) {
}
Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
}
}
if (code == lir_store_check) {
// check if it needs to be profiled
int mdo_offset_bias = 0;
if (op->should_profile()) {
}
if (op->should_profile()) {
if (mdo_offset_bias > 0) {
}
Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
} else {
}
// get instance klass
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
// fall through to the success case
if (op->should_profile()) {
if (mdo_offset_bias > 0) {
}
// Cast failure case
if (mdo_offset_bias > 0) {
}
Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
}
} else if (code == lir_checkcast) {
} else if (code == lir_instanceof) {
} else {
}
}
#ifdef _LP64
// perform the compare and swap operation
// generate condition code - if the swap succeeded, t2 ("new value" reg) was
// overwritten with the original value in "addr" and will be equal to t1.
#else
// move high and low halves of long values into single registers
// perform the compare and swap operation
// generate condition code - if the swap succeeded, t2 ("new value" reg) was
// overwritten with the original value in "addr" and will be equal to t1.
// Produce icc flag for 32bit.
#endif
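// For illustration, generating the condition code after a 64-bit CAS amounts
// to something like (an assumption, not the original sequence):
//   __ casx(addr, t1, t2);   // if *addr == t1 then *addr <- t2; t2 <- old *addr
//   __ cmp(t1, t2);          // equal iff the swap succeeded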
if (UseCompressedOops) {
} else {
}
} else {
}
} else {
}
}
}
}
}
}
}
// compute pointer to BasicLock
} else {
}
}
// obj may not be an oop
if (UseFastLocking) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
// add debug info for NullPointerException only if one is possible
}
} else {
// always do slow locking
// note: the slow locking code could be inlined here, however if we use
// slow locking, speed doesn't matter anyway and this solution is
// simpler and requires less duplicated code - additionally, the
// slow locking code is the same in either case which simplifies
// debugging
}
} else {
if (UseFastLocking) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
} else {
// always do slow unlocking
// note: the slow unlocking code could be inlined here, however if we use
// slow unlocking, speed doesn't matter anyway and this solution is
// simpler and requires less duplicated code - additionally, the
// slow unlocking code is the same in either case which simplifies
// debugging
}
}
}
// Update counter for all call types
#ifdef _LP64
#else
#endif
int mdo_offset_bias = 0;
data->size_in_bytes())) {
// The offset is large so bias the mdo by the base of the slot so
// that the ld can use simm13s to reference the slots of the data
}
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
// Perform additional virtual call profiling for invokevirtual and
// invokeinterface bytecodes
!callee_is_static && // required for optimized MH invokes
// We know the type that will be seen at this call site; we can
// statically update the methodDataOop rather than needing to do
// dynamic tests on the receiver type
// NOTE: we should probably put a lock around this search to
// avoid collisions by concurrent compilations
uint i;
for (i = 0; i < VirtualCallData::row_limit(); i++) {
return;
}
}
// Receiver type not found in profile data; select an empty slot
// Note that this is less efficient than it should be because it
// always does a write to the receiver part of the
// VirtualCallData rather than just the first time
for (i = 0; i < VirtualCallData::row_limit(); i++) {
return;
}
}
} else {
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
}
} else {
// Static call
}
}
}
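// For reference, bumping a single profile counter cell generally looks like
// this (an illustrative sketch, not necessarily this method's exact code):
//   __ ld_ptr(counter_addr, tmp1);
//   __ add(tmp1, DataLayout::counter_increment, tmp1);
//   __ st_ptr(tmp1, counter_addr);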
// make sure we are expecting a delay
// this has the side effect of clearing the delay state
// so we can use _masm instead of _masm->delayed() to do the
// code generation.
// make sure we only emit one instruction
#ifdef ASSERT
}
"only one instruction can go in a delay slot");
#endif
// we may also be emitting the call info for the instruction
// which we are the delay slot of.
if (call_info) {
}
if (VerifyStackAtCalls) {
}
}
if (left->is_single_cpu()) {
} else if (left->is_single_fpu()) {
} else if (left->is_double_fpu()) {
} else {
#ifdef _LP64
#else
#endif
}
}
}
}
}
// if tmp is invalid, then the function being called doesn't destroy the thread
}
}
}
#ifdef ASSERT
__ verify_thread();
#endif // ASSERT
}
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
#ifdef _LP64
#endif
// (extended to allow indexed as well as constant displaced for JSR-166)
}
} else {
}
// G4 is high half, G5 is low half
if (VM_Version::v9_instructions_work()) {
// clear the top bits of G5, and scale up G4
// combine the two halves into the 64 bits of G4
} else {
}
} else {
} else {
}
}
if (VM_Version::v9_instructions_work()) {
} else {
}
} else {
} else {
}
// G4 is high half, G5 is low half
}
} else {
}
}
} else {
// use normal move for all other volatiles since they don't need
// special handling to remain atomic.
}
}
// only StoreLoad membars are ever explicitly needed on sparcs in TSO mode
}
// no-op on TSO
}
// no-op on TSO
}
// no-op
//__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}
// no-op
//__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}
// no-op
//__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}
}
// Pack two sequential registers containing 32 bit values
// into a single 64 bit register.
// src and src->successor() are packed into dst
// src and dst may be the same register.
// Note: src is destroyed
}
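// A sketch of the pack64 packing described above (an assumption about the
// exact shifts; rs/rd are illustrative names):
//   __ sllx(rs, 32, rs);                  // move the high 32 bits into place (destroys src)
//   __ srl (rs->successor(), 0, rd);      // zero-extend the low 32 bits
//   __ or3 (rs, rd, rd);                  // combine into one 64 bit value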
// Unpack a 64 bit value in a register into
// two sequential registers.
// src is unpacked into dst and dst->successor()
}
assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet");
}
}
case lir_cond_float_branch:
case lir_branch: {
// we'd like to be able to pull following instructions into
// this slot but we don't know enough to do it safely yet, so
// we only optimize block-to-block control flow.
// swap previous instruction into delay slot
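// Roughly (an assumption about the mechanics, not the exact code):
//   LIR_Op* prev = inst->at(i - 1);
//   inst->at_put(i - 1, op);                             // branch moves up one slot
//   inst->at_put(i, new LIR_OpDelay(prev, op->info()));  // prev now fills the delay slot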
#ifndef PRODUCT
if (LIRTracePeephole) {
}
#endif
continue;
}
}
if (!delay_op) {
}
break;
}
case lir_static_call:
case lir_virtual_call:
case lir_icvirtual_call:
case lir_optvirtual_call:
case lir_dynamic_call: {
// Only moves without info can be put into the delay slot.
// Also don't allow the setup of the receiver in the delay
// slot for vtable calls.
#ifndef PRODUCT
if (LIRTracePeephole) {
}
#endif
} else {
i++;
}
// It's done here instead of in LIRGenerator because there's
// such a mismatch between the single-reg and double-reg
// calling conventions.
}
call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
} else {
call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
}
inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
}
#endif
break;
}
}
}
}
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
#ifdef _LP64
#else
#endif
} else {
}
}