/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping. They allow
// fast versions of NegF/NegD and AbsF/AbsD.

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // of a 128-bit operand for SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}
// Buffer for 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(2*64bits)

// Static initialization during VM startup.
static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
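// Usage sketch (illustrative, not from the original file): the pools serve as
// 128-bit memory operands for SSE instructions, e.g. for AbsF/NegF on xmm0:
//   __ andps(xmm0, ExternalAddress((address)float_signmask_pool));   // abs: clear sign bit
//   __ xorps(xmm0, ExternalAddress((address)float_signflip_pool));   // neg: flip sign bit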
NEEDS_CLEANUP // remove these definitions?
}
}
}
}
if (opr->is_constant()) {
case T_INT: {
return true;
}
default:
return false;
}
}
return false;
}
return FrameMap::receiver_opr;
}
}
//--------------fpu register translations-----------------------
if (const_addr == NULL) {
bailout("const section overflow");
} else {
return const_addr;
}
}
if (const_addr == NULL) {
bailout("const section overflow");
} else {
return const_addr;
}
}
}
}
}
}
}
}
}
if (opr->is_single_cpu()) {
} else if (opr->is_double_cpu()) {
} else if (opr->is_constant()) {
} else {
}
} else {
}
}
if (opr->is_single_cpu()) {
} else {
}
}
}
//-------------------------------------------
}
return res;
} else {
}
}
intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
} else {
return Address();
}
}
}
return as_Address(addr);
}
// we jump here if osr happens with the interpreter
// state set up to continue at the beginning of the
// loop that triggered osr - in particular, we have
// the following registers setup:
//
// rcx: osr buffer
//
// build frame
// OSR buffer is
//
// locals[nlocals-1..0]
// monitors[0..number_of_locks]
//
// locals is a direct copy of the interpreter frame, so in the osr buffer
// the first slot in the local array is the last local from the interpreter
// and the last slot is local[0] (receiver) from the interpreter
//
// Similarly with locks. The first lock slot in the osr buffer is the nth lock
// from the interpreter frame, and the nth lock slot in the osr buffer is the
// 0th lock in the interpreter frame (the method lock if a synchronized method)
// Initialize monitors in the compiled activation.
// rcx: pointer to osr buffer
//
// All other registers are dead at this point and the locals will be
// copied into place by code emitted in the IR.
// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
// the OSR buffer using 2 word entries: first the lock and then
// the oop.
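// Offset sketch for the loop below (illustrative; OSR_buf and monitor_offset
// are assumed names for the buffer register and the byte offset of monitor[0]):
//   int slot_offset = monitor_offset - (i * 2 * BytesPerWord);
//   lock word: Address(OSR_buf, slot_offset + 0 * BytesPerWord)
//   oop  word: Address(OSR_buf, slot_offset + 1 * BytesPerWord)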
for (int i = 0; i < number_of_locks; i++) {
#ifdef ASSERT
// verify the interpreter's monitor has a non-null object
{
Label L;
}
#endif
}
}
}
// inline cache check; done before the frame is built.
if (!do_post_padding) {
// insert some nops so that the verified entry point is aligned on CodeEntryAlignment
}
}
if (do_post_padding) {
// force alignment after the cache check.
// It's been verified to be aligned if !VerifyOops
}
return offset;
}
}
// This specifies the rsp decrement needed to build the frame
// if rounding, must let FrameMap know!
// The frame_map records size in slots (32bit word)
// subtract two words to account for return address and link
}
// if the last instruction is a call (typically to do a throw which
// is coming at the end after block reordering) the return address
// must still point into the code area in order to avoid assertion
// failures when searching for the corresponding bci => add a nop
// (was bug 5/14/1999 - gri)
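// Minimal sketch of the fix described above (illustrative): unconditionally
// emitting
//   __ nop();
// here keeps the return address of a trailing call inside this method's code.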
// generate code for exception handler
if (handler_base == NULL) {
// not enough space left for the handler
bailout("exception handler overflow");
return -1;
}
// the exception oop and pc are in rax, and rdx
// no other registers need to be preserved, so invalidate them
__ invalidate_registers(false, true, true, false, true, true);
// check that there is really an exception
// search an exception handler (rax: exception oop, rdx: throwing pc)
__ end_a_stub();
return offset;
}
// Emit the code to remove the frame from the stack in the exception
// unwind path.
#ifndef PRODUCT
if (CommentedAssembly) {
}
#endif
// Fetch the exception from TLS and clear out exception related thread state
}
// Perform needed unlocking
if (method()->is_synchronized()) {
}
}
}
// remove the activation and dispatch to the unwind handler
// Emit the slow path assembly
}
return offset;
}
// if the last instruction is a call (typically to do a throw which
// is coming at the end after block reordering) the return address
// must still point into the code area in order to avoid assertion
// failures when searching for the corresponding bci => add a nop
// (was bug 5/14/1999 - gri)
// generate code for exception handler
if (handler_base == NULL) {
// not enough space left for the handler
bailout("deopt handler overflow");
return -1;
}
__ end_a_stub();
return offset;
}
// This is the fast version of java.lang.String.compare; it has no
// OSR entry, so we generate a slow version for OSR compilations
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
// Get addresses of first characters from both Strings
if (java_lang_String::has_offset_field()) {
} else {
}
// rbx, may be NULL
if (java_lang_String::has_offset_field()) {
} else {
}
// compute minimum length (in rax) and difference of lengths (on top of stack)
// is minimum length 0?
// compare first characters
// starting loop
// set rsi and rdi to the end of the arrays (arrays have the same length)
// negate the index
// compare the strings in a loop
// strings are equal up to min length
// leave instruction is going to discard the TOS value
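// Loop sketch for the comparison above (illustrative; rcx assumed to hold the
// negated index, rsi/rdi the array ends):
//   Lloop: __ load_unsigned_short(rbx, Address(rsi, rcx, Address::times_2));
//          __ load_unsigned_short(rax, Address(rdi, rcx, Address::times_2));
//          __ subl(rbx, rax);       // difference of the current characters
//          // exit on notZero, else increment rcx and loop while notZero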
}
assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
}
// Pop the stack before the safepoint code
// Note: we do not need to round double result; float result has the right precision
// the poll sets the condition code, but no data registers
if (Assembler::is_polling_page_far()) {
} else {
}
}
if (Assembler::is_polling_page_far()) {
} else {
}
return offset;
}
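// Poll sketch (illustrative): a "far" polling page is out of rip-relative
// range, so its address is materialized first:
//   __ lea(rscratch1, polling_page);
//   __ testl(rax, Address(rscratch1, 0));   // far case
// versus a direct  __ testl(rax, polling_page)  when the page is near.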
}
}
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
switch (c->type()) {
case T_INT: {
break;
}
case T_ADDRESS: {
break;
}
case T_LONG: {
#ifdef _LP64
#else
#endif // _LP64
break;
}
case T_OBJECT: {
if (patch_code != lir_patch_none) {
} else {
}
break;
}
case T_FLOAT: {
if (dest->is_single_xmm()) {
if (c->is_zero_float()) {
} else {
}
} else {
if (c->is_zero_float()) {
} else if (c->is_one_float()) {
} else {
}
}
break;
}
case T_DOUBLE: {
if (dest->is_double_xmm()) {
if (c->is_zero_double()) {
} else {
}
} else {
if (c->is_zero_double()) {
} else if (c->is_one_double()) {
} else {
}
}
break;
}
default:
}
}
switch (c->type()) {
case T_INT: // fall through
case T_FLOAT:
break;
case T_ADDRESS:
break;
case T_OBJECT:
break;
case T_LONG: // fall through
case T_DOUBLE:
#ifdef _LP64
#else
__ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                        lo_word_offset_in_bytes), c->as_jint_lo_bits());
__ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                        hi_word_offset_in_bytes), c->as_jint_hi_bits());
#endif // _LP64
break;
default:
}
}
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
switch (type) {
case T_INT: // fall through
case T_FLOAT:
break;
case T_ADDRESS:
break;
case T_OBJECT: // fall through
case T_ARRAY:
if (c->as_jobject() == NULL) {
if (UseCompressedOops && !wide) {
} else {
}
} else {
if (is_literal_address(addr)) {
} else {
#ifdef _LP64
if (UseCompressedOops && !wide) {
} else {
}
#else
#endif
}
}
break;
case T_LONG: // fall through
case T_DOUBLE:
#ifdef _LP64
if (is_literal_address(addr)) {
} else {
}
#else
// Always reachable in 32-bit mode, so this doesn't produce a useless move literal
#endif // _LP64
break;
case T_BOOLEAN: // fall through
case T_BYTE:
break;
case T_CHAR: // fall through
case T_SHORT:
break;
default:
};
}
}
// move between cpu-registers
if (dest->is_single_cpu()) {
#ifdef _LP64
// Can do LONG -> OBJECT
return;
}
#endif
}
} else if (dest->is_double_cpu()) {
#ifdef _LP64
// Surprising to me, but we can see a move of a long to T_OBJECT
return;
}
#endif
#ifdef _LP64
#else
} else {
}
#endif // LP64
// special moves from fpu-register to xmm-register
// necessary for method results
// move between xmm-registers
} else if (dest->is_single_xmm()) {
} else if (dest->is_double_xmm()) {
// move between fpu-registers (no instruction necessary because of fpu-stack)
} else {
}
}
if (src->is_single_cpu()) {
} else {
}
} else if (src->is_double_cpu()) {
} else if (src->is_single_xmm()) {
} else if (src->is_double_xmm()) {
} else if (src->is_single_fpu()) {
} else if (src->is_double_fpu()) {
} else {
}
}
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
#ifdef _LP64
if (UseCompressedOops && !wide) {
}
#endif
}
if (patch_code != lir_patch_none) {
}
switch (type) {
case T_FLOAT: {
if (src->is_single_xmm()) {
} else {
}
break;
}
case T_DOUBLE: {
if (src->is_double_xmm()) {
} else {
}
break;
}
case T_ARRAY: // fall through
case T_OBJECT: // fall through
if (UseCompressedOops && !wide) {
} else {
}
break;
case T_ADDRESS:
break;
case T_INT:
break;
case T_LONG: {
#ifdef _LP64
#else
}
}
} else {
}
}
#endif // _LP64
break;
}
case T_BYTE: // fall through
case T_BOOLEAN: {
break;
}
case T_CHAR: // fall through
case T_SHORT:
break;
default:
}
}
if (patch_code != lir_patch_none) {
}
}
if (dest->is_single_cpu()) {
} else {
}
} else if (dest->is_double_cpu()) {
Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
} else if (dest->is_single_xmm()) {
} else if (dest->is_double_xmm()) {
} else if (dest->is_single_fpu()) {
} else if (dest->is_double_fpu()) {
} else {
}
}
if (src->is_single_stack()) {
} else {
#ifndef _LP64
#else
// no pushl in 64-bit mode
#endif
}
} else if (src->is_double_stack()) {
#ifdef _LP64
#else
// push and pop the part at src + wordSize, adding wordSize for the previous push
#endif // _LP64
} else {
}
}
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
switch (type) {
case T_BOOLEAN: // fall through
case T_BYTE: // fall through
case T_CHAR: // fall through
case T_SHORT:
// on pre-P6 processors we may get partial register stalls,
// so blow away the value of to_rinfo before loading a
// partial word into it. Do it here so that it precedes
// the potential patch point below.
}
break;
}
if (patch_code != lir_patch_none) {
}
}
switch (type) {
case T_FLOAT: {
if (dest->is_single_xmm()) {
} else {
}
break;
}
case T_DOUBLE: {
if (dest->is_double_xmm()) {
} else {
}
break;
}
case T_OBJECT: // fall through
case T_ARRAY: // fall through
if (UseCompressedOops && !wide) {
} else {
}
break;
case T_ADDRESS:
break;
case T_INT:
break;
case T_LONG: {
#ifdef _LP64
#else
}
// addresses with 2 registers are only formed as a result of
// array access so this code will never have to deal with
// patches or null checks.
}
} else {
}
}
#endif // _LP64
break;
}
case T_BOOLEAN: // fall through
case T_BYTE: {
} else {
}
break;
}
case T_CHAR: {
} else {
}
break;
}
case T_SHORT: {
} else {
}
break;
}
default:
}
}
#ifdef _LP64
if (UseCompressedOops && !wide) {
  __ decode_heap_oop(dest->as_register());
}
#endif
}
}
if (VM_Version::supports_sse()) {
switch (ReadPrefetchInstr) {
case 0:
case 1:
case 2:
default:
ShouldNotReachHere(); break;
}
} else if (VM_Version::supports_3dnow_prefetch()) {
}
}
if (VM_Version::supports_sse()) {
switch (AllocatePrefetchInstr) {
case 0:
case 1:
case 2:
case 3:
default:
ShouldNotReachHere(); break;
}
} else if (VM_Version::supports_3dnow_prefetch()) {
}
}
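// Instruction-selection sketch for the two switches above (assuming the usual
// HotSpot mapping; illustrative only):
//   ReadPrefetchInstr:     0 -> prefetchnta, 1 -> prefetcht0, 2 -> prefetcht2
//   AllocatePrefetchInstr: 0 -> prefetchnta, 1 -> prefetcht0, 2 -> prefetcht2,
//                          3 -> prefetchw
//   3dnow-only CPUs fall back to prefetchr (reads) / prefetchw (allocates).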
NEEDS_CLEANUP; // This could be static?
switch (elem_size) {
}
}
case lir_idiv:
case lir_irem:
  arithmetic_idiv(op->code(),
                  op->in_opr1(), op->in_opr2(), op->in_opr3(),
                  op->result_opr(), op->info());
  break;
default: ShouldNotReachHere(); break;
}
}
#ifdef ASSERT
#endif
} else {
default: ShouldNotReachHere();
}
} else {
default: ShouldNotReachHere();
}
}
}
}
#ifdef _LP64
#else
#endif // LP64
break;
#ifdef _LP64
#else
#endif
break;
break;
break;
break;
if (dest->is_single_xmm()) {
} else if (dest->is_double_xmm()) {
} else {
// do nothing (float result is rounded later through spilling)
}
break;
if (dest->is_single_xmm()) {
} else if (dest->is_double_xmm()) {
} else {
}
break;
if (src->is_single_xmm()) {
} else if (src->is_double_xmm()) {
} else {
}
// IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
break;
assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
// float result is rounded later through spilling
break;
assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
// instruction sequence too long to inline it here
{
}
break;
default: ShouldNotReachHere();
}
}
if (op->init_check()) {
}
__ allocate_object(op->obj()->as_register(),
                   op->tmp1()->as_register(), op->tmp2()->as_register(),
                   op->header_size(),
                   op->object_size(),
                   op->klass()->as_register(),
                   *op->stub()->entry());
}
if (UseSlowPath ||
} else {
// everything is ok
} else {
}
__ allocate_array(op->obj()->as_register(), len, tmp1, tmp2,
                  arrayOopDesc::header_size(op->type()),
                  array_element_size(op->type()),
                  op->klass()->as_register(), *op->stub()->entry());
}
}
// See if the receiver is receiver[n].
}
// Didn't find receiver; find next empty slot and fill it in
__ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
}
}
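// Row-probe sketch (illustrative): each probe compares the receiver klass
// against one row of the ReceiverTypeData cache, e.g.
//   __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
//   __ jccb(Assembler::notEqual, next_test);   // miss -> try the next row
// and a hit bumps that row's counter instead of filling a new slot.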
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
// we always need a stub for the failure case.
// check if it needs to be profiled
if (op->should_profile()) {
}
} else if (obj == klass_RInfo) {
klass_RInfo = dst;
}
if (k->is_loaded() && !UseCompressedOops) {
} else {
}
if (!k->is_loaded()) {
} else {
#ifdef _LP64
#endif // _LP64
}
if (op->should_profile()) {
// Object is null; update MDO and exit
} else {
}
if (op->fast_check()) {
// get object class
// not a safepoint as obj null check happens earlier
#ifdef _LP64
if (UseCompressedOops) {
} else {
}
#else
if (k->is_loaded()) {
} else {
}
#endif
// successful cast, fall through to profile or jump
} else {
// get object class
// not a safepoint as obj null check happens earlier
if (k->is_loaded()) {
// See if we get an immediate positive hit
#ifdef _LP64
#else
#endif // _LP64
// successful cast, fall through to profile or jump
} else {
// See if we get an immediate positive hit
// check for self
#ifdef _LP64
#else
#endif // _LP64
#ifdef _LP64
#else
#endif // _LP64
// result is a boolean
// successful cast, fall through to profile or jump
}
} else {
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
// result is a boolean
// successful cast, fall through to profile or jump
}
}
if (op->should_profile()) {
}
}
if (code == lir_store_check) {
// check if it needs to be profiled
if (op->should_profile()) {
}
if (op->should_profile()) {
// Object is null; update MDO and exit
} else {
}
// get instance klass (it's already uncompressed)
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
// result is a boolean
// fall through to the success case
if (op->should_profile()) {
}
} else
if (code == lir_checkcast) {
}
} else
if (code == lir_instanceof) {
} else {
}
}
}
Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
#ifdef _LP64
if (UseCompressedOops) {
}
// cmpval (rax) is implicitly used by this instruction
} else
#endif
{
}
}
} else {
}
}
#ifdef _LP64
Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
}
#endif // _LP64
} else {
}
}
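// CAS sketch (illustrative): the cmpxchg encoding pins cmpval in rax, so the
// core of the operation is roughly
//   if (os::is_MP()) __ lock();
//   __ cmpxchgptr(newval, Address(addr, 0));   // compares against rax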
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
switch (condition) {
default: ShouldNotReachHere();
}
if (opr1->is_cpu_register()) {
} else if (opr1->is_constant()) {
} else {
}
// optimized version that does not require a branch
if (opr2->is_single_cpu()) {
} else if (opr2->is_double_cpu()) {
assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
} else if (opr2->is_single_stack()) {
} else if (opr2->is_double_stack()) {
__ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));)
} else {
}
} else {
if (opr2->is_cpu_register()) {
} else if (opr2->is_constant()) {
} else {
}
}
}
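// Pattern sketch for cmove above (illustrative): opr1 is moved into result
// unconditionally, then conditionally overwritten with opr2 under the negated
// condition, avoiding a branch:
//   __ cmovptr(ncond, result->as_register(), opr2->as_register());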
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
if (left->is_single_cpu()) {
if (right->is_single_cpu()) {
// cpu register - cpu register
switch (code) {
default: ShouldNotReachHere();
}
// cpu register - stack
switch (code) {
default: ShouldNotReachHere();
}
} else if (right->is_constant()) {
// cpu register - constant
switch (code) {
case lir_add: {
break;
}
case lir_sub: {
break;
}
default: ShouldNotReachHere();
}
} else {
}
} else if (left->is_double_cpu()) {
if (right->is_double_cpu()) {
// cpu register - cpu register
switch (code) {
case lir_add:
break;
case lir_sub:
break;
case lir_mul:
#ifdef _LP64
#else
#endif // _LP64
break;
default:
}
} else if (right->is_constant()) {
// cpu register - constant
#ifdef _LP64
switch (code) {
case lir_add:
break;
case lir_sub:
break;
default:
}
#else
switch (code) {
case lir_add:
break;
case lir_sub:
break;
default:
}
#endif // _LP64
} else {
}
} else if (left->is_single_xmm()) {
if (right->is_single_xmm()) {
switch (code) {
case lir_mul_strictfp: // fall through
case lir_div_strictfp: // fall through
default: ShouldNotReachHere();
}
} else {
if (right->is_single_stack()) {
} else if (right->is_constant()) {
// hack for now
} else {
}
switch (code) {
case lir_mul_strictfp: // fall through
case lir_div_strictfp: // fall through
default: ShouldNotReachHere();
}
}
} else if (left->is_double_xmm()) {
if (right->is_double_xmm()) {
switch (code) {
case lir_mul_strictfp: // fall through
case lir_div_strictfp: // fall through
default: ShouldNotReachHere();
}
} else {
if (right->is_double_stack()) {
} else if (right->is_constant()) {
// hack for now
} else {
}
switch (code) {
case lir_mul_strictfp: // fall through
case lir_div_strictfp: // fall through
default: ShouldNotReachHere();
}
}
} else if (left->is_single_fpu()) {
if (right->is_single_fpu()) {
arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack);
} else {
if (right->is_single_stack()) {
} else if (right->is_constant()) {
// hack for now
} else {
}
switch (code) {
case lir_mul_strictfp: // fall through
case lir_div_strictfp: // fall through
default: ShouldNotReachHere();
}
}
} else if (left->is_double_fpu()) {
}
if (right->is_double_fpu()) {
arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack);
} else {
if (right->is_double_stack()) {
} else if (right->is_constant()) {
// hack for now
} else {
}
switch (code) {
case lir_mul_strictfp: // fall through
case lir_div_strictfp: // fall through
default: ShouldNotReachHere();
}
}
}
if (left->is_single_stack()) {
} else if (left->is_address()) {
} else {
}
if (right->is_single_cpu()) {
switch (code) {
default: ShouldNotReachHere();
}
} else if (right->is_constant()) {
switch (code) {
case lir_add: {
break;
}
case lir_sub: {
break;
}
default: ShouldNotReachHere();
}
} else {
}
} else {
}
}
void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) {
assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR");
switch (code) {
case lir_add:
break;
case lir_sub:
if (left_is_tos) {
} else {
}
break;
case lir_mul_strictfp: // fall through
case lir_mul:
break;
case lir_div_strictfp: // fall through
case lir_div:
if (left_is_tos) {
} else {
}
break;
case lir_rem:
assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation");
break;
default:
}
}
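// Stack sketch (illustrative): with the left operand on TOS, lir_sub maps to
// fsub(right_index) and otherwise to the reversed form fsubr; when
// pop_fpu_stack is set, the popping forms (fsubp/fsubrp) are used instead so
// the simulated FPU stack stays balanced.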
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
if (value->is_double_xmm()) {
switch(code) {
case lir_abs :
{
}
}
break;
// all other intrinsics are not available in the SSE instruction set, so FPU is used
default : ShouldNotReachHere();
}
} else if (value->is_double_fpu()) {
switch(code) {
case lir_sin :
// Should consider not saving rbx, if not necessary
break;
case lir_cos :
// Should consider not saving rbx, if not necessary
break;
case lir_tan :
// Should consider not saving rbx, if not necessary
break;
case lir_exp :
break;
case lir_pow :
break;
default : ShouldNotReachHere();
}
} else {
}
}
// assert(left->destroys_register(), "check");
if (left->is_single_cpu()) {
if (right->is_constant()) {
switch (code) {
default: ShouldNotReachHere();
}
// added support for stack operands
switch (code) {
default: ShouldNotReachHere();
}
} else {
switch (code) {
default: ShouldNotReachHere();
}
}
} else {
if (right->is_constant()) {
#ifdef _LP64
switch (code) {
case lir_logic_and:
break;
case lir_logic_or:
break;
case lir_logic_xor:
break;
default: ShouldNotReachHere();
}
#else
switch (code) {
case lir_logic_and:
break;
case lir_logic_or:
break;
case lir_logic_xor:
break;
default: ShouldNotReachHere();
}
#endif // _LP64
} else {
#ifdef _LP64
} else {
}
#else
#endif
switch (code) {
case lir_logic_and:
break;
case lir_logic_or:
break;
case lir_logic_xor:
break;
default: ShouldNotReachHere();
}
}
#ifdef _LP64
#else
} else {
}
#endif // _LP64
}
}
// we assume that rax, and rdx can be overwritten
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
// assert(left->destroys_register(), "check");
// assert(right->destroys_register(), "check");
if (right->is_constant()) {
if (divisor == 2) {
} else {
}
} else {
}
} else {
} else {
}
}
}
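// General-path sketch (illustrative): the dividend is forced into rax and
// sign-extended into rdx before the divide:
//   __ cdql();                        // edx:eax := sign_extend(eax)
//   __ idivl(right->as_register());   // quotient -> eax, remainder -> edx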
if (opr1->is_single_cpu()) {
if (opr2->is_single_cpu()) {
// cpu register - cpu register
} else {
}
// cpu register - stack
} else {
}
} else if (opr2->is_constant()) {
// cpu register - constant
// In 64-bit mode oops are a single register
jobject o = c->as_jobject();
if (o == NULL) {
} else {
#ifdef _LP64
#else
#endif // _LP64
}
} else {
}
// cpu register - address
} else if (opr2->is_address()) {
}
} else {
}
} else if(opr1->is_double_cpu()) {
if (opr2->is_double_cpu()) {
#ifdef _LP64
#else
// cpu register - cpu register
}
#endif // _LP64
} else if (opr2->is_constant()) {
// cpu register - constant 0
#ifdef _LP64
#else
#endif // _LP64
} else {
}
} else if (opr1->is_single_xmm()) {
if (opr2->is_single_xmm()) {
// xmm register - xmm register
// xmm register - stack
} else if (opr2->is_constant()) {
// xmm register - constant
} else if (opr2->is_address()) {
// xmm register - address
}
} else {
}
} else if (opr1->is_double_xmm()) {
if (opr2->is_double_xmm()) {
// xmm register - xmm register
// xmm register - stack
} else if (opr2->is_constant()) {
// xmm register - constant
} else if (opr2->is_address()) {
// xmm register - address
}
} else {
}
assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
#ifdef _LP64
}
#endif // LP64
}
// special case: address - constant
#ifdef _LP64
// %%% Make this explode if addr isn't reachable until we figure out a
// better strategy by giving noreg as the temp for as_Address
#else
#endif // _LP64
} else {
}
} else {
}
}
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
if (left->is_single_xmm()) {
__ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
} else if (left->is_double_xmm()) {
__ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);
} else {
}
} else {
#ifdef _LP64
#else
__ lcmp2int(left->as_register_hi(),
            left->as_register_lo(),
            right->as_register_hi(),
            right->as_register_lo());
#endif // _LP64
}
}
// make sure that the displacement word of the call ends up word aligned so it can be patched atomically
switch (code) {
case lir_static_call:
case lir_optvirtual_call:
case lir_dynamic_call:
break;
case lir_icvirtual_call:
break;
case lir_virtual_call: // currently, sparc-specific for niagara
default: ShouldNotReachHere();
}
while (offset++ % BytesPerWord != 0) {
  __ nop();
}
}
}
"must be aligned");
}
"must be aligned");
}
/* Currently, vtable-dispatch is only enabled for sparc platforms */
}
bailout("static call stub overflow");
return;
}
// make sure that the displacement word of the call ends up word aligned
while (offset++ % BytesPerWord != 0) {
  __ nop();
}
}
// must be set to -1 at code generation time
// On 64bit this will die since it will take a movq & jmp, must be only a jmp
__ end_a_stub();
}
// exception object is not added to oop map by LinearScan
// (LinearScan assumes that no oops are in fixed registers)
// get current pc information
// pc is only needed if the method has an exception handler, the unwind code does not need it.
// search an exception handler (rax: exception oop, rdx: throwing pc)
if (compilation()->has_fpu_code()) {
} else {
}
// enough room for two byte trap
}
}
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
// optimized version for linear scan:
// * count must be already in ECX (guaranteed by LinearScan)
// * left and dest must be equal
// * tmp must be unused
if (left->is_single_cpu()) {
switch (code) {
default: ShouldNotReachHere();
}
} else if (left->is_double_cpu()) {
#ifdef _LP64
switch (code) {
default: ShouldNotReachHere();
}
#else
switch (code) {
default: ShouldNotReachHere();
}
#endif // LP64
} else {
}
}
if (dest->is_single_cpu()) {
// first move left into dest so that left is not destroyed by the shift
switch (code) {
default: ShouldNotReachHere();
}
} else if (dest->is_double_cpu()) {
#ifndef _LP64
#else
// first move left into dest so that left is not destroyed by the shift
switch (code) {
default: ShouldNotReachHere();
}
#endif // _LP64
} else {
}
}
}
}
}
// This code replaces a call to arraycopy; no exceptions may be thrown in
// this code, they must be thrown in the System.arraycopy activation frame;
// we could save some checks if this were not the case
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
// if we don't know anything, just go through the generic arraycopy
if (default_type == NULL) {
// save outgoing arguments on stack in case call to System.arraycopy is needed
// HACK ALERT. This code used to push the parameters in a hardwired fashion
// for interpreter calling conventions. Now we have to do it in new style conventions.
// For the moment until C1 gets the new register allocator I just force all the
// args to the right place (except the register args) and then on the back side
// reload the register args properly if we go slow path. Yuck
// These are proper for the calling convention
store_parameter(dst, 0);
// these are just temporary placements until we need to reload
// pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint
#ifdef _LP64
// The arguments are in java calling convention so we can trivially shift them to C
// convention
#ifdef _WIN64
// Allocate abi space for args but be sure to keep stack aligned
} else {
#ifndef PRODUCT
if (PrintC1Statistics) {
}
#endif
}
#else
} else {
#ifndef PRODUCT
if (PrintC1Statistics) {
}
#endif
}
#endif // _WIN64
#else
} else {
#ifndef PRODUCT
if (PrintC1Statistics) {
}
#endif
}
#endif // _LP64
if (copyfunc_addr != NULL) {
}
// Reload values from the stack so they are where the stub
// expects them.
if (copyfunc_addr != NULL) {
}
return;
}
assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
int shift_amount;
switch (elem_size) {
case 1 :
shift_amount = 0;
break;
case 2 :
shift_amount = 1;
break;
case 4 :
shift_amount = 2;
break;
case 8 :
shift_amount = 3;
break;
default:
}
// lengths and positions are all sign-extended at this point on 64-bit
// test for NULL
}
}
// check if negative
}
}
}
}
}
#ifdef _LP64
#endif
// We don't know the array types are compatible
if (basic_type != T_OBJECT) {
// Simple test for basic type arrays
if (UseCompressedOops) {
} else {
}
} else {
// For object arrays, if src is a sub class of dst then we can
// safely do the copy.
// src is not a sub class of dst so we have to do a
// per-element check.
// Check that at least both of them are object arrays.
}
}
// Spill because stubs can use any register they like and it's
// easier to restore just those that we care about.
store_parameter(dst, 0);
#ifndef _LP64
#else
#ifdef _WIN64
// Allocate abi space for args but be sure to keep stack aligned
#else
#endif
#endif
#ifndef PRODUCT
if (PrintC1Statistics) {
}
#endif
#ifndef PRODUCT
if (PrintC1Statistics) {
}
#endif
// Restore previously spilled arguments
}
}
}
#ifdef ASSERT
// Sanity check the known type with the incoming class. For the
// primitive case the types must match exactly with src.klass and
// dst.klass each exactly matching the default type. For the
// object array case, if no type check is needed then either the
// dst type is exactly the expected type and the src type is a
// subtype which we can't check or src is the same array as dst
// but not necessarily exactly of type default_type.
#ifdef _LP64
if (UseCompressedOops) {
}
#endif
if (basic_type != T_OBJECT) {
} else {
}
}
#endif
#ifndef PRODUCT
if (PrintC1Statistics) {
}
#endif
#ifdef _LP64
#else
store_parameter(tmp, 0);
#endif // _LP64
const char *name;
address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
}
if (!UseFastLocking) {
if (UseBiasedLocking) {
}
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
// add debug info for NullPointerException only if one is possible
}
// done
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
} else {
}
}
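// Fast-lock sketch (illustrative): with a displaced-header offset of 0, the
// BasicLock pointer doubles as the header address, so the fast path reduces to
//   __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
// where the stub entry is taken when the CAS on the mark word fails.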
// Update counter for all call types
// Perform additional virtual call profiling for invokevirtual and
// invokeinterface bytecodes
!callee_is_static && // required for optimized MH invokes
// We know the type that will be seen at this call site; we can
// statically update the methodDataOop rather than needing to do
// dynamic tests on the receiver type
// NOTE: we should probably put a lock around this search to
// avoid collisions by concurrent compilations
uint i;
for (i = 0; i < VirtualCallData::row_limit(); i++) {
return;
}
}
// Receiver type not found in profile data; select an empty slot
// Note that this is less efficient than it should be because it
// always does a write to the receiver part of the
// VirtualCallData rather than just the first time
for (i = 0; i < VirtualCallData::row_limit(); i++) {
return;
}
}
} else {
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
}
} else {
// Static call
}
}
}
}
}
if (left->is_single_cpu()) {
} else if (left->is_double_cpu()) {
#ifdef _LP64
#else
} else {
}
#endif // _LP64
} else if (dest->is_single_xmm()) {
}
} else if (dest->is_double_xmm()) {
}
} else {
}
}
}
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
}
}
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
}
if (src->is_double_xmm()) {
if (dest->is_double_cpu()) {
#ifdef _LP64
#else
#endif // _LP64
} else if (dest->is_double_stack()) {
} else if (dest->is_address()) {
} else {
}
} else if (dest->is_double_xmm()) {
if (src->is_double_stack()) {
} else if (src->is_address()) {
} else {
}
} else if (src->is_double_fpu()) {
if (dest->is_double_stack()) {
} else if (dest->is_address()) {
} else {
}
} else if (dest->is_double_fpu()) {
if (src->is_double_stack()) {
} else if (src->is_address()) {
} else {
}
} else {
}
}
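// 32-bit atomicity sketch (illustrative): a volatile jlong/jdouble must be
// moved with a single 64-bit access, which is why the code above routes it
// through an XMM register:
//   __ movdbl(xmm0, src_addr);   // one 64-bit load
//   __ movdbl(dst_addr, xmm0);   // one 64-bit store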
// QQQ sparc TSO uses this,
}
// No x86 machines currently require load fences
// __ load_fence();
}
// No x86 machines currently require store fences
// __ store_fence();
}
// no-op
//__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}
// no-op
//__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}
// no-op
//__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}
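// Fence sketch (illustrative): under the x86-TSO memory model only StoreLoad
// needs a real fence; when one is required, HotSpot emits roughly
//   __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
// which lowers to  lock addl $0, (rsp)  (or mfence).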
}
#ifdef _LP64
// __ get_thread(result_reg->as_register_lo());
#else
#endif // _LP64
}
// do nothing for now
}
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
}
} else {
}
#ifdef _LP64
if (UseCompressedOops) {
} else {
}
#else
#endif
#ifdef _LP64
}
} else {
}
#else
#endif
} else {
}
}