// assembler_x86.cpp revision 710
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
#include "incls/_precompiled.incl"
#include "incls/_assembler_x86.cpp.incl"
// Implementation of AddressLiteral
_is_lval = false;
switch (rtype) {
// Oops are a special case. Normally they would be their own section
// but in cases like icBuffer they are literals in the code stream that
// we don't have a section for. We use none so that we get a literal address
// which is always patchable.
break;
case relocInfo::external_word_type:
break;
case relocInfo::internal_word_type:
break;
case relocInfo::opt_virtual_call_type:
break;
case relocInfo::static_call_type:
break;
case relocInfo::runtime_call_type:
break;
case relocInfo::poll_return_type:
break;
break;
default:
break;
}
}
// Implementation of Address
#ifdef _LP64
// Not implementable on 64bit machines
// Should have been handled higher up the call chain.
return Address();
}
// exceedingly dangerous constructor
switch (rtype) {
case relocInfo::external_word_type:
break;
case relocInfo::internal_word_type:
break;
case relocInfo::runtime_call_type:
// HMM
break;
case relocInfo::poll_return_type:
break;
break;
default:
}
}
#else // LP64
return array;
}
// exceedingly dangerous constructor
}
#endif // _LP64
// Convert the raw encoding form into the form expected by the constructor for
// Address. An index of 4 (rsp) corresponds to having no index, so convert
// that to noreg for the Address constructor.
if (disp_is_oop) {
}
if (valid_index) {
Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
return madr;
} else {
return madr;
}
}
// Implementation of Assembler
// Byte used to fill unused/padded regions of the code buffer.
// Must be a harmless single-byte instruction in case control ever
// falls into the padding; on x86 that is the one-byte NOP (0x90).
// NOTE(review): the visible body had no return statement, which is
// undefined behavior for a non-void function — fixed to return NOP.
int AbstractAssembler::code_fill_byte() {
  return (int)0x90;  // x86 NOP opcode
}
// make this go away someday
}
#ifdef ASSERT
#endif
// Do not use AbstractAssembler::relocate, which is not intended for
// embedded words. Instead, relocate to the enclosing instruction.
// hack. call32 is too wide for mask so use disp32
if (format == call32_operand)
else
}
}
if (enc >= 8) {
enc -= 8;
}
return enc;
}
// Return the low 3 bits of an XMM register's hardware encoding, as
// placed in a ModRM/SIB register field. Registers xmm8..xmm15 carry
// their fourth encoding bit in a REX prefix (emitted elsewhere), so
// encodings >= 8 are reduced to the 0..7 field value here.
// NOTE(review): the visible body used `enc` without declaring it —
// restored the missing initialization from the register's encoding.
static int encode(XMMRegister r) {
  int enc = r->encoding();
  if (enc >= 8) {
    enc -= 8;  // xmm8..xmm15: bit 3 is supplied by REX.B/REX.R, not this field
  }
  return enc;
}
}
} else {
}
}
// immediate-to-memory forms
} else {
}
}
InstructionMark im(this);
}
}
RelocationHolder const& rspec,
int rip_relative_correction) {
// Encode the registers as needed in the fields they are used in
// [base + index*scale + disp]
// [base + index*scale]
// [00 reg 100][ss index base]
// [base + index*scale + imm8]
// [01 reg 100][ss index base] imm8
} else {
// [base + index*scale + disp32]
// [10 reg 100][ss index base] disp32
}
// [rsp + disp]
// [rsp]
// [00 reg 100][00 100 100]
emit_byte(0x24);
// [rsp + imm8]
// [01 reg 100][00 100 100] disp8
emit_byte(0x24);
} else {
// [rsp + imm32]
// [10 reg 100][00 100 100] disp32
emit_byte(0x24);
}
} else {
// [base + disp]
// [base]
// [00 reg base]
// [base + disp8]
// [01 reg base] disp8
} else {
// [base + disp32]
// [10 reg base] disp32
}
}
} else {
// [index*scale + disp]
// [00 reg 100][ss index 101] disp32
// [disp] (64bit) RIP-RELATIVE (32bit) abs
// [00 000 101] disp32
// Note that the RIP-rel. correction applies to the generated
// disp field, but _not_ to the target address in the rspec.
// disp was created by converting the target address minus the pc
// at the start of the instruction. That needs more correction here.
// intptr_t disp = target - next_ip;
// Do rip-rel adjustment for 64bit
"must be 32bit offset (RIP relative address)");
} else {
// [disp] ABSOLUTE
// [00 reg 100][00 100 101] disp32
emit_byte(0x25);
}
}
}
RelocationHolder const& rspec) {
}
// Secret local extension to Assembler::WhichOperand:
#define end_pc_operand (_WhichOperand_limit)
// Decode the given instruction, and return the address of
// an embedded 32-bit operand word.
// If "which" is disp32_operand, selects the displacement portion
// of an effective address specifier.
// If "which" is imm64_operand, selects the trailing immediate constant.
// If "which" is call32_operand, selects the displacement of a call or jump.
// Caller is responsible for ensuring that there is such an operand,
// and that it is 32/64 bits wide.
// If "which" is end_pc_operand, find the end of the instruction.
bool is_64bit = false;
debug_only(bool has_disp32 = false);
int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn
switch (0xFF & *ip++) {
// These convenience macros generate groups of "case" labels for the switch.
case (x)+4: case (x)+5: case (x)+6: case (x)+7
case REP8((x)+8)
case CS_segment:
case SS_segment:
case DS_segment:
case ES_segment:
case FS_segment:
case GS_segment:
// Seems dubious
goto again_after_prefix;
case 0x67:
case REX:
case REX_B:
case REX_X:
case REX_XB:
case REX_R:
case REX_RB:
case REX_RX:
case REX_RXB:
goto again_after_prefix;
case REX_W:
case REX_WB:
case REX_WX:
case REX_WXB:
case REX_WR:
case REX_WRB:
case REX_WRX:
case REX_WRXB:
is_64bit = true;
goto again_after_prefix;
case 0xFF: // pushq a; decl a; incl a; call a; jmp a
case 0x88: // movb a, r
case 0x89: // movl a, r
case 0x8A: // movb r, a
case 0x8B: // movl r, a
case 0x8F: // popl a
debug_only(has_disp32 = true);
break;
case 0x68: // pushq #32
if (which == end_pc_operand) {
return ip + 4;
}
return ip; // not produced by emit_operand
case 0x66: // movw ... (size prefix)
switch (0xFF & *ip++) {
case REX:
case REX_B:
case REX_X:
case REX_XB:
case REX_R:
case REX_RB:
case REX_RX:
case REX_RXB:
case REX_W:
case REX_WB:
case REX_WX:
case REX_WXB:
case REX_WR:
case REX_WRB:
case REX_WRX:
case REX_WRXB:
goto again_after_size_prefix2;
case 0x8B: // movw r, a
case 0x89: // movw a, r
debug_only(has_disp32 = true);
break;
case 0xC7: // movw a, #16
debug_only(has_disp32 = true);
break;
ip--; // reparse the 0x0F
goto again_after_prefix;
default:
}
break;
// these asserts are somewhat nonsensical
#ifndef _LP64
#else
#endif // _LP64
return ip;
case 0x69: // imul r, a, #32
case 0xC7: // movl a, #32(oop?)
tail_size = 4;
break;
case 0x0F: // movx..., etc.
switch (0xFF & *ip++) {
case 0x12: // movlps
case 0x28: // movaps
case 0x2E: // ucomiss
case 0x2F: // comiss
case 0x54: // andps
case 0x55: // andnps
case 0x56: // orps
case 0x57: // xorps
case 0x6E: // movd
case 0x7E: // movd
case 0xAE: // ldmxcsr a
// 64bit side says it these have both operands but that doesn't
// appear to be true
debug_only(has_disp32 = true);
break;
case 0xAD: // shrd r, a, %cl
case 0xAF: // imul r, a
case 0xBE: // movsbl r, a (movsxb)
case 0xBF: // movswl r, a (movsxw)
case 0xB6: // movzbl r, a (movzxb)
case 0xB7: // movzwl r, a (movzxw)
case 0xB0: // cmpxchgb
case 0xB1: // cmpxchg
case 0xC1: // xaddl
case 0xC7: // cmpxchg8
debug_only(has_disp32 = true);
// fall out of the switch to decode the address
break;
case 0xAC: // shrd r, a, #8
debug_only(has_disp32 = true);
break;
return ip;
default:
}
break;
case 0x81: // addl a, #32; addl r, #32
// also: orl, adcl, sbbl, andl, subl, xorl, cmpl
// on 32bit in the case of cmpl, the imm might be an oop
tail_size = 4;
break;
case 0x83: // addl a, #8; addl r, #8
// also: orl, adcl, sbbl, andl, subl, xorl, cmpl
tail_size = 1;
break;
case 0x9B:
switch (0xFF & *ip++) {
case 0xD9: // fnstcw a
debug_only(has_disp32 = true);
break;
default:
}
break;
case 0xF7: // mull a
case 0x8D: // lea r, a
case 0x87: // xchg r, a
case 0x85: // test r, a
break;
case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
case 0xC6: // movb a, #8
case 0x80: // cmpb a, #8
case 0x6B: // imul r, a, #8
break;
case 0xE8: // call rdisp32
case 0xE9: // jmp rdisp32
return ip;
case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
case 0xDD: // fld_d a; fst_d a; fstp_d a
case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
case 0xDF: // fild_d a; fistp_d a
case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
debug_only(has_disp32 = true);
break;
case 0xF0: // Lock
goto again_after_prefix;
case 0xF3: // For SSE
case 0xF2: // For SSE2
switch (0xFF & *ip++) {
case REX:
case REX_B:
case REX_X:
case REX_XB:
case REX_R:
case REX_RB:
case REX_RX:
case REX_RXB:
case REX_W:
case REX_WB:
case REX_WX:
case REX_WXB:
case REX_WR:
case REX_WRB:
case REX_WRX:
case REX_WRXB:
ip++;
default:
ip++;
}
break;
default:
}
#ifdef _LP64
#else
// assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
#endif // LP64
// parse the output of emit_operand
int op3 = -1;
const int b100 = 4;
const int b101 = 5;
}
// now ip points at the disp (if any)
switch (op2 >> 6) {
case 0:
// [00 reg 100][ss index base]
// [00 reg 100][00 100 esp]
// [00 reg base]
// [00 reg 100][ss index 101][disp32]
// [00 reg 101] [disp32]
if (which == disp32_operand)
return ip; // caller wants the disp32
}
break;
case 1:
// [01 reg 100][ss index base][disp8]
// [01 reg 100][00 100 esp][disp8]
// [01 reg base] [disp8]
break;
case 2:
// [10 reg 100][ss index base][disp32]
// [10 reg 100][00 100 esp][disp32]
// [10 reg base] [disp32]
if (which == disp32_operand)
return ip; // caller wants the disp32
break;
case 3:
// [11 reg base] (not a memory addressing mode)
break;
}
if (which == end_pc_operand) {
}
#ifdef _LP64
#else
#endif // LP64
return ip;
}
// Secretly share code with locate_operand:
}
#ifdef ASSERT
return;
// assert(format == imm32_operand, "cannot specify a nonzero format");
} else if (r->is_data()) {
} else {
return;
}
}
#endif // ASSERT
}
int rip_relative_correction) {
}
}
// MMX operations
}
// work around gcc (3.2.1-7a) bug
}
}
// Now the Assembler instruction (identical for 32/64 bits)
}
InstructionMark im(this);
emit_byte(0x13);
}
}
InstructionMark im(this);
}
InstructionMark im(this);
emit_byte(0x01);
}
}
InstructionMark im(this);
emit_byte(0x03);
}
}
// Emit the recommended 4-byte multi-byte NOP: 0F 1F 40 00.
// NOTE(review): the visible body emitted only 3 bytes (ModRM missing),
// which would make alignment padding one byte short — restored 0x40.
void Assembler::addr_nop_4() {
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x40); // ModRM [01 000 000]: disp8 addressing form, base = EAX
  emit_byte(0);    // 8-bits offset (1 byte)
}
// Emit the recommended 5-byte multi-byte NOP: 0F 1F 44 00 00.
// NOTE(review): the visible body emitted only 3 bytes (ModRM and SIB
// missing), producing the wrong padding length — restored 0x44 0x00.
void Assembler::addr_nop_5() {
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x44); // ModRM [01 000 100]: disp8 addressing form with SIB
  emit_byte(0x00); // SIB   [00 000 000]: base = EAX, index = EAX, scale = 1
  emit_byte(0);    // 8-bits offset (1 byte)
}
// Emit the recommended 7-byte multi-byte NOP: 0F 1F 80 00 00 00 00.
// NOTE(review): the visible body emitted only 6 bytes (ModRM missing),
// producing the wrong padding length — restored 0x80.
void Assembler::addr_nop_7() {
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x80); // ModRM [10 000 000]: disp32 addressing form, base = EAX
  emit_long(0);    // 32-bits offset (4 bytes)
}
// Emit the recommended 8-byte multi-byte NOP: 0F 1F 84 00 00 00 00 00.
// NOTE(review): the visible body emitted only 6 bytes (ModRM and SIB
// missing), producing the wrong padding length — restored 0x84 0x00.
void Assembler::addr_nop_8() {
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x84); // ModRM [10 000 100]: disp32 addressing form with SIB
  emit_byte(0x00); // SIB   [00 000 000]: base = EAX, index = EAX, scale = 1
  emit_long(0);    // 32-bits offset (4 bytes)
}
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x58);
}
InstructionMark im(this);
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x58);
}
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x58);
}
InstructionMark im(this);
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x58);
}
}
InstructionMark im(this);
emit_byte(0x23);
}
}
InstructionMark im(this);
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x54);
}
emit_byte(0x0F);
}
// suspect disp32 is always good
if (L.is_bound()) {
const int long_size = 5;
InstructionMark im(this);
// 1110 1000 #32-bit disp
emit_byte(0xE8);
} else {
InstructionMark im(this);
// 1110 1000 #32-bit disp
emit_byte(0xE8);
}
}
// This was originally using a 32bit register encoding
// and surely we want 64bit!
// this is a 32bit encoding but in 64bit mode the default
// operand size is 64bit so there is no need for the
// wide prefix. So prefix only happens if we use the
int x = offset();
// this may be true but dbx disassembles it as if it
// were 32bits...
// int encode = prefix_and_encode(dst->encoding());
// if (offset() != x) assert(dst->encoding() >= 8, "what?");
emit_byte(0xFF);
}
InstructionMark im(this);
emit_byte(0xFF);
}
InstructionMark im(this);
emit_byte(0xE8);
// Technically, should use call32_operand, but this format is
// implied by the fact that we're emitting a call instruction.
}
emit_byte(0x99);
}
emit_byte(0x0F);
}
emit_byte(0x0F);
}
InstructionMark im(this);
emit_byte(0x80);
}
InstructionMark im(this);
emit_byte(0x81);
}
}
}
InstructionMark im(this);
emit_byte(0x3B);
}
InstructionMark im(this);
emit_byte(0x66);
emit_byte(0x81);
}
// The 32-bit cmpxchg compares the value at adr with the contents of rax,
// and stores reg into adr if so; otherwise, the value at adr is loaded into rax,.
// The ZF is set if the compared values were equal, and cleared otherwise.
if (Atomics & 2) {
// caveat: no instructionmark, so this isn't relocatable.
// Emit a synthetic, non-atomic, CAS equivalent.
// Beware. The synthetic form sets all ICCs, not just ZF.
// cmpxchg r,[m] is equivalent to rax, = CAS (m, rax, r)
Label L ;
bind(L);
}
} else {
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0xB1);
}
}
// NOTE: dbx seems to decode this as comiss even though the
// 0x66 is there. Strangly ucomisd comes out correct
emit_byte(0x66);
}
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0x2F);
}
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0xE6);
}
emit_byte(0x0F);
emit_byte(0x5B);
}
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x5A);
}
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x2A);
}
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x2A);
}
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x5A);
}
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x2C);
}
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x2C);
}
// Don't use it directly. Use MacroAssembler::decrement() instead.
InstructionMark im(this);
emit_byte(0xFF);
}
InstructionMark im(this);
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x5E);
}
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x5E);
}
InstructionMark im(this);
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x5E);
}
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x5E);
}
emit_byte(0x0F);
emit_byte(0x77);
}
emit_byte(0xF4);
}
emit_byte(0xF7);
}
emit_byte(0x0F);
emit_byte(0xAF);
}
emit_byte(0x6B);
} else {
emit_byte(0x69);
}
}
// Don't use it directly. Use MacroAssembler::increment() instead.
InstructionMark im(this);
emit_byte(0xFF);
}
InstructionMark im(this);
if (L.is_bound()) {
const int short_size = 2;
const int long_size = 6;
// 0111 tttn #8-bit disp
} else {
// 0000 1111 1000 tttn #32-bit disp
"must be 32bit offset (call4)");
emit_byte(0x0F);
}
} else {
// Note: could eliminate cond. jumps to this jump if condition
// is the same however, seems to be rather unlikely case.
// Note: use jccb() if label to be bound is very close to get
// an 8-bit displacement
emit_byte(0x0F);
emit_long(0);
}
}
if (L.is_bound()) {
const int short_size = 2;
"Dispacement too large for a short jmp");
// 0111 tttn #8-bit disp
} else {
InstructionMark im(this);
emit_byte(0);
}
}
InstructionMark im(this);
emit_byte(0xFF);
}
if (L.is_bound()) {
InstructionMark im(this);
const int short_size = 2;
const int long_size = 5;
emit_byte(0xEB);
} else {
emit_byte(0xE9);
}
} else {
// By default, forward jumps are always 32-bit displacements, since
// we can't yet know where the label will be bound. If you're sure that
// the forward jump will not run beyond 256 bytes, use jmpb to
// force an 8-bit displacement.
InstructionMark im(this);
emit_byte(0xE9);
emit_long(0);
}
}
emit_byte(0xFF);
}
InstructionMark im(this);
emit_byte(0xE9);
}
if (L.is_bound()) {
const int short_size = 2;
"Dispacement too large for a short jmp");
emit_byte(0xEB);
} else {
InstructionMark im(this);
emit_byte(0xEB);
emit_byte(0);
}
}
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0xAE);
}
InstructionMark im(this);
#ifdef _LP64
#endif // LP64
emit_byte(0x8D);
}
if (Atomics & 1) {
// Emit either nothing, a NOP, or a NOP: prefix
emit_byte(0x90) ;
} else {
emit_byte(0xF0);
}
}
// Emit mfence instruction
emit_byte( 0x0F );
emit_byte( 0xAE );
emit_byte( 0xF0 );
}
}
emit_byte(0x66);
if (dstenc < 8) {
if (srcenc >= 8) {
srcenc -= 8;
}
} else {
if (srcenc < 8) {
} else {
srcenc -= 8;
}
dstenc -= 8;
}
emit_byte(0x0F);
emit_byte(0x28);
}
if (dstenc < 8) {
if (srcenc >= 8) {
srcenc -= 8;
}
} else {
if (srcenc < 8) {
} else {
srcenc -= 8;
}
dstenc -= 8;
}
emit_byte(0x0F);
emit_byte(0x28);
}
InstructionMark im(this);
emit_byte(0x8A);
}
InstructionMark im(this);
emit_byte(0xC6);
}
InstructionMark im(this);
emit_byte(0x88);
}
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x6E);
}
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x7E);
}
InstructionMark im(this);
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x6F);
}
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x6F);
}
InstructionMark im(this);
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x7F);
}
InstructionMark im(this);
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x6F);
}
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x6F);
}
InstructionMark im(this);
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x7F);
}
// Uses zero extension on 64bit
}
emit_byte(0x8B);
}
InstructionMark im(this);
emit_byte(0x8B);
}
InstructionMark im(this);
emit_byte(0xC7);
}
InstructionMark im(this);
emit_byte(0x89);
}
// New cpus require to use movsd and movss to avoid partial register stall
// when loading from memory. But for old Opteron use movlpd instead of movsd.
// The selection is done in MacroAssembler::movdbl() and movflt().
InstructionMark im(this);
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x12);
}
emit_byte(0x0F);
emit_byte(0x6F);
}
emit_byte(0x0F);
emit_byte(0x7F);
// workaround gcc (3.2.1-7a) bug
// In that version of gcc with only an emit_operand(MMX, Address)
// gcc will tail jump and try and reverse the parameters completely
// obliterating dst in the process. By having a version available
// that doesn't need to swap the args at the tail jump the bug is
// avoided.
}
InstructionMark im(this);
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x7E);
}
InstructionMark im(this);
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0xD6);
}
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0xBE);
}
emit_byte(0x0F);
emit_byte(0xBE);
}
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x10);
}
InstructionMark im(this);
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x10);
}
InstructionMark im(this);
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x11);
}
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x10);
}
InstructionMark im(this);
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x10);
}
InstructionMark im(this);
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x11);
}
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0xBF);
}
emit_byte(0x0F);
emit_byte(0xBF);
}
InstructionMark im(this);
emit_byte(0xC7);
}
InstructionMark im(this);
emit_byte(0x66);
emit_byte(0x8B);
}
InstructionMark im(this);
emit_byte(0x66);
emit_byte(0x89);
}
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0xB6);
}
emit_byte(0x0F);
emit_byte(0xB6);
}
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0xB7);
}
emit_byte(0x0F);
emit_byte(0xB7);
}
InstructionMark im(this);
emit_byte(0xF7);
}
emit_byte(0xF7);
}
InstructionMark im(this);
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x59);
}
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x59);
}
InstructionMark im(this);
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x59);
}
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x59);
}
emit_byte(0xF7);
}
#ifdef ASSERT
assert(i > 0, " ");
// The fancy nops aren't currently recognized by debuggers making it a
// pain to disassemble code while debugging. If asserts are on clearly
// speed is not an issue so simply use the single byte traditional nop
// to do alignment.
for (; i > 0 ; i--) emit_byte(0x90);
return;
#endif // ASSERT
//
// Using multi-bytes nops "0x0F 0x1F [address]" for Intel
// 1: 0x90
// 2: 0x66 0x90
// 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
// 4: 0x0F 0x1F 0x40 0x00
// 5: 0x0F 0x1F 0x44 0x00 0x00
// 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
// 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
// 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// The rest coding is Intel specific - don't use consecutive address nops
// 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
// 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
// 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
// 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
while(i >= 15) {
// For Intel don't generate consecutive addess nops (mix with regular nops)
i -= 15;
addr_nop_8();
}
switch (i) {
case 14:
case 13:
case 12:
addr_nop_8();
break;
case 11:
case 10:
case 9:
case 8:
addr_nop_8();
break;
case 7:
addr_nop_7();
break;
case 6:
case 5:
addr_nop_5();
break;
case 4:
addr_nop_4();
break;
case 3:
// Don't use "0x0F 0x1F 0x00" - need patching safe padding
case 2:
case 1:
break;
default:
assert(i == 0, " ");
}
return;
}
//
// Using multi-bytes nops "0x0F 0x1F [address]" for AMD.
// 1: 0x90
// 2: 0x66 0x90
// 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
// 4: 0x0F 0x1F 0x40 0x00
// 5: 0x0F 0x1F 0x44 0x00 0x00
// 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
// 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
// 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// The rest coding is AMD specific - use consecutive address nops
// 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
// 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
// 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
// 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
// 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
// Size prefixes (0x66) are added for larger sizes
while(i >= 22) {
i -= 11;
addr_nop_8();
}
// Generate first nop for size between 21-12
switch (i) {
case 21:
i -= 1;
case 20:
case 19:
i -= 1;
case 18:
case 17:
i -= 1;
case 16:
case 15:
i -= 8;
addr_nop_8();
break;
case 14:
case 13:
i -= 7;
addr_nop_7();
break;
case 12:
i -= 6;
addr_nop_5();
break;
default:
}
// Generate second nop for size between 11-1
switch (i) {
case 11:
case 10:
case 9:
case 8:
addr_nop_8();
break;
case 7:
addr_nop_7();
break;
case 6:
case 5:
addr_nop_5();
break;
case 4:
addr_nop_4();
break;
case 3:
// Don't use "0x0F 0x1F 0x00" - need patching safe padding
case 2:
case 1:
break;
default:
assert(i == 0, " ");
}
return;
}
// Using nops with size prefixes "0x66 0x90".
// From AMD Optimization Guide:
// 1: 0x90
// 2: 0x66 0x90
// 3: 0x66 0x66 0x90
// 4: 0x66 0x66 0x66 0x90
// 5: 0x66 0x66 0x90 0x66 0x90
// 6: 0x66 0x66 0x90 0x66 0x66 0x90
// 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
// 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
// 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
// 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
//
while(i > 12) {
i -= 4;
emit_byte(0x66);
emit_byte(0x66);
}
// 1 - 12 nops
if(i > 8) {
if(i > 9) {
i -= 1;
emit_byte(0x66);
}
i -= 3;
emit_byte(0x66);
emit_byte(0x66);
emit_byte(0x90);
}
// 1 - 8 nops
if(i > 4) {
if(i > 6) {
i -= 1;
emit_byte(0x66);
}
i -= 3;
emit_byte(0x66);
emit_byte(0x66);
emit_byte(0x90);
}
switch (i) {
case 4:
emit_byte(0x66);
case 3:
emit_byte(0x66);
case 2:
emit_byte(0x66);
case 1:
emit_byte(0x90);
break;
default:
assert(i == 0, " ");
}
}
emit_byte(0xF7);
}
InstructionMark im(this);
emit_byte(0x81);
}
}
InstructionMark im(this);
emit_byte(0x0B);
}
}
InstructionMark im(this);
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x3A);
emit_byte(0x61);
}
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x3A);
emit_byte(0x61);
}
// generic
}
InstructionMark im(this);
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0xB8);
}
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0xB8);
}
emit_byte(0x9D);
}
// NOTE: this will adjust stack by 8byte on 64bits
InstructionMark im(this);
emit_byte(0x8F);
}
emit_byte(0x0F);
}
InstructionMark im(this);
emit_byte(0x18);
}
InstructionMark im(this);
emit_byte(0x0D);
}
InstructionMark im(this);
emit_byte(0x18);
}
InstructionMark im(this);
emit_byte(0x18);
}
InstructionMark im(this);
emit_byte(0x18);
}
InstructionMark im(this);
emit_byte(0x0D);
}
a_byte(p);
}
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x70);
}
InstructionMark im(this);
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x70);
}
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x70);
}
InstructionMark im(this);
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x70);
}
// HMM Table D-1 says sse2 or mmx
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x73);
}
InstructionMark im(this);
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x38);
emit_byte(0x17);
}
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x38);
emit_byte(0x17);
}
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x60);
}
// in 64bits we push 64bits onto the stack but only
// take a 32bit immediate
emit_byte(0x68);
}
}
emit_byte(0x9C);
}
// Note this will push 64bit on 64bit
InstructionMark im(this);
emit_byte(0xFF);
}
InstructionMark im(this);
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0xEF);
}
InstructionMark im(this);
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0xEF);
}
if (imm8 == 1) {
emit_byte(0xD1);
} else {
emit_byte(0xC1);
}
}
// copies data from [esi] to [edi] using rcx pointer sized words
// generic
emit_byte(0xF3);
// MOVSQ
emit_byte(0xA5);
}
// sets rcx pointer sized words with rax, value at [edi]
// generic
emit_byte(0xF3);
// STOSQ
emit_byte(0xAB);
}
// scans rcx pointer sized words at [edi] for occurance of rax,
// generic
emit_byte(0xF2);
// SCASQ
emit_byte(0xAF);
}
#ifdef _LP64
// scans rcx 4 byte words at [edi] for occurance of rax,
// generic
emit_byte(0xF2);
// SCASL
emit_byte(0xAF);
}
#endif
if (imm16 == 0) {
emit_byte(0xC3);
} else {
emit_byte(0xC2);
}
}
#ifdef _LP64
// Not supported in 64bit mode
#endif
emit_byte(0x9E);
}
if (imm8 == 1) {
emit_byte(0xD1);
} else {
emit_byte(0xC1);
}
}
emit_byte(0xD3);
}
InstructionMark im(this);
}
}
InstructionMark im(this);
emit_byte(0x1B);
}
}
emit_byte(0x0F);
}
if (imm8 == 1 ) {
emit_byte(0xD1);
} else {
emit_byte(0xC1);
}
}
emit_byte(0xD3);
}
emit_byte(0xC1);
}
emit_byte(0xD3);
}
// copies a single word from [esi] to [edi]
emit_byte(0xA5);
}
// HMM Table D-1 says sse2
// NOT_LP64(assert(VM_Version::supports_sse(), ""));
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x51);
}
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0xAE);
}
InstructionMark im(this);
emit_byte(0x83);
} else {
emit_byte(0x81);
}
}
}
InstructionMark im(this);
emit_byte(0x29);
}
InstructionMark im(this);
emit_byte(0x2B);
}
}
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x5C);
}
InstructionMark im(this);
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x5C);
}
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x5C);
}
InstructionMark im(this);
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x5C);
}
}
// not using emit_arith because test
// doesn't support sign-extension of
// 8bit operands
if (encode == 0) {
emit_byte(0xA9);
} else {
emit_byte(0xF7);
}
}
}
InstructionMark im(this);
emit_byte(0x85);
}
emit_byte(0x66);
}
emit_byte(0x66);
}
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0x2E);
}
emit_byte(0x0F);
emit_byte(0x2E);
}
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0xC1);
}
InstructionMark im(this);
emit_byte(0x87);
}
emit_byte(0x87);
}
}
InstructionMark im(this);
emit_byte(0x33);
}
}
emit_byte(0x66);
}
InstructionMark im(this);
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x57);
}
emit_byte(0x0F);
emit_byte(0x57);
}
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0x57);
}
#ifndef _LP64
// 32bit only pieces of the assembler
// NO PREFIX AS NEVER 64BIT
InstructionMark im(this);
emit_byte(0x81);
}
// NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs
InstructionMark im(this);
emit_byte(0x81);
}
// The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax,
// and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded
// into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise.
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0xc7);
}
// Don't use it directly. Use MacroAssembler::decrementl() instead.
}
#endif // _LP64
// 64bit typically doesn't use the x87 but needs to for the trig funcs
emit_byte(0xD9);
emit_byte(0xE1);
}
}
InstructionMark im(this);
emit_byte(0xDC);
}
InstructionMark im(this);
emit_byte(0xD8);
}
}
}
emit_byte(0xD9);
emit_byte(0xE0);
}
}
}
InstructionMark im(this);
emit_byte(0xDC);
}
InstructionMark im(this);
emit_byte(0xD8);
}
emit_byte(0xDE);
emit_byte(0xD9);
}
emit_byte(0xD9);
emit_byte(0xFF);
}
emit_byte(0xD9);
emit_byte(0xF6);
}
}
InstructionMark im(this);
emit_byte(0xDC);
}
InstructionMark im(this);
emit_byte(0xD8);
}
}
// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
// is erroneous for some of the floating-point instructions below.
}
}
InstructionMark im(this);
emit_byte(0xDC);
}
InstructionMark im(this);
emit_byte(0xD8);
}
}
}
}
InstructionMark im(this);
emit_byte(0xDF);
}
InstructionMark im(this);
emit_byte(0xDB);
}
emit_byte(0xD9);
emit_byte(0xF7);
}
emit_byte(0x9B);
emit_byte(0xDB);
emit_byte(0xE3);
}
InstructionMark im(this);
emit_byte(0xDB);
}
InstructionMark im(this);
emit_byte(0xDF);
}
InstructionMark im(this);
emit_byte(0xDB);
}
emit_byte(0xD9);
emit_byte(0xE8);
}
InstructionMark im(this);
emit_byte(0xDD);
}
InstructionMark im(this);
emit_byte(0xD9);
}
}
InstructionMark im(this);
emit_byte(0xDB);
}
InstructionMark im(this);
emit_byte(0xd9);
}
InstructionMark im(this);
emit_byte(0xD9);
}
emit_byte(0xD9);
emit_byte(0xEC);
}
emit_byte(0xD9);
emit_byte(0xED);
}
emit_byte(0xD9);
emit_byte(0xEE);
}
fldln2();
fxch();
fyl2x();
}
fldlg2();
fxch();
fyl2x();
}
}
InstructionMark im(this);
emit_byte(0xDC);
}
InstructionMark im(this);
emit_byte(0xD8);
}
}
}
InstructionMark im(this);
emit_byte(0xDD);
}
InstructionMark im(this);
emit_byte(0x9B);
emit_byte(0xD9);
}
emit_byte(0xdF);
emit_byte(0xE0);
}
emit_byte(0xD9);
emit_byte(0xF8);
}
emit_byte(0xD9);
emit_byte(0xF5);
}
InstructionMark im(this);
emit_byte(0xDD);
}
emit_byte(0xD9);
emit_byte(0xFE);
}
emit_byte(0xD9);
emit_byte(0xFA);
}
InstructionMark im(this);
emit_byte(0xDD);
}
InstructionMark im(this);
emit_byte(0xD9);
}
InstructionMark im(this);
emit_byte(0xDD);
}
}
InstructionMark im(this);
emit_byte(0xD9);
}
InstructionMark im(this);
emit_byte(0xDB);
}
}
InstructionMark im(this);
emit_byte(0xDC);
}
InstructionMark im(this);
emit_byte(0xD8);
}
}
}
}
InstructionMark im(this);
emit_byte(0xDC);
}
InstructionMark im(this);
emit_byte(0xD8);
}
}
}
emit_byte(0xD9);
emit_byte(0xF2);
emit_byte(0xDD);
emit_byte(0xD8);
}
emit_byte(0xD9);
emit_byte(0xE4);
}
// make sure the instruction is supported (introduced for P6, together with cmov)
}
// make sure the instruction is supported (introduced for P6, together with cmov)
}
emit_byte(0x9B);
}
}
emit_byte(0xD9);
emit_byte(0xF1);
}
#ifndef _LP64
// Don't use it directly. Use MacroAssembler::incrementl() instead.
}
}
InstructionMark im(this);
emit_byte(0xC7);
}
InstructionMark im(this);
}
emit_byte(0x61);
}
InstructionMark im(this);
emit_byte(0x68);
}
emit_byte(0x60);
}
emit_byte(0x0F);
emit_byte(0x95);
}
emit_byte(0x0F);
emit_byte(0xA5);
}
emit_byte(0x0F);
emit_byte(0xAD);
}
#else // LP64
// 64bit only pieces of the assembler
// This should only be used by 64bit instructions that can use rip-relative
// it cannot be used by instructions that want an immediate value.
// None will force a 64bit literal to the code stream. Likely a placeholder
// for something that will be patched later and we need to certain it will
// always be reachable.
return false;
}
// This should be rip relative and easily reachable.
return true;
}
// This should be rip relative within the code cache and easily
// reachable until we get huge code caches. (At which point
// ic code is going to have issues).
return true;
}
return false;
}
// Stress the correction code
if (ForceUnreachable) {
// Must be runtimecall reloc, see if it is in the codecache
// Flipping stuff in the codecache to be unreachable causes issues
// with things like inline caches where the additional instructions
// are not handled.
return false;
}
}
// For external_word_type/runtime_call_type if it is reachable from where we
// are now (possibly a temp buffer) and where we might end up
// anywhere in the codeCache then we are always reachable.
// to be more pessimistic.
// Because rip relative is a disp + address_of_next_instruction and we
// don't know the value of address_of_next_instruction we apply a fudge factor
// to make sure we will be ok no matter the size of the instruction we get placed into.
// We don't have to fudge the checks above here because they are already worst case.
// + 4 because better safe than sorry.
if (disp < 0) {
} else {
}
}
int format) {
} else {
}
}
RelocationHolder const& rspec,
int format) {
// Do not use AbstractAssembler::relocate, which is not intended for
// embedded words. Instead, relocate to the enclosing instruction.
#ifdef ASSERT
#endif
}
if (reg_enc >= 8) {
reg_enc -= 8;
}
return reg_enc;
}
if (reg_enc < 8) {
} else {
reg_enc -= 8;
}
return reg_enc;
}
if (dst_enc < 8) {
if (src_enc >= 8) {
src_enc -= 8;
}
} else {
if (src_enc < 8) {
} else {
src_enc -= 8;
}
dst_enc -= 8;
}
}
if (dst_enc < 8) {
if (src_enc < 8) {
} else {
src_enc -= 8;
}
} else {
if (src_enc < 8) {
} else {
src_enc -= 8;
}
dst_enc -= 8;
}
}
}
}
if (adr.base_needs_rex()) {
if (adr.index_needs_rex()) {
} else {
}
} else {
if (adr.index_needs_rex()) {
}
}
}
if (adr.base_needs_rex()) {
if (adr.index_needs_rex()) {
} else {
}
} else {
if (adr.index_needs_rex()) {
} else {
}
}
}
if (adr.base_needs_rex()) {
if (adr.index_needs_rex()) {
} else {
}
} else {
if (adr.index_needs_rex()) {
}
}
} else {
if (adr.base_needs_rex()) {
if (adr.index_needs_rex()) {
} else {
}
} else {
if (adr.index_needs_rex()) {
} else {
}
}
}
}
if (adr.base_needs_rex()) {
if (adr.index_needs_rex()) {
} else {
}
} else {
if (adr.index_needs_rex()) {
} else {
}
}
} else {
if (adr.base_needs_rex()) {
if (adr.index_needs_rex()) {
} else {
}
} else {
if (adr.index_needs_rex()) {
} else {
}
}
}
}
if (adr.base_needs_rex()) {
if (adr.index_needs_rex()) {
} else {
}
} else {
if (adr.index_needs_rex()) {
}
}
} else {
if (adr.base_needs_rex()) {
if (adr.index_needs_rex()) {
} else {
}
} else {
if (adr.index_needs_rex()) {
} else {
}
}
}
}
}
InstructionMark im(this);
emit_byte(0x13);
}
}
InstructionMark im(this);
}
InstructionMark im(this);
emit_byte(0x01);
}
}
InstructionMark im(this);
emit_byte(0x03);
}
}
}
InstructionMark im(this);
emit_byte(0x23);
}
}
emit_byte(0x0F);
}
emit_byte(0x99);
}
emit_byte(0x0F);
emit_byte(0xAE);
}
emit_byte(0x0F);
}
InstructionMark im(this);
emit_byte(0x0F);
}
InstructionMark im(this);
emit_byte(0x81);
}
}
InstructionMark im(this);
emit_byte(0x3B);
}
}
InstructionMark im(this);
emit_byte(0x3B);
}
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0xB1);
}
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x2A);
}
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x2A);
}
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x2C);
}
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0x2C);
}
// Don't use it directly. Use MacroAssembler::decrementl() instead.
// Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
emit_byte(0xFF);
}
// Don't use it directly. Use MacroAssembler::decrementq() instead.
// Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
emit_byte(0xFF);
}
// Don't use it directly. Use MacroAssembler::decrementq() instead.
InstructionMark im(this);
emit_byte(0xFF);
}
emit_byte(0x0F);
emit_byte(0xAE);
}
emit_byte(0x0F);
emit_byte(0xAE);
}
emit_byte(0xF7);
}
emit_byte(0x0F);
emit_byte(0xAF);
}
emit_byte(0x6B);
} else {
emit_byte(0x69);
}
}
// Don't use it directly. Use MacroAssembler::incrementl() instead.
// Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
emit_byte(0xFF);
}
// Don't use it directly. Use MacroAssembler::incrementq() instead.
// Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
emit_byte(0xFF);
}
// Don't use it directly. Use MacroAssembler::incrementq() instead.
InstructionMark im(this);
emit_byte(0xFF);
}
}
InstructionMark im(this);
emit_byte(0x8D);
}
InstructionMark im(this);
}
InstructionMark im(this);
}
InstructionMark im(this);
}
InstructionMark im(this);
emit_byte(0xC7);
}
InstructionMark im(this);
emit_byte(0x81);
}
InstructionMark im(this);
emit_byte(0x81);
}
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x6E);
}
emit_byte(0x66);
emit_byte(0x0F);
emit_byte(0x7E);
}
emit_byte(0x8B);
}
InstructionMark im(this);
emit_byte(0x8B);
}
InstructionMark im(this);
emit_byte(0x89);
}
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0xBE);
}
emit_byte(0x0F);
emit_byte(0xBE);
}
// dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx)
// and movslq(r8, 3); as movl $0x0000000048000000,(%rbx)
// as a result we shouldn't use until tested at runtime...
InstructionMark im(this);
}
InstructionMark im(this);
emit_byte(0xC7);
}
InstructionMark im(this);
emit_byte(0x63);
}
emit_byte(0x63);
}
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0xBF);
}
emit_byte(0x0F);
emit_byte(0xBF);
}
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0xB6);
}
emit_byte(0x0F);
emit_byte(0xB6);
}
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0xB7);
}
emit_byte(0x0F);
emit_byte(0xB7);
}
emit_byte(0xF7);
}
emit_byte(0xF7);
}
InstructionMark im(this);
emit_byte(0x81);
}
}
InstructionMark im(this);
emit_byte(0x0B);
}
}
// skip rsp
}
InstructionMark im(this);
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0xB8);
}
emit_byte(0xF3);
emit_byte(0x0F);
emit_byte(0xB8);
}
InstructionMark im(this);
emit_byte(0x8F);
}
// we have to store original rsp. ABI says that 128 bytes
// below rsp are local scratch.
// skip rsp
}
InstructionMark im(this);
emit_byte(0xFF);
}
if (imm8 == 1) {
emit_byte(0xD1);
} else {
emit_byte(0xC1);
}
}
if (imm8 == 1) {
emit_byte(0xD1);
} else {
emit_byte(0xC1);
}
}
emit_byte(0xD3);
}
InstructionMark im(this);
}
}
InstructionMark im(this);
emit_byte(0x1B);
}
}
if (imm8 == 1) {
emit_byte(0xD1);
} else {
emit_byte(0xC1);
}
}
emit_byte(0xD3);
}
emit_byte(0xC1);
}
emit_byte(0xD3);
}
InstructionMark im(this);
emit_byte(0xF2);
emit_byte(0x0F);
emit_byte(0x51);
}
InstructionMark im(this);
emit_byte(0x83);
} else {
emit_byte(0x81);
}
}
}
InstructionMark im(this);
emit_byte(0x29);
}
InstructionMark im(this);
emit_byte(0x2B);
}
}
// not using emit_arith because test
// doesn't support sign-extension of
// 8bit operands
if (encode == 0) {
emit_byte(0xA9);
} else {
emit_byte(0xF7);
}
}
}
InstructionMark im(this);
emit_byte(0x0F);
emit_byte(0xC1);
}
InstructionMark im(this);
emit_byte(0x87);
}
emit_byte(0x87);
}
}
InstructionMark im(this);
emit_byte(0x33);
}
#endif // !LP64
};
// Implementation of MacroAssembler
// First all the versions that have distinct versions depending on 32/64 bit
// Unless the difference is trivial (1 line or so).
#ifndef _LP64
// 32bit versions
}
}
bool swap_reg_contains_mark,
bool need_tmp_reg = false;
need_tmp_reg = true;
} else {
}
assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
// Biased locking
// See whether the lock is currently biased toward our thread and
// whether the epoch is still valid
// Note that the runtime guarantees sufficient alignment of JavaThread
// pointers to allow age to be placed into low bits
// First check to see whether biasing is even enabled for this object
int null_check_offset = -1;
if (!swap_reg_contains_mark) {
null_check_offset = offset();
}
if (need_tmp_reg) {
}
if (need_tmp_reg) {
}
// The bias pattern is present in the object's header. Need to check
// whether the bias owner and the epoch are both still current.
// Note that because there is no current thread register on x86 we
// need to store off the mark word we read out of the object to
// avoid reloading it and needing to recheck invariants below. This
// store is unfortunate but it makes the overall code shorter and
// simpler.
if (need_tmp_reg) {
}
if (swap_reg_contains_mark) {
null_check_offset = offset();
}
xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
if (need_tmp_reg) {
}
}
// At this point we know that the header has the bias pattern and
// that we are not the bias owner in the current epoch. We need to
// figure out more details about the state of the header in order to
// know what operations can be legally performed on the object's
// header.
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
// Biasing is still enabled for this data type. See whether the
// epoch of the current bias is still valid, meaning that the epoch
// bits of the mark word are equal to the epoch bits of the
// prototype header. (Note that the prototype header's epoch bits
// only change at a safepoint.) If not, attempt to rebias the object
// toward the current thread. Note that we must be absolutely sure
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
// The epoch of the current bias is still valid but we know nothing
// about the owner; it might be set or it might be clear. Try to
// acquire the bias of the object using an atomic operation. If this
// fails we will go in to the runtime to revoke the object's bias.
// Note that we first construct the presumed unbiased header so we
// don't accidentally blow away another thread's valid bias.
markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
if (need_tmp_reg) {
}
lock();
}
if (need_tmp_reg) {
}
// If the biasing toward our thread failed, this means that
// another thread succeeded in biasing it toward itself and we
// need to revoke that bias. The revocation will occur in the
// interpreter runtime in the slow case.
}
}
// At this point we know the epoch has expired, meaning that the
// current "bias owner", if any, is actually invalid. Under these
// circumstances _only_, we are allowed to use the current header's
// value as the comparison value when doing the cas to acquire the
// bias in the current epoch. In other words, we allow transfer of
// the bias from one thread to another directly in this situation.
//
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
if (need_tmp_reg) {
}
orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
lock();
}
if (need_tmp_reg) {
}
// If the biasing toward our thread failed, then another thread
// succeeded in biasing it toward itself and we need to revoke that
// bias. The revocation will occur in the runtime in the slow case.
}
}
// The prototype mark in the klass doesn't have the bias bit set any
// more, indicating that objects of this data type are not supposed
// to be biased any more. We are going to try to reset the mark of
// this object to the prototype value and fall through to the
// CAS-based locking scheme. Note that if our CAS fails, it means
// that another thread raced us for the privilege of revoking the
// bias of this particular object, so it's okay to continue in the
// normal locking code.
//
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
if (need_tmp_reg) {
}
movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
lock();
}
if (need_tmp_reg) {
}
// Fall through to the normal CAS-based lock, because no matter what
// the result of the above CAS, some thread must have succeeded in
// removing the bias bit from the object's header.
}
return null_check_offset;
}
int number_of_arguments) {
}
}
}
// According to Intel Doc. AP-526, "Integer Divide", p.18.
cdql();
} else {
}
}
// Emit a nop sequence whose first byte is safe to patch atomically
// (used by patch_verified_entry, per the comment below).
// NOTE(review): the comment promises a 5-byte nop but only one 0x90 is
// visible here -- this view may be truncated; confirm against full source.
void MacroAssembler::fat_nop() {
// A 5 byte nop that is safe for patching (see patch_verified_entry)
emit_byte(0x90);
}
// set parity bit if FPU flag C2 is set (via rax)
sahf();
// branch
}
// set parity bit if FPU flag C2 is set (via rax)
sahf();
// branch
}
// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
}
// Note: y_lo will be destroyed
// Long compare for Java (semantics as described in JVM spec.)
// x_hi is the return register
}
}
// leal(dst, as_Address(adr));
// see note in movl as to why we must use a move
}
// Tear down the current stack frame (32-bit variant).
// NOTE(review): no body is visible here -- likely truncated in this view;
// confirm against full source.
void MacroAssembler::leave() {
}
// Multiplication of two Java long values stored on the stack
// as illustrated below. Result is in rdx:rax.
//
// rsp ---> [ ?? ] \ \
// .... | y_rsp_offset |
// [ y_lo ] / (in bytes) | x_rsp_offset
// [ y_hi ] | (in bytes)
// .... |
// [ x_lo ] /
// [ x_hi ]
// ....
//
// Basic idea: lo(result) = lo(x_lo * y_lo)
// hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
// load x_hi, y_hi and check if quick
// multiplication is possible
// do full multiplication
// 1st step
// 2nd step
// 3rd step
}
}
// Java shift left long support (semantics as described in JVM spec., p.305)
// (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
// shift value is in rcx !
const int n = BitsPerWord;
Label L;
cmpl(s, n); // if (s < n)
// Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
bind(L); // s (mod n) < n
}
// Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
// (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
const int n = BitsPerWord;
Label L;
cmpl(s, n); // if (s < n)
// Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
bind(L); // s (mod n) < n
}
}
}
} else {
}
}
}
}
// src should NEVER be a real pointer. Use AddressLiteral for true pointers
}
}
// Restore the callee-saved register set pushed by push_callee_saved_registers().
// NOTE(review): body not visible -- likely truncated in this view.
void MacroAssembler::pop_callee_saved_registers() {
}
// Pop the top-of-stack FPU value (counterpart of push_fTOS()).
// NOTE(review): body not visible -- likely truncated in this view.
void MacroAssembler::pop_fTOS() {
}
// Save the callee-saved register set (restored by pop_callee_saved_registers()).
// NOTE(review): body not visible -- likely truncated in this view.
void MacroAssembler::push_callee_saved_registers() {
}
// Push the top-of-stack FPU value (counterpart of pop_fTOS()).
// NOTE(review): body not visible -- likely truncated in this view.
void MacroAssembler::push_fTOS() {
}
}
} else {
}
}
}
}
}
}
}
#ifndef PRODUCT
#endif
// Debugging entry reached from generated code (32-bit): receives the saved
// register values plus the faulting eip and a message string.
// NOTE(review): the visible body is truncated (brace structure is
// incomplete in this view) -- confirm control flow against full source.
void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
// In order to get locks to work, we need to fake a in_VM state
if (ShowMessageBoxOnError) {
// Dump bytecode execution counts when interactive debugging is enabled.
BytecodeCounter::print();
}
// To see where a verify_oop failed, get $ebx+40/X for this frame.
// This is the value of eip which points to where verify_oop will return.
#ifndef PRODUCT
#endif
}
} else {
// Non-interactive path: fail hard with the debug message.
assert(false, "DEBUG MESSAGE");
}
}
// push address of message
pusha(); // push registers
hlt();
}
// push address of message
}
#else // _LP64
// 64 bit versions
// amd64 always does this as a pc-rel
// we can be absolute or disp based on the instruction type
}
return array;
}
bool swap_reg_contains_mark,
assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
// Biased locking
// See whether the lock is currently biased toward our thread and
// whether the epoch is still valid
// Note that the runtime guarantees sufficient alignment of JavaThread
// pointers to allow age to be placed into low bits
// First check to see whether biasing is even enabled for this object
int null_check_offset = -1;
if (!swap_reg_contains_mark) {
null_check_offset = offset();
}
// The bias pattern is present in the object's header. Need to check
// whether the bias owner and the epoch are both still current.
}
// At this point we know that the header has the bias pattern and
// that we are not the bias owner in the current epoch. We need to
// figure out more details about the state of the header in order to
// know what operations can be legally performed on the object's
// header.
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
// Biasing is still enabled for this data type. See whether the
// epoch of the current bias is still valid, meaning that the epoch
// bits of the mark word are equal to the epoch bits of the
// prototype header. (Note that the prototype header's epoch bits
// only change at a safepoint.) If not, attempt to rebias the object
// toward the current thread. Note that we must be absolutely sure
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
// The epoch of the current bias is still valid but we know nothing
// about the owner; it might be set or it might be clear. Try to
// acquire the bias of the object using an atomic operation. If this
// fails we will go in to the runtime to revoke the object's bias.
// Note that we first construct the presumed unbiased header so we
// don't accidentally blow away another thread's valid bias.
markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
lock();
}
// If the biasing toward our thread failed, this means that
// another thread succeeded in biasing it toward itself and we
// need to revoke that bias. The revocation will occur in the
// interpreter runtime in the slow case.
}
}
// At this point we know the epoch has expired, meaning that the
// current "bias owner", if any, is actually invalid. Under these
// circumstances _only_, we are allowed to use the current header's
// value as the comparison value when doing the cas to acquire the
// bias in the current epoch. In other words, we allow transfer of
// the bias from one thread to another directly in this situation.
//
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
lock();
}
// If the biasing toward our thread failed, then another thread
// succeeded in biasing it toward itself and we need to revoke that
// bias. The revocation will occur in the runtime in the slow case.
}
}
// The prototype mark in the klass doesn't have the bias bit set any
// more, indicating that objects of this data type are not supposed
// to be biased any more. We are going to try to reset the mark of
// this object to the prototype value and fall through to the
// CAS-based locking scheme. Note that if our CAS fails, it means
// that another thread raced us for the privilege of revoking the
// bias of this particular object, so it's okay to continue in the
// normal locking code.
//
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
lock();
}
// Fall through to the normal CAS-based lock, because no matter what
// the result of the above CAS, some thread must have succeeded in
// removing the bias bit from the object's header.
}
return null_check_offset;
}
Label L, E;
#ifdef _WIN64
// Windows always allocates space for it's register args
#endif
// Align stack if necessary
{
}
jmp(E);
bind(L);
{
}
bind(E);
#ifdef _WIN64
// restore stack pointer
#endif
}
} else {
}
}
// Full implementation of Java ldiv and lrem; checks for special
// case as described in JVM spec., p.243 & p.271. The function
// returns the (pc) offset of the idivl instruction - may be needed
// for implicit exceptions.
//
// normal case special case
//
// input : rax: dividend min_long
//
// output: rax: quotient (= rax idiv reg) min_long
// rdx: remainder (= rax irem reg) 0
// check for special case
// remainder = 0)
// handle normal case
cdqq();
int idivq_offset = offset();
// normal and special case exit
return idivq_offset;
}
if (value == 0) { ; return; }
}
if (value == 0) { ; return; }
}
// Emit a 5-byte nop whose first byte is safe to patch atomically
// (see patch_verified_entry). Sequence recommended by the 'Software
// Optimization Guide for the AMD Hammer Processor': 66 66 90 66 90.
void MacroAssembler::fat_nop() {
  static const int kPatchableNop[] = { 0x66, 0x66, 0x90, 0x66, 0x90 };
  for (int i = 0; i < 5; i++) {
    emit_byte(kPatchableNop[i]);
  }
}
if (value == 0) { ; return; }
}
if (value == 0) { ; return; }
}
// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
}
ShouldNotReachHere(); // 64bit doesn't use two regs
}
}
}
// Tear down the current stack frame (64-bit variant).
// NOTE(review): no code is visible in the body -- likely truncated in
// this view; confirm against full source.
void MacroAssembler::leave() {
// %%% is this really better? Why not on 32bit too?
}
ShouldNotReachHere(); // 64bit doesn't use two regs
}
}
}
} else {
} else {
}
}
}
}
}
// src should NEVER be a real pointer. Use AddressLiteral for true pointers
}
// These are mostly for initializing NULL
}
}
}
} else {
}
}
bool clear_pc) {
// we must set sp to zero to clear frame
// must clear fp, so that compiled frames are not confused; it is
// possible that we need it only for debugging
if (clear_fp) {
}
if (clear_pc) {
}
}
// determine last_java_sp register
if (!last_java_sp->is_valid()) {
last_java_sp = rsp;
}
// last_java_fp is optional
if (last_java_fp->is_valid()) {
}
// last_java_pc is optional
if (last_java_pc != NULL) {
}
}
}
}
}
}
}
}
}
}
pusha(); // get regs on stack
hlt();
}
push_CPU_state(); // keeps alignment at 16 bytes
}
#ifndef PRODUCT
#endif
// In order to get locks to work, we need to fake a in_VM state
if (ShowMessageBoxOnError ) {
#ifndef PRODUCT
BytecodeCounter::print();
}
#endif
// To see where a verify_oop failed, get $ebx+40/X for this frame.
// XXX correct this offset for amd64
// This is the value of eip which points to where verify_oop will return.
#ifndef PRODUCT
#endif
}
} else {
msg);
}
}
#endif // _LP64
// Now versions that are common to 32/64 bit
}
}
}
}
}
}
}
pushf();
lock();
popf();
}
// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tmp.
// Bang stack for total size given plus shadow page size.
// Bang one page at a time because large size can bang beyond yellow and
// red zones.
// Bang down shadow pages too.
// The -1 because we already subtracted 1 page.
for (int i = 0; i< StackShadowPages-1; i++) {
// this could be any sized move but this is can be a debugging crumb
// so the bigger the better.
}
}
// Check for biased locking unlock case, which is a no-op
// Note: we do not have to check the thread ID for two reasons.
// First, the interpreter checks for IllegalMonitorStateException at
// a higher level. Second, if the bias was revoked while we held the
// lock, the object could not be rebiased toward another thread, so
// the bias bit would be clear.
}
// implements x == 0 ? 0 : 1
// note: must only look at least-significant byte of x
// since C-style booleans are stored in one byte
// only! (was bug)
andl(x, 0xFF);
}
// Wouldn't need if AddressLiteral version had new name
}
}
} else {
}
}
// Implementation of call_VM versions
bool check_exceptions) {
Label C, E;
jmp(E);
bind(C);
ret(0);
bind(E);
}
bool check_exceptions) {
Label C, E;
jmp(E);
bind(C);
ret(0);
bind(E);
}
bool check_exceptions) {
Label C, E;
jmp(E);
bind(C);
ret(0);
bind(E);
}
bool check_exceptions) {
Label C, E;
jmp(E);
bind(C);
ret(0);
bind(E);
}
int number_of_arguments,
bool check_exceptions) {
}
bool check_exceptions) {
}
bool check_exceptions) {
}
bool check_exceptions) {
}
int number_of_arguments,
bool check_exceptions) {
// determine java_thread register
if (!java_thread->is_valid()) {
#ifdef _LP64
#else
java_thread = rdi;
#endif // LP64
}
// determine last_java_sp register
if (!last_java_sp->is_valid()) {
last_java_sp = rsp;
}
// debugging support
// push java thread (becomes first argument of C function)
// set last Java frame before call
// Only interpreter should have to set fp
// do the call, remove parameters
// restore the thread (cannot use the pushed argument since arguments
// may be overwritten by C code generated by an optimizing compiler);
// however can use the register value directly if it is callee saved.
// rdi & rsi (also r15) are callee saved -> nothing to do
#ifdef ASSERT
{ Label L;
stop("MacroAssembler::call_VM_base: rdi not callee saved?");
bind(L);
}
#endif
} else {
}
// reset last Java frame
// Only interpreter should have to clear fp
reset_last_Java_frame(java_thread, true, false);
#ifndef CC_INTERP
// C++ interp handles this in the interpreter
#endif /* CC_INTERP */
if (check_exceptions) {
// check for pending exceptions (java_thread is set upon return)
#ifndef _LP64
#else
// This used to conditionally jump to forward_exception however it is
// possible if we relocate that the branch will not reach. So we must jump
// around so we can always reach
#endif // LP64
}
// get oop result if there is one and reset the value in the thread
if (oop_result->is_valid()) {
}
}
// Helper for the call_VM variants: computes last_Java_sp (see the long
// comment below) before dispatching into the VM.
// NOTE(review): the executable statements are not visible in this view --
// likely truncated; confirm against full source.
void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
// Calculate the value for last_Java_sp
// somewhat subtle. call_VM does an intermediate call
// which places a return address on the stack just under the
// stack pointer as the user finsihed with it. This allows
// use to retrieve last_Java_pc from last_Java_sp[-1].
// On 32bit we then have to push additional args on the stack to accomplish
// the actual requested call. On 64bit call_VM only can use register args
// so the only extra space is the return address that call_VM created.
// This hopefully explains the calculations here.
#ifdef _LP64
// We've pushed one address, correct last_Java_sp
#else
#endif // LP64
}
}
}
}
// Three-argument leaf call into the VM (no Java frame bookkeeping,
// presumably delegates to the base call_VM_leaf -- body not visible,
// likely truncated in this view).
void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
}
}
}
} else {
}
}
} else {
}
}
}
}
// Compare two double XMM operands and materialize a -1/0/+1 style result
// in dst, with the unordered (NaN) case mapped per unordered_is_less.
// NOTE(review): the branch bodies are not visible -- truncated in this
// view; confirm the comparison/fixup sequence against full source.
void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
Label L;
if (unordered_is_less) {
} else { // unordered is greater
}
bind(L);
}
// Single-precision counterpart of cmpsd2int: compare two float XMM
// operands into an integer result in dst, NaN handling controlled by
// unordered_is_less.
// NOTE(review): branch bodies not visible -- truncated in this view.
void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
Label L;
if (unordered_is_less) {
} else { // unordered is greater
}
bind(L);
}
} else {
}
}
#ifdef _LP64
} else {
}
#else
} else {
}
#endif // _LP64
}
#ifdef _LP64
// moves src2's literal address
#else
#endif // _LP64
}
lock();
} else {
lock();
}
}
}
}
}
Label L;
jcc(negated_cond, L);
bind(L);
}
// Full implementation of Java idiv and irem; checks for
// special case as described in JVM spec., p.243 & p.271.
// The function returns the (pc) offset of the idivl
// instruction - may be needed for implicit exceptions.
//
// normal case special case
//
// input : rax,: dividend min_int
// reg: divisor (may not be rax,/rdx) -1
//
// output: rax,: quotient (= rax, idiv reg) min_int
// rdx: remainder (= rax, irem reg) 0
const int min_int = 0x80000000;
// check for special case
// handle normal case
cdql();
int idivl_offset = offset();
// normal and special case exit
return idivl_offset;
}
if (value == 0) { ; return; }
}
if (value == 0) { ; return; }
}
if (offset == 1) {
} else {
}
bind (_is_positive);
}
// !defined(COMPILER2) is because of stupid core builds
// Discard the entire x87 register stack so subsequent FP code starts
// from a known-empty state.
void MacroAssembler::empty_FPU_stack() {
  if (!VM_Version::supports_mmx()) {
    // No EMMS available: free each stack slot individually (st7 .. st0).
    for (int slot = 8; slot-- > 0; ) ffree(slot);
    return;
  }
  // One EMMS clears all FP/MMX stack slots at once.
  emms();
}
#endif // !LP64 || C1 || !C2
// Defines obj, preserves var_size_in_bytes
int con_size_in_bytes,
} else {
if (var_size_in_bytes == noreg) {
} else {
}
// if end < obj then we wrapped around => object too long => slow case
// Compare obj with the top addr, and if still equal, store the new top addr in
// end at the address of the top addr pointer. Sets ZF if was equal, and clears
// it otherwise. Use lock prefix for atomicity on MPs.
}
}
// Set up a new stack frame (counterpart of leave()).
// NOTE(review): body not visible -- likely truncated in this view.
void MacroAssembler::enter() {
}
}
if (VM_Version::supports_cmov()) {
if (pop_left) {
} else {
}
if (pop_right) {
fpop();
}
} else {
if (pop_left) {
if (pop_right) {
fcompp();
} else {
}
} else {
}
// convert FPU condition into eflags condition via rax,
sahf();
}
// condition codes set as follows:
//
// CF (corresponds to C0) if x < y
// PF (corresponds to C2) if unordered
// ZF (corresponds to C3) if x = y
}
}
// x87 float compare producing an integer result in dst; unordered (NaN)
// outcome is mapped per unordered_is_less; pop_left/pop_right control
// whether the FPU operands are popped.
// NOTE(review): branch bodies not visible -- truncated in this view;
// confirm the condition/fixup sequence against full source.
void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
Label L;
if (unordered_is_less) {
} else { // unordered is greater
}
bind(L);
}
}
}
}
}
// Pop the x87 top-of-stack: free the register, then advance the FPU
// stack pointer. Order matters -- ffree() must target the slot before
// fincstp() moves TOS past it.
void MacroAssembler::fpop() {
ffree();
fincstp();
}
{ Label L;
bind(L);
fprem();
#ifdef _LP64
#else
sahf();
#endif // _LP64
}
// Result is in ST0.
// Note: fxch & fpop to get rid of ST1
// (otherwise FPU stack could overflow eventually)
fxch(1);
fpop();
}
} else {
}
}
}
if (value == 0) { ; return; }
}
if (value == 0) { ; return; }
}
} else {
}
}
InstructionMark im(this);
const int short_size = 2;
const int long_size = 6;
// 0111 tttn #8-bit disp
} else {
// 0000 1111 1000 tttn #32-bit disp
emit_byte(0x0F);
}
} else {
#ifdef ASSERT
warning("reversing conditional branch");
#endif /* ASSERT */
}
}
} else {
}
}
int off;
} else {
}
return off;
}
// Note: load_signed_short used to be called load_signed_word.
// Although the 'w' in x86 opcodes refers to the term "word" in the assembler
// manual, which means 16 bits, that usage is found nowhere in HotSpot code.
// The term "word" in HotSpot means a 32- or 64-bit machine word.
int off;
// This is dubious to me since it seems safe to do a signed 16 => 64 bit
// version but this is what 64bit has always done. This seems to imply
// that users are only using 32bits worth.
} else {
}
return off;
}
// According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
// and "3.9 Partial Register Penalties", p. 22).
int off;
} else {
}
return off;
}
// Note: load_unsigned_short used to be called load_unsigned_word.
// According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
// and "3.9 Partial Register Penalties", p. 22).
int off;
} else {
}
return off;
}
int size_in_bytes, bool is_signed) {
#ifndef _LP64
// For case 8, caller is responsible for manually loading
// the second word into another register.
case ~8: // fall through:
#else
case ~8: // fall through:
#endif
case ~4: // fall through:
default: ShouldNotReachHere();
}
}
} else {
}
}
} else {
}
}
// C++ bool manipulation
if(sizeof(bool) == 1)
else if(sizeof(bool) == 2)
else if(sizeof(bool) == 4)
else
// unsupported
}
if(sizeof(bool) == 1)
else if(sizeof(bool) == 2)
else if(sizeof(bool) == 4)
else
// unsupported
}
if(sizeof(bool) == 1)
else if(sizeof(bool) == 2)
else if(sizeof(bool) == 4)
else
// unsupported
}
}
if (UseXmmLoadAndClearUpper) {
} else {
}
} else {
if (UseXmmLoadAndClearUpper) {
} else {
}
}
}
} else {
}
}
}
}
// src should NEVER be a real pointer. Use AddressLiteral for true pointers
}
}
} else {
}
}
if (needs_explicit_null_check(offset)) {
// provoke OS NULL exception if reg = NULL by
// accessing M[reg] w/o changing any (non-CC) registers
// NOTE: cmpl is plenty here to provoke a segv
// Note: should probably use testl(rax, Address(reg, 0));
// may be shorter code (however, this version of
// testl needs to be implemented first)
} else {
// nothing to do, (later) access of M[reg + offset]
// will provoke OS NULL exception if reg = NULL
}
}
// Trigger a debugger-friendly breakpoint via the OS helper rather than
// an inline trap instruction (see rationale below).
// NOTE(review): the call itself is not visible -- likely truncated in
// this view.
void MacroAssembler::os_breakpoint() {
// instead of directly emitting a breakpoint, call os:breakpoint for better debugability
// (e.g., MSVC can't call ps() otherwise)
}
// Restore the CPU state saved by push_CPU_state().
// NOTE(review): only the integer-state restore is visible; a matching
// FPU-state restore may have been truncated from this view -- confirm.
void MacroAssembler::pop_CPU_state() {
pop_IU_state();
}
// Restore floating-point state saved by push_FPU_state().
// NOTE(review): body not visible -- likely truncated in this view.
void MacroAssembler::pop_FPU_state() {
}
// Restore integer-unit state: general registers first, then flags
// (mirror order of push_IU_state(), which pushes flags before pusha).
void MacroAssembler::pop_IU_state() {
popa();
popf();
}
// Save Integer and Float state
// Warning: Stack must be 16 byte aligned (64bit)
// Save integer and floating-point CPU state (see comments above: stack
// must be 16-byte aligned on 64-bit). Restored by pop_CPU_state().
// NOTE(review): body not visible -- likely truncated in this view.
void MacroAssembler::push_CPU_state() {
}
// Save floating-point state (restored by pop_FPU_state()); the 32-bit
// path waits for pending FP exceptions via fwait().
// NOTE(review): the save instructions themselves are not visible --
// likely truncated in this view; confirm against full source.
void MacroAssembler::push_FPU_state() {
#ifndef _LP64
fwait();
#else
#endif // LP64
}
// Save the integer-unit state (flags + general-purpose registers) on
// the stack; restored in reverse order by pop_IU_state().
void MacroAssembler::push_IU_state() {
// Push flags first because pusha kills them
pushf();
// Make sure rsp stays 16-byte aligned
pusha();
}
// determine java_thread register
if (!java_thread->is_valid()) {
java_thread = rdi;
}
// we must set sp to zero to clear frame
if (clear_fp) {
}
if (clear_pc)
}
}
}
}
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
// Size of store must match masking code above
}
// Calls to C land
//
// When entering C land, the rbp, & rsp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
// determine java_thread register
if (!java_thread->is_valid()) {
java_thread = rdi;
}
// determine last_java_sp register
if (!last_java_sp->is_valid()) {
last_java_sp = rsp;
}
// last_java_fp is optional
if (last_java_fp->is_valid()) {
}
// last_java_pc is optional
if (last_java_pc != NULL) {
}
}
}
}
} else {
}
}
} else {
}
}
//////////////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC
#ifndef _LP64
#endif
bool tosca_live) {
PtrQueue::byte_offset_of_index()));
PtrQueue::byte_offset_of_buf()));
// if (!marking_in_progress) goto done;
cmpl(in_progress, 0);
} else {
cmpb(in_progress, 0);
}
// if (x.f == NULL) goto done;
// Can we store original value in the thread's buffer?
#ifdef _LP64
#else
#endif
#ifdef _LP64
#else
#endif
// save the live input values
#ifdef _LP64
#else
#endif
}
#ifndef _LP64
#endif
PtrQueue::byte_offset_of_index()));
PtrQueue::byte_offset_of_buf()));
// Does store cross heap regions?
// crosses regions, storing NULL?
// storing region crossing non-NULL, is card already dirty?
#ifdef _LP64
// get the address of the card
#else
#endif
// storing a region crossing, non-NULL oop, card is clean.
// dirty card and log.
cmpl(queue_index, 0);
#ifdef _LP64
#else
#endif
// save the live input values
#ifdef _LP64
#else
#endif
}
#endif // SERIALGC
//////////////////////////////////////////////////////////////////////////////////
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
}
}
// split the store check operation so that other instructions can be scheduled in between
}
// The calculation for byte_map_base is as follows:
// byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
// So this essentially converts an address to a displacement and
// it will never need to be relocated. On 64bit however the value may be too
// large for a 32bit displacement
} else {
// By doing it as an ExternalAddress disp could be converted to a rip-relative
// displacement and done in a single instruction given favorable mapping and
// a smarter version of as_Address. Worst case it is two instructions which
// is no worse off than loading disp into a register and doing as a simple
// Address() as above.
// We can't do as ExternalAddress as the only style since if disp == 0 we'll
// assert since NULL isn't acceptable in a reloci (see 6644928). In any case
// in some cases we'll get a single instruction version.
}
}
}
}
// src2 must be rval
} else {
}
}
// C++ bool manipulation
if(sizeof(bool) == 1)
else if(sizeof(bool) == 2) {
// testw implementation needed for two byte bools
} else if(sizeof(bool) == 4)
else
// unsupported
}
}
// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
int con_size_in_bytes,
verify_tlab();
if (var_size_in_bytes == noreg) {
} else {
}
// update the tlab top pointer
// recover var_size_in_bytes if necessary
if (var_size_in_bytes == end) {
}
verify_tlab();
}
// Preserves rbx, and rdx.
// No allocation in the shared eden.
}
// calculate amount of free space
// Retain tlab and allocate object in shared space if
// the amount free in the tlab is too large to discard.
// Retain
// %%% yuck as movptr...
if (TLABStats) {
// increment number of slow_allocations
}
if (TLABStats) {
// increment number of refills
// accumulate wastage -- t1 is amount free in tlab
}
// if tlab is currently allocated (top or end != null) then
// fill [top, end + alignment_reserve) with array object
// set up the mark word
movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
// set the length to the remaining space
// set klass to intArrayKlass
// dubious reloc why not an oop reloc?
// store klass last. Concurrent GCs assume the length is valid if
// the klass field is not null.
// refill the tlab with an eden allocation
// add object_size ??
// Check that t1 was preserved in eden_allocate.
#ifdef ASSERT
if (UseTLAB) {
stop("assert(t1 != tlab size)");
}
#endif
verify_tlab();
}
static const double pi_4 = 0.7853981633974483;
// A hand-coded argument reduction for values in fabs(pi/4, pi/2)
// was attempted in this code; unfortunately it appears that the
// switch to 80-bit precision and back causes this to be
// unprofitable compared with simply performing a runtime call if
// the argument is out of the (-pi/4, pi/4) range.
if (!VM_Version::supports_cmov()) {
// fcmp needs a temporary so preserve rbx,
}
// x ?<= pi/4
fabs(); // Stack: |X| PI/4 X
// fastest case: -pi/4 <= x <= pi/4
switch(trig) {
case 's':
fsin();
break;
case 'c':
fcos();
break;
case 't':
ftan();
break;
default:
assert(false, "bad intrinsic");
break;
}
}
// slow case: runtime call
// Preserve registers across runtime call
pusha();
if (num_fpu_regs_in_use > 1) {
// Must preserve all other FPU regs (could alternatively convert
// SharedRuntime::dsin and dcos into assembly routines known not to trash
// FPU state, but can not trust C compiler)
// NOTE that in this case we also push the incoming argument to
// the stack and restore it later; we also use this stack slot to
// hold the return value from dsin or dcos.
for (int i = 0; i < num_fpu_regs_in_use; i++) {
}
}
#ifdef _LP64
#endif // _LP64
// NOTE: we must not use call_VM_leaf here because that requires a
// complete interpreter frame in debug mode -- same bug as 4387334
// MacroAssembler::call_VM_leaf_base is perfectly safe and will
// do proper 64bit abi
// Need to add stack banging before this runtime call if it needs to
// be taken; however, there is no generic stack banging routine at
// the MacroAssembler level
switch(trig) {
case 's':
{
}
break;
case 'c':
{
}
break;
case 't':
{
}
break;
default:
assert(false, "bad intrinsic");
break;
}
#ifdef _LP64
#endif // _LP64
if (num_fpu_regs_in_use > 1) {
// Must save return value to stack and then restore entire FPU stack
for (int i = 0; i < num_fpu_regs_in_use; i++) {
}
}
popa();
// Come here with result in F-TOS
}
}
// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
"caller must use same register for non-constant itable index as for method");
// Compute start of first itableOffsetEntry (which is at the end of the vtable)
// %%% Could store the aligned, prescaled offset in the klassoop.
if (HeapWordsPerLong > 1) {
// Round up to align_object_offset boundary
// see code for instanceKlass::start_of_itable!
}
// Adjust recv_klass by scaled itable_index, so we can free itable_index.
// for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
// if (scan->interface() == intf) {
// result = (klass + scan->offset() + itable_index);
// }
// }
if (peel) {
} else {
// (invert the test to fall through to found_method...)
}
if (!peel) break;
// Check that the previous entry is non-null. A null entry means that
// the receiver class doesn't implement the interface, and wasn't the
// same as when the caller was compiled.
}
// Got a hit.
}
}
if (super_check_offset.is_register()) {
} else if (must_load_sco) {
}
int label_nulls = 0;
// Hacked jcc, which "knows" that L_fallthrough, at least, is in
// range of a jccb. If this routine grows larger, reconsider at
// least some of these.
// Hacked jmp, which may only be used just before L_fallthrough.
// If the pointers are equal, we are done (e.g., String[] elements).
// This self-check enables sharing of secondary supertype arrays among
// non-primary types such as array-of-interface. Otherwise, each such
// type would need its own customized SSA.
// We move this check to the front of the fast path because many
// type checks are in fact trivially successful in this manner,
// so we get a nicely predicted branch right at the start of the check.
// Check the supertype display:
if (must_load_sco) {
// Positive movl does right thing on LP64.
}
// This check has worked decisively for primary supers.
// Secondary supers are sought in the super_cache ('super_cache_addr').
// (Secondary supers are interfaces and very deeply nested subtypes.)
// This works in the same check above because of a tricky aliasing
// between the super_cache and the primary super display elements.
// (The 'super_check_addr' can address either, as the case requires.)
// Note that the cache is updated below if it does not help us find
// what we need immediately.
// So if it was a primary super, we can just fail immediately.
// Otherwise, it's the slow path for us (no success at this point).
if (super_check_offset.is_register()) {
if (L_failure == &L_fallthrough) {
} else {
}
// Need a slow path; fast failure is impossible.
if (L_slow_path == &L_fallthrough) {
} else {
}
} else {
// No slow path; it's a fast decision.
if (L_failure == &L_fallthrough) {
} else {
}
}
}
bool set_cond_codes) {
int label_nulls = 0;
// a couple of useful fields in sub_klass:
// Do a linear scan of the secondary super-klass chain.
// This code is rarely used, so simplicity is a virtue here.
// The repne_scan instruction uses fixed registers, which we must spill.
// Don't worry too much about pre-existing connections with the input regs.
// Get super_klass value into rax (even if it was in rdi or rcx).
}
#ifndef PRODUCT
#endif //PRODUCT
// We will consult the secondary-super array.
// Load the array length. (Positive movl does right thing on LP64.)
// Skip to start of data.
// Scan RCX words at [RDI] for an occurrence of RAX.
// Set NZ/Z based on last compare.
#ifdef _LP64
// This part is tricky, as values in supers array could be 32 or 64 bit wide
// and we store values in objArrays always encoded, thus we need to encode
// the value of rax before repne. Note that rax is dead after the repne.
if (UseCompressedOops) {
// The superclass is never null; it would be a basic system error if a null
// pointer were to sneak in here. Note that we have already loaded the
// Klass::super_check_offset from the super_klass in the fast path,
// so if there is a null in that register, we are already in the afterlife.
repne_scanl();
} else
#endif // _LP64
repne_scan();
// Unspill the temp. registers:
if (set_cond_codes) {
// Special hack for the AD files: rdi is guaranteed non-zero.
}
if (L_failure == &L_fallthrough)
// Success. Cache the super we found and proceed in triumph.
if (L_success != &L_fallthrough) {
}
}
}
}
} else {
}
}
} else {
}
}
if (!VerifyOops) return;
// Pass register number to verify_oop_subroutine
char* b = new char[strlen(s) + 50];
// avoid using pushptr, as it modifies scratch registers
// and our contract is not to modify anything
// call indirectly to solve generation ordering problem
}
int offset) {
if (value != 0)
// load indirectly to solve generation ordering problem
#ifdef ASSERT
Label L;
hlt();
bind(L);
#endif
if (offset != 0)
return RegisterOrConstant(tmp);
}
// registers on entry:
// - rax ('check' register): required MethodType
// - rcx: method handle
// - rdx, rsi, or ?: killable temp
// compare method type against that of the receiver
cmpptr(mtype_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)));
}
// A method handle has a "vmslots" field which gives the size of its
// argument list in JVM stack slots. This field is either located directly
// in every method handle, or else is indirectly accessed through the
// method handle's MethodType. This macro hides the distinction.
// load mh.type.form.vmslots
if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
// hoist vmslots into every mh to avoid dependent load chain
movl(vmslots_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg)));
} else {
movptr(temp2_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)));
movptr(temp2_reg, Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)));
movl(vmslots_reg, Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)));
}
}
// registers on entry:
// - rcx: method handle
// - rdx: killable temp (interpreted only)
// - rax: killable temp (compiled only)
// pick out the interpreted side of the handler
movptr(temp_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg)));
// off we go...
// for the various stubs which take control at this point,
// see MethodHandles::generate_method_handle_stub
}
int extra_slot_offset) {
// cf. TemplateTable::prepare_invoke(), if (load_receiver).
#ifdef ASSERT
#endif
if (arg_slot.is_constant()) {
} else {
}
}
if (!VerifyOops) return;
// Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
// Pass register number to verify_oop_subroutine
char* b = new char[strlen(s) + 50];
sprintf(b, "verify_oop_addr: %s", s);
// addr may contain rsp so we will have to adjust it based on the push
// we just did
// NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
// stores rax into addr which is backwards of what was intended.
} else {
}
// pass msg argument
// avoid using pushptr, as it modifies scratch registers
// and our contract is not to modify anything
// call indirectly to solve generation ordering problem
// Caller pops the arguments and restores rax, from the stack
}
// Debug-build sanity check of the thread-local allocation buffer:
// asserts tlab start <= top <= end. No-op unless both UseTLAB and
// VerifyOops are enabled.
void MacroAssembler::verify_tlab() {
#ifdef ASSERT
if (UseTLAB && VerifyOops) {
// NOTE(review): the comparisons and branches that guard these stop()
// calls are not visible in this view -- confirm against full source.
stop("assert(top >= start)");
stop("assert(top <= end)");
}
#endif
}
// Debug helper: decodes and prints an x87 FPU control word (rounding
// control, precision control, and exception-mask flags).
// NOTE(review): the field accessors (rounding_control(),
// precision_control()) and the remaining switch cases / output are not
// visible in this view of the file.
class ControlWord {
public:
void print() const {
// rounding control
const char* rc;
switch (rounding_control()) {
case 0: rc = "round near"; break;
};
// precision control
const char* pc;
switch (precision_control()) {
case 0: pc = "24 bits "; break;
};
// flags
char f[9];
f[0] = ' ';
f[1] = ' ';
f[8] = '\x0';   // NUL-terminate the 8-character flag string
// output
}
};
// Debug helper: decodes and prints an x87 FPU status word (condition
// codes and exception flags).
class StatusWord {
public:
void print() const {
// condition codes
char c[5];
c[4] = '\x0';   // NUL-terminate the 4-character condition-code string
// flags
char f[9];
f[8] = '\x0';   // NUL-terminate the 8-character flag string
// output
}
};
// Debug helper: prints the x87 FPU tag word (per-register tags).
// NOTE(review): print body not visible in this view of the file.
class TagWord {
public:
void print() const {
}
};
// Debug helper: one x87 FPU data register.
// NOTE(review): method bodies not visible in this view of the file.
class FPU_Register {
public:
bool is_indefinite() const {
}
void print() const {
};
};
// Debug helper: a snapshot of the x87 FPU state (control/status/tag
// words plus the eight ten-byte data registers).
class FPU_State {
public:
enum {
register_size = 10,   // bytes per x87 data register (80-bit extended)
number_of_registers = 8,
register_mask = 7   // number_of_registers - 1, for modular indexing
};
// Maps a 2-bit tag-word value to a human-readable name.
const char* tag_as_string(int tag) const {
switch (tag) {
case 0: return "valid";
case 1: return "zero";
case 2: return "special";
case 3: return "empty";
}
// unreachable for 2-bit tag values; NULL satisfies the compiler
return NULL;
}
void print() const {
// print computation registers
{ int t = _status_word.top();
// NOTE(review): presumably maps between stack index ST(i) and the
// physical register index using the top-of-stack -- confirm.
for (int i = 0; i < number_of_registers; i++) {
int j = (i - t) & register_mask;
}
}
printf("\n");
// print control registers
}
};
// Debug helper: decodes and prints the CPU flags register.
class Flag_Register {
public:
void print() const {
// flags
char f[8];
f[7] = '\x0';   // NUL-terminate the 7-character flag string
// output
}
};
// Debug helper: one integer (general-purpose) register.
// NOTE(review): print body not visible in this view of the file.
class IU_Register {
public:
void print() const {
}
};
// Debug helper: a snapshot of the integer-unit state (general-purpose
// registers plus control registers such as flags).
class IU_State {
public:
void print() const {
// computation registers
printf("\n");
// control registers
}
};
// Debug helper: the complete CPU state (integer unit + FPU), printed
// between separator lines for readability.
class CPU_State {
public:
void print() const {
printf("--------------------------------------------------\n");
printf("\n");
_fpu_state.print();
printf("--------------------------------------------------\n");
}
};
};
// Print the current CPU state for debugging.
// NOTE(review): body not visible in this view of the file.
void MacroAssembler::print_CPU_state() {
}
static int counter = 0;
counter++;
// For leaf calls, only verify that the top few elements remain empty.
// We only need 1 empty at the top for C2 code.
if( stack_depth < 0 ) {
printf("FPR7 not empty\n");
assert(false, "error");
return false;
}
return true; // All other stack states do not matter
}
"bad FPU control word");
// compute stack depth
int i = 0;
int d = i;
// verify findings
if (i != FPU_State::number_of_registers) {
// stack not contiguous
printf("%s: stack not contiguous at ST%d\n", s, i);
assert(false, "error");
return false;
}
// check if computed stack depth corresponds to expected stack depth
if (stack_depth < 0) {
// expected stack depth is -stack_depth or less
if (d > -stack_depth) {
// too many elements on the stack
assert(false, "error");
return false;
}
} else {
// expected stack depth is stack_depth
if (d != stack_depth) {
// wrong stack depth
assert(false, "error");
return false;
}
}
// everything is cool
return true;
}
if (!VerifyFPU) return;
// pass message string s
// check for error
{ Label L;
int3(); // break if error condition
bind(L);
}
}
#ifdef _LP64
if (UseCompressedOops) {
} else
#endif
}
#ifdef _LP64
if (UseCompressedOops) {
if (Universe::narrow_oop_shift() != 0) {
movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
} else {
movq(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
}
} else
#endif
{
movptr(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
}
}
#ifdef _LP64
if (UseCompressedOops) {
} else
#endif
}
#ifdef _LP64
if (UseCompressedOops) {
// Store to klass gap in destination
}
}
if (UseCompressedOops) {
} else {
}
}
if (UseCompressedOops) {
} else {
}
}
// Algorithm must match oop.inline.hpp encode_heap_oop.
verify_oop(r, "broken oop in encode_heap_oop");
if (Universe::narrow_oop_shift() != 0) {
}
return;
}
#ifdef ASSERT
if (CheckCompressedOops) {
stop("MacroAssembler::encode_heap_oop: heap base corrupted?");
}
#endif
verify_oop(r, "broken oop in encode_heap_oop");
testq(r, r);
subq(r, r12_heapbase);
}
#ifdef ASSERT
if (CheckCompressedOops) {
testq(r, r);
stop("null oop passed to encode_heap_oop_not_null");
}
#endif
verify_oop(r, "broken oop in encode_heap_oop_not_null");
subq(r, r12_heapbase);
}
if (Universe::narrow_oop_shift() != 0) {
}
}
#ifdef ASSERT
if (CheckCompressedOops) {
stop("null oop passed to encode_heap_oop_not_null2");
}
#endif
}
}
if (Universe::narrow_oop_shift() != 0) {
}
}
if (Universe::narrow_oop_shift() != 0) {
}
verify_oop(r, "broken oop in decode_heap_oop");
return;
}
#ifdef ASSERT
if (CheckCompressedOops) {
stop("MacroAssembler::decode_heap_oop: heap base corrupted?");
}
#endif
addq(r, r12_heapbase);
#if 0
// alternate decoding probably a wash.
testq(r, r);
#endif
verify_oop(r, "broken oop in decode_heap_oop");
}
// Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
if (Universe::narrow_oop_shift() != 0) {
}
} else {
}
}
// Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
if (Universe::narrow_oop_shift() != 0) {
}
}
}
}
}
}
// Reload the heap-base register used for compressed-oop en/decoding;
// only meaningful when UseCompressedOops is enabled (64-bit).
// NOTE(review): the actual load is not visible in this view -- confirm
// against the full source.
void MacroAssembler::reinit_heapbase() {
if (UseCompressedOops) {
}
}
#endif // _LP64
switch (cond) {
// Note some conditions are synonyms for others
}
}
}
// NOTE(review): presumably binds the skip-target label emitted by the
// SkipIfEqual constructor; the bind call is not visible in this view.
SkipIfEqual::~SkipIfEqual() {
}