/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_X86_VM_ASSEMBLER_X86_HPP
#define CPU_X86_VM_ASSEMBLER_X86_HPP
class BiasedLockingCounters;
// Contains all the definitions needed for x86 assembly code generation.
// Calling convention
public:
enum {
#ifdef _LP64
#ifdef _WIN64
#else
#endif // _WIN64
#else
n_register_parameters = 0 // 0 registers used to pass arguments
#endif // _LP64
};
};
#ifdef _LP64
// Symbolically name the register arguments used by the c calling convention.
#ifdef _WIN64
#else
#endif // _WIN64
// Symbolically name the register arguments used by the Java calling convention.
// We have control over the convention for java so we can do what we please.
// What pleases us is to offset the java calling convention so that when
// we call a suitable jni method the arguments are lined up and we don't
// have to do any shuffling. A suitable jni method is non-static and takes a
// small number of arguments (two fewer register args on windows).
//
// |-------------------------------------------------------|
// | c_rarg0 c_rarg1 c_rarg2 c_rarg3 c_rarg4 c_rarg5 |
// |-------------------------------------------------------|
// | rcx rdx r8 r9 rdi* rsi* | windows (* not a c_rarg)
// | rdi rsi rdx rcx r8 r9 | solaris/linux
// |-------------------------------------------------------|
// | j_rarg5 j_rarg0 j_rarg1 j_rarg2 j_rarg3 j_rarg4 |
// |-------------------------------------------------------|
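//
// On solaris/linux the Java argument registers are therefore just the C argument
// registers shifted by one. An illustrative sketch of that aliasing (hypothetical
// excerpt, not the actual declarations below):
//
//   REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
//   REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
//   // ... j_rarg2..j_rarg4 follow the same pattern ...
//   REGISTER_DECLARATION(Register, j_rarg5, c_rarg0);
//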
// Windows runs out of register args here
#ifdef _WIN64
#else
#endif /* _WIN64 */
#else
// rscratch1 will appear in 32bit code that is dead but of course must compile
// Using noreg ensures if the dead code is incorrectly live and executed it
// will cause an assertion failure
#endif // _LP64
// JSR 292 fixed register usages:
// Address is an abstraction used to represent a memory location
// using any of the amd64 addressing modes with one object.
//
// Note: A register location is represented via a Register, not
// via an address for efficiency & simplicity reasons.
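//
// Illustrative uses (a sketch; register choices are arbitrary):
//
//   Address(rsp, 0)                           // [rsp]
//   Address(rbx, 16)                          // [rbx + 16]
//   Address(rbx, rcx, Address::times_8, 24)   // [rbx + rcx*8 + 24]
//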
class ArrayAddress;
public:
enum ScaleFactor {
times_1 = 0,
};
return times_1;
}
return (1 << (int)scale);
}
private:
int _disp;
// Easily misused constructors make them private
// %%% can we make these go away?
public:
// creation
Address()
_disp(0) {
}
// No default displacement otherwise Register can be implicitly
// converted to 0(Register) which is quite a different animal.
}
"inconsistent address");
}
"inconsistent address");
}
Address a = (*this);
return a;
}
Address a = (*this);
if (disp.is_register()) {
}
return a;
}
// disregard _rspec
}
// The following two overloads are used in connection with the
// ByteSize type (see sizes.hpp). They simplify the use of
// ByteSize'd arguments in assembly code. Note that their equivalent
// for the optimized build are the member functions with int disp
// argument since ByteSize is mapped to an int type in that case.
//
// Note: DO NOT introduce similar overloaded functions for WordSize
// arguments as in the optimized mode, both ByteSize and WordSize
// are mapped to the same type and thus the compiler cannot make a
// distinction anymore (=> compiler errors).
#ifdef ASSERT
}
"inconsistent address");
}
"inconsistent address");
}
#endif // ASSERT
// accessors
// Convert the raw encoding form into the form expected by the constructor for
// Address. An index of 4 (rsp) corresponds to having no index, so convert
// that to noreg for the Address constructor.
private:
bool base_needs_rex() const {
}
bool index_needs_rex() const {
}
friend class Assembler;
friend class MacroAssembler;
friend class LIR_Assembler; // base/index/scale/disp
};
//
// AddressLiteral has been split out from Address because operands of this type
// need to be treated specially on 32bit vs. 64bit platforms. By splitting it out
// the few instructions that need to deal with address literals are unique and the
// MacroAssembler does not have to implement every instruction in the Assembler
// in order to search for address literals that may need special handling depending
// on the instruction and the platform. As a small step on the way to merging the
// i486/amd64 directories.
//
friend class ArrayAddress;
// Typically when we use AddressLiterals we want their rval.
// However in some situations we want the lval (effective address) of the item.
// We provide a special factory for making those lvals.
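//
// Illustrative use, assuming the lval factory is the addr() member below
// (the target address is hypothetical):
//
//   AddressLiteral lit = ExternalAddress(some_address);   // rval: the word at some_address
//   AddressLiteral ea  = lit.addr();                      // lval: the address itself
//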
bool _is_lval;
// If the target is far we'll need to load the ea of this to
// a register to reach it. Otherwise if near we can do rip
// relative addressing.
protected:
// creation
: _is_lval(false),
{}
public:
_is_lval(false),
{}
return ret;
}
private:
friend class Assembler;
friend class MacroAssembler;
friend class Address;
friend class LIR_Assembler;
};
// Convenience classes
public:
};
public:
};
private:
// Sometimes ExternalAddress is used for values which aren't
// exactly addresses, like the card table base.
// external_word_type can't be used for values in the first page
// so just skip the reloc in that case.
return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
}
public:
};
public:
};
// x86 can do array addressing as a single operation since disp can be an absolute
// address; amd64 can't. We create a class that expresses the concept but does extra
// magic on amd64 to get the final result
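//
// Illustrative use (a sketch; the table address is hypothetical):
//
//   jump(ArrayAddress(ExternalAddress((address)dispatch_table),
//                     Address(noreg, rbx, Address::times_8)));
//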
private:
public:
ArrayAddress() {};
};
// The Assembler does no optimization on the instruction
// level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write
// is what you get. The Assembler is generating code into a CodeBuffer.
friend class AbstractAssembler; // for the non-virtual hack
friend class LIR_Assembler; // as_Address()
friend class StubGenerator;
public:
};
enum Prefix {
// segment overrides
};
enum VexPrefix {
};
enum VexSimdPrefix {
};
enum VexOpcode {
};
enum WhichOperand {
// input to locate_operand, and format code for relocations
#ifndef _LP64
#else
#endif
};
// NOTE: The general philosophy of the declarations here is that 64bit versions
// of instructions are freely declared without the need for wrapping them in an ifdef.
// (Some dangerous instructions are ifdef'd out of inappropriate jvms.)
// In the .cpp file the implementations are wrapped so that they are dropped out
// of the resulting jvm. This is done mostly to keep the footprint of the resulting
// jvm close to the size it was prior to merging the 32bit and 64bit assemblers.
//
// Using a 64bit-only instruction then shows up as a link/runtime failure rather than
// a compile error in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down.
private:
// 64bit prefixes
int prefixq_and_encode(int reg_enc);
bool vector256);
}
}
}
}
VexSimdPrefix pre) {
bool rex_w = true;
}
VexSimdPrefix pre) {
// It is OK to cast from Register to XMMRegister to pass the argument here
// since only the encoding is used in simd_prefix_and_encode() and the number
// of general registers and XMM registers is the same.
}
}
}
VexSimdPrefix pre) {
bool rex_w = true;
return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre, VEX_OPCODE_0F, rex_w);
}
}
bool rex_w = true;
}
// Helper functions for groups of instructions
// Force generation of a 4 byte immediate value even if it fits into 8bit
// only 32bit??
int disp,
RelocationHolder const& rspec,
int rip_relative_correction = 0);
// operands that only take the original 32bit registers
int disp,
RelocationHolder const& rspec);
// workaround gcc (3.2.1-7) bug
// Immediate-to-memory forms
protected:
#ifdef ASSERT
#endif
inline void emit_long64(jlong x);
// These are all easily abused and hence protected
// 32BIT ONLY SECTION
#ifndef _LP64
// Make these disappear in 64bit mode since they would never be correct
#else
// 64BIT ONLY SECTION
#endif // _LP64
// These are unique in that we are ensured by the caller that the 32bit
// relative in these instructions will always be able to reach the potentially
// 64bit address described by entry. Since they can take a 64bit address they
// don't have the 32 suffix like the other instructions in this class.
// Avoid using directly section
// Instructions in this section are actually usable by anyone without danger
// of failure but have performance issues that are addressed by enhanced
// instructions which will do the proper thing based on the particular cpu.
// We protect them because we don't trust you...
// Don't use next inc() and dec() methods directly. INC & DEC instructions
// could cause a partial flag stall since they don't set CF flag.
// Use MacroAssembler::decrement() & MacroAssembler::increment() methods
// which call inc() & dec() or add() & sub() in accordance with
// the product flag UseIncDec value.
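//
// A rough sketch of that selection (illustrative only, not the actual
// MacroAssembler code):
//
//   if (value == 1 && UseIncDec) incl(reg);
//   else                         addl(reg, value);
//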
// New cpus require use of movsd and movss to avoid partial register stall
// when loading from memory. But for old Opteron use movlpd instead of movsd.
// The selection is done in MacroAssembler::movdbl() and movflt().
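//
// A rough sketch of that selection for movdbl (illustrative only):
//
//   if (UseXmmLoadAndClearUpper) movsd (dst, src);  // preferred on newer cpus
//   else                         movlpd(dst, src);  // avoids the old-Opteron stall
//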
// Move Scalar Single-Precision Floating-Point Values
// Move Scalar Double-Precision Floating-Point Values
// New cpus require use of movaps and movapd to avoid partial register stall
// when moving between registers.
// End avoid using directly
// Instruction prefixes
public:
// Creation
// Decoding
// Utilities
static bool is_polling_page_far() NOT_LP64({ return false;});
// Generic instructions
// Does 32bit or 64bit as needed for the platform. In some sense these
// belong in macro assembler but there is no need for both varieties to exist
void pusha();
void popa();
void pushf();
void popf();
// These are dummies to prevent surprise implicit conversions to Register
void push(void* v);
void pop(void* v);
void rep_mov();
void rep_stos();
void rep_stosb();
void repne_scan();
#ifdef _LP64
void repne_scanl();
#endif
// Vanilla instructions in lexical order
void addr_nop_4();
void addr_nop_5();
void addr_nop_7();
void addr_nop_8();
// Add Scalar Double-Precision Floating-Point Values
// Add Scalar Single-Precision Floating-Point Values
// AES instructions
#ifdef _LP64
#endif
void cdql();
void cdqq();
// these are dummies used to catch attempts to convert NULL to a Register
// Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
// Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
// Identify processor type and features
void cpuid() {
emit_byte(0x0F);
emit_byte(0xA2);
}
// Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
// Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value
// Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value
// Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value
// Convert Packed Signed Doubleword Integers to Packed Single-Precision Floating-Point Value
// Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value
// Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer
// Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer
// Divide Scalar Double-Precision Floating-Point Values
// Divide Scalar Single-Precision Floating-Point Values
void emms();
void fabs();
void fadd(int i);
// "Alternate" versions of x87 instructions place result down in FPU
// stack instead of on TOS
void fadda(int i); // "alternate" fadd
void faddp(int i = 1);
void fchs();
void fcom(int i);
void fcomp(int i = 1);
void fcompp();
void fcos();
void fdecstp();
void fdiv(int i);
void fdiva(int i); // "alternate" fdiv
void fdivp(int i = 1);
void fdivr(int i);
void fdivra(int i); // "alternate" reversed fdiv
void fdivrp(int i = 1);
void ffree(int i = 0);
void fincstp();
void finit();
void fld1();
void fldlg2();
void fldln2();
void fldz();
void flog();
void flog10();
void fmul(int i);
void fmula(int i); // "alternate" fmul
void fmulp(int i = 1);
void fnstsw_ax();
void fprem();
void fprem1();
void fsin();
void fsqrt();
void fsub(int i);
void fsuba(int i); // "alternate" fsub
void fsubp(int i = 1);
void fsubr(int i);
void fsubra(int i); // "alternate" reversed fsub
void fsubrp(int i = 1);
void ftan();
void ftst();
void fucomi(int i = 1);
void fucomip(int i = 1);
void fwait();
void fxch(int i = 1);
void fyl2x();
void frndint();
void f2xm1();
void fldl2e();
void hlt();
// jcc is the generic conditional branch generator; it is used both for branches
// to run-time routines and for branches to labels. jcc
// takes a branch opcode (cc) and a label (L) and generates
// either a backward branch or a forward branch and links it
// to the label fixup chain. Usage:
//
// Label L; // unbound label
// jcc(cc, L); // forward branch to unbound label
// bind(L); // bind label to the current pc
// jcc(cc, L); // backward branch to bound label
// bind(L); // illegal: a label may be bound only once
//
// Note: The same Label can be used for forward and backward branches
// but it may be bound only once.
// Conditional 8-bit offset jump to L.
// WARNING: be very careful using this for forward jumps. If the label is
// not bound within an 8-bit offset of this instruction, a run-time error
// will occur.
// Label operations & relative jumps (PPUM Appendix D)
// Unconditional 8-bit offset jump to L.
// WARNING: be very careful using this for forward jumps. If the label is
// not bound within an 8-bit offset of this instruction, a run-time error
// will occur.
void lfence() {
emit_byte(0x0F);
emit_byte(0xAE);
emit_byte(0xE8);
}
void lock();
#ifdef _LP64
#endif
enum Membar_mask_bits {
};
// Serializes memory and blows flags
// We only have to handle StoreLoad
if (order_constraint & StoreLoad) {
// All usable chips support "locked" instructions which suffice
// as barriers, and are much faster than the alternative of
// using cpuid instruction. We use here a locked add [esp],0.
// This is conveniently otherwise a no-op except for blowing
// flags.
// Any change to this code may need to revisit other places in
// the code where this idiom is used, in particular the
// orderAccess code.
lock();
addl(Address(rsp, 0), 0); // the locked add of 0 to [esp] asserts the lock# signal
}
}
}
void mfence();
// Moves
// Move Double Quadword
// Move Aligned Double Quadword
// Move Unaligned Double Quadword
// Move Unaligned 256bit Vector
// Move lower 64bit to high 64bit in 128bit register
// These dummies prevent using movl from converting a zero (like NULL) into Register
// by giving the compiler two choices it can't resolve
#ifdef _LP64
#endif
#ifdef _LP64
// These dummies prevent using movq from converting a zero (like NULL) into Register
// by giving the compiler two choices it can't resolve
#endif
// Move Quadword
#ifdef _LP64
// Move signed 32bit immediate to 64bit extending sign
#endif
#ifdef _LP64
#endif
#ifdef _LP64
#endif
#ifdef _LP64
#endif
// Multiply Scalar Double-Precision Floating-Point Values
// Multiply Scalar Single-Precision Floating-Point Values
#ifdef _LP64
#endif
void nop(int i = 1);
#ifdef _LP64
#endif
// Pack with unsigned saturation
// SSE4.2 string instructions
// SSE4.1 packed move
#endif
#ifdef _LP64
#endif
#ifdef _LP64
#endif
// Prefetches (SSE, SSE2, 3DNOW only)
// Shuffle Bytes
// Shuffle Packed Doublewords
// Shuffle Packed Low Words
// Shift Right by bytes Logical DoubleQuadword Immediate
// Logical Compare 128bit
// Logical Compare 256bit
// Interleave Low Bytes
// Interleave Low Doublewords
// Interleave Low Quadwords
#endif
void sahf();
void smovl(); // QQQ generic?
// Compute Square Root of Scalar Double-Precision Floating-Point Value
// Compute Square Root of Scalar Single-Precision Floating-Point Value
// Force generation of a 4 byte immediate value even if it fits into 8bit
// Subtract Scalar Double-Precision Floating-Point Values
// Subtract Scalar Single-Precision Floating-Point Values
// Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
// Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
// Get Value of Extended Control Register
void xgetbv() {
emit_byte(0x0F);
emit_byte(0x01);
emit_byte(0xD0);
}
// AVX 3-operands scalar instructions (encoded with VEX prefix)
//====================VECTOR ARITHMETIC=====================================
// Add Packed Floating-Point Values
// Subtract Packed Floating-Point Values
// Multiply Packed Floating-Point Values
// Divide Packed Floating-Point Values
// Bitwise Logical AND of Packed Floating-Point Values
// Bitwise Logical XOR of Packed Floating-Point Values
// Add packed integers
// Sub packed integers
// Multiply packed integers (only shorts and ints)
// Shift left packed integers
// Logical shift right packed integers
// Arithmetic shift right packed integers (only shorts and ints, no instructions for longs)
// And packed integers
// Or packed integers
// Xor packed integers
// Copy low 128bit into high 128bit of YMM registers.
// duplicate 4-byte integer data from src into 8 locations in dest
// AVX instruction which is used to clear upper 128 bits of YMM registers and
// to avoid transaction penalty between AVX and SSE states. There is no
// penalty if legacy SSE instructions are encoded using VEX prefix because
// they always clear upper 128 bits. It should be used before calling
// runtime code and native libraries.
void vzeroupper();
protected:
// The next instructions require 16-byte address alignment in SSE mode.
// They should be called only from corresponding MacroAssembler instructions.
};
// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.
friend class LIR_Assembler;
friend class Runtime1; // as_Address()
protected:
// Support for VM calls
//
// This is the base routine called by the different versions of call_VM_leaf. The interpreter
// may customize this version by overriding it for its purposes (e.g., to save/restore
// additional registers when doing a VM call).
#ifdef CC_INTERP
// c++ interpreter never wants to use interp_masm version of call_VM
#define VIRTUAL
#else
#define VIRTUAL virtual
#endif
VIRTUAL void call_VM_leaf_base(
int number_of_arguments // the number of arguments to pop after the call
);
// This is the base routine called by the different versions of call_VM. The interpreter
// may customize this version by overriding it for its purposes (e.g., to save/restore
// additional registers when doing a VM call).
//
// If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
// returns the register which contains the thread upon return. If a thread register has been
// specified, the return value will correspond to that register. If no last_java_sp is specified
// (noreg) then rsp will be used instead.
int number_of_arguments, // the number of arguments (w/o thread) to pop after the call
bool check_exceptions // whether to check for pending exceptions after return
);
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
// as only the interpreter handles PopFrame and ForceEarlyReturn requests.
void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
// helpers for FPU flag access
// tmp is a temporary register, if none is available use noreg
public:
// Support for NULL-checks
//
// Generates code that causes a NULL OS exception if the content of reg is NULL.
// If the accessed location is M[reg + offset] and the offset is known, provide the
// offset. No explicit code generation is needed if the offset is within a certain
// range (0 <= offset <= page_size).
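//
// Illustrative uses (the offset accessor is just an example):
//
//   null_check(rbx, oopDesc::klass_offset_in_bytes());  // small known offset: no code needed
//   null_check(rbx);                                     // offset unknown: explicit check emitted
//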
// Required platform-specific helpers for Label::patch_instructions.
// They _shadow_ the declarations in AbstractAssembler, which are undefined.
#ifndef PRODUCT
#endif
// The following 4 methods return the offset of the appropriate move instruction
// Support for sign-extension (hi:lo = extend_sign(lo))
// Load and store values by size and signed-ness
void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
// Support optimal SSE move instructions.
}
}
}
// Alignment
// A 5 byte nop that is safe for patching (see patch_verified_entry)
void fat_nop();
void enter();
void leave();
// Support for getting the JavaThread pointer (i.e.; a reference to thread-local information)
// The pointer will be loaded into the thread register.
// Support for VM calls
//
// It is imperative that all calls into the VM are handled via the call_VM macros.
// They make sure that the stack linkage is set up correctly. call_VM's correspond
// to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
bool check_exceptions = true);
bool check_exceptions = true);
bool check_exceptions = true);
bool check_exceptions = true);
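// Typical shape of a call_VM use (illustrative sketch; the runtime entry point
// and the argument register are hypothetical):
//
//   call_VM(rax /* oop_result */,
//           CAST_FROM_FN_PTR(address, InterpreterRuntime::some_entry),
//           rbx /* arg_1 */);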
// Overloadings with last_Java_sp
int number_of_arguments = 0,
bool check_exceptions = true);
check_exceptions = true);
bool check_exceptions = true);
bool check_exceptions = true);
// These always tightly bind to MacroAssembler::call_VM_base
// bypassing the virtual implementation
void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
int number_of_arguments = 0);
// These always tightly bind to MacroAssembler::call_VM_leaf_base
// bypassing the virtual implementation
void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
// last Java Frame (fills frame anchor)
// thread in the default location (r15_thread on 64bit)
// thread in the default location (r15_thread on 64bit)
// Stores
void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)
#ifndef SERIALGC
bool tosca_live,
bool expand_call);
#endif // SERIALGC
// split store_check(Register obj) to enhance instruction interleaving
// C 'boolean' to Java boolean: x == 0 ? 0 : 1
// C++ bool manipulation
// oop manipulations
// Used for storing NULL. All other oop constants should be
// stored using routines that take a jobject.
#ifdef _LP64
// This dummy is to prevent a call to store_heap_oop from
// converting a zero (like NULL) into a Register by giving
// the compiler two choices it can't resolve
void encode_heap_oop(Register r);
void decode_heap_oop(Register r);
void encode_heap_oop_not_null(Register r);
void decode_heap_oop_not_null(Register r);
// if heap base register is used - reinit it with the correct value
void reinit_heapbase();
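// A rough sketch of what encode_heap_oop does in the non-null-base case
// (illustrative only; the actual code also handles a NULL narrow oop base,
// a zero shift, and possibly-NULL oops):
//
//   subq(r, r12_heapbase);                 // subtract the heap base
//   shrq(r, LogMinObjAlignmentInBytes);    // then compress by the alignment shift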
#endif // _LP64
// (as idivl, but checks for special case as described in JVM spec.)
// returns idivl instruction offset for implicit exception handling
// (as idivq, but checks for special case as described in JVM spec.)
// returns idivq instruction offset for implicit exception handling
void int3();
// Long operation macros for a 32bit cpu
// Long negation for Java
// Long multiplication for Java
// (destroys contents of eax, ebx, ecx and edx)
// Long shifts for Java
// (semantics as described in JVM spec.)
// Long compare for Java
// (semantics as described in JVM spec.)
// misc
// Sign extension
// Division by power of 2, rounding towards 0
// Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
//
// CF (corresponds to C0) if x < y
// PF (corresponds to C2) if unordered
// ZF (corresponds to C3) if x = y
//
// The arguments are in reversed order on the stack (i.e., top of stack is first argument).
// tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
// Variant of the above which allows y to be further down the stack
// and which only pops x and y if specified. If pop_right is
// specified then pop_left must also be specified.
// Floating-point comparison for Java
// Compares the top-most stack entries on the FPU stack and stores the result in dst.
// The arguments are in reversed order on the stack (i.e., top of stack is first argument).
// (semantics as described in JVM spec.)
// Variant of the above which allows y to be further down the stack
// and which only pops x and y if specified. If pop_right is
// specified then pop_left must also be specified.
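// An illustrative pre-P6 sequence for getting the FPU condition codes into
// EFLAGS (the real helpers also support the fucomi/fucomip path):
//
//   fcompp();      // compare ST(0) with ST(1) and pop both
//   fnstsw_ax();   // FPU status word (C0/C2/C3) -> ax
//   sahf();        // ah -> EFLAGS, so CF/PF/ZF reflect C0/C2/C3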
// Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
// tmp is a temporary register, if none is available use noreg
// same as fcmp2int, but using SSE2
// Inlined sin/cos generator for Java; the CPU instructions must not be used
// directly on Intel as they do not have high enough precision
// outside of the range [-pi/4, pi/4]. The extra argument indicates the
// number of FPU stack slots in use; all but the topmost will
// require saving if a slow case is necessary. Assumes argument is
// on FP TOS; result is on FP TOS. No cpu registers are changed by
// this code.
// tmp is a temporary register, if none is available use noreg
// Pop ST (ffree & fincstp combined)
void fpop();
// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void push_fTOS();
// pops double TOS element from CPU stack and pushes on FPU stack
void pop_fTOS();
void empty_FPU_stack();
void push_IU_state();
void pop_IU_state();
void push_FPU_state();
void pop_FPU_state();
void push_CPU_state();
void pop_CPU_state();
// Round up to a power of two
// Callee saved registers handling
void push_callee_saved_registers();
void pop_callee_saved_registers();
// allocation
void eden_allocate(
int con_size_in_bytes, // object size in bytes if known at compile time
);
void tlab_allocate(
int con_size_in_bytes, // object size in bytes if known at compile time
);
// interface method calling
// virtual method calling
// Test sub_klass against super_klass, with fast and slow paths.
// The fast path produces a tri-state answer: yes / no / maybe-slow.
// One of the three labels can be NULL, meaning take the fall-through.
// If super_check_offset is -1, the value is loaded up from super_klass.
// No registers are killed, except temp_reg.
// The rest of the type check; must be wired to a corresponding fast path.
// It does not repeat the fast path logic, so don't use it standalone.
// The temp_reg and temp2_reg can be noreg, if no temps are available.
// Updates the sub's secondary super cache as necessary.
// If set_cond_codes, condition codes will be Z on success, NZ on failure.
bool set_cond_codes = false);
// Simplified, combined version, good for typical uses.
// Falls through on failure.
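// Illustrative use of the combined version (register choices are arbitrary):
//
//   Label L_ok;
//   check_klass_subtype(rbx /* sub_klass */, rax /* super_klass */, rcx /* temp */, L_ok);
//   // failure falls through to here
//   bind(L_ok);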
// method handles (JSR 292)
//----
// Debugging
// only if +VerifyOops
// only if +VerifyFPU
// Verify or restore cpu control state after JNI call
// prints msg, dumps registers and stops execution
// prints msg and continues
// dumps registers and other state
void print_state();
static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
void os_breakpoint();
void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }
void print_CPU_state();
// Stack overflow checking
// stack grows down, caller passes positive offset
}
// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. Also, clobbers tmp
int offset);
// Support for serializing memory accesses between threads
void verify_tlab();
// Biased locking support
// lock_reg and obj_reg must be loaded up with the appropriate values.
// swap_reg must be rax, and is killed.
// tmp_reg is optional. If it is supplied (i.e., != noreg) it will
// allocate a temporary (inefficient, avoid if possible).
// Optional slow case is for implementations (interpreter and C1) which branch to
// slow case directly. Leaves condition codes set for C2's Fast_Lock node.
// Returns offset of first potentially-faulting instruction for null
// check info (currently consumed only by C1). If
// swap_reg_contains_mark is true then returns -1 as it is assumed
// the calling code has already passed any potential faults.
bool swap_reg_contains_mark,
// Instructions that use AddressLiteral operands. These instruction can handle 32bit/64bit
// operands. In general the names are modified to avoid hiding the instruction in Assembler
// so that we don't need to implement all the varieties in the Assembler with trivial wrappers
// here in MacroAssembler. The major exception to this rule is call and jump.
// Arithmetics
}
void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
// compare reg - mem, or reg - &mem
#ifndef _LP64
#endif // _LP64
// NOTE src2 must be the lval. This is NOT a mem-mem compare
void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
// void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
// cmp64 to avoid hiding cmpq
// Force generation of a 4 byte immediate value even if it fits into 8bit
}
void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }
// Helper functions for statistics gathering.
// Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
// Unconditional atomic increment.
// Import other testl() methods from the parent class or else
// they will be hidden by the following overriding declaration.
void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
// Calls
// NOTE: this call transfers to the effective address of entry NOT
// the address contained by entry, because this is the more natural behavior
// for a call.
// Jumps
// NOTE: these jumps transfer to the effective address of dst NOT
// the address contained by dst, because this is the more natural behavior
// for a jump.
// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class. This jump will transfer to the address
// contained in the location described by entry (not the address of entry)
// Floating
// computes pow(x,y) and exp(x) with x86 instructions. These don't cover
// all corner cases and may result in NaN and require a fallback to a
// runtime call.
void fast_pow();
void fast_exp();
void increase_precision();
void restore_precision();
// computes exp(x). Fallback to runtime call included.
// computes pow(x,y). Fallback to runtime call included.
private:
// computes 2^(Ylog2X); Ylog2X in ST(0)
void pow_exp_core_encoding();
// computes pow(x,y) or exp(x). Fallback to runtime call included.
public:
// Move Unaligned Double Quadword
// Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
// Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
// Shuffle Bytes
// AVX 3-operands instructions
void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); }
void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); }
void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); }
void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); }
void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
// AVX Vector instructions
void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); }
void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); }
void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
else
}
else
}
// Simple version for AVX2 256bit vectors
// Move packed integer values from the low 128 bits to the high 128 bits of a 256 bit vector.
else
}
// Data
void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
// can this do an lea?
}
#ifdef _LP64
// Generally the next two are only used for moving NULL
// Although there are situations in initializing the mark word where
// they could be used. They are dangerous.
// They only exist on LP64 so that int32_t and intptr_t are not the same
// and we have ambiguous declarations.
#endif // _LP64
// to avoid hiding movl
// to avoid hiding movb
// Import other mov() methods from the parent class or else
// they will be hidden by the following overriding declaration.
// Can push value or effective address
// sign extend as needed, from an l to a ptr sized element
void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
// C2 compiled method's prolog code.
// clear memory of size 'cnt' qwords, starting at 'base'.
// IndexOf strings.
// Small strings are loaded through the stack if they cross a page boundary.
// IndexOf for constant substrings with size >= 8 elements
// which don't need to be loaded through stack.
// Smallest code: we don't need to load through stack,
// check string tail.
// Compare strings.
// Compare char[] arrays.
// Fill primitive arrays
};
/**
* class SkipIfEqual:
*
* Instantiating this class will result in assembly code being output that will
* jump around any code emitted between the creation of the instance and its
* automatic destruction at the end of a scope block, depending on the value of
* the flag passed to the constructor, which will be checked at run-time.
*/
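//
// Illustrative use (the flag shown is just an example):
//
//   { SkipIfEqual skip(masm, &DTraceMethodProbes, false);
//     // code emitted here is skipped at run-time when the flag is false
//   }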
class SkipIfEqual {
private:
public:
~SkipIfEqual();
};
#ifdef ASSERT
#endif
#endif // CPU_X86_VM_ASSEMBLER_X86_HPP