/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_X86_VM_NATIVEINST_X86_HPP
#define CPU_X86_VM_NATIVEINST_X86_HPP
#include "asm/assembler.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeMovRegMemPatching
// - - NativeJump
// - - NativeIllegalOpCode
// - - NativeGeneralJump
// - - NativeReturn
// - - NativeReturnX (return with argument)
// - - NativePushConst
// - - NativeTstRegMem
// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.
// NOTE(review): this span reads like the interior of `class NativeInstruction`,
// but the class header itself is not present -- this copy of the file appears
// truncated/filtered (braces below do not balance). Verify against the
// original nativeInst_x86.hpp before editing.
friend class Relocation;  // needs an enclosing class; presumably lets Relocation patch instruction bytes
public:
enum Intel_specific_constants {
};  // NOTE(review): opcode/size constants appear to have been elided here
bool is_dtrace_trap();
// Cheap opcode-sniffing predicates; their (partial) definitions appear near
// the end of this file.
inline bool is_call();
inline bool is_illegal();
inline bool is_return();
inline bool is_jump();
inline bool is_cond_jump();
inline bool is_safepoint_poll();
inline bool is_mov_literal64();
protected:
// This doesn't really do anything on Intel, but it is the place where
// cache invalidation belongs, generically:
public:
// unit test stuff
};
// NOTE(review): the lines below look like the tail of a
// `nativeInstruction_at(address)`-style helper whose head has been elided.
#ifdef ASSERT
//inst->verify();
#endif
return inst;
}
// The NativeCall is an abstraction for accessing/manipulating native call imm32/rel32off
// instructions (used to manipulate inline caches, primitive & dll calls, etc.).
// NOTE(review): the `class NativeCall ...` header and most member bodies are
// missing from this copy of the file.
public:
enum Intel_specific_constants {
instruction_offset = 0,
};
address destination() const;
// NOTE(review): fragment of an assert checking that a destination fits in a
// 32-bit rel32 displacement; the enclosing function header and the left
// operand of the comparison are not visible here.
#ifdef AMD64
0xFFFFFFFF00000000) == 0,
"must be 32bit offset");
#endif // AMD64
}
// The rel32 displacement must be int-aligned so it can be patched atomically
// (see the MT-safe patching note below).
void verify_alignment() { assert((intptr_t)addr_at(displacement_offset) % BytesPerInt == 0, "must be aligned"); }
void verify();
void print();
// Creation
}
}
}
// MT-safe patching of a call instruction.
};
// NOTE(review): tails of two nativeCall_at / nativeCall_before-style helpers;
// their heads (casts computing `call`) have been elided.
#ifdef ASSERT
#endif
return call;
}
#ifdef ASSERT
#endif
return call;
}
// An interface for accessing/manipulating native mov reg, imm32 instructions.
// (used to manipulate inlined 32bit data dll calls, etc.)
// NOTE(review): the `class NativeMovConstReg ...` header is missing in this copy.
#ifdef AMD64
// 64-bit mov reg,imm64 carries a REX prefix; the matching `rex_size` constant
// for this branch appears to have been elided.
static const bool has_rex = true;
#else
static const bool has_rex = false;
static const int rex_size = 0;
#endif // AMD64
public:
enum Intel_specific_constants {
instruction_offset = 0,
};
void verify();
void print();
// unit test stuff
static void test() {}
// Creation
};
// NOTE(review): tail of a nativeMovConstReg_at-style helper; the cast
// computing `test` has been elided.
#ifdef ASSERT
#endif
return test;
}
// `_before` variant: back up over one whole instruction to reach its start.
NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
#endif
return test;
}
// NOTE(review): the `private:` fragment below looks like the remains of a
// NativeMovConstRegPatching class (see the interface list at the top of the file).
private:
#ifdef ASSERT
#endif
return test;
}
};
// An interface for accessing/manipulating native moves of the form:
// mov[b/w/l/q] [reg + offset], reg (instruction_code_reg2mem)
// mov[b/w/l/q] reg, [reg+offset] (instruction_code_mem2reg
// mov[s/z]x[w/b/q] [reg + offset], reg
// fld_s [reg+offset]
// fld_d [reg+offset]
// fstp_s [reg + offset]
// fstp_d [reg + offset]
// mov_literal64 scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
//
// Warning: These routines must be able to handle any instruction sequences
// macros. For example: The load_unsigned_byte instruction generates
// an xor reg,reg inst prior to generating the movb instruction. This
// class must skip the xor instruction.
// NOTE(review): the `class NativeMovRegMem ...` header is missing in this copy.
public:
enum Intel_specific_constants {
instruction_offset = 0,
};
// helper
int instruction_start() const;
address instruction_address() const;
address next_instruction_address() const;
int offset() const;
void set_offset(int x);
void verify();
void print ();
// unit test stuff
static void test() {}
private:
};
// NOTE(review): tail of a nativeMovRegMem_at-style helper; the cast computing
// `test` has been elided.
#ifdef ASSERT
#endif
return test;
}
// NOTE(review): the `private:` fragment below looks like the remains of a
// NativeMovRegMemPatching class (see the interface list at the top of the file).
private:
#ifdef ASSERT
#endif
return test;
}
};
// An interface for accessing/manipulating native leal instruction of form:
// leal reg, [reg + offset]
// NOTE(review): the class header (NativeLoadAddress in upstream HotSpot) is
// missing in this copy; the structure mirrors NativeMovConstReg above.
#ifdef AMD64
// 64-bit lea carries a REX prefix; the matching `rex_size` constant for this
// branch appears to have been elided.
static const bool has_rex = true;
#else
static const bool has_rex = false;
static const int rex_size = 0;
#endif // AMD64
public:
enum Intel_specific_constants {
};
void verify();
void print ();
// unit test stuff
static void test() {}
// NOTE(review): tail of an `_at`-style helper; the cast computing `test` has
// been elided.
private:
#ifdef ASSERT
#endif
return test;
}
};
// jump rel32off
// NOTE(review): the `class NativeJump ...` header and most member bodies are
// missing from this copy of the file.
public:
enum Intel_specific_constants {
instruction_offset = 0,
};
// 32bit used to encode unresolved jmp as jmp -1
// 64bit can't produce this so it used jump to self.
// Now 32bit and 64bit use jump to self as the unresolved address
// which the inline cache code (and relocs) know about
// NOTE(review): the two fragments below look like the tails of a
// jump_destination() accessor and of a setter with an AMD64-only range check.
// return -1 if jump to self
return dest;
}
}
#ifdef AMD64
#endif // AMD64
}
// Creation
void verify();
// Unit testing stuff
static void test() {}
// Insertion of native jump instruction
// MT-safe insertion of native jump at verified method entry
};
// NOTE(review): tail of a nativeJump_at-style helper; the cast computing
// `jump` has been elided.
#ifdef ASSERT
#endif
return jump;
}
// Handles all kinds of jump on Intel. Long/far, conditional/unconditional
// NOTE(review): the `class NativeGeneralJump ...` header is missing in this copy.
public:
enum Intel_specific_constants {
// Constants does not apply, since the lengths and offsets depends on the actual jump
// used
// Instruction codes:
// Unconditional jumps: 0xE9 (rel32off), 0xEB (rel8off)
// Conditional jumps: 0x0F8x (rel32off), 0x7x (rel8off)
};
address jump_destination() const;
// Creation
// Insertion of native general jump instruction
void verify();
};
// NOTE(review): tail of a nativeGeneralJump_at-style helper; its head has
// been elided.
return jump;
}
// NOTE(review): the five small class fragments below have all lost their
// class headers in this copy. Judging by the surviving comments and by the
// inline predicates at the end of the file, they correspond to (in order):
// a pop-register class, NativeIllegalInstruction, NativeReturn, NativeReturnX
// and NativeTstRegMem -- confirm against upstream before editing.
public:
enum Intel_specific_constants {
instruction_offset = 0,
};
// Insert a pop instruction
};
public:
enum Intel_specific_constants {
instruction_offset = 0,
};
// Insert illegal opcode as specific address
};
// return instruction that does not pop values of the stack
public:
enum Intel_specific_constants {
instruction_offset = 0,
};
};
// return instruction that does pop values of the stack
public:
enum Intel_specific_constants {
instruction_offset = 0,
};
};
// Simple test vs memory
public:
enum Intel_specific_constants {
};  // NOTE(review): the REX-prefix constants referenced by is_safepoint_poll() below were presumably declared here
};
// Inline definitions of NativeInstruction's opcode-sniffing predicates.
// is_illegal: compares the first two instruction bytes (short truncation of
// int_at(0)) against the illegal-instruction opcode.
inline bool NativeInstruction::is_illegal() { return (short)int_at(0) == (short)NativeIllegalInstruction::instruction_code; }
// NOTE(review): the next two definitions are truncated mid-expression -- the
// right-hand sides of their `||` continuations are missing from this copy.
inline bool NativeInstruction::is_return() { return ubyte_at(0) == NativeReturn::instruction_code ||
// is_cond_jump (fragment): 0xF0FF mask keeps byte 0 and the high nibble of
// byte 1, so 0x800F matches the two-byte 0x0F 0x8x long-jcc encoding.
inline bool NativeInstruction::is_cond_jump() { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
// NOTE(review): fragment of is_safepoint_poll(); the braces below do not
// balance (duplicate `} else {`), indicating dropped lines -- do not trust
// the control flow as shown.
#ifdef AMD64
if (Assembler::is_polling_page_far()) {
// two cases, depending on the choice of the base register in the address.
if (((ubyte_at(0) & NativeTstRegMem::instruction_rex_prefix_mask) == NativeTstRegMem::instruction_rex_prefix &&
return true;
} else {
return false;
}
} else {
} else {
return false;
}
}
#else
#endif // AMD64
}
// NOTE(review): fragment of is_mov_literal64(); a mov reg,imm64 is only
// encodable on AMD64, hence the unconditional false on 32-bit.
#ifdef AMD64
#else
return false;
#endif // AMD64
}
#endif // CPU_X86_VM_NATIVEINST_X86_HPP