/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "assembler_sparc.inline.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif


bool NativeInstruction::is_dtrace_trap() {
  return !is_nop();
}

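// Patching the upper bits of a 64-bit constant in code. A 32-bit constant
// is materialized with the classic two-instruction pair
//   sethi %hi(imm), reg       ! bits 31..10
//   add   reg, %lo(imm), reg  ! bits  9..0
// but a 64-bit value needs a longer sequence, which patchable_sethi emits
// at a fixed length (apparently padded to a worst case) so the words can
// later be regenerated in place, as done here. The trailing %lo add is
// patched separately; see NativeMovConstReg::set_data below.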
void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
  ResourceMark rm;
  CodeBuffer buf(instaddr, 10 * BytesPerInstWord);
  MacroAssembler* _masm = new MacroAssembler(&buf);
  Register destreg;

  destreg = inv_rd(*(unsigned int *)instaddr);
  // Generate the new sequence
  _masm->patchable_sethi(x, destreg);
  ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
}

void NativeInstruction::verify_data64_sethi(address instaddr, intptr_t x) {
  ResourceMark rm;
  unsigned char buffer[10 * BytesPerInstWord];
  CodeBuffer buf(buffer, 10 * BytesPerInstWord);
  MacroAssembler masm(&buf);

  Register destreg = inv_rd(*(unsigned int *)instaddr);
  // Generate the proper sequence into a temporary buffer and compare
  // it with the original sequence.
  masm.patchable_sethi(x, destreg);
  int len = masm.pc() - buffer;
  for (int i = 0; i < len; i++) {
    assert(instaddr[i] == buffer[i], "instructions must match");
  }
}

void NativeInstruction::verify() {
  // make sure code pattern is actually an instruction address
  address addr = addr_at(0);
  if (addr == 0 || ((intptr_t)addr & 3) != 0) {
    fatal("not an instruction address");
  }
}

void NativeInstruction::print() {
  tty->print_cr(INTPTR_FORMAT ": 0x%x", addr_at(0), long_at(0));
}

void NativeInstruction::set_long_at(int offset, int i) {
  address addr = addr_at(offset);
  *(int*)addr = i;
  ICache::invalidate_word(addr);
}

void NativeInstruction::set_jlong_at(int offset, jlong i) {
  address addr = addr_at(offset);
  *(jlong*)addr = i;
  // Don't need to invalidate 2 words here, because
  // the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
}

void NativeInstruction::set_addr_at(int offset, address x) {
  address addr = addr_at(offset);
  assert(((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
  *(uintptr_t*)addr = (uintptr_t)x;
  // Don't need to invalidate 2 words here in the 64-bit case,
  // because the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
  // The Intel code has this assertion for NativeCall::set_destination,
  // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
  // NativeJump::set_jump_destination, and NativePushImm32::set_data
  //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
}

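// Recognizes a compare-against-zero idiom: a subcc (subtract and set the
// condition codes) whose destination is %g0 and one of whose source
// operands is %g0. On a match the other operand register is returned
// through the reference argument.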
bool NativeInstruction::is_zero_test(Register &reg) {
  int x = long_at(0);
  Assembler::op3s temp = (Assembler::op3s)(Assembler::sub_op3 | Assembler::cc_bit_op3);
  if (is_op3(x, temp, Assembler::arith_op) &&
      inv_immed(x) && inv_rd(x) == G0) {
    if (inv_rs1(x) == G0) {
      reg = inv_rs2(x);
      return true;
    } else if (inv_rs2(x) == G0) {
      reg = inv_rs1(x);
      return true;
    }
  }
  return false;
}

bool NativeInstruction::is_load_store_with_small_offset(Register reg) {
  int x = long_at(0);
  if (is_op(x, Assembler::ldst_op) &&
      inv_rs1(x) == reg && inv_immed(x)) {
    return true;
  }
  return false;
}

void NativeCall::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a call instruction
  if (!is_op(long_at(0), Assembler::call_op)) {
    fatal("not a call");
  }
}

void NativeCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
}


// MT-safe patching of a call instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word. This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert(instr_addr != NULL, "illegal address for code patching");
  NativeCall* n_call = nativeCall_at(instr_addr); // checking that it is a call
  assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
  int i0 = ((int*)code_buffer)[0];
  int i1 = ((int*)code_buffer)[1];
  int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original call");
  // The set_long_at calls do the ICache::invalidate so we just need to do them in reverse order
  n_call->set_long_at(1*BytesPerInstWord, i1);
  n_call->set_long_at(0*BytesPerInstWord, i0);
  // NOTE: It is possible that another thread T will execute
  // only the second patched word.
  // In other words, since the original instruction is this
  //    call patching_stub; nop                    (NativeCall)
  // and the new sequence from the buffer is this:
  //    sethi %hi(K), %r; add %r, %lo(K), %r       (NativeMovConstReg)
  // what T will execute is this:
  //    call patching_stub; add %r, %lo(K), %r
  // thereby putting garbage into %r before calling the patching stub.
  // This is OK, because the patching stub ignores the value of %r.

  // Make sure the first-patched instruction, which may co-exist
  // briefly with the call, will do something harmless.
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original call");
}

// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times. Thus, the displacement field must be
// instruction-word-aligned. This is always true on SPARC.
//
// Used in the runtime linkage of calls; see class CompiledIC.
void NativeCall::set_destination_mt_safe(address dest) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // set_destination uses set_long_at which does the ICache::invalidate
  set_destination(dest);
}

// Code for unit testing implementation of NativeCall class
void NativeCall::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeCall* nc;
  uint idx;
  int offsets[] = {
    0x0,
    0xfffffff0,
    0x7ffffff0,
    0x80000000,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  a->call( a->pc(), relocInfo::none );
  a->delayed()->nop();
  nc = nativeCall_at( cb.insts_begin() );
  nc->print();

  nc = nativeCall_overwriting_at( nc->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nc->set_destination( cb.insts_begin() + offsets[idx] );
    assert(nc->destination() == (cb.insts_begin() + offsets[idx]), "check unit test");
    nc->print();
  }

  nc = nativeCall_before( cb.insts_begin() + 8 );
  nc->print();

  VM_Version::revert();
#endif
}
// End code for unit testing implementation of NativeCall class

//-------------------------------------------------------------------

#ifdef _LP64

void NativeFarCall::set_destination(address dest) {
  // Address materialized in the instruction stream, so nothing to do.
  return;
#if 0 // What we'd do if we really did want to change the destination
  if (destination() == dest) {
    return;
  }
  ResourceMark rm;
  CodeBuffer buf(addr_at(0), instruction_size + 1);
  MacroAssembler* _masm = new MacroAssembler(&buf);
  // Generate the new sequence
  AddressLiteral(dest);
  _masm->jumpl_to(dest, O7, O7);
  ICache::invalidate_range(addr_at(0), instruction_size);
#endif
}

void NativeFarCall::verify() {
  // make sure code pattern is actually a jumpl_to instruction
  assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to");
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  nativeJump_at(addr_at(0))->verify();
}

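// A far call is laid down as the jumpl_to sequence checked by verify()
// above, and that sequence always begins with a sethi; recognizing the
// leading sethi is therefore enough to identify a far call site.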
bool NativeFarCall::is_call_at(address instr) {
  return nativeInstruction_at(instr)->is_sethi();
}

void NativeFarCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
}

bool NativeFarCall::destination_is_compiled_verified_entry_point() {
  nmethod* callee = CodeCache::find_nmethod(destination());
  if (callee == NULL) {
    return false;
  } else {
    return destination() == callee->verified_entry_point();
  }
}

// MT-safe patching of a far call.
void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) {
  Unimplemented();
}

// Code for unit testing implementation of NativeFarCall class
void NativeFarCall::test() {
  Unimplemented();
}
// End code for unit testing implementation of NativeFarCall class

#endif // _LP64

//-------------------------------------------------------------------


void NativeMovConstReg::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "set_oop" synthetic instruction
  // see MacroAssembler::set_oop()
  int i0 = long_at(sethi_offset);
  int i1 = long_at(add_offset);

  // verify the pattern "sethi %hi22(imm), reg ; add reg, %lo10(imm), reg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1) && rd == inv_rd(i1))) {
    fatal("not a set_oop");
  }
#else
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0)) {
    fatal("not a set_oop");
  }
#endif
}


void NativeMovConstReg::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
}


#ifdef _LP64
intptr_t NativeMovConstReg::data() const {
  return data64(addr_at(sethi_offset), long_at(add_offset));
}
#else
intptr_t NativeMovConstReg::data() const {
  return data32(long_at(sethi_offset), long_at(add_offset));
}
#endif


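// set_data rewrites the constant in place: the sethi word (or, on 64-bit,
// the whole patchable sethi sequence) supplies the upper bits, and the
// simm13 field of the trailing add supplies the low 10 bits. If the
// constant is an oop, the value is also stored back into the nmethod's
// oop_Relocation cell so the GC continues to see and update it.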
void NativeMovConstReg::set_data(intptr_t x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = (oop)x;
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
}


// Code for unit testing implementation of NativeMovConstReg class
void NativeMovConstReg::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstReg* nm;
  uint idx;
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstReg_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstReg_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif
}
// End code for unit testing implementation of NativeMovConstReg class

//-------------------------------------------------------------------

void NativeMovConstRegPatching::verify() {
  NativeInstruction::verify();
  // Make sure code pattern is sethi/nop/add.
  int i0 = long_at(sethi_offset);
  int i1 = long_at(nop_offset);
  int i2 = long_at(add_offset);
  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg"
  // The casual reader should note that on SPARC a nop is a special case of sethi
  // in which the destination register is %g0.
  Register rd0 = inv_rd(i0);
  Register rd1 = inv_rd(i1);
  if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 &&
        is_op2(i1, Assembler::sethi_op2) && rd1 == G0 &&  // nop is a special case of sethi
        is_op3(i2, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) &&
        rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) {
    fatal("not a set_oop");
  }
}


void NativeMovConstRegPatching::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
}


int NativeMovConstRegPatching::data() const {
#ifdef _LP64
  return data64(addr_at(sethi_offset), long_at(add_offset));
#else
  return data32(long_at(sethi_offset), long_at(add_offset));
#endif
}


void NativeMovConstRegPatching::set_data(int x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = (oop)x;
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
}


// Code for unit testing implementation of NativeMovConstRegPatching class
void NativeMovConstRegPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstRegPatching* nm;
  uint idx;
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->nop();
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->nop();
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstRegPatching_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeMovConstRegPatching class


//-------------------------------------------------------------------


void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
    *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
  }
}


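// NativeMovRegMem covers two shapes of memory access: a single ld/st with a
// signed 13-bit immediate displacement, or a sethi/add pair that builds a
// large offset in a register followed by a register+register ld/st.
// verify() accepts either form; is_immediate() distinguishes them.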
void NativeMovRegMem::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok");

  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
              ? (1 << op3) & (op3_mask_ld | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    if (!(is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
          0 != (op3 < op3_ldst_int_limit
                ? (1 << op3) & (op3_mask_ld | op3_mask_st)
                : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
      fatal("not a ld* or st* op");
    }
  }
}


void NativeMovRegMem::print() {
  if (is_immediate()) {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
  }
}


// Code for unit testing implementation of NativeMovRegMem class
void NativeMovRegMem::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMem* nm;
  uint idx = 0;
  uint idx1;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
  AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);
  a->ldsw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  a->stw( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMem_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  while (--idx) {
    nm = nativeMovRegMem_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}

// End code for unit testing implementation of NativeMovRegMem class

//--------------------------------------------------------------------------------


void NativeMovRegMemPatching::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += wordSize) {
    *(long*)(new_instruction_address + i) = *(long*)(address(this) + i);
  }
}


void NativeMovRegMemPatching::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
              ? (1 << op3) & (op3_mask_ld | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    if (!(is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
          0 != (op3 < op3_ldst_int_limit
                ? (1 << op3) & (op3_mask_ld | op3_mask_st)
                : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
      fatal("not a ld* or st* op");
    }
  }
}


void NativeMovRegMemPatching::print() {
  if (is_immediate()) {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
  }
}


// Code for unit testing implementation of NativeMovRegMemPatching class
void NativeMovRegMemPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMemPatching* nm;
  uint idx = 0;
  uint idx1;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0xffffffff, relocInfo::external_word_type);
  a->ldsw( G5, al.low10(), G4 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al.low10(), G4 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al.low10(), G4 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al.low10(), G4 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al.low10(), G4 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al.low10(), G4 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al.low10(), G4 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al.low10(), G4 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  a->stw( G5, G4, al.low10() ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al.low10() ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al.low10() ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al.low10() ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al.low10() ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMemPatching_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  while (--idx) {
    nm = nativeMovRegMemPatching_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeMovRegMemPatching class


//--------------------------------------------------------------------------------


void NativeJump::verify() {
  NativeInstruction::verify();
  int i0 = long_at(sethi_offset);
  int i1 = long_at(jmpl_offset);
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  // verify the pattern "sethi %hi22(imm), treg ; jmpl treg, %lo10(imm), lreg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
         (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1))) {
    fatal("not a jump_to instruction");
  }
#else
  // In LP64, the instruction sequence varies for non-relocatable jumps:
  // for example, it could be sethi, xor, jmp instead of the 7-instruction
  // sethi sequence. So check the sethi only.
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0)) {
    fatal("not a jump_to instruction");
  }
#endif
}


void NativeJump::print() {
  tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, instruction_address(), jump_destination());
}


// Code for unit testing implementation of NativeJump class
void NativeJump::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeJump* nj;
  uint idx;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
  a->delayed()->nop();
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
  a->delayed()->nop();

  nj = nativeJump_at( cb.insts_begin() );
  nj->print();

  nj = nativeJump_at( nj->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
    assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
    nj->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeJump class


void NativeJump::insert(address code_pos, address entry) {
  Unimplemented();
}

// MT-safe insertion of a jump over an unknown instruction sequence (used by nmethod::makeZombie).
// The problem: jump_to <dest> is a 3-word instruction (including its delay slot).
// Only a single word can be written atomically.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // Here's one way to do it: Pre-allocate a three-word jump sequence somewhere
  // in the header of the nmethod, within a short branch's span of the patch point.
  // Set up the jump sequence using NativeJump::insert, and then use an annulled
  // unconditional branch at the target site (an atomic 1-word update).
  // Limitations: You can only patch nmethods, with any given nmethod patched at
  // most once, and the patch must be in the nmethod's header.
  // It's messy, but you can ask the CodeCache for the nmethod containing the
  // target address.

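  // What the code below actually does: atomically overwrite the first word
  // of the verified entry with a load from address zero into O7. A thread
  // entering the method executes that word and faults, and the signal
  // handler (which must recognize exactly this instruction, hence the
  // "must agree" notes below) diverts the thread to the appropriate
  // runtime handling.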
  // %%%%% For now, do something MT-stupid:
  ResourceMark rm;
  int code_size = 1 * BytesPerInstWord;
  CodeBuffer cb(verified_entry, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);
  if (VM_Version::v9_instructions_work()) {
    a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
  } else {
    a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
  }
  ICache::invalidate_range(verified_entry, code_size);
}


void NativeIllegalInstruction::insert(address code_pos) {
  NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
  nii->set_long_at(0, illegal_instruction());
}

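// The canonical bit patterns for the two trap instructions used below are
// computed lazily: each trap is assembled once into a small scratch buffer
// and its 32-bit encoding is cached in a static, so later queries reduce
// to a single word comparison.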
static int illegal_instruction_bits = 0;

int NativeInstruction::illegal_instruction() {
  if (illegal_instruction_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(ST_RESERVED_FOR_USER_0 + 1);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    illegal_instruction_bits = bits;
    assert(illegal_instruction_bits != 0, "oops");
  }
  return illegal_instruction_bits;
}

static int ic_miss_trap_bits = 0;

bool NativeInstruction::is_ic_miss_trap() {
  if (ic_miss_trap_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    ic_miss_trap_bits = bits;
    assert(ic_miss_trap_bits != 0, "oops");
  }
  return long_at(0) == ic_miss_trap_bits;
}


bool NativeInstruction::is_illegal() {
  if (illegal_instruction_bits == 0) {
    return false;
  }
  return long_at(0) == illegal_instruction_bits;
}


void NativeGeneralJump::verify() {
  assert(((NativeInstruction*)this)->is_jump() ||
         ((NativeInstruction*)this)->is_cond_jump(), "not a general jump instruction");
}

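// Synthesizes a "ba" (branch always, not annulled) with a 22-bit word
// displacement and stores it as a single instruction word, so the
// insertion is atomic with respect to executing threads. The disp22
// field limits the reachable target to roughly +/-8MB of the branch site.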
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  Assembler::Condition condition = Assembler::always;
  int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) |
    Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22);
  NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos);
  ni->set_long_at(0, x);
}


// MT-safe patching of a jmp instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word. This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the jump.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert(instr_addr != NULL, "illegal address for code patching");
  NativeGeneralJump* h_jump = nativeGeneralJump_at(instr_addr); // checking that it is a jump
  assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
  int i0 = ((int*)code_buffer)[0];
  int i1 = ((int*)code_buffer)[1];
  int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original jump");
  // The set_long_at calls do the ICache::invalidate so we just need to do them in reverse order
  h_jump->set_long_at(1*BytesPerInstWord, i1);
  h_jump->set_long_at(0*BytesPerInstWord, i0);
  // NOTE: It is possible that another thread T will execute
  // only the second patched word.
  // In other words, since the original instruction is this
  //    jmp patching_stub; nop                     (NativeGeneralJump)
  // and the new sequence from the buffer is this:
  //    sethi %hi(K), %r; add %r, %lo(K), %r       (NativeMovConstReg)
  // what T will execute is this:
  //    jmp patching_stub; add %r, %lo(K), %r
  // thereby putting garbage into %r before calling the patching stub.
  // This is OK, because the patching stub ignores the value of %r.

  // Make sure the first-patched instruction, which may co-exist
  // briefly with the jump, will do something harmless.
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original jump");
}