// assembler.cpp, revision 1879
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.inline.hpp"
#endif


// Implementation of AbstractAssembler
//
// The AbstractAssembler generates code into a CodeBuffer. To make code
// generation faster, the assembler keeps a copy of the code buffer's
// boundaries and modifies them directly when emitting bytes, rather than
// going through the code buffer's accessor functions every time.
// The code buffer is updated via set_code_end(...) after emitting a whole
// instruction.
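//
// Roughly what this caching buys (a sketch only; the real emit_byte lives in
// the assembler.inline.hpp header included above):
//
//   inline void AbstractAssembler::emit_byte(int x) {
//     assert(isByte(x), "not a byte");
//     *(unsigned char*)_code_pos = (unsigned char)x;  // raw store, no CodeBuffer call
//     _code_pos += sizeof(unsigned char);
//     sync();                                         // publish the new end to the section
//   }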

AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
  if (code == NULL) return;
  CodeSection* cs = code->insts();
  cs->clear_mark(); // new assembler kills old mark
  _code_section = cs;
  _code_begin = cs->start();
  _code_limit = cs->limit();
  _code_pos = cs->end();
  _oop_recorder = code->oop_recorder();
  if (_code_begin == NULL) {
    vm_exit_out_of_memory(0, err_msg("CodeCache: no room for %s",
                                     code->name()));
  }
}

void AbstractAssembler::set_code_section(CodeSection* cs) {
  assert(cs->outer() == code_section()->outer(), "sanity");
  assert(cs->is_allocated(), "need to pre-allocate this section");
  cs->clear_mark(); // new assembly into this section kills old mark
  _code_section = cs;
  _code_begin = cs->start();
  _code_limit = cs->limit();
  _code_pos = cs->end();
}

// Inform CodeBuffer that incoming code and relocation will be for stubs
address AbstractAssembler::start_a_stub(int required_space) {
  CodeBuffer* cb = code();
  CodeSection* cs = cb->stubs();
  assert(_code_section == cb->insts(), "not in insts?");
  sync();
  if (cs->maybe_expand_to_ensure_remaining(required_space)
      && cb->blob() == NULL) {
    return NULL;
  }
  set_code_section(cs);
  return pc();
}

// Inform CodeBuffer that incoming code and relocation will be code
// Should not be called if start_a_stub() returned NULL
void AbstractAssembler::end_a_stub() {
  assert(_code_section == code()->stubs(), "not in stubs?");
  sync();
  set_code_section(code()->insts());
}
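
// Typical pairing (a sketch of how stub generators drive this protocol; the
// "__" shorthand and the stub body itself are platform code):
//
//   address base = __ start_a_stub(stub_size);  // switch to the stubs section
//   if (base == NULL) return;                   // CodeCache is full
//   ... emit the stub's instructions ...
//   __ end_a_stub();                            // switch back to insts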

// Inform CodeBuffer that incoming code and relocation will be for the
// constants section
address AbstractAssembler::start_a_const(int required_space, int required_align) {
  CodeBuffer* cb = code();
  CodeSection* cs = cb->consts();
  assert(_code_section == cb->insts(), "not in insts?");
  sync();
  address end = cs->end();
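  // For a power-of-two alignment A, (-x) & (A-1) is the number of bytes that
  // rounds x up to the next multiple of A (e.g. end = 0x1c, A = 8 => pad = 4).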
  int pad = -(intptr_t)end & (required_align-1);
  if (cs->maybe_expand_to_ensure_remaining(pad + required_space)) {
    if (cb->blob() == NULL) return NULL;
    end = cs->end();  // refresh pointer
  }
  if (pad > 0) {
    while (--pad >= 0) { *end++ = 0; }
    cs->set_end(end);
  }
  set_code_section(cs);
  return end;
}

// Inform CodeBuffer that incoming code and relocation will be code
// Should not be called if start_a_const() returned NULL
void AbstractAssembler::end_a_const() {
  assert(_code_section == code()->consts(), "not in consts?");
  sync();
  set_code_section(code()->insts());
}

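// Finish pending output and invalidate the instruction cache over the emitted
// range, so processors with incoherent instruction/data caches do not execute
// stale bytes.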
void AbstractAssembler::flush() {
  sync();
  ICache::invalidate_range(addr_at(0), offset());
}


void AbstractAssembler::a_byte(int x) {
  emit_byte(x);
}


void AbstractAssembler::a_long(jint x) {
  emit_long(x);
}

// Labels refer to positions in the (to be) generated code. There are bound
// and unbound labels.
//
// Bound labels refer to known positions in already-generated code.
// offset() is the position the label refers to.
//
// Unbound labels refer to unknown positions in the code to be generated; an
// unbound label may carry a list of unresolved displacements that refer to it.
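//
// A sketch of the usual pattern (the branch mnemonic is platform-specific;
// "__" abbreviates the MacroAssembler):
//
//   Label done;
//   __ jcc(Assembler::equal, done);  // records a patch against the unbound label
//   ... more code ...
//   __ bind(done);                   // binding patches every recorded branch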
#ifndef PRODUCT
void AbstractAssembler::print(Label& L) {
  if (L.is_bound()) {
    tty->print_cr("bound label to %d|%d", L.loc_pos(), L.loc_sect());
  } else if (L.is_unbound()) {
    L.print_instructions((MacroAssembler*)this);
  } else {
    tty->print_cr("label in inconsistent state (loc = %d)", L.loc());
  }
}
#endif // PRODUCT


void AbstractAssembler::bind(Label& L) {
  if (L.is_bound()) {
    // Assembler can bind a label more than once to the same place.
    guarantee(L.loc() == locator(), "attempt to redefine label");
    return;
  }
  L.bind_loc(locator());
  L.patch_instructions((MacroAssembler*)this);
}

void AbstractAssembler::generate_stack_overflow_check(int frame_size_in_bytes) {
  if (UseStackBanging) {
    // Each code entry causes one stack bang n pages down the stack, where n
    // is configurable by StackShadowPages. The setting depends on the maximum
    // depth of the VM and native call stack before going back into Java code,
    // since only Java code can raise a stack overflow exception using the
    // stack banging mechanism; VM and native code do not detect stack
    // overflow.
    // The code in JavaCalls::call() checks that there are at least n pages
    // available, so all entry code needs to do is bang once for the end of
    // this shadow zone.
    // The entry code may need to bang additional pages if the frame size
    // is greater than a page.

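    // Illustrative numbers: with a 4K page and StackShadowPages = 20, the
    // shadow zone ends 80K below the current stack pointer; a 10K frame
    // pushes bang_end out to 90K, and the loop below touches one page at a
    // time from the old shadow end to the new one.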
    const int page_size = os::vm_page_size();
    int bang_end = StackShadowPages*page_size;

    // This is how far the previous frame's stack banging extended.
    const int bang_end_safe = bang_end;

    if (frame_size_in_bytes > page_size) {
      bang_end += frame_size_in_bytes;
    }

    int bang_offset = bang_end_safe;
    while (bang_offset <= bang_end) {
      // Need at least one stack bang at end of shadow zone.
      bang_stack_with_offset(bang_offset);
      bang_offset += page_size;
    }
  } // end (UseStackBanging)
}

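// Unbound-label bookkeeping: branch locations land in a small fixed inline
// cache (_patches, PatchCacheSize entries); once that fills up, the remainder
// spill into a growable stack obtained from the CodeBuffer (_patch_overflow).
// patch_instructions() below drains both in reverse order.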
void Label::add_patch_at(CodeBuffer* cb, int branch_loc) {
  assert(_loc == -1, "Label is unbound");
  if (_patch_index < PatchCacheSize) {
    _patches[_patch_index] = branch_loc;
  } else {
    if (_patch_overflow == NULL) {
      _patch_overflow = cb->create_patch_overflow();
    }
    _patch_overflow->push(branch_loc);
  }
  ++_patch_index;
}

void Label::patch_instructions(MacroAssembler* masm) {
  assert(is_bound(), "Label is bound");
  CodeBuffer* cb = masm->code();
  int target_sect = CodeBuffer::locator_sect(loc());
  address target = cb->locator_address(loc());
  while (_patch_index > 0) {
    --_patch_index;
    int branch_loc;
    if (_patch_index >= PatchCacheSize) {
      branch_loc = _patch_overflow->pop();
    } else {
      branch_loc = _patches[_patch_index];
    }
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch = cb->locator_address(branch_loc);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      // The thing to patch is a constant word.
      *(address*)branch = target;
      continue;
    }

#ifdef ASSERT
    // Cross-section branches only work if the
    // intermediate section boundaries are frozen.
    if (target_sect != branch_sect) {
      for (int n = MIN2(target_sect, branch_sect),
               nlimit = (target_sect + branch_sect) - n;
           n < nlimit; n++) {
        CodeSection* cs = cb->code_section(n);
        assert(cs->is_frozen(), "cross-section branch needs stable offsets");
      }
    }
#endif //ASSERT

    // Push the target offset into the branch instruction.
    masm->pd_patch_instruction(branch, target);
  }
}

struct DelayedConstant {
  typedef void (*value_fn_t)();
  BasicType type;
  intptr_t value;
  value_fn_t value_fn;
  // This limit of 20 is generous for initial uses.
  // The limit needs to be large enough to store the field offsets
  // into classes which do not have statically fixed layouts.
  // (Initial use is for method handle object offsets.)
  // Look for uses of "delayed_value" in the source code
  // and make sure this number is generous enough to handle all of them.
  enum { DC_LIMIT = 20 };
  static DelayedConstant delayed_constants[DC_LIMIT];
  static DelayedConstant* add(BasicType type, value_fn_t value_fn);
  bool match(BasicType t, value_fn_t cfn) {
    return type == t && value_fn == cfn;
  }
  static void update_all();
};

DelayedConstant DelayedConstant::delayed_constants[DC_LIMIT];
// Default C structure initialization rules have the following effect here:
// = { { (BasicType)0, (intptr_t)NULL }, ... };

DelayedConstant* DelayedConstant::add(BasicType type,
                                      DelayedConstant::value_fn_t cfn) {
  for (int i = 0; i < DC_LIMIT; i++) {
    DelayedConstant* dcon = &delayed_constants[i];
    if (dcon->match(type, cfn))
      return dcon;
    if (dcon->value_fn == NULL) {
      // (cmpxchg not because this is multi-threaded but because I'm paranoid)
      if (Atomic::cmpxchg_ptr(CAST_FROM_FN_PTR(void*, cfn), &dcon->value_fn, NULL) == NULL) {
        dcon->type = type;
        return dcon;
      }
    }
  }
  // If this assert is hit (in pre-integration testing!) then re-evaluate
  // the comment on the definition of DC_LIMIT.
  guarantee(false, "too many delayed constants");
  return NULL;
}

void DelayedConstant::update_all() {
  for (int i = 0; i < DC_LIMIT; i++) {
    DelayedConstant* dcon = &delayed_constants[i];
    if (dcon->value_fn != NULL && dcon->value == 0) {
      typedef int (*int_fn_t)();
      typedef address (*address_fn_t)();
      switch (dcon->type) {
      case T_INT:     dcon->value = (intptr_t) ((int_fn_t)    dcon->value_fn)(); break;
      case T_ADDRESS: dcon->value = (intptr_t) ((address_fn_t)dcon->value_fn)(); break;
      }
    }
  }
}

intptr_t* AbstractAssembler::delayed_value_addr(int(*value_fn)()) {
  DelayedConstant* dcon = DelayedConstant::add(T_INT, (DelayedConstant::value_fn_t) value_fn);
  return &dcon->value;
}

intptr_t* AbstractAssembler::delayed_value_addr(address(*value_fn)()) {
  DelayedConstant* dcon = DelayedConstant::add(T_ADDRESS, (DelayedConstant::value_fn_t) value_fn);
  return &dcon->value;
}

void AbstractAssembler::update_delayed_values() {
  DelayedConstant::update_all();
}
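
// A sketch of the delayed-constant lifecycle (some_offset_fn is a
// hypothetical int (*)() that computes a field offset once layouts are known):
//
//   // At code-generation time, before the value can be computed:
//   intptr_t* slot = masm->delayed_value_addr(some_offset_fn);
//   ... emit code that loads *slot at run time ...
//
//   // Later, once the offset is computable:
//   AbstractAssembler::update_delayed_values();  // fills in every registered slot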

void AbstractAssembler::block_comment(const char* comment) {
  if (sect() == CodeBuffer::SECT_INSTS) {
    code_section()->outer()->block_comment(offset(), comment);
  }
}

bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  // Exception handler checks the nmethod's implicit null checks table
  // only when this method returns false.
#ifdef _LP64
  if (UseCompressedOops && Universe::narrow_oop_base() != NULL) {
    assert (Universe::heap() != NULL, "java heap should be initialized");
    // The first page after heap_base is unmapped and
    // the 'offset' is equal to [heap_base + offset] for
    // narrow oop implicit null checks.
    uintptr_t base = (uintptr_t)Universe::narrow_oop_base();
    if ((uintptr_t)offset >= base) {
      // Normalize offset for the next check.
      offset = (intptr_t)(pointer_delta((void*)offset, (void*)base, 1));
    }
  }
#endif
  return offset < 0 || os::vm_page_size() <= offset;
}
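
// Illustrative numbers: with a 4K page, offset 40 falls inside the protected
// first page, so the hardware fault covers the null check and this returns
// false; offset 6000 is past that page, so an explicit null check is needed
// and this returns true. Under compressed oops, a fault at
// narrow_oop_base + 40 is first normalized back to plain offset 40.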

#ifndef PRODUCT
void Label::print_instructions(MacroAssembler* masm) const {
  CodeBuffer* cb = masm->code();
  for (int i = 0; i < _patch_index; ++i) {
    int branch_loc;
    if (i >= PatchCacheSize) {
      branch_loc = _patch_overflow->at(i - PatchCacheSize);
    } else {
      branch_loc = _patches[i];
    }
    int branch_pos  = CodeBuffer::locator_pos(branch_loc);
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch = cb->locator_address(branch_loc);
    tty->print_cr("unbound label");
    tty->print("@ %d|%d ", branch_pos, branch_sect);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      tty->print_cr(PTR_FORMAT, *(address*)branch);
      continue;
    }
    masm->pd_print_patched_instruction(branch);
    tty->cr();
  }
}
#endif // ndef PRODUCT