assembler.cpp revision 1472
/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "incls/_precompiled.incl"
#include "incls/_assembler.cpp.incl"
// Implementation of AbstractAssembler
//
// The AbstractAssembler generates code into a CodeBuffer. To make code generation faster,
// the assembler keeps a copy of the code buffer's boundaries & modifies them when
// emitting bytes rather than using the code buffer's accessor functions all the time.
// The code buffer is updated via set_code_end(...) after emitting a whole instruction.
AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
  if (code == NULL)  return;
  set_code_section(code->insts());
  if (_code_begin == NULL) {
    vm_exit_out_of_memory(0, "CodeCache: no room for assembler code");
  }
}

void AbstractAssembler::set_code_section(CodeSection* cs) {
  _code_section = cs;
  _code_begin = cs->start();  _code_limit = cs->limit();  _code_pos = cs->end();
}
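
// For illustration only: with the section boundaries cached as above, a byte
// emitter (the real ones are defined inline in the assembler headers) reduces
// to a pointer store and bump; sync()/set_code_end() later writes the cached
// end back into the CodeSection, as described at the top of this file.
//
//   void AbstractAssembler::emit_byte(int x) {
//     assert(_code_pos < _code_limit, "code buffer overflow");
//     *(unsigned char*)_code_pos = (unsigned char)x;   // store at the cached end position
//     _code_pos += sizeof(unsigned char);               // advance the cached end
//   }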
// Inform CodeBuffer that incoming code and relocation will be for stubs
address AbstractAssembler::start_a_stub(int required_space) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->stubs();
  assert(_code_section == cb->insts(), "not in insts?");
  sync();
  if (cs->maybe_expand_to_ensure_remaining(required_space) && cb->blob() == NULL) {
    return NULL;
  }
  set_code_section(cs);
  return pc();
}
// Inform CodeBuffer that incoming code and relocation will be code
// Should not be called if start_a_stub() returned NULL
void AbstractAssembler::end_a_stub() {
  assert(_code_section == code()->stubs(), "not in stubs?");
  sync();
  set_code_section(code()->insts());
}
// Inform CodeBuffer that incoming code and relocation will be for constants
address AbstractAssembler::start_a_const(int required_space, int required_align) {
  CodeSection* cs = code()->consts();
  sync();
  address end = cs->end();
  int pad = -(intptr_t)end & (required_align-1);    // bytes needed to reach the requested alignment
  if (cs->maybe_expand_to_ensure_remaining(pad + required_space) &&
      code()->blob() == NULL)  return NULL;         // no room left in the code cache
  end = cs->end();                                  // may have moved if the section grew
  if (pad > 0) { while (--pad >= 0) *end++ = 0; cs->set_end(end); }  // zero-fill the padding
  set_code_section(cs);
  return end;
}
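
// For illustration: the padding expression above rounds 'end' up to the next
// multiple of a power-of-two alignment. For example, with 'end' ending in 0xC
// and required_align == 8:
//
//   pad = -(0xC) & (8-1) = 4   // four zero bytes move 'end' to the next 8-byte boundary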
// Inform CodeBuffer that incoming code and relocation will be code
// Should not be called if start_a_const() returned NULL
void AbstractAssembler::end_a_const() {
  assert(_code_section == code()->consts(), "not in consts?");
  sync();
  set_code_section(code()->insts());
}
void AbstractAssembler::flush() {
  sync();
  ICache::invalidate_range(addr_at(0), offset());
}
void AbstractAssembler::a_byte(int x) {
emit_byte(x);
}
void AbstractAssembler::a_long(jint x) {
  emit_long(x);
}
// Labels refer to positions in the (to be) generated code. There are bound
// and unbound labels.
//
// Bound labels refer to known positions in the already generated code.
// offset() is the position the label refers to.
//
// Unbound labels refer to as yet unknown positions in the code to be generated;
// such a label may contain a list of unresolved displacements that refer to it.
#ifndef PRODUCT
void AbstractAssembler::print(Label& L) {
  if (L.is_bound()) {
    tty->print_cr("bound label to %d|%d", L.loc_pos(), L.loc_sect());
  } else if (L.is_unbound()) {
    L.print_instructions((MacroAssembler*)this);
  } else {
    tty->print_cr("label in inconsistent state (loc = %d)", L.loc());
  }
}
#endif // PRODUCT
void AbstractAssembler::bind(Label& L) {
  if (L.is_bound()) {
    // Assembler can bind a label more than once to the same place.
    guarantee(L.loc() == locator(), "attempt to redefine label");
    return;
  }
  L.bind_loc(locator());
  L.patch_instructions((MacroAssembler*)this);
}
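
// For illustration only: a typical forward-branch use of a Label, assuming an
// x86-style MacroAssembler (cmpl/jcc) behind the usual '__' macro; the names
// below are illustrative, not part of this file.
//
//   Label done;
//   __ cmpl(rax, 0);
//   __ jcc(Assembler::equal, done);  // records an unresolved displacement on 'done'
//   /* ... code for the non-zero case ... */
//   __ bind(done);                   // fixes the position; patch_instructions() resolves the jump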
void AbstractAssembler::generate_stack_overflow_check(int frame_size_in_bytes) {
  if (UseStackBanging) {
    // Each code entry causes one stack bang n pages down the stack where n
    // is configurable by StackShadowPages. The setting depends on the maximum
    // depth of VM call stack or native before going back into java code,
    // since only java code can raise a stack overflow exception using the
    // stack banging mechanism. The VM and native code do not detect stack
    // overflow.
    // The code in JavaCalls::call() checks that there are at least n pages
    // available, so all entry code needs to do is bang once for the end of
    // this shadow zone.
    // The entry code may need to bang additional pages if the framesize
    // is greater than a page.
    const int page_size = os::vm_page_size();
    int bang_end = StackShadowPages * page_size;
    // This is how far the previous frame's stack banging extended.
    const int bang_end_safe = bang_end;
    if (frame_size_in_bytes > page_size) {
      bang_end += frame_size_in_bytes;
    }
    int bang_offset = bang_end_safe;
    while (bang_offset <= bang_end) {
      // Need at least one stack bang at end of shadow zone.
      bang_stack_with_offset(bang_offset);
      bang_offset += page_size;
    }
  } // end (UseStackBanging)
}
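
// For illustration only: the actual bang is platform code. On x86, for example,
// bang_stack_with_offset() is roughly a store of a scratch register at a
// negative offset from the stack pointer (a sketch, not the verbatim source):
//
//   void MacroAssembler::bang_stack_with_offset(int offset) {
//     // stack grows down; a positive offset means "this far below sp"
//     assert(offset > 0, "must bang with a positive offset");
//     movl(Address(rsp, -offset), rax);   // touching the guard page raises the overflow trap
//   }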
void Label::add_patch_at(CodeBuffer* cb, int branch_loc) {
  assert(_loc == -1, "Label is unbound");
  if (_patch_index < PatchCacheSize) {
    _patches[_patch_index] = branch_loc;
  } else {
    if (_patch_overflow == NULL) {
      _patch_overflow = cb->create_patch_overflow();
    }
    _patch_overflow->push(branch_loc);
  }
  ++_patch_index;
}
void Label::patch_instructions(MacroAssembler* masm) {
  assert(is_bound(), "Label is bound");
  CodeBuffer* cb = masm->code();
  int target_sect = CodeBuffer::locator_sect(loc());
  address target = cb->locator_address(loc());
  while (_patch_index > 0) {
    --_patch_index;
    int branch_loc;
    if (_patch_index >= PatchCacheSize) {
      branch_loc = _patch_overflow->pop();
    } else {
      branch_loc = _patches[_patch_index];
    }
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch = cb->locator_address(branch_loc);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      // The thing to patch is a constant word.
      *(address*)branch = target;
      continue;
    }

#ifdef ASSERT
    // Cross-section branches only work if the
    // intermediate section boundaries are frozen.
    if (target_sect != branch_sect) {
      for (int n = MIN2(target_sect, branch_sect),
               nlimit = (target_sect + branch_sect) - n;
           n < nlimit; n++) {
        CodeSection* cs = cb->code_section(n);
        assert(cs->is_frozen(), "cross-section branch needs stable offsets");
      }
    }
#endif //ASSERT

    // Push the target offset into the branch instruction.
    masm->pd_patch_instruction(branch, target);
  }
}
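
// For illustration only: the platform patch typically rewrites the branch's
// displacement field in place. For an x86 near jump (opcode 0xE9 followed by a
// 32-bit displacement) the arithmetic is roughly:
//
//   int disp = target - (branch + 5);   // 5 == length of the E9 jmp instruction
//   *(int*)(branch + 1) = disp;         // overwrite the displacement field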
struct DelayedConstant {
  typedef void (*value_fn_t)();
  BasicType type;
  intptr_t value;
  value_fn_t value_fn;
  // This limit of 20 is generous for initial uses.
  // The limit needs to be large enough to store the field offsets
  // into classes which do not have statically fixed layouts.
  // (Initial use is for method handle object offsets.)
  // Look for uses of "delayed_value" in the source code
  // and make sure this number is generous enough to handle all of them.
  enum { DC_LIMIT = 20 };
  static DelayedConstant delayed_constants[DC_LIMIT];
  static DelayedConstant* add(BasicType type, value_fn_t value_fn);
  bool match(BasicType t, value_fn_t cfn) {
    return type == t && value_fn == cfn;
  }
  static void update_all();
};
DelayedConstant DelayedConstant::delayed_constants[DC_LIMIT];
// Default C structure initialization rules have the following effect here:
// = { { (BasicType)0, (intptr_t)NULL }, ... };

DelayedConstant* DelayedConstant::add(BasicType type,
                                      DelayedConstant::value_fn_t cfn) {
  for (int i = 0; i < DC_LIMIT; i++) {
    DelayedConstant* dcon = &delayed_constants[i];
    if (dcon->match(type, cfn))
      return dcon;
    if (dcon->value_fn == NULL) {
      // (cmpxchg not because this is multi-threaded but because I'm paranoid)
      if (Atomic::cmpxchg_ptr(CAST_FROM_FN_PTR(void*, cfn), &dcon->value_fn, NULL) == NULL) {
        dcon->type = type;
        return dcon;
      }
    }
  }
  // If this assert is hit (in pre-integration testing!) then re-evaluate
  // the comment on the definition of DC_LIMIT.
  guarantee(false, "too many delayed constants");
  return NULL;
}
void DelayedConstant::update_all() {
  for (int i = 0; i < DC_LIMIT; i++) {
    DelayedConstant* dcon = &delayed_constants[i];
    if (dcon->value_fn != NULL && dcon->value == 0) {
      typedef int (*int_fn_t)();
      typedef address (*address_fn_t)();
      switch (dcon->type) {
      case T_INT:     dcon->value = (intptr_t) ((int_fn_t)    dcon->value_fn)(); break;
      case T_ADDRESS: dcon->value = (intptr_t) ((address_fn_t)dcon->value_fn)(); break;
      }
    }
  }
}

intptr_t* AbstractAssembler::delayed_value_addr(int(*value_fn)()) {
  DelayedConstant* dcon = DelayedConstant::add(T_INT, (DelayedConstant::value_fn_t) value_fn);
  return &dcon->value;
}

intptr_t* AbstractAssembler::delayed_value_addr(address(*value_fn)()) {
  DelayedConstant* dcon = DelayedConstant::add(T_ADDRESS, (DelayedConstant::value_fn_t) value_fn);
  return &dcon->value;
}

void AbstractAssembler::update_delayed_values() {
  DelayedConstant::update_all();
}
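
// For illustration only: a client takes the address of a lazily computed
// constant and emits code that loads through that slot; once the value can be
// computed, update_delayed_values() fills every slot in. Sketch with a
// hypothetical value function:
//
//   static int vmentry_offset_in_bytes();                            // not yet computable at VM startup
//   intptr_t* slot = AbstractAssembler::delayed_value_addr(&vmentry_offset_in_bytes);
//   /* ... emit a load through 'slot' ... */
//   AbstractAssembler::update_delayed_values();                      // later: patch in the real value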
bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  // Exception handler checks the nmethod's implicit null checks table
  // only when this method returns false.
#ifdef _LP64
  if (UseCompressedOops && Universe::narrow_oop_base() != NULL) {
    assert(Universe::heap() != NULL, "java heap should be initialized");
    // The first page after heap_base is unmapped and
    // the 'offset' is equal to [heap_base + offset] for
    // narrow oop implicit null checks.
    uintptr_t base = (uintptr_t)Universe::narrow_oop_base();
    if ((uintptr_t)offset >= base) {
      // Normalize offset for the next check.
      offset = (intptr_t)(pointer_delta((void*)offset, (void*)base, 1));
    }
  }
#endif
  return offset < 0 || os::vm_page_size() <= offset;
}
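
// For illustration only: a code generator consults this predicate when it is
// about to emit a memory access at 'offset' off a possibly-null base. Rough
// sketch (names are illustrative):
//
//   if (MacroAssembler::needs_explicit_null_check(field_offset)) {
//     /* emit an explicit compare-and-branch against NULL before the access */
//   } else {
//     /* the access itself is the null check; the resulting trap is looked up
//        in the nmethod's implicit null check table */
//   }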
#ifndef PRODUCT
void Label::print_instructions(MacroAssembler* masm) const {
  CodeBuffer* cb = masm->code();
  for (int i = 0; i < _patch_index; ++i) {
    int branch_loc;
    if (i >= PatchCacheSize) {
      branch_loc = _patch_overflow->at(i - PatchCacheSize);
    } else {
      branch_loc = _patches[i];
    }
    int branch_pos  = CodeBuffer::locator_pos(branch_loc);
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch = cb->locator_address(branch_loc);
    tty->print_cr("unbound label");
    tty->print("@ %d|%d ", branch_pos, branch_sect);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      tty->print_cr(PTR_FORMAT, *(address*)branch);
      continue;
    }
    masm->pd_print_patched_instruction(branch);
    tty->cr();
  }
}
#endif // ndef PRODUCT