// assembler_sparc.cpp, revision 1915
/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "assembler_sparc.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif
// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) {
  assert(scale == 0, "not supported");
  RelocationHolder rspec;
  if (disp_is_oop) {
    rspec = Relocation::spec_simple(relocInfo::oop_type);
  }
  Register rindex = as_Register(index);
  if (rindex != G0) {
    Address madr(as_Register(base), rindex);
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}
Address Argument::address_in_frame() const {
  // Warning: In LP64 mode disp will occupy more than 10 bits, but
  //          opcodes such as ld or ldx only access disp() to get
  //          their simm13 argument.
  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
  if (is_in())
    return Address(FP, disp); // In argument.
  else
    return Address(SP, disp); // Out argument.
}
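
// Illustrative aside (hypothetical helper, not part of the original file):
// the simm13 range check the warning above refers to. A 13-bit signed
// immediate covers [-4096, 4095]; anything wider must go through sethi/or.
static inline bool fits_simm13_sketch(intptr_t x) {
  return -4096 <= x && x <= 4095;   // [-2^12, 2^12 - 1]
}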
static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}
void Assembler::print_instruction(int inst) {
  const char* s;
  switch (inv_op(inst)) {
  default:        s = "????"; break;
  case call_op:   s = "call"; break;
  case branch_op:
    switch (inv_op2(inst)) {
    case bpr_op2: s = "bpr";  break;
    case fb_op2:  s = "fb";   break;
    case fbp_op2: s = "fbp";  break;
    case br_op2:  s = "br";   break;
    case bp_op2:  s = "bp";   break;
    case cb_op2:  s = "cb";   break;
    default:      s = "????"; break;
    }
  }
  ::tty->print("%s", s);
}
// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {
  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:   m = wdisp(word_aligned_ones, 0, 30);    v = wdisp(dest_pos, inst_pos, 30);    break;
  case branch_op:
    switch (inv_op2(inst)) {
    case bpr_op2: m = wdisp16(word_aligned_ones, 0);      v = wdisp16(dest_pos, inst_pos);      break;
    case fbp_op2: m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19);  break;
    case bp_op2:  m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19);  break;
    case fb_op2:  m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22);  break;
    case br_op2:  m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22);  break;
    case cb_op2:  m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22);  break;
    default: ShouldNotReachHere();
    }
  }
  return (inst & ~m) | v;
}
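
// Illustrative aside: the patching idiom above in isolation, for a toy
// 22-bit displacement field (hypothetical helper; the real code derives
// m and v per opcode). Clear the field with its mask, then OR in the new
// word-aligned displacement.
static inline int patch_disp22_sketch(int inst, int dest_pos, int inst_pos) {
  int m = (1 << 22) - 1;                      // mask for a disp22 field
  int v = ((dest_pos - inst_pos) >> 2) & m;   // displacement in words, masked
  return (inst & ~m) | v;
}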
// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
int Assembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:   r = inv_wdisp(inst, pos, 30);    break;
  case branch_op:
    switch (inv_op2(inst)) {
    case bpr_op2: r = inv_wdisp16(inst, pos);      break;
    case fbp_op2: r = inv_wdisp(  inst, pos, 19);  break;
    case bp_op2:  r = inv_wdisp(  inst, pos, 19);  break;
    case fb_op2:  r = inv_wdisp(  inst, pos, 22);  break;
    case br_op2:  r = inv_wdisp(  inst, pos, 22);  break;
    case cb_op2:  r = inv_wdisp(  inst, pos, 22);  break;
    default: ShouldNotReachHere();
    }
  }
  return r;
}
int AbstractAssembler::code_fill_byte() {
return 0x00; // illegal instruction 0x00000000
}
static Assembler::Condition reg_cond_to_cc_cond(Assembler::RCondition in) {
  switch (in) {
  case Assembler::rc_z:   return Assembler::equal;
  case Assembler::rc_lez: return Assembler::lessEqual;
  case Assembler::rc_lz:  return Assembler::less;
  case Assembler::rc_nz:  return Assembler::notEqual;
  case Assembler::rc_gz:  return Assembler::greater;
  case Assembler::rc_gez: return Assembler::greaterEqual;
  default:
    ShouldNotReachHere();
  }
  return Assembler::equal; // not reached
}
// Generate a bunch 'o stuff (including v9 instructions).
#ifndef PRODUCT
void Assembler::test_v9() {
  done();
retry();
flushw();
impdep2( 3, 0 );
membar( Membar_mask_bits(StoreStore | LoadStore | StoreLoad | LoadLoad | Sync | MemIssue | Lookaside ) );
membar( StoreStore );
nop();
saved();
restored();
sir( -1 );
stbar();
}
// Generate a bunch 'o stuff unique to V8
void Assembler::test_v8_onlys() {
}
#endif
// Implementation of MacroAssembler

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check(offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
else {
// nothing to do, (later) access of M[reg + offset]
// will provoke OS NULL exception if reg = NULL
}
}
// Ring buffer jumps
#ifndef PRODUCT
} else {
}
}
#endif /* PRODUCT */
// This can only be traceable if r1 & r2 are visible after a window save
if (TraceJumps) {
#ifndef PRODUCT
save_frame(0);
Label L;
// get nearby pc, store jmp target
bind(L);
// store nearby pc
// store file
// store line
restore();
#endif /* PRODUCT */
}
}
// This can only be traceable if r1 is visible after a window save
if (TraceJumps) {
#ifndef PRODUCT
save_frame(0);
Label L;
// get nearby pc, store jmp target
bind(L);
// store nearby pc
// store file
// store line
restore();
#endif /* PRODUCT */
}
}
// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
// Force fixed length sethi because NativeJump and NativeFarCall don't handle
// variable length instruction streams.
if (TraceJumps) {
#ifndef PRODUCT
// Must do the add here so relocation can find the remainder of the
// value to be relocated.
save_frame(0);
Label L;
// get nearby pc, store jmp target
bind(L);
// store nearby pc
// store file
// store line
restore();
#else
#endif /* PRODUCT */
} else {
}
}
void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
}
// Convert to C varargs format
// spill register-resident args to their memory slots
// (SPARC calling convention requires callers to have already preallocated these)
// Note that the inArg might in fact be an outgoing argument,
// if a leaf routine or stub does some tricky argument shuffling.
// This routine must work even though one of the saved arguments
// is in the d register (e.g., set_varargs(Argument(0, false), O0)).
}
// return the address of the first memory slot
}
// Conditional breakpoint (for assertion checks in assembly code)
}
// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
}
// flush windows (except current) using flushw instruction if avail.
void MacroAssembler::flush_windows() {
else flush_windows_trap();
}
// Write serialization page so VM thread can do a pseudo remote membar
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
}
else {
}
}
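
// Illustrative aside (assumed constants, hypothetical helper): one way to
// derive the thread-specific, word-aligned offset into the serialization
// page that the comment above describes, so concurrent writers land on
// different cache lines.
static inline int serialize_page_offset_sketch(uintptr_t thread_ptr, int page_size) {
  const int wordSz = sizeof(int);
  int mask = (page_size - wordSz) & ~(wordSz - 1);  // in-page, word-aligned
  return (int)((thread_ptr >> 4) & mask);  // low bits of a Thread* are zero anyway
}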
void MacroAssembler::enter() {
}
void MacroAssembler::leave() {
}
if(VM_Version::v9_instructions_work()) {
} else {
}
}
if(VM_Version::v9_instructions_work()) {
} else {
}
}
#ifdef ASSERT
// Get the condition codes the V8 way.
// This is a test of V8 which has icc but not xcc
// so mask off the xcc bits
// Compare condition codes from the V8 and V9 ways.
delayed()->breakpoint_trap();
}
// Write out the saved condition codes the V8 way
// Read back the condition codes using the V9 instruction
// This is a test of V8 which has icc but not xcc
// so mask off the xcc bits
// Compare the V8 way with the V9 way.
delayed()->breakpoint_trap();
}
#else
#define read_ccr_v8_assert(x)
#define write_ccr_v8_assert(x)
#endif // ASSERT
if (VM_Version::v9_instructions_work()) {
// Test code sequence used on V8. Do not move above rdccr.
} else {
}
}
if (VM_Version::v9_instructions_work()) {
// Test code sequence used on V8. Do not move below wrccr.
} else {
}
}
// Calls to C land
#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
return ThreadLocalStorage::thread();
}
#else
#endif
#ifdef ASSERT
#endif
// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
save_frame(0); // to avoid clobbering O0
#ifdef ASSERT
#endif
}
return correct_value;
}
void MacroAssembler::verify_thread() {
if (VerifyThread) {
// NOTE: this chops off the heads of the 64-bit O registers.
#ifdef CC_INTERP
save_frame(0);
#else
// make sure G2_thread contains the right value
save_frame_and_mov(0, Lmethod, Lmethod); // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
// G2 saved below
#endif /* CC_INTERP */
// Save & restore possible 64-bit Long arguments in G-regs
#endif
// G2 restored below
// Save & restore possible 64-bit Long arguments in G-regs
#endif
}
}
if (thread_cache->is_valid()) {
}
if (VerifyThread) {
// smash G2_thread, as if the VM were about to anyway
}
}
if (thread_cache->is_valid()) {
} else {
// do it the slow way
get_thread();
}
}
// %%% maybe get rid of [re]set_last_Java_frame
// Always set last_Java_pc and flags first because once last_Java_sp is visible
// has_last_Java_frame is true and users will look at the rest of the fields.
// (Note: flags should always be zero before we get here so doesn't need to be set.)
#ifdef ASSERT
// Verify that flags was zeroed on return to Java
save_frame(0); // to avoid clobbering O0
#ifdef _LP64
#else
#endif // _LP64
stop("last_Java_pc not zeroed before leaving Java");
// Verify that flags was zeroed on return to Java
stop("flags not zeroed before leaving Java");
#endif /* ASSERT */
//
// When returning from calling out from Java mode the frame anchor's last_Java_pc
// will always be set to NULL. It is set here so that if we are doing a call to
// native (not VM) that we capture the known pc and don't have to rely on the
// native call having a standard frame linkage where we can find the pc.
if (last_Java_pc->is_valid()) {
}
#ifdef _LP64
#ifdef ASSERT
// Make sure that we have an odd stack
stop("Stack Not Biased in set_last_Java_frame");
#endif // ASSERT
#else
#endif // _LP64
}
void MacroAssembler::reset_last_Java_frame(void) {
Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
#ifdef ASSERT
// check that it WAS previously set
#ifdef CC_INTERP
save_frame(0);
#else
#endif /* CC_INTERP */
restore();
#endif // ASSERT
// Always return last_Java_pc to zero
// Always null flags after return to Java
}
void MacroAssembler::call_VM_base(
int number_of_arguments,
bool check_exceptions)
{
// determine last_java_sp register
if (!last_java_sp->is_valid()) {
last_java_sp = SP;
}
// debugging support
// 64-bit last_java_sp is biased!
// do the call
if (!VerifyThread)
else
// check for pending exceptions. use Gtemp as scratch register.
if (check_exceptions) {
}
// get oop result if there is one and reset the value in the thread
if (oop_result->is_valid()) {
}
}
{
Label L;
// we use O7 linkage so that forward_exception_entry has the issuing PC
bind(L);
}
}
}
void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
}
void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
// O0 is reserved for the thread
}
void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
// O0 is reserved for the thread
}
void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
// O0 is reserved for the thread
}
// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one. In that case, last_java_sp must be passed as FP
// instead of SP.
void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
}
void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
// O0 is reserved for the thread
}
void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
// O0 is reserved for the thread
}
void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
// O0 is reserved for the thread
}
void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
// do the call
}
void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
}
}
void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
}
void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
}
}
}
// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
# ifdef ASSERT
// Check that we are not overwriting any other oop.
#ifdef CC_INTERP
save_frame(0);
#else
#endif /* CC_INTERP */
restore();
// }
# endif
}
#ifdef _LP64
#else
#endif
}
void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
int shiftcnt;
#ifdef _LP64
# ifdef CHECK_DELAY
assert_not_delayed((char*) "cannot put two instructions in delay slot");
# endif
v9_dep();
}
else if (msb32 == -1) {
}
else {
shiftcnt = 0; // We already shifted
}
else
shiftcnt = 12;
shiftcnt = 0;
}
else
shiftcnt = 10;
}
else
sllx(d, 32, d);
}
// Pad out the instruction sequence so it can be patched later.
nop();
}
#else
#endif
}
internal_sethi(addrlit, d, false);
}
internal_sethi(addrlit, d, true);
}
#ifdef _LP64
if (worst_case) return 7;
int inst_count;
inst_count = 1;
else if (hi32 == -1)
inst_count = 2;
else {
inst_count = 2;
if ( hi32 & 0x3ff )
inst_count++;
if ( lo32 & 0xFFFFFC00 ) {
}
}
return BytesPerInstWord * inst_count;
#else
return BytesPerInstWord;
#endif
}
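
// Illustrative aside: the 32-bit half of the size computation above, as a
// self-contained sketch (hypothetical helper). A constant needs one
// instruction if it fits in simm13 (mov) or has ten zero low bits (sethi
// alone), else two (sethi + or).
static inline int insts_to_set32_sketch(int32_t x) {
  if (-4096 <= x && x <= 4095) return 1;  // fits simm13: a single mov/or
  if ((x & 0x3ff) == 0)        return 1;  // sethi %hi(x) alone
  return 2;                               // sethi %hi(x) ; or %lo(x)
}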
int MacroAssembler::worst_case_size_of_set() {
}
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
// can optimize
return;
}
return;
}
}
assert_not_delayed((char*) "cannot put two instructions in delay slot");
}
}
internal_set(al, d, false);
}
internal_set(al, d, false);
}
internal_set(al, d, false);
}
internal_set(al, d, true);
}
internal_set(al, d, true);
}
v9_dep();
// (Matcher::isSimpleConstant64 knows about the following optimizations.)
} else if (hi == 0) {
}
else if (hi == -1) {
}
else if (lo == 0) {
} else {
}
sllx(d, 32, d);
}
else {
}
}
v9_dep();
int count = 0;
// (Matcher::isSimpleConstant64 knows about the following optimizations.)
count++;
} else if (hi == 0) {
count++;
count++;
}
else if (hi == -1) {
count += 2;
}
else if (lo == 0) {
count++;
} else {
count++;
count++;
}
count++;
}
else {
count += 2;
count++;
count++;
count += 2;
}
return count;
}
// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {
  int nWords = frame::memory_parameter_word_sp_offset;
  nWords += extraWords;
  if (nWords & 1) ++nWords; // round up to double-word alignment
  return nWords * BytesPerWord;
}
// save_frame: given number of "extra" words in frame,
// issue approp. save instruction (p 200, v8 manual)
} else {
}
}
if (is_simm13(-size_in_bytes)) {
} else {
}
}
// The trick here is to use precisely the same memory word
// that trap handlers also use to save the register.
// This word cannot be used for any other purpose, but
// it works fine to save the register's value, whether or not
// an interrupt flushes register windows at any given moment!
}
}
}
}
}
}
}
// Relocation with special format (see relocInfo_sparc.hpp).
// Assembler::sethi(0x3fffff, d);
// Don't add relocation for 'add'. Do patching during 'sethi' processing.
add(d, 0x3ff, d);
}
}
void MacroAssembler::safepoint() {
}
void RegistersForDebugging::print(outputStream* s) {
  int j;
  for ( j = 0; j < 8; ++j )
    if ( j != 6 ) s->print_cr("i%d = 0x%.16lx", j, i[j]);
    else          s->print_cr( "fp = 0x%.16lx", i[j]);
  s->cr();
  for ( j = 0; j < 8; ++j )
    s->print_cr("l%d = 0x%.16lx", j, l[j]);
  s->cr();
  for ( j = 0; j < 8; ++j )
    if ( j != 6 ) s->print_cr("o%d = 0x%.16lx", j, o[j]);
    else          s->print_cr( "sp = 0x%.16lx", o[j]);
  s->cr();
  for ( j = 0; j < 8; ++j )
    s->print_cr("g%d = 0x%.16lx", j, g[j]);
  s->cr();
// print out floats with compression
for (j = 0; j < 32; ) {
int last = j;
break;
}
s->print("f%d", j);
s->fill_to(25);
j = last + 1;
}
s->cr();
// and doubles (evens only)
for (j = 0; j < 32; ) {
int last = j;
break;
}
s->fill_to(30);
s->fill_to(42);
j = last + 1;
}
s->cr();
}
a->flush_windows();
int i;
for (i = 0; i < 8; ++i) {
a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, i_offset(i));
a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, l_offset(i));
}
for (i = 0; i < 32; ++i) {
}
}
}
for (int i = 1; i < 8; ++i) {
}
for (int j = 0; j < 32; ++j) {
}
}
}
// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
// %%%%%% need to implement this
}
// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
// %%%%%% need to implement this
}
void MacroAssembler::empty_FPU_stack() {
// %%%%%% need to implement this
}
// plausibility check for oops
if (!VerifyOops) return;
char buffer[64];
#ifdef COMPILER1
if (CommentedAssembly) {
}
#endif
// Call indirectly to solve generation ordering problem
// Make some space on stack above the current register window.
// Enough to hold 8 64-bit registers.
// Save some 64-bit registers; a normal 'save' chops the heads off
// of 64-bit longs in the 32-bit build.
// Load address to call to into O7
load_ptr_contents(a, O7);
// Register call to verify_oop_subroutine
// recover frame size
}
// plausibility check for oops
if (!VerifyOops) return;
char buffer[64];
// Call indirectly to solve generation ordering problem
// Make some space on stack above the current register window.
// Enough to hold 8 64-bit registers.
// Save some 64-bit registers; a normal 'save' chops the heads off
// of 64-bit longs in the 32-bit build.
ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
// Load address to call to into O7
load_ptr_contents(a, O7);
// Register call to verify_oop_subroutine
// recover frame size
}
// side-door communication with signalHandler in os_solaris.cpp
// This macro is expanded just once; it creates shared code. Contract:
// receives an oop in O0. Must restore O0 & O7 from TLS. Must not smash ANY
// registers, including flags. May not use a register 'save', as this blows
// the high bits of the O-regs if they contain Long values. Acts as a 'leaf'
// call.
void MacroAssembler::verify_oop_subroutine() {
// Leaf call; no frame.
// O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
// O0 is now the oop to be checked. O7 is the return address.
// Save some more registers for temps.
// Save flags
rdccr( O5_save_flags );
{ // count number of verifies
}
// mark lower end of faulting range
_verify_oop_implicit_branch[0] = pc();
// We can't check the mark oop because it could be in the process of
// locking or unlocking while this is running.
// assert((obj & oop_mask) == oop_bits);
// the null_or_fail case is useless; must test for null separately
}
// Check the klassOop of this object for being in the right area of memory.
// Cannot do the load in the delay slot above in case O0 is null
// assert((klass & klass_mask) == klass_bits);
// Check the klass's klass
// mark upper end of faulting range
//-----------------------
// all tests pass
// Restore prior 64-bit registers
retl(); // Leaf return; restore prior O7 in delay slot
//-----------------------
//-----------------------
// report failure:
// stop_subroutine expects message pointer in I1.
// Restore prior 64-bit registers
// factor long stop-sequence into subroutine to save space
// call indirectly to solve generation ordering problem
}
// save frame first to get O7 for return address
// add one word to size in case struct is odd number of words long
// It must be doubleword-aligned for storing doubles into it.
// stop_subroutine expects message pointer in I1.
// factor long stop-sequence into subroutine to save space
// call indirectly to solve generation ordering problem
load_ptr_contents(a, O5);
breakpoint_trap(); // make stop actually stop rather than writing
// unnoticeable results in the output files.
// restore(); done in callee to save space!
}
RegistersForDebugging::save_registers(this);
// ret();
// delayed()->restore();
restore();
}
// We must be able to turn interactive prompting off
// in order to run automated test scripts on the VM
// Use the flag ShowMessageBoxOnError
char* b = new char[1024];
if ( ShowMessageBoxOnError ) stop(b);
else warn(b);
}
void MacroAssembler::stop_subroutine() {
RegistersForDebugging::save_registers(this);
// for the sake of the debugger, stick a PC on the current frame
// (this assumes that the caller has performed an extra "save")
save_frame(); // one more save to free up another O7 register
// We expect pointer to message in I1. Caller must set it up in O1
restore();
save_frame(0);
restore();
retl();
}
if ( ShowMessageBoxOnError ) {
{
// In order to get locks to work, we need to fake an in_VM state
}
}
}
else
assert(false, "error");
}
#ifndef PRODUCT
void MacroAssembler::test() {
VM_Version::allow_all();
a->test_v9();
a->test_v8_onlys();
VM_Version::revert();
}
#endif
}
#ifdef _LP64
#else
#endif
}
}
// ---------------------------------------------------------
switch (c) {
/*case zero: */
/*case notZero:*/
}
}
// compares register with zero and branches. NOT FOR USE WITH 64-bit POINTERS
br (c, a, p, L);
}
// Compares a pointer register with zero and branches on null.
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
#ifdef _LP64
#else
#endif
}
#ifdef _LP64
#else
#endif
}
if (VM_Version::v9_instructions_work()) {
} else {
}
}
if (VM_Version::v9_instructions_work()) {
} else {
}
}
// instruction sequences factored across compiler & interpreter
// And, with an unsigned comparison, it does not matter if the numbers
// are negative or not.
// E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
// The second one is bigger (unsignedly).
// Other notes: The first move in each triplet can be unconditional
// (and therefore probably prefetchable).
// And the equals case for the high part does not need testing,
// since that triplet is reached only after finding the high halves differ.
if (VM_Version::v9_instructions_work()) {
}
else {
}
bind( check_low_parts );
if (VM_Version::v9_instructions_work()) {
}
else {
}
}
}
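
// Illustrative aside: the comparison strategy in the comments above, in C
// (hypothetical helper). High halves are compared signed; once they differ
// the low halves never matter, and equal highs reduce to an unsigned compare
// of the lows (e.g. -2 vs -1: lows 0xfffffffe vs 0xffffffff, second bigger).
static inline int lcmp_by_halves_sketch(int32_t ah, uint32_t al,
                                        int32_t bh, uint32_t bl) {
  if (ah != bh) return (ah < bh) ? -1 : 1;  // signed high halves decide
  if (al != bl) return (al < bl) ? -1 : 1;  // unsigned low halves decide
  return 0;
}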
assert( Ralt_count != Rin_high
&& Ralt_count != Rin_low
&& Ralt_count != Rcount
&& Rxfer_bits != Rin_low
&& Rxfer_bits != Rin_high
&& Rxfer_bits != Rcount
&& Rxfer_bits != Rout_low
"register alias checks");
// This code can be optimized to use the 64 bit shifts in V9.
// Here we use the 32 bit shifts.
delayed()->
// shift < 32 bits, Ralt_count = Rcount-31
// We get the transfer bits by shifting right by 32-count the low
// register. This is done by shifting right by 31-count and then by one
// more to take care of the special (rare) case where count is zero
// (shifting by 32 would not work).
neg( Ralt_count );
// The order of the next two instructions is critical in the case where
// Rin and Rout are the same and should not be reversed.
}
}
delayed()->
or3( Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low
// shift >= 32 bits, Ralt_count = Rcount-32
}
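
// Illustrative aside: the double-register left shift above, in C
// (hypothetical helper). The two-step transfer shift (by 31-count, then by
// one more) mirrors the trick in the comments: a single shift by 32-count
// would misbehave when count is 0.
static inline void lshl64_sketch(uint32_t& hi, uint32_t& lo, int count) {
  count &= 63;
  if (count >= 32) {                            // Ralt_count = count - 32 path
    hi = lo << (count - 32);
    lo = 0;
  } else {
    uint32_t xfer = (lo >> (31 - count)) >> 1;  // bits moving from lo into hi
    hi = (hi << count) | xfer;
    lo = lo << count;
  }
}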
assert( Ralt_count != Rin_high
&& Ralt_count != Rin_low
&& Ralt_count != Rcount
&& Rxfer_bits != Rin_low
&& Rxfer_bits != Rin_high
&& Rxfer_bits != Rcount
&& Rxfer_bits != Rout_high
"register alias checks");
// This code can be optimized to use the 64 bit shifts in V9.
// Here we use the 32 bit shifts.
// shift < 32 bits, Ralt_count = Rcount-31
// We get the transfer bits by shifting left by 32-count the high
// register. This is done by shifting left by 31-count and then by one
// more to take care of the special (rare) case where count is zero
// (shifting by 32 would not work).
neg( Ralt_count );
}
// The order of the next two instructions is critical in the case where
// Rin and Rout are the same and should not be reversed.
}
delayed()->
// shift >= 32 bits, Ralt_count = Rcount-32
}
assert( Ralt_count != Rin_high
&& Ralt_count != Rin_low
&& Ralt_count != Rcount
&& Rxfer_bits != Rin_low
&& Rxfer_bits != Rin_high
&& Rxfer_bits != Rcount
&& Rxfer_bits != Rout_high
"register alias checks");
// This code can be optimized to use the 64 bit shifts in V9.
// Here we use the 32 bit shifts.
// shift < 32 bits, Ralt_count = Rcount-31
// We get the transfer bits by shifting left by 32-count the high
// register. This is done by shifting left by 31-count and then by one
// more to take care of the special (rare) case where count is zero
// (shifting by 32 would not work).
neg( Ralt_count );
}
// The order of the next two instructions is critical in the case where
// Rin and Rout are the same and should not be reversed.
}
delayed()->
// shift >= 32 bits, Ralt_count = Rcount-32
}
#ifdef _LP64
}
#endif
switch (size_in_bytes) {
default: ShouldNotReachHere();
}
}
if (VM_Version::v9_instructions_work()) {
} else {
//fb(lt, true, pn, done); delayed()->set( -1, Rresult );
}
}
{
if (VM_Version::v9_instructions_work()) {
} else {
if (w == FloatRegisterImpl::S) {
} else if (w == FloatRegisterImpl::D) {
// number() does a sanity check on the alignment.
} else {
// number() does a sanity check on the alignment.
Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
}
}
}
{
if (VM_Version::v9_instructions_work()) {
} else {
if (w == FloatRegisterImpl::S) {
} else if (w == FloatRegisterImpl::D) {
// number() does a sanity check on the alignment.
} else {
// number() does a sanity check on the alignment.
Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
}
}
}
{
if (VM_Version::v9_instructions_work()) {
} else {
if (w == FloatRegisterImpl::S) {
} else if (w == FloatRegisterImpl::D) {
// number() does a sanity check on the alignment.
} else {
// number() does a sanity check on the alignment.
Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
}
}
}
void MacroAssembler::save_all_globals_into_locals() {
}
void MacroAssembler::restore_globals_from_locals() {
}
// Use for 64 bit operation.
void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
{
// store ptr_reg as the new top value
#ifdef _LP64
#else
#endif // _LP64
}
// [RGV] This routine does not handle 64 bit operations.
// use casx_under_lock() or casx directly!!!
void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
{
// store ptr_reg as the new top value
if (VM_Version::v9_instructions_work()) {
} else {
// If the register is not an out nor global, it is not visible
// after the save. Allocate a register for it, save its
// value in the register save area (the save may not flush
// registers to the save area).
} else {
}
} else {
}
} else {
}
save_frame();
if (top_ptr_reg_after_save == L0) {
}
if (top_reg_after_save == L1) {
}
if (ptr_reg_after_save == L2) {
}
// Initialize yield counter
if(use_call_vm) {
Untested("Need to verify global reg consistancy");
} else {
// Save the regs and make space for a C call
restore();
}
// reset the counter
// try to get lock
// did we get the lock?
// yes, got lock. do we have the same top?
// yes, same top.
restore();
}
}
int offset) {
if (value != 0)
// load indirectly to solve generation ordering problem
load_ptr_contents(a, tmp);
#ifdef ASSERT
#endif
if (offset != 0)
return RegisterOrConstant(tmp);
}
RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
// Do nothing, just move value.
if (s1.is_register()) {
if (d.is_constant()) d = temp;
return d;
} else {
return s1;
}
}
if (s1.is_register()) {
if (d.is_constant()) d = temp;
return d;
} else {
if (s2.is_register()) {
if (d.is_constant()) d = temp;
return d;
} else {
return res;
}
}
}
RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
// Do nothing, just move value.
if (s1.is_register()) {
if (d.is_constant()) d = temp;
return d;
} else {
return s1;
}
}
if (s1.is_register()) {
if (d.is_constant()) d = temp;
return d;
} else {
if (s2.is_register()) {
if (d.is_constant()) d = temp;
return d;
} else {
return res;
}
}
}
RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
// Do nothing, just move value.
if (s1.is_register()) {
if (d.is_constant()) d = temp;
return d;
} else {
return s1;
}
}
if (s1.is_register()) {
if (d.is_constant()) d = temp;
return d;
} else {
if (s2.is_register()) {
if (d.is_constant()) d = temp;
return d;
} else {
return res;
}
}
}
// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
"caller must use same register for non-constant itable index as for method");
// Compute start of first itableOffsetEntry (which is at the end of the vtable)
// %%% We should store the aligned, prescaled offset in the klassoop.
// Then the next several instructions would fold away.
int itb_offset = vtable_base;
if (round_to_unit != 0) {
// hoist first instruction of round_to(scan_temp, BytesPerLong):
}
if (round_to_unit != 0) {
// Round up to align_object_offset boundary
// see code for instanceKlass::start_of_itable!
// Was: round_to(scan_temp, BytesPerLong);
// Hoisted: add(scan_temp, BytesPerLong-1, scan_temp);
}
// Adjust recv_klass by scaled itable_index, so we can free itable_index.
itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
// for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
// if (scan->interface() == intf) {
// result = (klass + scan->offset() + itable_index);
// }
// }
// %%%% Could load both offset and interface in one ldx, if they were
// in the opposite order. This would save a load.
// Check that this entry is non-null. A null entry means that
// the receiver class doesn't implement the interface, and wasn't the
// same as when the caller was compiled.
if (peel) {
} else {
// (invert the test to fall through to found_method...)
}
if (!peel) break;
}
// Got a hit.
// scan_temp[-scan_step] points to the vtable offset we need
ito_offset -= scan_step;
}
NULL, &L_pop_to_failure);
// on success:
restore();
// on failure:
restore();
}
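
// Illustrative aside: the itable scan that the pseudo-code comment above
// describes, with hypothetical C types standing in for the emitted assembly.
struct ItableOffsetEntrySketch { void* interface_klass; int offset; };
static void* lookup_itable_sketch(char* recv_klass, ItableOffsetEntrySketch* scan,
                                  void* intf, int scaled_itable_index) {
  for (; scan->interface_klass != 0; scan++) {   // null entry terminates the list
    if (scan->interface_klass == intf) {
      return recv_klass + scan->offset + scaled_itable_index;  // method entry
    }
  }
  return 0;  // receiver class doesn't implement the interface
}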
bool need_slow_path = (must_load_sco ||
if (super_check_offset.is_register()) {
} else if (must_load_sco) {
}
int label_nulls = 0;
"at most one NULL in the batch, usually");
// Support for the instanceof hack, which uses delay slots to
// set a destination register to zero or one.
#define BOOL_SET(bool_value) \
if (do_bool_sets && bool_value >= 0) \
#define DELAYED_BOOL_SET(bool_value) \
if (do_bool_sets && bool_value >= 0) \
// Hacked ba(), which may only be used just before L_fallthrough.
if (&(label) == &L_fallthrough) { \
BOOL_SET(bool_value); \
} else { \
}
// If the pointers are equal, we are done (e.g., String[] elements).
// This self-check enables sharing of secondary supertype arrays among
// non-primary types such as array-of-interface. Otherwise, each such
// type would need its own customized SSA.
// We move this check to the front of the fast path because many
// type checks are in fact trivially successful in this manner,
// so we get a nicely predicted branch right at the start of the check.
DELAYED_BOOL_SET(1);
// Check the supertype display:
if (must_load_sco) {
// The super check offset is always positive...
// super_check_offset is register.
}
// This check has worked decisively for primary supers.
// Secondary supers are sought in the super_cache ('super_cache_addr').
// (Secondary supers are interfaces and very deeply nested subtypes.)
// This works in the same check above because of a tricky aliasing
// between the super_cache and the primary super display elements.
// (The 'super_check_addr' can address either, as the case requires.)
// Note that the cache is updated below if it does not help us find
// what we need immediately.
// So if it was a primary super, we can just fail immediately.
// Otherwise, it's the slow path for us (no success at this point).
if (super_check_offset.is_register()) {
// if !do_bool_sets, sneak the next cmp into the delay slot:
if (L_failure == &L_fallthrough) {
BOOL_SET(0); // fallthrough on failure
} else {
DELAYED_BOOL_SET(0);
}
// Need a slow path; fast failure is impossible.
if (L_slow_path == &L_fallthrough) {
DELAYED_BOOL_SET(1);
} else {
}
} else {
// No slow path; it's a fast decision.
if (L_failure == &L_fallthrough) {
DELAYED_BOOL_SET(1);
BOOL_SET(0);
} else {
DELAYED_BOOL_SET(0);
}
}
}
int label_nulls = 0;
// a couple of useful fields in sub_klass:
// Do a linear scan of the secondary super-klass chain.
// This code is rarely used, so simplicity is a virtue here.
#ifndef PRODUCT
#endif
// We will consult the secondary-super array.
// Compress superclass if necessary.
bool decode_super_klass = false;
if (UseCompressedOops) {
} else {
decode_super_klass = true; // scarce temps!
}
// The superclass is never null; it would be a basic system error if a null
// pointer were to sneak in here. Note that we have already loaded the
// Klass::super_check_offset from the super_klass in the fast path,
// so if there is a null in that register, we are already in the afterlife.
}
// Load the array length. (Positive movl does right thing on LP64.)
// Check for empty secondary super list
// Top of search loop
// Skip the array header in all array accesses.
// Load next super to check
if (UseCompressedOops) {
// Don't use load_heap_oop; we don't want to decode the element.
} else {
}
// Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
// A miss means we are NOT a subtype and need to keep looping
// Falling out the bottom means we found a hit; we ARE a subtype
// Success. Cache the super we found and proceed in triumph.
if (L_success != &L_fallthrough) {
}
}
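
// Illustrative aside: the slow-path linear scan the comments above describe,
// as plain C over a hypothetical secondary-supers array. The real code also
// stores a hit into the super_cache so the fast path wins next time.
static bool is_subtype_slow_sketch(void** secondary_supers, int len, void* super) {
  for (int i = 0; i < len; i++) {
    if (secondary_supers[i] == super) return true;   // hit
  }
  return false;                                      // miss: not a subtype
}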
// compare method type against that of the receiver
RegisterOrConstant mhtype_offset = delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg);
}
// A method handle has a "vmslots" field which gives the size of its
// argument list in JVM stack slots. This field is either located directly
// in every method handle, or else is indirectly accessed through the
// method handle's MethodType. This macro hides the distinction.
// load mh.type.form.vmslots
if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
// hoist vmslots into every mh to avoid dependent load chain
ld( Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
} else {
load_heap_oop(Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)), temp2_reg);
load_heap_oop(Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)), temp2_reg);
ld( Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
}
}
void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop) {
// pick out the interpreted side of the handler
// NOTE: vmentry is not an oop!
// off we go...
// for the various stubs which take control at this point,
// see MethodHandles::generate_method_handle_stub
// Some callers can fill the delay slot.
if (emit_delayed_nop) {
}
}
int extra_slot_offset) {
// cf. TemplateTable::prepare_invoke(), if (load_receiver).
if (arg_slot.is_constant()) {
return offset;
} else {
if (offset != 0)
return temp;
}
}
int extra_slot_offset) {
}
if (PrintBiasedLockingStatistics) {
}
// Biased locking
// See whether the lock is currently biased toward our thread and
// whether the epoch is still valid
// Note that the runtime guarantees sufficient alignment of JavaThread
// pointers to allow age to be placed into low bits
assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
// Reload mark_reg as we may need it later
}
// At this point we know that the header has the bias pattern and
// that we are not the bias owner in the current epoch. We need to
// figure out more details about the state of the header in order to
// know what operations can be legally performed on the object's
// header.
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
// Biasing is still enabled for this data type. See whether the
// epoch of the current bias is still valid, meaning that the epoch
// bits of the mark word are equal to the epoch bits of the
// prototype header. (Note that the prototype header's epoch bits
// only change at a safepoint.) If not, attempt to rebias the object
// toward the current thread. Note that we must be absolutely sure
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
// The epoch of the current bias is still valid but we know nothing
// about the owner; it might be set or it might be clear. Try to
// acquire the bias of the object using an atomic operation. If this
// fails we will go in to the runtime to revoke the object's bias.
// Note that we first construct the presumed unbiased header so we
// don't accidentally blow away another thread's valid bias.
markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
mark_reg);
// If the biasing toward our thread failed, this means that
// another thread succeeded in biasing it toward itself and we
// need to revoke that bias. The revocation will occur in the
// interpreter runtime in the slow case.
cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
}
}
// At this point we know the epoch has expired, meaning that the
// current "bias owner", if any, is actually invalid. Under these
// circumstances _only_, we are allowed to use the current header's
// value as the comparison value when doing the cas to acquire the
// bias in the current epoch. In other words, we allow transfer of
// the bias from one thread to another directly in this situation.
//
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
// If the biasing toward our thread failed, this means that
// another thread succeeded in biasing it toward itself and we
// need to revoke that bias. The revocation will occur in the
// interpreter runtime in the slow case.
cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg);
}
}
// The prototype mark in the klass doesn't have the bias bit set any
// more, indicating that objects of this data type are not supposed
// to be biased any more. We are going to try to reset the mark of
// this object to the prototype value and fall through to the
// CAS-based locking scheme. Note that if our CAS fails, it means
// that another thread raced us for the privilege of revoking the
// bias of this particular object, so it's okay to continue in the
// normal locking code.
//
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
// Fall through to the normal CAS-based lock, because no matter what
// the result of the above CAS, some thread must have succeeded in
// removing the bias bit from the object's header.
}
}
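
// Illustrative aside: the bias-pattern test the comments above rely on. Per
// the markOop layout, the low three bits are lock(2)+biased(1) and the value
// 0b101 marks a biased header (hypothetical helper, values per markOop.hpp).
static inline bool has_bias_pattern_sketch(uintptr_t mark) {
  const uintptr_t biased_lock_mask_in_place = 0x7;
  const uintptr_t biased_lock_pattern       = 0x5;   // 0b101
  return (mark & biased_lock_mask_in_place) == biased_lock_pattern;
}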
bool allow_delay_slot_filling) {
// Check for biased locking unlock case, which is a no-op
// Note: we do not have to check the thread ID for two reasons.
// First, the interpreter checks for IllegalMonitorStateException at
// a higher level. Second, if the bias was revoked while we held the
// lock, the object could not be rebiased toward another thread, so
// the bias bit would be clear.
delayed();
if (!allow_delay_slot_filling) {
nop();
}
}
// CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
// Solaris/SPARC's "as".  Another apt name would be cas_ptr().
casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()) ;
}
// compiler_lock_object() and compiler_unlock_object() are direct transliterations
// of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments.
// The code could be tightened up considerably.
//
// box->dhw disposition - post-conditions at DONE_LABEL.
// - Successful inflated lock: box->dhw != 0.
// Any non-zero value suffices.
// Consider G2_thread, rsp, boxReg, or unused_mark()
// - Successful Stack-lock: box->dhw == mark.
// box->dhw must contain the displaced mark word value
// - Failure -- icc.ZFlag == 0 and box->dhw is undefined.
// The slow-path fast_enter() and slow_enter() operators
// are responsible for setting box->dhw = NonZero (typically ::unused_mark).
// - Biased: box->dhw is undefined
//
// SPARC refworkload performance - specifically jetstream and scimark - are
// extremely sensitive to the size of the code emitted by compiler_lock_object
// and compiler_unlock_object. Critically, the key factor is code size, not path
// length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
// effect).
bool try_bias) {
}
if (EmitSync & 1) {
return ;
}
if (EmitSync & 2) {
// Fetch object's markword
if (try_bias) {
}
// Save Rbox in Rscratch to be used for the cas operation
// set Rmark to markOop | markOopDesc::unlocked_value
// Initialize the box. (Must happen before we update the object mark!)
// compare object markOop with Rmark and if equal exchange Rscratch with object markOop
// hence we are done
#ifdef _LP64
#endif
// we did not find an unlocked object so see if this is a recursive case
// sub(Rscratch, SP, Rscratch);
return ;
}
if (EmitSync & 256) {
// Triage: biased, stack-locked, neutral, inflated
if (try_bias) {
// Invariant: if control reaches this point in the emitted stream
// then Rmark has not been modified.
}
// Store mark into displaced mark field in the on-stack basic-lock "box"
// Critically, this must happen before the CAS
// Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
delayed() ->
// Try stack-lock acquisition.
// Beware: the 1st instruction is in a delay slot
// Stack-lock attempt failed - check for recursive stack-lock.
// See the comments below about how we might remove this case.
#ifdef _LP64
#endif
bind (IsInflated) ;
if (EmitSync & 64) {
// If m->owner != null goto IsLocked
// Pessimistic form: Test-and-CAS vs CAS
// The optimistic form avoids RTS->RTO cache line upgrades.
// m->owner == null : it's unlocked.
}
// Try to CAS m->owner from null to Self
// Invariant: if we acquire the lock then _recursions should be 0.
// Intentional fall-through into done
} else {
// Aggressively avoid the Store-before-CAS penalty
// Defer the store into box->dhw until after the CAS
// Anticipate CAS -- Avoid RTS->RTO upgrade
// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ;
// Triage: biased, stack-locked, neutral, inflated
if (try_bias) {
// Invariant: if control reaches this point in the emitted stream
// then Rmark has not been modified.
}
delayed()-> // Beware - dangling delay-slot
// Try stack-lock acquisition.
// Transiently install BUSY (0) encoding in the mark word.
// if the CAS of 0 into the mark was successful then we execute:
// ST box->dhw = mark -- save fetched mark in on-stack basiclock box
// ST obj->mark = box -- overwrite transient 0 value
// This presumes TSO, of course.
// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ;
delayed() ->
}
delayed() ->
// Stack-lock attempt failed - check for recursive stack-lock.
// Tests show that we can remove the recursive case with no impact
// on refworkload 0.83. If we need to reduce the size of the code
// emitted by compiler_lock_object() the recursive case is perfect
// candidate.
//
// A more extreme idea is to always inflate on stack-lock recursion.
// This lets us eliminate the recursive checks in compiler_lock_object
// and compiler_unlock_object and the (box->dhw == 0) encoding.
// A brief experiment - requiring changes to synchronizer.cpp, the interpreter,
// and compiled code - showed a performance *increase*. In the same experiment
// I eliminated the fast-path stack-lock code from the interpreter and always
// passed control to the "slow" operators in synchronizer.cpp.
// RScratch contains the fetched obj->mark value from the failed CASN.
#ifdef _LP64
#endif
// Accounting needs the Rscratch register
} else {
}
bind (IsInflated) ;
if (EmitSync & 64) {
// If m->owner != null goto IsLocked
// Test-and-CAS vs CAS
// Pessimistic form avoids futile (doomed) CAS attempts
// The optimistic form avoids RTS->RTO cache line upgrades.
// m->owner == null : it's unlocked.
}
// Try to CAS m->owner from null to Self
// Invariant: if we acquire the lock then _recursions should be 0.
// ST box->displaced_header = NonZero.
// Any non-zero value suffices:
// unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
// Intentional fall-through into done
}
}
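
// Illustrative aside: the stack-lock fast path in C-like form (hypothetical
// helper; the emitted code does this with a SPARC CAS and condition codes).
static bool stack_lock_sketch(volatile intptr_t* obj_mark, intptr_t* box_dhw,
                              intptr_t box_addr) {
  intptr_t mark = *obj_mark | 1;   // presumed unlocked mark (low bit set)
  *box_dhw = mark;                 // save displaced mark in the box BEFORE the CAS
  // CAS obj->mark from the unlocked mark to the box address; success means we
  // own the stack lock, failure falls to the recursive/inflated checks.
  return __sync_bool_compare_and_swap(obj_mark, mark, box_addr);
}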
bool try_bias) {
if (EmitSync & 4) {
return ;
}
if (EmitSync & 8) {
if (try_bias) {
}
// Test first if it is a fast recursive unlock
// Check if it is still a lightweight lock; this is true if we see
// the stack address of the basicLock in the markOop of the object
return ;
}
// Beware ... If the aggregate size of the code emitted by CLO and CUO
// is too large, performance rolls abruptly off a cliff.
// This could be related to inlining policies, code cache management, or
// I$ effects.
if (try_bias) {
// TODO: eliminate redundant LDs of obj->mark
}
// It's inflated
// Conceptually we need a #loadstore|#storestore "release" MEMBAR before
// the ST of 0 into _owner which releases the lock. This prevents loads
// and stores within the critical section from reordering (floating)
// past the store that releases the lock. But TSO is a strong memory model
// and that particular flavor of barrier is a noop, so we can safely elide it.
// Note that we use 1-0 locking by default for the inflated case. We
// close the resultant (and rare) race by having contending threads in
// monitorenter periodically poll _owner.
delayed()->
if (EmitSync & 65536) {
delayed()->
// invert icc.zf and goto done
} else {
delayed()->
}
// Consider: we could replace the expensive CAS in the exit
// path with a simple ST of the displaced mark value fetched from
// the on-stack basiclock box. That admits a race where a thread T2
// in the slow lock path -- inflating with monitor M -- could race a
// thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
// More precisely T1 in the stack-lock unlock path could "stomp" the
// inflated mark value M installed by T2, resulting in an orphan
// object monitor M and T2 becoming stranded. We can remedy that situation
// by having T2 periodically poll the object's mark word using timed wait
// operations. If T2 discovers that a stomp has occurred it vacates
// the monitor M and wakes any other threads stranded on the now-orphan M.
// In addition the monitor scavenger, which performs deflation,
// would also need to check for orphan monitors and stranded threads.
//
// Finally, inflation is also used when T2 needs to assign a hashCode
// to O and O is stack-locked by T1. The "stomp" race could cause
// an assigned hashCode value to be lost. We can avoid that condition
// and provide the necessary hashCode stability invariants by ensuring
// that hashCode generation is idempotent between copying GCs.
// For example we could compute the hashCode of an object O as
// O's heap address XOR some high quality RNG value that is refreshed
// at GC-time. The monitor scavenger would install the hashCode
// found in any orphan monitors. Again, the mechanism admits a
// lost-update "stomp" WAW race but detects and recovers as needed.
//
// A prototype implementation showed excellent results, although
// the scavenger and timeout code was rather involved.
// Intentional fall through into done ...
}
void MacroAssembler::print_CPU_state() {
// %%%%% need to implement this
}
// %%%%% need to implement this
}
void MacroAssembler::push_IU_state() {
// %%%%% need to implement this
}
void MacroAssembler::pop_IU_state() {
// %%%%% need to implement this
}
void MacroAssembler::push_FPU_state() {
// %%%%% need to implement this
}
void MacroAssembler::pop_FPU_state() {
// %%%%% need to implement this
}
void MacroAssembler::push_CPU_state() {
// %%%%% need to implement this
}
void MacroAssembler::pop_CPU_state() {
// %%%%% need to implement this
}
void MacroAssembler::verify_tlab() {
#ifdef ASSERT
if (UseTLAB && VerifyOops) {
save_frame(0);
stop("assert(top >= start)");
stop("assert(top <= end)");
stop("assert(aligned)");
restore();
}
#endif
}
void MacroAssembler::eden_allocate(
int con_size_in_bytes, // object size in bytes if known at compile time
){
// make sure arguments make sense
assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
// No allocation in the shared eden.
} else {
// get eden boundaries
// note: we need both top & top_addr!
// try to allocate
#ifdef ASSERT
// make sure eden top is properly aligned
{
Label L;
stop("eden top is not properly aligned");
bind(L);
}
#endif // ASSERT
if (var_size_in_bytes->is_valid()) {
// size is unknown at compile time
br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case
} else {
// size is known at compile time
br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case
}
// Compare obj with the value at top_addr; if still equal, swap the value of
// end with the value at top_addr. If not equal, read the value at top_addr
// into end.
casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
// if someone beat us on the allocation, try again, otherwise continue
#ifdef ASSERT
// make sure eden top is properly aligned
{
Label L;
stop("eden top is not properly aligned");
bind(L);
}
#endif // ASSERT
}
}
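
// Illustrative aside: the CAS retry loop described by the comments above, in
// C (hypothetical helper; the real code branches to slow_case instead of
// returning null, and uses casx under a lock on V8).
static char* eden_allocate_sketch(char* volatile* top_addr, char* end, size_t size) {
  for (;;) {
    char* obj = *top_addr;
    char* new_top = obj + size;
    if (new_top > end) return 0;   // not enough space: take the slow case
    // If top is still obj, install new_top and we own [obj, new_top).
    // Otherwise someone beat us to the allocation; reload top and try again.
    if (__sync_bool_compare_and_swap(top_addr, obj, new_top)) return obj;
  }
}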
void MacroAssembler::tlab_allocate(
int con_size_in_bytes, // object size in bytes if known at compile time
){
// make sure arguments make sense
assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
verify_tlab();
// calculate amount of free space
if (var_size_in_bytes == noreg) {
} else {
}
// calculate the new top pointer
if (var_size_in_bytes == noreg) {
} else {
}
#ifdef ASSERT
// make sure new free pointer is properly aligned
{
Label L;
stop("updated TLAB free is not properly aligned");
bind(L);
}
#endif // ASSERT
// update the tlab top pointer
verify_tlab();
}
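
// Illustrative aside: unlike eden, TLAB allocation needs no atomics, since
// the buffer is thread-private; check free space and bump top (sketch).
static char* tlab_allocate_sketch(char** top_p, char* end, size_t size) {
  char* obj = *top_p;
  if ((size_t)(end - obj) < size) return 0;  // slow case: refill or go to eden
  *top_p = obj + size;                       // bump the thread-local top
  return obj;
}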
// No allocation in the shared eden.
}
// calculate amount of free space
// Retain tlab and allocate object in shared space if
// the amount free in the tlab is too large to discard.
// increment waste limit to prevent getting stuck on this slow path
if (TLABStats) {
// increment number of slow_allocations
}
if (TLABStats) {
// increment number of refills
// accumulate wastage
}
// if tlab is currently allocated (top or end != null) then
// fill [top, end + alignment_reserve) with array object
// set klass to intArrayKlass
// store klass last. concurrent gcs assumes klass length is valid if
// klass field is not null.
// refill the tlab with an eden allocation
// add object_size ??
#ifdef ASSERT
// check that tlab_size (t1) is still valid
{
stop("assert(t1 == tlab_size)");
}
#endif // ASSERT
verify_tlab();
}
switch (cond) {
// Note some conditions are synonyms for others
}
}
Label L;
bind(L);
}
}
}
}
SkipIfEqual::~SkipIfEqual() {
}
// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tsp and scratch.
// Use stack pointer in temp stack pointer
// Bang stack for total size given plus stack shadow page size.
// Bang one page at a time because a large size can overflow yellow and
// red zones (the bang will fail but stack overflow handling can't tell that
// it was a stack overflow bang vs a regular segv).
// Bang down shadow pages too.
// The -1 because we already subtracted 1 page.
for (int i = 0; i< StackShadowPages-1; i++) {
}
}
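
// Illustrative aside: the banging loop above in C (hypothetical helper).
// Touch one word per page walking down from SP so each page faults
// individually, then bang the shadow pages below the requested size.
static void bang_stack_sketch(volatile char* sp, long size, int page_size,
                              int shadow_pages) {
  long off;
  for (off = page_size; off <= size; off += page_size) {
    sp[-off] = 0;                               // bang one page at a time
  }
  for (int i = 0; i < shadow_pages - 1; i++) {  // -1: one page already banged
    off += page_size;
    sp[-off] = 0;
  }
}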
///////////////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC
static uint num_stores = 0;
static uint num_null_pre_stores = 0;
static void count_null_pre_vals(void* pre_val) {
num_stores++;
if ((num_stores % 1000000) == 0) {
}
}
static address satb_log_enqueue_with_frame = 0;
static u_char* satb_log_enqueue_with_frame_end = 0;
static address satb_log_enqueue_frameless = 0;
static u_char* satb_log_enqueue_frameless_end = 0;
// The calls to this don't work. We'd need to do a fair amount of work to
// make it work.
static void check_index(int ind) {
"Invariants.");
}
static void generate_satb_log_enqueue(bool with_frame) {
if (with_frame) {
masm.save_frame(0);
} else {
}
int satb_q_index_byte_offset =
int satb_q_buf_byte_offset =
"check sizes in assembly below");
// If the branch is taken, no harm in executing this in the delay slot.
if (!with_frame) {
// Use return-from-leaf
} else {
// Not delayed.
}
if (with_frame) {
}
// This should be rare enough that we can afford to save all the
// scratch registers that the calling context might be using.
// We need the value of O0 above (for the write into the buffer), so we
// save and restore it.
// Since the call will overwrite O7, we save and restore that, as well.
if (with_frame) {
} else {
}
}
static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
if (with_frame) {
if (satb_log_enqueue_with_frame == 0) {
if (G1SATBPrintStubs) {
tty);
}
}
} else {
if (satb_log_enqueue_frameless == 0) {
if (G1SATBPrintStubs) {
tty);
}
}
}
}
void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs) {
if (G1DisablePreBarrier) return;
// satb_log_barrier(tmp, obj, offset, preserve_o_regs);
// satb_log_barrier_work0(tmp, filtered);
tmp);
} else {
"Assumption");
tmp);
}
// Check on whether to annul.
// satb_log_barrier_work1(tmp, offset);
} else {
}
} else {
}
// satb_log_barrier_work2(obj, tmp, offset);
// satb_log_barrier_work3(tmp, filtered, preserve_o_regs);
save_frame(0);
// Save G-regs that target may use.
// Restore G-regs that target may have used.
}
// Check on whether to annul.
// OK, it's not filtered, so we'll need to call enqueue. In the normal
// case, pre_val will be a scratch G-reg, but there's some cases in which
// it's an O-reg. In the first case, do a normal call. In the latter,
// do a save here and call the frameless version.
"Or we need to think harder.");
generate_satb_log_enqueue_if_necessary(true); // with frame.
} else {
generate_satb_log_enqueue_if_necessary(false); // frameless.
save_frame(0);
restore();
}
}
static jint num_ct_writes = 0;
static jint num_ct_writes_filtered_in_hr = 0;
static jint num_ct_writes_filtered_null = 0;
if (filter_val == NULL) {
} else {
}
}
if ((num_ct_writes % 1000000) == 0) {
" (%5.2f%% intra-HR, %5.2f%% null).",
100.0*(float)num_ct_writes_filtered_in_hr/
(float)num_ct_writes,
100.0*(float)num_ct_writes_filtered_null/
(float)num_ct_writes);
}
}
static address dirty_card_log_enqueue = 0;
static u_char* dirty_card_log_enqueue_end = 0;
// This gets to assume that o0 contains the object address.
#ifdef _LP64
#else
#endif
// Get O1 + O2 into a reg by itself -- useful in the take-the-branch
// case, harmless if not.
// We didn't take the branch, so we're already dirty: return.
// Use return-from-leaf
// Not dirty.
// First, dirty it.
// If the branch is taken, no harm in executing this in the delay slot.
// Use return-from-leaf
// This should be rare enough that we can afford to save all the
// scratch registers that the calling context might be using.
// We need the value of O3 above (for the write into the buffer), so we
// save and restore it.
// Since the call will overwrite O7, we save and restore that, as well.
// XXX Should have a guarantee here about not going off the end!
// Does it already do so? Do an experiment...
}
static inline void
if (dirty_card_log_enqueue == 0) {
if (G1SATBPrintStubs) {
tty);
}
}
}
MacroAssembler* post_filter_masm = this;
if (G1DisablePostBarrier) return;
if (G1RSBarrierRegionFilter) {
#ifdef _LP64
#else
#endif
if (G1PrintCTFilterStats) {
// This is a sleazy hack: I'm temporarily hijacking G2, which I
// promise to restore.
save_frame(0);
// Save G-regs that target may use.
// Restore G-regs that target may have used.
}
// XXX Should I predict this taken or not? Does it matter?
}
// If the "store_addr" register is an "in" or "local" register, move it to
// a scratch reg so we can pass it as an argument.
// Pick a scratch register different from "tmp".
// Make sure we use up the delay slot!
if (use_scr) {
} else {
post_filter_masm->nop();
}
save_frame(0);
if (use_scr) {
} else {
}
restore();
}
#endif // SERIALGC
///////////////////////////////////////////////////////////////////////////////////
// If we're writing constant NULL, we can skip the write barrier.
}
// The number of bytes in this code is used by
// MachCallDynamicJavaNode::ret_addr_offset()
// if this changes, change that.
if (UseCompressedOops) {
} else {
}
}
if (UseCompressedOops) {
} else {
}
}
if (UseCompressedOops) {
assert(s != d, "not enough registers");
}
}
if (UseCompressedOops) {
lduw(s, d);
decode_heap_oop(d);
} else {
ld_ptr(s, d);
}
}
if (UseCompressedOops) {
decode_heap_oop(d, d);
} else {
}
}
if (UseCompressedOops) {
decode_heap_oop(d, d);
} else {
}
}
}
if (UseCompressedOops) {
encode_heap_oop(d);
} else {
}
}
if (UseCompressedOops) {
encode_heap_oop(d);
} else {
}
}
if (UseCompressedOops) {
encode_heap_oop(d);
} else {
}
}
return;
}
// optimize for frequent case src == dst
} else {
// could be moved before branch, and annulate delay,
// but may add some unneeded work decoding null
}
}
verify_oop(r);
sub(r, G6_heapbase, r);
srlx(r, LogMinObjAlignmentInBytes, r);
}
} else {
}
}
// Same algorithm as oops.inline.hpp decode_heap_oop.
}
}
// Do not add assert code to this unless you change vtableStubs_sparc.cpp
// pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
sllx(r, LogMinObjAlignmentInBytes, r);
add(r, G6_heapbase, r);
}
// Do not add assert code to this unless you change vtableStubs_sparc.cpp
// pd_code_size_limit.
// Also do not verify_oop as this is called by verify_oop.
}
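
// Illustrative aside: the narrow-oop arithmetic the routines above emit
// (sub/srlx for encode, sllx/add for decode), as plain C helpers with
// hypothetical names.
static inline uint32_t encode_oop_sketch(uintptr_t oop, uintptr_t heapbase, int shift) {
  return (uint32_t)((oop - heapbase) >> shift);   // sub + srlx
}
static inline uintptr_t decode_oop_sketch(uint32_t narrow, uintptr_t heapbase, int shift) {
  return ((uintptr_t)narrow << shift) + heapbase; // sllx + add
}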
void MacroAssembler::reinit_heapbase() {
if (UseCompressedOops) {
// call indirectly to solve generation ordering problem
}
}
// Compare char[] arrays aligned to 4 bytes.
// Note: limit contains number of bytes (2*char_elements) != 0.
// compare the trailing char
// only one char ?
// word by word compare, don't need alignment check
// Shift ary1 and ary2 to the end of the arrays, negate limit
// annul LDUW if branch is not taken to prevent access past end of array
// Caller should set it:
// add(G0, 1, result); // equals
}
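
// Illustrative aside: the comparison strategy above in C -- compare four
// bytes (two chars) at a time, then the odd trailing char (hypothetical
// helper; the real code also negates limit and annuls the delay-slot load).
static bool char_arrays_equal_sketch(const uint16_t* a, const uint16_t* b, int nchars) {
  int i = 0;
  for (; i + 1 < nchars; i += 2) {        // word-by-word, arrays 4-byte aligned
    if (*(const uint32_t*)(a + i) != *(const uint32_t*)(b + i)) return false;
  }
  return (i == nchars) || (a[i] == b[i]); // compare the trailing char, if any
}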