/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolderOop.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#ifdef SHARK
#include "compiler/compileBroker.hpp"
#include "shark/sharkCompiler.hpp"
#endif
class RegisterSaver {
// Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
// The Oregs are problematic. In the 32bit build the compiler can
// have O registers live with 64 bit quantities. A window save will
// cut the heads off of the registers. We have to do a very extensive
// stack dance to save and restore these properly.
// Note that the Oregs problem only exists if we block at either a polling
// page exception or a compiled code safepoint that was not originally a call,
// or if we deoptimize following one of these kinds of safepoints.
// Lots of registers to save. For all builds, a window save will preserve
// the %i and %l registers. For the 32-bit longs-in-two entries and 64-bit
// builds a window-save will preserve the %o registers. In the LION build
// we need to save the 64-bit %o registers which requires we save them
// before the window-save (as then they become %i registers and get their
// heads chopped off on interrupt). We have to save some %g registers here
// as well.
enum {
// This frame's save area. Includes extra space for the native call:
// vararg's layout space and the like. Briefly holds the caller's
// register save area.
// Make sure save locations are always 8-byte aligned.
// We can't use round_to here because it doesn't produce a compile-time constant.
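// Illustrative only (not part of the saved code): an 8-byte-aligned size can
// still be expressed as a compile-time constant with plain masking, e.g.
//   aligned_size = (raw_size_in_bytes + 7) & ~7
// where raw_size_in_bytes stands in for whatever byte count is being rounded.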
};
public:
static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
// During deoptimization only the result registers need to be restored;
// all the other values have already been extracted.
};
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
// Record volatile registers as callee-save values in an OopMap so their save locations will be
// propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
// deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
// are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
// (as the stub's I's) when the runtime routine called by the stub creates its frame.
int i;
// Always make the frame size 16-byte aligned.
// OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
// CodeBlob frame size is in words.
// OopMap* map = new OopMap(*total_frame_words, 0);
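// Illustrative only: converting between the units mentioned above amounts to
//   frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
//   frame_size_in_words = frame_size_in_bytes / wordSize;
// where frame_size_in_bytes is a hypothetical name for the 16-byte-aligned total.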
#if !defined(_LP64)
// Save 64-bit O registers; they will get their heads chopped off on a 'save'.
#endif /* _LP64 */
#ifndef _LP64
// Reload the 64 bit Oregs. Although they are now Iregs we load them
// to Oregs here to avoid interrupts cutting off their heads
#endif /* _LP64 */
#ifdef _LP64
int debug_offset = 0;
#else
#endif
// Save the G's
// This is really a waste but we'll keep things as they were for now
if (true) {
#ifndef _LP64
#endif /* _LP64 */
}
// Save the flags
// Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
FloatRegister f = as_FloatRegister(i);
// Record as callee saved both halves of double registers (2 float registers).
offset += sizeof(double);
}
// And we're done.
return map;
}
// Pop the current frame and restore all the registers that we
// saved.
// Restore all the FP registers
}
// Restore the G's
// Note that G2 (AKA GThread) must be saved and restored separately.
// TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.
#if !defined(_LP64)
// Restore the 64-bit O's.
// And temporarily place them in TLS
#endif /* _LP64 */
// Restore flags
#if !defined(_LP64)
// Now reload the 64-bit Oregs after we've restored the window.
#endif /* _LP64 */
}
// Pop the current frame and restore the registers that might be holding
// a result.
#if !defined(_LP64)
// 32bit build returns longs in G1
// Retrieve the 64-bit O's.
// and save to TLS
#endif /* _LP64 */
#if !defined(_LP64)
// Now reload the 64-bit Oregs after we've restored the window.
#endif /* _LP64 */
}
// Is vector's size (in bytes) bigger than a size saved by default?
// 8 bytes FP registers are saved by default on SPARC.
// Note, MaxVectorSize == 8 on SPARC.
return size > 8;
}
// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
}
if (wordSize == 8) {
} else {
}
return ret;
}
// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the window
// top. VMRegImpl::stack0 refers to the first slot past the 16-word window,
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Register
// values 0-63 (up to RegisterImpl::number_of_registers) are the 64-bit
// integer registers. Values 64-95 are the (32-bit only) float registers.
// Each 32-bit quantity is given its own number, so the integer registers
// (in either 32- or 64-bit builds) use 2 numbers. For example, there is
// an O0-low and an O0-high. Essentially, all int register numbers are doubled.
// Register results are passed in O0-O5, for outgoing call arguments. To
// convert to incoming arguments, convert all O's to I's. The regs array
// refers to the low and high 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
// 32-bit value was passed). If both are VMRegImpl::Bad(), it means no value was
// passed (used as a placeholder for the other half of longs and doubles in
// the 64-bit build). regs[].second() is either VMRegImpl::Bad() or regs[].second() is
// regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
// Sparc never passes a value in regs[].second() but not regs[].first() (regs[].first()
// == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()) nor unrelated values in the
// same VMRegPair.
// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build.
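// Illustrative only: given the rules above, a caller can tell a 64-bit value
// from a 32-bit one by testing the pair, roughly
//   if (regs[i].first()->is_valid() && regs[i].second()->is_valid()) {
//     // 64-bit value occupying both halves of the pair
//   } else if (regs[i].first()->is_valid()) {
//     // 32-bit value; second() is VMRegImpl::Bad()
//   }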
// ---------------------------------------------------------------------------
// The compiled Java calling convention. The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs. There is
// no backing varargs store for values in registers.
// In the 32-bit build, longs are passed on the stack (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
int total_args_passed,
int is_outgoing) {
int int_reg = 0;
int flt_reg = 0;
int slot = 0;
for (int i = 0; i < total_args_passed; i++) {
switch (sig_bt[i]) {
case T_INT:
case T_SHORT:
case T_CHAR:
case T_BYTE:
case T_BOOLEAN:
#ifndef _LP64
case T_OBJECT:
case T_ARRAY:
case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
if (int_reg < int_reg_max) {
} else {
}
break;
#ifdef _LP64
case T_LONG:
// fall-through
case T_OBJECT:
case T_ARRAY:
case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
if (int_reg < int_reg_max) {
} else {
slot += 2;
}
break;
#else
case T_LONG:
// On 32-bit SPARC always put longs on the stack to keep the pressure off
// the integer argument registers; those should be used for oops.
slot += 2;
#endif
break;
case T_FLOAT:
if (flt_reg < flt_reg_max) {
} else {
}
break;
case T_DOUBLE:
flt_reg += 2;
} else {
slot += 2;
}
break;
case T_VOID:
break;
default:
break;
}
}
// Return the amount of stack space these arguments will need.
return slot;
}
// Helper class mostly to avoid passing masm everywhere, and handle
// store displacement overflow logic.
class AdapterGenerator {
void patch_callers_callsite();
// base+st_off points to top of argument
}
// Argument slot values may be loaded first into a register because
// they might not fit into displacement.
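// (SPARC load/store instructions encode only a signed 13-bit immediate
// displacement, so offsets outside roughly +/-4KB must first be materialized
// in a scratch register.)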
// Stores long into offset pointed to by base
const int st_off);
const int st_off);
const int st_off);
public:
void gen_c2i_adapter(int total_args_passed,
// VMReg max_arg,
int comp_args_on_stack, // VMRegStackSlots
Label& skip_fixup);
void gen_i2c_adapter(int total_args_passed,
// VMReg max_arg,
int comp_args_on_stack, // VMRegStackSlots
};
// Patch the caller's callsite with the entry to compiled code if it exists.
Label L;
// Call into the VM to patch the caller, then jump to compiled callee
// Must save all the live Gregs; the list is:
// G1: 1st Long arg (32bit build)
// G2: global allocated to TLS
// G3: used in inline cache check (scratch)
// G4: 2nd Long arg (32bit build);
// G5: used in inline cache check (methodOop)
// The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.
#ifdef _LP64
// mov(s,d)
// Must be a leaf call...
// can be very far once the blob has been relocated
#else
// Must be a leaf call...
__ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
#endif /* _LP64 */
}
}
}
// Stores long into offset pointed to by base
#ifdef _LP64
// In V9, longs are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot.
#else
#ifdef COMPILER2
// Misaligned store of 64-bit data
#else
if (is_stack) {
// Misaligned store of 64-bit data
} else {
}
#endif // COMPILER2
#endif // _LP64
}
const int st_off) {
}
const int st_off) {
}
// Stores into offset pointed to by base
#ifdef _LP64
// In V9, doubles are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot.
#else
// Need to marshal 64-bit value from misaligned Lesp loads
#endif
}
const int st_off) {
}
int total_args_passed,
// VMReg max_arg,
int comp_args_on_stack, // VMRegStackSlots
Label& L_skip_fixup) {
// Before we get into the guts of the C2I adapter, see if we should be here
// at all. We've come from compiled code and are attempting to jump to the
// interpreter, which means the caller made a static call to get here
// (vcalls always get a compiled target if there is one). Check for a
// compiled target. If there is one, we need to patch the caller's call.
// However we will run interpreted if we come thru here. The next pass
// thru the call site will run compiled. If we ran compiled here then
// we can (theoretically) do endless i2c->c2i->i2c transitions; in practice
// we can have at most one and don't need to play any tricks to keep
// from endlessly growing the stack.
//
// Actually if we detected that we had an i2c->c2i transition here we
// ought to be able to reset the world back to the state of the interpreted
// call and not bother building another interpreter arg area. We don't
// do that at this point.
// Since all args are passed on the stack, total_args_passed*wordSize is the
// space we need. Add in varargs area needed by the interpreter. Round up
// to stack alignment.
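// Illustrative only (hypothetical names, a sketch of the computation just
// described): the frame growth is on the order of
//   extraspace = round_to(total_args_passed * wordSize + varargs_area,
//                         2 * wordSize);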
const int varargs_area =
// Make some extra space on the stack.
// Write the args into the outgoing interpreter space.
for (int i = 0; i < total_args_passed; i++) {
continue;
}
}
if (r_1->is_Register()) {
} else {
}
} else {
} else {
}
}
}
// Load the interpreter entry point.
// Pass O5_savedSP as an argument to the interpreter.
// The interpreter will restore SP to this value before returning.
// Jump to the interpreter just as if interpreter was doing it.
// Setup Lesp for the call. Cannot actually set Lesp as the current Lesp
// (really L0) is in use by the compiled frame as a generic temp. However,
// the interpreter does not know where its args are without some kind of
// arg pointer being passed in. Pass it in Gargs.
}
static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
}
int total_args_passed,
// VMReg max_arg,
int comp_args_on_stack, // VMRegStackSlots
// Generate an I2C adapter: adjust the I-frame to make space for the C-frame
// layout. Lesp was saved by the calling I-frame and will be restored on
// return. Meanwhile, outgoing arg space is all owned by the callee
// C-frame, so we can mangle it at will. After adjusting the frame size,
// hoist register arguments and repack other args according to the compiled
// code convention. Finally, end in a jump to the compiled code. The entry
// point address is the start of the buffer.
// We will only enter here from an interpreted frame and never from after
// passing thru a c2i. Azul allowed this but we do not. If we lose the
// race and use a c2i we will remain interpreted for the race loser(s).
// This removes all sorts of headaches on the x86 side and also eliminates
// the possibility of having c2i -> i2c -> c2i -> ... endless transitions.
// More detail:
// Adapters can be frameless because they do not require the caller
// to perform additional cleanup work, such as correcting the stack pointer.
// An i2c adapter is frameless because the *caller* frame, which is interpreted,
// routinely repairs its own stack pointer (from interpreter_frame_last_sp),
// even if a callee has modified the stack pointer.
// A c2i adapter is frameless because the *callee* frame, which is interpreted,
// routinely repairs its caller's stack pointer (from sender_sp, which is set
// up via the senderSP register).
// In other words, if *either* the caller or callee is interpreted, we can
// get the stack pointer repaired after a call.
// This is why c2i and i2c adapters cannot be indefinitely composed.
// In particular, if a c2i adapter were to somehow call an i2c adapter,
// both caller and callee would be compiled methods, and neither would
// clean up the stack pointer changes performed by the two adapters.
// If this happens, control eventually transfers back to the compiled
// caller, but with an uncorrected stack, causing delayed havoc.
if (VerifyAdapterCalls &&
// assert(Interpreter::contains($return_addr) ||
// StubRoutines::contains($return_addr),
// "i2c adapter must return to an interpreter frame");
L_ok);
L_ok);
L_ok);
}
// As you can see from the list of inputs & outputs there are not a lot
// of temp registers to work with: mostly G1, G3 & G4.
// Inputs:
// G2_thread - TLS
// G5_method - Method oop
// G4 (Gargs) - Pointer to interpreter's args
// O0..O4 - free for scratch
// O5_savedSP - Caller's saved SP, to be restored if needed
// O6 - Current SP!
// O7 - Valid return address
// L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
// Outputs:
// G2_thread - TLS
// O0-O5 - Outgoing args in compiled layout
// O6 - Adjusted or restored SP
// O7 - Valid return address
// L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
// F0-F7 - more outgoing args
// Gargs is the incoming argument base, and also an outgoing argument.
// ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
// WITH O7 HOLDING A VALID RETURN PC
//
// | |
// : java stack :
// | |
// +--------------+ <--- start of outgoing args
// | receiver | |
// : rest of args : |---size is java-arg-words
// | | |
// +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
// | | |
// : unused : |---Space for max Java stack, plus stack alignment
// | | |
// +--------------+ <--- SP + 16*wordsize
// | |
// : window :
// | |
// +--------------+ <--- SP
// WE REPACK THE STACK. We use the common calling convention layout as
// discovered by calling SharedRuntime::calling_convention. We assume it
// causes an arbitrary shuffle of memory, which may require some register
// temps to do the shuffle. We hope for (and optimize for) the case where
// temps are not needed. We may have to resize the stack slightly, in case
// we need alignment padding (32-bit interpreter can pass longs & doubles
// misaligned, but the compilers expect them aligned).
//
// | |
// : java stack :
// | |
// +--------------+ <--- start of outgoing args
// | pad, align | |
// +--------------+ |
// | ints, longs, | |
// | floats, | |---Outgoing stack args.
// : doubles : | First few args in registers.
// | | |
// +--------------+ <--- SP' + 16*wordsize
// | |
// : window :
// | |
// +--------------+ <--- SP'
// ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
// WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SET UP
// FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.
// Cut-out for having no stack args. Since up to 6 args are passed
// in registers, we will commonly have no stack args.
if (comp_args_on_stack > 0) {
// Convert VMReg stack slots to words.
int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
// Round up to minimum stack alignment, in wordSize
// Now compute the distance from Lesp to SP. This calculation does not
// include the space for total_args_passed because Lesp has not yet popped
// the arguments.
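// Illustrative only: the alignment step mentioned above amounts to rounding
// the word count up to an even number, e.g.
//   comp_words_on_stack = round_to(comp_words_on_stack, 2);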
}
// Now generate the shuffle code. Pick up all register args and move the
// rest through G1_scratch.
for (int i = 0; i < total_args_passed; i++) {
// Longs and doubles are passed in native word order, but misaligned
// in the 32-bit build.
continue;
}
// Pick up 0, 1 or 2 words from Lesp+offset. Assume mis-aligned in the
// 32-bit build and aligned in the 64-bit build. Look for the obvious
// Load in argument order going down.
continue;
}
}
} else {
#ifdef _LP64
// In V9, longs are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot.
#else
fatal("longs should be on stack");
#endif
}
} else {
} else {
#ifdef _LP64
// In V9, doubles are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot. This code also handles longs that
// are passed on the stack, but need a stack-to-stack move through a
// spare float register.
#else
// Need to marshal 64-bit value from misaligned Lesp loads
#endif
}
}
// Was the argument really intended to be on the stack, but was loaded
// Convert stack slot to an SP offset
// Store down the shuffled stack word. Target address _is_ aligned.
}
}
// Jump to the compiled code just as if compiled code was doing it.
// 6243940 We might end up in handle_wrong_method if
// the callee is deoptimized as we race thru here. If that
// happens we don't want to take a safepoint because the
// caller frame will look interpreted and arguments are now
// "compiled" so it is much better to make this transition
// invisible to the stack walking code. Unfortunately if
// we try and find the callee by normal means a safepoint
// is possible. So we stash the desired callee in the thread
// and the vm will find it there should this case occur.
if (StressNonEntrant) {
// Open a big window for deopt failure
__ save_frame(0);
}
}
// ---------------------------------------------------------------
int total_args_passed,
// VMReg max_arg,
int comp_args_on_stack, // VMRegStackSlots
// -------------------------------------------------------------------------
// Generate a C2I adapter. On entry we know G5 holds the methodOop. The
// args start out packed in the compiled layout. They need to be unpacked
// into the interpreter layout. This will almost always require some stack
// space. We grow the current (compiled) stack, then repack the args. We
// finally end in a jump to the generic interpreter entry point. On exit
// from the interpreter, the interpreter will restore our SP (lest the
// compiled code, which relies solely on SP and not FP, get sick).
{
// Method might have been compiled since the call site was patched to
// interpreted; if that is the case treat it as a miss so we can get
// the call site corrected.
}
}
// Helper function for native calling conventions
// Bias any stack based VMReg we get by ignoring the window area
// but not the register parameter save area.
//
// This is strange for the following reasons. We'd normally expect
// the calling convention to return a VMReg for a stack slot
// completely ignoring any abi reserved area. C2 thinks of that
// abi area as only out_preserve_stack_slots. This does not include
// the area allocated by the C abi to store down integer arguments
// because the java calling convention does not use it. So,
// since c2 assumes that there are only out_preserve_stack_slots
// to bias the optoregs (which impacts VMRegs), when actually referencing any
// stack location the c calling convention must add in this bias amount
// to make up for the fact that out_preserve_stack_slots is
// insufficient for C calls. What a mess. I sure hope those 6
// stack words were worth it on every java call!
// Another way of cleaning this up would be for out_preserve_stack_slots
// to take a parameter to say whether it was C or java calling conventions.
// Then things might look a little better (but not much).
if( mem_parm_offset < 0 ) {
return as_oRegister(i)->as_VMReg();
} else {
int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
// Now return a biased offset that will be correct when out_preserve_slots is added back in
}
}
int total_args_passed) {
// Return the number of VMReg stack_slots needed for the args.
// This value does not include an abi space (like register window
// save area).
// The native convention is V8 if !LP64
// The LP64 convention is the V9 convention which is slightly more sane.
// We return the amount of VMReg stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots. Since we always
// have space for storing at least 6 registers to memory we start with that.
// See int_stk_helper for a further discussion.
int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();
#ifdef _LP64
// V9 convention: All things "as-if" on double-wide stack slots.
int j = 0; // Count of actual args, not HALVES
for( int i=0; i<total_args_passed; i++, j++ ) {
switch( sig_bt[i] ) {
case T_BOOLEAN:
case T_BYTE:
case T_CHAR:
case T_INT:
case T_SHORT:
case T_LONG:
case T_ADDRESS: // raw pointers, like current thread, for VM calls
case T_ARRAY:
case T_OBJECT:
break;
case T_FLOAT:
if ( j < 16 ) {
// V9ism: floats go in ODD registers
} else {
// V9ism: floats go in ODD stack slot
}
break;
case T_DOUBLE:
if ( j < 16 ) {
} else {
}
break;
default:
}
}
}
}
#else // _LP64
// V8 convention: first 6 things in O-regs, rest on stack.
// Alignment is willy-nilly.
for( int i=0; i<total_args_passed; i++ ) {
switch( sig_bt[i] ) {
case T_ADDRESS: // raw pointers, like current thread, for VM calls
case T_ARRAY:
case T_BOOLEAN:
case T_BYTE:
case T_CHAR:
case T_FLOAT:
case T_INT:
case T_OBJECT:
case T_SHORT:
break;
case T_DOUBLE:
case T_LONG:
break;
default:
}
}
}
}
#endif // _LP64
}
// ---------------------------------------------------------------------------
switch (ret_type) {
case T_FLOAT:
break;
case T_DOUBLE:
break;
}
}
void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
switch (ret_type) {
case T_FLOAT:
break;
case T_DOUBLE:
break;
}
}
// Check and forward any pending exception. Thread is stored in
// L7_thread_cache and possibly NOT in G2_thread. Since this is a native call, there
// is no exception handler. We merely pop this frame off and throw the
// exception in the caller's frame.
Label L;
// Since this is a native call, we *know* the proper exception handler
// without calling into the VM: it's the empty function. Just pop this
// frame and then jump to forward_exception_entry; O7 will contain the
// native caller's return PC.
}
// A simple move of an integer-like type.
// stack to stack
} else {
// stack to reg
}
// reg to stack
} else {
}
}
// On 64-bit we will store integer-like items to the stack as
// 64-bit items (sparc abi) even though java would only store
// 32 bits for a parameter. On 32-bit it will simply be 32 bits,
// so this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
// stack to stack
} else {
// stack to reg
}
// reg to stack
} else {
}
}
// stack to stack
} else {
// stack to reg
}
// reg to stack
} else {
}
}
// An oop arg. Must pass a handle not the oop itself
int oop_handle_offset,
int framesize_in_slots,
bool is_receiver,
int* receiver_offset) {
// must pass a handle. First figure out the location we use as a handle
// Oop is already on the stack
#ifdef _LP64
#else
#endif
}
if (is_receiver) {
}
} else {
// Oop is in an input register; we must flush it to the stack.
if (is_receiver) {
}
#ifdef _LP64
#else
#endif
} else {
}
}
}
// A float arg may have to do a float reg to int reg conversion.
// stack to stack the easiest of the bunch
} else {
// stack to reg
} else {
__ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
}
}
// reg to stack
} else {
__ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
}
} else {
// reg to reg
// gpr -> gpr
} else {
// gpr -> fpr
}
// fpr -> gpr
} else {
// fpr -> fpr
// In theory these overlap but the ordering is such that this is likely a nop
}
}
}
}
}
// A long move
// Do the simple ones here else do two int moves
if (src.is_single_phys_reg() ) {
if (dst.is_single_phys_reg()) {
} else {
// split src into two separate registers
// Remember hi means hi address or lsw on sparc
// Move msw to lsw
// MSW -> MSW
// Now LSW -> LSW
// this will only move lo -> lo and ignore hi
} else {
// MSW -> MSW (lo ie. first word)
}
}
} else if (dst.is_single_phys_reg()) {
} else {
// dst is a single reg.
// Remember lo is low address not msb for stack slots
// and lo is the "real" register for registers
// src is
// we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
} else {
// msw is stack move to L5
// lsw is stack move to dst.lo (real reg)
// we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
}
// So dst now has the low order correct position the
// msw half
}
} else {
// For LP64 we can probably do better.
}
}
// A double move
// The painful thing here is that like long_move a VMRegPair might be
// 1: a single physical register
// 2: two physical registers (v8)
// 3: a physical reg [lo] and a stack slot [hi] (v8)
// 4: two stack slots
// Since src is always a java calling convention we know that the src pair
// is always either all registers or all stack (and aligned?)
// in a register [lo] and a stack slot [hi]
// stack to stack the easiest of the bunch
} else {
// stack to reg
// stack -> reg, stack -> stack
} else {
__ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
}
// This was missing. (very rare case)
} else {
// stack -> reg
// Eventually optimize for alignment QQQ
} else {
__ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
__ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
}
}
}
// reg to stack
// Eventually optimize for alignment QQQ
} else {
}
} else {
// fpr to stack
} else {
// Is the stack aligned?
// No do as pairs
__ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
__ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
} else {
__ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
}
}
}
} else {
// reg to reg
// gpr -> gpr
} else {
// gpr -> fpr
// ought to be able to do a single store
// ought to be able to do a single load
}
// fpr -> gpr
// ought to be able to do a single store
// ought to be able to do a single load
// REMEMBER first() is low address not LSB
} else {
}
} else {
// fpr -> fpr
// In theory these overlap but the ordering is such that this is likely a nop
}
}
}
}
// Creates an inner frame if one hasn't already been created, and
// saves a copy of the thread in L7_thread_cache
if (!*already_created) {
__ save_frame(0);
// Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
// Don't use save_thread because it smashes G2 and we merely want to save a
// copy
*already_created = true;
}
}
const int stack_slots,
const int total_in_args,
const int arg_save_area,
// if map is non-NULL then the code should store the values,
// otherwise it should load them.
// Fill in the map
for (int i = 0; i < total_in_args; i++) {
int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
} else {
}
}
}
}
// Save or restore double word values
int handle_index = 0;
for (int i = 0; i < total_in_args; i++) {
handle_index += 2;
} else {
}
}
handle_index += 2;
} else {
}
}
}
// Save floats
for (int i = 0; i < total_in_args; i++) {
handle_index++;
} else {
}
}
}
}
// Check GC_locker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
const int stack_slots,
const int total_in_args,
const int arg_save_area,
// Save down any values that are live in registers and call into the
// runtime to halt for a GC
__ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
// Reload all the register arguments
#ifdef ASSERT
if (StressCriticalJNINatives) {
// Stress register saving
// Destroy argument registers
for (int i = 0; i < total_in_args; i++) {
}
__ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
}
}
}
#endif
}
// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
// Pass the length, ptr pair
// Load the arg up from the stack
}
__ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
// Pass zeros
}
if (VerifyOops) {
for (int i = 0; i < method->size_of_parameters(); i++) {
if (r->is_stack()) {
} else {
}
}
}
}
}
// Now write the args into the outgoing interpreter space
bool has_receiver = false;
if (ref_kind != 0) {
has_receiver = true;
} else {
}
if (member_reg != noreg) {
// Load the member_arg into register, if necessary.
if (r->is_stack()) {
} else {
// no data motion is needed
member_reg = r->as_Register();
}
}
if (has_receiver) {
// Make sure the receiver is loaded into a register.
if (r->is_stack()) {
// Porting note: This assumes that compiled calling conventions always
// pass the receiver oop in a register. If this is not true on some
// platform, pick a temp and load the receiver from stack.
fatal("receiver always in a register");
} else {
// no data motion is needed
receiver_reg = r->as_Register();
}
}
// Figure out which address we are really jumping to:
}
// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, since it's impossible for them
// to be thrown.
//
// They are roughly structured like this:
// if (GC_locker::needs_gc())
// SharedRuntime::block_for_jni_critical();
// transition to thread_in_native
// unpack array arguments and call native entry point
// check for safepoint in progress
// check if any thread suspend flags are set
// call into JVM and possibly unlock the JNI critical
// if a GC was suppressed while in the critical native.
// transition back to thread_in_Java
// return to caller
//
int compile_id,
if (method->is_method_handle_intrinsic()) {
in_regs);
in_ByteSize(-1),
in_ByteSize(-1),
}
bool is_critical_native = true;
if (native_func == NULL) {
is_critical_native = false;
}
// Native nmethod wrappers never take possession of the oop arguments.
// So the caller will gc the arguments. The only thing we need an
// oopMap for is if the call is static
//
// An OopMap for lock (and class if static), and one for the VM call itself
// First thing make an ic check to see if we should even be here
{
Label L;
}
#ifdef COMPILER1
// Object.hashCode can pull the hashCode from the header word
// instead of doing a full VM transition once it's been computed.
// Since hashCode is usually polymorphic at call sites we can't do
// this optimization at the call site without a lot of work.
// Read the header and build a mask to get its hash field. Give up if the object is not unlocked.
// We depend on hash_mask being at most 32 bits and avoid the use of
// hash_mask_in_place because it could be larger than 32 bits in a 64-bit
// vm: see markOop.hpp.
if (UseBiasedLocking) {
// Check if biased and fall through to runtime if so
}
// Check for a valid (non-zero) hash code and get its value.
#ifdef _LP64
#else
#endif
// leaf return.
}
#endif // COMPILER1
// We have received a description of where all the java args are located
// on entry to the wrapper. We need to convert these args to where
// the jni function will expect them. To figure out where they go
// we convert the java signature to a C signature by inserting
// the hidden arguments as arg[0] and possibly arg[1] (static method)
if (!is_critical_native) {
total_c_args += 1;
total_c_args++;
}
} else {
for (int i = 0; i < total_in_args; i++) {
// These have to be saved and restored across the safepoint
total_c_args++;
}
}
}
int argc = 0;
if (!is_critical_native) {
}
for (int i = 0; i < total_in_args ; i++ ) {
}
} else {
for (int i = 0; i < total_in_args ; i++ ) {
// Arrays are passed as int, elem* pair
switch (at[1]) {
default: ShouldNotReachHere();
}
}
} else {
in_elem_bt[i] = T_VOID;
}
}
}
}
// Now figure out where the args must be stored and how much stack space
// they require (neglecting out_preserve_stack_slots but including space for
// storing the 1st six register arguments). It's weird; see int_stk_helper.
//
int out_arg_slots;
if (is_critical_native) {
// Critical natives may have to call out so they need a save area
// for register arguments.
int double_slots = 0;
int single_slots = 0;
for ( int i = 0; i < total_in_args; i++) {
switch (in_sig_bt[i]) {
case T_ARRAY:
case T_BOOLEAN:
case T_BYTE:
case T_SHORT:
case T_CHAR:
default: ShouldNotReachHere();
}
switch (in_sig_bt[i]) {
case T_FLOAT: single_slots++; break;
case T_DOUBLE: double_slots++; break;
default: ShouldNotReachHere();
}
}
}
}
// Compute framesize for the wrapper. We need to handlize all oops in
// registers. We must create space for them here that is disjoint from
// the windowed save area because we have no control over when we might
// flush the window again and overwrite values that gc has since modified.
// (The live window race)
//
// We always just allocate 6 words for storing down these objects. This allows
// us to simply record the base and use the Ireg number to decide which
// slot to use. (Note that the reg number is the inbound number not the
// outbound number).
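// Illustrative only: with a fixed 6-word area, the handle slot for an oop
// that arrived in inbound register %i<n> can be picked directly, roughly
//   int slot = oop_handle_offset + n * VMRegImpl::slots_per_word;
// where n is a hypothetical stand-in for the inbound register number.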
// We must shuffle args to match the native convention, and include var-args space.
// Calculate the total number of stack slots we will need.
// First count the abi requirement plus all of the outgoing args
// Now the space for the inbound oop handle area
// Now any space we need for handlizing a klass if static method
int klass_slot_offset = 0;
int lock_slot_offset = 0;
bool is_static = false;
is_static = true;
}
// Plus a lock if needed
if (method->is_synchronized()) {
}
// Now a place to save return value or as a temporary for any gpr -> fpr moves
stack_slots += 2;
// Ok The space we have allocated will look like:
//
//
// FP-> | |
// |---------------------|
// | 2 slots for moves |
// |---------------------|
// | lock box (if sync) |
// |---------------------| <- lock_slot_offset
// | klass (if static) |
// |---------------------| <- klass_slot_offset
// | oopHandle area |
// |---------------------| <- oop_handle_offset
// | outbound memory |
// | based arguments |
// | |
// |---------------------|
// | vararg area |
// |---------------------|
// | |
// SP-> | out_preserved_slots |
//
//
// Now compute the actual number of stack words we need, rounding to make
// the stack properly aligned.
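// Illustrative only: the rounding typically looks like
//   stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
// which keeps the final frame size an even number of words.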
// Generate stack overflow check before creating frame
// Generate a new frame for the wrapper.
__ verify_thread();
if (is_critical_native) {
}
//
// We immediately shuffle the arguments so that any vm call we have to
// make from here on out (sync slow path, jvmti, etc.) we will have
// captured the oops from our caller and have a valid oopMap for
// them.
// -----------------
// The Grand Shuffle
//
// Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
// (derived from JavaThread* which is in L7_thread_cache) and, if static,
// the class mirror instead of a receiver. This pretty much guarantees that
// register layout will not match. We ignore these extra arguments during
// the shuffle. The shuffle is described by the two calling convention
// vectors we have in our possession. We simply walk the java vector to
// get the source locations and the c vector to get the destinations.
// Because we have a new window and the argument registers are completely
// disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
// here.
// This is a trick. We double the stack slots so we can claim
// the oops in the caller's frame. Since we are sure to have
// more args than the caller, doubling is enough to make
// sure we can capture all the incoming oop args from the
// caller.
//
// Record sp-based slot for receiver on stack for non-static methods
// We move the arguments backward because the floating point register
// destination will always be a register with a greater or equal register
// number, or the stack.
#ifdef ASSERT
for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
reg_destroyed[r] = false;
}
for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
freg_destroyed[f] = false;
}
#endif /* ASSERT */
#ifdef ASSERT
assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
}
}
#endif /* ASSERT */
switch (in_sig_bt[i]) {
case T_ARRAY:
if (is_critical_native) {
c_arg--;
break;
}
case T_OBJECT:
((i == 0) && (!is_static)),
break;
case T_VOID:
break;
case T_FLOAT:
break;
case T_DOUBLE:
break;
case T_LONG :
break;
default:
}
}
// Pre-load a static method's oop into O1. Used both by locking code and
// the normal JNI call code.
__ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1);
// Now handlize the static class mirror in O1. It's known not-null.
}
if (method->is_synchronized()) {
}
// We have all of the arguments setup at this point. We MUST NOT touch any Oregs
// push a new frame and flush the windows.
#ifdef _LP64
{
// Call the next instruction
}
#else
#endif /* _LP64 */
// O7 now has the pc loaded that we will use when we finally call to native.
// Save thread in L7; it crosses a bunch of VM calls below
// Don't use save_thread because it smashes G2 and we merely
// want to save a copy
// If we create an inner frame, once is plenty;
// when we create it we must also save G2_thread.
bool inner_frame_created = false;
// dtrace method entry support
{
// create inner frame
__ save_frame(0);
}
// RedefineClasses() tracing support for obsolete method entry
// create inner frame
__ save_frame(0);
}
// We are in the jni frame unless saved_frame is true, in which case
// we are one frame deeper (the "inner" frame). If we are in the
// "inner" frame the args are in the Iregs; in the jni frame
// they are in the Oregs.
// If we ever need to go to the VM (for locking, jvmti) then
// we will always be in the "inner" frame.
// Lock a synchronized method
if (method->is_synchronized()) {
#ifdef ASSERT
if (UseBiasedLocking) {
// making the box point to itself will make it clear it went unused
// but also be obviously invalid
}
#endif // ASSERT
//
// Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
//
// None of the above fast optimizations worked so we have to get into the
// slow case of monitor enter. Inline a special case of call_VM that
// disallows any pending_exception.
// Record last_Java_sp, in case the VM code releases the JVM lock.
// do the call
__ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
#ifdef ASSERT
{ Label L;
}
#endif
}
// Finally just about ready to make the JNI call
__ flush_windows();
if (inner_frame_created) {
} else {
// Store only what we need from this frame
// QQQ I think that non-v9 (like we care) we don't need these saves
// either as the flush traps and the current window goes too.
}
// get JNIEnv* which is first argument to native
if (!is_critical_native) {
}
// Use that pc we placed in O7 a while back as the current frame anchor
// We flushed the windows ages ago; now mark them as flushed before transitioning.
// Transition from _thread_in_Java to _thread_in_native.
#ifdef _LP64
#else
#endif
// Unpack native results. For int-types, we do any needed sign-extension
// and move things into I0. The return value there will survive any VM
// calls for blocking or unlocking. An FP or OOP result (handle) is done
// specially in the slow-path code.
switch (ret_type) {
case T_VOID: break; // Nothing to do!
case T_FLOAT: break; // Got it where we want it (unless slow-path)
case T_DOUBLE: break; // Got it where we want it (unless slow-path)
// In the 64-bit build the result is in O0; in the 32-bit build it is in O0, O1.
case T_LONG:
#ifndef _LP64
#endif
// Fall thru
case T_OBJECT: // Really a handle
case T_ARRAY:
case T_INT:
break;
case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
break; // Cannot de-handlize until after reclaiming jvm_lock
default:
}
// must we block?
// Block, if necessary, before resuming in _thread_in_Java state.
// In order for GC to work, don't clear the last_Java_sp until after blocking.
// Switch thread to "native transition" state before reading the synchronization state.
// This additional state is necessary because reading and testing the synchronization
// state is not atomic w.r.t. GC, as this scenario demonstrates:
// Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
// VM thread changes sync state to synchronizing and suspends threads for GC.
// Thread A is resumed to finish this native method, but doesn't block here since it
// didn't see any synchronization in progress, and escapes.
if (UseMembar) {
// Force this write out before the read below
} else {
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
}
}
Label L;
// Block. Save any potential method result value before the operation and
// use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
// lets us share the oopMap we used when we went native rather than create
// a distinct one for this pc
//
if (!is_critical_native) {
} else {
}
// Restore any method result value
if (is_critical_native) {
// The call above performed the transition to thread_in_Java so
// skip the transition logic below.
}
}
// thread state is thread_in_native_trans. Any safepoint blocking has already
// happened so we can now change state to _thread_in_Java.
__ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_disabled, Assembler::notEqual, Assembler::pt, no_reguard);
// Handle possible exception (will unlock if necessary)
// native result if any is live in freg or I0 (and I1 if long and 32bit vm)
// Unlock
if (method->is_synchronized()) {
// Get locked oop from the handle we passed to jni
// Must save pending exception around the slow-path VM call. Since it's a
// leaf call, the pending exception (if any) can be kept in a register.
// Now unlock
// (Roop, Rmark, Rbox, Rscratch)
// save and restore any potential method result value around the unlocking
// operation. Will save in I0 (or stack for FP returns).
// Must clear pending-exception before re-entering the VM. Since this is
// a leaf call, pending-exception-oop can be safely kept in a register.
// slow case of monitor exit. Inline a special case of call_VM that
// disallows any pending_exception.
__ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
#ifdef ASSERT
{ Label L;
}
#endif
// check_forward_pending_exception jump to forward_exception if any pending
// exception is set. The forward_exception routine expects to see the
// exception in pending_exception and not in a register. Kind of clumsy,
// since all folks who branch to forward_exception must have tested
// pending_exception first and hence have it in a register already.
}
// Tell dtrace about this method exit
{
}
// Clear "last Java frame" SP and PC.
// Unpack oop result
Label L;
}
if (!is_critical_native) {
// reset handle block
}
// Return
#ifndef _LP64
}
#endif
oop_maps);
if (is_critical_native) {
nm->set_lazy_critical_native(true);
}
return nm;
}
#ifdef HAVE_DTRACE_H
// ---------------------------------------------------------------------------
// Generate a dtrace nmethod for a given signature. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// abi and then leaves nops at the position you would expect to call a native
// function. When the probe is enabled the nops are replaced with a trap
// instruction that dtrace inserts and the trace will cause a notification
// to dtrace.
//
// arguments. No other java types are allowed. Strings are converted to utf8
// strings so that from dtrace point of view java strings are converted to C
// strings. There is an arbitrary fixed limit on the total space that a method
// can use for converting the strings (256 chars per string in the signature),
// so any java string larger than this is truncated.
static bool offsets_initialized = false;
// generate_dtrace_nmethod is guarded by a mutex so we are sure to
// be single threaded in this method.
// Fill in the signature array, for the calling-convention call.
// The signature we are going to use for the trap that dtrace will see
// is converted to a two-slot long, which is why we double the allocation.
int i=0;
int total_strings = 0;
int first_arg_to_pass = 0;
int total_c_args = 0;
// Skip the receiver as dtrace doesn't want to see it
first_arg_to_pass = 1;
}
if (s == vmSymbols::java_lang_String()) {
} else if (s == vmSymbols::java_lang_Boolean() ||
s == vmSymbols::java_lang_Byte()) {
} else if (s == vmSymbols::java_lang_Character() ||
s == vmSymbols::java_lang_Short()) {
} else if (s == vmSymbols::java_lang_Integer() ||
s == vmSymbols::java_lang_Float()) {
} else if (s == vmSymbols::java_lang_Long() ||
s == vmSymbols::java_lang_Double()) {
}
// We convert double to long
// We convert float to int
}
}
// Now get the compiled-Java layout as input arguments
int comp_args_on_stack;
// We have received a description of where all the java args are located
// on entry to the wrapper. We need to convert these args to where
// a native (non-jni) function would expect them. To figure out
// where they go we convert the java signature to a C signature and remove
// Now figure out where the args must be stored and how much stack space
// they require (neglecting out_preserve_stack_slots but including space for
// storing the 1st six register arguments). It's weird; see int_stk_helper.
//
int out_arg_slots;
// Calculate the total number of stack slots we will need.
// First count the abi requirement plus all of the outgoing args
stack_slots += 2;
// Now space for the string(s) we must convert
stack_slots += total_strings *
// Ok The space we have allocated will look like:
//
//
// FP-> | |
// |---------------------|
// | string[n] |
// |---------------------| <- string_locs[n]
// | string[n-1] |
// |---------------------| <- string_locs[n-1]
// | ... |
// | ... |
// |---------------------| <- string_locs[1]
// | string[0] |
// |---------------------| <- string_locs[0]
// | temp |
// |---------------------| <- conversion_temp
// | outbound memory |
// | based arguments |
// | |
// |---------------------|
// | |
// SP-> | out_preserved_slots |
//
//
// Now compute the actual number of stack words we need, rounding to make
// the stack properly aligned.
// First thing make an ic check to see if we should even be here
{
Label L;
}
// The instruction at the verified entry point must be 5 bytes or longer
// because it can be patched on the fly by make_non_entrant. The stack bang
// instruction fits that requirement.
// Generate stack overflow check before creating frame
"valid size for make_non_entrant");
// Generate a new frame for the wrapper.
// Frame is now completed as far a size and linkage.
#ifdef ASSERT
for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
reg_destroyed[r] = false;
}
for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
freg_destroyed[f] = false;
}
#endif /* ASSERT */
#ifdef ASSERT
FloatRegisterImpl::S)], "ack!");
}
FloatRegisterImpl::S)] = true;
}
#endif /* ASSERT */
case T_ARRAY:
case T_OBJECT:
{
// need to unbox a one-slot value
} else {
"must be");
}
// If the final destination is an acceptable register
}
}
}
switch (bt) {
case T_BYTE:
case T_SHORT:
case T_INT:
case T_LONG:
default: ShouldNotReachHere();
}
// If tmp wasn't final destination copy to final destination
} else {
}
}
++c_arg; // move over the T_VOID to keep the loop indices in sync
}
Register s =
Register d =
// We store the oop now so that the conversion pass can reach
// while in the inner frame. This will be the only store if
// the oop is NULL.
if (s != L2) {
// src is register
if (d != L2) {
// dst is register
} else {
STACK_BIAS), "must be");
}
} else {
// src not a register
STACK_BIAS), "must be");
if (d == L2) {
STACK_BIAS), "must be");
}
}
// Convert the arg to NULL
} else {
STACK_BIAS), "must be");
}
}
}
break;
case T_VOID:
break;
case T_FLOAT:
} else {
// freg -> reg
int off =
} else {
if (conversion_off == noreg) {
conversion_off = L6;
}
SP, conversion_off);
}
} else {
// freg -> mem
} else {
if (conversion_off == noreg) {
conversion_off = L6;
}
SP, conversion_off);
}
}
}
break;
case T_DOUBLE:
} else {
// Destination could be an odd reg on 32bit in which case
// we can't load direct to the destination.
d = L2;
}
} else {
if (conversion_off == noreg) {
conversion_off = L6;
}
SP, conversion_off);
}
if (d == L2) {
}
}
break;
case T_LONG :
// 32bit can't do a split move of something like g1 -> O0, O1
// so use a memory temp
}
} else {
if (conversion_off == noreg) {
conversion_off = L6;
}
}
}
} else {
}
break;
default:
}
}
// If we have any strings we must store any register-based arg to the stack.
// This includes any still-live floating point registers too.
if (total_strings > 0 ) {
// protect all the arg registers
__ save_frame(0);
// Get first string offset
// It's a string; the oop was already copied to the out arg
// position.
if (d != noreg) {
} else {
"must be");
}
if (d != noreg) {
} else {
"must be");
}
}
}
}
// Ok now we are done. Need to place the nop that dtrace wants in order to
// patch in the trap
// Return
return nm;
}
#endif // HAVE_DTRACE_H
// this function returns the adjustment size (in number of words) to a c2i adapter
// activation for use during deoptimization
"test and remove; got more parms than locals");
if (callee_locals < callee_parameters)
return 0; // No adjustment for negative locals
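// Illustrative only: the adjustment returned by this function is essentially
// the extra locals expressed in words and rounded up to a long boundary, roughly
//   round_to((callee_locals - callee_parameters) * Interpreter::stackElementWords,
//            WordsPerLong)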
}
// "Top of Stack" slots that may be unused by the calling convention but must
// otherwise be preserved.
// On Intel these are not necessary and the value can be zero.
// On Sparc this describes the words reserved for storing a register window
// when an interrupt occurs.
}
//
// Common out the new frame generation for deopt and uncommon trap
//
#ifdef ASSERT
// make sure that the frames are aligned properly
#ifndef _LP64
#endif
#endif
// Deopt needs to pass some extra live values from frame to frame
if (deopt) {
}
#ifdef ASSERT
// trash registers to show a clear pattern in backtraces
// Don't touch I5 could have valuable savedSP
// trash the return value as there is nothing to return yet
#endif
}
//
// loop through the UnrollBlock info and create new frames
//
// Before we make new frames, check to see if stack is available.
// Do this after the caller's return address is on top of stack
if (UseStackBanging) {
// Get total frame size for interpreted frames
}
__ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
// Adjust old interpreter frame to make space for new frame's extra java locals
//
// We capture the original sp for the transition frame only because it is needed in
// order to properly calculate interpreter_sp_adjustment. Even though in real life
// every interpreter frame captures a savedSP it is only needed at the transition
// (fortunately). If we had to have it correct everywhere then we would need to
// be told the sp_adjustment for each frame we create. If the frame size array
// were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
// for each frame we create and keep up the illusion everywhere.
//
#ifdef ASSERT
// make sure that there is at least one entry in the array
#endif
// Now push the new interpreter frames
// allocate a new frame, filling the registers
}
//------------------------------generate_deopt_blob----------------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
// allocate space for the code
// setup code generation tools
if (UseStackBanging) {
}
#ifdef _LP64
#else
// Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
// Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
#endif /* _LP64 */
int frame_size_words;
#endif
//
// This is the entry point for code which is returning to a de-optimized
// frame.
// The steps taken by this frame are as follows:
// and all potentially live registers (at a pollpoint many registers can be live).
//
// - call the C routine: Deoptimization::fetch_unroll_info (this function
// returns information about the number and size of interpreter frames
// which are equivalent to the frame which is being deoptimized)
// - deallocate the unpack frame, restoring only result values. Other
// volatile registers will now be captured in the vframeArray as needed.
// - deallocate the deoptimization frame
// - in a loop using the information returned in the previous step
// push new interpreter frames (take care to propagate the return
// values through each new frame pushed)
// - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
// - call the C routine: Deoptimization::unpack_frames (this function
// lays out values on the interpreter frame which was just created)
// - deallocate the dummy unpack_frame
// - ensure that all the return values are correctly set and then do
// a return to the interpreter entry point
//
// Refer to the following methods for more information:
// - Deoptimization::fetch_unroll_info
// - Deoptimization::unpack_frames
// restore G2, the trampoline destroyed it
__ get_thread();
// On entry we have been called by the deoptimized nmethod with a call that
// replaced the original call (or safepoint polling location) so the deoptimizing
// pc is now in O7. Return values are still in the expected places
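  // A sketch of this (normal deoptimization) entry: save everything that might
  // be live and remember which flavour of unpacking to do ('cont' and 'map' are
  // assumed local names):
  //   map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
  //   __ ba(cont);
  //   __ delayed()->mov(Deoptimization::Unpack_deopt, G4deopt_mode);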
// restore G2, the trampoline destroyed it
__ get_thread();
// On entry we have been jumped to by the exception handler (or exception_blob
// for server). O0 contains the exception oop and O7 contains the original
// exception pc. So if we push a frame here it will look to the
// stack walking code (fetch_unroll_info) just like a normal call, and
// state will be extracted normally.
// save exception oop in JavaThread and fall through into the
// exception_in_tls case since they are handled in the same way except
// for where the pending exception is kept.
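  // A sketch of that hand-off (assuming the standard JavaThread fields):
  __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(O7,         G2_thread, in_bytes(JavaThread::exception_pc_offset()));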
//
// Vanilla deoptimization with an exception pending in exception_oop
//
// No need to update oop_map as each call to save_live_registers will produce an identical oopmap
// Restore G2_thread
__ get_thread();
#ifdef ASSERT
{
// verify that there is really an exception oop in exception_oop
// verify that there is no pending exception
}
#endif
//
// Reexecute entry, similar to c2 uncommon trap
//
// No need to update oop_map as each call to save_live_registers will produce an identical oopmap
// do the call by hand so we can get the oopmap
__ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
// Set an oopmap for the call site; this describes all our saved volatile registers
// so this move will survive the call
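// (sketch) record the OopMap produced by save_live_registers at this call's
// return pc ('start' and 'map' are assumed local names):
//   oop_maps->add_gc_map(__ offset() - start, map);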
__ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);
// Move the pending exception from exception_oop to Oexception so
// the pending exception will be picked up by the interpreter.
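  // A sketch of that move (assuming the standard JavaThread exception_oop field):
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
  __ st_ptr(G0,        G2_thread, in_bytes(JavaThread::exception_oop_offset())); // clear the field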
// deallocate the deoptimization frame taking care to preserve the return values
// Allocate new interpreter frame(s) and possible c2i adapter frame
make_new_frames(masm, true);
// push a dummy "unpack_frame" taking care of float return values and
// call Deoptimization::unpack_frames to have the unpacker layout
// information in the interpreter frames just created and then return
// to the interpreter entry point
#if !defined(_LP64)
#if defined(COMPILER2)
// 32-bit 1-register longs return longs in G1
#endif
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
#else
// LP64 uses g4 in set_last_Java_frame
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
#endif
#if !defined(_LP64) && defined(COMPILER2)
  // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
  // I0/I1 if the return value is long.
#endif
_deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
}
#ifdef COMPILER2
//------------------------------generate_uncommon_trap_blob--------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
// allocate space for the code
// setup code generation tools
if (UseStackBanging) {
}
#ifdef _LP64
#else
// Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
// Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
#endif
//
// This is the entry point for all traps the compiler takes when it thinks
// it cannot handle further execution of compiled code. The frame is
// deoptimized in these cases and converted into interpreter frames for
// execution.
// The steps taken by this frame are as follows:
// - push a fake "unpack_frame"
// - call the C routine Deoptimization::uncommon_trap (this function
// packs the current compiled frame into vframe arrays and returns
// information about the number and size of interpreter frames which
// are equivalent to the frame which is being deoptimized)
// - deallocate the "unpack_frame"
// - deallocate the deoptimization frame
// - in a loop using the information returned in the previous step
// push interpreter frames;
// - create a dummy "unpack_frame"
// - call the C routine: Deoptimization::unpack_frames (this function
// lays out values on the interpreter frame which was just created)
// - deallocate the dummy unpack_frame
// - return to the interpreter entry point
//
// Refer to the following methods for more information:
// - Deoptimization::uncommon_trap
// - Deoptimization::unpack_frames
// the unloaded class index is in O0 (first parameter to this blob)
// push a dummy "unpack_frame"
// and call Deoptimization::uncommon_trap to pack the compiled frame into
// vframe array and return the UnrollBlock information
__ save_frame(0);
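  // (sketch) after the flat save_frame the incoming unloaded-class index (O0)
  // is visible as I0; move it to an outgoing register and set up last_Java_sp:
  __ set_last_Java_frame(SP, noreg);
  __ mov(I0, O2klass_index);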
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
// deallocate the deoptimized frame taking care to preserve the return values
// Allocate new interpreter frame(s) and possible c2i adapter frame
make_new_frames(masm, false);
// push a dummy "unpack_frame" taking care of float return values and
// call Deoptimization::unpack_frames to have the unpacker layout
// information in the interpreter frames just created and then return
// to the interpreter entry point
__ save_frame(0);
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
_uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
}
#endif // COMPILER2
//------------------------------generate_handler_blob-------------------
//
// Generate a special Compile2Runtime blob that saves all registers, and sets
// up an OopMap.
//
// This blob is jumped to (via a breakpoint and the signal handler) from a
// safepoint in compiled code. On entry to this blob, O7 contains the
// address in the original nmethod at which we should resume normal execution.
// Thus, this blob looks like a subroutine which must preserve lots of
// registers and return normally. Note that O7 is never register-allocated,
// so it is guaranteed to be free here.
//
// The hardest part of what this blob must do is to save the 64-bit %o
// registers in the 32-bit build. A simple 'save' turns the %o's into %i's and
// an interrupt will chop off their heads. Making space in the caller's frame
// first will let us save the 64-bit %o's before save'ing, but we cannot hand
// the adjusted FP off to the GC stack-crawler: this will modify the caller's
// SP and mess up HIS OopMaps. So we first adjust the caller's SP, then save
// the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
// Tricky, tricky, tricky...
// allocate space for the code
// setup code generation tools
// Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
// Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
// even larger with TraceJumps
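  // A sketch of the setup (the buffer name and sizes are illustrative):
  CodeBuffer buffer("handler_blob", 1600, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  OopMapSet* oop_maps = new OopMapSet();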
int frame_size_words;
// If this causes a return before the processing, then do a "restore"
if (cause_return) {
} else {
// Make it look like we were called via the poll
// so that frame constructor always sees a valid return address
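    // (sketch) load the pc that the signal handler stashed in the thread and
    // back it up so it reads as a return address (the field name is assumed):
    __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
    __ sub(O7, frame::pc_return_offset, O7);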
}
// setup last_Java_sp (blows G4)
// call into the runtime to handle illegal instructions exception
// Do not use call_VM_leaf, because we need to make a GC map at this call site.
// Set an oopmap for the call site.
// We need this not only for callee-saved registers, but also for volatile
// registers that the compiler might be keeping live across a safepoint.
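  // (sketch) record the oopmap built by save_live_registers for this pc
  // ('start' and 'map' are assumed local names):
  //   oop_maps->add_gc_map(__ offset() - start, map);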
// clear last_Java_sp
// Check for exceptions
// We are back to the original state on entry and ready to go.
// Pending exception after the safepoint
// We are back to the original state on entry.
// Tail-call forward_exception_entry, with the issuing PC in O7,
// so it looks like the original nmethod called forward_exception_entry.
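  // A sketch of the tail-call (leaving O7 untouched so this stub appears to be
  // the caller); AddressLiteral/jump_to is the usual SPARC idiom for this:
  AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
  __ jump_to(exception_entry, G3_scratch);
  __ delayed()->nop();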
// -------------
// make sure all code is generated
// return exception blob
}
//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the vm to find out the proper destination
// of a java call. All the argument registers are live at this point,
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
// allocate space for the code
// setup code generation tools
// Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
// Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
// even larger with TraceJumps
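  // A sketch of the setup ('name' is the same parameter that is handed to the
  // runtime stub at the bottom; the sizes are illustrative):
  CodeBuffer buffer(name, 1600, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  OopMapSet* oop_maps = new OopMapSet();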
int frame_size_words;
// setup last_Java_sp (blows G4)
// call into the runtime to resolve the proper destination of the call
// Do not use call_VM_leaf, because we need to make a GC map at this call site.
// O0 contains the address we are going to jump to assuming no exception got installed
// Set an oopmap for the call site.
// We need this not only for callee-saved registers, but also for volatile
// registers that the compiler might be keeping live across a safepoint.
// clear last_Java_sp
// Check for exceptions
// get the returned methodOop
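  // (sketch) the resolved methodOop comes back in the thread-local vm_result
  __ get_vm_result(G5_method);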
// O0 is where we want to jump, overwrite G3 which is saved and scratch
// We are back to the original state on entry and ready to go.
// Pending exception after the safepoint
// We are back to the original state on entry.
// Tail-call forward_exception_entry, with the issuing PC in O7,
// so it looks like the original nmethod called forward_exception_entry.
// -------------
// make sure all code is generated
// return the blob
// frame_size_words or bytes??
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}