/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "interp_masm_sparc.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/methodOop.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#endif
// Implementation of InterpreterMacroAssembler
// This file specializes the assembler with interpreter-specific macros.
const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);
#else // CC_INTERP
#ifndef STATE
#endif // STATE
#endif // CC_INTERP
void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
// Note: this algorithm is also used by C1's OSR entry sequence.
// Any changes should also be applied to CodeEmitter::emit_osr_entry().
// max_locals*2 for TAGS. Assumes that args_size has already been adjusted.
// faster.
}
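// Illustrative sketch (not the emitted SPARC code): the extra-locals size
// computation described above, modeled in plain C++. The names below are
// stand-ins; 'word_size' plays the role of the VM's wordSize constant, and
// the round-up to 2 words mirrors the requirement that SP stay 2-word aligned.
static int sketch_extra_locals_size_in_bytes(int args_size_in_words,
                                             int locals_size_in_words,
                                             int word_size) {
  int delta = locals_size_in_words - args_size_in_words; // non-argument locals
  if (delta < 0) delta = 0;   // every local is an argument: no extra space
  delta = (delta + 1) & ~1;   // round up to a multiple of 2 words
  return delta * word_size;   // convert words to bytes
}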
#ifndef CC_INTERP
// Dispatch code executed in the prolog of a bytecode which does not do its
// own dispatch. The dispatch address is computed and placed in IdispatchAddress
#ifdef FAST_DISPATCH
// FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
// they both use I2.
// add offset to correct dispatch table
#else
// dispatch table to use
#endif
}
// Dispatch code executed in the epilog of a bytecode which does not do its
// own dispatch. The dispatch address in IdispatchAddress is used for the
// dispatch.
jmp( IdispatchAddress, 0 );
}
// %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
}
// %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
}
// load current bytecode
}
) {
if (!java_thread->is_valid())
// super call
}
int number_of_arguments,
bool check_exception
) {
if (!java_thread->is_valid())
// See class ThreadInVMfromInterpreter, which assumes that the interpreter
// takes responsibility for setting its own thread-state on call-out.
// However, ThreadInVMfromInterpreter resets the state to "in_Java".
//save_bcp(); // save bcp
MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception);
//restore_bcp(); // restore bcp
//restore_locals(); // restore locals pointer
}
if (JvmtiExport::can_pop_frame()) {
Label L;
// Check the "pending popframe condition" flag in the current thread
// Initiate popframe handling only if it is not already being processed. If the flag
// has the popframe_processing bit set, it means that this code is called *during* popframe
// handling - we don't want to reenter.
// Call Interpreter::remove_activation_preserving_args_entry() to get the
// address of the same-named entrypoint in the generated interpreter code.
call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
// Jump to Interpreter::_remove_activation_preserving_args_entry
bind(L);
}
}
switch (state) {
case btos: // fall through
case ctos: // fall through
case stos: // fall through
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
// Clean up tos value in the jvmti thread state
}
if (JvmtiExport::can_force_early_return()) {
Label L;
// Initiate earlyret handling only if it is not already being processed.
// If the flag has the earlyret_processing bit set, it means that this code
// is called *during* earlyret handling - we don't want to reenter.
// Call Interpreter::remove_activation_early_entry() to get the address of the
// same-named entrypoint in the generated interpreter code
call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);
// Jump to Interpreter::_remove_activation_early_entry
bind(L);
}
}
void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
}
#endif /* CC_INTERP */
#ifndef CC_INTERP
}
}
}
// common code to dispatch and dispatch_only
// dispatch value in Lbyte_code and increment Lbcp
void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
// %%%%% maybe implement +VerifyActivationFrameSize here
//verify_thread(); //too slow; we will just verify on method entry & exit
#ifdef FAST_DISPATCH
// use IdispatchTables
// add offset to correct dispatch table
} else {
#endif
// dispatch table to use
#ifdef FAST_DISPATCH
}
#endif
jmp( G3_scratch, 0 );
}
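// Illustrative sketch (assumption, not the emitted code): the table dispatch
// performed above, in plain C++. Each TosState has its own dispatch table of
// code-stub addresses; the bytecode value indexes the table directly, which
// is what the scaled load plus 'jmp(G3_scratch, 0)' achieves.
static void* sketch_dispatch_target(unsigned char bytecode, void** table) {
  return table[bytecode];  // address of the stub that implements this bytecode
}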
// Helpers for expression stack
// Longs and doubles are Category 2 computational types in the
// JVM specification (section 3.11.1) and take 2 expression stack or
// local slots.
// Aligning them on 32 bit with tagged stacks is hard because the code generated
// for the dup* bytecodes depends on what types are already on the stack.
// (and we can use 0 for non-reference tags).
// Known good alignment in _LP64 but unknown otherwise
#ifdef _LP64
#else
#endif
}
// Known good alignment in _LP64 but unknown otherwise
#ifdef _LP64
// store something more useful here
#else
#endif
}
// Known good alignment in _LP64 but unknown otherwise
#ifdef _LP64
#else
#endif
}
// Known good alignment in _LP64 but unknown otherwise
#ifdef _LP64
// store something more useful here
#else
#endif
}
}
}
}
}
}
}
}
// remember: our convention for longs in SPARC is:
// O0 (Otos_l1) has high-order part in first word,
// O1 (Otos_l2) has low-order part in second word
// Longs are stored in memory-correct order, even if unaligned.
}
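// Illustrative sketch (assumption): the 32-bit long convention documented
// above, modeled in C++. The high-order half goes in the first word (O0,
// Otos_l1) and the low-order half in the second (O1, Otos_l2), matching the
// big-endian "memory-correct" order longs have in memory.
static void sketch_split_long(jlong value, juint* first_word, juint* second_word) {
  *first_word  = (juint)(((julong)value) >> 32);  // Otos_l1: high-order half
  *second_word = (juint)(value & 0xFFFFFFFF);     // Otos_l2: low-order half
}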
}
// Longs are stored in memory-correct order, even if unaligned.
}
switch (state) {
case ctos:
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
}
switch (state) {
case ctos:
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
}
// Helpers for swap and dup
}
}
}
// Reset Lesp.
// Reset SP by subtracting more space from Lesp.
// A native does not need to do this, since its callee does not change SP.
// Compute max expression stack+register save area
//
// now set up a stack frame with the size computed above
//
//round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below
#ifdef _LP64
#endif
}
#ifdef ASSERT
// Saved SP must be aligned.
#ifdef _LP64
#else
#endif
// Saved SP, plus register window size, must not be above FP.
#ifdef _LP64
#endif
// Saved SP must not be ridiculously below current SP.
#ifdef _LP64
#endif
stop("on return to interpreted call, restored SP is corrupted");
}
// about to read or write Resp[0]
// make sure it is not in the monitors or the register save area
stop("too many pops: Lesp points into monitor area");
#ifdef _LP64
#endif
stop("too many pushes: Lesp points into register window");
}
#endif // ASSERT
// Load compiled (i2c) or interpreter entry when calling from interpreted and
// do the call. Centralized so that all interpreter calls will do the same actions.
// If jvmti single stepping is on for a thread we must not call compiled code.
void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {
// Assume we want to go compiled if available
if (JvmtiExport::can_post_interpreter_events()) {
// JVMTI events, such as single-stepping, are implemented partly by avoiding running
// compiled code in threads for which the event is enabled. Check here for
// interp_only_mode if these events CAN be enabled.
}
// the i2c_adapters need methodOop in G5_method (right? %%%)
// do the call
#ifdef ASSERT
{
stop("null entry point");
}
#endif // ASSERT
// Adjust Rret first so Llast_SP can be same as Rret
// Record SP so we can remove any stack space allocated by adapter transition
}
TemplateTable::branch(false,false);
}
int bcp_offset,
switch (is_signed) {
default: ShouldNotReachHere();
}
switch (should_set_CC ) {
default: ShouldNotReachHere();
}
}
int bcp_offset,
switch (should_set_CC ) {
default: ShouldNotReachHere();
case set_CC: break;
case dont_set_CC: break;
}
#ifdef _LP64
#else
#endif
#ifdef _LP64
#else
// Unsigned load is faster than signed on some implementations
#endif
}
if (index_size == sizeof(u2)) {
} else if (index_size == sizeof(u4)) {
} else if (index_size == sizeof(u1)) {
} else {
}
}
// convert from field index to ConstantPoolCacheEntry index and from
// word index to byte offset
}
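// Illustrative sketch (assumption): the conversion spelled out in the comment
// above, as arithmetic instead of an 'sll'. A cache entry occupies a fixed
// power-of-two number of bytes, so field index -> entry index -> byte offset
// collapses into a single left shift; 'log2_entry_size_in_bytes' is a
// stand-in for exact_log2(entry size in words) + LogBytesPerWord.
static intptr_t sketch_cp_cache_entry_offset(int field_index,
                                             int log2_entry_size_in_bytes) {
  return (intptr_t)field_index << log2_entry_size_in_bytes;
}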
int byte_no,
int bcp_offset,
size_t index_size) {
ld_ptr(cache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
       (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
       "correct shift count");
assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
}
if (index_size == sizeof(u2)) {
} else {
ShouldNotReachHere(); // other sizes not supported here
}
// convert from field index to ConstantPoolCacheEntry index
// and from word index to byte offset
// skip past the header
// construct pointer to cache entry
}
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
Label &ok_is_subtype ) {
// Profile the not-null value's klass.
&ok_is_subtype, NULL);
}
// Separate these two to allow for delay slot in middle
// These are used to do a test and full jump to exception-throwing code.
// %%%%% Could possibly reoptimize this by testing to see if it could use
// a single conditional branch (i.e. if the span is small enough).
// If you go that route, then get rid of the split and give up
// on the delay-slot hack.
// DELAY SLOT
}
// DELAY SLOT
}
// DELAY SLOT
}
}
// And if you cannot use the delay slot, here is a shorthand:
if (ok_condition != never) {
}
}
if (ok_condition != never) {
}
}
if (ok_condition != never) {
}
}
// Check that index is in range for array, then shift index by index_shift, and put arrayOop + shifted_index into res
// Note: res is still shy of address by array offset into object.
void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) {
#ifdef _LP64
// sign extend since tos (index) can be a 32bit value
#endif // _LP64
// check array
// convention: move aberrant index into G3_scratch for exception message
// add offset if didn't do it in delay slot
}
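// Illustrative sketch (assumption): the range check described above. A single
// unsigned compare folds the (index < 0) and (index >= length) tests into
// one; on failure the real code branches to the exception stub with the
// aberrant index in G3_scratch rather than returning a flag.
static bool sketch_index_in_range(int index, int array_length) {
  return (juint)index < (juint)array_length;  // a negative index wraps to huge
}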
void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
// pop array
// check array
}
}
}
}
}
// unlock if synchronized method
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
//
// If there are locked Java monitors
// If throw_monitor_exception
// throws IllegalMonitorStateException
// Else if install_monitor_exception
// installs IllegalMonitorStateException
// Else
// no error processing
bool throw_monitor_exception,
bool install_monitor_exception) {
// get the value of _do_not_unlock_if_synchronized into G1_scratch
// check if synchronized method
// Don't unlock anything if the _do_not_unlock_if_synchronized flag
// is set.
// BasicObjectLock will be first in list, since this is a synchronized method. However, need
// to check that the object has not been unlocked by an explicit monitorexit bytecode.
//Intel: if (throw_monitor_exception) ... else ...
// Entry already unlocked, need to throw exception
//...
// pass top-most monitor elem
if (throw_monitor_exception) {
// Entry already unlocked need to throw an exception
MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
} else {
// Monitor already unlocked during a stack unroll.
// If requested, install an illegal_monitor_state_exception.
// Continue with stack unrolling.
if (install_monitor_exception) {
MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
}
}
// I0, I1: Might contain return value
// Check that all monitors are unlocked
"sizeof BasicObjectLock must be even number of doublewords");
#ifdef ASSERT
{ Label L;
// ensure that Rmptr starts out above (or at) Rlimit
stop("monitor stack has negative size");
bind(L);
}
#endif
delayed()->
// Entry is still locked, need to throw exception
if (throw_monitor_exception) {
MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
} else {
// Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
// Unlock does not block, so don't have to worry about the frame
if (install_monitor_exception) {
MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
}
}
delayed()->
#ifdef ASSERT
{ Label L;
// ensure that Rmptr has not somehow stepped below Rlimit
stop("ran off the end of the monitor stack");
bind(L);
}
#endif
delayed()->
}
}
// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
// If throw_monitor_exception
// throws IllegalMonitorStateException
// Else if install_monitor_exception
// installs IllegalMonitorStateException
// Else
// no error processing
bool throw_monitor_exception,
bool install_monitor_exception) {
// save result (push state before jvmti call and pop it afterwards) and notify jvmti
// return tos
switch (state) {
#ifdef _LP64
#else
#endif
case btos: // fall through
case ctos:
case stos: // fall through
case atos: // fall through
case ftos: // fall through
case dtos: // fall through
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
// C2 expects long results in G1. We can't tell if we're returning to
// interpreted or compiled code, so just be safe.
// Shift bits into high (msb) of G1
// Zero extend low bits
}
#endif /* COMPILER2 */
}
#endif /* CC_INTERP */
// Lock object
//
// Argument - lock_reg points to the BasicObjectLock to be used for locking,
// it must be initialized with the object to lock
if (UseHeavyMonitors) {
}
else {
// load markOop from object into mark_reg
if (UseBiasedLocking) {
}
// get the address of basicLock on stack that will be stored in the object
// we need a temporary register here as we do not want to clobber lock_reg
// (cas clobbers the destination register)
// set mark reg to be (markOop of object | UNLOCK_VALUE)
// initialize the box (Must happen before we update the object mark!)
// compare and exchange object_addr, markOop | 1, stack address of basicLock
// if the compare and exchange succeeded we are done (we saw an unlocked object)
// We did not see an unlocked object so try the fast recursive case
// Check if owner is self by comparing the value in the markOop of object
// with the stack pointer
#ifdef _LP64
#endif
// Composite "andcc" test:
// (a) %sp -vs- markword proximity check, and,
// (b) verify mark word LSBs == 0 (Stack-locked).
//
// FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
// Note that the page size used for %sp proximity testing is arbitrary and is
// unrelated to the actual MMU page size. We use a 'logical' page size of
// 4096 bytes. F..FFF003 is designed to fit conveniently in the SIMM13 immediate
// field of the andcc instruction.
// if condition is true we are done and hence we can store 0 in the displaced
// header indicating it is a recursive lock and be done
// none of the above fast optimizations worked so we have to get into the
// slow case of monitor enter
}
}
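// Illustrative sketch (assumption): the composite recursion test described
// above, in plain C++. 'mark' is the mark word seen by the failed CAS, 'sp'
// the current stack pointer, and 'lock_mask' a stand-in for
// markOopDesc::lock_mask_in_place. If the mark is stack-locked (low bits 0)
// and within one logical page of our SP, this is a recursive stack lock and
// a 0 displaced header can be stored.
static bool sketch_is_recursive_stack_lock(uintptr_t mark, uintptr_t sp,
                                           uintptr_t lock_mask) {
  const uintptr_t page_size = 4096;  // logical page, unrelated to the MMU page
  // one 'andcc'-style mask does both the proximity and the LSBs == 0 tests
  return ((mark - sp) & (lock_mask | ~(page_size - 1))) == 0;
}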
// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Argument - lock_reg points to the BasicObjectLock for lock
// Throw an IllegalMonitorStateException if the object is not locked by the current thread
if (UseHeavyMonitors) {
} else {
if (UseBiasedLocking) {
// load the object out of the BasicObjectLock
}
// Test first if we are in the fast recursive case
Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());
// See if it is still a light weight lock, if so we just unlock
// the object and we are done
if (!UseBiasedLocking) {
// load the object out of the BasicObjectLock
}
// we have the displaced header in displaced_header_reg
// we expect to see the stack address of the basicLock in case the
// lock is still a light weight lock (lock_reg)
// The lock has been converted into a heavy lock and hence
// we need to get into the slow case
}
}
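// Illustrative sketch (assumption): the fast-unlock decision described above,
// with a non-atomic stand-in for the emitted compare-and-swap. A NULL
// displaced header marks a recursive stack lock (nothing to restore); a
// successful CAS swings the object's mark word from the BasicLock address
// back to the displaced header; a failure means the lock was inflated and
// the slow path must run.
static bool sketch_fast_unlock(uintptr_t* obj_mark_addr, uintptr_t lock_addr,
                               uintptr_t displaced_header) {
  if (displaced_header == 0) {
    return true;                        // recursive case: already done
  }
  if (*obj_mark_addr == lock_addr) {    // still light-weight locked by us?
    *obj_mark_addr = displaced_header;  // restore the saved mark word
    return true;
  }
  return false;                         // heavy lock now: go to the slow case
}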
#ifndef CC_INTERP
// Get the method data pointer from the methodOop and set the
// specified register to its value.
}
// Set the method data pointer for the current bcp.
// Test MDO to avoid the call if it is NULL.
}
// Test ImethodDataPtr. If it is null, continue at the specified label
}
#ifdef ASSERT
// If the mdp is valid, it will point to a DataLayout header which is
// consistent with the bcp. The converse is highly probable also.
// %%% should use call_VM_leaf here?
//call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr);
restore();
#endif // ASSERT
}
// Control will flow to "profile_continue" if the counter is less than the
// limit or if we call profile_method()
// if no method data exists, and the counter is high enough, make one
// Test to see if we should create a method data oop
// Use long branches because call_VM() code and following code generated by
// test_backedge_count_for_osr() is large in debug VM.
// Build it now.
}
// Store a value at some constant offset from the method data pointer.
}
bool decrement) {
// Load the counter.
if (decrement) {
// Decrement the register. Set condition codes.
// If the decrement causes the counter to overflow, stay negative
Label L;
// Store the decremented counter, if it is still negative.
bind(L);
} else {
// Increment the register. Set carry flag.
// If the increment causes the counter to overflow, pull back by 1.
// Store the incremented counter.
}
}
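// Illustrative sketch (assumption): the overflow handling described above for
// the increment case, in plain C++. 'addcc' sets the carry flag and 'subc'
// subtracts it back out, so a counter that wraps is pulled back by 1 and
// saturates at the maximum instead of restarting from zero.
static juint sketch_saturating_increment(juint counter) {
  juint sum = counter + 1;  // addcc: increment, notionally setting carry
  if (sum < counter) {      // carry was set: the addition wrapped around
    sum -= 1;               // subc: pull back by 1, saturating at the max
  }
  return sum;
}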
// Increment the value at some constant offset from the method data pointer.
bool decrement) {
// Locate the counter at a fixed offset from the mdp:
}
// Increment the value at some non-fixed (reg + constant) offset from
// the method data pointer.
int constant,
bool decrement) {
// Add the constant to reg to get the offset.
}
// Set a flag value at the current method data pointer position.
// Updates a single byte of the header, to avoid races with other header bits.
// Load the data header
// Set the flag
// Store the modified header.
}
// Test the location at some offset from the method data pointer.
// If it is not equal to value, branch to the not_equal_continue Label.
// Set condition codes to match the nullness of the loaded value.
}
// Update the method data pointer by the displacement located at some fixed
// offset from the method data pointer.
}
// Update the method data pointer by the displacement located at the
// offset (reg + offset_of_disp).
int offset_of_disp,
}
// Update the method data pointer by a simple constant displacement.
}
// Update the method data pointer for a _ret bytecode whose target
// was not among our cached targets.
}
// Count a taken branch in the bytecodes.
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// We are taking a branch. Increment the taken count.
// The method data pointer needs to be updated to reflect the new target.
}
}
// Count a not-taken branch in the bytecodes.
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// We are taking a branch. Increment the not taken count.
// The method data pointer needs to be updated to correspond to the
// next bytecode.
}
}
// Count a non-virtual call in the bytecodes.
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// We are making a call. Increment the count.
// The method data pointer needs to be updated to reflect the new target.
}
}
// Count a final call in the bytecodes.
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// We are making a call. Increment the count.
// The method data pointer needs to be updated to reflect the new target.
}
}
// Count a virtual call in the bytecodes.
bool receiver_can_be_null) {
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
if (receiver_can_be_null) {
// We are making a call. Increment the count for null receiver.
}
// Record the receiver type.
// The method data pointer needs to be updated to reflect the new target.
}
}
if (TypeProfileWidth == 0) {
if (is_virtual_call) {
}
return;
}
// Test this row for both the receiver and for null.
// Take any of three different outcomes:
// 1. found receiver => increment count and goto done
// 2. found null => keep looking for case 1, maybe allocate this cell
// 3. found something else => keep looking for cases 1 and 2
// Case 3 is handled by a recursive call.
// See if the receiver is receiver[n].
// delayed()->tst(scratch);
// The receiver is receiver[n]. Increment count[n].
if (test_for_null_also) {
// Failed the equality check on receiver[n]... Test for null.
// The only thing left to do is handle the null case.
if (is_virtual_call) {
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
} else {
}
break;
}
// Since null is rare, make it be the branch-taken case.
// Put all the "Case 3" tests here.
// Found a null. Keep searching for a matching receiver,
// but remember that this is an empty (unused) slot.
}
}
// In the fall-through case, we found no matching receiver, but we
// observed that receiver[start_row] is NULL.
// Fill in the receiver field and increment the count.
if (start_row > 0) {
}
}
}
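// Illustrative sketch (assumption): the three-outcome row search described
// above, as a plain C++ loop instead of the unrolled, recursive code the
// assembler emits. The receivers/counts arrays are a hypothetical stand-in
// for the ReceiverTypeData row layout.
static void sketch_record_receiver(uintptr_t receiver, uintptr_t* receivers,
                                   int* counts, int rows, int* poly_count) {
  int empty_row = -1;
  for (int i = 0; i < rows; i++) {
    if (receivers[i] == receiver) {      // case 1: match -> bump its count
      counts[i]++;
      return;
    }
    if (receivers[i] == 0 && empty_row < 0) {
      empty_row = i;                     // case 2: remember the empty slot
    }
    // case 3: a different receiver -- keep looking
  }
  if (empty_row >= 0) {                  // claim the empty slot we saw
    receivers[empty_row] = receiver;
    counts[empty_row] = 1;
  } else {
    (*poly_count)++;                     // no room: count the polymorphic case
  }
}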
// Count a ret in the bytecodes.
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// Update the total ret count.
// See if return_bci is equal to bci[n]:
// return_bci is equal to bci[n]. Increment the count.
// The method data pointer needs to be updated to reflect the new target.
}
}
}
// Profile an unexpected null in the bytecodes.
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// The method data pointer needs to be updated.
if (TypeProfileCasts) {
}
}
}
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
if (TypeProfileCasts) {
// Record the object type.
}
// The method data pointer needs to be updated.
}
}
if (ProfileInterpreter && TypeProfileCasts) {
// If no method data exists, go to profile_continue.
// Back up the address, since we have already bumped the mdp.
// *Decrement* the counter. We expect to see zero or small negatives.
}
}
// Count the default case of a switch construct.
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// Update the default case count
scratch);
// The method data pointer needs to be updated.
scratch);
}
}
// Count the index'th case of a switch construct.
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes()
// Update the case count
scratch3);
// The method data pointer needs to be updated.
scratch2);
}
}
// add a InterpMonitorElem to stack (see frame_sparc.hpp)
"sizeof BasicObjectLock must be even number of doublewords");
if (!stack_is_empty) {
// must copy stack contents down
// untested("monitor stack expansion");
// note: must copy from low memory upwards
// On entry to loop,
// Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
// Loop mutates Rtemp
bind( start_copying );
// done copying stack
}
}
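// Illustrative sketch (assumption): the copy-down described above in plain
// C++. The SPARC expression stack grows toward low addresses, so to open a
// monitor-sized hole the live stack words are moved down by 'monitor_words',
// copying from low memory upwards so no word is overwritten before it is read.
static void sketch_copy_stack_down(intptr_t* old_end, intptr_t* old_base,
                                   int monitor_words) {
  for (intptr_t* p = old_end; p <= old_base; p++) {  // lowest address first
    *(p - monitor_words) = *p;  // each word moves down one monitor slot
  }
}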
// Locals
// Note: index must hold the effective address--the iinc template uses it
}
// Just like access_local_ptr but the tag is a returnAddress
}
// Note: index must hold the effective address--the iinc template uses it
}
// First half stored at index n+1 (which grows down from Llocals[n])
}
}
}
#ifdef ASSERT
void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) {
Label L;
// untested("reg area corruption");
stop("regsave area is being clobbered");
bind(L);
}
#endif // ASSERT
}
#ifdef ASSERT
#endif
}
}
#ifdef ASSERT
#endif
}
#ifdef ASSERT
#endif
}
#ifdef ASSERT
#endif
}
}
}
}
#endif /* CC_INTERP */
#ifdef CC_INTERP
#else
#endif /* CC_INTERP */
// Load each counter in a register
// Add the delta to the invocation counter and store the result
// Mask the backedge counter
// Store value
// Add invocation counter + backedge counter
// Note that this macro must leave the backedge_count + invocation_count in Rtmp!
}
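// Illustrative sketch (assumption): the counter arithmetic summarized above,
// in plain C++. The backedge count is masked to strip its low status bits
// before being added to the freshly incremented invocation count; the sum is
// the value the macro leaves in Rtmp for the threshold test.
static juint sketch_bump_invocation_counter(juint* invocation_counter,
                                            juint* backedge_counter,
                                            juint delta, juint count_mask) {
  *invocation_counter += delta;                     // add the delta and store
  juint backedges = *backedge_counter & count_mask; // mask the backedge counter
  return *invocation_counter + backedges;           // combined count (Rtmp)
}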
#ifdef CC_INTERP
#else
#endif /* CC_INTERP */
// Load each counter in a register
// Add the delta to the backedge counter
// Mask the invocation counter, add to backedge counter
// and store the result to memory
// Add backedge + invocation counter
// Note that this macro must leave backedge_count + invocation_count in Rtmp!
}
#ifndef CC_INTERP
// When ProfileInterpreter is on, the backedge_count comes from the
// methodDataOop, whose value does not get reset on the call to
// frequency_counter_overflow(). To avoid excessive calls to the overflow
// routine while the method is being compiled, add a second test to make sure
// the overflow function is called only once every overflow_frequency.
if (ProfileInterpreter) {
}
// overflow in loop, pass branch bytecode
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);
// Was an OSR adapter generated?
// O0 = osr nmethod
// Has the nmethod been invalidated already?
// migrate the interpreter frame off of the stack
// save nmethod
// move OSR nmethod to I1
// OSR buffer to I0
// remove the interpreter frame
// Jump to the osr code.
}
void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) {
}
// local helper function for the verify_oop_or_return_address macro
#ifndef PRODUCT
// assume it is a valid return address if it is inside m and is preceded by a jsr
#endif // PRODUCT
return false;
}
if (!VerifyOops) return;
// the VM documentation for the astore[_wide] bytecode allows
// the TOS to be not only an oop but also a return address
// See if it is an address (in the current method):
// %%% should use call_VM_leaf here?
// Perform a more elaborate out-of-line call
// Not an address; verify it:
}
}
#endif /* CC_INTERP */
// Inline assembly for:
//
// if (thread is in interp_only_mode) {
// InterpreterRuntime::post_method_entry();
// }
// if (DTraceMethodProbes) {
// SharedRuntime::dtrace_method_entry(method, receiver);
// }
// if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
// SharedRuntime::rc_trace_method_entry(method, receiver);
// }
// C++ interpreter only uses this for native methods.
// Whenever JVMTI puts a thread in interp_only_mode, method
// entry/exit events are sent for that thread to track stack
// depth. If it is possible to enter interp_only_mode we add
// the code to check if the event should be sent.
if (JvmtiExport::can_post_interpreter_events()) {
Label L;
bind(L);
}
{
}
// RedefineClasses() tracing support for obsolete method entry
}
}
// Inline assembly for:
//
// if (thread is in interp_only_mode) {
// // save result
// InterpreterRuntime::post_method_exit();
// // restore result
// }
// if (DTraceMethodProbes) {
// SharedRuntime::dtrace_method_exit(thread, method);
// }
//
// Native methods have their result stored in d_tmp and l_tmp
// Java methods have their result stored in the expression stack
// C++ interpreter only uses this for native methods.
// Whenever JVMTI puts a thread in interp_only_mode, method
// entry/exit events are sent for that thread to track stack
// depth. If it is possible to enter interp_only_mode we add
// the code to check if the event should be sent.
Label L;
// Note: frame::interpreter_frame_result has a dependency on how the
// method result is saved across the call to post_method_exit. For
// native methods it assumes the result registers are saved to
// l_scratch and d_scratch. If this changes then the interpreter_frame_result
// implementation will need to be updated too.
bind(L);
}
{
// Dtrace notification
}
}
#ifdef CC_INTERP
#ifdef _LP64
#else
#endif
#else // CC_INTERP
if (is_native_call) {
#ifdef _LP64
#else
#endif
} else {
}
#endif // CC_INTERP
}
#ifdef CC_INTERP
#ifdef _LP64
#else
#endif
#else // CC_INTERP
if (is_native_call) {
#ifdef _LP64
#else
#endif
} else {
}
#endif // CC_INTERP
}
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
} else {
}
}
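// Illustrative sketch (assumption): the predicate documented in the comment
// above, in plain C++. The emitted code increments the counter in memory and
// branches on (new_value & mask); testing the masked value against zero is
// the usual "counter just rolled over the notification frequency" check.
static bool sketch_increment_and_test_mask(juint* counter_addr, juint increment,
                                           juint mask) {
  *counter_addr += increment;           // (*counter_addr += increment)
  return (*counter_addr & mask) == 0;   // 'equal' condition: masked bits clear
}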