/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "interp_masm_x86_64.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/methodOop.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif
// Implementation of InterpreterMacroAssembler
#ifdef CC_INTERP
void InterpreterMacroAssembler::get_method(Register reg) {
  movptr(reg, Address(rbp, -((int)sizeof(BytecodeInterpreter) + 2 * wordSize)));
  movptr(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method)));
}
#endif // CC_INTERP

#ifndef CC_INTERP

void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
                                                  int number_of_arguments) {
// interpreter specific
//
// Note: No need to save/restore the bcp (r13) and locals (r14) pointers
//       since these are callee saved registers and no blocking/
//       GC can happen in leaf calls.
// Further Note: DO NOT save/restore bcp/locals here. If a caller has
// already saved them so that it can use r13/r14 as temporaries, then a
// save/restore here would DESTROY the copy the caller
// saved! There used to be a save_bcp() that only happened in
// the ASSERT path (no restore_bcp). Which caused bizarre failures
// when jvm built with ASSERTs.
#ifdef ASSERT
  {
    Label L;
    cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize),
           (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    stop("InterpreterMacroAssembler::call_VM_leaf_base:"
         " last_sp != NULL");
    bind(L);
  }
#endif
  // super call
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
// interpreter specific
// but since they may not have been saved (and we don't want to
// save them here (see note above)) the assert is invalid.
}
void InterpreterMacroAssembler::call_VM_base(Register oop_result,
                                             Register java_thread,
                                             Register last_java_sp,
                                             address  entry_point,
                                             int      number_of_arguments,
                                             bool     check_exceptions) {
// interpreter specific
//
// Note: Could avoid restoring locals ptr (callee saved) - however doesn't
//       really make a difference for these runtime calls, since they are
//       slow anyway. Btw., bcp must be saved/restored since it may change
//       due to GC.
// assert(java_thread == noreg , "not expecting a precomputed java thread");
save_bcp();
#ifdef ASSERT
  {
    Label L;
    cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize),
           (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    stop("InterpreterMacroAssembler::call_VM_base:"
         " last_sp != NULL");
    bind(L);
  }
#endif /* ASSERT */
  // super call
  MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
                               entry_point, number_of_arguments,
                               check_exceptions);
  // interpreter specific
  restore_bcp();
  restore_locals();
}
void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
  if (JvmtiExport::can_pop_frame()) {
Label L;
// Initiate popframe handling only if it is not already being
// processed. If the flag has the popframe_processing bit set, it
// means that this code is called *during* popframe handling - we
// don't want to reenter.
// This method is only called just after the call into the vm in
// call_VM_base, so the arg registers are available.
// Call Interpreter::remove_activation_preserving_args_entry() to get the
// address of the same-named entrypoint in the generated interpreter code.
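    // A sketch of the flag test and tail call the comments describe
    // (register choice is illustrative; the popframe bits live on JavaThread):
    movl(c_rarg0, Address(r15_thread, JavaThread::popframe_condition_offset()));
    testl(c_rarg0, JavaThread::popframe_pending_bit);
    jcc(Assembler::zero, L);
    testl(c_rarg0, JavaThread::popframe_processing_bit);
    jcc(Assembler::notZero, L);
    call_VM_leaf(CAST_FROM_FN_PTR(address,
                                  Interpreter::remove_activation_preserving_args_entry));
    jmp(rax);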
bind(L);
}
}
void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  movptr(rcx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
  const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset());
  switch (state) {
case btos: // fall through
case ctos: // fall through
case stos: // fall through
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
  // Clean up tos value in the thread object
  movl(tos_addr, (int) ilgl);
  movl(val_addr, (int32_t) NULL_WORD);
}
void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
  if (JvmtiExport::can_force_early_return()) {
Label L;
// Initiate earlyret handling only if it is not already being processed.
// If the flag has the earlyret_processing bit set, it means that this code
// is called *during* earlyret handling - we don't want to reenter.
// Call Interpreter::remove_activation_early_entry() to get the address of the
// same-named entrypoint in the generated interpreter code.
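    // A sketch of the check and tail call the comments describe (c_rarg0 as a
    // scratch register; JVMTI thread-state fields as declared in this source tree):
    movptr(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
    testptr(c_rarg0, c_rarg0);
    jcc(Assembler::zero, L);  // no JVMTI thread state: nothing to do
    movl(c_rarg0, Address(c_rarg0, JvmtiThreadState::earlyret_state_offset()));
    cmpl(c_rarg0, JvmtiThreadState::earlyret_pending);
    jcc(Assembler::notEqual, L);
    movptr(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
    movl(c_rarg0, Address(c_rarg0, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry),
                 c_rarg0);
    jmp(rax);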
bind(L);
}
}
void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg,
                                                                 int bcp_offset) {
  assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
  movl(reg, Address(r13, bcp_offset));
  bswapl(reg);
  shrl(reg, 16);
}
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
                                                       int bcp_offset,
                                                       size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    load_unsigned_short(index, Address(r13, bcp_offset));
  } else if (index_size == sizeof(u4)) {
    assert(EnableInvokeDynamic, "giant index used only for JSR 292");
    movl(index, Address(r13, bcp_offset));
// Check if the secondary index definition is still ~x, otherwise
// we have to change the following assembler code to calculate the
// plain index.
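    // A sketch of the conversion the comment describes: the secondary index is
    // encoded as ~x, so a bitwise NOT recovers the plain index.
    assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123,
           "else change next line");
    notl(index);  // convert to plain index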
  } else if (index_size == sizeof(u1)) {
    load_unsigned_byte(index, Address(r13, bcp_offset));
  } else {
    ShouldNotReachHere();
  }
}
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
                                                           Register index,
                                                           int bcp_offset,
                                                           size_t index_size) {
  assert(cache != index, "must use different registers");
  get_cache_index_at_bcp(index, bcp_offset, index_size);
  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry index
  shll(index, 2);
}
void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register index,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
  // We use a 32-bit load here since the layout of 64-bit words on
  // little-endian machines allows us to do so.
movl(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
  const int shift_count = (1 + byte_no) * BitsPerByte;
  assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
         (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
         "correct shift count");
  shrl(bytecode, shift_count);
  assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
  andl(bytecode, ConstantPoolCacheEntry::bytecode_1_mask);
}
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                               Register tmp,
                                                               int bcp_offset,
                                                               size_t index_size) {
  assert(cache != tmp, "must use different register");
  get_cache_index_at_bcp(tmp, bcp_offset, index_size);
  assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
  // convert from field index to ConstantPoolCacheEntry index
  // and from word offset to byte offset
  shll(tmp, 2 + LogBytesPerWord);
  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
  // skip past the header
  addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
  addptr(cache, tmp);  // construct pointer to cache entry
}
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
// Args:
// rax: superklass
// Rsub_klass: subklass
//
// Kills:
// rcx, rdi
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype) {
  assert(Rsub_klass != rax, "rax holds superklass");
  assert(Rsub_klass != r14, "r14 holds locals");
  assert(Rsub_klass != r13, "r13 holds bcp");
  assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
  assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi

  // Do the check.
  check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx

  // Profile the failure of the check.
}
// Java Expression Stack

void InterpreterMacroAssembler::pop_ptr(Register r) {
  pop(r);
}

void InterpreterMacroAssembler::pop_i(Register r) {
  // XXX can't use pop currently, upper half non clean
  movl(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void InterpreterMacroAssembler::pop_l(Register r) {
}

void InterpreterMacroAssembler::pop_f(XMMRegister r) {
}

void InterpreterMacroAssembler::pop_d(XMMRegister r) {
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  push(r);
}

void InterpreterMacroAssembler::push_i(Register r) {
  push(r);
}

void InterpreterMacroAssembler::push_l(Register r) {
}

void InterpreterMacroAssembler::push_f(XMMRegister r) {
}

void InterpreterMacroAssembler::push_d(XMMRegister r) {
}
void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
  case atos: pop_ptr();                 break;
  case btos:
  case ctos:
  case stos:
  case itos: pop_i();                   break;
  case ltos: pop_l();                   break;
  case ftos: pop_f();                   break;
  case dtos: pop_d();                   break;
  case vtos: /* nothing to do */        break;
  default:   ShouldNotReachHere();
  }
  verify_oop(rax, state);
}
void InterpreterMacroAssembler::push(TosState state) {
  verify_oop(rax, state);
  switch (state) {
  case atos: push_ptr();                break;
  case btos:
  case ctos:
  case stos:
  case itos: push_i();                  break;
  case ltos: push_l();                  break;
  case ftos: push_f();                  break;
  case dtos: push_d();                  break;
  case vtos: /* nothing to do */        break;
  default  : ShouldNotReachHere();
  }
}
// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
}

void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
  // set sender sp
  lea(r13, Address(rsp, wordSize));
  // record last_sp
  movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), r13);
}
// Jump to from_interpreted entry of a call unless single stepping is possible
// in this thread in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
  prepare_to_jump_from_interpreted();

  if (JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
// JVMTI events, such as single-stepping, are implemented partly by avoiding running
// compiled code in threads for which the event is enabled. Check here for
// interp_only_mode if these events CAN be enabled.
// interp_only is an int, on little endian it is sufficient to test the byte only
// Is a cmpl faster?
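    // A sketch of the byte-wide test the comments describe (method holds the
    // methodOop; run_compiled_code is declared above):
    cmpb(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0);
    jccb(Assembler::zero, run_compiled_code);
    jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
    bind(run_compiled_code);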
  }
  jmp(Address(method, methodOopDesc::from_interpreted_offset()));
}
// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts. amd64 does not do this.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
  // Nothing amd64 specific to be done here
}

void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
  dispatch_next(state, step);
}
void InterpreterMacroAssembler::dispatch_base(TosState state,
                                              address* table,
                                              bool verifyoop) {
  verify_FPU(1, state);
  if (VerifyActivationFrameSize) {
    Label L;
    mov(rcx, rbp);
    subptr(rcx, rsp);
    int32_t min_frame_size =
      (frame::link_offset - frame::interpreter_frame_initial_sp_offset) *
      wordSize;
    cmpptr(rcx, (int32_t)min_frame_size);
    jcc(Assembler::greaterEqual, L);
    stop("broken stack frame");
    bind(L);
  }
  if (verifyoop) {
    verify_oop(rax, state);
  }
  lea(rscratch1, ExternalAddress((address)table));
  jmp(Address(rscratch1, rbx, Address::times_8));
}
void InterpreterMacroAssembler::dispatch_only(TosState state) {
  dispatch_base(state, Interpreter::dispatch_table(state));
}

void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}

void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state), false);
}
void InterpreterMacroAssembler::dispatch_next(TosState state, int step) {
  // load next bytecode (load before advancing r13 to prevent AGI)
  load_unsigned_byte(rbx, Address(r13, step));
  // advance r13
  increment(r13, step);
  dispatch_base(state, Interpreter::dispatch_table(state));
}

void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  load_unsigned_byte(rbx, Address(r13, 0));
  dispatch_base(state, table);
}
// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
// If throw_monitor_exception
// throws IllegalMonitorStateException
// Else if install_monitor_exception
// installs IllegalMonitorStateException
// Else
// no error processing
void InterpreterMacroAssembler::remove_activation(
        TosState state,
        Register ret_addr,
        bool throw_monitor_exception,
        bool install_monitor_exception,
        bool notify_jvmdi) {
// Note: Registers rdx xmm0 may be in use for the
// result check if synchronized method
// get the value of _do_not_unlock_if_synchronized into rdx
// get method access flags
// Don't unlock anything if the _do_not_unlock_if_synchronized flag
// is set.
// unlock monitor
// BasicObjectLock will be first in list, since this is a
// synchronized method. However, need to check that the object has
// not been unlocked by an explicit monitorexit bytecode.
  const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
                        wordSize - (int) sizeof(BasicObjectLock));
// We use c_rarg1 so that if we go slow path it will be the correct
// register for unlock_object to pass to VM directly
if (throw_monitor_exception) {
// Entry already unlocked, need to throw exception
} else {
// Monitor already unlocked during a stack unroll. If requested,
// install an illegal_monitor_state_exception. Continue with
// stack unrolling.
if (install_monitor_exception) {
}
}
  // Check that for block-structured locking (i.e., that all locked
  // objects have been unlocked)
// rax: Might contain return value
// Check that all monitors are unlocked
{
    const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
// We use c_rarg1 so that if we go slow path it will be the correct
// register for unlock_object to pass to VM directly
// with top-most entry
// monitor block
// Entry already locked, need to throw exception
if (throw_monitor_exception) {
// Throw exception
} else {
// Stack unrolling. Unlock object and install illegal_monitor_exception.
// Unlock does not block, so don't have to worry about the frame.
// We don't have to preserve c_rarg1 since we are going to throw an exception.
if (install_monitor_exception) {
}
}
// check if current entry is used
}
// jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);     // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
  }
  // remove activation
  // get sender sp
  movptr(rbx,
         Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
  leave();                           // remove frame anchor
  pop(ret_addr);                     // get return address
  mov(rsp, rbx);                     // set sp to sender sp
}
#endif // CC_INTERP
// Lock object
//
// Args:
// c_rarg1: BasicObjectLock to be used for locking
//
// Kills:
// rax
// c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
// rscratch1, rscratch2 (scratch regs)
void InterpreterMacroAssembler::lock_object(Register lock_reg) {
  assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");

  if (UseHeavyMonitors) {
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);
  } else {
    Label done;

    const Register swap_reg = rax;     // Must use rax for cmpxchg instruction
    const Register obj_reg  = c_rarg3; // Will contain the oop

    const int obj_offset  = BasicObjectLock::obj_offset_in_bytes();
    const int lock_offset = BasicObjectLock::lock_offset_in_bytes();
    const int mark_offset = lock_offset +
                            BasicLock::displaced_header_offset_in_bytes();

    // Load object pointer into obj_reg %c_rarg3
    movptr(obj_reg, Address(lock_reg, obj_offset));
if (UseBiasedLocking) {
}
// Load immediate 1 into swap_reg %rax
// Load (object->mark() | 1) into swap_reg %rax
// Save (object->mark() | 1) into BasicLock's displaced header
    assert(lock_offset == 0,
           "displaced header must be first word in BasicObjectLock");
if (PrintBiasedLockingStatistics) {
}
// Test if the oopMark is an obvious stack pointer, i.e.,
// 1) (mark & 7) == 0, and
// 2) rsp <= mark < mark + os::pagesize()
//
// These 3 tests can be done by evaluating the following
// expression: ((mark - rsp) & (7 - os::vm_page_size())),
// assuming both stack pointer and pagesize have their
// least significant 3 bits clear.
// NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
// Save the test result, for recursive case, the result is zero
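    // A sketch of the evaluation (swap_reg holds the mark left by the cmpxchg;
    // mark_offset and done are defined at the top of this function):
    subptr(swap_reg, rsp);
    andptr(swap_reg, 7 - os::vm_page_size());
    movptr(Address(lock_reg, mark_offset), swap_reg);
    jcc(Assembler::zero, done);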
if (PrintBiasedLockingStatistics) {
}
    // Call the runtime routine for slow case
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);
}
}
// Unlocks an object. Used in monitorexit bytecode and
// remove_activation. Throws an IllegalMonitorStateException if the object
// is not locked by the current thread.
//
// Args:
// c_rarg1: BasicObjectLock for lock
//
// Kills:
// rax
// c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
// rscratch1, rscratch2 (scratch regs)
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");

  if (UseHeavyMonitors) {
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
            lock_reg);
  } else {
    Label done;

    const Register swap_reg   = rax;     // Must use rax for cmpxchg instruction
    const Register header_reg = c_rarg2; // Will contain the old oopMark
    const Register obj_reg    = c_rarg3; // Will contain the oop
save_bcp(); // Save in case of exception
// Convert from BasicObjectLock structure to object and BasicLock
// structure Store the BasicLock address into %rax
// Load oop into obj_reg(%c_rarg3)
// Free entry
if (UseBiasedLocking) {
}
// Load the old header from BasicLock structure
// Test for recursion
// zero for recursive case
// Atomic swap back the old header
// zero for recursive case
    // Call the runtime routine for slow case.
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
            lock_reg);
restore_bcp();
}
}
#ifndef CC_INTERP

void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
                                                         Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  movptr(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize));
  testptr(mdp, mdp);
  jcc(Assembler::zero, zero_continue);
}
// Set the method data pointer for the current bcp.
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Test MDO to avoid the call if it is NULL.
// rbx: method
// r13: bcp
// rax: mdi
// mdo is guaranteed to be non-zero here, we checked for it before the call.
}
void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
// If the mdp is valid, it will point to a DataLayout header which is
// consistent with the bcp. The converse is highly probable also.
// rbx: method
// r13: bcp
// c_rarg3: mdp
#endif // ASSERT
}
void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
                                                int constant,
                                                Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address data(mdp_in, constant);
  movptr(data, value);
}
void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      int constant,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Counter address
  Address data(mdp_in, constant);
  increment_mdp_data_at(data, decrement);
}
void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // %%% this does 64bit counters at best it is wasting space
  // at worst it is a rare bug when counters overflow
  if (decrement) {
    // Decrement the register. Set condition codes.
    addptr(data, (int32_t) -DataLayout::counter_increment);
    // If the decrement causes the counter to overflow, stay negative
    Label L;
    jcc(Assembler::negative, L);
    addptr(data, (int32_t) DataLayout::counter_increment);
    bind(L);
  } else {
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Increment the register. Set carry flag.
    addptr(data, DataLayout::counter_increment);
    // If the increment causes the counter to overflow, pull back by 1.
    sbbptr(data, (int32_t)0);
  }
}
}
void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      Register reg,
                                                      int constant,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address data(mdp_in, reg, Address::times_1, constant);
  increment_mdp_data_at(data, decrement);
}
void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
                                                int flag_byte_constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  int header_offset = in_bytes(DataLayout::header_offset());
  int header_bits = DataLayout::flag_mask_to_header_mask(flag_byte_constant);
  // Set the flag
  orl(Address(mdp_in, header_offset), header_bits);
}
void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
                                                 int offset,
                                                 Register value,
                                                 Register test_value_out,
                                                 Label& not_equal_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  if (test_value_out == noreg) {
    cmpptr(value, Address(mdp_in, offset));
  } else {
    // Put the test value into a register, so caller can use it:
    movptr(test_value_out, Address(mdp_in, offset));
    cmpptr(test_value_out, value);
  }
  jcc(Assembler::notEqual, not_equal_continue);
}
void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address disp_address(mdp_in, offset_of_disp);
  addptr(mdp_in, disp_address);
  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
}

void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     Register reg,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
  addptr(mdp_in, disp_address);
  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
}

void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
                                                       int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  addptr(mdp_in, constant);
  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
}

void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(return_bci); // save/restore across call_VM
  call_VM(noreg,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
          return_bci);
  pop(return_bci);
}
void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
                                                     Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    // Otherwise, assign to mdp
    test_method_data_pointer(mdp, profile_continue);
    // We are taking a branch. Increment the taken count.
    // We inline increment_mdp_data_at to return bumped_count in a register
    //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
    Address data(mdp, in_bytes(JumpData::taken_offset()));
    movptr(bumped_count, data);
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    addptr(bumped_count, DataLayout::counter_increment);
    sbbptr(bumped_count, 0);
    movptr(data, bumped_count); // Store back out
    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
    bind(profile_continue);
  }
}
void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;
// If no method data exists, go to profile_continue.
// We are taking a branch. Increment the not taken count.
// The method data pointer needs to be updated to correspond to
// the next bytecode
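    // A sketch of the body the comments describe (offsets via BranchData,
    // pointer update via the mdp helpers defined above):
    test_method_data_pointer(mdp, profile_continue);
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);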
}
}
void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;
// If no method data exists, go to profile_continue.
// We are making a call. Increment the count.
// The method data pointer needs to be updated to reflect the new target.
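    // A sketch of the body the comments describe:
    test_method_data_pointer(mdp, profile_continue);
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);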
}
}
void InterpreterMacroAssembler::profile_final_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;
// If no method data exists, go to profile_continue.
// We are making a call. Increment the count.
// The method data pointer needs to be updated to reflect the new target.
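    // A sketch, as in profile_call but sized for a VirtualCallData cell:
    test_method_data_pointer(mdp, profile_continue);
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);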
}
}
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                     Register mdp,
                                                     Register reg2,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;
// If no method data exists, go to profile_continue.
if (receiver_can_be_null) {
// We are making a call. Increment the count for null receiver.
}
// Record the receiver type.
// The method data pointer needs to be updated to reflect the new target.
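    // A sketch of the steps the comments describe (order compressed; the
    // method-data test normally precedes the null-receiver check above):
    test_method_data_pointer(mdp, profile_continue);
    record_klass_in_profile(receiver, mdp, reg2, true);
    update_mdp_by_constant(mdp,
                           in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);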
}
}
// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows. At the same time, it remembers
// the location of the first empty row. (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree. Hence this helper
// function is recursive, to generate the required tree structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register mdp,
                                        Register reg2, int start_row,
                                        Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
    }
    return;
  }

  int last_row = VirtualCallData::row_limit() - 1;
  assert(start_row <= last_row, "must be work left to do");
// Test this row for both the receiver and for null.
// Take any of three different outcomes:
// 1. found receiver => increment count and goto done
// 2. found null => keep looking for case 1, maybe allocate this cell
// 3. found something else => keep looking for cases 1 and 2
// Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the receiver is receiver[n].
    int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
    test_mdp_data_at(mdp, recvr_offset, receiver,
                     (test_for_null_also ? reg2 : noreg),
                     next_test);
// (Reg2 now contains the receiver from the CallData.)
    // The receiver is receiver[n]. Increment count[n].
    int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
    increment_mdp_data_at(mdp, count_offset);
    jmp(done);
    bind(next_test);
    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on receiver[n]... Test for null.
      testptr(reg2, reg2);
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (is_virtual_call) {
          jccb(Assembler::zero, found_null);
          // Receiver did not match any saved receiver and there is no empty
          // row for it. Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
          jmp(done);
          bind(found_null);
        } else {
          jcc(Assembler::notZero, done);
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      jcc(Assembler::zero, found_null);

      // Put all the "Case 3" tests here.
      record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1,
                                     done, is_virtual_call);

      // Found a null. Keep searching for a matching receiver,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }
// In the fall-through case, we found no matching receiver, but we
// observed the receiver[start_row] is NULL.
// Fill in the receiver field and increment the count.
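  // A sketch of the fill-in described above (offsets via VirtualCallData;
  // reg2 reused as a temporary, as in the rows handled earlier):
  int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
  set_mdp_data_at(mdp, recvr_offset, receiver);
  int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
  movl(reg2, DataLayout::counter_increment);
  set_mdp_data_at(mdp, count_offset, reg2);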
  if (start_row > 0) {
    jmp(done);
  }
}
// Example state machine code for three profile rows:
// // main copy of decision tree, rooted at row[1]
// if (row[0].rec == rec) { row[0].incr(); goto done; }
// if (row[0].rec != NULL) {
// // inner copy of decision tree, rooted at row[1]
// if (row[1].rec == rec) { row[1].incr(); goto done; }
// if (row[1].rec != NULL) {
// // degenerate decision tree, rooted at row[2]
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
// row[2].init(rec); goto done;
// } else {
// // remember row[1] is empty
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// row[1].init(rec); goto done;
// }
// } else {
// // remember row[0] is empty
// if (row[1].rec == rec) { row[1].incr(); goto done; }
// if (row[2].rec == rec) { row[2].incr(); goto done; }
// row[0].init(rec); goto done;
// }
// done:
void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register mdp,
                                                        Register reg2,
                                                        bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);

  bind(done);
}
void InterpreterMacroAssembler::profile_ret(Register return_bci,
                                            Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;
// If no method data exists, go to profile_continue.
// Update the total ret count.
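    // A sketch of the steps the comments describe, including the row loop
    // that the bci tests below run inside:
    test_method_data_pointer(mdp, profile_continue);
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;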
// See if return_bci is equal to bci[n]:
// return_bci is equal to bci[n]. Increment the count.
// The method data pointer needs to be updated to reflect the new target.
}
}
}
void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;
    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
}
}
void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp, Register tmp) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;
    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter. We expect to see zero or small negatives.
    increment_mdp_data_at(mdp, count_offset, true);

    bind(profile_continue);
}
}
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;
// If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
// Record the object type.
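      // A sketch: reuse the row-search helper defined above (not a virtual call):
      record_klass_in_profile(klass, mdp, reg2, false);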
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}
void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;
// If no method data exists, go to profile_continue.
// Update the default case count
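    // A sketch of the steps the comments describe:
    test_method_data_pointer(mdp, profile_continue);
    increment_mdp_data_at(mdp,
                          in_bytes(MultiBranchData::default_count_offset()));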
    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         in_bytes(MultiBranchData::default_displacement_offset()));
    bind(profile_continue);
}
}
void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register mdp,
                                                    Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;
// If no method data exists, go to profile_continue.
// Build the base (index * per_case_size_in_bytes()) +
// case_array_offset_in_bytes()
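    // A sketch of the computation the two comment lines above describe:
    test_method_data_pointer(mdp, profile_continue);
    movl(reg2, in_bytes(MultiBranchData::per_case_size()));
    imulptr(index, reg2);
    addptr(index, in_bytes(MultiBranchData::case_array_offset()));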
    // Update the case count
    increment_mdp_data_at(mdp,
                          index,
                          in_bytes(MultiBranchData::relative_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         index,
                         in_bytes(MultiBranchData::relative_displacement_offset()));
    bind(profile_continue);
}
}
void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
  if (state == atos) {
    MacroAssembler::verify_oop(reg);
  }
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
}
#endif // !CC_INTERP
void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
  // track stack depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
if (JvmtiExport::can_post_interpreter_events()) {
Label L;
    movl(rdx, Address(r15_thread, JavaThread::interp_only_mode_offset()));
    testl(rdx, rdx);
    jcc(Assembler::zero, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::post_method_entry));
    bind(L);
}
  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    get_method(c_rarg1);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 r15_thread, c_rarg1);
  }
  // RedefineClasses() tracing support for obsolete method entry
  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
    get_method(c_rarg1);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      r15_thread, c_rarg1);
  }
}
void InterpreterMacroAssembler::notify_method_exit(
    TosState state, NotifyMethodExitMode mode) {
  // Whenever JVMTI is in interp_only_mode, method entry/exit events are sent to
  // track stack depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
// Note: frame::interpreter_frame_result has a dependency on how the
// method result is saved across the call to post_method_exit. If this
// is changed then the interpreter_frame_result implementation will
// need to be updated too.
// For c++ interpreter the result is always stored at a known location in the frame
    // template interpreter will leave it on the top of the stack.
    NOT_CC_INTERP(push(state);)
    movl(rdx, Address(r15_thread, JavaThread::interp_only_mode_offset()));
    testl(rdx, rdx);
    jcc(Assembler::zero, L);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    bind(L);
    NOT_CC_INTERP(pop(state));
}
  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    NOT_CC_INTERP(push(state);)
    get_method(c_rarg1);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
                 r15_thread, c_rarg1);
    NOT_CC_INTERP(pop(state));
  }
}
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, int mask,
                                                        Register scratch, bool preloaded,
                                                        Condition cond, Label* where) {
  if (!preloaded) {
    movl(scratch, counter_addr);
  }
  incrementl(scratch, increment);
  movl(counter_addr, scratch);
  andl(scratch, mask);
  jcc(cond, *where);
}