/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "interp_masm_x86_32.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/methodOop.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif
// Implementation of InterpreterMacroAssembler
#ifdef CC_INTERP
}
#endif // CC_INTERP
#ifndef CC_INTERP
) {
// interpreter specific
//
// since these are callee saved registers and no blocking/
// GC can happen in leaf calls.
// saved! There used to be a save_bcp() that only happened in
// the ASSERT path (no restore_bcp). Which caused bizarre failures
// when jvm built with ASSERTs.
#ifdef ASSERT
{ Label L;
stop("InterpreterMacroAssembler::call_VM_leaf_base: last_sp != NULL");
bind(L);
}
#endif
// super call
// interpreter specific
// but since they may not have been saved (and we don't want to
// save them here (see note above) the assert is invalid.
}
int number_of_arguments,
bool check_exceptions
) {
#ifdef ASSERT
{ Label L;
stop("InterpreterMacroAssembler::call_VM_base: last_sp != NULL");
bind(L);
}
#endif /* ASSERT */
// interpreter specific
//
// Note: Could avoid restoring locals ptr (callee saved) - however doesn't
// really make a difference for these runtime calls, since they are
// due to GC.
save_bcp();
// super call
MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
// interpreter specific
restore_bcp();
}
if (JvmtiExport::can_pop_frame()) {
Label L;
// Initiate popframe handling only if it is not already being processed. If the flag
// has the popframe_processing bit set, it means that this code is called *during* popframe
// handling - we don't want to reenter.
// Call Interpreter::remove_activation_preserving_args_entry() to get the
// address of the same-named entrypoint in the generated interpreter code.
bind(L);
}
}
+ in_ByteSize(wordSize));
switch (state) {
case ltos:
case btos: // fall through
case ctos: // fall through
case stos: // fall through
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
// Clean up tos value in the thread object
}
if (JvmtiExport::can_force_early_return()) {
Label L;
// Initiate earlyret handling only if it is not already being processed.
// If the flag has the earlyret_processing bit set, it means that this code
// is called *during* earlyret handling - we don't want to reenter.
// Call Interpreter::remove_activation_early_entry() to get the address of the
// same-named entrypoint in the generated interpreter code.
bind(L);
}
}
}
// Extract the constant-pool-cache index operand located at bcp + bcp_offset
// into `reg`. The operand width is selected by `index_size`: u2, u4, or u1.
//
// NOTE(review): every branch body in this copy of the file is empty — the
// assembler instructions that actually load and convert the index appear to
// have been stripped from this source. As written, this function emits no
// code. Restore the branch bodies from the upstream HotSpot sources before
// relying on it.
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_offset, size_t index_size) {
if (index_size == sizeof(u2)) {
// (body stripped — presumably loads an unsigned 16-bit index; confirm upstream)
} else if (index_size == sizeof(u4)) {
// Check if the secondary index definition is still ~x, otherwise
// we have to change the following assembler code to calculate the
// plain index.
// (body stripped — presumably loads a 32-bit index and inverts it; confirm upstream)
} else if (index_size == sizeof(u1)) {
// (body stripped — presumably loads an unsigned 8-bit index; confirm upstream)
} else {
// (body stripped — presumably ShouldNotReachHere(); confirm upstream)
}
}
}
int byte_no,
int bcp_offset,
size_t index_size) {
movptr(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
"correct shift count");
assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
}
// convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset
// skip past the header
}
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. EAX holds the super_klass. Blows ECX.
// Resets EDI to locals. Register sub_klass cannot be any of the above.
// Profile the not-null value's klass.
// Do the check.
// Profile the failure of the check.
}
if (IEEEPrecision) {
}
}
if (IEEEPrecision) {
}
}
// Java Expression Stack
pop(r);
}
pop(r);
}
}
}
}
switch (state) {
case btos: // fall through
case ctos: // fall through
case stos: // fall through
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
}
push(r);
}
push(r);
}
}
// Do not schedule for no AGI! Never write beyond rsp!
}
// Do not schedule for no AGI! Never write beyond rsp!
}
switch (state) {
case btos: // fall through
case ctos: // fall through
case stos: // fall through
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
}
// Helpers for swap and dup
}
}
// set sender sp
// record last_sp
}
// Jump to from_interpreted entry of a call unless single stepping is possible
// in this thread in which case we must call the i2i entry
if (JvmtiExport::can_post_interpreter_events()) {
// JVMTI events, such as single-stepping, are implemented partly by avoiding running
// compiled code in threads for which the event is enabled. Check here for
// interp_only_mode if these events CAN be enabled.
// interp_only is an int, on little endian it is sufficient to test the byte only
// Is a cmpl faster?
}
}
// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts. Intel does not do this.
// Nothing Intel-specific to be done here.
}
}
bool verifyoop) {
if (VerifyActivationFrameSize) {
Label L;
stop("broken stack frame");
bind(L);
}
}
}
}
}
// load next bytecode (load before advancing rsi to prevent AGI)
// advance rsi
}
// load current bytecode
}
// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from syncronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
// If throw_monitor_exception
// throws IllegalMonitorStateException
// Else if install_monitor_exception
// installs IllegalMonitorStateException
// Else
// no error processing
bool throw_monitor_exception,
bool notify_jvmdi) {
// Note: Registers rax, rdx and FPU ST(0) may be in use for the result
// check if synchronized method
movptr(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); // get method access flags
// Don't unlock anything if the _do_not_unlock_if_synchronized flag
// is set.
// unlock monitor
// BasicObjectLock will be first in list, since this is a synchronized method. However, need
// to check that the object has not been unlocked by an explicit monitorexit bytecode.
const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
if (throw_monitor_exception) {
empty_FPU_stack(); // remove possible return value from FPU-stack, otherwise stack could overflow
// Entry already unlocked, need to throw exception
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
} else {
// Monitor already unlocked during a stack unroll.
// If requested, install an illegal_monitor_state_exception.
// Continue with stack unrolling.
if (install_monitor_exception) {
empty_FPU_stack(); // remove possible return value from FPU-stack, otherwise stack could overflow
}
}
// Check that for block-structured locking (i.e., that all locked objects has been unlocked)
// rax, rdx: Might contain return value
// Check that all monitors are unlocked
{
// Entry already locked, need to throw exception
if (throw_monitor_exception) {
empty_FPU_stack(); // remove possible return value from FPU-stack, otherwise stack could overflow
// Throw exception
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
} else {
// Stack unrolling. Unlock object and install illegal_monitor_exception
// Unlock does not block, so don't have to worry about the frame
if (install_monitor_exception) {
empty_FPU_stack(); // remove possible return value from FPU-stack, otherwise stack could overflow
}
}
cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used
}
// jvmti support
if (notify_jvmdi) {
} else {
}
// remove activation
leave(); // remove frame anchor
if (UseSSE) {
// float and double are returned in xmm register in SSE-mode
}
}
}
#endif /* !CC_INTERP */
// Lock object
//
// Argument: rdx : Points to BasicObjectLock to be used for locking. Must
// be initialized with object to lock
if (UseHeavyMonitors) {
} else {
// Load object pointer into obj_reg %rcx
if (UseBiasedLocking) {
// Note: we use noreg for the temporary register since it's hard
// to come up with a free register on all incoming code paths
}
// Load immediate 1 into swap_reg %rax,
// Load (object->mark() | 1) into swap_reg %rax,
// Save (object->mark() | 1) into BasicLock's displaced header
lock();
}
if (PrintBiasedLockingStatistics) {
}
// Test if the oopMark is an obvious stack pointer, i.e.,
// 1) (mark & 3) == 0, and
// 2) rsp <= mark < mark + os::pagesize()
//
// These 3 tests can be done by evaluating the following
// expression: ((mark - rsp) & (3 - os::vm_page_size())),
// assuming both stack pointer and pagesize have their
// least significant 2 bits clear.
// NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
// Save the test result, for recursive case, the result is zero
if (PrintBiasedLockingStatistics) {
}
// Call the runtime routine for slow case
}
}
// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Argument: rdx : Points to BasicObjectLock structure for lock
// Throw an IllegalMonitorException if object is not locked by current thread
//
// Uses: rax, rbx, rcx, rdx
if (UseHeavyMonitors) {
} else {
save_bcp(); // Save in case of exception
// Convert from BasicObjectLock structure to object and BasicLock structure
// Store the BasicLock address into %rax,
// Load oop into obj_reg(%rcx)
// Free entry
if (UseBiasedLocking) {
}
// Load the old header from BasicLock structure
// Test for recursion
// zero for recursive case
// Atomic swap back the old header
// zero for recursive case
// Call the runtime routine for slow case.
restore_bcp();
}
}
#ifndef CC_INTERP
// Test ImethodDataPtr. If it is null, continue at the specified label
}
// Set the method data pointer for the current bcp.
// Test MDO to avoid the call if it is NULL.
// rbx,: method
// rsi: bcp
// rax,: mdi
// mdo is guaranteed to be non-zero here, we checked for it before the call.
}
#ifdef ASSERT
// If the mdp is valid, it will point to a DataLayout header which is
// consistent with the bcp. The converse is highly probable also.
// rbx,: method
// rsi: bcp
// rcx: mdp
#endif // ASSERT
}
// %%% this seems to be used to store counter data which is surely 32bits
// however 64bit side stores 64 bits which seems wrong
}
int constant,
bool decrement) {
// Counter address
}
bool decrement) {
// %%% 64bit treats this as 64 bit which seems unlikely
if (decrement) {
// Decrement the register. Set condition codes.
// If the decrement causes the counter to overflow, stay negative
Label L;
bind(L);
} else {
"flow-free idiom only works with 1");
// Increment the register. Set carry flag.
// If the increment causes the counter to overflow, pull back by 1.
}
}
int constant,
bool decrement) {
}
// Set the flag
}
int offset,
if (test_value_out == noreg) {
} else {
// Put the test value into a register, so caller can use it:
}
}
}
// Advance the method-data pointer by a displacement; by its signature this
// overload takes the displacement location as `offset_of_disp` with `reg`
// as an index/scratch register.
//
// NOTE(review): the body is empty in this copy of the file — the
// implementation appears to have been stripped. Restore it from the
// upstream HotSpot sources before use; as written this emits no code.
void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp) {
}
}
}
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// Otherwise, assign to mdp
// We are taking a branch. Increment the taken count.
// We inline increment_mdp_data_at to return bumped_count in a register
//increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
// %%% 64bit treats these cells as 64 bit but they seem to be 32 bit
sbbl(bumped_count, 0);
// The method data pointer needs to be updated to reflect the new target.
}
}
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// We are taking a branch. Increment the not taken count.
// The method data pointer needs to be updated to correspond to the next bytecode
}
}
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// We are making a call. Increment the count.
// The method data pointer needs to be updated to reflect the new target.
}
}
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// We are making a call. Increment the count.
// The method data pointer needs to be updated to reflect the new target.
}
}
bool receiver_can_be_null) {
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
if (receiver_can_be_null) {
// We are making a call. Increment the count for null receiver.
}
// Record the receiver type.
// The method data pointer needs to be updated to reflect the new target.
}
}
if (TypeProfileWidth == 0) {
if (is_virtual_call) {
}
return;
}
// Test this row for both the receiver and for null.
// Take any of three different outcomes:
// 1. found receiver => increment count and goto done
// 2. found null => keep looking for case 1, maybe allocate this cell
// 3. found something else => keep looking for cases 1 and 2
// Case 3 is handled by a recursive call.
// See if the receiver is receiver[n].
// (Reg2 now contains the receiver from the CallData.)
// The receiver is receiver[n]. Increment count[n].
// Failed the equality check on receiver[n]... Test for null.
// The only thing left to do is handle the null case.
if (is_virtual_call) {
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
} else {
}
break;
}
// Since null is rare, make it be the branch-taken case.
// Put all the "Case 3" tests here.
// Found a null. Keep searching for a matching receiver,
// but remember that this is an empty (unused) slot.
}
}
// In the fall-through case, we found no matching receiver, but we
// observed the receiver[start_row] is NULL.
// Fill in the receiver field and increment the count.
if (start_row > 0) {
}
}
bool is_virtual_call) {
}
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// Update the total ret count.
// See if return_bci is equal to bci[n]:
// return_bci is equal to bci[n]. Increment the count.
// The method data pointer needs to be updated to reflect the new target.
}
}
}
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// The method data pointer needs to be updated.
if (TypeProfileCasts) {
}
}
}
if (ProfileInterpreter && TypeProfileCasts) {
// If no method data exists, go to profile_continue.
// Back up the address, since we have already bumped the mdp.
// *Decrement* the counter. We expect to see zero or small negatives.
}
}
{
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// The method data pointer needs to be updated.
if (TypeProfileCasts) {
// Record the object type.
restore_locals(); // Restore EDI
}
}
}
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// Update the default case count
// The method data pointer needs to be updated.
}
}
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes()
// index is positive and so should have correct value if this code were
// used on 64bits
// Update the case count
// The method data pointer needs to be updated.
}
}
#endif // !CC_INTERP
}
#ifndef CC_INTERP
}
#endif /* CC_INTERP */
// track stack depth. If it is possible to enter interp_only_mode we add
// the code to check if the event should be sent.
if (JvmtiExport::can_post_interpreter_events()) {
Label L;
bind(L);
}
{
}
// RedefineClasses() tracing support for obsolete method entry
}
}
// track stack depth. If it is possible to enter interp_only_mode we add
// the code to check if the event should be sent.
Label L;
// Note: frame::interpreter_frame_result has a dependency on how the
// method result is saved across the call to post_method_exit. If this
// is changed then the interpreter_frame_result implementation will
// need to be updated too.
// For c++ interpreter the result is always stored at a known location in the frame
// template interpreter will leave it on the top of the stack.
bind(L);
}
{
}
}
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
if (!preloaded) {
}
}