/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/bytecodeInterpreter.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/exceptions.hpp"
#ifdef TARGET_OS_ARCH_linux_x86
# include "orderAccess_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "orderAccess_linux_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "orderAccess_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "orderAccess_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "orderAccess_solaris_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "orderAccess_windows_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "orderAccess_linux_arm.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "orderAccess_linux_ppc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "orderAccess_bsd_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "orderAccess_bsd_zero.inline.hpp"
#endif
#ifdef CC_INTERP
/*
* USELABELS - If using GCC, then use labels for the opcode dispatching
 * rather than a switch statement. This improves performance because it
 * gives us the opportunity to have the instructions that calculate the
* next opcode to jump to be intermixed with the rest of the instructions
* that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
*/
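/*
 * A minimal sketch (not the original macros) of the difference: switch
 * dispatch funnels every opcode through one indirect jump at the loop top,
 * while GCC's computed gotos give each opcode handler its own jump:
 *
 *   static const void* const dispatch_table[] = { &&opc_nop, &&opc_iadd };
 *   goto *dispatch_table[*pc];          // USELABELS: one jump per handler
 *
 *   while (1) switch (*pc) {            // portable fallback
 *     case Bytecodes::_iadd: ...; break;
 *   }
 */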
#ifdef __GNUC__
/*
ASSERT signifies debugging. It is much easier to step through bytecodes if we
don't use the computed goto approach.
*/
#ifndef ASSERT
#define USELABELS
#endif
#endif
#ifdef USELABELS
#define CASE(opcode) opc ## opcode
#define DEFAULT opc_default
#else
#define CASE(opcode) case Bytecodes:: opcode
#define DEFAULT default
#endif
/*
* PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
 * opcode before going back to the top of the while loop, rather than having
* the top of the while loop handle it. This provides a better opportunity
* for instruction scheduling. Some compilers just do this prefetch
* automatically. Some actually end up with worse performance if you
* force the prefetch. Solaris gcc seems to do better, but cc does worse.
*/
#define PREFETCH_OPCCODE
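/*
 * Sketch of the two loop shapes (illustrative, not the real macros). With
 * prefetching, the load of the next opcode is issued inside the handler so
 * it can overlap the dispatch branch:
 *
 *   opcode = *pc;                 // prefetched before entering the loop
 *   while (1) {
 *     switch (opcode) {
 *     case Bytecodes::_iload:
 *       ...
 *       pc += 1; opcode = *pc;    // fetch the next opcode here...
 *       continue;                 // ...rather than at the top of the loop
 *     }
 *   }
 */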
/*
Interpreter safepoint: it is expected that the interpreter will have no
handles of its own creation live at an interpreter safepoint. Therefore we
run a HandleMarkCleaner and trash all handles allocated in the call chain
since the JavaCalls::call_helper invocation that initiated the chain.
There really shouldn't be any handles remaining to trash but this is cheap
in relation to a safepoint.
*/
#define SAFEPOINT                                                       \
    if ( SafepointSynchronize::is_synchronizing()) {                    \
        {                                                               \
          /* zap freed handles rather than GC'ing them */               \
          HandleMarkCleaner __hmc(THREAD);                              \
        }                                                               \
        CALL_VM(SafepointSynchronize::block(THREAD), handle_exception); \
    }
/*
* VM_JAVA_ERROR - Macro for throwing a java exception from
* the interpreter loop. Should really be a CALL_VM but there
* is no entry point to do the transition to vm so we just
* do it by hand here.
*/
#define VM_JAVA_ERROR_NO_JUMP(name, msg)                              \
    DECACHE_STATE();                                                  \
    SET_LAST_JAVA_FRAME();                                            \
    {                                                                 \
       ThreadInVMfromJava trans(THREAD);                              \
       Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \
    }                                                                 \
    RESET_LAST_JAVA_FRAME();                                          \
    CACHE_STATE();

// Normal throw of a java error
#define VM_JAVA_ERROR(name, msg)                                      \
    VM_JAVA_ERROR_NO_JUMP(name, msg)                                  \
    goto handle_exception;
#ifdef PRODUCT
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)
#else
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)                                                          \
{                                                                                                    \
    BytecodeCounter::_counter_value++;                                                               \
    BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++;                                         \
    if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
    if (TraceBytecodes) {                                                                            \
      CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0,                                         \
                                   topOfStack[Interpreter::expr_index_at(1)],                        \
                                   topOfStack[Interpreter::expr_index_at(2)]),                       \
                                   handle_exception);                                                \
    }                                                                                                \
}
#endif
#ifdef VM_JVMTI
/* NOTE: (kbr) This macro must be called AFTER the PC has been
incremented. JvmtiExport::at_single_stepping_point() may cause a
breakpoint opcode to get inserted at the current PC to allow the
debugger to coalesce single-step events.
As a result, if we call at_single_stepping_point() we refetch the opcode
to get the current opcode. This will override any other prefetching
that might have occurred.
*/
#define DEBUGGER_SINGLE_STEP_NOTIFY() \
{ \
if (_jvmti_interp_events) { \
    if (JvmtiExport::should_post_single_step()) { \
      DECACHE_STATE(); \
      SET_LAST_JAVA_FRAME(); \
      ThreadInVMfromJava trans(THREAD); \
      JvmtiExport::at_single_stepping_point(THREAD, \
                                            istate->method(), \
                                            pc); \
      RESET_LAST_JAVA_FRAME(); \
      CACHE_STATE(); \
if (THREAD->pop_frame_pending() && \
!THREAD->pop_frame_in_process()) { \
goto handle_Pop_Frame; \
} \
} \
} \
}
#else
#define DEBUGGER_SINGLE_STEP_NOTIFY()
#endif
/*
* CONTINUE - Macro for executing the next opcode.
*/
#ifdef USELABELS
// Have to do the dispatch this way in C++ because otherwise gcc complains about crossing an
// initialization (which is the initialization of the table pointer...)
#define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
#define CONTINUE {                           \
        opcode = *pc;                        \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY();       \
        DISPATCH(opcode);                    \
    }
#else
#ifdef PREFETCH_OPCCODE
#define CONTINUE {                           \
        opcode = *pc;                        \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY();       \
        continue;                            \
    }
#else
#define CONTINUE {                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY();       \
        continue;                            \
    }
#endif
#endif
/*
 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
 */
#define UPDATE_PC(opsize) {pc += opsize; }
#define UPDATE_PC_AND_TOS(opsize, stack) \
    {pc += opsize; MORE_STACK(stack); }
/*
* UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
* and executing the next opcode. It's somewhat similar to the combination
* of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
*/
#ifdef USELABELS
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack)                     \
    { pc += opsize; MORE_STACK(stack);                                    \
      DO_UPDATE_INSTRUCTION_COUNT(opcode); DEBUGGER_SINGLE_STEP_NOTIFY(); \
      DISPATCH(opcode); }
#define UPDATE_PC_AND_CONTINUE(opsize)                                    \
    { pc += opsize;                                                       \
      DO_UPDATE_INSTRUCTION_COUNT(opcode); DEBUGGER_SINGLE_STEP_NOTIFY(); \
      DISPATCH(opcode); }
#else
#ifdef PREFETCH_OPCCODE
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack)                     \
    { pc += opsize; opcode = *pc; MORE_STACK(stack);                      \
      DO_UPDATE_INSTRUCTION_COUNT(opcode); DEBUGGER_SINGLE_STEP_NOTIFY(); \
      goto do_continue; }
#define UPDATE_PC_AND_CONTINUE(opsize)                                    \
    { pc += opsize; opcode = *pc;                                         \
      DO_UPDATE_INSTRUCTION_COUNT(opcode); DEBUGGER_SINGLE_STEP_NOTIFY(); \
      goto do_continue; }
#else
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack)                     \
    { pc += opsize; MORE_STACK(stack);                                    \
      DO_UPDATE_INSTRUCTION_COUNT(opcode); DEBUGGER_SINGLE_STEP_NOTIFY(); \
      goto do_continue; }
#define UPDATE_PC_AND_CONTINUE(opsize)                                    \
    { pc += opsize;                                                       \
      DO_UPDATE_INSTRUCTION_COUNT(opcode); DEBUGGER_SINGLE_STEP_NOTIFY(); \
      goto do_continue; }
#endif /* PREFETCH_OPCCODE */
#endif /* USELABELS */
// About to call a new method, save the adjusted pc and return to frame manager
#define UPDATE_PC_AND_RETURN(opsize) \
   DECACHE_TOS();                    \
   istate->set_bcp(pc+opsize);       \
   return;
/*
* For those opcodes that need to have a GC point on a backwards branch
*/
// Backedge counting is kind of strange. The asm interpreter will increment
// the backedge counter as a separate counter but it does its comparisons
// to the sum (scaled) of invocation counter and backedge count to make
// a decision. Seems kind of odd to sum them together like that.
#define DO_BACKEDGE_CHECKS(skip, branch_pc) \
    if ((skip) <= 0) { \
if (UseLoopCounter) { \
bool do_OSR = UseOnStackReplacement; \
BACKEDGE_COUNT->increment(); \
if (do_OSR) { \
nmethod* osr_nmethod; \
return; \
} \
} \
} /* UseCompiler ... */ \
SAFEPOINT; \
}
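/*
 * Illustrative sketch (hypothetical helper name) of the decision described
 * above: the invocation and backedge counts are summed before being
 * compared against the compilation threshold.
 *
 *   static bool should_compile(int invocation_count, int backedge_count) {
 *     return (invocation_count + backedge_count) >= CompileThreshold;
 *   }
 */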
/*
* For those opcodes that need to have a GC point on a backwards branch
*/
/*
* Macros for caching and flushing the interpreter state. Some local
* variables need to be flushed out to the frame before we do certain
 * things (like pushing frames or becoming gc safe) and some need to
 * be recached later (like after popping a frame). We could use one
 * macro to cache or decache everything, but this would be less than
* optimal because we don't always need to cache or decache everything
* because some things we know are already cached or decached.
*/
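/*
 * For example (a sketch; the real macros follow below), a VM call that may
 * GC is bracketed by a decache/recache pair:
 *
 *   DECACHE_STATE();          // flush cached pc/tos into the frame
 *   SET_LAST_JAVA_FRAME();
 *   InterpreterRuntime::monitorenter(THREAD, mon);
 *   RESET_LAST_JAVA_FRAME();
 *   CACHE_STATE();            // reload; GC may have moved things
 */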
#define CACHE_FRAME()
/*
* CHECK_NULL - Macro for throwing a NullPointerException if the object
* passed is a null ref.
* On some architectures/platforms it should be possible to do this implicitly
*/
#define CHECK_NULL(obj_) \
    if ((obj_) == NULL) { \
      VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), ""); \
    }
/*
 * Alignment
 */
#define VMalignWordUp(val)          (((uintptr_t)(val) + 3) & ~3)
// Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
#define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();

// Reload interpreter state after calling the VM or a possible GC
#define CACHE_STATE() \
CACHE_TOS(); \
CACHE_PC(); \
CACHE_CP(); \
CACHE_LOCALS();
// Call the VM without checking for pending exceptions
#define CALL_VM_NOCHECK(func)                    \
          DECACHE_STATE();                       \
          SET_LAST_JAVA_FRAME();                 \
          func;                                  \
          RESET_LAST_JAVA_FRAME();               \
          CACHE_STATE();                         \
          if (THREAD->pop_frame_pending() &&     \
              !THREAD->pop_frame_in_process()) { \
            goto handle_Pop_Frame;               \
          }
// Call the VM and check for pending exceptions
#define CALL_VM(func, label) {                             \
          CALL_VM_NOCHECK(func);                           \
          if (THREAD->has_pending_exception()) goto label; \
        }
/*
* BytecodeInterpreter::run(interpreterState istate)
* BytecodeInterpreter::runWithChecks(interpreterState istate)
*
* The real deal. This is where byte codes actually get interpreted.
* Basically it's a big while loop that iterates until we return from
* the method passed in.
*
 * runWithChecks is used when JVMTI is enabled.
*
*/
#if defined(VM_JVMTI)
void
BytecodeInterpreter::runWithChecks(interpreterState istate) {
#else
void
BytecodeInterpreter::run(interpreterState istate) {
#endif
// In order to simplify some tests based on switches set at runtime
// we invoke the interpreter a single time after switches are enabled
// and set simpler to test variables rather than method calls or complex
// boolean expressions.
  static int initialized = 0;
  static int checkit = 0;
  static intptr_t* c_addr = NULL;
  static intptr_t  c_value;

  if (checkit && *c_addr != c_value) {
    os::breakpoint();
  }
#ifdef VM_JVMTI
static bool _jvmti_interp_events = 0;
#endif
#ifdef ASSERT
assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
#ifndef SHARK
#endif // !SHARK
}
// Verify linkages.
  interpreterState l = istate;
  do {
    assert(l == l->_self_link, "bad link");
    l = l->_prev_link;
  } while (l != NULL);
  // Screwups with stack management usually cause us to overwrite istate,
  // so save a copy so we can verify it.
#endif
#ifdef LOTS_OF_REGS
#else
#endif
#ifdef USELABELS
  const static void* const opclabels_data[256] = {
    /* &&opc_nop, &&opc_aconst_null, &&opc_iconst_m1, ... one label per bytecode */
  };
  register uintptr_t* dispatch_table = (uintptr_t*)&opclabels_data[0];
#endif /* USELABELS */
#ifdef ASSERT
// this will trigger a VERIFY_OOP on entry
}
#endif
// #define HACK
#ifdef HACK
bool interesting = false;
#endif // HACK
/* QQQ this should be a stack method so we don't know actual direction */
"Stack top out of range");
  switch (istate->msg()) {
    case initialize: {
      if (initialized++) ShouldNotReachHere(); // Only one initialize call
#ifdef VM_JVMTI
      _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
#endif
      return;
    }
break;
case method_entry: {
// count invocations
if (_compiling) {
if (ProfileInterpreter) {
}
if (INVOCATION_COUNT->reached_InvocationLimit()) {
// We no longer retry on a counter overflow
// istate->set_msg(retry_method);
// THREAD->clr_do_not_unlock();
// return;
}
}
// initialize
os::breakpoint();
}
#ifdef HACK
      {
        interesting = true;
      }
#endif // HACK
// lock method if synchronized
      if (METHOD->is_synchronized()) {
        // oop rcvr = locals[0].j.r;
        oop rcvr;
        if (METHOD->is_static()) {
          rcvr = METHOD->constants()->pool_holder()->klass_part()->java_mirror();
        } else {
          rcvr = LOCALS_OBJECT(0);
        }
// The initial monitor is ours for the taking
if (UseBiasedLocking) {
if (mark->has_bias_pattern()) {
// The bias pattern is present in the object's header. Need to check
// whether the bias owner and the epoch are both still current.
          uintptr_t anticipated_bias_locking_value =
            (((uintptr_t)rcvr->klass()->klass_part()->prototype_header() | (uintptr_t)THREAD) ^ (uintptr_t)mark) &
            ~((uintptr_t) markOopDesc::age_mask_in_place);
          if (anticipated_bias_locking_value != 0) {
// At this point we know that the header has the bias pattern and
// that we are not the bias owner in the current epoch. We need to
// figure out more details about the state of the header in order to
// know what operations can be legally performed on the object's
// header.
// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
// Biasing is still enabled for this data type. See whether the
// epoch of the current bias is still valid, meaning that the epoch
// bits of the mark word are equal to the epoch bits of the
// prototype header. (Note that the prototype header's epoch bits
// only change at a safepoint.) If not, attempt to rebias the object
// toward the current thread. Note that we must be absolutely sure
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
// The epoch of the current bias is still valid but we know nothing
// about the owner; it might be set or it might be clear. Try to
// acquire the bias of the object using an atomic operation. If this
// fails we will go in to the runtime to revoke the object's bias.
// Note that we first construct the presumed unbiased header so we
// don't accidentally blow away another thread's valid bias.
                if (Atomic::cmpxchg_ptr((intptr_t)THREAD | (intptr_t) unbiased, (intptr_t*) rcvr->mark_addr(), (intptr_t) unbiased) != (intptr_t) unbiased) {
}
} else {
// At this point we know the epoch has expired, meaning that the
// current "bias owner", if any, is actually invalid. Under these
// circumstances _only_, we are allowed to use the current header's
// value as the comparison value when doing the cas to acquire the
// bias in the current epoch. In other words, we allow transfer of
// the bias from one thread to another directly in this situation.
              if (Atomic::cmpxchg_ptr((intptr_t)THREAD | (intptr_t) rcvr->klass()->klass_part()->prototype_header(),
                                      (intptr_t*) rcvr->mark_addr(), (intptr_t) mark) != (intptr_t) mark) {
                CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
              }
}
} else {
// The prototype mark in the klass doesn't have the bias bit set any
// more, indicating that objects of this data type are not supposed
// to be biased any more. We are going to try to reset the mark of
// this object to the prototype value and fall through to the
// CAS-based locking scheme. Note that if our CAS fails, it means
// that another thread raced us for the privilege of revoking the
// bias of this particular object, so it's okay to continue in the
// normal locking code.
//
// (*counters->revoked_lock_entry_count_addr())++;
success = false;
}
}
}
} else {
success = false;
}
}
if (!success) {
// Is it simple recursive case?
} else {
}
}
}
}
// Notify jvmti
#ifdef VM_JVMTI
if (_jvmti_interp_events) {
        // Whenever JVMTI puts a thread in interp_only_mode, method
        // entry/exit events are sent for that thread to track stack depth.
if (THREAD->is_interp_only_mode()) {
}
}
#endif /* VM_JVMTI */
goto run;
}
case popping_frame: {
// returned from a java call to pop the frame, restart the call
// clear the message so we don't confuse ourselves later
ShouldNotReachHere(); // we don't return this.
goto run;
}
case method_resume: {
// resume
os::breakpoint();
}
#ifdef HACK
      {
        interesting = true;
      }
#endif // HACK
// returned from a java call, continue executing.
goto handle_Pop_Frame;
}
// Update the pc by the saved amount of the invoke bytecode size
goto run;
}
case deopt_resume2: {
// Returned from an opcode that will reexecute. Deopt was
// a result of a PopFrame request.
//
goto run;
}
case deopt_resume: {
// Returned from an opcode that has completed. The stack has
// the result all we need to do is skip across the bytecode
// and continue (assuming there is no exception pending)
//
// compute continuation length
//
// Note: it is possible to deopt at a return_register_finalizer opcode
// because this requires entering the vm to do the registering. While the
// opcode is complete we can't advance because there are no more opcodes
      // much like trying to deopt at a poll return. In that case we simply
// get out of here
//
// this will do the right thing even if an exception is pending.
goto handle_return;
}
goto run;
}
case got_monitors: {
// continue locking now that we have a monitor to use
      // we expect to find a newly allocated monitor at the "top" of the monitor stack.
// derefing's lockee ought to provoke implicit null check
// find a free monitor
// Is it simple recursive case?
} else {
}
}
goto run;
}
default: {
fatal("Unexpected message from frame manager");
}
}
run:
#ifdef PREFETCH_OPCCODE
#endif
#ifndef USELABELS
while (1)
#endif
{
#ifndef PREFETCH_OPCCODE
#endif
      // Seems like this happens twice per opcode. At worst this is only
      // needed at entry to the loop.
// DEBUGGER_SINGLE_STEP_NOTIFY();
      /* Using this label avoids double breakpoints when quickening and
       * when returning from transition frames.
       */
/* QQQ Hmm this has knowledge of direction, ought to be a stack method */
#ifdef USELABELS
#else
switch (opcode)
#endif
{
/* Push miscellaneous constants onto the stack. */
      CASE(_aconst_null):
          SET_STACK_OBJECT(NULL, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
/* Load constant from constant pool: */
/* Push a 1-byte signed integer value onto the stack. */
/* Push a 2-byte signed integer constant onto the stack. */
/* load from local variable */
\
\
OPC_LOAD_n(0);
OPC_LOAD_n(1);
OPC_LOAD_n(2);
OPC_LOAD_n(3);
/* store to a local variable */
switch(opcode) {
// Be nice to see what this generates.... QQQ
}
default:
}
}
OPC_STORE_n(0);
OPC_STORE_n(1);
OPC_STORE_n(2);
OPC_STORE_n(3);
OPC_DSTORE_n(0);
OPC_DSTORE_n(1);
OPC_DSTORE_n(2);
OPC_DSTORE_n(3);
/* stack pop, dup, and insert opcodes */
}
/* Perform various binary integer operations */
"/ by zero"); \
} \
STACK_INT(-1)), \
-2); \
{ \
if (test) { \
"/ by long zero"); \
} \
} \
/* First long at (-1,-2) next long at (-3,-4) */ \
STACK_LONG(-1)), \
-3); \
}
/* Perform various binary floating number operations */
STACK_DOUBLE(-1)), \
-3); \
} \
STACK_FLOAT(-1)), \
-2); \
/* Shift operations
* Shift left int and long: ishl, lshl
*/
STACK_INT(-1)), \
-2); \
{ \
STACK_INT(-1)), \
-2); \
}
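/*
 * Illustrative (not the original macros): Java masks the shift distance,
 * using only the low 5 bits for int shifts and the low 6 bits for long
 * shifts, so a conforming platform implementation might look like
 *
 *   static inline jint  VMintShl (jint op1, jint op2)  { return op1 << (op2 & 0x1f); }
 *   static inline jlong VMlongShl(jlong op1, jint op2) { return op1 << (op2 & 0x3f); }
 */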
/* Increment local variable by constant */
{
// locals[pc[1]].j.i += (jbyte)(pc[2]);
}
/* negate the value on the top of the stack */
{
}
{
}
/* Conversion operations */
{
// this is ugly QQQ
SET_STACK_LONG(r, 1);
}
{
// this is ugly QQQ (why cast to jlong?? )
SET_STACK_DOUBLE(r, 1);
}
{
SET_STACK_INT(r, 0);
}
{
SET_STACK_FLOAT(VMlong2Float(r), 0);
}
{
}
{
SET_STACK_LONG(r, 1);
}
{
jfloat f;
jdouble r;
f = STACK_FLOAT(-1);
r = (jdouble) f;
SET_STACK_DOUBLE(r, 1);
}
{
MORE_STACK(-2);
SET_STACK_INT(r1, 0);
}
{
MORE_STACK(-2);
SET_STACK_FLOAT(r1, 0);
}
{
MORE_STACK(-2);
}
/* comparison operators */
CONTINUE; \
} \
CONTINUE; \
}
CONTINUE; \
}
CONTINUE; \
}
CONTINUE; \
}
COMPARISON_OP(lt, <);
COMPARISON_OP(gt, >);
COMPARISON_OP(le, <=);
COMPARISON_OP(ge, >=);
/* Goto pc at specified offset in switch table. */
// Does this really need a full backedge check (osr?)
}
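/*
 * Sketch of the tableswitch walk the elided code performs (close to the
 * original; VMalignWordUp rounds pc+1 up to a 4-byte boundary):
 *
 *   int32_t* lpc  = (int32_t*)VMalignWordUp(pc + 1);
 *   int32_t  key  = STACK_INT(-1);
 *   int32_t  low  = Bytes::get_Java_u4((address)&lpc[1]);
 *   int32_t  high = Bytes::get_Java_u4((address)&lpc[2]);
 *   int32_t  skip = Bytes::get_Java_u4((address)&lpc[0]);   // default offset
 *   if (key >= low && key <= high) {
 *     skip = Bytes::get_Java_u4((address)&lpc[key - low + 3]);
 *   }
 *   DO_BACKEDGE_CHECKS(skip, pc);
 *   UPDATE_PC_AND_TOS_AND_CONTINUE(skip, -1);               // pop the key
 */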
/* Goto pc whose table entry matches specified key */
while (--npairs >= 0) {
lpc += 2;
break;
}
}
}
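/*
 * The loop fragment above is the lookupswitch linear scan; each
 * match/offset pair occupies two 4-byte cells, hence lpc += 2 per
 * iteration (a sketch, close to the elided code):
 *
 *   int32_t* lpc    = (int32_t*)VMalignWordUp(pc + 1);
 *   int32_t  key    = STACK_INT(-1);
 *   int32_t  skip   = Bytes::get_Java_u4((address)lpc);     // default offset
 *   int32_t  npairs = Bytes::get_Java_u4((address)&lpc[1]);
 *   lpc += 2;
 *   while (--npairs >= 0) {
 *     if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
 *       skip = Bytes::get_Java_u4((address)&lpc[1]);
 *       break;
 *     }
 *     lpc += 2;
 *   }
 *   UPDATE_PC_AND_TOS_AND_CONTINUE(skip, -1);               // pop the key
 */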
{
STACK_FLOAT(-1),
-2);
}
{
STACK_DOUBLE(-1),
SET_STACK_INT(r, 0);
}
{
MORE_STACK(-4);
SET_STACK_INT(r, 0);
}
/* Return from a method */
{
// Allow a safepoint before returning to frame manager.
goto handle_return;
}
{
// Allow a safepoint before returning to frame manager.
goto handle_return;
}
}
goto handle_return;
}
// Allow a safepoint before returning to frame manager.
goto handle_return;
}
/* Array access byte-codes */
/* Every array access byte-code starts out like this */
// arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
#define ARRAY_INTRO(arrayOff)                                                  \
      arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff);                      \
      jint index = STACK_INT(arrayOff + 1);                                    \
      char message[jintAsStringSize];                                          \
      CHECK_NULL(arrObj);                                                      \
      if ((uint32_t)index >= (uint32_t)arrObj->length()) {                     \
          sprintf(message, "%d", index);                                       \
          VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message); }
/* 32-bit loads. These handle conversion from < 32-bit types */
{ \
ARRAY_INTRO(-2); \
extra; \
-2); \
}
/* 64-bit loads */
{ \
ARRAY_INTRO(-2); \
extra; \
UPDATE_PC_AND_CONTINUE(1); \
}
/* 32-bit stores. These handle conversion to < 32-bit types */
{ \
ARRAY_INTRO(-3); \
extra; \
}
/* 64-bit stores */
{ \
ARRAY_INTRO(-4); \
extra; \
}
/*
* This one looks different because of the assignability check
*/
ARRAY_INTRO( -3);
// arrObj, index are set
/* Check assignability of rhsObject into arrObj */
klassOop elemKlassOop = ((objArrayKlass*) arrObj->klass()->klass_part())->element_klass(); // superklass EAX
//
          // Check for compatibility. This check must not GC!!
// Seems way more expensive now that we must dispatch
//
          if (rhsKlassOop != elemKlassOop && !rhsKlassOop->klass_part()->is_subtype_of(elemKlassOop)) { // ebx->is...
            VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "");
          }
}
// *(oop*)(((address) arrObj->base(T_OBJECT)) + index * sizeof(oop)) = rhsObject;
// Mark the card
OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)elem_loc >> CardTableModRefBS::card_shift], 0);
}
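/*
 * The release_store above is the card-table write barrier: one byte of the
 * card table covers 2^card_shift (512) bytes of heap, and storing 0
 * ("dirty") tells the collector to rescan that region for updated oops.
 * A sketch with hypothetical names:
 *
 *   static inline void post_write_barrier(void* field_addr, volatile jbyte* byte_map_base) {
 *     byte_map_base[(uintptr_t)field_addr >> CardTableModRefBS::card_shift] = 0;  // 0 == dirty
 *   }
 */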
{
}
// derefing's lockee ought to provoke implicit null check
// find a free monitor or one already allocated for this object
// if we find a matching object then we need a new monitor
// since this is recursive enter
while (most_recent != limit ) {
most_recent++;
}
// Is it simple recursive case?
} else {
}
}
} else {
UPDATE_PC_AND_RETURN(0); // Re-execute
}
}
// derefing's lockee ought to provoke implicit null check
// find our monitor slot
while (most_recent != limit ) {
// If it isn't recursive we either must swap old header or call the runtime
// restore object for the slow case
}
}
}
most_recent++;
}
// Need to throw illegal monitor state exception
}
/* All of the non-quick opcodes. */
/* -Set clobbersCpIndex true if the quickened opcode clobbers the
* constant pool index in the instruction.
*/
{
// QQQ Need to make this as inlined as possible. Probably need to
// split all the bytecode cases out so c++ compiler has a chance
// for constant prop to fold everything possible away.
}
#ifdef VM_JVMTI
if (_jvmti_interp_events) {
int *count_addr;
// Check to see if a field modification watch has been set
// before we take the time to call into the VM.
if ( *count_addr > 0 ) {
} else {
}
obj,
cache),
}
}
#endif /* VM_JVMTI */
} else {
}
//
// Now store the result on the stack
//
if (cache->is_volatile()) {
MORE_STACK(1);
} else {
MORE_STACK(1);
}
} else {
MORE_STACK(1);
} else {
MORE_STACK(1);
}
}
}
{
}
#ifdef VM_JVMTI
if (_jvmti_interp_events) {
int *count_addr;
// Check to see if a field modification watch has been set
// before we take the time to call into the VM.
if ( *count_addr > 0 ) {
}
else {
} else {
}
}
obj,
}
}
#endif /* VM_JVMTI */
// QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
// out so c++ compiler has a chance for constant prop to fold everything possible away.
int count;
count = -1;
--count;
}
} else {
--count;
}
//
// Now store the result
//
if (cache->is_volatile()) {
} else {
}
OrderAccess::storeload();
} else {
} else {
}
}
}
// Make sure klass is initialized and doesn't have a finalizer
// If the TLAB isn't pre-zeroed then we'll have to do it
if (UseTLAB) {
}
need_zero = true;
// Try allocate in shared eden
goto retry;
}
}
}
// Initialize object (if nonzero size and need) and then the header
if (need_zero ) {
if (obj_size > 0 ) {
}
}
if (UseBiasedLocking) {
} else {
}
result->set_klass_gap(0);
SET_STACK_OBJECT(result, 0);
}
}
}
// Slow case allocation
}
}
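/*
 * Sketch of the shared-eden path mentioned above (close to the elided
 * code): allocation is a compare-and-swap bump of the heap top pointer,
 * retried when another thread wins the race.
 *
 *   HeapWord* compare_to = *Universe::heap()->top_addr();
 *   HeapWord* new_top    = compare_to + obj_size;
 *   if (new_top <= *Universe::heap()->end_addr()) {
 *     if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
 *       goto retry;                      // lost the race; reload and retry
 *     }
 *     result = (oop) compare_to;
 *   }
 */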
// stack grows down, dimensions are up!
//adjust pointer to start of stack element
}
if (ProfileInterpreter) {
// needs Profile_checkcast QQQ
}
// Constant pool may have actual klass or unresolved klass. If it is
// unresolved we must resolve it
}
//
            // Check for compatibility. This check must not GC!!
// Seems way more expensive now that we must dispatch
//
if (objKlassOop != klassOf &&
}
} else {
if (UncommonNullCast) {
// istate->method()->set_null_cast_seen();
// [RGV] Not sure what to do here!
}
}
SET_STACK_INT(0, -1);
} else {
// Constant pool may have actual klass or unresolved klass. If it is
// unresolved we must resolve it
}
//
            // Check for compatibility. This check must not GC!!
// Seems way more expensive now that we must dispatch
//
} else {
SET_STACK_INT(0, -1);
}
}
{
          u2 index;
          bool wide = false;
          int incr = 2; // frequent case
          if (opcode == Bytecodes::_ldc) {
            index = pc[1];
          } else {
            index = Bytes::get_Java_u2(pc+1);
            incr = 3;
            wide = true;
          }
case JVM_CONSTANT_Integer:
break;
case JVM_CONSTANT_Float:
break;
case JVM_CONSTANT_String:
break;
case JVM_CONSTANT_Class:
break;
break;
default: ShouldNotReachHere();
}
}
{
case JVM_CONSTANT_Long:
break;
case JVM_CONSTANT_Double:
break;
default: ShouldNotReachHere();
}
}
CASE(_fast_aldc): {
if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic.
// The verifier will stop it. However, if we get past the verifier,
// this will stop the thread in a reasonable way, without crashing the JVM.
}
int incr;
incr = 2;
} else {
incr = 3;
}
// We are resolved if the f1 field contains a non-null object (CallSite, etc.)
// This kind of CP cache entry does not need to match the flags byte, because
// there is a 1-1 relation between bytecode type and CP entry type.
// GC might move cache while returning from VM call.
}
SET_STACK_OBJECT(result, 0);
}
if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic.
// The verifier will stop it. However, if we get past the verifier,
// this will stop the thread in a reasonable way, without crashing the JVM.
}
// We are resolved if the f1 field contains a non-null object (CallSite, etc.)
// This kind of CP cache entry does not need to match the flags byte, because
// there is a 1-1 relation between bytecode type and CP entry type.
if (cache->is_f1_null()) {
// GC might move cache while returning from VM call.
}
if (cache->has_appendix()) {
MORE_STACK(1);
}
UPDATE_PC_AND_RETURN(0); // I'll be back...
}
if (!EnableInvokeDynamic) {
}
// GC might move cache while returning from VM call.
}
if (cache->has_appendix()) {
MORE_STACK(1);
}
UPDATE_PC_AND_RETURN(0); // I'll be back...
}
// QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
// out so c++ compiler has a chance for constant prop to fold everything possible away.
}
// Special case of invokeinterface called for virtual method of
// java.lang.Object. See cpCacheOop.cpp for details.
// This code isn't produced by javac, but could be produced by
// another compliant java compiler.
if (cache->is_forced_virtual()) {
} else {
// get receiver
// Same comments as invokevirtual apply here
}
#ifdef VM_JVMTI
}
#endif /* VM_JVMTI */
UPDATE_PC_AND_RETURN(0); // I'll be back...
}
// this could definitely be cleaned up QQQ
// instanceKlass* interface = (instanceKlass*) iclass->klass_part();
// get receiver
int i;
}
// If the interface isn't found, this class doesn't implement this
// interface. The link resolver checks this but only for the first
// time this interface is called.
if (i == int2->itable_length()) {
}
}
#ifdef VM_JVMTI
}
#endif /* VM_JVMTI */
UPDATE_PC_AND_RETURN(0); // I'll be back...
}
CASE(_invokestatic): {
// QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
// out so c++ compiler has a chance for constant prop to fold everything possible away.
}
{
else {
// get receiver
// this works but needs a resourcemark and seems to create a vtable on every call:
// methodOop callee = rcvr->klass()->klass_part()->vtable()->method_at(cache->f2_as_index());
//
// this fails with an assert
// instanceKlass* rcvrKlass = instanceKlass::cast(STACK_OBJECT(-parms)->klass());
// but this works
/*
Executing this code in java.lang.String:
public String(char value[]) {
this.count = value.length;
this.value = (char[])value.clone();
}
a find on rcvr->klass()->klass_part() reports:
{type array char}{type array class}
- klass: {other class}
      but using instanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes an assertion failure
because rcvr->klass()->klass_part()->oop_is_instance() == 0
However it seems to have a vtable in the right location. Huh?
*/
}
} else {
}
}
#ifdef VM_JVMTI
}
#endif /* VM_JVMTI */
UPDATE_PC_AND_RETURN(0); // I'll be back...
}
}
/* Allocate memory for a new java object. */
}
/* Throw an exception. */
// set pending_exception so we use common code
goto handle_exception;
}
/* goto and jsr. They are exactly the same except jsr pushes
* the address of the next instruction first.
*/
/* push bytecode index on stack */
MORE_STACK(1);
/* FALL THROUGH */
}
{
}
/* push return address on the stack */
MORE_STACK(1);
/* FALL THROUGH */
}
{
}
/* return from a jsr or jsr_w */
}
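/*
 * Sketch (close to the elided code): jsr pushes the bci of the following
 * instruction as a returnAddress and then branches; ret reloads pc from
 * the local variable named by its operand.
 *
 *   // jsr: a 3-byte instruction, so the return bci is the current bci + 3
 *   SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0);
 *
 *   // ret:
 *   pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1]));
 */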
/* debugger breakpoint */
CACHE_STATE();
goto opcode_switch;
}
goto finish;
} /* switch(opc) */
#ifdef USELABELS
    check_for_exception:
#endif
{
      if (!THREAD->has_pending_exception()) {
        CONTINUE;
      }
/* We will be gcsafe soon, so flush our state. */
DECACHE_PC();
goto handle_exception;
}
do_continue: ;
} /* while (1) interpreter loop */
  // An exception exists in the thread state; see whether this activation can handle it
// Prevent any subsequent HandleMarkCleaner in the VM
// from freeing the except_oop handle.
// expression stack is emptied
    CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()),
            handle_exception);
if (continuation_bci >= 0) {
// Place exception on top of stack
SET_STACK_OBJECT(except_oop(), 0);
MORE_STACK(1);
if (TraceExceptions) {
tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop());
}
// for AbortVMOnException flag
goto run;
}
    if (TraceExceptions) {
      ttyLocker ttyl;
      ResourceMark rm;
      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop());
      tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT,
                    pc - (intptr_t) METHOD->code_base(), THREAD);
    }
// for AbortVMOnException flag
// No handler in this activation, unwind and try again
goto handle_return;
} /* handle_exception: */
// Return from an interpreter invocation with the result of the interpretation
// on the top of the Java Stack (or a pending exception)
// We don't really do anything special here except we must be aware
// that we can get here without ever locking the method (if sync).
// Also we skip the notification of the exit.
// Clear pending so while the pop is in process
// we don't start another one if a call_vm is done.
// Let interpreter (only) see the we're in the process of popping a frame
{
// We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
// in any following VM entries from freeing our live handles, but illegal_state_oop
// isn't really allocated yet and so doesn't become live until later and
    // in unpredictable places. Instead we must protect the places where we enter the
    // VM. It would be much simpler (and safer) if we could allocate a real handle with
    // a NULL oop in it and then overwrite the oop later as needed. Unfortunately,
    // this isn't possible.
//
// As far as we are concerned we have returned. If we have a pending exception
// that will be returned as this invocation's result. However if we get any
// exception(s) while checking monitor state one of those IllegalMonitorStateExceptions
// will be our final result (i.e. monitor exception trumps a pending exception).
//
// If we never locked the method (or really passed the point where we would have),
// there is no need to unlock it (or look for other monitors), since that
// could not have happened.
if (THREAD->do_not_unlock()) {
// Never locked, reset the flag now because obviously any caller must
// have passed their point of locking for us to have gotten here.
} else {
// At this point we consider that we have returned. We now check that the
// locks were properly block structured. If we find that they were not
// used properly we will return with an illegal monitor exception.
// The exception is checked by the caller not the callee since this
// checking is considered to be part of the invocation and therefore
      // in the caller's scope (JVM spec 8.13).
//
// Another weird thing to watch for is if the method was locked
// recursively and then not exited properly. This means we must
      // examine all the entries in reverse time (and stack) order and
// unlock as we find them. If we find the method monitor before
// we are at the initial entry then we should throw an exception.
      // It is not clear that the template-based interpreter does this
      // correctly.
      // We know the initial monitor was used for the method; don't check that
      // slot in the loop.
if (method_unlock_needed) base--;
// Check all the monitors to see they are unlocked. Install exception if found to be locked.
// If it isn't recursive we either must swap old header or call the runtime
// restore object for the slow case
{
// Prevent any HandleMarkCleaner from freeing our live handles
}
}
}
// One error is plenty
{
// Prevent any HandleMarkCleaner from freeing our live handles
}
}
}
end++;
}
// Unlock the method if needed
if (method_unlock_needed) {
        // The method is already unlocked; this is not good.
{
// Prevent any HandleMarkCleaner from freeing our live handles
}
}
} else {
//
// The initial monitor is always used for the method
// However if that slot is no longer the oop for the method it was unlocked
// and reused by something that wasn't unlocked!
//
// deopt can come in with rcvr dead because c2 knows
// its value is preserved in the monitor. So we can't use locals[0] at all
// and must use first monitor slot.
//
if (!suppress_error) {
}
} else {
// If it isn't recursive we either must swap old header or call the runtime
// restore object for the slow case
{
// Prevent any HandleMarkCleaner from freeing our live handles
}
if (THREAD->has_pending_exception()) {
}
}
}
}
}
}
}
//
//
// NOTE: we do not notify a method_exit if we have a pending exception,
// including an exception we generate for unlocking checks. In the former
// case, JVMDI has already been notified by our call for the exception handler
// and in both cases as far as JVMDI is concerned we have already returned.
// If we notify it again JVMDI will be all confused about how many frames
// are still on the stack (4340444).
//
  // NOTE Further! It turns out that the JVMTI spec in fact expects to see
// method_exit events whenever we leave an activation unless it was done
// for popframe. This is nothing like jvmdi. However we are passing the
// tests at the moment (apparently because they are jvmdi based) so rather
// than change this code and possibly fail tests we will leave it alone
// (with this note) in anticipation of changing the vm and the tests
// simultaneously.
//
#ifdef VM_JVMTI
if (_jvmti_interp_events) {
    // Whenever JVMTI puts a thread in interp_only_mode, method
    // entry/exit events are sent for that thread to track stack depth.
{
// Prevent any HandleMarkCleaner from freeing our live handles
}
}
}
#endif /* VM_JVMTI */
//
// See if we are returning any exception
// A pending exception that was pending prior to a possible popping frame
// overrides the popping frame.
//
  assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed");
    // inform the frame manager we have no result
    istate->set_msg(throwing_exception);
    if (illegal_state_oop() != NULL)
      THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
    else
      THREAD->set_pending_exception(original_exception(), NULL, 0);
  }
// Make it simpler on the assembly code and set the message for the frame pop.
// returns
// We must be returning to a deoptimized frame (because popframe only happens between
// two interpreted frames). We need to save the current arguments in C heap so that
// the deoptimized frame when it restarts can copy the arguments to its expression
// stack and re-execute the call. We also have to notify deoptimization that this
  // has occurred, and to pick up the preserved args and copy them to the deoptimized frame's
// java expression stack. Yuck.
//
}
}
// Normal return
// Advance the pc and return to frame manager
} /* handle_return: */
// This is really a fatal error return
DECACHE_TOS();
DECACHE_PC();
return;
}
/*
* All the code following this point is only produced once and is not present
* in the JVMTI version of the interpreter
*/
#ifndef VM_JVMTI
// This constructor should only be used to construct the object to signal
// interpreter initialization. All other instances should be created by
// the frame manager.
_self_link = this;
_prev_link = NULL;
}
// Inline static functions for Java Stack and Local manipulation
// The implementations are platform dependent. We have to worry about alignment
// issues on some machines, which can change on the same platform depending on
// whether it is an LP64 machine.
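/*
 * The 64-bit accessors below rely on a two-word overlay of this shape
 * (a sketch; the real definition lives in the platform headers):
 *
 *   union VMJavaVal64 {
 *     jlong    l;
 *     jdouble  d;
 *     uint32_t v[2];
 *   };
 */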
}
}
}
}
}
}
// only used for value types
int offset) {
}
int offset) {
}
int offset) {
}
int offset) {
}
// needs to be platform dep for the 32 bit platforms.
int offset) {
}
((VMJavaVal64*)addr)->d);
}
int offset) {
}
((VMJavaVal64*)addr)->l;
}
// Locals
}
}
}
}
}
}
// Returns the address of locals value.
}
}
// Used for local value or returnAddress
}
}
}
}
}
}
}
}
}
int to_offset) {
}
}
}
/* insert top word two down */
}
/* insert top word three down */
}
/* insert top 2 slots three down */
}
/* insert top 2 slots four down */
}
// swap top two elements
// Copy -2 entry to -1
// Store saved -1 entry into -2
}
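/*
 * Sketch of the elided swap body (the stack grows down, so -1 is the top):
 *
 *   intptr_t val = tos[-1];   // save top entry
 *   tos[-1] = tos[-2];        // copy -2 entry to -1
 *   tos[-2] = val;            // store saved -1 entry into -2
 */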
// --------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT
switch (msg) {
// status message to C++ interpreter
// requests to frame manager from C++ interpreter
// deopt
default: return("BAD MSG");
}
}
void
BytecodeInterpreter::print() {
tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
#ifdef SPARC
tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
#endif // SPARC
#if defined(IA64) && !defined(ZERO)
tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
#endif // IA64 && !ZERO
}
extern "C" {
}
}
#endif // PRODUCT
#endif // VM_JVMTI
#endif // CC_INTERP