/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#ifndef CC_INTERP
// Misc helpers
// Do an oop store like *(base + index + offset) = val
// index can be noreg
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base, Register index, int offset,
                         Register val, Register tmp,
                         BarrierSet::Name barrier, bool precise) {
switch (barrier) {
#ifndef SERIALGC
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
{
// Load and record the previous value.
noreg /* pre_val */,
tmp, true /*preserve_o_regs*/);
// G1 barrier needs uncompressed oop for region cross check.
}
} else {
}
// No need for post barrier if storing NULL
if (precise) {
} else {
}
}
}
}
break;
#endif // SERIALGC
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableExtension:
{
} else {
}
// No need for post barrier if storing NULL
if (precise) {
} else {
}
}
}
}
break;
case BarrierSet::ModRef:
case BarrierSet::Other:
break;
default :
}
}
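// The barrier-specific bodies above are largely elided. As a rough conceptual model
// (illustrative C, not the emitted SPARC code; the constants below are assumptions
// for the sketch), the CardTableModRef post-barrier just dirties the card covering
// the updated field:
//
//   static void post_barrier_card_mark(jbyte* byte_map_base, void* field_addr) {
//     const int card_shift = 9;                               // 512-byte cards
//     byte_map_base[(uintptr_t)field_addr >> card_shift] = 0; // 0 == dirty_card
//   }
//
// The G1 case additionally runs a SATB pre-barrier that records the previous field
// value and, as noted above, skips the post-barrier when NULL is stored.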
//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization
// (none)
}
//----------------------------------------------------------------------------------------------------
// Condition conversion
switch (cc) {
}
}
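// The body of the conversion above is elided. As a sketch of what it presumably does
// (an assumption, not taken from this file): the generated compare branches around the
// taken path, so the bytecode-level condition maps to the negated SPARC condition code:
//
//   case equal:          return Assembler::notEqual;
//   case not_equal:      return Assembler::equal;
//   case less:           return Assembler::greaterEqual;
//   case less_equal:     return Assembler::greater;
//   case greater:        return Assembler::lessEqual;
//   case greater_equal:  return Assembler::less;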
//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines
}
void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg, Register temp_reg,
                                   bool load_bc_into_bc_reg, int byte_no) {
// With sharing on, may need to test methodOop flag.
if (!RewriteBytecodes) return;
switch (bc) {
case Bytecodes::_fast_aputfield:
case Bytecodes::_fast_bputfield:
case Bytecodes::_fast_cputfield:
case Bytecodes::_fast_dputfield:
case Bytecodes::_fast_fputfield:
case Bytecodes::_fast_iputfield:
case Bytecodes::_fast_lputfield:
case Bytecodes::_fast_sputfield:
{
// We skip bytecode quickening for putfield instructions when
// the put_code written to the constant pool cache is zero.
// This is required so that every execution of this instruction
// calls out to InterpreterRuntime::resolve_get_put to do
// additional, required work.
}
break;
default:
if (load_bc_into_bc_reg) {
}
}
if (JvmtiExport::can_post_breakpoint()) {
__ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
// perform the quickening, slowly, in the bowels of the breakpoint table
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
}
#ifdef ASSERT
#endif
// patch bytecode
}
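// Sketch of the patching performed above (conceptual only; the real code emits this
// logic as SPARC assembly through the macro assembler):
//
//   // bc_reg holds the fast bytecode to install at Lbcp
//   if (JvmtiExport::can_post_breakpoint() && *bcp == Bytecodes::_breakpoint) {
//     // a breakpoint shadows the original bytecode, so patch through the runtime,
//     // which updates the breakpoint table instead of the bytecode stream
//     set_original_bytecode_at(method, bcp, fast_bc);   // via call_VM, as above
//   } else {
//     *bcp = (u1) fast_bc;    // plain one-byte store into the bytecode stream
//   }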
//----------------------------------------------------------------------------------------------------
// Individual instructions
// nothing to do
}
}
}
}
#ifdef _LP64
#else
#endif
}
float* p;
switch( value ) {
default: ShouldNotReachHere();
case 0: p = &zero; break;
case 1: p = &one; break;
case 2: p = &two; break;
}
AddressLiteral a(p);
}
double* p;
switch( value ) {
default: ShouldNotReachHere();
case 0: p = &zero; break;
case 1: p = &one; break;
}
AddressLiteral a(p);
}
// %%%%% Should factor most snippet templates across platforms
}
}
if (wide) {
} else {
}
// get type from tags
// unresolved string? If so, must resolve
// unresolved class? If so, must resolve
// unresolved class in error state
__ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);
// __ add(O0, base_offset, O0);
// __ cmp(O2, JVM_CONSTANT_String);
// __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
}
// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic.
// The verifier will stop it. However, if we get past the verifier,
// this will stop the thread in a reasonable way, without crashing the JVM.
// the call_VM checks for exception, so we should never return here.
return;
}
// Load the exception from the system-array which wraps it:
}
// get type from tags
// A double can be placed at word-aligned locations in the constant pool.
// Check out Conversions.java for an example.
// Also constantPoolOopDesc::header_size() is 20, which makes it very difficult
// to double-align double on the constant pool. SG, 11/7/97
#ifdef _LP64
#else
FloatRegister f = Ftos_d;
f->successor());
#endif
#ifdef _LP64
#else
#endif
}
}
// offset is 2, not 1, because Lbcp points to wide prefix code
}
// Rewrite iload,iload pair into fast_iload2
// iload,caload pair into fast_icaload
if (RewriteFrequentPairs) {
// get next byte
// if _iload, wait to rewrite to iload2. We only want to rewrite the
// last two iloads in a pair. Comparing against fast_iload means that
// the next bytecode is neither an iload nor a caload, and therefore
// an iload pair.
// rewrite
// G4_scratch: fast bytecode
}
// Get the local value into tos
}
}
}
}
}
}
}
}
}
}
}
}
// Otos_i: index
// tos: array
}
// Otos_i: index
// O2: array
}
// Otos_i: index
// O2: array
}
// Otos_i: index
// O2: array
}
// Otos_i: index
// tos: array
}
// Otos_i: index
// tos: array
}
// Otos_i: index
// tos: array
}
// Otos_i: index
// tos: array
}
// Otos_i: index
// tos: array
}
}
}
}
void TemplateTable::dload(int n) {
}
void TemplateTable::aload(int n) {
}
void TemplateTable::aload_0() {
// According to bytecode histograms, the pairs:
//
// _aload_0, _fast_igetfield (itos)
// _aload_0, _fast_agetfield (atos)
// _aload_0, _fast_fgetfield (ftos)
//
// occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
// bytecode checks the next bytecode and then rewrites the current
// bytecode into a pair bytecode; otherwise it rewrites the current
// bytecode into _fast_aload_0 that doesn't do the pair check anymore.
//
if (RewriteFrequentPairs) {
// get next byte
// do actual aload_0
aload(0);
// if _getfield then wait with rewrite
// if _igetfield then rewrite to _fast_iaccess_0
assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
// if _agetfield then rewrite to _fast_aaccess_0
assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
// if _fgetfield then rewrite to _fast_faccess_0
assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
// else rewrite to _fast_aload0
assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
// rewrite
// G4_scratch: fast bytecode
} else {
aload(0);
}
}
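// Sketch of the rewrite decision above, derived from the comments and asserts in
// aload_0() (the compares are presumably against the already-quickened _fast_*getfield
// forms; treat the exact constants as assumptions):
//
//   switch (next_bytecode) {                       // held in G3_scratch
//     case Bytecodes::_getfield:       /* wait; rewrite when the getfield is reached */ break;
//     case Bytecodes::_fast_igetfield: rewrite_to = Bytecodes::_fast_iaccess_0; break;
//     case Bytecodes::_fast_agetfield: rewrite_to = Bytecodes::_fast_aaccess_0; break;
//     case Bytecodes::_fast_fgetfield: rewrite_to = Bytecodes::_fast_faccess_0; break;
//     default:                         rewrite_to = Bytecodes::_fast_aload_0;   break;
//   }
//   patch_bytecode(rewrite_to, ...);               // see patch_bytecode() above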
void TemplateTable::istore() {
}
void TemplateTable::lstore() {
}
void TemplateTable::fstore() {
}
void TemplateTable::dstore() {
}
void TemplateTable::astore() {
}
void TemplateTable::wide_istore() {
}
void TemplateTable::wide_lstore() {
}
void TemplateTable::wide_fstore() {
}
void TemplateTable::wide_dstore() {
}
void TemplateTable::wide_astore() {
}
void TemplateTable::iastore() {
// Otos_i: val
// O3: array
}
void TemplateTable::lastore() {
// Otos_l: val
// O3: array
}
void TemplateTable::fastore() {
// Ftos_f: val
// O3: array
}
void TemplateTable::dastore() {
// Ftos_d: val
// O3: array
}
void TemplateTable::aastore() {
// Otos_i: val
// O2: index
// O3: array
// do array store check - check for NULL value first
// do fast instanceof cache test
// Otos_i: value
// O1: addr - offset
// O2: index
// O3: array
// O4: array element klass
// O5: value klass
// Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
// Generate a fast subtype check. Branch to store_ok if no
// failure. Throw if failure.
// Not a subtype; so must throw exception
// Store is OK.
do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);
do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);
}
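// Conceptual summary of the store check performed above (sketch only; the real code
// uses the inlined fast subtype check and do_oop_store for the write barrier):
//
//   if (value == NULL) {
//     array[index] = NULL;                                        // no subtype check needed
//   } else if (value->klass()->is_subtype_of(array_element_klass)) {
//     array[index] = value;                                       // store plus GC write barrier
//   } else {
//     throw ArrayStoreException;
//   }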
void TemplateTable::bastore() {
// Otos_i: val
// O3: array
}
void TemplateTable::castore() {
// Otos_i: val
// O3: array
}
void TemplateTable::sastore() {
// %%%%% Factor across platform
castore();
}
void TemplateTable::istore(int n) {
}
void TemplateTable::lstore(int n) {
}
void TemplateTable::fstore(int n) {
}
void TemplateTable::dstore(int n) {
}
void TemplateTable::astore(int n) {
}
void TemplateTable::pop() {
}
void TemplateTable::pop2() {
}
void TemplateTable::dup() {
// stack: ..., a
// load a and tag
// stack: ..., a, a
}
void TemplateTable::dup_x1() {
// stack: ..., a, b
// stack: ..., b, a, b
}
void TemplateTable::dup_x2() {
// stack: ..., a, b, c
// get c and push on stack, reuse registers
// stack: ..., a, b, c, c (c in reg) (Lesp - 4)
// (stack offsets n+1 now)
// stack: ..., c, b, c, c (a in reg)
// stack: ..., c, a, c, c (b in reg)
// stack: ..., c, a, b, c
}
void TemplateTable::dup2() {
// stack: ..., a, b, a, b
}
void TemplateTable::dup2_x1() {
// stack: ..., a, b, c
// stack: ..., b, b, c
// stack: ..., b, c, c
// stack: ..., b, c, a
// stack: ..., b, c, a, b, c
}
// The spec says that these types can be a mixture of category 1 (1 word)
// types and category 2 (2 word) types.
void TemplateTable::dup2_x2() {
// stack: ..., a, b, c, d
// stack: ..., c, b, a, d
// stack: ..., c, d, a, b
// stack: ..., c, d, a, b, c, d
}
void TemplateTable::swap() {
// stack: ..., a, b
// stack: ..., b, a
}
switch (op) {
// %%%%% Mul may not exist: better to call .mul?
default: ShouldNotReachHere();
}
}
switch (op) {
#ifdef _LP64
#else
#endif
default: ShouldNotReachHere();
}
}
void TemplateTable::idiv() {
// %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.
// Y contains upper 32 bits of result, set it to 0 or all ones
__ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );
const int min_int = 0x80000000;
#ifdef _LP64
// Don't put set in delay slot
// Set will turn into multiple instructions in 64 bit mode
#else
#endif
}
void TemplateTable::irem() {
idiv(); // %%%% Hack: exploits fact that idiv leaves dividend in O1
}
void TemplateTable::lmul() {
#ifdef _LP64
#else
#endif
}
void TemplateTable::ldiv() {
// check for zero
#ifdef _LP64
__ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
#else
__ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
#endif
}
void TemplateTable::lrem() {
// check for zero
#ifdef _LP64
__ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
#else
#endif
}
void TemplateTable::lshl() {
#ifdef _LP64
#else
#endif
}
void TemplateTable::lshr() {
#ifdef _LP64
#else
#endif
}
void TemplateTable::lushr() {
#ifdef _LP64
#else
#endif
}
switch (op) {
case rem:
#ifdef _LP64
// LP64 calling conventions use F1, F3 for passing 2 floats
#else
#endif
break;
default: ShouldNotReachHere();
}
}
switch (op) {
case rem:
#ifdef _LP64
// Pass arguments in D0, D2
#else
// Pass arguments in O0O1, O2O3
#endif
break;
default: ShouldNotReachHere();
}
}
void TemplateTable::ineg() {
}
void TemplateTable::lneg() {
#ifdef _LP64
#else
#endif
}
void TemplateTable::fneg() {
}
void TemplateTable::dneg() {
// v8 has fnegd if source and dest are the same
}
void TemplateTable::iinc() {
}
void TemplateTable::wide_iinc() {
}
void TemplateTable::convert() {
// %%%%% Factor this first part across platforms
#ifdef ASSERT
switch (bytecode()) {
default : ShouldNotReachHere();
}
switch (bytecode()) {
default : ShouldNotReachHere();
}
#endif
// Conversion
switch (bytecode()) {
#ifdef _LP64
// Sign extend the 32 bits
#else
#endif
break;
break;
break;
break;
break;
break;
#ifndef _LP64
#else
// Sign-extend into the high 32 bits
#endif
break;
if (VM_Version::v9_instructions_work()) {
} else {
}
} else {
);
}
break;
// result must be 0 if value is NaN; test by comparing value to itself
// According to the v8 manual, you have to have a non-fp instruction
// between fcmp and fb.
if (!VM_Version::v9_instructions_work()) {
}
}
break;
// must uncache tos
#ifdef _LP64
#else
#endif
break;
break;
// must uncache tos
#ifdef _LP64
// LP64 calling conventions pass first double arg in D0
#else
#endif
break;
if (VM_Version::v9_instructions_work()) {
}
else {
// must uncache tos
}
break;
default: ShouldNotReachHere();
}
}
void TemplateTable::lcmp() {
#ifdef _LP64
#else
#endif
}
}
// Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
__ verify_thread();
// get (wide) offset to O1_disp
if (is_wide) __ get_4_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
else __ get_2_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);
// Handle all the JSR stuff here, then exit.
// It's much shorter and cleaner than intermingling with the
// non-JSR normal-branch stuff occurring below.
if( is_jsr ) {
// compute return address as bci in Otos_i
// Bump Lbcp to target of JSR
// Push returnAddress for "ret" on stack
// And away we go!
return;
}
// Normal (non-jsr) branch handling
// Save the current Lbcp
// check branch direction
// Bump bytecode pointer by displacement (take the branch)
if (TieredCompilation) {
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
// Increment backedge counter in the MDO
}
// If there's no MDO, increment counter in methodOop
// notify point for loop, pass branch bytecode
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp);
// Was an OSR adapter generated?
// O0 = osr nmethod
// Has the nmethod been invalidated already?
// migrate the interpreter frame off of the stack
// save nmethod
// move OSR nmethod to I1
// OSR buffer to I0
// remove the interpreter frame
// Jump to the osr code.
} else {
// Update Backedge branch separately from invocations
if (ProfileInterpreter) {
if (UseOnStackReplacement) {
}
} else {
if (UseOnStackReplacement) {
}
}
}
} else
// Bump bytecode pointer by displacement (take the branch)
// continue with bytecode @ target
// %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
// %%%%% and changing dispatch_next to dispatch_only
}
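// The backedge-profiling/OSR path sketched by the comments above reduces to roughly
// (conceptual pseudocode; counter locations and thresholds are assumptions here):
//
//   if (displacement <= 0) {                             // only backward branches are counted
//     int count = has_mdo ? ++mdo_backedge_counter : ++method_backedge_counter;
//     if (UseOnStackReplacement && count > backedge_threshold) {
//       nmethod* osr = frequency_counter_overflow(cur_bcp);   // via call_VM, result in O0
//       if (osr != NULL && osr->is_in_use()) {           // skip invalidated nmethods
//         migrate_frame_and_jump_to_osr(osr);            // leave the interpreter frame
//       }
//     }
//   }
//   Lbcp += displacement;                                // take the branch either way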
// Note Condition in argument is TemplateTable::Condition
// arg scope is within class scope
// no pointers, integer only!
// assume branch is more often taken than not (loops use backward branches)
}
}
}
}
void TemplateTable::ret() {
// Otos_i contains the bci, compute the bcp from that
#ifdef _LP64
#ifdef ASSERT
// jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
// the result. The return address (really a BCI) was stored with an
// 'astore' because JVM specs claim it's a pointer-sized thing. Hence in
// the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
// loaded value.
}
#endif
#endif
}
void TemplateTable::wide_ret() {
// Otos_i contains the bci, compute the bcp from that
}
void TemplateTable::tableswitch() {
// align bcp
// load lo, hi
#ifdef _LP64
// Sign extend the 32 bits
#endif /* _LP64 */
// check against lo & hi
// lookup dispatch offset
// handle default
// continue execution
}
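// For reference, the dispatch implemented above is (conceptually):
//
//   // after Lbcp is rounded up to the next 4-byte boundary past the opcode
//   int32_t default_off = table[0];
//   int32_t lo          = table[1];
//   int32_t hi          = table[2];
//   int32_t off = (key >= lo && key <= hi) ? table[3 + (key - lo)] : default_off;
//   Lbcp += off;                           // continue execution at the target bytecode
//
// (The offsets are stored big-endian in the bytecode stream, which is already the
// native order on SPARC; compare the "native byte-ordering" comments in
// fast_binaryswitch below.)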
void TemplateTable::lookupswitch() {
}
void TemplateTable::fast_linearswitch() {
// align bcp
// set counter
// table search
// default case
if (ProfileInterpreter) {
}
// entry found -> get offset
if (ProfileInterpreter) {
}
}
void TemplateTable::fast_binaryswitch() {
// Implementation using the following core algorithm: (copied from Intel)
//
// int binary_search(int key, LookupswitchPair* array, int n) {
// // Binary search according to "Methodik des Programmierens" by
// // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
// int i = 0;
// int j = n;
// while (i+1 < j) {
// // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
// // with Q: for all i: 0 <= i < n: key < a[i]
// // where a stands for the array and assuming that the (nonexistent)
// // element a[n] is infinitely big.
// int h = (i + j) >> 1;
// // i < h < j
// if (key < array[h].fast_match()) {
// j = h;
// } else {
// i = h;
// }
// }
// // R: a[i] <= key < a[i+1] or Q
// // (i.e., if key is within array, i is the correct index)
// return i;
// }
// register allocation
const int log_entry_size = 3;
// Find Array start
// initialize i & j (in delay slot)
// and start
// (Rj is already in the native byte-ordering.)
// binary search loop
// int h = (i + j) >> 1;
// if (key < array[h].fast_match()) {
// j = h;
// } else {
// i = h;
// }
// (Rscratch is already in the native byte-ordering.)
if ( VM_Version::v9_instructions_work() ) {
__ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
__ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())
}
else {
}
// while (i+1 < j)
}
// end of binary search, result index is i (must check again!)
if (ProfileInterpreter) {
}
// (Rscratch is already in the native byte-ordering.)
// entry found -> j = offset
// (Rj is already in the native byte-ordering.)
if (ProfileInterpreter) {
}
}
// Call out to do finalizer registration
}
// The caller's SP was adjusted upon method entry to accommodate
// the callee's non-argument locals. Undo that adjustment.
}
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPU's in
// order. Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt to each other.
// ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
// the read float up to before the read. It's OK for non-volatile memory refs
// that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
// that happen BEFORE the write float down to after the write. It's OK for
// non-volatile memory refs that happen after the volatile write to float up
// before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs). Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads. These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case. This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
// Helper function to insert a is-volatile test and memory barrier
// All current sparc implementations run in TSO, needing only StoreLoad
}
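// The barrier placement implied by rules (1)-(3) above is, conceptually:
//
//   // volatile store
//   membar(LoadStore | StoreStore);   // rule (3): earlier refs may not sink below the store
//   store(field, value);
//   membar(StoreLoad);                // rule (1): covers the volatile-store / volatile-load case
//
//   // volatile load
//   value = load(field);
//   membar(LoadLoad | LoadStore);     // rule (2): later refs may not float above the load
//
// On TSO sparc only the StoreLoad barrier has any effect, which is why the helper above
// needs to emit just that one (sketch; the barrier-name spelling is illustrative).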
// ----------------------------------------------------------------------------
size_t index_size) {
// Depends on cpCacheOop layout!
// We are resolved if the f1 field contains a non-null object (CallSite, MethodType, etc.)
// This kind of CP cache entry does not need to match bytecode_1 or bytecode_2, because
// there is a 1-1 relation between bytecode type and CP entry type.
// The caller will also load a methodOop from f2.
} else {
}
switch (bytecode()) {
case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
default:
break;
}
// first time invocation - must resolve first
// Update registers with resolved info
}
bool is_invokevirtual,
bool is_invokevfinal,
bool is_invokedynamic) {
// Uses both G3_scratch and G4_scratch
// determine constant pool cache field offsets
const int method_offset = in_bytes(
)
);
// access constant pool cache fields
if (is_invokevfinal) {
// Resolved f1_oop (CallSite, MethodType, etc.) goes into 'itable_index'.
// Resolved f2_oop (methodOop invoker) will go into 'method' (at index_offset).
// See ConstantPoolCacheEntry::set_dynamic_call and set_method_handle.
} else {
}
if (itable_index != noreg) {
// pick up itable index from f2 also:
}
}
// The Rcache register must be set before call
bool is_static) {
if (is_static) {
}
}
// The registers Rcache and index expected to be set before call.
// Correct values of the Rcache and index registers are preserved.
bool is_static,
bool has_tos) {
if (JvmtiExport::can_post_field_access()) {
// Check to see if a field access watch has been set before we take
// the time to call into the VM.
if (is_static) {
} else {
if (has_tos) {
// save object pointer before call_VM() clobbers it
} else {
// Load top of stack (do not pop the value off the stack);
}
}
// Otos_i: object pointer or NULL if static
// Rcache: cache entry pointer
}
}
}
if (!is_static) {
} else {
}
// Get volatile flag
}
// compute field type
// Make sure we don't need to mask Rflags after the above shift
// Check atos before itos for getstatic, more likely (in Queens at least)
// atos
if (!is_static) {
}
// cmp(Rflags, itos);
// itos
if (!is_static) {
}
// cmp(Rflags, ltos);
// ltos
// load must be atomic
if (!is_static) {
}
// cmp(Rflags, btos);
// btos
if (!is_static) {
}
// cmp(Rflags, ctos);
// ctos
if (!is_static) {
}
// cmp(Rflags, stos);
// stos
if (!is_static) {
}
// cmp(Rflags, ftos);
// ftos
if (!is_static) {
}
// dtos
if (!is_static) {
}
// __ tst(Lscratch); executed in delay slot
}
}
getfield_or_static(byte_no, false);
}
getfield_or_static(byte_no, true);
}
// Get volatile flag
}
switch (bytecode()) {
case Bytecodes::_fast_bgetfield:
break;
case Bytecodes::_fast_cgetfield:
break;
case Bytecodes::_fast_sgetfield:
break;
case Bytecodes::_fast_igetfield:
break;
case Bytecodes::_fast_lgetfield:
break;
case Bytecodes::_fast_fgetfield:
break;
case Bytecodes::_fast_dgetfield:
break;
case Bytecodes::_fast_agetfield:
break;
default:
}
}
}
}
void TemplateTable::jvmti_post_fast_field_mod() {
if (JvmtiExport::can_post_field_modification()) {
// Check to see if a field modification watch has been set before we take
// the time to call into the VM.
// Save tos values before call_VM() clobbers them. Since we have
// to do it for every data type, we use the saved values as the
// jvalue object.
switch (bytecode()) { // save tos values before call_VM() clobbers them
// get words in right order for use as jvalue object
}
// setup pointer to jvalue object
// G4_scratch: object pointer
// G1_scratch: cache entry pointer
// G3_scratch: jvalue object on the stack
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
switch (bytecode()) { // restore tos values
}
}
}
// The registers Rcache and index expected to be set before call.
// The function may destroy various registers, just not the Rcache and index registers.
if (JvmtiExport::can_post_field_modification()) {
// Check to see if a field modification watch has been set before we take
// the time to call into the VM.
// The Rcache and index registers have already been set.
// This allows the cache/index load here to be eliminated, but the Rcache and index
// registers must then be used consistently after this line.
if (is_static) {
// Life is simple. Null out the object pointer.
} else {
// Life is harder. The stack holds the value on top, followed by the
// object. We don't know the size of the value, though; it could be
// one or two words depending on its type. As a result, we must find
// the type to determine where the object is.
// Make sure we don't need to mask Rflags after the above shift
// setup object pointer
}
// setup pointer to jvalue object
// G4_scratch: object pointer or NULL if static
// G3_scratch: cache entry pointer
// G1_scratch: jvalue object on the stack
}
}
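// Sketch of the "find the object below the value" step described above (illustrative
// only; slot arithmetic and names are assumptions):
//
//   // the expression stack holds [... obj, value], with the value on top
//   int value_slots = (field_type == T_LONG || field_type == T_DOUBLE) ? 2 : 1;
//   oop obj = expression_stack_slot(value_slots);   // the slot just below the value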
__ verify_oop(r);
}
}
}
// Make sure we don't need to mask Rflags after the above shift
// compute field type
if (is_static) {
// putstatic with object type most likely, check that first
// atos
{
}
// cmp(Rflags, itos);
// itos
{
}
} else {
// putfield with int type most likely, check that first
// itos
{
}
// cmp(Rflags, atos);
// atos
{
}
}
// cmp(Rflags, btos);
// btos
{
if (!is_static) {
}
}
// cmp(Rflags, ltos);
// ltos
{
if (!is_static) {
}
}
// cmp(Rflags, ctos);
// ctos (char)
{
if (!is_static) {
}
}
// cmp(Rflags, stos);
// stos (short)
{
if (!is_static) {
}
}
// cmp(Rflags, ftos);
// ftos
{
if (!is_static) {
}
}
// dtos
{
if (!is_static) {
}
}
// __ tst(Lscratch); in delay slot
}
}
}
}
switch (bytecode()) {
case Bytecodes::_fast_fputfield:
break;
case Bytecodes::_fast_dputfield:
break;
case Bytecodes::_fast_aputfield:
break;
default:
}
}
}
putfield_or_static(byte_no, false);
}
putfield_or_static(byte_no, true);
}
// access constant pool cache (is resolved)
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset);
} else {
}
// Get is_volatile value in Rflags and check if membar is needed
__ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags);
// Test volatile
}
}
//----------------------------------------------------------------------------------------------------
// Calls
// implemented elsewhere
}
) {
// determine flags
assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
// setup registers & access constant pool cache
load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
// maybe push appendix to arguments
if (is_invokedynamic || is_invokehandle) {
// Push the appendix as a trailing parameter.
// This must be done before we get the receiver,
// since the parameter_size includes it.
}
// load receiver if needed (after appendix is pushed so parameter size is correct)
if (load_receiver) {
}
// compute return type
// Make sure we don't need to mask flags after the above shift
// load return address
{
}
}
// get target methodOop & entry point
}
// Check for vfinal
// receiver is in O0_recv
// get return address
// Make sure we don't need to mask Rret after the above shift
// get receiver klass
}
/*is_invokevfinal*/true, false);
}
// Load receiver from stack slot
// receiver NULL check
// get return address
// Make sure we don't need to mask Rret after the above shift
// do the call
}
// do the call
}
// do the call
}
// Check for vfinal
// do the call - the index (f2) contains the methodOop
}
// get receiver klass
// Special case of invokeinterface called for virtual method of
// java.lang.Object. See cpCacheOop.cpp for details.
// This code isn't produced by javac, but could be produced by
// another compliant java compiler.
//
// find entry point to call
//
// compute start of first itableOffsetEntry (which is at end of vtable)
}
} else {
}
{
// Check that entry is non-null. Null entries are probably a bytecode
// problem. If the interface isn't implemented by the receiver class,
// the VM should throw IncompatibleClassChangeError. linkResolver checks
// this too but that's only if the entry isn't already resolved, so we
// need to check again.
}
// entry found and Rscratch points to it
// Check for abstract method error.
{
}
}
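// Rough shape of the itable lookup performed above (conceptual pseudocode; the real
// layout comes from klassItable, and the scan is emitted as a small assembly loop):
//
//   for (entry = first itableOffsetEntry after the receiver klass's vtable; ; entry++) {
//     if (entry->interface() == NULL)      throw IncompatibleClassChangeError;  // not implemented
//     if (entry->interface() == resolved_interface) {
//       method = method_table_at(receiver_klass, entry->offset())[itable_index];
//       if (method == NULL)                throw AbstractMethodError;           // method missing
//       invoke(method);
//       break;
//     }
//   }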
if (!EnableInvokeDynamic) {
// rewriter does not generate this bytecode
return;
}
// G4: MethodType object (from f1)
// G5: MH.linkToCallSite method (from f2)
// Note: G4_mtype is already pushed (if necessary) by prepare_invoke
// do the call
}
if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic.
// The verifier will stop it. However, if we get past the verifier,
// this will stop the thread in a reasonable way, without crashing the JVM.
// the call_VM checks for exception, so we should never return here.
return;
}
// G4: CallSite object (from f1)
// G5: MH.linkToCallSite method (from f2)
// Note: G4_callsite is already pushed by prepare_invoke
// %%% should make a type profile for any invokedynamic that takes a ref argument
// profile this call
// do the call
}
//----------------------------------------------------------------------------------------------------
// Allocation
void TemplateTable::_new() {
// make sure the class we're about to instantiate has been resolved
// This is done before loading instanceKlass to be consistent with the order
// how Constant Pool is updated (see constantPoolOopDesc::klass_at_put)
// get instanceKlass
//__ sll(Roffset, LogBytesPerWord, Roffset); // executed in delay slot
// make sure klass is fully initialized:
// get instance_size in instanceKlass (already aligned)
//__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
// allocate the instance
// 1) Try to allocate in the TLAB
// 2) if fail, and the TLAB is not full enough to discard, allocate in the shared Eden
// 3) if the above fails (or is not applicable), go to a slow case
// (creates a new TLAB, etc.)
const bool allow_shared_alloc =
if(UseTLAB) {
// check if we can allocate in the TLAB
__ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RalocatedObject
// if there is enough space, we do not CAS and do not clear
if(ZeroTLAB) {
// the fields have already been cleared
} else {
// initialize both the header and fields
}
if (allow_shared_alloc) {
// Check if tlab should be discarded (refill_waste_limit >= free)
#ifdef _LP64
#else
#endif
__ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
// increment waste limit to prevent getting stuck on this slow path
__ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
} else {
// No allocation in the shared eden.
}
}
// Allocation in the shared Eden
if (allow_shared_alloc) {
// RnewTopValue contains the top address after the new object
// has been allocated.
__ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
// if someone beat us on the allocation, try again, otherwise continue
// bump total bytes allocated by this thread
// RoldTopValue and RtopAddr are dead, so can use G1 and G3
}
// clear object fields
// initialize remaining object fields
if (UseBlockZeroing) {
// Use BIS for zeroing
} else {
//__ subcc(Roffset, wordSize, Roffset); // executed above loop or in delay slot
}
}
// slow case
// Initialize the header: mark, klass
if (UseBiasedLocking) {
} else {
}
{
// Trigger dtrace event
}
// continue
}
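// Conceptual model of the allocation strategy enumerated above (illustrative C++-ish
// sketch; names follow ThreadLocalAllocBuffer loosely and are assumptions):
//
//   HeapWord* obj = tlab.top();
//   if (obj + size <= tlab.end()) {
//     tlab.set_top(obj + size);                      // 1) TLAB bump-pointer, no CAS needed
//   } else if (!allow_shared_alloc || tlab.refill_waste_limit() >= tlab.free()) {
//     return slow_case();                            // 3) discard/refill the TLAB in the runtime
//   } else {
//     tlab.bump_refill_waste_limit();                //    avoid getting stuck on this path
//     do {                                           // 2) CAS-allocate in the shared Eden
//       obj = *heap_top_addr;
//       if (obj + size > *heap_end_addr) return slow_case();
//     } while (cas(heap_top_addr, obj, obj + size) != obj);
//   }
//   // in all cases the header (mark word + klass) is then initialized and, unless
//   // ZeroTLAB already cleared them, the instance fields are zeroed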
void TemplateTable::newarray() {
}
void TemplateTable::anewarray() {
}
void TemplateTable::arraylength() {
}
void TemplateTable::checkcast() {
// Check for casting a NULL
// Get value klass in RobjKlass
// Get constant pool tag
// See if the checkcast has been quickened
// Extract target class from constant pool
// Generate a fast subtype check. Branch to cast_ok if no
// failure. Throw exception if failure.
// Not a subtype; so must throw exception
if (ProfileInterpreter) {
}
}
void TemplateTable::instanceof() {
// Check for casting a NULL
// Get value klass in RobjKlass
// Get constant pool tag
// See if the checkcast has been quickened
// Extract target class from constant pool
// Generate a fast subtype check. Branch to cast_ok if no
// failure. Return 0 if failure.
// Not a subtype; return 0;
if (ProfileInterpreter) {
}
}
void TemplateTable::_breakpoint() {
// Note: We get here even if we are single stepping.
// jbug insists on setting breakpoints at every bytecode
// even if we are in single step mode.
// get the unpatched byte code
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
// post the breakpoint event
// complete the execution of original bytecode
}
//----------------------------------------------------------------------------------------------------
// Exceptions
void TemplateTable::athrow() {
// This works because the exception is cached in Otos_i, which is the same as O0,
// which is what the throw_exception_entry expects.
}
//----------------------------------------------------------------------------------------------------
// Synchronization
// See frame_sparc.hpp for monitor block layout.
// Monitor elements are dynamically allocated by growing stack as needed.
void TemplateTable::monitorenter() {
// Try to acquire a lock on the object
// Repeat until succeeded (i.e., until
// monitorenter returns true).
}
// find a free slot in the monitor block
// initialize entry pointer
{
if (VM_Version::v9_instructions_work())
else {
Label L;
}
}
// found free slot?
}
// Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
// The object has already been popped from the stack, so the expression stack looks correct.
// check if there's enough space on the stack for the monitors after locking
// The bcp has already been incremented. Just need to dispatch to next instruction.
}
void TemplateTable::monitorexit() {
__ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );
// use Lscratch to hold monitor elem to check, start with most recent monitor,
// By using a local it survives the call to the C routine.
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
}
}
//----------------------------------------------------------------------------------------------------
// Wide instructions
void TemplateTable::wide() {
// Note: the Lbcp increment step is part of the individual wide bytecode implementations
}
//----------------------------------------------------------------------------------------------------
// Multi arrays
void TemplateTable::multianewarray() {
// put ndims * wordSize into Lscratch
// Lesp points past last_dim, so set O1 to the first_dim address
}
#endif /* !CC_INTERP */