/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeBlob.hpp"
#include "code/compiledIC.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/barrierSet.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/events.hpp"
// Implementation of StubAssembler
// Construct a StubAssembler over the given code buffer and reset the
// per-stub bookkeeping fields to their defaults.
// NOTE(review): 'name' and 'stub_id' are unused in this excerpt — presumably
// a set_info(...) call recording them was elided upstream; confirm against
// the full source.
StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
_must_gc_arguments = false;  // default: stub arguments are not assumed to be GC roots — TODO confirm
_num_rt_args = 0;            // no runtime-call argument count recorded yet
}
}
if (_frame_size == no_frame_size) {
_frame_size = size;
}
}
if (_num_rt_args == 0) {
_num_rt_args = args;
}
}
// Implementation of Runtime1
};
#ifndef PRODUCT
// statistics
static int _byte_arraycopy_cnt = 0;
static int _short_arraycopy_cnt = 0;
static int _int_arraycopy_cnt = 0;
static int _long_arraycopy_cnt = 0;
static int _oop_arraycopy_cnt = 0;
switch (type) {
case T_BOOLEAN:
case T_CHAR:
case T_FLOAT:
case T_DOUBLE:
case T_ARRAY:
default:
return NULL;
}
}
#endif
// Simple helper to see if the caller of a runtime stub which
// entered the VM has been deoptimized
static bool caller_is_deopted() {
// Reports whether the compiled frame that called into this runtime stub has
// been deoptimized.
// NOTE(review): 'caller_frame' is not declared anywhere in this excerpt —
// the stack-walking code that computes it (via the thread's last Java frame,
// presumably) appears to have been elided; verify against the full source.
return caller_frame.is_deoptimized_frame();
}
// Stress deoptimization
static void deopt_caller() {
// Force deoptimization of the calling compiled frame, but only if it has
// not already been deoptimized.
if ( !caller_is_deopted()) {
// NOTE(review): the body that performs the actual deoptimization is
// missing from this excerpt — confirm against the full source.
}
}
// create code buffer for code storage
// create assembler for code generation
// generate code for runtime stub
"if stub has an oop map it must have a valid frame size");
#ifdef ASSERT
// Make sure that stubs that need oopmaps have them
switch (id) {
// These stubs don't need to have an oopmap
case dtrace_object_alloc_id:
case g1_pre_barrier_slow_id:
case g1_post_barrier_slow_id:
case slow_subtype_check_id:
case fpu2long_stub_id:
case unwind_exception_id:
case counter_overflow_id:
case handle_exception_nofpu_id: // Unused on sparc
#endif
break;
// All other stubs should have oopmaps
default:
}
#endif
// align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
// make sure all code is in code buffer
// create blob - distinguish a few special cases
&code,
sasm->frame_size(),
sasm->must_gc_arguments());
// install blob
}
// platform-dependent initialization
// generate stubs
// printing
#ifndef PRODUCT
if (PrintSimpleStubs) {
}
}
}
#endif
}
}
return _blob_names[id];
}
}
#define FUNCTION_CASE(a, f) \
#ifdef TRACE_HAVE_INTRINSICS
#endif
// Soft float adds more runtime names.
return pd_name_for_address(entry);
}
h->check_valid_for_instantiation(true, CHECK);
// make sure klass is initialized
h->initialize(CHECK);
// allocate instance and return via TLS
// Note: no handle for klass needed since they are not used
// anymore after new_typeArray() and no GC can happen before.
// (This may have to change if this code changes!)
// This is pretty rare but this runtime patch is stressful to deoptimization
// if we deoptimize here so force a deopt to stress the path.
if (DeoptimizeALot) {
deopt_caller();
}
JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* thread, klassOopDesc* array_klass, jint length))
// Note: no handle for klass needed since they are not used
// anymore after new_objArray() and no GC can happen before.
// (This may have to change if this code changes!)
// This is pretty rare but this runtime patch is stressful to deoptimization
// if we deoptimize here so force a deopt to stress the path.
if (DeoptimizeALot) {
deopt_caller();
}
JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* thread, klassOopDesc* klass, int rank, jint* dims))
SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayStoreException(), klass_name);
// counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
// associated with the top activation record. The inlinee (that is possibly included in the enclosing
// method) method oop is passed as an argument. In order to do that it is embedded in the code as
// a constant.
if (branch_bci != InvocationEntryBci) {
// Compute destination bci
int offset = 0;
switch (branch) {
break;
break;
default: ;
}
}
osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);
return osr_nm;
}
JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, methodOopDesc* method))
}
return NULL;
// Enter this method from compiled code handler below. This is where we transition
// to VM mode. This is done as a helper routine so that the method called directly
// from compiled code does not have to transition to VM. This allows the entry
// method to see if the nmethod that we have just looked up a handler for has
// been deoptimized while we were in the vm. This simplifies the assembly code
// in the cpu directories.
//
// We are entering here from exception stub (via the entry method below)
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of top frame method
// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
// control the area where we can allow a safepoint. After we exit the safepoint area we can
// check to see if the handler we are going to return is now in a nmethod that has
// been deoptimized. If that is the case we return the deopt blob
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm))
// Reset method handle flag.
thread->set_is_method_handle_return(false);
// Adjust the pc as needed.
// if the frame isn't deopted then pc must not correspond to the caller of last_frame
}
#ifdef ASSERT
// Check that exception is a subclass of Throwable, otherwise we have a VerifyError
}
#endif
// Check the stack guard pages and reenable them if necessary and there is
// enough space on the stack to do so. Use fast exceptions only if the guard
// pages are enabled.
if (JvmtiExport::can_post_on_exceptions()) {
// To ensure correct notification of exception catches and throws
// we have to deoptimize here. If we attempted to notify the
// catches and throws during this exception lookup it's possible
// we could deoptimize on the way out of the VM and end back in
// the interpreter at the throw site. This would result in double
// notifications since the interpreter would also notify about
// these same catches and throws as it unwound the frame.
// We don't really want to deoptimize the nmethod itself since we
// can actually continue in the exception handler ourselves but I
// don't see an easy way to have the desired effect.
}
// ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
if (guard_pages_enabled) {
if (fast_continuation != NULL) {
// Set flag if return address is a method handle call site.
return fast_continuation;
}
}
// If the stack guard pages are enabled, check whether there is a handler in
// the current method. Otherwise (guard pages disabled), force an unwind and
// skip the exception cache update (i.e., just leave continuation==NULL).
if (guard_pages_enabled) {
// New exception handling mechanism can support inlined methods
// with exception handlers since the mappings are from PC to PC
// debugging support
// tracing
if (TraceExceptions) {
tty->print_cr("Exception <%s> (0x%x) thrown in compiled method <%s> at PC " PTR_FORMAT " for thread 0x%x",
exception->print_value_string(), (address)exception(), nm->method()->print_value_string(), pc, thread);
}
// for AbortVMOnException flag
// Clear out the exception oop and pc since looking up an
// exception handler can cause class loading, which might throw an
// exception and those fields are expected to be clear during
// normal bytecode execution.
// If an exception was thrown during exception dispatch, the exception oop may have changed
// the exception cache is used only by non-implicit exceptions
if (continuation != NULL) {
}
}
// Set flag if return address is a method handle call site.
if (TraceExceptions) {
tty->print_cr("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT " for exception thrown at PC " PTR_FORMAT,
}
return continuation;
// Enter this method from compiled code only if there is a Java exception handler
// in the method handling the exception.
// We are entering here from exception stub. We don't do a normal VM transition here.
// We do it in a helper. This is so we can check to see if the nmethod we have just
// searched for an exception handler has been deoptimized in the meantime.
// Still in Java mode
{
// Enter VM mode by calling the helper
}
// Back in JAVA, use no oops DON'T safepoint
// Now check to see if the nmethod we were called from is now deoptimized.
// If so we must return to the deopt blob and deoptimize the nmethod
}
return continuation;
}
SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IndexOutOfBoundsException(), message);
SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_ENTRY_NO_ASYNC(void, Runtime1::monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock))
if (PrintBiasedLockingStatistics) {
}
if (UseBiasedLocking) {
// Retry fast entry if bias is revoked to avoid unnecessary inflation
} else {
if (UseFastLocking) {
// When using fast locking, the compiled code has already tried the fast case
} else {
}
}
// monitorexit is non-blocking (leaf routine) => no exceptions can be thrown
if (UseFastLocking) {
// When using fast locking, the compiled code has already tried the fast case
} else {
}
// Cf. OptoRuntime::deoptimize_caller_frame
// Called from within the owner thread, so no need for safepoint
// We are coming from a compiled method; check this is true.
// Deoptimize the caller frame.
// Return to the now deoptimized frame.
// This can be static or non-static field access
// We must load the class, initialize the class, and resolve the field
LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK_NULL);
}
//
// This routine patches sites where a class wasn't loaded or
// initialized at the time the code was generated. It handles
// references to classes, fields and forcing of initialization. Most
// of the cases are straightforward and involving simply forcing
// resolution of a class, rewriting the instruction stream with the
// needed constant and replacing the call in this function with the
// patched code. The case for static field is more complicated since
// the thread which is in the process of initializing a class can
// access its static fields but other threads can't, so the code
// either has to deoptimize when this case is detected or execute a
// check that the current thread is the initializing thread. The
// current implementation takes the former approach.
//
// Patches basically look like this:
//
//
// patch_site: jmp patch stub ;; will be patched
// continue: ...
// ...
// ...
// ...
//
// They have a stub which looks like this:
//
// ;; patch body
// movl <const>, reg (for class constants)
// <or> movl [reg1 + <const>], reg (for field offsets)
// <or> movl reg, [reg1 + <const>] (for field offsets)
// <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: call Runtime1::patch_code (through a runtime stub)
// jmp patch_site
//
//
// A normal patch is done by rewriting the patch body, usually a move,
// and then copying it into place over top of the jmp instruction
// being careful to flush caches and doing it in an MP-safe way. The
// constants following the patch body are used to find various pieces
// of the patch relative to the call site for Runtime1::patch_code.
// The case for getstatic and putstatic is more complicated because
// getstatic and putstatic have special semantics when executing while
// the class is being initialized: a getstatic/putstatic on a class
// which is being_initialized may be executed by the initializing
// thread but other threads have to block when they execute it. This
// is accomplished in compiled code by executing a test of the current
// thread against the initializing thread of the class. It's emitted
// as boilerplate in their stub which allows the patched code to be
// executed before it's copied back into the main body of the nmethod.
//
// being_init: get_thread(<tmp reg>
// cmpl [reg1 + <init_thread_offset>], <tmp reg>
// jne patch_stub
// movl [reg1 + <const>], reg (for field offsets) <or>
// movl reg, [reg1 + <const>] (for field offsets)
// jmp continue
// <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: jmp Runtime1::patch_code (through a runtime stub)
// jmp patch_site
//
// If the class is being initialized the patch body is rewritten and
// the patch site is rewritten to jump to being_init, instead of
// patch_stub. Whenever this code is executed it checks the current
// thread against the initializing thread so other threads will enter
// the runtime and end up blocked waiting for the class to finish
// initializing inside the calls to resolve_field below. The
// initializing class will continue on its way. Once the class is
// fully_initialized, the initializing_thread of the class becomes
// NULL, so the next thread to execute this code will fail the test,
// call into patch_code and complete the patching process by copying
// the patch body back into the main part of the nmethod and resume
// executing.
//
//
// last java frame on stack
// Note that caller_method->code() may not be same as caller_code because of OSR's
// Note also that in the presence of inlining it is not guaranteed
// that caller_method() == caller_code->method()
#ifndef PRODUCT
// this is used by assertions in the access_field_patching_id
#endif // PRODUCT
bool deoptimize_for_volatile = false;
LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK);
// If we're patching a field which is volatile then at compile it
// must not have been known to be volatile, so the generated code
// isn't correct for a volatile reference. The nmethod has to be
// deoptimized so that the code can be regenerated correctly.
// This check is only needed for access_field_patching since this
// is the path for patching field offsets. load_klass is only
// used for patching references to oops which don't need special
// handling in the volatile case.
#ifndef PRODUCT
#endif
oop k;
switch (code) {
case Bytecodes::_putstatic:
case Bytecodes::_getstatic:
// Save a reference to the class that has to be checked for initialization
k = klass->java_mirror();
}
break;
}
break;
case Bytecodes::_multianewarray:
}
break;
case Bytecodes::_instanceof:
}
break;
case Bytecodes::_checkcast:
}
break;
case Bytecodes::_anewarray:
}
break;
{
}
break;
default: Unimplemented();
}
// convert to handle
} else {
}
if (deoptimize_for_volatile) {
// At compile time we assumed the field wasn't volatile but after
// loading it turns out it was volatile so we have to throw the
// compiled code out and let it be regenerated.
if (TracePatching) {
}
// It's possible the nmethod was invalidated in the last
// safepoint, but if it's still alive then make it not_entrant.
nm->make_not_entrant();
}
// Return to the now deoptimized frame.
}
// If we are patching in a non-perm oop, make sure the nmethod
// is on the right list.
if (!nm->on_scavenge_root_list())
}
// Now copy code back
{
//
// Deoptimization may have happened while we waited for the lock.
// In that case we don't bother to do any patching we just return
// and let the deopt happen
if (!caller_is_deopted()) {
// the jump has not been patched yet
// The jump destination is slow case and therefore not part of the stubs
// (stubs are only for StaticCalls)
// format of buffer
// ....
// instr byte 0 <-- copy_buff
// instr byte 1
// ..
// instr byte n-1
// n
// .... <-- call destination
if (TracePatching) {
// NOTE we use pc() not original_pc() because we already know they are
// identical otherwise we'd have never entered this block of code
}
// depending on the code below, do_patch says whether to copy the patch body back into the nmethod
bool do_patch = true;
// The offset may not be correct if the class was not loaded at code generation time.
// Set it now.
assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type");
// If a getstatic or putstatic is referencing a klass which
// isn't fully initialized, the patch body isn't copied into
// place until initialization is complete. In this case the
// patch site is setup so that any threads besides the
// initializing thread are forced to come into the VM and
// block.
} else {
// patch the instruction <move reg, klass>
"illegal init value");
if (TracePatching) {
}
// Update the oop location in the nmethod with the proper
// oop. When the code was generated, a NULL was stuffed
// in the oop table and that table needs to be updated to
// have the right value. On intel the value is kept
// directly in the instruction instead of in the oop
// table, so set_data above effectively updated the value.
bool found = false;
*oop_adr = load_klass();
r->fix_oop_relocation();
found = true;
}
}
#endif
}
} else {
}
if (do_patch) {
// replace instructions
// first replace the tail, then the call
#ifdef ARM
break;
}
}
copy_buff -= *byte_count;
}
#endif
}
// update relocInfo to oop
// The old patch site is now a move instruction so update
// the reloc info so that it will get updated during
// future GCs.
#ifdef SPARC
// Sparc takes two relocations for an oop so update the second one.
#endif
#ifdef PPC
relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2, relocInfo::none, relocInfo::oop_type);
}
#endif
}
} else {
}
}
}
}
//
// Entry point for compiled code. We want to patch a nmethod.
// We don't do a normal VM transition here because we want to
// know after the patching is complete and any safepoint(s) are taken
// if the calling nmethod was deoptimized. We do this by calling a
// helper method which does the normal VM transition and when it
// completes we can check for deoptimization. This simplifies the
// assembly code in the cpu directories.
//
//
// NOTE: we are still in Java
//
{
// Enter VM mode
}
// Back in JAVA, use no oops DON'T safepoint
// Return true if calling code is deoptimized
return caller_is_deopted();
}
//
// Entry point for compiled code. We want to patch a nmethod.
// We don't do a normal VM transition here because we want to
// know after the patching is complete and any safepoint(s) are taken
// if the calling nmethod was deoptimized. We do this by calling a
// helper method which does the normal VM transition and when it
// completes we can check for deoptimization. This simplifies the
// assembly code in the cpu directories.
//
//
// NOTE: we are still in Java
//
{
// Enter VM mode
}
// Back in JAVA, use no oops DON'T safepoint
// Return true if calling code is deoptimized
return caller_is_deopted();
// for now we just print out the block id
// Array copy return codes.
enum {
};
// Below length is the # elements copied.
int length) {
// For performance reasons, we assume we are using a card marking write
// barrier. The assert will fail if this is not the case.
// Note that we use the non-virtual inlineable variant of write_ref_array.
// same object, no check
return ac_ok;
} else {
// Elements are guaranteed to be subtypes, so no check necessary
return ac_ok;
}
}
return ac_failed;
}
// fast and direct copy of arrays; returning -1, means that an exception may be thrown
// and we did not copy anything
JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))
#ifndef PRODUCT
_generic_arraycopy_cnt++; // Slow-path oop array copy
#endif
if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed;
if (src->is_typeArray()) {
// Potential problem: memmove is not guaranteed to be word atomic
// Revisit in Merlin
return ac_ok;
if (UseCompressedOops) {
} else {
}
}
return ac_failed;
#ifndef PRODUCT
#endif
if (length == 0) return;
// Not guaranteed to be word atomic, but that doesn't matter
// for anything but an oop array, which is covered by oop_arraycopy.
#ifndef PRODUCT
#endif
if (num == 0) return;
if (UseCompressedOops) {
} else {
}
// had to return int instead of bool, otherwise there may be a mismatch
// between the C calling convention and the Java one.
// e.g., on x86, GCC may clear only %al when returning a bool false, but
// JVM takes the whole %eax as the return value, which may misinterpret
// the return value as a boolean true.
#ifndef PRODUCT
tty->print_cr(" _throw_incompatible_class_change_error_count: %d:", _throw_incompatible_class_change_error_count);
}
#endif // PRODUCT