/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/compiledICHolderOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_sparc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_sparc.inline.hpp"
// Implementation of StubAssembler
// Call a C runtime entry point from this stub, recording a call offset for
// oop-map bookkeeping, checking for pending exceptions afterwards, and
// moving up to two oop results out of the thread into the given registers.
// NOTE(review): this body appears truncated — the `} else {` under the
// "get oop result" comment has no visible opening `if (oop_result1...)`,
// and `call_offset` is returned without a visible definition. Restore the
// missing statements from the upstream file before building.
int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry_point, int number_of_arguments) {
// for sparc changing the number of arguments doesn't change
// anything about the frame size so we'll always lie and claim that
// we are only passing 1 argument.
set_num_rt_args(1);
// bang stack before going to runtime
// debugging support
// do the call
if (!VerifyThread) {
} else {
}
// check for pending exceptions
{ Label L;
if (frame_size() == no_frame_size) {
// we use O7 linkage so that forward_exception_entry has the issuing PC
} else {
}
bind(L);
}
// get oop result if there is one and reset the value in the thread
// NOTE(review): opening `if` for this else-branch is missing from view.
} else {
// be a little paranoid and clear the result
}
if (oop_result2->is_valid()) {
} else {
// be a little paranoid and clear the result
}
return call_offset;
}
// Convenience overload: runtime call taking one register argument.
// NOTE(review): body truncated — the argument marshalling and the
// delegating call/return are missing (declares an int result but has no
// return statement). Restore from upstream before use.
int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
// O0 is reserved for the thread
}
// Convenience overload: runtime call taking two register arguments.
// NOTE(review): body truncated — argument marshalling and the delegating
// call/return are missing (declares an int result but has no return).
int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
// O0 is reserved for the thread
}
// Convenience overload: runtime call taking three register arguments.
// NOTE(review): body truncated — argument marshalling and the delegating
// call/return are missing (declares an int result but has no return).
int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
// O0 is reserved for the thread
}
// Implementation of Runtime1
static int reg_save_size_in_words;
// NOTE(review): the lines below are an orphaned function tail — the
// enclosing function's signature and most of its statements have been
// stripped; the bare string is the trailing argument of a removed assert.
// Judging from `return oop_map;` this was presumably an OopMap-building
// routine iterating all cpu and (optionally) fpu registers — confirm
// against the upstream file before attempting any repair.
"mismatch in calculation");
int i;
for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
Register r = as_Register(i);
r->as_VMReg());
}
}
if (save_fpu_registers) {
for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
FloatRegister r = as_FloatRegister(i);
r->as_VMReg());
}
}
return oop_map;
}
// NOTE(review): orphaned function fragment — the enclosing signature and
// most statements are missing (the bare string is the tail of a stripped
// assert). The retained comments suggest a register-saving routine that
// records save locations in an OopMap; confirm against upstream.
"mismatch in calculation");
// Record volatile registers as callee-save values in an OopMap so their save locations will be
// propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
// deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
// are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
// (as the stub's I's) when the runtime routine called by the stub creates its frame.
// OopMap frame sizes are in c2 stack slot sizes (sizeof(jint))
int i;
for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
Register r = as_Register(i);
}
}
if (save_fpu_registers) {
for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
FloatRegister r = as_FloatRegister(i);
}
}
}
// NOTE(review): orphaned function fragment — the enclosing signature is
// missing and the loop bodies are empty (their statements were stripped;
// note the unmatched closing brace after the first loop). Presumably the
// register-restore counterpart of the save routine; confirm upstream.
for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
Register r = as_Register(i);
}
}
if (restore_fpu_registers) {
for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
FloatRegister r = as_FloatRegister(i);
}
}
}
// compute word offsets from SP at which live (non-windowed) registers are captured by stub routines
//
// A stub routine will have a frame that is at least large enough to hold
// a register window save area (obviously) and the volatile g registers
// and floating registers. A user of save_live_registers can have a frame
// that has more scratch area in it (although typically they will use L-regs).
// in that case the frame will look like this (stack growing down)
//
// FP -> | |
// | scratch mem |
// | " " |
// --------------
// | float regs |
// | " " |
// ---------------
// | G regs |
// | " " |
// ---------------
// | abi reg. |
// | window save |
// | area |
// SP -> ---------------
//
// NOTE(review): the function header for this offset-computation routine is
// missing, `sp_offset` is incremented without a visible declaration, and
// there is an unmatched closing brace after the first loop — the code
// statements were stripped. Restore from upstream before building.
int i;
// only G int registers are saved explicitly; others are found in register windows
for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
Register r = as_Register(i);
sp_offset++;
}
}
// all float registers are saved explicitly
for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
sp_offset++;
}
// this should match assembler::total_frame_size_in_bytes, which
// isn't callable from this context. It's checked by an assert when
// it's used though.
}
// Generate a stub that enters the runtime to throw an exception, either
// with no argument or with one argument, and return the resulting oop maps.
// NOTE(review): body truncated — both branches are empty and `oop_maps` is
// returned without a visible definition; the save-registers / call_RT
// statements were stripped. Restore from upstream before building.
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
// make a frame and preserve the caller's caller-save registers
int call_offset;
if (!has_argument) {
} else {
}
return oop_maps;
}
// NOTE(review): orphaned function tail — the signature, the opening of the
// if/else below, and the definitions of `call_offset` and `oop_maps` are
// missing. Pattern matches a frame-building stub-call generator; confirm
// against the upstream file.
// make a frame and preserve the caller's caller-save registers
int call_offset;
} else {
}
return oop_maps;
}
// NOTE(review): orphaned function tail — the signature and all code
// statements are missing; only the explanatory comments and the final
// return survive. The comments describe a code-patching stub that may
// re-enter the deoptimization handler; confirm against upstream.
// make a frame and preserve the caller's caller-save registers
// call the runtime patching routine, returns non-zero if nmethod got deopted.
// re-execute the patched instruction or, if the nmethod was deoptmized, return to the
// deoptimization handler entry that will cause re-execution of the current bytecode
// return to the deoptimization handler entry for unpacking and rexecute
// if we simply returned the we'd deopt as if any call we patched had just
// returned.
return oop_maps;
}
// NOTE(review): fragment of a large per-stub-id code generator — the
// enclosing function signature is missing and most case bodies have been
// stripped (note the dangling `UseTLAB && FastTLABRefill) {` condition
// tail, several `case` labels with no opening brace, and `break`s whose
// cases were removed). Do not build as-is; restore from upstream.
// for better readability
const bool must_gc_arguments = true;
const bool dont_gc_arguments = false;
// stub code & info for the different stubs
switch (id) {
case forward_exception_id:
{
}
break;
case new_instance_id:
case fast_new_instance_id:
{
if (id == new_instance_id) {
} else if (id == fast_new_instance_id) {
} else {
}
UseTLAB && FastTLABRefill) {
// Push a frame since we may do dtrace notification for the
// allocation which requires calling out and we don't want
// to stomp the real return address.
__ save_frame(0);
if (id == fast_new_instance_init_check_id) {
// make sure the klass is initialized
__ cmp_and_br_short(G3_t1, instanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path);
}
#ifdef ASSERT
// assert object can be fast path allocated
{
// make sure it's an instance (LH > 0)
}
#endif // ASSERT
// if we got here then the TLAB allocation failed, so try
// refilling the TLAB or allocating directly from eden.
// get the instance size
// get the instance size
// pop this frame so generate_stub_call can push it's own
}
// I0->O0: new instance
}
break;
case counter_overflow_id:
// G4 contains bci, G5 contains method
break;
case new_type_array_id:
case new_object_array_id:
{
// Use this offset to pick out an individual byte of the layout_helper:
if (id == new_type_array_id) {
} else {
}
#ifdef ASSERT
// assert object type is really an array of the proper kind
{
}
#endif // ASSERT
if (UseTLAB && FastTLABRefill) {
// check that array length is small enough for fast path
// if we got here then the TLAB allocation failed, so try
// refilling the TLAB or allocating directly from eden.
// get the allocation size: (length << (layout_helper & 0x1F)) + header_size
// get the allocation size: (length << (layout_helper & 0x1F)) + header_size
}
if (id == new_type_array_id) {
oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_type_array), G5_klass, G4_length);
} else {
oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_object_array), G5_klass, G4_length);
}
// I0 -> O0: new array
}
break;
case new_multi_array_id:
{ // O0: klass
// O1: rank
// O2: address of 1st dimension
// I0 -> O0: new multi array
}
break;
case register_finalizer_id:
{
// load the klass and check the has finalizer flag
// do a leaf return
// Now restore all the live registers
}
break;
// NOTE(review): the `case throw_range_check_failed_id:` (or similar)
// label for this handler appears to have been stripped.
// G4: index
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
}
break;
case throw_index_exception_id:
// G4: index
}
break;
case throw_div0_exception_id:
}
break;
// NOTE(review): the case label for the null-pointer handler is missing.
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
}
break;
case handle_exception_id:
}
break;
}
break;
// NOTE(review): continuation fragment of a per-stub-id code generator
// whose signature is not in view — many case bodies below are stripped
// (unterminated `call_VM_leaf` argument list, anonymous `{` blocks whose
// case labels were removed, initializers with no right-hand side).
// Restore from upstream before building.
case unwind_exception_id:
{
// O0: exception
// I7: address of call to this method
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
// Restore SP from L7 if the exception PC is a method handle call site.
__ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP); // Restore SP if required.
}
break;
{
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
}
break;
{
// G4: object
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
}
break;
{
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
}
break;
case slow_subtype_check_id:
{ // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
// Arguments :
//
// ret : G3
// sub : G3, argument, destroyed
// super: G1, argument, not changed
// raddr: O7, blown by call
}
case monitorenter_nofpu_id:
case monitorenter_id:
{ // G4: object
// G5: lock address
// make a frame and preserve the caller's caller-save registers
}
break;
case monitorexit_nofpu_id:
case monitorexit_id:
{ // G4: lock address
// note: really a leaf routine but must setup last java sp
// => use call_RT for now (speed can be improved by
// doing last java sp setup manually)
// make a frame and preserve the caller's caller-save registers
}
break;
case deoptimize_id:
{
}
break;
case access_field_patching_id:
}
break;
case load_klass_patching_id:
}
break;
case dtrace_object_alloc_id:
{ // O0: object
// we can't gc here so skip the oopmap but make sure that all
// the live registers get saved.
}
break;
#ifndef SERIALGC
case g1_pre_barrier_slow_id:
{ // G4: previous value of memory
__ save_frame(0);
break;
}
int satb_q_index_byte_offset =
int satb_q_buf_byte_offset =
// Load the index into the SATB buffer. PtrQueue::_index is a
// size_t so ld_ptr is appropriate
// index == 0?
// Use return-from-leaf
__ save_frame(0);
}
break;
case g1_post_barrier_slow_id:
{
__ save_frame(0);
break;
}
#ifdef _LP64
#else
#endif
// We didn't take the branch, so we're already dirty: return.
// Use return-from-leaf
// Not dirty.
// Get cardtable + tmp into a reg by itself
// First, dirty it.
// these registers are now dead
// Get the index into the update buffer. PtrQueue::_index is
// a size_t so ld_ptr is appropriate here.
// index == 0?
// Use return-from-leaf
__ save_frame(0);
}
break;
#endif // !SERIALGC
default:
__ save_frame(0);
}
break;
}
return oop_maps;
}
// NOTE(review): fragment of an exception-handling stub generator — the
// enclosing signature is missing, several case labels inside both
// switches have been stripped (note the comment blocks and `break`s with
// no preceding label), and `oop_maps` is returned without a visible
// definition. Restore from upstream before building.
// Save registers, if required.
switch (id) {
case forward_exception_id:
// We're handling an exception in the context of a compiled frame.
// The registers have been saved in the standard places. Perform
// an exception lookup in the caller and dispatch to the handler
// if found. Otherwise unwind and dispatch to the callers
// exception handler.
// transfer the pending exception to the exception_oop
break;
case handle_exception_id:
// At this point all registers MAY be live.
break;
// At this point all registers except exception oop (Oexception)
// and exception pc (Oissuing_pc) are dead.
break;
default: ShouldNotReachHere();
}
// save the exception and issuing pc in the thread
// use the throwing pc as the return address to lookup (has bci & oop map)
// Note: if nmethod has been deoptimized then regardless of
// whether it had a handler or not we will deoptimize
// by entering the deopt blob with a pending exception.
// Restore the registers that were saved at the beginning, remove
// the frame and jump to the exception handler.
switch (id) {
case forward_exception_id:
case handle_exception_id:
break;
// Restore SP from L7 if the exception PC is a method handle call site.
__ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP); // Restore SP if required.
break;
default: ShouldNotReachHere();
}
return oop_maps;
}
// NOTE(review): orphaned tail of a name-lookup routine (presumably a
// pd_name_for_address-style helper) — the signature and any recognized-
// address cases are missing; only the fallback return survives.
return "<unknown function>";
}