/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#include "runtime/vframeArray.hpp"
#endif
if (_thread->has_last_Java_frame()) {
} else {
}
}
// Unified register numbering scheme: each 32-bits counts as a register
// number, so all the V9 registers take 2 slots.
static bool register_map_inited = false;
static void register_map_init() {
if (!register_map_inited) {
register_map_inited = true;
int i;
for (i = 0; i < 8; i++) {
}
for (i = 0; i < 8; i++) {
}
}
}
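// Illustrative sketch, not part of the original file: under this numbering a
// 64-bit V9 register owns two consecutive 32-bit slot numbers, so (assuming
// slots are handed out in register-encoding order) general register n starts
// at slot 2*n and its low-order word, on this big-endian machine, is slot
// 2*n + 1.  The helper name is hypothetical.
static int first_slot_of_register(int reg_encoding) {
  return 2 * reg_encoding;  // second slot (the low half) would be 2*reg_encoding + 1
}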
// Only the GPRs get handled this way
if( !regname->is_Register())
return NULL;
// don't talk about bad registers
return NULL;
}
// Convert to a GPR
int second_word = 0;
// 32-bit registers for in, out and local
if (!regname->is_concrete()) {
// HMM ought to return NULL for any non-concrete (odd) vmreg
// this is all tied up in the fact that we put out double oopMaps for
// register locations. When that is fixed we will return NULL
// (or assert here).
#ifdef _LP64
second_word = sizeof(jint);
#else
return NULL;
#endif // _LP64
} else {
}
}
}
// Only the window'd GPRs get handled this way; not the globals.
return NULL;
}
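// Illustrative sketch, not the original body: for a window'd GPR (%i or %l)
// the saved value lives in the register save area of the frame that owns the
// window, at the same offset the hardware uses when it spills that window;
// second_word is the LP64 low-half adjustment set up above.  The helper name
// is hypothetical.
static address saved_window_slot(intptr_t* window_sp, Register reg, int second_word) {
  return (address)&window_sp[reg->sp_offset_in_saved_window()] + second_word;
}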
#ifdef ASSERT
void RegisterMap::check_location_valid() {
}
#endif
// We are shifting windows. That means we are moving all %i to %o,
// getting rid of all current %l, and keeping all %g. This is only
// complicated if any of the location pointers for these are valid.
// The normal case is that everything is in its standard register window
// home, and _location_valid[0] is zero. In that case, this routine
// does exactly nothing.
void RegisterMap::shift_individual_registers() {
if (!update_map()) return; // this only applies to maps with locations
// if we cleared some non-%g locations, we may have to do some shifting
// copy %i0-%i5 to %o0-%o5, if they have special locations
// This can happen within stubs which spill argument registers
// around a dynamic link operation, such as resolve_opt_virtual_call.
for (int i = 0; i < 8; i++) {
}
}
}
_location_valid[0] = lv;
}
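// Illustrative sketch, not the original body: across a window shift the
// callee's %i(n) and the caller's %o(n) name the same physical register, so a
// location recorded for an %i must be re-filed under the matching %o, %l
// locations are dropped, and %g locations are untouched.  The flat indexing
// by register encoding below is an assumption.
static void shift_in_locations_to_out(address* location /* indexed by register encoding */) {
  for (int n = 0; n < 6; n++) {
    location[O0->encoding() + n] = location[I0->encoding() + n];
  }
}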
// sp must be within the stack
if (!sp_safe) {
return false;
}
// unextended sp must be within the stack and above or equal sp
bool unextended_sp_safe = (_UNEXTENDED_SP <= thread->stack_base()) &&
(_UNEXTENDED_SP >= _SP);
if (!unextended_sp_safe) return false;
// an fp must be within the stack and above (but not equal) sp
// We know sp/unextended_sp are safe; only fp is questionable here
// If the current frame is known to the code cache then we can attempt to
// construct the sender and do some validation of it. This goes a long way
// toward eliminating issues when we get into frame construction code
// First check if frame is complete and tester is reliable
// Unfortunately we can only check frame completeness for runtime stubs and nmethods;
// other generic buffer blobs are more problematic so we just assume they are
// ok. Adapter blobs never have a complete frame and are never ok.
return false;
}
}
// Could just be some random pointer within the codeBlob
return false;
}
// Entry frame checks
if (is_entry_frame()) {
// an entry frame must have a valid fp.
if (!fp_safe) {
return false;
}
// Validate the JavaCallWrapper an entry frame must have
return jcw_safe;
}
bool adjusted_stack = is_interpreted_frame();
// We must always be able to find a recognizable pc
return false;
}
// Could be a zombie method
return false;
}
// It should be safe to construct the sender though it might not be valid
// Do we have a valid fp?
// an fp must be within the stack and above (but not equal) current frame's _FP
if (!sender_fp_safe) {
return false;
}
// If the potential sender is the interpreter then we can do some more checking
}
// Could just be some random pointer within the codeBlob
return false;
}
// We should never be able to see an adapter if the current frame is something from the code cache
return false;
}
if( sender.is_entry_frame()) {
// Validate the JavaCallWrapper an entry frame must have
return jcw_safe;
}
// If the frame size is 0 (or less) something is bad because every nmethod has a non-zero frame size
// because you must allocate window space
if (sender_blob->frame_size() <= 0) {
return false;
}
// The sender should positively be an nmethod or call_stub. On sparc we might in fact see something else.
// This is because at a save instruction the O7 we get can be a leftover from an earlier,
// stale pc. So if the sender blob is not something we'd expect we have little choice but to declare
// the stack unwalkable. pd_get_top_frame_for_signal_handler tries to recover from this by unwinding
// that initial frame and retrying.
if (!sender_blob->is_nmethod()) {
return false;
}
// Could put some more validation for the potential non-interpreted sender
// frame we'd create by calling sender if I could think of any. Wait for next crash in forte...
// One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb
// We've validated the potential sender that would be created
return true;
}
// Must be native-compiled frame. Since sender will try and use fp to find
// linkages it must be safe
if (!fp_safe) return false;
// could try and do some more potential verification of native frame if we could think of some...
return true;
}
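// Illustrative usage sketch, not part of the original file: a conservative
// stack walker (e.g. a profiler sampling from a signal handler) would guard
// every step with safe_for_sender() before trusting frame::sender().  The
// helper name walk_safely is hypothetical.
static int walk_safely(JavaThread* thread) {
  int depth = 0;
  frame fr = thread->last_frame();
  RegisterMap map(thread, false /* no update_map needed just to walk */);
  while (!fr.is_first_frame() && fr.safe_for_sender(thread)) {
    fr = fr.sender(&map);
    depth++;
  }
  return depth;
}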
// constructors
// Construct an unpatchable, deficient frame
#ifdef _LP64
#endif
_younger_sp = NULL;
}
#ifdef ASSERT
// Without a valid unextended_sp() we can't convert the pc to "original"
}
#endif // ASSERT
}
if (younger_sp == NULL) {
// make a deficient frame which doesn't know where its PC is
_pc = NULL;
_cb = NULL;
} else {
assert( (intptr_t*)younger_sp[FP->sp_offset_in_saved_window()] == (intptr_t*)((intptr_t)sp - STACK_BIAS), "younger_sp must be valid");
// Any frame we ever build should always be "safe" therefore we should not have to call
// find_blob_unsafe
// In case of native stubs, the pc retrieved here might be
// wrong. (the _last_native_pc will have the right value)
// So do not add any asserts on the _pc here.
}
// Check for MethodHandle call sites.
_sp_adjustment_by_callee = (intptr_t*) ((intptr_t) sp[L7_mh_SP_save->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
// The SP is already adjusted by this MH call site, don't
// overwrite this value with the wrong interpreter value.
younger_frame_is_interpreted = false;
}
}
}
if (younger_frame_is_interpreted) {
// compute adjustment to this frame's SP made by its interpreted callee
_sp_adjustment_by_callee = (intptr_t*) ((intptr_t) younger_sp[I5_savedSP->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
}
// It is important that the frame is fully constructed when we do
// this lookup as get_deopt_original_pc() needs a correct value for
// unextended_sp() which uses _sp_adjustment_by_callee.
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;
}
}
}
}
// sender_sp
return fp();
}
#ifndef CC_INTERP
}
#endif // CC_INTERP
#ifdef ASSERT
// Debugging aid
for(int i = 0; i < n; ++i)
f = f.sender((RegisterMap*)NULL);
if (f.is_compiled_frame()) {
if (f.is_deoptimized_frame())
printf("deoptimized frame 1\n");
else
printf("compiled frame 1\n");
}
return f;
}
#endif
// Java frame called from C; skip all C frames and return top C
// frame of that chunk as the sender
// Since we are walking the stack now this nested anchor is obviously walkable
// even if it wasn't when it was stacked.
// Capture _last_Java_pc (if needed) and mark anchor walkable.
}
}
}
}
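// Illustrative sketch of the entry-frame sender path described above, not the
// original body: the sender is reached through the JavaFrameAnchor stored in
// the entry frame's JavaCallWrapper; the anchor is made walkable on demand and
// the sender frame is rebuilt from its recorded sp/pc.  Treat the exact call
// sequence as an assumption; the helper name is hypothetical.
static frame entry_frame_sender_sketch(frame fr) {
  JavaFrameAnchor* jfa = fr.entry_frame_call_wrapper()->anchor();
  if (!jfa->walkable()) {
    jfa->capture_last_Java_pc(fr.sp());  // flush windows, record the pc once
  }
  return frame(jfa->last_Java_sp(), frame::unpatchable, jfa->last_Java_pc());
}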
// Default is not to follow arguments; update it accordingly below
map->set_include_argument_oops(false);
// Note: The version of this operation on any platform with callee-save
// registers must update the register map (if not null).
// In order to do this correctly, the various subtypes of
// of frame (interpreted, compiled, glue, native),
// must be distinguished. There is no need on SPARC for
// such distinctions, because all callee-save registers are
// preserved for all frames via SPARC-specific mechanisms.
//
// *** HOWEVER, *** if and when we make any floating-point
// registers callee-saved, then we will have to copy over
// the RegisterMap update logic from the Intel code.
// The constructor of the sender must know whether this frame is interpreted so it can set the
// sender's _sp_adjustment_by_callee field. An osr adapter frame was originally
// interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be
// explicitly recognized.
if (frame_is_interpreted) {
// Update the locations of implicitly saved registers to be their
// addresses in the register save area.
// For %o registers, the addresses of %i registers in the next younger
// frame are used.
if (map->update_map()) {
// Tell GC to use argument oopmaps for some runtime stubs that need it.
// For C1, the runtime stub might not have oop maps, so set this flag
// outside of update_register_map.
}
}
}
}
}
if (TracePcPatching) {
// QQQ this assert is invalid (or too strong anyway) since _pc could
// be original pc and frame could have the deopt pc.
// assert(_pc == *O7_addr() + pc_return_offset, "frame has wrong pc");
}
if (original_pc != NULL) {
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;
}
}
}
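// Illustrative sketch, not the original body: patching a SPARC frame's pc
// means rewriting the saved %o7 return-address slot, and since %o7 holds the
// address of the call instruction the stored value is the new pc minus
// pc_return_offset (the inverse of the relation in the commented-out assert
// above).  The raw-slot parameter stands in for this frame's O7 save slot.
static void patch_saved_return_address(address* saved_o7_slot, address new_pc) {
  *saved_o7_slot = new_pc - frame::pc_return_offset;
}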
/*
Find the (biased) sp that is just younger than old_sp starting at sp.
If not found return NULL. Register windows are assumed to be flushed.
*/
if (max_frames-- <= 0)
// too many frames have gone by; invalid parameters given to this function
break;
previous_sp = sp;
}
}
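// Illustrative sketch of the walk described above, not the original body:
// with flushed windows each save area holds the biased sp of the next older
// frame in its FP slot, so stepping the chain is a load plus un-biasing.
// The loop shape and frame cap are assumptions; the helper name is
// hypothetical.
static intptr_t* next_younger_sp_sketch(intptr_t* old_sp, intptr_t* sp, int max_frames) {
  intptr_t* previous_sp = NULL;
  while (sp < old_sp) {
    if (max_frames-- <= 0) return NULL;  // runaway chain, give up
    previous_sp = sp;
    sp = (intptr_t*)((intptr_t)sp[FP->sp_offset_in_saved_window()] + STACK_BIAS);
  }
  return (sp == old_sp) ? previous_sp : NULL;
}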
/*
Determine if "sp" is a valid stack pointer. "sp" is assumed to be younger than
"valid_sp". So if "sp" is valid itself then it should be possible to walk frames
from "sp" to "valid_sp". The assumption is that the registers windows for the
thread stack in question are flushed.
*/
}
}
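// Illustrative sketch, not the original body: with the windows flushed, "sp"
// is acceptable exactly when the save-area chain starting at it reaches
// "valid_sp" (the same walk sketched above); the frame cap is arbitrary.
static bool is_valid_stack_pointer_sketch(intptr_t* valid_sp, intptr_t* sp) {
  return next_younger_sp_sketch(valid_sp, sp, 4096) != NULL;
}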
if (is_interpreted_frame()) {
// set constant pool cache entry for interpreter
}
}
#ifdef CC_INTERP
// Is there anything to do?
#else
// These are reasonable sanity checks
return false;
}
return false;
}
return false;
}
// These are hacks to keep us out of trouble.
// The problem with these is that they mask other problems
return false;
}
// do some validation of frame elements
// first the method
// validate the method we'd find in this potential sender
// stack frames shouldn't be much larger than max_stack elements
return false;
}
if (m->validate_bci_from_bcx(bcx) < 0) {
return false;
}
// validate constantPoolCacheOop
// validate locals
// We'd have to be pretty unlucky to be misled at this point
#endif /* CC_INTERP */
return true;
}
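// Illustrative sketch, not the original body: the flavor of "reasonable
// sanity check" used above -- an interpreter frame should not span wildly
// more stack than the method's declared locals and expression stack, so a
// size test of roughly this shape rejects garbage frames.  The exact slack
// is an assumption; the helper name is hypothetical.
static bool plausible_interpreter_frame_size(methodOop m, intptr_t* fp, intptr_t* sp) {
  size_t frame_words = (size_t)(fp - sp);
  return frame_words < (size_t)(m->max_locals() + m->max_stack() + 1024 /* generous slack */);
}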
// Windows have been flushed on entry (but not marked). Capture the pc that
// is the return address to the frame that contains "sp" as its stack pointer.
// This pc resides in the callee of the frame corresponding to "sp".
// As a side effect we mark this JavaFrameAnchor as having flushed the windows.
// This side effect lets us mark stacked JavaFrameAnchors (stacked in the
// call_helper) as flushed when we have flushed the windows for the most
// recent (i.e. current) JavaFrameAnchor. This saves useless flushing calls
// and lets us find the pc just once rather than multiple times as it did
// in the bad old _post_Java_state days.
//
// try and find the sp just younger than _last_Java_sp
// Really this should never fail; otherwise the VM call must have non-standard
// frame linkage (bad) or the stack is not properly flushed (worse).
_last_Java_pc = (address) _post_Java_sp[ I7->sp_offset_in_saved_window()] + frame::pc_return_offset;
}
}
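// Illustrative helper, not part of the original file: the "+ frame::pc_return_offset"
// above is needed because a SPARC call leaves the address of the call
// instruction itself in %o7 (later %i7); the real resume point is past the
// call and its delay slot.  The helper name is hypothetical.
static address resume_pc_from_saved_i7(intptr_t* callee_sp) {
  return (address) callee_sp[I7->sp_offset_in_saved_window()] + frame::pc_return_offset;
}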
if (walkable()) return;
// Eventually make an assert
// We always flush in case the profiler wants it but we won't mark
// the windows as flushed unless we have a last_Java_frame
if (last_Java_sp() != NULL ) {
}
}
// convert offset to index to deal with tsi
}
// Prior to notifying the runtime of the method_exit the possible result
// value is saved to l_scratch and d_scratch.
#ifdef CC_INTERP
#else /* CC_INTERP */
#endif /* CC_INTERP */
#ifdef _LP64
// On 64-bit the result for 1/8/16/32-bit result types is in the other
// word half
#endif
switch (type) {
case T_OBJECT:
case T_ARRAY: {
#ifdef CC_INTERP
#else
*oop_result = obj;
#endif // CC_INTERP
break;
}
case T_VOID : /* Nothing to do */ break;
default : ShouldNotReachHere();
}
} else {
switch(type) {
case T_OBJECT:
case T_ARRAY: {
*oop_result = obj;
break;
}
case T_VOID : /* Nothing to do */ break;
default : ShouldNotReachHere();
}
};
return type;
}
// Lesp pointer is one word lower than the top item on the stack.
return &interpreter_frame_tos_address()[index];
}
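// Illustrative note, an assumption about the layout implied above: because
// Lesp points one word below the top item, the top-of-stack address is
// Lesp + 1 and deeper expression-stack elements sit at increasing indices
// from there.  The helper name is hypothetical.
static intptr_t* expression_stack_top_sketch(intptr_t* lesp_value) {
  return lesp_value + 1;  // == interpreter_frame_tos_address() for that frame
}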
#ifndef PRODUCT
for (int w = 0; w < frame::register_save_words; w++) {
}
if (is_interpreted_frame()) {
// esp, according to Lesp (i.e. not depending on bci), if it seems valid
}
}
if (!is_compiled_frame()) {
if (frame::callee_aggregate_return_pointer_words != 0) {
values.describe(frame_no, sp() + frame::callee_aggregate_return_pointer_sp_offset, "callee_aggregate_return_pointer_word");
}
for (int w = 0; w < frame::callee_register_argument_save_area_words; w++) {
values.describe(frame_no, sp() + frame::callee_register_argument_save_area_sp_offset + w,
err_msg("callee_register_argument_save_area_words %d", w));
}
}
}
#endif
// unused... but returns fp() to minimize changes introduced by 7087445
return fp();
}