// generateOopMap.cpp revision 605
/*
* Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
//
//
// Compute stack layouts for each instruction in method.
//
// Problems:
// - What to do about jsr with different types of local vars?
// Need maps that are conditional on jsr path?
// - Jsr and exceptions should be done more efficiently (the retAddr stuff)
//
// Alternative:
// - Could extend verifier to provide this information.
// For: one fewer abstract interpreter to maintain. Against: the verifier
// solves a bigger problem so slower (undesirable to force verification of
// everything?).
//
// Algorithm:
// Partition bytecodes into basic blocks
// For each basic block: store entry state (vars, stack). For instructions
// inside basic blocks we do not store any state (instead we recompute it
// from state produced by previous instruction).
//
// Perform abstract interpretation of bytecodes over this lattice:
//
// _--'#'--_
// / / \ \
// / / \ \
// / | | \
// 'r' 'v' 'p' ' '
// \ | | /
// \ \ / /
// \ \ / /
// -- '@' --
//
// '#' top, result of conflict merge
// 'r' reference type
// 'v' value type
// ' ' uninitialized; never occurs on operand stack in Java
// '@' bottom/unexecuted; initial state each bytecode.
//
// Basic block headers are the only merge points. We use this iteration to
// compute the information:
//
// find basic blocks;
// initialize them with uninitialized state;
// initialize first BB according to method signature;
// mark first BB changed
// while (some BB is changed) do {
// perform abstract interpration of all bytecodes in BB;
// merge exit state of BB into entry state of all successor BBs,
// noting if any of these change;
// }
//
// One additional complication is necessary. The jsr instruction pushes
// a return PC on the stack (a 'p' type in the abstract interpretation).
// To be able to process "ret" bytecodes, we keep track of these return
// PC's in a 'retAddrs' structure in abstract interpreter context (when
// processing a "ret" bytecodes, it is not sufficient to know that it gets
// an argument of the right type 'p'; we need to know which address it
// returns to).
//
// (Note this comment is borrowed from the original author of the algorithm)
#include "incls/_precompiled.incl"
#include "incls/_generateOopMap.cpp.incl"
// ComputeCallStack
//
// Specialization of SignatureIterator - compute the effects of a call
//
class ComputeCallStack : public SignatureIterator {
int _idx;
void setup();
public:
// Compute methods
_idx = 0;
if (!is_static)
return length();
};
_idx = 0;
return length();
}
};
//=========================================================================================
// ComputeEntryStack
//
// Specialization of SignatureIterator - in order to set up first stack frame
//
class ComputeEntryStack : public SignatureIterator {
int _idx;
void setup();
public:
// Compute methods
_idx = 0;
if (!is_static)
return length();
};
_idx = 0;
return length();
}
};
//=====================================================================================
//
// Implementation of RetTable/RetTableEntry
//
// Contains function to iterate through all bytecodes
// and find all return entry points
//
}
}
BytecodeStream i(method);
switch (bytecode) {
break;
break;
}
}
}
// Scan table for entry
if (!entry) {
// Allocate new entry and put in list
}
// Now "entry" is set. Make sure that the entry is initialized
// and has room for the new jsr.
}
while(cur) {
}
return NULL;
}
// The instruction at bci is changing size by "delta". Update the return map.
while(cur) {
}
}
//
// Celltype state
//
// Commonly used constants
// Shorthand "effect" signatures for bytecodes, used by the abstract
// interpreter: 'v' = value, 'r' = reference. Each array name spells out
// the sequence of cell types it describes, and every array is terminated
// by CellTypeState::bottom, which acts as the end-of-list sentinel.
static CellTypeState vrCTS[3] = { CellTypeState::value, CellTypeState::ref, CellTypeState::bottom };
static CellTypeState vvCTS[3] = { CellTypeState::value, CellTypeState::value, CellTypeState::bottom };
static CellTypeState rvrCTS[4] = { CellTypeState::ref, CellTypeState::value, CellTypeState::ref, CellTypeState::bottom };
static CellTypeState vvrCTS[4] = { CellTypeState::value, CellTypeState::value, CellTypeState::ref, CellTypeState::bottom };
static CellTypeState vvvCTS[4] = { CellTypeState::value, CellTypeState::value, CellTypeState::value, CellTypeState::bottom };
static CellTypeState vvvrCTS[5] = { CellTypeState::value, CellTypeState::value, CellTypeState::value, CellTypeState::ref, CellTypeState::bottom };
static CellTypeState vvvvCTS[5] = { CellTypeState::value, CellTypeState::value, CellTypeState::value, CellTypeState::value, CellTypeState::bottom };
// Render this cell's abstract-interpretation state as its one-character
// lattice symbol (see the lattice diagram in the file header comment).
char CellTypeState::to_char() const {
// A cell that may be a reference *and* something else is a merge
// conflict that must be rewritten ('#', the lattice top).
if (can_be_reference()) {
return (can_be_value() || can_be_address()) ? '#' : 'r';
}
if (can_be_value())
return 'v';
if (can_be_address())
return 'p';
if (can_be_uninit())
return ' ';
return '@';  // bottom: bytecode not yet executed
}
// Print a detailed CellTypeState. Indicate all bits that are set. If
// the CellTypeState represents an address or a reference, print the
// value of the additional information.
if (can_be_address()) {
} else {
}
if (can_be_reference()) {
} else {
}
if (can_be_value()) {
} else {
}
if (can_be_uninit()) {
} else {
}
if (is_info_top()) {
} else if (is_info_bottom()) {
} else {
if (is_reference()) {
if (info & ref_not_lock_bit) {
// Not a monitor lock reference.
if (info & ref_slot_bit) {
// slot
} else {
// line
}
} else {
// lock
}
} else {
}
}
}
//
// Basicblock handling methods
//
// Reset per-method basic-block bookkeeping (gc-point count and block
// count) before the bytecodes are partitioned into basic blocks.
void GenerateOopMap ::initialize_bb() {
_gc_points = 0;
_bb_count = 0;
}
if (c->is_bb_header(bci))
return;
if (TraceNewOopMapGeneration) {
}
c->set_bbmark_bit(bci);
c->_bb_count++;
}
bool fellThrough = false; // False to get first BB marked.
// First mark all exception handlers as start of a basic-block
int handler_pc_idx = i+2;
}
// Then iterate through the code
if (!fellThrough)
/* We will also mark successors of jsr's as basic block headers. */
switch (bytecode) {
break;
break;
}
if (possible_gc_point(&bcs))
_gc_points++;
}
}
bb->mark_as_alive();
}
}
void GenerateOopMap::mark_reachable_code() {
// Mark entry basic block as alive and all exception handlers
_basic_blocks[0].mark_as_alive();
int handler_pc_idx = i+2;
// If block is not already alive (due to multiple exception handlers to same bb), then
// make it alive
}
// Iterate through all basic blocks until we reach a fixpoint
while (change) {
change = 0;
for (int i = 0; i < _bb_count; i++) {
// Position bytecodestream at last bytecode in basicblock
// We will also mark successors of jsr's as alive.
switch (bytecode) {
break;
}
if (fell_through) {
// Mark successor as alive
change = 1;
}
}
}
}
}
}
/* If the current instruction in "c" has no effect on control flow,
returns "true". Otherwise, calls "jmpFct" one or more times, with
"c", an appropriate "pcDelta", and "data" as arguments, then
returns "false". There is one exception: if the current
instruction is a "ret", returns "false" without calling "jmpFct".
Arrangements for tracking the control flow of a "ret" must be made
externally. */
case Bytecodes::_if_icmpeq:
case Bytecodes::_if_icmpne:
case Bytecodes::_if_icmplt:
case Bytecodes::_if_icmpge:
case Bytecodes::_if_icmpgt:
case Bytecodes::_if_icmple:
case Bytecodes::_if_acmpeq:
case Bytecodes::_if_acmpne:
case Bytecodes::_ifnonnull:
break;
break;
break;
case Bytecodes::_tableswitch:
while (--len >= 0) {
}
break;
}
case Bytecodes::_lookupswitch:
while(--npairs >= 0) {
}
break;
}
break;
break;
return true;
break;
break;
default:
return true;
}
return false;
}
/* Requires "pc" to be the head of a basic block; returns that basic
block. */
return bb;
}
// Requires "pc" to be the start of an instruction; returns the basic
// block containing that instruction. */
int nbci;
if ( m == _bb_count-1) {
return bbs+m;
} else {
}
return bbs+m;
lo = m + 1;
} else {
hi = m - 1;
}
}
fatal("should have found BB");
return NULL;
}
{
}
}
//
// CellType handling methods
//
// Allocate the scratch buffer used to render a state vector as a
// printable string; sized for the largest of locals/stack/monitors
// plus a trailing NUL.
// NOTE(review): additional state-vector initialization may have been
// elided from this view -- confirm against the full source.
void GenerateOopMap::init_state() {
_state_vec_buf = NEW_RESOURCE_ARRAY(char, MAX3(_max_locals, _max_stack, _max_monitors) + 1/*for null terminator char */);
}
void GenerateOopMap::make_context_uninitialized() {
for (int i = 0; i < _max_locals; i++)
_stack_top = 0;
_monitor_top = 0;
}
int GenerateOopMap::methodsig_to_effect(symbolOop signature, bool is_static, CellTypeState* effect) {
}
// Return result of merging cts1 and cts2.
"merge of bottom values is handled elsewhere");
// If the top bit is set, we don't need to do any more work.
if (!result.is_info_top()) {
"only addresses and references have non-top info");
// The two values being merged are different. Raise to top.
if (result.is_reference()) {
} else {
}
}
}
return result;
}
// Merge the variable state for locals and stack from cts into bbts.
CellTypeState* bbts) {
int i;
bool change = false;
for (i = len - 1; i >= 0; i--) {
bbts[i] = v;
}
return change;
}
// Merge the monitor stack state from cts into bbts.
CellTypeState* bbts) {
bool change = false;
// If there are no monitors in the program, or there has been
// a monitor matching error before this point in the program,
// then we do not merge in the monitor state.
// Can we prove that, when there has been a change, it will already
// have been detected at this point? That would make this equal
// check here unnecessary.
bbts[i] = v;
}
}
return change;
}
for (int i = 0; i < len; i++) {
if (src[i].is_nonlock_reference()) {
} else {
}
}
}
}
}
// Merge the states for the current block and the next. As long as a
// block is reachable the locals and stack must be merged. If the
// stack heights don't match then this is a verification error and
// it's impossible to interpret the code. Simultaneously monitor
// states are being check to see if they nest statically. If monitor
// depths match up then their states are merged. Otherwise the
// mismatch is simply recorded and interpretation continues since
// monitor matching is purely informational and doesn't say anything
// about the correctness of the code.
// always merge local state even if monitors don't match.
bb->set_changed(true);
}
// monitors still match so continue merging monitor states.
bb->set_changed(true);
}
} else {
if (TraceMonitorMismatch) {
report_monitor_mismatch("monitor stack height merge conflict");
}
// When the monitor stacks are not matched, we set _monitor_top to
// bad_monitors. This signals that, from here on, the monitor stack cannot
// be trusted. In particular, monitorexit bytecodes may throw
// exceptions. We mark this block as changed so that the change
// propagates properly.
bb->set_changed(true);
_monitor_safe = false;
}
} else if (!bb->is_reachable()) {
// First time we look at this BB
bb->set_changed(true);
} else {
}
}
}
"wrong celltypestate");
return;
}
}
return valCTS; // just to pick something;
}
}
if ( _stack_top <= 0) {
verify_error("stack underflow");
return valCTS; // just to pick something
}
return stack()[--_stack_top];
}
if ( _stack_top >= _max_stack) {
verify_error("stack overflow");
return;
}
}
if (_monitor_top == 0) {
// We have detected a pop of an empty monitor stack.
_monitor_safe = false;
if (TraceMonitorMismatch) {
report_monitor_mismatch("monitor stack underflow");
}
}
return monitors()[--_monitor_top];
}
if (_monitor_top >= _max_monitors) {
// Some monitorenter is being executed more than once.
// This means that the monitor stack cannot be simulated.
_monitor_safe = false;
if (TraceMonitorMismatch) {
report_monitor_mismatch("monitor stack overflow");
}
return;
}
}
//
// Interpretation handling methods
//
// Driver for the iterative dataflow analysis: repeatedly (re)build the
// basic blocks and abstractly interpret all bytecodes until no
// ref/value conflicts remain or an error has been flagged. A detected
// conflict causes the bytecodes to be rewritten, which invalidates the
// block structure and forces another full pass.
void GenerateOopMap::do_interpretation()
{
// "i" is just for debugging, so we can detect cases where this loop is
// iterated more than once.
int i = 0;
do {
#ifndef PRODUCT
if (TraceNewOopMapGeneration) {
// NOTE(review): trace output appears to have been elided from this view.
}
#endif
// Reset per-pass status flags before interpreting.
_conflict = false;
_monitor_safe = true;
// init_state is now called from init_basic_blocks. The length of a
// state vector cannot be determined until we have made a pass through
// the bytecodes counting the possible monitor entries.
if (!_got_error) init_basic_blocks();
if (!_got_error) setup_method_entry_state();
if (!_got_error) interp_all();
if (!_got_error) rewrite_refval_conflicts();
i++;
} while (_conflict && !_got_error);
}
void GenerateOopMap::init_basic_blocks() {
// Note: Could consider reserving only the needed space for each BB's state
// (entry stack may not be of maximal height for every basic block).
// But cumbersome since we don't know the stack heights yet. (Nor the
// monitor stack heights...)
// Make a pass through the bytecodes. Count the number of monitorenters.
// This can be used an upper bound on the monitor stack depth in programs
// which obey stack discipline with their monitor usage. Initialize the
// known information about basic blocks.
int bbNo = 0;
int monitor_count = 0;
int prev_bci = -1;
}
if (is_bb_header(bci)) {
// Initialize the basicblock structure
bb->set_changed(false);
if (bbNo > 0) {
}
bbNo++;
}
// Remember prevous bci.
}
// Set
// Check that the correct number of basicblocks was found
verify_error("jump into the middle of instruction?");
return;
} else {
verify_error("extra basic blocks - should not happen?");
return;
}
}
// Now that we have a bound on the depth of the monitor stack, we can
// initialize the CellTypeState-related information.
init_state();
// We allocate space for all state-vectors for all basicblocks in one huge chuck.
// Then in the next part of the code, we set a pointer in each _basic_block that
// points to each piece.
// Make a pass over the basicblocks and assign their state vectors.
#ifdef ASSERT
}
#endif
}
#ifdef ASSERT
}
#endif
// Mark all alive blocks
}
void GenerateOopMap::setup_method_entry_state() {
// Initialize all locals to 'uninit' and set stack-height to 0
// Initialize CellState type of arguments
// If some references must be pre-assigned to null, then set that up
// This is the start state
}
// The instruction at bci is changing size by "delta". Update the basic blocks.
int new_method_size) {
"new method size is too small");
for(int k = 0; k < _bb_count; k++) {
}
}
}
//
// Initvars handling
//
void GenerateOopMap::initialize_vars() {
for (int k = 0; k < _init_vars->length(); k++)
}
// Is it already in the set?
return;
}
//
// Interpreration code
//
void GenerateOopMap::interp_all() {
bool change = true;
while (change && !_got_error) {
change = false;
for (int i = 0; i < _bb_count && !_got_error; i++) {
if (_got_error) return;
change = true;
bb->set_changed(false);
}
}
}
}
// We do not want to do anything in case the basic-block has not been initialized. This
// will happen in the case where there is dead-code hang around in a method.
// Set iterator interval to be the current basicblock
// Iterates through all bytecodes except the last in a basic block.
// We handle the last one special, since there is controlflow change.
if (_has_exceptions || _monitor_top != 0) {
// We do not need to interpret the results of exceptional
// continuation from this instruction when the method has no
// exception handlers and the monitor stack is currently
// empty.
}
}
// Handle last instruction.
if (!_got_error) {
if (_has_exceptions || _monitor_top != 0) {
}
if (_got_error) return;
// Automatically handles 'wide' ret indices
} else if (fall_through) {
// Hit end of BB, but the instr. was a fall-through instruction,
// so perform transition as if the BB ended in a "jump".
verify_error("bytecodes fell through last instruction");
return;
}
}
}
}
// Only check exception edge, if bytecode can trap
// These bytecodes can trap for rewriting. We need to assume that
// they do not throw exceptions to make the monitor analysis work.
return;
// If the monitor stack height is not zero when we leave the method,
// then we are either exiting with a non-empty stack or we have
// found monitor trouble earlier in our analysis. In either case,
// assume an exception could be taken here.
if (_monitor_top == 0) {
return;
}
break;
case Bytecodes::_monitorexit:
// If the monitor stack height is bad_monitors, then we have detected a
// monitor matching problem earlier in the analysis. If the
// monitor stack height is 0, we are about to pop a monitor
// off of an empty stack. In either case, the bytecode
// could throw an exception.
return;
}
break;
}
if (_has_exceptions) {
int cOpStackTop = _stack_top;
// Exception stacks are always the same.
// We remembered the size and first element of "cOpStck"
// above; now we temporarily set them to the appropriate
// values for an exception handler. */
_stack_top = 1;
// Now undo the temporary change.
// If this is a "catch all" handler, then we do not need to
// consider any additional handlers.
if (catch_type == 0) {
return;
}
}
}
}
// It is possible that none of the exception handlers would have caught
// the exception. In this case, we will exit the method. We must
// ensure that the monitor stack is empty in this case.
if (_monitor_top == 0) {
return;
}
// We pessimistically assume that this exception can escape the
// method. (It is possible that it will always be caught, but
// we don't care to analyse the types of the catch clauses.)
// We don't set _monitor_top to bad_monitors because there are no successors
// to this exceptional exit.
if (TraceMonitorMismatch && _monitor_safe) {
// We check _monitor_safe so that we only report the first mismatched
// exceptional exit.
report_monitor_mismatch("non-empty monitor stack at exceptional exit");
}
_monitor_safe = false;
}
#ifndef PRODUCT
#endif
}
for (int i = 0; i < num; i++) {
}
}
// Print the state values at the current bytecode.
bool detailed) {
if (detailed) {
case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
case Bytecodes::_invokeinterface:
}
if (_monitor_top != bad_monitors) {
} else {
}
} else {
if (_monitor_top != bad_monitors) {
os->print(" monitors = '%s' \t%s", state_vec_to_string(monitors(), _monitor_top), Bytecodes::name(currentBC->code()));
} else {
}
case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
case Bytecodes::_invokeinterface:
}
}
}
// Sets the current state to be the state after executing the
// current instruction, starting in the current state.
if (TraceNewOopMapGeneration) {
}
// Should we report the results? Result is reported *before* the instruction at the current bci is executed.
// However, not for calls. For calls we do not want to include the arguments, so we postpone the reporting until
// they have been popped (in method ppl).
if (_report_result == true) {
case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
case Bytecodes::_invokeinterface:
_report_result_for_send = true;
break;
default:
break;
}
}
// abstract interpretation of current opcode
break;
case Bytecodes::_aconst_null:
break;
case Bytecodes::_iconst_m1:
break;
break;
case Bytecodes::_if_icmpeq:
case Bytecodes::_if_icmpne:
case Bytecodes::_if_icmplt:
case Bytecodes::_if_icmpge:
case Bytecodes::_if_icmpgt:
break;
break;
break;
case Bytecodes::_if_acmpeq:
itr->get_index_big(),
case Bytecodes::_invokevirtual:
case Bytecodes::_arraylength:
// vlh(apple): do_exception_edge() does not get
// called if method has no exception handlers
if ((!_has_exceptions) && (_monitor_top > 0)) {
_monitor_safe = false;
}
break;
break;
// Java opcodes
default:
break;
}
}
}
}
}
}
"can only load refs. and values.");
if (out1.is_reference()) {
if (!vcts.is_reference()) {
// We were asked to push a reference, but the type of the
// variable can be something else
_conflict = true;
if (vcts.can_be_uninit()) {
// It is a ref-uninit conflict (at least). If there are other
// problems, we'll get them in the next round
} else {
// It wasn't a ref-uninit conflict. So must be a
// ref-val or ref-pc conflict. Split the variable.
}
} else {
}
// Otherwise it is a conflict, but one that verification would
// have caught if illegal. In particular, it can't be a topCTS
// resulting from merging two different pcCTS's since the verifier
// would have rejected any use of such a merge.
} else {
}
loc_no++;
}
}
// pop all arguments
// put them back
while (push_ch != '\0') {
}
}
}
}
}
}
}
}
}
}
if (_stack_top >= poplen) {
_stack_top -= poplen;
} else {
verify_error("stack underflow");
}
}
// Replace all occurrences of the state 'match' with the state 'replace'
// in our current state vector.
int i;
bool change = false;
for (i = len - 1; i >= 0; i--) {
}
}
if (_monitor_top > 0) {
}
}
}
}
// Abstract effect of a checkcast bytecode.
// NOTE(review): the body appears empty in this view; checkcast leaves
// the operand-stack depth unchanged, and the original presumably
// popped and re-pushed a reference here -- confirm against full source.
void GenerateOopMap::do_checkcast() {
}
if (_monitor_top == bad_monitors) {
return;
}
// Bail out when we get repeated locks on an identical monitor. This case
// isn't too hard to handle and can be made to work if supporting nested
// redundant synchronized statements becomes a priority.
//
// See also "Note" in do_monitorexit(), below.
if (actual.is_lock_reference()) {
_monitor_safe = false;
if (TraceMonitorMismatch) {
report_monitor_mismatch("nested redundant lock -- bailout...");
}
return;
}
if (!actual.is_info_top()) {
}
}
if (_monitor_top == bad_monitors) {
return;
}
// The monitor we are exiting is not verifiably the one
// on the top of our monitor stack. This causes a monitor
// mismatch.
_monitor_safe = false;
// We need to mark this basic block as changed so that
// this monitorexit will be visited again. We need to
// do this to ensure that we have accounted for the
// possibility that this bytecode will throw an
// exception.
bb->set_changed(true);
if (TraceMonitorMismatch) {
report_monitor_mismatch("improper monitor pair");
}
} else {
// This code is a fix for the case where we have repeated
// locking of the same object in straightline code. We clear
// out the lock when it is popped from the monitor stack
// and replace it with an unobtrusive reference value that can
// be locked again.
//
// Note: when generateOopMap is fixed to properly handle repeated,
// nested, redundant locks on the same object, then this
// fix will need to be removed at that time.
}
}
// Verify that the monitor stack is empty at a *return bytecode; if it
// is not, the monitorenter/monitorexit bytecodes cannot be statically
// matched and the monitor analysis becomes unsafe.
void GenerateOopMap::do_return_monitor_check() {
if (_monitor_top <= 0) return;  // properly matched -- nothing to do
// The monitor stack must be empty when we leave the method for the
// monitors to be properly matched.
_monitor_safe = false;
// Since there are no successors to the *return bytecode, it isn't
// necessary to poison _monitor_top with bad_monitors here.
if (TraceMonitorMismatch) {
report_monitor_mismatch("non-empty monitor stack at return");
}
}
}
}
for(int i = dims -1; i >=0; i--) {
}
}
// We actually expected ref or pc, but we only report that we expected a ref. It does not
// really matter (at least for now)
return;
}
}
// Does NOT terminate with a bottom. Returns the number of cells copied.
int idx = 0;
idx++;
}
return idx;
}
// Dig up signature for field in constant pool
// Parse signature (especially simple for fields)
// The signature is UTF8 encoded, but the first char is always ASCII for signatures.
int i = 0;
if (is_get) {
} else {
out = epsilonCTS;
}
}
// Dig up signature for field in constant pool
// Parse method signature
// Compute return type
// Temporary hack.
}
// Compute arguments
// Pop arguments
// Report results
if (_report_result_for_send == true) {
_report_result_for_send = false;
}
// Push return address
}
// This is used to parse the signature for fields, since they are very simple...
// Object and array
return out;
}
return vCTS; // Otherwise
}
// Running total used for statistics across all oop-map generations.
// NOTE(review): where this counter is incremented is not visible in
// this view -- presumably it accumulates method bytecode sizes.
long GenerateOopMap::_total_byte_count = 0;
// This function assumes "bcs" is at a "ret" instruction and that the vars
// state is valid for that instruction. Furthermore, the ret instruction
// must be the last instruction in "bb" (we store information about the
// "ret" in "bb").
void GenerateOopMap::ret_jump_targets_do(BytecodeStream *bcs, jmpFct_t jmpFct, int varNo, int *data) {
if (!ra.is_good_address()) {
verify_error("ret returns from two jsr subroutines?");
return;
}
// Make sure a jrtRet does not set the changed bit for dead basicblock.
if (TraceNewOopMapGeneration) {
}
}
}
//
// Debug method
//
#ifdef ASSERT
#endif
_state_vec_buf[len] = 0;
return _state_vec_buf;
}
// Print timing statistics for oop-map generation.
// NOTE(review): body appears empty in this view; the original likely
// printed accumulated timer values -- confirm against full source.
void GenerateOopMap::print_time() {
}
//
// ============ Main Entry Point ===========
//
// We have to initialize all variables here, that can be queried directly
_max_locals=0;
_init_vars = NULL;
#ifndef PRODUCT
// If we are doing a detailed trace, include the regular trace information.
TraceNewOopMapGeneration = true;
}
#endif
}
#ifndef PRODUCT
if (TimeOopMap2) {
}
if (TimeOopMap) {
}
#endif
// Initialize values
_got_error = false;
_conflict = false;
_report_result = false;
_report_result_for_send = false;
_new_var_map = NULL;
_did_rewriting = false;
_did_relocation = false;
if (TraceNewOopMapGeneration) {
if (Verbose) {
_method->print_codes();
}
}
}
// if no code - do nothing
// compiler needs info
return;
}
// Step 1: Compute all jump targets and their return value
if (!_got_error)
// Step 2: Find all basic blocks and count GC points
if (!_got_error)
// Step 3: Calculate stack maps
if (!_got_error)
// Step 4:Return results
if (!_got_error && report_results())
if (_got_error) {
}
}
// Error handling methods
// These methods create an exception for the current thread which is thrown
// at the bottom of the call stack, when it returns to compute_map(). The
// _got_error flag controls execution. NOT TODO: The VM exception propagation
// to be added as a parameter to every function and checked for every call.
// The tons of extra code it would generate didn't seem worth the change.
//
_got_error = true;
char msg_buffer[512];
// Append method name
char msg_buffer2[512];
jio_snprintf(msg_buffer2, sizeof(msg_buffer2), "%s in method %s", msg_buffer, method()->name()->as_C_string());
}
}
// We do not distinguish between different types of errors for verification
// errors. Let the verifier give a better message.
const char *msg = "Illegal class file encountered. Try running with -Xverify:all";
}
//
// Report result opcodes
//
// Replay one interpretation pass with reporting enabled, so the
// client callbacks observe the final, converged state of the parse.
void GenerateOopMap::report_result() {
_report_result = true;
// Prolog code
// Force every reachable basic block to be (re)interpreted exactly once.
int bb_index = 0;
while (bb_index < _bb_count) {
if (_basic_blocks[bb_index].is_reachable()) {
_basic_blocks[bb_index].set_changed(true);
interp_bb(&_basic_blocks[bb_index]);
}
bb_index++;
}
// Note: because dead code is skipped while reporting, the number of
// gc-points encountered here may be fewer than previously counted.
// (Dead code is a pain -- ideally it is removed before we get here.)
// Report initvars
_report_result = false;
}
// We now want to report the result of the parse
_report_result = true;
// Find basicblock and report results
bb->set_changed(true);
}
//
// Conflict handling code
//
if (TraceOopMapRewrites) {
}
if (!_new_var_map) {
for (int k = 0; k < _max_locals; k++) _new_var_map[k] = k;
}
// Check if max. number of locals has been reached
report_error("Rewriting exceeded local variable limit");
return;
}
}
}
{
// We can get here two ways: Either a rewrite conflict was detected, or
// an uninitialize reference was detected. In the second case, we do not
// do any rewriting, we just want to recompute the reference set with the
// new information
int nof_conflicts = 0; // Used for debugging only
if ( _nof_refval_conflicts == 0 )
return;
// Check if rewrites are allowed in this parse.
if (!allow_rewrites() && !IgnoreRewrites) {
fatal("Rewriting method not allowed at this stage");
}
// The following flag is to temporarily suppress rewrites. The locals that might conflict will
// all be set to contain values. This is UNSAFE - however, until the rewriting has been completely
// tested it is nice to have.
if (IgnoreRewrites) {
if (Verbose) {
for (int l = 0; l < _max_locals; l++) {
if (_new_var_map[l] != l) {
}
}
}
// That was that...
_new_var_map = NULL;
_conflict = false;
return;
}
// Tracing flag
_did_rewriting = true;
if (TraceOopMapRewrites) {
tty->print_cr("ref/value conflict for method %s - bytecodes are getting rewritten", method()->name()->as_C_string());
method()->print_codes();
}
if (!_got_error) {
for (int k = 0; k < _max_locals && !_got_error; k++) {
if (_new_var_map[k] != k) {
if (TraceOopMapRewrites) {
}
rewrite_refval_conflict(k, _new_var_map[k]);
if (_got_error) return;
}
}
}
// Adjust the number of locals
// That was that...
_new_var_map = NULL;
}
bool startOver;
do {
// Make sure that the BytecodeStream is constructed in the loop, since
// during rewriting a new method oop is going to be used, and the next time
// around we want to use that.
startOver = false;
}
} while (startOver && !_got_error);
}
/* If the current instruction is one that uses local variable "from"
in a ref way, change it to use "to". There's a subtle reason why we
renumber the ref uses and not the non-ref uses: non-ref uses may be
2 slots wide (double, long) which would necessitate keeping track of
whether we should add one or two variables to the method. If the change
affected the width of some instruction, returns "TRUE"; otherwise, returns "FALSE".
Another reason for moving ref's value is for solving (addr, ref) conflicts, which
*/
int index;
if (TraceOopMapRewrites) {
}
}
if (!stack_top_holds_ret_addr(bci)) {
if (TraceOopMapRewrites) {
}
} else {
if (TraceOopMapRewrites) {
}
}
}
return false;
}
// The argument to this method is:
// bc : Current bytecode
// bcN : either _aload or _astore
// bc0 : either _aload_0 or _astore_0
bool GenerateOopMap::rewrite_load_or_store(BytecodeStream *bcs, Bytecodes::Code bcN, Bytecodes::Code bc0, unsigned int varNo) {
int newIlen;
if (ilen == 4) {
// Original instruction was wide; keep it wide for simplicity
newIlen = 4;
} else if (varNo < 4)
newIlen = 1;
else if (varNo >= 256)
newIlen = 4;
else
newIlen = 2;
// If we need to relocate in order to patch the byte, we
// do the patching in a temp. buffer, that is passed to the reloc.
// The patching of the bytecode stream is then done by the Relocator.
// This is necessary, since relocating the instruction at a certain bci, might
// also relocate that instruction, e.g., if a _goto before it gets widened to a _goto_w.
// Hence, we do not know which bci to patch after relocation.
// Relocation needed do patching in temp. buffer
} else {
}
// Patch either directly in methodOop or in temp. buffer
if (newIlen == 1) {
} else if (newIlen == 2) {
} else {
}
}
}
class RelocCallback : public RelocatorListener {
private:
public:
// Callback method
}
};
// Returns true if expanding was successful. Otherwise, reports an error and
// returns false.
RelocCallback rcb(this);
if (m.is_null() || HAS_PENDING_EXCEPTION) {
report_error("could not rewrite method - exception occurred or bytecode buffer overflow");
return;
}
// Relocator returns a new method oop.
_did_relocation = true;
_method = m;
}
switch(bc) {
return true;
return true;
}
return false;
}
switch(bc) {
return true;
return true;
}
return false;
}
// Return true iff the top of the operand stack holds a return address at
// the current instruction
for(int i = 0; i < _ret_adr_tos->length(); i++) {
return true;
}
return false;
}
void GenerateOopMap::compute_ret_adr_at_TOS() {
_ret_adr_tos->clear();
for (int i = 0; i < bb_count(); i++) {
// Make sure to only check basicblocks that are reachable
if (bb->is_reachable()) {
// For each Basic block we check all instructions
// TDT: should this be is_good_address() ?
if (TraceNewOopMapGeneration) {
}
}
}
}
}
}
for(int i = 0; i < _ret_adr_tos->length(); i++) {
int v = _ret_adr_tos->at(i);
}
}
// ===================================================================
#ifndef PRODUCT
// Debug-build statistics: how many times conflict resolution ran, and
// how many of those runs required bytecode rewriting / relocation.
int ResolveOopMapConflicts::_nof_invocations = 0;
int ResolveOopMapConflicts::_nof_rewrites = 0;
int ResolveOopMapConflicts::_nof_relocations = 0;
#endif
#ifndef PRODUCT
// Tracking and statistics
if (PrintRewrites) {
if (did_rewriting()) {
if (did_relocation()) _nof_relocations++;
}
}
#endif
}