// parse2.cpp revision 2273
/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "ci/ciMethodData.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"
// Parser-wide statistics counters for explicit null checks, defined in a
// companion parser file.
// NOTE(review): the original line ended in a dangling comma (truncated
// declaration, a syntax error).  Restored the second declarator as
// explicit_null_checks_elided — TODO confirm against the defining file.
extern int explicit_null_checks_inserted, explicit_null_checks_elided;
//---------------------------------array_load----------------------------------
if (stopped()) return; // guaranteed null or range check
}
//--------------------------------array_store----------------------------------
if (stopped()) return; // guaranteed null or range check
}
//------------------------------array_addressing-------------------------------
// Pull array and index from the stack. Compute pointer-to-element.
// Null check the array base, with correct stack contents
// Compile-time detect of null-exception?
// If we load from "AbstractClass[]" we must see "ConcreteSubClass".
}
}
}
// Check for big class initializers with all constant offsets
// feeding into a known-size array.
// See if the highest idx value is less than the lowest array bound,
// and if the idx value cannot be negative:
bool need_range_check = true;
need_range_check = false;
}
// Only fails for some -Xcomp runs
// The class is unloaded. We have to run this bytecode in the interpreter.
return top();
}
// Do the range check
if (GenerateRangeChecks && need_range_check) {
// The greatest array bound is negative, so we can conclude that we're
// compiling unreachable code, but the unsigned compare trick used below
// only works with non-negative lengths. Instead, hack "tst" to be zero so
// the uncommon_trap path will always be taken.
} else {
// Range is constant in array-oop, so we can use the original state of mem
// Test length vs index (standard trick using unsigned compare)
}
// Branch to failure if out of bounds
if (C->allow_range_check_smearing()) {
// Do not use builtin_throw, since range checks are sometimes
// made more stringent by an optimistic transformation.
// This creates "tentative" range checks at this point,
// which are not guaranteed to throw exceptions.
// See IfNode::Ideal, is_range_check, adjust_check.
NULL, "range_check");
} else {
// If we have already recompiled with the range-check-widening
// heroic optimization turned off, then we must really be throwing
// range check exceptions.
}
}
}
// Check for always knowing you are throwing a range-check exception
return ptr;
}
// returns IfNode
Node *cmp = _gvn.transform( new (C, 3) CmpINode( a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
IfNode *iff = create_and_map_if( control(), tst, ((mask == BoolTest::eq) ? PROB_STATIC_INFREQUENT : PROB_FAIR), COUNT_UNKNOWN );
return iff;
}
// return Region node
return region;
}
//------------------------------helper for tableswitch-------------------------
// True branch, use existing map info
{ PreserveJVMState pjvms(this);
set_control( iftrue );
}
// False branch
set_control( iffalse );
}
// True branch, use existing map info
{ PreserveJVMState pjvms(this);
set_control( iffalse );
}
// False branch
set_control( iftrue );
}
// False branch, use existing map and control()
}
extern "C" {
static int jint_cmp(const void *i, const void *j) {
int a = *(jint *)i;
int b = *(jint *)j;
return a > b ? 1 : a < b ? -1 : 0;
}
}
// Default value for methodData switch indexing. Must be a negative value to avoid
// conflict with any legal switch index.
#define NullTableIndex -1
// SwitchRange: a range of consecutive switch-key integers coupled with a
// single bci destination — the building block for the decision trees that
// do_tableswitch/do_lookupswitch construct below.
// NOTE(review): interior lines of this class (field and method
// declarations/bodies) are missing from this extraction — hence the
// orphaned braces and statements below; surviving tokens are preserved
// byte-for-byte.
class SwitchRange : public StackObj {
// a range of integers coupled with a bci destination
int _dest;                            // destination bci for keys in this range
int _table_index; // index into method data table
public:
// Accessor for the method-data table index recorded for this range
// (NullTableIndex when no profile slot applies — see #define above).
int table_index() const { return _table_index; }
}
return true;
}
return false;
}
}
}
if (is_singleton())
else
}
};
//-------------------------------do_tableswitch--------------------------------
// Parse one tableswitch bytecode: read its bounds/targets and lower it to a
// decision tree of compares and branches.
// NOTE(review): the bytecode-iterator reads that set up 'len' (presumably
// the case count) and the SwitchRange array are missing from this
// extraction — TODO confirm against a full copy; code below is kept
// byte-identical.
void Parse::do_tableswitch() {
// Get information about tableswitch
// Degenerate switch: no cases at all — everything falls to the default.
if (len < 1) {
// If this is a backward branch, add safepoint
return;
}
// generate decision tree, using trichotomy when possible
bool makes_backward_branch = false;  // set when some case target branches backwards
int rp = -1;                         // presumably the highest filled range index — TODO confirm
}
for (int j = 0; j < len; j++) {
}
}
}
// Safepoint in case if backward branch observed
if( makes_backward_branch && UseLoopSafepoints )
}
//------------------------------do_lookupswitch--------------------------------
// Parse one lookupswitch bytecode: read its key/offset pairs and lower it
// to a decision tree of compares and branches.
// NOTE(review): most interior lines (iterator reads, range construction)
// are missing from this extraction; surviving statements are preserved
// byte-for-byte.
void Parse::do_lookupswitch() {
// Get information about lookupswitch
return;
}
// generate decision tree, using trichotomy when possible
{
for( int j = 0; j < len; j++ ) {
}
}
bool makes_backward_branch = false;  // set when some case target branches backwards
int rp = -1;
for( int j = 0; j < len; j++ ) {
}
}
}
}
// Safepoint in case backward branch observed
if( makes_backward_branch && UseLoopSafepoints )
}
//----------------------------create_jump_tables-------------------------------
// Are jumptables enabled
if (!UseJumpTables) return false;
// Are jumptables supported
// Don't make jump table if profiling
if (method_data_update()) return false;
// Decide if a guard is needed to lop off big ranges at either (or
// both) end(s) of the input set. We'll call this the default target
// even though we can't be sure that it is the true "default".
bool needs_guard = false;
int default_dest;
int64 total_outlier_size = 0;
} else {
}
// If a guard test will eliminate very sparse end ranges, then
// it is worth the cost of an extra jump.
needs_guard = true;
}
// Find the total number of cases and ranges
// Don't create table if: too large, too small, or too sparse.
return false;
return false;
// Normalize table lookups to zero
// Generate a guard to protect against input keyvals that aren't
// in the switch domain.
if (needs_guard) {
}
// Create an ideal node JumpTable that has projections
// of all possible ranges for a switch statement
// The key_val input must be converted to a pointer offset and scaled.
// Compare Parse::array_addressing above.
#ifdef _LP64
// Clean the 32-bit int into a real 64-bit offset.
// Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
#endif
// Shift the value by wordsize so we have an index into the table, rather
// than a switch value
// Create the JumpNode
// These are the switch destinations hanging off the jumpnode
int i = 0;
{
PreserveJVMState pjvms(this);
}
}
}
stop_and_kill_map(); // no more uses for this JVMS
return true;
}
//----------------------------jump_switch_ranges-------------------------------
if (switch_depth == 0) {
// Do special processing for the top-level call.
// Decrement pred-numbers for the unique set of nodes.
#ifdef ASSERT
// Ensure that the block's successors are a (duplicate-free) set.
int successors_counted = 0; // block occurrences in [hi..lo]
for (int i = 0; i < unique_successors; i++) {
// Check that the set of successors is the same in both places.
int successors_found = 0;
}
}
#endif
// Maybe prune the inputs, based on the type of key_val.
}
}
#ifndef PRODUCT
if (switch_depth == 0) {
_max_switch_depth = 0;
}
#endif
} else {
// if there is an easy choice, pivot at a singleton:
if (mid->is_singleton()) {
// Special Case: If there are exactly three ranges, and the high
// and low range each go to the same place, omit the "gt" test,
// since it will not discriminate anything.
if (eq_test_only) {
}
// if there is a higher range, test for it and process it:
// two comparisons of same values--should enable 1 test for 2 branches
// Use BoolTest::le instead of BoolTest::gt
{ PreserveJVMState pjvms(this);
}
}
} else {
// mid is a range, not a singleton, so treat mid..hi as a unit
// if there is a higher range, test for it and process it:
} else {
{ PreserveJVMState pjvms(this);
}
}
}
// in any case, process the lower range
}
// Decrease pred_count for each successor after all is done.
if (switch_depth == 0) {
for (int i = 0; i < unique_successors; i++) {
// Throw away the pre-allocated path for each unique successor.
target->next_path_num();
}
}
#ifndef PRODUCT
SwitchRange* r;
int nsing = 0;
if( r->is_singleton() ) nsing++;
}
if (_max_switch_depth > _est_switch_depth) {
}
}
}
#endif
}
}
#ifdef ASSERT
#endif
}
}
// Must keep both values on the expression-stack during null-check
// Compile-time detect of null-exception?
if (stopped()) return;
// check for positive power of 2
if (divisor > 0 &&
// yes !
// Sigh, must handle negative dividends
// Fast positive case
// Push the merge
return;
}
}
}
// Default case
}
// Handle jsr and jsr_w bytecode
// Store information about current state, tagged with new _jsr_bci
// Update method data
// The way we do things now, there is only one successor block
// for the jsr, because the target code is cloned by ciTypeFlow.
// What got pushed?
// Effect on jsr on stack
// Flow to the jsr.
}
// Handle ret bytecode
// Find to whom we return.
#if 0 // %%%% MAKE THIS WORK
#else
#endif
}
//--------------------------dynamic_branch_prediction--------------------------
// Try to gather dynamic branch prediction behavior. Return a probability
// of the branch being taken and set the "cnt" field. Returns a -1.0
// if we need to use static prediction for some reason.
cnt = COUNT_UNKNOWN;
// Use MethodData information if it is available
// FIXME: free the ProfileData structure
// get taken and not taken values
int not_taken = 0;
if (data->is_BranchData()) {
}
// scale the counts to be commensurate with invocation counts:
// Give up if too few counts to be meaningful
C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
}
return PROB_UNKNOWN;
}
// Compute frequency that we arrive here
// Adjust, if this block is a cloned private block but the
// Jump counts are shared. Taken the private counts for
// just this path instead of the shared counts.
// Pin probability to sane limits
float prob;
if( !taken )
else if( !not_taken )
else { // Compute probability of true path
}
"Bad frequency assignment in if");
char prob_str_buf[30];
}
}
return prob;
}
//-----------------------------branch_prediction-------------------------------
int target_bci) {
// If prob is unknown, switch to static prediction
// If this is a conditional test guarding a backwards branch,
// assume its a loop-back edge. Make it a likely taken branch.
if (target_bci < bci()) {
if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt
// Since it's an OSR, we probably have profile data, but since
// branch_prediction returned PROB_UNKNOWN, the counts are too small.
// Let's make a special check here for completely zero counts.
if (!methodData->is_empty()) {
// Only stop for truly zero counts, which mean an unknown part
// of the OSR-ed method, and we want to deopt to gather more stats.
// If you have ANY counts, then this loop is simply 'cold' relative
// to the OSR loop.
// This is the only way to return PROB_UNKNOWN:
return PROB_UNKNOWN;
}
}
}
}
return prob;
}
// The magic constants are chosen so as to match the output of
// branch_prediction() when the profile reports a zero taken count.
// It is important to distinguish zero counts unambiguously, because
// some branches (e.g., _213_javac.Assembler.eliminate) validly produce
// very small but nonzero probabilities, which if confused with zero
// counts would keep the program recompiling indefinitely.
}
// True if the comparison seems to be the kind that will not change its
// statistics from true to false. See comments in adjust_map_after_if.
// This question is only asked along paths which are already
// classifed as untaken (by seems_never_taken), so really,
// if a path is never taken, its controlling comparison is
// already acting in a stable fashion. If the comparison
// seems stable, we will put an expensive uncommon trap
// on the untaken path. To be conservative, and to allow
// partially executed counted loops to be compiled fully,
// we will plant uncommon traps only after pointer comparisons.
// The following switch can find CmpP here over half the time for
// dynamic language code rich with type tests.
// Code using counted loops or array manipulations (typical
// of benchmarks) will have many (>80%) CmpI instructions.
case Op_CmpP:
// These certainly should be closed off as uncommon traps.
return true;
// Let's put traps on those, too, so that we don't have to compile
// unused paths with indeterminate dynamic type information.
if (ProfileDynamicTypes)
return true;
return false;
case Op_CmpI:
// A small minority (< 10%) of CmpP are masked as CmpI,
// as if by boolean conversion ((p == q? 1: 0) != 0).
// Detect that here, even if it hasn't optimized away yet.
// Specifically, this covers the 'instanceof' operator.
if (true_path > 0 &&
// phi->region->if_proj->ifnode->bool->cmp
continue;
}
}
}
return false;
}
}
return false;
}
//-------------------------------repush_if_args--------------------------------
// Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
// Re-expose the operands of the current "if" bytecode on the expression
// stack by adjusting _sp (see the banner comment preceding this function),
// so a subsequent re-execution or uncommon trap sees the original operands.
// Returns bc_depth — presumably the number of argument slots re-pushed;
// its computation is missing from this extraction (TODO confirm).
inline int Parse::repush_if_args() {
#ifndef PRODUCT
// Optional debug trace, only in non-product builds under PrintOpto+WizardMode.
if (PrintOpto && WizardMode) {
}
#endif
return bc_depth;
}
//----------------------------------do_ifnull----------------------------------
float cnt;
if (prob == PROB_UNKNOWN) {
// (An earlier version of do_ifnull omitted this trap for OSR methods.)
#ifndef PRODUCT
#endif
repush_if_args(); // to gather stats on loop
// We need to mark this branch as taken so that if we recompile we will
// see that it is possible. In the tiered system the interpreter doesn't
// do profiling and by the time we get to the lower tier from the interpreter
// the path may be cold again. Make sure it doesn't look untaken
NULL, "cold");
if (EliminateAutoBox) {
// Mark the successor blocks as parsed
}
return;
}
// Generate real control flow
// Sanity check the probability value
// Need xform to put node in hash table
// True branch
{ PreserveJVMState pjvms(this);
if (stopped()) { // Path is dead?
if (EliminateAutoBox) {
// Mark the successor block as parsed
}
} else { // Path is live.
// Update method data
if (!stopped()) {
}
}
}
// False branch
if (stopped()) { // Path is dead?
if (EliminateAutoBox) {
// Mark the successor block as parsed
}
} else { // Path is live.
// Update method data
}
}
//------------------------------------do_if------------------------------------
float cnt;
if (prob == PROB_UNKNOWN) {
#ifndef PRODUCT
#endif
repush_if_args(); // to gather stats on loop
// We need to mark this branch as taken so that if we recompile we will
// see that it is possible. In the tiered system the interpreter doesn't
// do profiling and by the time we get to the lower tier from the interpreter
// the path may be cold again. Make sure it doesn't look untaken
NULL, "cold");
if (EliminateAutoBox) {
// Mark the successor blocks as parsed
}
return;
}
// Sanity check the probability value
bool taken_if_true = true;
// Convert BoolTest to canonical form:
taken_if_true = false;
// prob is NOT updated here; it remains the probability of the taken
// path (as opposed to the prob of the path guarded by an 'IfTrueNode').
}
// Refresh c from the transformed bool node, since it may be
// simpler than the original c. Also re-canonicalize btest.
// This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
// That can arise from statements like: if (x instanceof C) ...
// Canonicalize one more time since transform can change it.
// Reverse edges one more time...
}
}
}
// Generate real control flow
if (!taken_if_true) { // Finish conversion to canonical form
}
// Branch is taken:
{ PreserveJVMState pjvms(this);
if (stopped()) {
if (EliminateAutoBox) {
// Mark the successor block as parsed
}
} else {
// Update method data
if (!stopped()) {
}
}
}
// Branch not taken.
if (stopped()) {
if (EliminateAutoBox) {
// Mark the successor block as parsed
}
} else {
// Update method data
}
}
//----------------------------adjust_map_after_if------------------------------
// Adjust the JVM state to reflect the result of taking this path.
// Basically, it means inspecting the CmpNode controlling this
// branch, seeing how it constrains a tested value, and then
// deciding if it's worth our while to encode this constraint
// as graph nodes in the current abstract interpretation map.
return; // nothing to do
// If this might possibly turn into an implicit null check,
// and the null has never yet been seen, we need to generate
// an uncommon trap, so as to recompile instead of suffering
// with very slow branches. (We'll get the slow branches if
// the program ever changes phase and starts seeing nulls here.)
//
// We do not inspect for a null constant, since a node may
// optimize to 'null' later on.
//
// Null checks, and other tests which expect inequality,
// show btest == BoolTest::eq along the non-taken branch.
// On the other hand, type tests, must-be-null tests,
// and other tests which expect pointer equality,
// show btest == BoolTest::ne along the non-taken branch.
// We prune both types of branches if they look unused.
// We need to mark this branch as taken so that if we recompile we will
// see that it is possible. In the tiered system the interpreter doesn't
// do profiling and by the time we get to the lower tier from the interpreter
// the path may be cold again. Make sure it doesn't look untaken
if (is_fallthrough) {
} else {
}
NULL,
return;
}
if (!have_con) {
// Swap, so constant is in con.
have_con = true;
} else {
// Do we have two constants? Then leave well enough alone.
have_con = false;
}
}
if (!have_con) // remaining adjustments need a con
return;
if (val_in_map < 0) return; // replace_in_map would be useless
{
return; // again, it would be useless
}
// Check for a comparison to a constant, and "know" that the compared
// value is constrained on this path.
switch (btest) {
{
// Cast to null, but keep the pointer identity temporarily live.
} else {
// either +0 or -0. Just because you are equal to +0
// doesn't mean you ARE +0!
}
}
break;
}
break;
default:
// (At this point we could record int range types with CastII.)
break;
}
// Delay transform() call to allow recovery of pre-cast value
// at the control merge.
}
}
}
//------------------------------do_one_bytecode--------------------------------
// Parse this bytecode, and alter the Parsers JVM->Node mapping
void Parse::do_one_bytecode() {
Node *a, *b, *c, *d; // Handy temps
int i;
"out of nodes parsing method")) {
return;
}
#ifdef ASSERT
// for setting breakpoints
if (TraceOptoParse) {
}
#endif
switch (bc()) {
// do nothing
break;
break;
break;
break;
break;
break;
break;
break;
// If the constant is unresolved, run this BC once in the interpreter.
{
index),
break;
}
"must be java_mirror of klass");
}
break;
break;
break;
break;
break;
break;
break;
break;
break;
break;
break;
push_pair_local( 0 );
break;
push_pair_local( 1 );
break;
push_pair_local( 2 );
break;
push_pair_local( 3 );
break;
break;
push_pair_local(0);
break;
push_pair_local(1);
break;
push_pair_local(2);
break;
push_pair_local(3);
break;
break;
break;
break;
break;
break;
break;
// long stores
set_pair_local( 0, pop_pair() );
break;
break;
break;
break;
break;
// double stores
break;
break;
break;
break;
break;
a = pop();
b = pop();
push(a);
push(b);
break;
a = pop();
push(a);
push(a);
break;
a = pop();
b = pop();
push( a );
push( b );
push( a );
break;
a = pop();
b = pop();
c = pop();
push( a );
push( c );
push( b );
push( a );
break;
a = pop();
b = pop();
push( b );
push( a );
push( b );
push( a );
break;
// before: .. c, b, a
// after: .. b, a, c, b, a
// not tested
a = pop();
b = pop();
c = pop();
push( b );
push( a );
push( c );
push( b );
push( a );
break;
// before: .. d, c, b, a
// after: .. b, a, d, c, b, a
// not tested
a = pop();
b = pop();
c = pop();
d = pop();
push( b );
push( a );
push( d );
push( c );
push( b );
push( a );
break;
case Bytecodes::_arraylength: {
// Must do null-check with value on expression stack
// Compile-time detect of null-exception?
if (stopped()) return;
a = pop();
push(load_array_length(a));
break;
}
a = array_addressing(T_LONG, 0);
if (stopped()) return; // guaranteed null or range check
break;
}
a = array_addressing(T_DOUBLE, 0);
if (stopped()) return; // guaranteed null or range check
break;
}
if (stopped()) return; // guaranteed null or range check
c = pop(); // Oop to store
b = pop(); // index (already used)
a = pop(); // the array itself
break;
}
if (stopped()) return; // guaranteed null or range check
c = pop_pair();
break;
}
if (stopped()) return; // guaranteed null or range check
c = pop_pair();
c = dstore_rounding(c);
break;
}
do_getfield();
break;
case Bytecodes::_getstatic:
do_getstatic();
break;
do_putfield();
break;
case Bytecodes::_putstatic:
do_putstatic();
break;
do_irem();
break;
// Must keep both values on the expression-stack during null-check
// Compile-time detect of null-exception?
if (stopped()) return;
b = pop();
a = pop();
break;
break;
break;
a = pop();
break;
break;
break;
break;
break;
break;
break;
break;
a = pop();
push(b);
break;
b = pop();
a = pop();
d = precision_rounding(c);
push( d );
break;
b = pop();
a = pop();
d = precision_rounding(c);
push( d );
break;
b = pop();
a = pop();
d = precision_rounding(c);
push( d );
break;
b = pop();
a = pop();
d = precision_rounding(c);
push( d );
break;
// Generate a ModF node.
b = pop();
a = pop();
d = precision_rounding(c);
push( d );
}
else {
// Generate a call.
modf();
}
break;
b = pop();
a = pop();
push(c);
break;
b = pop();
a = pop();
// Same as fcmpl but need to flip the unordered case. Swap the inputs,
// which negates the result sign except for unordered. Flip the unordered
// as well by using CmpF3 which implements unordered-lesser instead of
// unordered-greater semantics. Finally, commute the result bits. Result
// is same as using a CmpF3Greater except we did it with CmpF3 alone.
push(c);
break;
a = pop();
break;
a = pop_pair();
push( b );
break;
a = pop();
push_pair( b );
break;
a = pop_pair();
// This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
//b = _gvn.transform(new (C, 2) RoundFloatNode(0, b) );
push( b );
break;
if (Matcher::convL2FSupported()) {
a = pop_pair();
// For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
// Rather than storing the result into an FP register then pushing
// out to memory to round, the machine instruction that implements
// ConvL2D is responsible for rounding.
// c = precision_rounding(b);
push(c);
} else {
l2f();
}
break;
a = pop_pair();
// For i486.ad, rounding is always necessary (see _l2f above).
// c = dprecision_rounding(b);
push_pair(c);
break;
a = pop();
push_pair(b);
break;
a = pop_pair();
push_pair(b);
break;
b = pop_pair();
a = pop_pair();
d = dprecision_rounding(c);
push_pair( d );
break;
b = pop_pair();
a = pop_pair();
d = dprecision_rounding(c);
push_pair( d );
break;
b = pop_pair();
a = pop_pair();
d = dprecision_rounding(c);
push_pair( d );
break;
b = pop_pair();
a = pop_pair();
d = dprecision_rounding(c);
push_pair( d );
break;
a = pop_pair();
push_pair(b);
break;
// Generate a ModD node.
b = pop_pair();
a = pop_pair();
// a % b
d = dprecision_rounding(c);
push_pair( d );
}
else {
// Generate a call.
modd();
}
break;
b = pop_pair();
a = pop_pair();
push(c);
break;
b = pop_pair();
a = pop_pair();
// Same as dcmpl but need to flip the unordered case.
// Commute the inputs, which negates the result sign except for unordered.
// Flip the unordered as well by using CmpD3 which implements
// unordered-lesser instead of unordered-greater semantics.
// Finally, negate the result bits. Result is same as using a
// CmpD3Greater except we did it with CmpD3 alone.
push(c);
break;
// Note for longs -> lo word is on TOS, hi word is on TOS - 1
b = pop_pair();
a = pop_pair();
push_pair(c);
break;
b = pop_pair();
a = pop_pair();
push_pair(c);
break;
b = pop_pair();
a = pop_pair();
push_pair(c);
break;
b = pop(); // the shift count
a = pop_pair(); // value to be shifted
push_pair(c);
break;
b = pop(); // the shift count
a = pop_pair(); // value to be shifted
push_pair(c);
break;
b = pop(); // the shift count
a = pop_pair(); // value to be shifted
push_pair(c);
break;
b = pop_pair();
a = pop_pair();
push_pair(c);
break;
// Must keep both values on the expression-stack during null-check
// Compile-time detect of null-exception?
if (stopped()) return;
b = pop_pair();
a = pop_pair();
push_pair(c);
break;
// Must keep both values on the expression-stack during null-check
// Compile-time detect of null-exception?
if (stopped()) return;
b = pop_pair();
a = pop_pair();
push_pair(c);
break;
b = pop_pair();
a = pop_pair();
push_pair(c);
break;
b = pop_pair();
a = pop_pair();
push_pair(c);
break;
// Safepoints are now inserted _before_ branches. The long-compare
// bytecode painfully produces a 3-way value (-1,0,+1) which requires a
// slew of control flow. These are usually followed by a CmpI vs zero and
// a branch; this pattern then optimizes to the obvious long-compare and
// branch. However, if the branch is backwards there's a Safepoint
// inserted. The inserted Safepoint captures the JVM state at the
// pre-branch point, i.e. it captures the 3-way value. Thus if a
// long-compare is used to control a loop the debug info will force
// computation of the 3-way value, even though the generated code uses a
// long-compare and branch. We try to rectify the situation by inserting
// a SafePoint here and have it dominate and kill the safepoint added at a
// following backwards branch. At this point the JVM state merely holds 2
// longs but not the 3-way value.
if( UseLoopSafepoints ) {
// If this is a backwards branch in the bytecodes, add Safepoint
}
}
b = pop_pair();
a = pop_pair();
push(c);
break;
a = pop_pair();
push_pair(b);
break;
a = pop_pair();
break;
a = pop();
push_pair(b);
break;
// Sign extend
a = pop();
push( a );
break;
a = pop();
push( a );
break;
a = pop();
break;
a = pop();
c = precision_rounding(b);
push (b);
break;
a = pop();
push_pair(b);
break;
set_local( i, _gvn.transform( new (C, 3) AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
break;
// Exit points of synchronized methods must have an unlock node
break;
return_current(pop());
break;
break;
break;
// null exception oop throws NULL pointer exception
if (stopped()) return;
// Hook the thrown exception directly to subsequent handlers.
// Keep method interpreted from now on.
return;
}
if (env()->jvmti_can_post_on_exceptions()) {
// check if we must post exception events, take uncommon trap if so (with must_throw = false)
}
// Here if either can_post_on_exceptions or should_post_on_exceptions is false
break;
// If this is a backwards branch in the bytecodes, add Safepoint
// Update method data
// Merge the current control into the target basic block
// See if we can get some profile data and hand it off to the next block
if (!methodData->is_mature()) break;
break;
}
// If this is a backwards branch in the bytecodes, add Safepoint
a = null();
b = pop();
break;
// If this is a backwards branch in the bytecodes, add Safepoint
a = pop();
b = pop();
break;
// If this is a backwards branch in the bytecodes, add Safepoint
b = pop();
break;
// If this is a backwards branch in the bytecodes, add Safepoint
a = pop();
b = pop();
break;
case Bytecodes::_tableswitch:
break;
case Bytecodes::_lookupswitch:
break;
case Bytecodes::_invokestatic:
case Bytecodes::_invokedynamic:
case Bytecodes::_invokespecial:
case Bytecodes::_invokevirtual:
case Bytecodes::_invokeinterface:
do_call();
break;
case Bytecodes::_checkcast:
do_checkcast();
break;
case Bytecodes::_instanceof:
break;
case Bytecodes::_anewarray:
do_anewarray();
break;
break;
case Bytecodes::_multianewarray:
break;
do_new();
break;
do_jsr();
break;
do_ret();
break;
case Bytecodes::_monitorenter:
break;
case Bytecodes::_monitorexit:
break;
case Bytecodes::_breakpoint:
// Breakpoint set concurrently to compile
// %%% use an uncommon trap?
C->record_failure("breakpoint in method");
return;
default:
#ifndef PRODUCT
#endif
}
#ifndef PRODUCT
if(printer) {
char buffer[256];
printer->set_traverse_outs(true);
}
#endif
}