/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_CFGPrinter.hpp"
#include "c1/c1_Canonicalizer.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_GraphBuilder.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciField.hpp"
#include "ci/ciKlass.hpp"
#include "ci/ciMemberName.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/bytecode.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/compilationPolicy.hpp"
#include "utilities/bitMap.inline.hpp"
private:
// fields used by mark_loops
// accessors
// unified bailout support
// helper functions
void set_entries(int osr_bci);
void set_leaders();
void mark_loops();
// debugging
#ifndef PRODUCT
void print();
#endif
public:
// creation
// accessors for GraphBuilder
};
// Implementation of BlockListBuilder
, _blocks(16)
, _next_block_number(0)
, _active() // size not known yet
, _visited() // size not known yet
, _next_loop_index(0)
, _loop_map() // size not known yet
{
set_leaders();
mark_loops();
#ifndef PRODUCT
if (PrintCFGToFile) {
}
#endif
}
// generate start blocks
}
if (osr_bci != -1) {
}
// generate exception entry blocks
for (int i = 0; i < n; i++) {
h->set_entry_block(entry);
}
}
assert(predecessor == NULL || predecessor->bci() < cur_bci, "targets for backward branches must already exist");
}
if (predecessor != NULL) {
}
}
return block;
}
}
}
// Draws edges from a block to its exception handlers
for (int i = 0; i < n; i++) {
// add each exception handler only once
}
// stop when reaching catchall
if (h->catch_type() == 0) break;
}
}
}
// start a new block after jsr-bytecode and link this block into cfg
// start a new block at the subroutine entry and mark it with a special flag
}
}
// The information about which bci starts a new block simplifies the analysis.
// Without it, backward branches could jump to a bci where no block was created
// during bytecode iteration. This would require the creation of a new block at the
// branch target and a modification of the successor lists.
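// For example (illustrative bytecode, not from any particular method):
// if the instruction at bci 20 is "goto 8", the block starting at bci 8
// must already be known when bci 20 is reached during iteration;
// otherwise the block containing bci 8 would have to be split after the fact.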
ciBytecodeStream s(method());
}
}
switch (s.cur_bc()) {
// track stores to local variables for selective creation of phi functions
// track bytecodes that affect the control flow
break;
case Bytecodes::_ifnonnull:
break;
break;
break;
break;
break;
case Bytecodes::_tableswitch: {
// set block for each case
Bytecode_tableswitch sw(&s);
for (int i = 0; i < l; i++) {
}
break;
}
case Bytecodes::_lookupswitch: {
// set block for each case
Bytecode_lookupswitch sw(&s);
int l = sw.number_of_pairs();
for (int i = 0; i < l; i++) {
}
break;
}
}
}
}
_next_loop_index = 0;
// recursively iterate the control flow graph
}
// exception edges may look like loops but don't mark them as such
// since it screws up block ordering.
return;
}
assert(0 <= _next_loop_index && _next_loop_index < BitsPerInt, "_next_loop_index is used as a bit-index in integer");
} else {
// block already marked as loop header
assert(is_power_of_2((unsigned int)_loop_map.at(block->block_id())), "exactly one bit must be set");
}
}
// reached block via backward branch
}
// return cached loop information for this block
}
in_subroutine = true;
}
// set active and visited bits before successors are processed
// recursively process all successors
}
// clear active-bit after all successors are processed
// reverse-post-order numbering of all blocks
if (loop_state != 0 || in_subroutine) {
// block is contained at least in one loop, so phi functions are necessary
// phi functions are also necessary for all locals stored in a subroutine
}
// If the highest bit is set (i.e. when the integer value is negative), the method
// has 32 or more loops. This bit is never cleared because it is used for multiple loops.
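// For example (hypothetical): in a method with 35 loops, loop indices
// 31..34 all map onto bit 31, so that bit conservatively stands for
// several loops at once and must stay set.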
if (header_loop_state >= 0) {
}
}
// cache and return loop information for this block
return loop_state;
}
#ifndef PRODUCT
return (*a)->depth_first_number() - (*b)->depth_first_number();
}
method()->print_short_name();
// better readability if blocks are sorted in processing order
tty->print("%4d: B%-4d bci: %-4d preds: %-4d ", cur->depth_first_number(), cur->block_id(), cur->bci(), cur->total_preds());
if (cur->number_of_sux() > 0) {
for (int j = 0; j < cur->number_of_sux(); j++) {
}
}
}
}
#endif
// A simple growable array of Values indexed by ciFields
private:
public:
FieldBuffer() {}
void kill() {
}
} else {
return NULL;
}
}
}
};
// MemoryBuffer is a fairly simple model of the current state of memory.
// It partitions memory into several pieces. The first piece is
// generic memory where little is known about the owner of the memory.
// This is conceptually represented by the tuple <O, F, V> which says
// that the field F of object O has value V. This is flattened so
// that F is represented by the offset of the field and the parallel
// arrays _objects and _values are used for O and V. Loads of O.F can
// simply use V. Newly allocated objects are kept in a separate list
// along with a parallel array for each object which represents the
// current value of its fields. Stores of the default value to fields
// which have never been stored to before are eliminated since they
// are redundant. Once newly allocated objects are stored into
// another object or they are passed out of the current compile they
// are treated like generic memory.
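// For illustration (hypothetical Java source, not part of this file):
//
//   Point p = new Point();   // p is tracked in _newobjects with no field values
//   p.x = 0;                 // store of the default value -> eliminated as redundant
//   p.y = 5;                 // recorded as <p, offset(y), 5>
//   int t = p.y;             // the load can be folded to the value 5
//   list.add(p);             // p escapes -> treated as generic memory from here on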
private:
public:
MemoryBuffer() {}
if (!EliminateFieldAccess) {
return st;
}
if (index != -1) {
// newly allocated object with no other stores performed on this field
#ifndef PRODUCT
if (PrintIRDuringConstruction && Verbose) {
st->print_line();
}
#endif
return NULL;
} else {
}
} else {
}
} else {
// if we held onto field names we could alias based on names but
// we don't know what's being stored to so kill it all.
kill();
}
return st;
}
// return true if this value corresponds to the default value of a field.
if (con) {
default: ShouldNotReachHere();
}
}
return false;
}
// return either the actual value of a load or the load itself
if (!EliminateFieldAccess) {
return load;
}
// can't skip load since value might get rounded as a side effect
return load;
}
if (index != -1) {
}
#ifndef PRODUCT
if (PrintIRDuringConstruction && Verbose) {
load->print_line();
}
#endif
return result;
}
}
return load;
}
// Record this newly allocated object
} else {
}
}
if (index != -1) {
// stored a newly allocated object into another object.
// Assume we've lost track of it as a separate slice of memory.
// We could do better by keeping track of whether individual
// fields could alias each other.
// pull out the field info and store it at the end of the field
// info list to be reused later.
}
}
void kill() {
_newobjects.trunc_to(0);
}
};
// Implementation of GraphBuilder's ScopeData
, _bci2block(NULL)
, _has_handler(false)
, _work_list(NULL)
, _parsing_jsr(false)
, _caller_stack_size(-1)
, _num_returns(0)
{
_max_inline_size = (intx) ((float) NestedInliningSizeRatio * (float) parent->max_inline_size() / 100.0f);
} else {
}
if (_max_inline_size < MaxTrivialSize) {
}
}
if (UseLocalValueNumbering) {
}
}
if (parsing_jsr()) {
// It is necessary to clone all blocks associated with a
// subroutine, including those for exception handlers in the scope
// of the method containing the jsr (because those exception
// handlers may contain ret instructions in some cases).
#ifndef PRODUCT
if (PrintInitialBlockList) {
}
#endif
// copy data from the cloned block
if (block->is_set(BlockBegin::parser_loop_header_flag)) new_block->set(BlockBegin::parser_loop_header_flag);
// Preserve certain flags for assertion checking
if (block->is_set(BlockBegin::subroutine_entry_flag)) new_block->set(BlockBegin::subroutine_entry_flag);
if (block->is_set(BlockBegin::exception_entry_flag)) new_block->set(BlockBegin::exception_entry_flag);
// copy was_visited_flag to allow early detection of bailouts
// if a block that is used in a jsr has already been visited before,
// it is shared between the normal control flow and a subroutine
// BlockBegin::try_merge returns false when the flag is set, this leads
// to a compilation bailout
}
return block;
} else {
}
}
if (_jsr_xhandlers == NULL) {
}
return _jsr_xhandlers;
}
bool parent_has_handler = false;
}
}
}
if (_work_list == NULL) {
_work_list = new BlockList();
}
// Do not start parsing the continuation block while in a
// sub-scope
if (parsing_jsr()) {
if (block == jsr_continuation()) {
return;
}
} else {
if (block == continuation()) {
return;
}
}
}
}
// sort blocks in descending order into the work list
while (i >= 0) {
if (b->depth_first_number() < dfn) {
} else {
break;
}
i--;
}
}
if (is_work_list_empty()) {
return NULL;
}
return _work_list->pop();
}
}
// clone all the exception handlers from the scope
for (int i = 0; i < n; i++) {
// The XHandlers need to be adjusted to dispatch to the cloned
// handler block instead of the default one but the synthetic
// unlocker needs to be handled specially. The synthetic unlocker
// should be left alone since there can be only one and all code
// should dispatch to the same one.
}
}
if (parsing_jsr()) {
return parent()->num_returns();
}
return _num_returns;
}
if (parsing_jsr()) {
parent()->incr_num_returns();
} else {
++_num_returns;
}
}
// Implementation of GraphBuilder
BAILOUT("could not resolve a constant");
} else {
ValueType* t = illegalType;
switch (con.basic_type()) {
case T_OBJECT :
{
t = new ObjectConstant(obj);
} else {
}
break;
}
default : ShouldNotReachHere();
}
Value x;
if (patch_state != NULL) {
x = new Constant(t, patch_state);
} else {
x = new Constant(t);
}
}
}
}
}
if (parsing_jsr()) {
// We need to do additional tracking of the location of the return
// constructs. Here we are figuring out in which circumstances we
// need to bail out.
if (x->type()->is_address()) {
// Also check parent jsrs (if any) at this time to see whether
// they are using this local. We don't handle skipping over a
// ret.
BAILOUT("subroutine overwrites return address from previous subroutine");
}
}
}
}
}
if (CSEArrayLength ||
(array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
}
}
if (CSEArrayLength ||
(array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
}
// Note that we'd collect profile data in this method if we wanted it.
compilation()->set_would_profile(true);
if (profile_checkcasts()) {
result->set_should_profile(true);
}
}
}
switch (code) {
}
break;
}
break;
}
break;
}
break;
}
break;
}
break;
}
break;
}
break;
}
break;
default:
break;
}
}
// NOTE: strictfp can be queried from current method since we don't
// inline methods with differing strictfp bits
// Note: currently single-precision floating-point rounding on Intel is handled at the LIRGenerator level
}
}
}
// try to simplify
// Note: This code should go into the canonicalizer as soon as it
// can handle canonicalized forms that contain more than one node.
// pattern: x >>> s
// pattern: x >>> s1, with s1 constant
ShiftOp* l = x->as_ShiftOp();
// pattern: (a << b) >>> s1
// pattern: (a << s0) >>> s1
if (s0c == 0) {
// pattern: (a << 0) >>> 0 => simplify to: a
ipush(l->x());
} else {
// pattern: (a << s0c) >>> s0c => simplify to: a & m, with m constant
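// e.g. for ints (illustrative values): (a << 3) >>> 3 keeps the
// low 29 bits of a, so m == (1 << 29) - 1 == 0x1FFFFFFF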
}
return;
}
}
}
}
}
// could not simplify
}
}
}
}
int delta = stream()->is_wide() ? (signed short)Bytes::get_Java_u2(stream()->cur_bcp() + 4) : (signed char)(stream()->cur_bcp()[2]);
}
if (is_profiling()) {
compilation()->set_would_profile(true);
x->set_profiled_bci(bci());
if (profile_branches()) {
x->set_profiled_method(method());
x->set_should_profile(true);
}
}
append(x);
}
(i->as_Goto()->sux_at(0) == tsux && i->as_Goto()->is_safepoint() == tsux->bci() < stream()->cur_bci()) ||
(i->as_Goto()->sux_at(0) == fsux && i->as_Goto()->is_safepoint() == fsux->bci() < stream()->cur_bci()),
"safepoint state of Goto returned by canonicalizer incorrect");
if (is_profiling()) {
// Note that we'd collect profile data in this method if we wanted it.
compilation()->set_would_profile(true);
// At level 2 we need the proper bci to count backedges
if (profile_branches()) {
// Successors can be rotated by the canonicalizer, check for this case.
if_node->set_should_profile(true);
if_node->set_swapped(true);
}
}
return;
}
// Check if this If was reduced to Goto.
compilation()->set_would_profile(true);
if (profile_branches()) {
goto_node->set_should_profile(true);
// Find out which successor is used.
} else {
}
}
return;
}
}
}
}
}
}
// We only handle well-formed jsrs (those which are "block-structured").
// If the bytecodes are strange (jumping out of a jsr block) then we
// might end up trying to re-parse a block containing a jsr which
// has already been activated. Watch for this case and bail out.
}
}
if (!try_inline_jsr(dest)) {
return; // bailed out while parsing and inlining subroutine
}
}
}
// Rets simply become (NON-SAFEPOINT) gotos to the jsr continuation
}
if (CanonicalizeNodes && l == 1) {
// total of 2 successors => use If instead of switch
// Note: This code should go into the canonicalizer as soon as it
// can handle canonicalized forms that contain more than one node.
} else {
// collect successors
int i;
bool has_bb = false;
for (i = 0; i < l; i++) {
}
// add default successor
#ifdef ASSERT
for (i = 0; i < l; i++) {
assert(res->as_Goto()->is_safepoint() == sw.dest_offset_at(i) < 0, "safepoint state of Goto returned by canonicalizer incorrect");
}
}
}
#endif
}
}
void GraphBuilder::lookup_switch() {
const int l = sw.number_of_pairs();
if (CanonicalizeNodes && l == 1) {
// total of 2 successors => use If instead of switch
// Note: This code should go into the canonicalizer as soon as it
// can handle canonicalized forms that contain more than one node.
// simplify to If
} else {
// collect successors & keys
int i;
bool has_bb = false;
for (i = 0; i < l; i++) {
}
// add default successor
#ifdef ASSERT
for (i = 0; i < l; i++) {
assert(res->as_Goto()->is_safepoint() == sw.pair_at(i).offset() < 0, "safepoint state of Goto returned by canonicalizer incorrect");
}
}
}
#endif
}
}
void GraphBuilder::call_register_finalizer() {
// If the receiver requires finalization then emit code to perform
// the registration on return.
// Gather some type information about the receiver
if (exact_type == NULL &&
exact_type = ik;
// test class is leaf class
exact_type = ik;
} else {
declared_type = ik;
}
}
// see if we know statically that registration isn't required
bool needs_check = true;
if (exact_type != NULL) {
} else if (declared_type != NULL) {
needs_check = false;
}
}
if (needs_check) {
// Perform the registration of finalizable objects.
load_local(objectType, 0);
true, state_before, true));
}
}
if (RegisterFinalizersAtInit &&
}
bool need_mem_bar = false;
scope()->wrote_final()) {
need_mem_bar = true;
}
// Check to see whether we are inlining. If so, Return
// instructions become Gotos to the continuation point.
if (continuation() != NULL) {
assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet");
// Report exit from inline methods
append(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args));
}
// If the inlined method is synchronized, the monitor must be
// released before we jump to the continuation block.
if (method()->is_synchronized()) {
}
if (need_mem_bar) {
}
// State at end of inlined method is the state of the caller
// without the method parameters on stack, including the
// return value, if any, of the inlined method on operand stack.
if (x != NULL) {
}
// See whether this is the first return; if so, store off some
// of the state for later examination
if (num_returns() == 0) {
}
// The current bci() is in the wrong scope, so use the bci() of
// the continuation point.
return;
}
state()->truncate_stack(0);
if (method()->is_synchronized()) {
// perform the unlocking before exiting the method
} else {
}
}
if (need_mem_bar) {
}
}
bool will_link;
// call will_link again to determine if the field is valid.
// save state before instruction for debug info when
// deoptimization happens during patching
}
if (state_before != NULL) {
// build a patching constant
} else {
}
}
scope()->set_wrote_final();
}
switch (code) {
case Bytecodes::_getstatic: {
// check for compile-time constants, i.e., initialized static final fields
switch (field_type) {
case T_ARRAY:
case T_OBJECT:
}
break;
default:
}
}
} else {
if (state_before == NULL) {
}
}
break;
}
case Bytecodes::_putstatic:
if (state_before == NULL) {
}
}
break;
// Check for compile-time constants, i.e., trusted final non-static fields.
if (!const_oop->is_null_object()) {
if (field->is_constant()) {
switch (field_type) {
case T_ARRAY:
case T_OBJECT:
}
break;
default:
}
} else {
// For CallSite objects treat the target field as a compile time constant.
if (const_oop->is_call_site()) {
if (field->is_call_site_target()) {
// Add a dependence for invalidation of the optimization.
if (!call_site->is_constant_call_site()) {
}
}
}
}
}
}
}
} else {
if (state_before == NULL) {
}
if (replacement != load) {
} else {
}
}
break;
}
if (state_before == NULL) {
}
}
break;
}
default:
break;
}
}
return compilation()->dependency_recorder();
}
bool will_link;
// FIXME bail out for now
BAILOUT("unlinked call site (FIXME needs patching or recompile support)");
}
// we have to make sure the argument size (incl. the receiver)
// is correct for compilation (the call would fail later during
// linkage anyway) - was bug (gri 7/28/99)
{
// Use raw to get rewritten bytecode.
const bool allow_static =
BAILOUT("will cause link error");
}
}
}
// check if CHA possible: if so, change the code to invoke_special
// Some methods are obviously bindable without any type checks so
// convert them directly to an invokespecial or invokestatic.
switch (bc_raw) {
case Bytecodes::_invokevirtual:
break;
case Bytecodes::_invokehandle:
break;
}
}
// Push appendix argument (MethodType, CallSite, etc.), if one.
if (stream()->has_appendix()) {
}
// NEEDS_CLEANUP
// I've added the target->is_loaded() test below but I don't really understand
// how klass->is_loaded() can be true and yet target->is_loaded() is false.
// this happened while running the JCK invokevirtual tests under doit. TKR
!(// %%% FIXME: Are both of these relevant?
target->is_compiled_lambda_form())) {
bool type_is_exact = false;
// try to find a precise receiver type
type_is_exact = true;
}
// Insert a dependency on this type since
// find_monomorphic_target may assume it's already done.
type_is_exact = true;
}
}
}
}
// If we have the exact receiver type we can bind directly to
// the method to call.
if (exact_target != NULL) {
}
}
if (receiver_klass != NULL &&
actual_recv->is_initialized()) {
}
(code == Bytecodes::_invokeinterface && callee_holder->is_initialized() && !actual_recv->is_interface())) {
// Use CHA on the receiver to select a more precise method.
cha_monomorphic_target = target->find_monomorphic_target(calling_klass, callee_holder, actual_recv);
// if there is only one implementor of this interface then we
// may be able to bind this invoke directly to the implementing
// klass but we need both a dependence on the single interface
// and on the method we bind to. Additionally since all we know
// about the receiver type is that it's supposed to implement the
// interface we have to insert a check that it's the class we
// expect. Interface types are not checked by the verifier so
// they are roughly equivalent to Object.
"just checking");
// the number of implementors for decl_interface is less or
// equal to the number of implementors for target->holder() so
// if number of implementors of target->holder() == 1 then
// number of implementors for decl_interface is 0 or 1. If
// it's 0 then no class implements decl_interface and there's
// no point in inlining.
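// For example (illustrative): if target->holder() has exactly one
// implementor C, decl_interface has at most one implementor; if it has
// none, no class implements it and there is nothing to bind to.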
}
}
if (singleton) {
cha_monomorphic_target = target->find_monomorphic_target(calling_klass, target->holder(), singleton);
if (cha_monomorphic_target != NULL) {
// If CHA is able to bind this invoke then update the class
// to match that class, otherwise klass will refer to the
// interface.
// insert a check that it's really the expected class.
// pass the result of the checkcast so that the compiler has
// more accurate type info in the inlinee
better_receiver = append_split(c);
}
}
}
}
if (cha_monomorphic_target != NULL) {
if (cha_monomorphic_target->is_abstract()) {
// Do not optimize for abstract methods
}
}
if (cha_monomorphic_target != NULL) {
if (!(target->is_final_method())) {
// If we inlined because CHA revealed only a single target method,
// then we are dependent on that target method not getting overridden
// by dynamic class loading. Be sure to test the "static" receiver
// dest_method here, as opposed to the actual receiver, which may
// falsely lead us to believe that the receiver is final or private.
}
}
// check if we could do inlining
// callee is known => check if we have static binding
// static binding => check if callee is ok
bool success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), code, better_receiver);
if (success) {
// Register dependence if JVMTI has either breakpoint
// setting or hotswapping of methods capabilities since they may
// cause deoptimization.
}
return;
}
} else {
}
} else {
}
// If we attempted an inline which did not succeed because of a
// bailout during construction of the callee graph, the entire
// compilation has to be aborted. This is fairly rare and currently
// seems to only occur for jasm-generated classes which contain
// jsr/ret pairs which are not associated with finally clauses and
// do not have exception handlers in the containing method, and are
// therefore not caught early enough to abort the inlining without
// corrupting the graph. (We currently bail out with a non-empty
// stack at a ret in these situations.)
// inlining not successful => standard invoke
// The bytecode (code) might change in this method so we are checking this very late.
const bool has_receiver =
#ifdef SPARC
// Currently only supported on Sparc.
// The UseInlineCaches only controls dispatch to invokevirtuals for
// loaded classes which we weren't able to statically bind.
&& !target->can_be_statically_bound()) {
// Find a vtable index if one is available
}
#endif
// invokespecial always needs a NULL check. invokevirtual where
// the target is final or where it's not known whether the
// target is final requires a NULL check. Otherwise normal
// invokevirtual will perform the null check during the lookup
// logic or the unverified entry point. Profiling of calls
// requires that the null check is performed in all cases.
}
if (is_profiling()) {
}
// Note that we'd collect profile data in this method if we wanted it.
compilation()->set_would_profile(true);
if (profile_calls()) {
if (cha_monomorphic_target != NULL) {
} else if (exact_target != NULL) {
}
}
}
// push result
if (result_type != voidType) {
} else {
}
}
}
bool will_link;
}
void GraphBuilder::new_type_array() {
}
void GraphBuilder::new_object_array() {
bool will_link;
ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
apush(append_split(n));
}
return true;
} else {
// test class is leaf class
return true;
}
}
}
return false;
}
bool will_link;
ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_for_exception();
apush(append_split(c));
if (is_profiling()) {
// Note that we'd collect profile data in this method if we wanted it.
compilation()->set_would_profile(true);
if (profile_checkcasts()) {
c->set_profiled_method(method());
c->set_profiled_bci(bci());
c->set_should_profile(true);
}
}
}
bool will_link;
ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
ipush(append_split(i));
if (is_profiling()) {
// Note that we'd collect profile data in this method if we wanted it.
compilation()->set_would_profile(true);
if (profile_checkcasts()) {
i->set_profiled_method(method());
i->set_profiled_bci(bci());
i->set_should_profile(true);
}
}
}
// save state before locking in case of deoptimization after a NullPointerException
kill_all();
}
kill_all();
}
bool will_link;
ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling();
// fill in all dimensions
int i = dimensions;
// create array
apush(append_split(n));
}
// We require that the debug info for a Throw be the "state before"
// the Throw (i.e., exception oop is still on TOS)
// operand stack not needed after a throw
state()->truncate_stack(0);
append_with_bci(t, bci);
}
// no rounding needed if SSE2 is used
// Must currently insert rounding node for doubleword values that
// are results of expressions (i.e., not loads from memory or
// constants)
}
}
return fp_value;
}
// Canonicalizer returned an instruction which was already
// appended so simply return it.
return i1;
}
if (UseLocalValueNumbering) {
// Lookup the instruction in the ValueMap and add it to the map if
// it's not found.
// found an entry in the value map, so just return it.
return i2;
}
}
// i1 was not eliminated => append it
// set the bailout state but complete normal processing. We
// might do a little more work before noticing the bailout so we
// want processing to continue normally until it's noticed.
}
#ifndef PRODUCT
if (PrintIRDuringConstruction) {
if (Verbose) {
}
}
#endif
// save state after modification of operand stack for StateSplit instructions
if (s != NULL) {
if (EliminateFieldAccess) {
}
}
}
// set up exception handlers for this instruction if necessary
assert(i1->exception_state() != NULL || !i1->needs_exception_state() || bailed_out(), "handle_exception must set exception state");
}
return i1;
}
}
}
return;
} else {
if (con) {
if (c && c->is_loaded()) {
return;
}
}
}
}
}
if (!has_handler() && (!instruction->needs_exception_state() || instruction->exception_state() != NULL)) {
|| (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->jvmti_can_access_local_variables()),
"exception_state should be of exception kind");
return new XHandlers();
}
int scope_count = 0;
do {
assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci(), "invalid bci");
// join with all potential exception handlers
for (int i = 0; i < n; i++) {
// h is a potential exception handler => join it
compilation()->set_has_exception_handlers(true);
// It's acceptable for an exception handler to cover itself
// but we don't handle that in the parser currently. It's
// very rare so we bail out instead of trying to handle it.
}
assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond");
// previously this was a BAILOUT, but this is not necessary
// now because asynchronous exceptions are not handled this way.
assert(entry->state() == NULL || cur_state->total_locks_size() == entry->state()->total_locks_size(), "locks do not match");
// xhandler start with an empty expression stack
if (cur_state->stack_size() != 0) {
}
}
// Note: Usually this join must work. However, very
// complicated jsr-ret structures where we don't ret from
// the subroutine can cause the objects on the monitor
// stacks to not match because blocks can be parsed twice.
// The only test case we've seen so far which exhibits this
// problem is caught by the infinite recursion test in
// GraphBuilder::jsr() if the join doesn't work.
BAILOUT_("error while joining with exception handler, prob. due to complicated jsr/rets", exception_handlers);
}
// add current state for correct handling of phi functions at begin of xhandler
// add entry to the list of xhandlers of this block
// add back-edge from xhandler entry to this block
}
// clone XHandler because phi_operand and scope_count cannot be shared
// fill in exception handler subgraph lazily
// stop when reaching catchall
if (h->catch_type() == 0) {
return exception_handlers;
}
}
}
if (exception_handlers->length() == 0) {
// This scope and all callees do not handle exceptions, so the local
// variables of this scope are not needed. However, the scope itself is
// required for a correct exception stack trace -> clear out the locals.
} else {
}
if (prev_state != NULL) {
}
}
}
// Set up iteration for next time.
// If parsing a jsr, do not grab exception handlers from the
// parent scopes for this method (already got them, and they
// needed to be cloned)
while (cur_scope_data->parsing_jsr()) {
}
assert(cur_state->locks_size() == 0 || cur_state->locks_size() == 1, "unlocking must be done in a catchall exception handler");
scope_count++;
} while (cur_scope_data != NULL);
return exception_handlers;
}
// Helper class for simplifying Phis.
class PhiSimplifier : public BlockClosure {
private:
bool _has_substitutions;
public:
start->iterate_preorder(this);
if (_has_substitutions) {
}
}
void block_do(BlockBegin* b);
bool has_substitutions() const { return _has_substitutions; }
};
// no phi function
return v;
} else if (v->has_subst()) {
// already substituted; subst can be phi itself -> simplify
// already tried to simplify phi before
return phi;
// break cycles in phi functions
return phi;
// illegal phi functions are ignored anyway
return phi;
} else {
// mark phi function as processed to break cycles in phi functions
// simplify x = [y, x] and x = [y, y] to y
for (int i = 0; i < opd_count; i++) {
// if one operand is illegal, the entire phi function is illegal
phi->make_illegal();
return phi;
}
} else {
// no simplification possible
return phi;
}
}
}
// successfully simplified phi function
_has_substitutions = true;
#ifndef PRODUCT
if (PrintPhiFunctions) {
tty->print_cr("simplified phi function %c%d to %c%d (Block B%d)", phi->type()->tchar(), phi->id(), subst->type()->tchar(), subst->id(), phi->block()->block_id());
}
#endif
return subst;
}
}
for_each_phi_fun(b, phi,
);
#ifdef ASSERT
for_each_phi_fun(b, phi,
);
);
#endif
}
// This method is called after all blocks are filled with HIR instructions.
// It eliminates all Phi functions of the form x = [y, y] and x = [y, x].
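// For example (illustrative HIR): a local that is assigned y before a loop
// and never changed inside it yields x = [y, x] at the loop header (y from
// the entry edge, x from the back edge); every use of x can become y.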
}
// setup iteration
kill_all();
}
#ifndef PRODUCT
if (PrintIRDuringConstruction) {
ip.print_head();
}
#endif
_skip_block = false;
ciBytecodeStream s(method());
s.reset_to_bci(bci);
scope_data()->set_stream(&s);
// iterate
bool push_exception = false;
// first thing in the exception entry block should be the exception object.
push_exception = true;
}
// Check for active jsr during OSR compilation
if (compilation()->is_osr_compile()
&& scope()->is_top_scope()
&& parsing_jsr()
bailout("OSR not supported while a jsr is active");
}
if (push_exception) {
push_exception = false;
}
// handle bytecode
switch (code) {
case Bytecodes::_bipush : ipush(append(new Constant(new IntConstant(((signed char*)s.cur_bcp())[1])))); break;
case Bytecodes::_sipush : ipush(append(new Constant(new IntConstant((short)Bytes::get_Java_u2(s.cur_bcp()+1))))); break;
case Bytecodes::_arraylength : { ValueStack* state_before = copy_state_for_exception(); ipush(append(new ArrayLength(apop(), state_before))); break; }
default : ShouldNotReachHere(); break;
}
// save current bci to setup Goto at the end
}
// stop processing of this block (see try_inline_full)
if (_skip_block) {
_skip_block = false;
return _last->as_BlockEnd();
}
// if there are any, check if last instruction is a BlockEnd instruction
// all blocks must end with a BlockEnd instruction => add a Goto
}
assert(end->as_Return() == NULL || end->as_Throw() == NULL || end->state()->stack_size() == 0, "stack not needed for return and throw");
// connect to begin & set state
// NOTE that inlining may have changed the block we are parsing
// propagate state
// be careful, bailout if bytecodes are strange
}
// done
return end;
}
do {
if (start_in_current_block_for_inlining && !bailed_out()) {
start_in_current_block_for_inlining = false;
} else {
BlockBegin* b;
// we're about to parse the osr entry block, so make sure
// we setup the OSR edge leading into this block so that
// Phis get setup correctly.
// this is no longer the osr entry block, so clear it.
}
connect_to_end(b);
}
}
}
}
// the following bytecodes are assumed to potentially
// throw exceptions in compiled code - note that e.g.
// monitorexit & the return bytecodes do not throw
// exceptions since monitor pairing proved that they
// succeed (if monitor pairing succeeded)
};
// initialize trap tables
for (int i = 0; i < Bytecodes::number_of_java_codes; i++) {
_can_trap[i] = false;
}
// set standard trap info
_can_trap[can_trap_list[j]] = true;
}
}
// create header block
h->set_depth_first_number(0);
Value l = h;
h->set_end(g);
h->set(f);
// setup header block end state
ValueStack* s = state->copy(ValueStack::StateAfter, entry->bci()); // can use copy since stack is empty (=> no phis)
g->set_state(s);
return h;
}
BlockBegin* GraphBuilder::setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* state) {
// This code eliminates the empty start block at the beginning of
// each method. Previously, each method started with the
// start-block created below, and this block was followed by the
// header block that was always empty. This header block is only
// necessary if std_entry is also a backward branch target because
// then phi functions may be necessary in the header block. It's
// also necessary when profiling so that there's a single block that
// can increment the interpreter_invocation_count.
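// For example (hypothetical bytecode): in "0: iinc 0 1; 3: goto 0" the
// branch at bci 3 targets bci 0, so std_entry has a predecessor, phi
// functions may be needed there, and the header block must be kept.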
} else {
}
// setup start block (root for the IR graph)
new Base(
);
// create & setup state for start block
// setup states for header blocks
}
return start;
}
ciBytecodeStream s(method());
s.reset_to_bci(osr_bci);
s.next();
scope_data()->set_stream(&s);
// create a new block to be the osr setup code
// the osr entry has no values for locals
kill_all();
_block = _osr_entry;
_last = _osr_entry;
e->set_needs_null_check(false);
// OSR buffer is
//
// locals[nlocals-1..0]
// monitors[number_of_locks-1..0]
//
// locals is a direct copy of the interpreter frame, so the first slot
// in the local array is the last local from the interpreter
// and the last slot is local[0] (the receiver) from the interpreter
//
// Similarly with locks. The first lock slot in the osr buffer is the nth lock
// from the interpreter frame, and the nth lock slot in the osr buffer is the 0th
// lock in the interpreter frame (the method lock if a sync method)
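// For example (illustrative): with nlocals == 3, buffer slot 0 holds
// local[2], and buffer slot 2 holds local[0] (the receiver), i.e.
// buffer slot i corresponds to interpreter local[nlocals - 1 - i].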
// Initialize monitors in the compiled activation.
int index;
// find all the locals that the interpreter thinks contain live oops
// compute the offset into the locals so that we can treat the buffer
// as if the locals were still in the interpreter frame
// The interpreter thinks this local is dead but the compiler
// doesn't, so pretend that the interpreter passed in null.
} else {
0,
true /*unaligned*/, true /*wide*/));
}
}
// the storage for the OSR buffer is freed manually in the LIRGenerator.
state->clear_locals();
append(g);
_osr_entry->set_end(g);
}
// Set up locals for receiver
int idx = 0;
// we should always see the receiver
idx = 1;
}
// Set up locals for incoming arguments
// don't allow T_ARRAY to propagate into locals types
}
// lock synchronized method
if (method()->is_synchronized()) {
}
return state;
}
: _scope_data(NULL)
, _instruction_count(0)
, _osr_entry(NULL)
, _memory(new MemoryBuffer())
{
// determine entry points and bci2block mapping
// setup state for std entry
// complete graph
{
// Compiles where the root method is an intrinsic need a special
// compilation environment because the bytecodes for the method
// shouldn't be parsed during the compilation, only the special
// Intrinsic node should be emitted. If this isn't done, the
// code for the inlined version will be different than the root
// compiled version which could lead to monotonicity problems on
// intel.
// Set up a stream so that appending instructions works properly.
s.reset_to_bci(0);
scope_data()->set_stream(&s);
s.next();
// setup the initial block state
_last = start_block;
load_local(doubleType, 0);
}
// Emit the intrinsic node.
method_return(dpop());
// connect the begin and end blocks and we're all done.
break;
}
case vmIntrinsics::_Reference_get:
{
{
// With java.lang.ref.Reference.get() we must go through the
// intrinsic - when G1 is enabled - even when get() is the root
// method of the compile so that, if necessary, the value in
// the referent field of the reference object gets recorded by
// the pre-barrier code.
// Specifically, if G1 is enabled, the value in the referent
// field is recorded by the G1 SATB pre barrier. This will
// result in the referent being marked live and the reference
// object removed from the list of discovered references during
// reference processing.
// Also we need intrinsic to prevent commoning reads from this field
// across safepoint since GC can change its value.
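// For example (illustrative): two reads of the referent field separated
// by a safepoint must not be commoned, because the GC may clear the
// referent in between and the second read could legitimately see NULL.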
// Set up a stream so that appending instructions works properly.
s.reset_to_bci(0);
scope_data()->set_stream(&s);
s.next();
// setup the initial block state
_last = start_block;
load_local(objectType, 0);
// Emit the intrinsic node.
method_return(apop());
// connect the begin and end blocks and we're all done.
break;
}
// Otherwise, fall thru
}
default:
break;
}
// for osr compile, bailout if some requirements are not fulfilled
if (osr_bci != -1) {
assert(osr_block->is_set(BlockBegin::was_visited_flag),"osr entry must have been visited for osr compile");
// check if osr entry point has empty stack - we cannot handle non-empty stacks at osr entry points
BAILOUT("stack not empty at OSR entry point");
}
}
#ifndef PRODUCT
#endif
}
return copy_state_before_with_bci(bci());
}
return copy_state_exhandling_with_bci(bci());
}
return copy_state_for_exception_with_bci(bci());
}
}
if (!has_handler()) return NULL;
}
if (s == NULL) {
} else {
}
}
return s;
}
int recur_level = 0;
if (s->method() == cur_callee) {
++recur_level;
}
}
return recur_level;
}
bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Bytecodes::Code bc, Value receiver) {
// clear out any existing inline bailout condition
// exclude methods we don't want to inline
return false;
}
// method handle invokes
if (callee->is_method_handle_intrinsic()) {
return try_method_handle_inline(callee);
}
// handle intrinsics
if (try_inline_intrinsics(callee)) {
return true;
}
// try normal inlining
}
// certain methods cannot be parsed at all
return false;
}
// If bytecode not set use the current one.
}
return true;
// Entire compilation could fail during try_inline_full call.
// In that case printing inlining decision info is useless.
if (!bailed_out())
return false;
}
// Certain methods cannot be parsed at all:
return NULL;
}
// negative filter: should callee NOT be inlined? returns NULL, ok to inline, or rejection msg
return NULL;
}
if (callee->is_synchronized()) {
// We don't currently support any synchronized intrinsics
return false;
}
// callee seems like a good candidate
// determine id
// InlineNatives does not control Reference.get
INLINE_BAILOUT("intrinsic method inlining disabled");
}
bool preserves_state = false;
bool cantrap = true;
switch (id) {
case vmIntrinsics::_arraycopy:
if (!InlineArrayCopy) return false;
break;
#ifdef TRACE_HAVE_INTRINSICS
case vmIntrinsics::_classID:
case vmIntrinsics::_threadID:
preserves_state = true;
cantrap = true;
break;
case vmIntrinsics::_counterTime:
preserves_state = true;
cantrap = false;
break;
#endif
case vmIntrinsics::_currentTimeMillis:
case vmIntrinsics::_nanoTime:
preserves_state = true;
cantrap = false;
break;
case vmIntrinsics::_floatToRawIntBits :
case vmIntrinsics::_intBitsToFloat :
case vmIntrinsics::_doubleToRawLongBits :
case vmIntrinsics::_longBitsToDouble :
if (!InlineMathNatives) return false;
preserves_state = true;
cantrap = false;
break;
case vmIntrinsics::_getClass :
case vmIntrinsics::_isInstance :
if (!InlineClassNatives) return false;
preserves_state = true;
break;
case vmIntrinsics::_currentThread :
if (!InlineThreadNatives) return false;
preserves_state = true;
cantrap = false;
break;
if (!InlineMathNatives) return false;
cantrap = false;
preserves_state = true;
break;
// Use special nodes for Unsafe instructions so we can more easily
// perform an address-mode optimization on the raw variants
case vmIntrinsics::_checkIndex :
if (!InlineNIOCheckIndex) return false;
preserves_state = true;
break;
case vmIntrinsics::_compareAndSwapLong:
if (!VM_Version::supports_cx8()) return false;
// fall through
case vmIntrinsics::_compareAndSwapInt:
return true;
case vmIntrinsics::_getAndAddInt:
if (!VM_Version::supports_atomic_getadd4()) {
return false;
}
return append_unsafe_get_and_set_obj(callee, true);
case vmIntrinsics::_getAndAddLong:
if (!VM_Version::supports_atomic_getadd8()) {
return false;
}
return append_unsafe_get_and_set_obj(callee, true);
case vmIntrinsics::_getAndSetInt:
if (!VM_Version::supports_atomic_getset4()) {
return false;
}
return append_unsafe_get_and_set_obj(callee, false);
case vmIntrinsics::_getAndSetLong:
if (!VM_Version::supports_atomic_getset8()) {
return false;
}
return append_unsafe_get_and_set_obj(callee, false);
case vmIntrinsics::_getAndSetObject:
#ifdef _LP64
return false;
}
return false;
}
#else
if (!VM_Version::supports_atomic_getset4()) {
return false;
}
#endif
return append_unsafe_get_and_set_obj(callee, false);
case vmIntrinsics::_Reference_get:
// Use the intrinsic version of Reference.get() so that the value in
// the referent field can be registered by the G1 pre-barrier code.
// Also to prevent commoning reads from this field across safepoint
// since GC can change its value.
preserves_state = true;
break;
default : return false; // do not inline
}
// create intrinsic node
if (is_profiling()) {
// Don't profile in the special case where the root method
// is the intrinsic
// Note that we'd collect profile data in this method if we wanted it.
compilation()->set_would_profile(true);
if (profile_calls()) {
if (has_receiver) {
}
}
}
}
// append instruction & push result
// done
return true;
}
// Introduce a new callee continuation point - all Ret instructions
// will be replaced with Gotos to this point.
// Note: can not assign state to continuation yet, as we have to
// pick up the state from the Ret instructions.
// Push callee scope
// Temporarily set up bytecode stream so we can append instructions
// (only using the bci of this stream)
// Must copy state to avoid wrong sharing when parsing bytecodes
// Clear out bytecode stream
// Ready to resume parsing in subroutine
// If we bailed out during parsing, return immediately (this is bad news)
CHECK_BAILOUT_(false);
// Detect whether the continuation can actually be reached. If not,
// it has not had state set by the join() operations in
// iterate_bytecodes_for_block()/ret() and we should not touch the
// iteration state. The calling activation of
// iterate_bytecodes_for_block will then complete normally.
// add continuation to work list instead of parsing it immediately
}
}
"continuation can only be visited in case of backward branches");
// continuation is in work list, so end iteration of current block
_skip_block = true;
return true;
}
// Inline the entry of a synchronized method as a monitor enter and
// register the exception handler which releases the monitor if an
// exception is thrown within the callee. Note that the monitor enter
// cannot throw an exception itself, because the receiver is
// guaranteed to be non-null by the explicit null check at the
// beginning of inlining.
_last->set_needs_null_check(false);
ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
scope_data()->set_has_handler();
}
// If an exception is thrown and not handled within an inlined
// synchronized method, the monitor must be released before the
// exception is rethrown in the outer scope. Generate the appropriate
// instructions here.
// Report exit from inline methods. We don't have a stream here
// so pass an explicit bci of SynchronizationEntryBCI.
append_with_bci(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args), bci);
}
if (lock) {
assert(state()->locks_size() > 0 && state()->lock_at(state()->locks_size() - 1) == lock, "lock is missing");
}
// exit the monitor in the context of the synchronized method
// exit the context of the synchronized method
if (!default_handler) {
pop_scope();
}
}
// perform the throw as if at the call site
_block = orig_block;
_state = orig_state;
}
bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecodes::Code bc, Value receiver) {
INLINE_BAILOUT("inlining prohibited by policy");
}
// first perform tests of things it's not possible to inline
if (callee->has_exception_handlers() &&
if (callee->is_synchronized() &&
// Proper inlining of methods with jsrs requires a little more work.
// When SSE2 is used on intel, then no special handling is needed
// for strictfp because the enum-constant is fixed at compile time,
// the check for UseSSE2 is needed here
if (strict_fp_requires_explicit_rounding && UseSSE < 2 && method()->is_strict() != callee->is_strict()) {
INLINE_BAILOUT("caller and callee have different strict fp requirements");
}
INLINE_BAILOUT("mdo allocation failed");
}
// now perform tests that are based on flag settings
if (callee->force_inline()) {
} else if (callee->should_inline()) {
} else {
// use heuristic controls on inlining
if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep");
// don't inline throwable methods unless the inlining tree is rooted in a throwable class
// Throwable constructor call
}
INLINE_BAILOUT("don't inline Throwable constructors");
}
}
INLINE_BAILOUT("total inlining greater than DesiredMethodLimit");
}
// printing
}
// NOTE: Bailouts from this point on, which occur at the
// GraphBuilder level, do not cause bailout just of the inlining but
// in fact of the entire compilation.
// Insert null check if necessary
if (has_receiver) {
// note: null check must happen even if first instruction of callee does
// an implicit null check since the callee is in a different scope
// and we must make sure exception handling does the right thing
}
if (is_profiling()) {
// Note that we'd collect profile data in this method if we wanted it.
// this may be redundant here...
compilation()->set_would_profile(true);
if (profile_calls()) {
}
}
// Introduce a new callee continuation point - if the callee has
// more than one return instruction or the return does not allow
// fall-through of control flow, all return instructions of the
// callee will need to be replaced by Goto's pointing to this
// continuation point.
bool continuation_existed = true;
// low number so that continuation gets parsed as early as possible
#ifndef PRODUCT
if (PrintInitialBlockList) {
}
#endif
continuation_existed = false;
}
// Record number of predecessors of continuation block before
// inlining, to detect if inlined method has edges to its
// continuation after inlining.
// Push callee scope
// the BlockListBuilder for the callee could have bailed out
if (bailed_out())
return false;
// Temporarily set up bytecode stream so we can append instructions
// (only using the bci of this stream)
// Pass parameters into callee state: add assignments
// note: this will also ensure that all arguments are computed before being passed
}
// Remove args from stack.
// Note that we preserve locals state in case we can use it later
// (see use of pop_scope() below)
// Inline the locking of the receiver if the callee is synchronized
if (callee->is_synchronized()) {
lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror())))
}
append(new RuntimeCall(voidType, "dtrace_method_entry", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), args));
}
if (profile_inlined_calls()) {
}
if (callee_start_block != NULL) {
// The state for this goto is in the scope of the callee, so use
// the entry bci for the callee instead of the call site bci.
}
// Clear out bytecode stream
// Ready to resume parsing in callee (either in the same block we
// were in before or in the callee's start block)
// If we bailed out during parsing, return immediately (this is bad news)
if (bailed_out())
return false;
// iterate_all_blocks theoretically traverses in random order; in
// practice, we have only traversed the continuation if we are
// inlining into a subroutine
"continuation should not have been parsed yet if we created it");
// At this point we are almost ready to return and resume parsing of
// the caller back in the GraphBuilder. The only thing we want to do
// first is an optimization: during parsing of the callee we
// generated at least one Goto to the continuation block. If we
// generated exactly one, and if the inlined method spanned exactly
// one block (and we didn't have to Goto its entry), then we snip
// off the Goto to the continuation, allowing control to fall
// through back into the caller block and effectively performing
// block merging. This allows load elimination and CSE to take place
// across multiple callee scopes if they are relatively simple, and
// is currently essential to making inlining profitable.
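// For example (hypothetical): inlining "int id(int x) { return x; }"
// emits a single Goto from the lone callee block to the continuation;
// snipping that Goto lets the caller block simply fall through.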
if (num_returns() == 1
&& block() == orig_block
&& block() == inline_cleanup_block()) {
// Inlining made the instructions after the invoke in the
// caller unreachable, so skip filling this block
// with instructions!
_skip_block = true;
} else {
// Resume parsing in continuation block unless it was already parsed.
// Note that if we don't change _last here, iteration in
// iterate_bytecodes_for_block will stop when we return.
// add continuation to work list instead of parsing it immediately
_skip_block = true;
}
}
// Fill the exception handler for synchronized methods with instructions
} else {
pop_scope();
}
return true;
}
switch (iid) {
case vmIntrinsics::_invokeBasic:
{
// get MethodHandle receiver
if (type->is_constant()) {
// We don't do CHA here so only inline static and statically bindable methods.
return true;
}
} else {
}
} else {
}
}
break;
case vmIntrinsics::_linkToVirtual:
case vmIntrinsics::_linkToStatic:
case vmIntrinsics::_linkToSpecial:
case vmIntrinsics::_linkToInterface:
{
// pop MemberName argument
if (type->is_constant()) {
// If the target is another method handle invoke, try recursively to get
// a better target.
if (target->is_method_handle_intrinsic()) {
if (try_method_handle_inline(target)) {
return true;
}
} else {
// Cast receiver to its type.
append(c);
}
}
// Cast reference arguments to their types.
if (t->is_klass()) {
append(c);
}
}
j += t->size(); // long and double take two slots
}
// We don't do CHA here so only inline static and statically bindable methods.
return true;
}
} else {
}
}
} else {
}
}
break;
default:
break;
}
return false;
}
}
}
_scope_data = data;
}
// this scope can be inlined directly into the caller so remove
// the block at bci 0.
}
_scope_data = data;
}
data->set_parsing_jsr();
// Must clone bci2block list as we will be mutating it in order to
// properly clone all blocks in jsr region as well as exception
// handlers containing rets
_scope_data = data;
}
// accumulate minimum number of monitor slots to be reserved
}
}
if (InlineUnsafeOps) {
#ifndef _LP64
#endif
compilation()->set_has_unsafe_access(true);
}
return InlineUnsafeOps;
}
if (InlineUnsafeOps) {
#ifndef _LP64
#endif
compilation()->set_has_unsafe_access(true);
kill_all();
}
return InlineUnsafeOps;
}
if (InlineUnsafeOps) {
compilation()->set_has_unsafe_access(true);
}
return InlineUnsafeOps;
}
if (InlineUnsafeOps) {
compilation()->set_has_unsafe_access(true);
}
return InlineUnsafeOps;
}
if (InlineUnsafeOps) {
if (is_static) {
obj_arg_index = 0;
} else {
}
#ifndef _LP64
#endif
compilation()->set_has_unsafe_access(true);
}
return InlineUnsafeOps;
}
// Pop off some args to specially handle, then push back
// Separately handle the unsafe arg. It is not needed for code
// generation, but must be null checked
#ifndef _LP64
#endif
// An unsafe CAS can alias with other field accesses, but we don't
// know which ones, so mark the state as not preserved. This will
// cause CSE to invalidate memory across it.
bool preserves_state = false;
Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), args, false, state_before, preserves_state);
compilation()->set_has_unsafe_access(true);
}
if (success) {
else
} else {
else
}
}
if (!PrintInlining) return;
if (success && CIPrintMethodCodes) {
callee->print_codes();
}
}
if (InlineUnsafeOps) {
#ifndef _LP64
#endif
compilation()->set_has_unsafe_access(true);
kill_all();
}
return InlineUnsafeOps;
}
#ifndef PRODUCT
}
#endif // PRODUCT
}
}