/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "ci/ciCPCache.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/cfgnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/sharedRuntime.hpp"
void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
  outputStream* out = tty;
  if (!PrintInlining) {
    // Print the caller and the profiled call site to tty (details elided in this excerpt).
  } else {
    out = C->print_inlining_stream();
  }
  // Print prof_klass and the receiver/site counts for this call site (details elided in this excerpt).
}
// Dtrace currently doesn't work unless all calls are vanilla
if (env()->dtrace_method_probes()) {
allow_inline = false;
}
// Note: When we get profiling during stage-1 compiles, we want to pull
// from more specific profile data which pertains to this inlining.
// Right now, ignore the information in jvms->caller(), and do method[bci].
// See how many times this site has been invoked.
// Receivers in the profile structure are ordered by call counts
// so that the most called (major) receiver is profile.receiver(0).
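  // Illustrative sketch (assumption, not the verbatim source): with this
  // ordering, the per-site numbers are read off the profile roughly as
  //   int site_count     = profile.count();            // total invocations seen here
  //   int receiver_count = profile.receiver_count(0);  // invocations with the major receiver
  // so receiver_count / (double) site_count approximates profile.receiver_prob(0).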
}
if (receiver_count >= 0) {
}
}
}
// Special case the handling of certain common, profitable library
// methods. If these methods are replaced with specialized code,
// then we return it as the inlined version of the call.
// We do this before the strict f.p. check below because the
// intrinsics handle strict f.p. correctly.
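  // Illustrative sketch (assumption about the surrounding, elided code): the
  // intrinsics table is consulted first and, when it yields a generator, that
  // generator stands in for the bytecoded callee:
  //   CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
  //   if (cg != NULL)  return cg;   // specialized code replaces the call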
if (allow_inline && allow_intrinsics) {
if (cg->is_predicted()) {
// Code without intrinsic but, hopefully, inlined.
}
}
return cg;
}
}
// Do method handle calls.
// NOTE: This must happen before normal inlining logic below since
// MethodHandle.invoke* are native methods which obviously don't
// have bytecodes and so normal inlining fails.
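  // Illustrative sketch (assumption): such callees get a dedicated generator
  // rather than going through bytecode-based inlining, along the lines of
  //   cg = CallGenerator::for_method_handle_call(jvms, caller, callee, delayed_forbidden);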
if (callee->is_method_handle_intrinsic()) {
assert(cg == NULL || !delayed_forbidden || !cg->is_late_inline() || cg->is_mh_late_inline(), "unexpected CallGenerator");
return cg;
}
// Do not inline strict fp into non-strict code, or the reverse
  if (caller->is_strict() ^ callee->is_strict()) {
    allow_inline = false;
}
// Attempt to inline...
if (allow_inline) {
// The profile data is only partly attributable to this caller,
// scale back the call site information.
// This is the number of times we expect the call code to be used.
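    // Illustrative sketch (assumption, not the verbatim source): the scaled
    // estimate weights the profiled site count by how much of the caller's
    // profile is attributable to this particular inlining, e.g.
    //   float past_uses     = jvms->method()->scale_count(site_count, prof_factor);
    //   float expected_uses = past_uses;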
// Try inlining a bytecoded method:
if (!call_does_dispatch) {
if (UseOldInlining) {
} else {
// Make a disembodied, stateless ILT.
// TO DO: When UseOldInlining is removed, copy the ILT code elsewhere.
// Note: ilt is for the root of this parse, not the present call site.
}
      if (!UseOldInlining) {
        // Initialize the warm-call info from the profile (elided in this excerpt).
      }
      bool should_delay = false;
if (allow_inline) {
// Delay the inlining of this method to give us the
// opportunity to perform some high level optimizations
// first.
}
}
// Fall through.
} else if (require_inline || !InlineWarmCalls) {
return cg;
} else {
CallGenerator* cold_cg = call_generator(callee, vtable_index, call_does_dispatch, jvms, false, prof_factor);
}
}
}
// Try using the type profile.
// The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
// receiver_method = profile.method();
// Profiles do not suggest methods now. Look it up in the major receiver.
}
if (receiver_method != NULL) {
// The single majority receiver sufficiently outweighs the minority.
// Look up second receiver.
if (next_receiver_method != NULL) {
// Skip if we can't inline second receiver's method
next_hit_cg = NULL;
}
}
}
) {
// Generate uncommon trap for class check failure path
// in case of monomorphic or bimorphic virtual call site.
} else {
// Generate virtual call for class check failure path
// in case of polymorphic virtual call site.
}
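        // Illustrative sketch (assumption) of the shape produced around the
        // class-check failure path:
        //   if (recv->klass() == profiled_klass)   { ...inlined hit path... }
        //   else                                   { uncommon_trap | virtual call }
        // The bimorphic case simply adds a second klass test before the miss path.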
if (next_hit_cg != NULL) {
trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
// We don't need to record dependency on a receiver here and below.
// Whenever we inline, the dependency is added by Parse::Parse().
}
trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
CallGenerator* cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
}
}
}
}
}
}
// There was no special inlining tactic, or it bailed out.
// Use a more generic tactic, like a simple call.
if (call_does_dispatch) {
} else {
// Class Hierarchy Analysis or Type Profile reveals a unique target,
// or it is a static or special call.
}
}
// Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics.
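// Illustrative example (assumption, Java level): a chain such as
//   String s = new StringBuilder().append(a).append(b).toString();
// is easier to recognize and replace as a whole if the individual
// StringBuilder calls have not already been inlined into the caller.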
if (has_stringbuilder()) {
// Delay SB calls only when called from non-SB code
return false;
}
switch (call_method->intrinsic_id()) {
case vmIntrinsics::_StringBuilder_void:
case vmIntrinsics::_StringBuilder_int:
case vmIntrinsics::_StringBuffer_void:
case vmIntrinsics::_StringBuffer_int:
case vmIntrinsics::_StringBuffer_String:
case vmIntrinsics::_Integer_toString:
return true;
case vmIntrinsics::_String_String:
{
          if (m != NULL &&
              (m->intrinsic_id() == vmIntrinsics::_StringBuffer_String ||
               m->intrinsic_id() == vmIntrinsics::_StringBuilder_String))
            // Delay String.<init>(new SB())
            return true;
}
return false;
}
default:
return false;
}
}
return false;
}
// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
// Additional inputs to consider...
// bc = bc()
// caller = method()
// iter().get_method_holder_index()
// Interface classes can be loaded & linked and never get around to
// being initialized. Uncommon-trap for not-initialized static or
// v-calls. Let interface calls happen.
if (!holder_klass->is_being_initialized() &&
!holder_klass->is_initialized() &&
!holder_klass->is_interface()) {
return true;
}
return false;
}
//------------------------------do_call----------------------------------------
// Handle your basic call. Inline if we can & want to, else just setup call.
// It's likely we are going to add debug info soon.
// Also, if we inline a method that eventually needs debug info for this JVMS,
// our contribution to it is cleaned up right here.
// Set frequently used booleans
// Find target being called
bool will_link;
ciMethod* orig_callee = iter().get_method(will_link, &declared_signature); // callee in the bytecode
// uncommon-trap when callee is unloaded, uninitialized or will not link
// bailout when too many arguments for register representation
#ifndef PRODUCT
}
#endif
return;
}
//assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc"); // XXX invokehandle (cur_bc_raw)
// Note: this takes into account invokeinterface of methods declared in java/lang/Object,
// which should be invokevirtuals but according to the VM spec may be invokeinterfaces
assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
// Note: In the absence of miranda methods, an abstract class K can perform
// an invokevirtual directly on an interface method I.m if K implements I.
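// Illustrative example (assumption, Java level):
//   interface I { void m(); }
//   abstract class K implements I { /* m() not redeclared */ }
//   ((K) x).m();   // may be emitted as invokevirtual K.m, resolving to I.m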
// orig_callee is the resolved callee whose signature includes the
// appendix argument.
const bool is_signature_polymorphic = MethodHandles::is_signature_polymorphic(orig_callee->intrinsic_id());
// Push appendix argument (MethodType, CallSite, etc.), if one.
if (iter().has_appendix()) {
}
// ---------------------
// Does Class Hierarchy Analysis reveal only a single target of a v-call?
// Then we may inline or make a static call, but become dependent on there being only 1 target.
// Does the call-site type profile reveal only one receiver?
// Then we may introduce a run-time check and inline on the path where it succeeds.
// The other path may uncommon_trap, check for another receiver, or do a v-call.
// Try to get the most accurate receiver type
bool call_does_dispatch = false;
if (is_virtual_or_interface) {
// call_does_dispatch and vtable_index are out-parameters. They might be changed.
}
// Note: It's OK to try to inline a virtual call.
// The call generator will not attempt to inline a polymorphic call
// unless it knows how to optimize the receiver dispatch.
// ---------------------
// ---------------------
// Decide call tactic.
// This call checks with CHA, the interpreter profile, intrinsics table, etc.
// It decides whether inlining is desirable or not.
CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor());
// NOTE: Don't use orig_callee and callee after this point! Use cg->method() instead.
// ---------------------
// Round double arguments before call
#ifndef PRODUCT
// bump global counters for calls
// Record first part of parsing work for this call
parse_histogram()->record_change();
#endif // not PRODUCT
// save across call, for a subsequent cast_not_null.
// Bump method data counters (We profile *before* the call is made
// because exceptions don't return to the call site.)
// When inlining attempt fails (e.g., too many arguments),
// it may contaminate the current compile state, making it
// impossible to pull back and try again. Once we call
// cg->generate(), we are committed. If it fails, the whole
// compilation task is compromised.
if (failing()) return;
// This can happen if a library intrinsic is available, but refuses
// the call site, perhaps because it did not match a pattern the
// intrinsic was expecting to optimize. Should always be possible to
// get a normal java call that may inline in that case
cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false);
return;
}
}
// Accumulate has_loops estimate
}
// Reset parser state from [new_]jvms, which now carries results of the call.
// Return value (if any) is already pushed on the stack by the cg.
} else {
}
if (!stopped()) {
// This was some sort of virtual call, which did a null check for us.
// Now we can assert receiver-not-null, on the normal return path.
// %%% assert(receiver == cast, "should already have cast the receiver");
}
// Round double result after a call from strict to non-strict code
// Be careful here with return types.
// It's OK for a method to return a value that is discarded.
// The discarding does not require any special action from the caller.
// The Java code knows this, at VerifyType.isNullConversion.
// Nothing. These cases are handled in lambda form bytecode.
assert(ct == T_INT || is_subword_type(ct), err_msg_res("must match: rt=%s, ct=%s", type2name(rt), type2name(ct)));
}
}
} else {
}
// Now that the value is well-behaved, continue with the call-site type.
}
} else {
// Symbolic resolution enforces the types to be the same.
// NOTE: We must relax the assert for unloaded types because two
// different ciType instances of the same unloaded class type
// can appear to be "loaded" by different loaders (depending on
// the accessing class).
}
// If the return type of the method is not loaded, assert that the
// value we got is a null. Otherwise, we need to recompile.
#ifndef PRODUCT
}
#endif
}
// If there is going to be a trap, put it at the next bytecode:
null_assert(peek());
}
}
// Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
#endif
}
//---------------------------catch_call_exceptions-----------------------------
// Put a Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
// Exceptions are delivered through this channel:
// Add a CatchNode.
GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);
// Do not introduce unloaded exception types into the graph:
/* We've already seen an unloaded exception with h_bci,
so don't duplicate. Duplication will cause the CatchNode to be
unnecessarily large. See 4713716. */
continue;
} else {
}
}
// (We use make_from_klass because it respects UseUniqueSubclasses.)
// Note: It's OK if the BCIs repeat themselves.
}
// now branch with the exception state to each of the (potential)
// handlers
for (int i = 0; i < len; i++) {
// Setup JVM state to enter the handler.
PreserveJVMState pjvms(this);
// Locals are just copied from before the call.
// Get control from the CatchNode.
// This handler cannot happen?
// Create exception oop
// Handle unloaded exception classes.
// An unloaded exception type is coming here. Do an uncommon trap.
#ifndef PRODUCT
// We do not expect the same handler bci to take both cold unloaded
// and hot loaded exceptions. But, watch for it.
}
#endif
// Emit an uncommon trap instead of processing the block.
continue;
}
// go to the exception handler
if (handler_bci < 0) { // merge with corresponding rethrow node
} else {                      // else jump to the corresponding handler
}
}
// The first CatchProj is for the normal return.
// (Note: If this is a call to rethrow_Java, this node goes dead.)
set_control(_gvn.transform( new (C) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}
//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses. We do a Deutsch-Shiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: We have some handlers with subklasses or whose klasses are not
// loaded at compile-time. We have to call the runtime to resolve the exception.
// So we insert a RethrowCall and all the logic that goes with it.
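// Illustrative sketch (assumption) of the three shapes, in order of preference:
//   (1) no handler:            merge straight into the Rethrow path;
//   (2) exact, loaded klass:   if (ex_klass == handler_klass)  goto handler;
//   (3) otherwise:             call the runtime (RethrowCall) to sort it out.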
// Caller is responsible for saving away the map for normal control flow!
// No action needed.
return;
}
// determine potential exception handlers
ex_type->klass_is_exact());
// Start executing from the given throw state. (Keep its stack, for now.)
// Get the exception oop as known at compile time.
// Get the exception oop klass from its header
ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
// Compute the exception klass a little more cleverly.
// Obvious solution is to simply do a LoadKlass from the 'ex_node'.
// However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
// each arm of the Phi. If I know something clever about the exceptions
// I'm loading the class from, I can replace the LoadKlass with the
// klass constant for the exception oop.
Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
ex_klass_node->init_req( i, k );
}
}
}
// Scan the exception table for applicable handlers.
// If none, we can call rethrow() and be done!
// If precise (loaded with no subklasses), insert a D.S. style
// pointer compare to the correct handler and loop back.
// If imprecise, switch to the Rethrow VM-call style handling.
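// Illustrative sketch (assumption) of the precise-handler test emitted per entry:
//   if (ex_klass_node == ConP(catch_klass))   goto handler_bci;   // exact match
//   else                                      fall through to the next entry / rethrow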
// iterate through all entries sequentially
if (handler->is_rethrow()) {
// If we fell off the end of the table without finding an imprecise
// exception klass (and without finding a generic handler) then we
// know this exception is not handled in this method. We just rethrow
// the exception into the caller.
return;
}
// exception handler bci range covers throw_bci => investigate further
if (remaining == 1) {
#ifndef PRODUCT
if (PrintOpto && WizardMode) {
}
#endif
return; // No more handling to be done here!
}
// Get the handler's klass
// fall through into catch_call_exceptions which will emit a
// handler with an uncommon trap.
break;
}
break; // bail out
// Check the type of the exception against the catch type
if (!stopped()) {
PreserveJVMState pjvms(this);
const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr();
#ifndef PRODUCT
if (PrintOpto && WizardMode) {
klass->print_name();
}
#endif
}
// Come here if exception does not match handler.
// Carry on with more handler checks.
--remaining;
}
// Oops, need to call into the VM to resolve the klasses at runtime.
// Note: This call must not deoptimize, since it is not a real call at this bci!
ex_node);
// Rethrow is a pure call, no side effects, only a result.
// The result cannot be allocated, so we use I_O
// Catch exceptions from the rethrow
}
// (Note: Moved add_debug_info into GraphKit::add_safepoint_edges.)
#ifndef PRODUCT
if (CountCompiledCalls) {
  if (at_method_entry) {
// bump invocation counter if top method (for statistics)
Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(methodOopDesc::compiled_invocation_counter_offset()));
}
} else if (is_inline) {
switch (bc()) {
case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
case Bytecodes::_invokestatic:
case Bytecodes::_invokedynamic:
case Bytecodes::_invokespecial: increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
default: fatal("unexpected call bytecode");
}
} else {
switch (bc()) {
case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
case Bytecodes::_invokestatic:
case Bytecodes::_invokedynamic:
case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
default: fatal("unexpected call bytecode");
}
}
}
}
#endif //PRODUCT
bool is_virtual,
bool& call_does_dispatch, int& vtable_index) {
// Set default values for out-parameters.
call_does_dispatch = true;
// Choose call strategy.
// Has the call been sufficiently improved such that it is no longer a virtual?
if (optimized_virtual_method != NULL) {
call_does_dispatch = false;
// We can make a vtable call at this site
}
return callee;
}
// Identify possible target method and inlining style
// only use for virtual or interface calls
// If it is obviously final, do not bother to call find_monomorphic_target,
// because the class hierarchy checks are not needed, and may fail due to
// incompletely loaded classes. Since we do our own class loading checks
// in this module, we may confidently bind to any method.
if (callee->can_be_statically_bound()) {
return callee;
}
// Attempt to improve the receiver
bool actual_receiver_is_exact = false;
if (receiver_type != NULL) {
// Array methods are all inherited from Object, and are monomorphic.
if (receiver_type->isa_aryptr() &&
    callee->holder() == env()->Object_klass()) {
return callee;
}
// All other interesting cases are instance klasses.
if (!receiver_type->isa_instptr()) {
return NULL;
}
// ikl is the same or a better type than the original actual_receiver,
// e.g. static receiver from bytecodes.
// Is the actual_receiver exact?
}
}
ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver);
if (cha_monomorphic_target != NULL) {
// Look at the method-receiver type. Does it add "too much information"?
// Calling this method would include an implicit cast to its holder.
// %%% Not yet implemented. Would throw minor asserts at present.
// %%% The most common wins are already gained by +UseUniqueSubclasses.
// To fix, put the higher_equal check at the call of this routine,
// and add a CheckCastPP to the receiver.
if (TraceDependencies) {
}
}
}
}
if (cha_monomorphic_target != NULL) {
// Hardwiring a virtual.
// If we inlined because CHA revealed only a single target method,
// then we are dependent on that target method not getting overridden
// by dynamic class loading. Be sure to test the "static" receiver
// dest_method here, as opposed to the actual receiver, which may
// falsely lead us to believe that the receiver is final or private.
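    // Illustrative sketch (assumption): the guard here is a recorded dependence
    // rather than emitted code, along the lines of
    //   dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
    // so that later loading of a subclass which overrides the target forces deoptimization.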
return cha_monomorphic_target;
}
// If the type is exact, we can still bind the method w/o a vcall.
// (This case comes after CHA so we can see how much extra work it does.)
if (actual_receiver_is_exact) {
// In case of evolution, there is a dependence on every inlined method, since each
// such method can be changed when its class is redefined.
if (exact_method != NULL) {
#ifndef PRODUCT
if (PrintOpto) {
}
#endif
return exact_method;
}
}
return NULL;
}