// callGenerator.cpp (revision 4127)
/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciCPCache.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}
//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
bool _is_osr;
float _expected_uses;
public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
  }

  virtual bool      is_parse() const           { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }
};
JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }
if (C->failing()) {
return NULL; // bailing out of the compile; do not try to parse
}
  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif
  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }
// Simply return the exit state of the parser,
// augmented by any exceptional states.
return exits.transfer_exceptions_into_jvms();
}
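// Note: every generator returns through transfer_exceptions_into_jvms(),
// which folds any pending exception states into the JVMState handed back
// to the caller, so exceptional paths stay visible to the caller's parser.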
//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
private:
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool _separate_io_proj;
public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);
};
JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  CallStaticJavaNode* call = new (kit.C) CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
if (!is_static) {
// Make an explicit receiver null_check as part of this call.
// Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
return kit.transfer_exceptions_into_jvms();
}
// Mark the call node as virtual, sort of:
call->set_optimized_virtual(true);
if (method()->is_method_handle_intrinsic() ||
method()->is_compiled_lambda_form()) {
call->set_method_handle_invoke(true);
}
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}
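// With _separate_io_proj set, the exceptional memory and I/O projections
// of the call stay distinct from the fall-through projections; this is
// what allows LateInlineCallGenerator (below) to splice an inlined body
// over the call node after the main parse has finished.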
//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
int _vtable_index;
public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};
JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);
// If the receiver is a constant null, do not torture the system
// by attempting to call through it. The compile will proceed
// correctly, but may bail out in final_graph_reshaping, because
// the call instruction will have a seemingly deficient out-count.
// (The bailout says something misleading about an "infinite loop".)
NULL, "null receiver");
return kit.transfer_exceptions_into_jvms();
}
// Ideally we would unconditionally do a null check here and let it
// be converted to an implicit check based on profile information.
// However currently the conversion to implicit null checks in
// Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod*     caller    = kit.method();
  ciMethodData* caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
// Make an explicit receiver null_check as part of this call.
// Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
return kit.transfer_exceptions_into_jvms();
}
}
"no vtable calls if +UseInlineCaches ");
// Normal inline cache used for call
CallDynamicJavaNode *call = new (kit.C) CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
// Represent the effect of an implicit receiver null_check
// as part of this call. Since we share a map with the caller,
// his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}
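// Note: the vtable index is carried on the CallDynamicJavaNode, but per
// the assert above it stays invalid whenever +UseInlineCaches is on; in
// that case the runtime resolves the site through an inline cache rather
// than a vtable load.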
CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  return new ParseGenerator(m, expected_uses);
}
// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller. Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  float past_uses = m->interpreter_invocation_count();
float expected_uses = past_uses;
return new ParseGenerator(m, expected_uses, true);
}
CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  return new DirectCallGenerator(m, separate_io_proj);
}
CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  return new VirtualCallGenerator(m, vtable_index);
}
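// Typical selection of a generator by a caller, as a rough sketch only;
// the real decision logic, with profiling and policy checks, lives in
// Compile::call_generator, and the locals below are purely illustrative:
//
//   CallGenerator* cg;
//   if (can_parse_and_inline) {
//     cg = CallGenerator::for_inline(callee, expected_uses);
//   } else if (call_is_virtual) {
//     cg = CallGenerator::for_virtual_call(callee, vtable_index);
//   } else {
//     cg = CallGenerator::for_direct_call(callee);
//   }
//   JVMState* new_jvms = cg->generate(jvms);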
// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 protected:
  CallGenerator* _inline_cg;

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _inline_cg(inline_cg) {}
virtual bool is_late_inline() const { return true; }
// Convert the CallStaticJava into an inline
virtual void do_late_inline();
  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();
    C->print_inlining_skip(this);
// Record that this call site should be revisited once the main
    // parse is finished.
    C->add_late_inline(this);

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }
};
void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it if the call node has gone dead
  if (call_node() == NULL || call_node()->outcnt() == 0) {
    return;
  }
  // Make a clone of the JVMState that is appropriate to use for driving a parse
}
// Make sure the state is a MergeMem for parsing.
}
// Make enough space for the expression stack and transfer the incoming arguments
if (nargs > 0) {
}
}
C->print_inlining_insert(this);
while (p != NULL) {
p = p->caller();
}
}
  // Set up default node notes to be picked up by the inlining
}
  // Now perform the inlining using the synthesized JVMState
if (C->failing()) return;
// Capture any exceptional control flow
// Find the result object
}
}
}
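// Once the synthesized JVMState above is in place, the late inline runs
// through the same ParseGenerator machinery as an eager inline, so the
// inlined body picks up the usual exit and exception plumbing.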
//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*  _call_info;
  CallGenerator* _if_cold;
  CallGenerator* _if_hot;
  bool _is_virtual;   // caches virtuality of if_cold
  bool _is_inline;    // caches inline-ness of if_hot
public:
  WarmCallGenerator(WarmCallInfo* ci, CallGenerator* if_cold, CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
  }
virtual bool is_inline() const { return _is_inline; }
virtual bool is_virtual() const { return _is_virtual; }
  virtual bool is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};
CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}
JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
if (PrintOpto || PrintOptoInlining) {
_call_info->print();
}
#endif
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
}
return jvms;
}
void WarmCallInfo::make_hot() {
  Unimplemented();
}
void WarmCallInfo::make_cold() {
// No action: Just dequeue.
}
//------------------------PredictedCallGenerator------------------------------
// Internal class which handles calls with a single predicted receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;
public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};
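// PredictedCallGenerator::generate() (below) emits a receiver type check:
// the hit path runs _if_hit (normally an inlined body), the miss path
// runs _if_missed, and the two resulting states are merged back together
// with a Region/Phi diamond at the end of the method.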
CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}
JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
return kit.transfer_exceptions_into_jvms();
}
  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver,
                                           _hit_prob, &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (slow_jvms == NULL) {
        return NULL;  // might happen because of NodeCountInliningCutoff
      }
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }
  if (kit.stopped()) {
    // The instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
return kit.transfer_exceptions_into_jvms();
}
  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(method());
    new_jvms = cg->generate(kit.sync_jvms());
}
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
return kit.transfer_exceptions_into_jvms();
}
  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
}
  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new (kit.C) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
return kit.transfer_exceptions_into_jvms();
}
CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee) {
  assert(callee->is_method_handle_intrinsic() ||
         callee->is_compiled_lambda_form(), "for_method_handle_call mismatch");
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee);
  if (cg != NULL)
    return cg;
  return CallGenerator::for_direct_call(callee);
}
CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  switch (iid) {
case vmIntrinsics::_invokeBasic:
{
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        CallGenerator* cg = C->call_generator(target, Method::invalid_vtable_index,
                                              false, jvms, true, PROB_ALWAYS);
        if (cg != NULL && cg->is_inline())
          return cg;
} else {
if (PrintInlining) C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), "receiver not constant");
}
}
break;
case vmIntrinsics::_linkToVirtual:
case vmIntrinsics::_linkToStatic:
case vmIntrinsics::_linkToSpecial:
case vmIntrinsics::_linkToInterface:
{
// Get MemberName argument:
      // In lambda forms we erase signature types to avoid resolution issues
      // involving class loaders.  When we optimize a method handle invoke
      // to a direct call we must cast the receiver and arguments to their
      // actual types.
// Cast receiver to its type.
}
}
      // Cast reference arguments to their types.
if (t->is_klass()) {
}
}
}
CallGenerator* cg = C->call_generator(target, vtable_index, call_is_virtual, jvms, true, PROB_ALWAYS);
return cg;
}
}
break;
default:
break;
}
return NULL;
}
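// Note: a method handle call site is inlined only when its MethodHandle
// or MemberName argument is a compile-time constant; otherwise
// for_method_handle_call (above) falls back to a direct call.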
//------------------------PredictedIntrinsicGenerator------------------------------
// Internal class which handles all predicted intrinsic calls.
class PredictedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;
public:
  PredictedIntrinsicGenerator(CallGenerator* intrinsic,
                              CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }
virtual bool is_virtual() const { return true; }
virtual bool is_inlined() const { return true; }
  virtual bool is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};
CallGenerator* CallGenerator::for_predicted_intrinsic(CallGenerator* intrinsic,
                                                      CallGenerator* cg) {
  return new PredictedIntrinsicGenerator(intrinsic, cg);
}
JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  Node* slow_ctl = _intrinsic->generate_predicate(kit.sync_jvms());
  if (kit.failing())
    return NULL;  // might happen because of NodeCountInliningCutoff

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  if (slow_ctl != NULL) {
    PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _cg->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }
  if (kit.stopped()) {
    // Predicate is always false.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
}
  // Generate intrinsic code:
  JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Intrinsic failed, so use slow code or make a direct call.
    if (slow_map == NULL) {
      CallGenerator* cg = CallGenerator::for_direct_call(method());
      new_jvms = cg->generate(kit.sync_jvms());
} else {
      kit.set_jvms(slow_jvms);
      return kit.transfer_exceptions_into_jvms();
}
}
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
return kit.transfer_exceptions_into_jvms();
}
  if (kit.stopped()) {
    // Intrinsic method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
}
  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new (kit.C) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
return kit.transfer_exceptions_into_jvms();
}
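// A predicated intrinsic guards its fast-path code with a runtime check
// supplied by generate_predicate(); when the predicate fails, control
// falls to the slow path generated by _cg, and the two paths are merged
// with the same diamond shape used by PredictedCallGenerator.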
//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces the call site with an unconditional uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }
virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool is_trap() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};
CallGenerator* CallGenerator::for_uncommon_trap(ciMethod* m,
                                                Deoptimization::DeoptReason reason,
                                                Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}
JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver).
  kit.inc_sp(method()->arg_size());
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
// Temp fix for 6529811
// Don't allow uncommon_trap to override our decision to recompile in the event
// of a class cast failure for a monomorphic call as it will never let us convert
// the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast",
                      false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
return kit.transfer_exceptions_into_jvms();
}
// (Note: Moved hook_up_call to GraphKit::set_edges_for_java_call.)
// (Note: Merged hook_up_exits into ParseGenerator::generate.)
#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)
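// Rough size model implied by the two defines above: a method of B
// bytecodes is predicted to expand into about 30 + 9.5*B ideal nodes,
// so e.g. a 40-bytecode method is charged roughly 30 + 9.5*40 = 410
// nodes of compilation graph.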
void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);
// Expected profit from inlining, in units of simple call-overheads.
_profit = 1.0;
// Expected work performed by the call in units of call-overheads.
// %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;
// Expected size of compilation graph:
// -XX:+PrintParseStatistics once reported:
// Methods seen: 9184 Methods parsed: 9184 Nodes created: 1582391
// Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}
// is_cold: Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
if (count() < WarmCallMinCount) return true;
if (profit() < WarmCallMinProfit) return true;
if (work() > WarmCallMaxWork) return true;
if (size() > WarmCallMaxSize) return true;
return false;
}
// is_hot: Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
if (count() >= HotCallCountThreshold) return true;
if (profit() >= HotCallProfitThreshold) return true;
if (work() <= HotCallTrivialWork) return true;
if (size() <= HotCallTrivialSize) return true;
return false;
}
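// Calls that are neither cold nor hot fall in between: they are queued
// as "warm" with a heat score (below) and are inlined later in heat
// order, as the remaining node budget allows.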
// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;   // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;   // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;   // better than avg.
  else                          size_factor = 0.5; // worse than avg.
  return (count() * profit() * size_factor);
}
  // Equal heat. Break the tie some other way.
  return (address)this > (address)that;
}
//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}
WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}
WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}
WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}
#ifndef PRODUCT
void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}
void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}
int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}
#endif //PRODUCT