/*
 * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

//---------------------------CallGenerator-------------------------------------
// The subclasses of this class handle generation of ideal nodes for
// call sites and method entry points.

class CallGenerator : public ResourceObj {
 public:
  enum {
    xxxunusedxxx
  };

 private:
  ciMethod*             _method;                // The method being called.

 protected:
  CallGenerator(ciMethod* method);

 public:
  // Accessors
  ciMethod*         method() const              { return _method; }

  // is_inline: At least some code implementing the method is copied here.
  virtual bool      is_inline() const           { return false; }
  // is_intrinsic: There's a method-specific way of generating the inline code.
  virtual bool      is_intrinsic() const        { return false; }
  // is_parse: Bytecodes implementing the specific method are copied here.
  virtual bool      is_parse() const            { return false; }
  // is_virtual: The call uses the receiver type to select or check the method.
  virtual bool      is_virtual() const          { return false; }
  // is_deferred: The decision whether to inline or not is deferred.
  virtual bool      is_deferred() const         { return false; }
  // is_predicted: Uses an explicit check against a predicted type.
  virtual bool      is_predicted() const        { return false; }
  // is_trap: Does not return to the caller.  (E.g., uncommon trap.)
  virtual bool      is_trap() const             { return false; }

  // Note:  It is possible for a CG to be both inline and virtual.
  // (The hashCode intrinsic does a vtable check and an inlined fast path.)

  // Utilities:
  const TypeFunc*   tf() const;

  // The given jvms has state and arguments for a call to my method.
  // Edges after jvms->argoff() carry all (pre-popped) argument values.
  //
  // Update the map with state and return values (if any) and return it.
  // The return values (0, 1, or 2) must be pushed on the map's stack,
  // and the sp of the jvms incremented accordingly.
  //
  // The jvms is returned on success.  Alternatively, a copy of the
  // given jvms, suitably updated, may be returned, in which case the
  // caller should discard the original jvms.
  //
  // The non-Parm edges of the returned map will contain updated global state,
  // and one or two edges before jvms->sp() will carry any return values.
  // Other map edges may contain locals or monitors, and should not
  // be changed in meaning.
  //
  // If the call traps, the returned map must have a control edge of top.
  // If the call can throw, the returned map must report has_exceptions().
  //
  // If the result is NULL, it means that this CallGenerator was unable
  // to handle the given call, and another CallGenerator should be consulted.
  virtual JVMState* generate(JVMState* jvms) = 0;
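
  // Illustrative sketch (not part of this interface): a caller would
  // typically try one generator and fall back to a more general one when
  // generate() declines the call by returning NULL, along these lines:
  //
  //   JVMState* new_jvms = cg->generate(jvms);
  //   if (new_jvms == NULL) {
  //     // cg could not handle this call site; consult another generator,
  //     // e.g. a plain out-of-line CallGenerator for the same method.
  //     new_jvms = fallback_cg->generate(jvms);
  //   }
  //
  // The names cg and fallback_cg are hypothetical; they stand for any two
  // CallGenerators produced by the factory methods below.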

  // How to generate a call site that is inlined:
  static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
  // How to generate code for an on-stack replacement handler.
  static CallGenerator* for_osr(ciMethod* m, int osr_bci);

  // How to generate vanilla out-of-line call sites:
  static CallGenerator* for_direct_call(ciMethod* m);   // static, special
  static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);  // virtual, interface

  // How to make a call but defer the decision whether to inline or not.
  static CallGenerator* for_warm_call(WarmCallInfo* ci,
                                      CallGenerator* if_cold,
                                      CallGenerator* if_hot);

  // How to make a call that optimistically assumes a receiver type:
  static CallGenerator* for_predicted_call(ciKlass* predicted_receiver,
                                           CallGenerator* if_missed,
                                           CallGenerator* if_hit,
                                           float hit_prob);
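
  // Illustrative sketch (assumed usage, not mandated by this header): a
  // type-profile-guided call site might be assembled from two of the above
  // generators, inlining on the hot receiver type and calling out of line
  // otherwise:
  //
  //   CallGenerator* if_hit    = CallGenerator::for_inline(callee);
  //   CallGenerator* if_missed = CallGenerator::for_virtual_call(callee, vtable_index);
  //   CallGenerator* cg = CallGenerator::for_predicted_call(profiled_klass,
  //                                                         if_missed, if_hit,
  //                                                         hit_prob);
  //
  // Here callee, vtable_index, profiled_klass, and hit_prob are placeholders
  // for values taken from the call site and its profile.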

  // How to make a call that gives up and goes back to the interpreter:
  static CallGenerator* for_uncommon_trap(ciMethod* m,
                                          Deoptimization::DeoptReason reason,
                                          Deoptimization::DeoptAction action);

  // Registry for intrinsics:
  static CallGenerator* for_intrinsic(ciMethod* m);
  static void register_intrinsic(ciMethod* m, CallGenerator* cg);
};

class InlineCallGenerator : public CallGenerator {
  virtual bool is_inline() const { return true; }

 protected:
  InlineCallGenerator(ciMethod* method) : CallGenerator(method) { }
};


//---------------------------WarmCallInfo--------------------------------------
// A struct to collect information about a given call site.
// Helps sort call sites into "hot", "medium", and "cold".
// Participates in the queueing of "medium" call sites for possible inlining.
class WarmCallInfo : public ResourceObj {
 private:

  CallNode*     _call;    // The CallNode which may be inlined.
  CallGenerator* _hot_cg; // CG for expanding the call node

  // These are the metrics we use to evaluate call sites:

  float         _count;   // How often do we expect to reach this site?
  float         _profit;  // How much time do we expect to save by inlining?
  float         _work;    // How long do we expect the average call to take?
  float         _size;    // How big do we expect the inlined code to be?

  float         _heat;    // Combined score inducing total order on call sites.
  WarmCallInfo* _next;    // Next cooler call info in pending queue.

  // Count is the number of times this call site is expected to be executed.
  // Large count is favorable for inlining, because the extra compilation
  // work will be amortized more completely.

  // Profit is a rough measure of the amount of time we expect to save
  // per execution of this site if we inline it.  (1.0 == call overhead)
  // Large profit favors inlining.  Negative profit disables inlining.

  // Work is a rough measure of the amount of time a typical out-of-line
  // call from this site is expected to take.  (1.0 == call, no-op, return)
  // Small work is somewhat favorable for inlining, since methods with
  // short "hot" traces are more likely to inline smoothly.

  // Size is the number of graph nodes we expect this method to produce,
  // not counting the inlining of any further warm calls it may include.
  // Small size favors inlining, since small methods are more likely to
  // inline smoothly.  The size is estimated by examining the native code
  // if available.  The method bytecodes are also examined, assuming
  // empirically observed node counts for each kind of bytecode.

  // Heat is the combined "goodness" of a site's inlining.  If we were
  // omniscient, it would be the difference of two sums of future execution
  // times of code emitted for this site (amortized across multiple sites if
  // sharing applies).  The two sums are for versions of this call site with
  // and without inlining.

  // We approximate this mythical quantity by playing with averages,
  // rough estimates, and assumptions that history repeats itself.
  // The basic formula count * profit is heuristically adjusted
  // by looking at the expected compilation and execution times
  // of the inlined call.

  // Note:  Some of these metrics may not be present in the final product,
  // but exist in development builds to experiment with inline policy tuning.

  // This heuristic framework does not model well the very significant
  // effects of multiple-level inlining.  It is possible to see no immediate
  // profit from inlining X->Y, but to get great profit from a subsequent
  // inlining X->Y->Z.

  // This framework also does not account well for the problem of N**2 code
  // size in a clique of mutually inlinable methods.
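
  // Illustrative sketch only (the actual adjustment lives in compute_heat(),
  // declared below, and its exact weighting is a tuning decision): a
  // first-order estimate along the lines described above would be
  //
  //   float naive_heat = count() * profit();   // benefit, if history repeats
  //
  // discounted by the compile-time cost implied by size() and by the limited
  // benefit available when work() is already small.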

  WarmCallInfo*  next() const          { return _next; }
  void       set_next(WarmCallInfo* n) { _next = n; }

  static WarmCallInfo* _always_hot;
  static WarmCallInfo* _always_cold;

 public:
  // Because WarmCallInfo objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource-marked or reset during the compile process.
  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { }  // fast deallocation
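
  // Illustrative use of the placement-new above (C stands for the current
  // Compile*):
  //
  //   WarmCallInfo* ci = new (C) WarmCallInfo();
  //
  // The storage comes from C->comp_arena() and is reclaimed with the arena,
  // which is why operator delete intentionally does nothing.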

  static WarmCallInfo* always_hot();
  static WarmCallInfo* always_cold();

  WarmCallInfo() {
    _call = NULL;
    _hot_cg = NULL;
    _next = NULL;
    _count = _profit = _work = _size = _heat = 0;
  }

  CallNode*  call()   const { return _call; }
  float      count()  const { return _count; }
  float      size()   const { return _size; }
  float      work()   const { return _work; }
  float      profit() const { return _profit; }
  float      heat()   const { return _heat; }

  void set_count(float x)  { _count = x; }
  void set_size(float x)   { _size = x; }
  void set_work(float x)   { _work = x; }
  void set_profit(float x) { _profit = x; }
  void set_heat(float x)   { _heat = x; }

  // Load initial heuristics from profiles, etc.
  // The heuristics can be tweaked further by the caller.
  void init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor);

  static float MAX_VALUE() { return +1.0e10; }
  static float MIN_VALUE() { return -1.0e10; }

  float compute_heat() const;

  void set_call(CallNode* call)      { _call = call; }
  void set_hot_cg(CallGenerator* cg) { _hot_cg = cg; }

  // Do not queue very hot or very cold calls.
  // Make very cold ones out of line immediately.
  // Inline very hot ones immediately.
  // These queries apply various tunable limits
  // to the above metrics in a systematic way.
  // Test for coldness before testing for hotness.
  bool is_cold() const;
  bool is_hot() const;
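
  // Illustrative decision sketch (assumed caller pattern, testing cold
  // before hot as noted above):
  //
  //   if (ci->is_cold())       { /* emit the out-of-line call now */ }
  //   else if (ci->is_hot())   { /* inline now via the hot CallGenerator */ }
  //   else                     { /* queue ci as a "medium" (warm) call site */ }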

  // Force a warm call to be hot.  This worklists the call node for inlining.
  void make_hot();

  // Force a warm call to be cold.  This worklists the call node for out-of-lining.
  void make_cold();

  // A reproducible total ordering, in which heat is the major key.
  bool warmer_than(WarmCallInfo* that);

  // List management.  These methods are called with the list head,
  // and return the new list head, inserting or removing the receiver.
  WarmCallInfo* insert_into(WarmCallInfo* head);
  WarmCallInfo* remove_from(WarmCallInfo* head);
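
  // Illustrative sketch (assumed usage): maintaining the pending queue with
  // a caller-owned head pointer, ordered warmest-first (each _next link
  // points to a cooler entry, per the comment above):
  //
  //   head = ci->insert_into(head);        // queue a warm call site in order
  //   WarmCallInfo* warmest = head;        // the warmest entry sits at the head
  //   head = warmest->remove_from(head);   // unlink it, e.g. before inlining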

#ifndef PRODUCT
  void print() const;
  void print_all() const;
  int count_all() const;
#endif
};