/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLNODE_HPP
#define SHARE_VM_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0, this);
    init_req(1, root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; };
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
  static const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src, con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site. Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode. Restores registers and
// unwinds stack. Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit. (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
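//
// For example (illustrative only): if a call from method A to method B has
// been inlined, the map at a bci inside B carries a JVMState for B whose
// caller() is the JVMState for A; B's state has depth 2, while A's root
// state has depth 1.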
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute State

private:
  JVMState*      _caller;    // List pointer for forming scope chains
  uint           _depth;     // One more than caller depth, or one.
  uint           _locoff;    // Offset to locals in input edge mapping
  uint           _stkoff;    // Offset to stack in input edge mapping
  uint           _monoff;    // Offset to monitors in input edge mapping
  uint           _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint           _endoff;    // Offset to end of input edge mapping
  uint           _sp;        // Java Expression Stack Pointer for this state
  int            _bci;       // Byte Code Index of this JVM point
  ReexecuteState _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*      _method;    // Method Pointer
  SafePointNode* _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint locoff() const { return _locoff; }
  uint stkoff() const { return _stkoff; }
  uint argoff() const { return _stkoff + _sp; }
  uint monoff() const { return _monoff; }
  uint scloff() const { return _scloff; }
  uint endoff() const { return _endoff; }
  uint oopoff() const { return debug_end(); }

  int loc_size() const { return stkoff() - locoff(); }
  int stk_size() const { return monoff() - stkoff(); }
  int arg_size() const { return monoff() - argoff(); }
  int mon_size() const { return scloff() - monoff(); }
  int scl_size() const { return endoff() - scloff(); }

  bool is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool is_scl(uint i) const { return scloff() <= i && i < endoff(); }
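
  // Illustrative example (hypothetical values): with locoff() == 5, three
  // locals, a maximum stack of four slots of which sp() == 2 are in use, one
  // monitor and no scalarized objects, the offsets work out to stkoff() == 8,
  // argoff() == 10, monoff() == 12 and scloff() == endoff() == 14, so that
  // loc_size() == 3, stk_size() == 4, arg_size() == 2 and mon_size() == 2.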

  uint      sp()    const { return _sp; }
  int       bci()   const { return _bci; }
  bool      should_reexecute() const { return _reexecute == Reexecute_True; }
  bool      is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool      has_method() const { return _method != NULL; }
  ciMethod* method() const { assert(has_method(), ""); return _method; }
  JVMState* caller() const { return _caller; }
  SafePointNode* map() const { return _map; }
  uint      depth() const { return _depth; }
  uint      debug_start() const; // returns locoff of root caller
  uint      debug_end() const;   // returns endoff of self
  uint      debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                                  || (caller() && caller()->is_monitor_use(off)); }
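
  // Illustrative example (hypothetical values): with monoff() == 12 and
  // logMonitorEdges == 1, monitor 0 occupies inputs 12 (box) and 13 (object)
  // and monitor 1 occupies inputs 14 (box) and 15 (object), so
  // monitor_box_offset(1) == 14 and monitor_obj_offset(1) == 15.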

  // Initialization functions for the JVM
  void set_locoff(uint off) { _locoff = off; }
  void set_stkoff(uint off) { _stkoff = off; }
  void set_monoff(uint off) { _monoff = off; }
  void set_scloff(uint off) { _scloff = off; }
  void set_endoff(uint off) { _endoff = off; }
  void set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void set_map(SafePointNode *map) { _map = map; }
  void set_sp(uint sp) { _sp = sp; }
  // _reexecute is initialized to "undefined" for a new bci
  void set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller

#ifndef PRODUCT
  void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void dump_spec(outputStream *st) const;
  void dump_on(outputStream* st) const;
  void dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;  // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;     // Pointer to list of JVM State objects
  const TypePtr*  _adr_type; // What type of memory does this node produce?

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s; // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void set_local(JVMState* jvms, uint idx, Node *c);

  void set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0) grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control()   const { return in(TypeFunc::Control); }
  Node *i_o()       const { return in(TypeFunc::I_O); }
  Node *memory()    const { return in(TypeFunc::Memory); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr()  const { return in(TypeFunc::FramePtr); }

  void set_control( Node *c ) { set_req(TypeFunc::Control, c); }
  void set_i_o    ( Node *c ) { set_req(TypeFunc::I_O,     c); }
  void set_memory ( Node *c ) { set_req(TypeFunc::Memory,  c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here. (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode* next_exception() const;
  void set_next_exception(SafePointNode* n);
  bool has_exceptions() const { return next_exception() != NULL; }

  // Standard Node stuff
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase );
  virtual uint ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  static bool needs_polling_address_input();

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge index of a SafePoint node where
                     // states of the scalarized object fields are collected.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const; // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  uint first_index() const { return _first_index; }
  uint n_fields()    const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s". But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj". Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call". Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call. Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};
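
// Illustrative (hypothetical) use, assuming 'call' is a CallNode* already in
// the graph and the projections are gathered by CallNode::extract_projections():
//
//   CallProjections projs;
//   call->extract_projections(&projs, false /*separate_io_proj*/);
//   if (projs.resproj != NULL) {
//     // rewire users of the call's result projection to a replacement node
//   }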

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;
public:
  const TypeFunc* _tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()          const { return _tf; }
  const address   entry_point() const { return _entry_point; }
  const float     cnt()         const { return _cnt; }
  CallGenerator*  generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const = 0;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint? Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion, so when cloning
  // the node the JVMState must be cloned.
  virtual void clone_jvms() { } // default is not to clone

  // Returns true if the call may modify n
  virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or NULL if there is none.
  Node *result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple *r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else. Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj);

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention. (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  bool      _method_handle_invoke;
  ciMethod* _method; // Method being direct called
public:
  const int _bci;    // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci),
      _optimized_virtual(false),
      _method_handle_invoke(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int Opcode() const;
  ciMethod* method() const { return _method; }
  void set_method(ciMethod *m) { _method = m; }
  void set_optimized_virtual(bool f) { _optimized_virtual = f; }
  bool is_optimized_virtual() const { return _optimized_virtual; }
  void set_method_handle_invoke(bool f) { _method_handle_invoke = f; }
  bool is_method_handle_invoke() const { return _method_handle_invoke; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
  }
  const char *_name; // Runtime wrapper name

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf, address addr, ciMethod* method, int vtable_index, int bci )
    : CallJavaNode(tf, addr, method, bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name; // Printable name, if _method is NULL
  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int Opcode() const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
// get expanded into a code sequence containing a call. Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished by
// the _is_io_use flag in the projection.) This is needed when expanding the node in
// order to differentiate the uses of the projection on the normal control path from
// those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms, // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms, // size (in bytes) of the new object
    KlassNode,                     // type (maybe dynamic) of the obj.
    InitialTest,                   // slow-path test (may be constant)
    ALength,                       // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type() {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = TypeInt::INT; // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  bool _is_scalar_replaceable; // Result of Escape Analysis

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note: This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }
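
  // Illustrative (hypothetical) use of the offset-stripping variant, assuming
  // 'ptr' is a Node* that may address into a freshly allocated object:
  //
  //   intptr_t offset = 0;
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase, offset);
  //   if (alloc != NULL) {
  //     // 'ptr' points 'offset' bytes into the object created by 'alloc'
  //   }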

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Return the corresponding storestore barrier (or null if none).
  // Walks out edges to find it...
  MemBarStoreStoreNode* storestore();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val)
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
  }
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary.
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0, // Normal lock
    NonEscObj,   // Lock is used for a non-escaping object
    Coarsened,   // Lock was coarsened
    Nested       // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node* obj_node()      const { return in(TypeFunc::Parms + 0); }
  Node* box_node()      const { return in(TypeFunc::Parms + 1); }
  Node* fastlock_node() const { return in(TypeFunc::Parms + 2); }
  void set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested;    set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};
0N/A
0N/A//------------------------------Lock---------------------------------------
0N/A// High-level lock operation
0N/A//
0N/A// This is a subclass of CallNode because it is a macro node which gets expanded
0N/A// into a code sequence containing a call. This node takes 3 "parameters":
0N/A// 0 - object to lock
0N/A// 1 - a BoxLockNode
0N/A// 2 - a FastLockNode
0N/A//
0N/Aclass LockNode : public AbstractLockNode {
0N/Apublic:
0N/A
0N/A static const TypeFunc *lock_type() {
0N/A // create input type (domain)
0N/A const Type **fields = TypeTuple::fields(3);
0N/A fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
0N/A fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock
0N/A fields[TypeFunc::Parms+2] = TypeInt::BOOL; // FastLock
0N/A const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);
0N/A
0N/A // create result type (range)
0N/A fields = TypeTuple::fields(0);
0N/A
0N/A const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
0N/A
0N/A return TypeFunc::make(domain,range);
0N/A }
0N/A
0N/A virtual int Opcode() const;
0N/A virtual uint size_of() const; // Size is bigger
0N/A LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
0N/A init_class_id(Class_Lock);
0N/A init_flags(Flag_is_macro);
0N/A C->add_macro_node(this);
0N/A }
0N/A virtual bool guaranteed_safepoint() { return false; }
0N/A
0N/A virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
0N/A // Expansion modifies the JVMState, so we need to clone it
0N/A virtual void clone_jvms() {
0N/A set_jvms(jvms()->clone_deep(Compile::current()));
0N/A }
3057N/A
3057N/A bool is_nested_lock_region(); // Is this Lock nested?
0N/A};
0N/A
0N/A//------------------------------Unlock---------------------------------------
0N/A// High-level unlock operation
0N/Aclass UnlockNode : public AbstractLockNode {
0N/Apublic:
0N/A virtual int Opcode() const;
0N/A virtual uint size_of() const; // Size is bigger
0N/A UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
0N/A init_class_id(Class_Unlock);
0N/A init_flags(Flag_is_macro);
0N/A C->add_macro_node(this);
0N/A }
0N/A virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
0N/A // unlock is never a safepoint
0N/A virtual bool guaranteed_safepoint() { return false; }
0N/A};
1879N/A
1879N/A#endif // SHARE_VM_OPTO_CALLNODE_HPP