/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_OPTO_MEMNODE_HPP
#define SHARE_VM_OPTO_MEMNODE_HPP
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
// Portions of code courtesy of Clifford Click
class MultiNode;
class PhaseCCP;
class PhaseTransform;
//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
protected:
#ifdef ASSERT
#endif
public:
};
protected:
}
}
}
public:
// Helpers for the optimizer. Documented in memnode.cpp.
// This one should probably be a phase-specific function:
// Find any cast-away of null-ness and keep its control.
// Shared code for Ideal methods:
// Helper function for adr_type() implementations.
// Raw access function, to allow copying of adr_type efficiently in
// product builds and retain the debug info for debug builds.
#ifdef ASSERT
return _adr_type;
#else
return 0;
#endif
}
// Map a load or store opcode to its corresponding store opcode.
// (Return -1 if unknown.)
// What is the type of the value in memory? (T_VOID means "unspecified".)
virtual BasicType memory_type() const = 0;
virtual int memory_size() const {
#ifdef ASSERT
return type2aelembytes(memory_type(), true);
#else
return type2aelembytes(memory_type());
#endif
}
// Search through memory states which precede this node (load or store).
// Look for an exact match for the address, with no intervening
// aliased stores.
// Can this node (load or store) accurately see a stored value in
// the given memory state? (The state may or may not be in(Memory).)
#ifndef PRODUCT
#endif
};
//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
protected:
public:
}
// Polymorphic factory method:
// Handle algebraic identities here. If we have an identity, return the Node
// we are equivalent to. We look for Load of a Store.
// If the load is from Field memory and the pointer is non-null, we can
// zero out the control input.
// Split instance field load through Phi.
// Recover original value from boxed values
// Compute a new Type for this node. Basically we just do the pre-check,
// then call the virtual add() to set the type.
// Common methods for LoadKlass and LoadNKlass nodes.
virtual const Type *bottom_type() const;
// Following method is copied from TypeNode:
// If this node is in the hash table, make sure it doesn't need a rehash.
}
// Do not match memory edge
// Map a load opcode to its corresponding store opcode.
virtual int store_Opcode() const = 0;
// Check if the load's memory input is a Phi node with the same control.
#ifndef PRODUCT
#endif
#ifdef ASSERT
// Helper function to allow a raw load without control edge for some cases
#endif
protected:
};
//------------------------------LoadBNode--------------------------------------
// Load a byte (8bits signed) from memory
public:
virtual int Opcode() const;
};
//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8bits unsigned) from memory
public:
virtual int Opcode() const;
};
//------------------------------LoadUSNode-------------------------------------
public:
virtual int Opcode() const;
};
//------------------------------LoadSNode--------------------------------------
// Load a short (16bits signed) from memory
public:
virtual int Opcode() const;
};
//------------------------------LoadINode--------------------------------------
// Load an integer from memory
public:
virtual int Opcode() const;
};
//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
public:
virtual int Opcode() const;
};
//------------------------------LoadLNode--------------------------------------
// Load a long from memory
}
public:
bool require_atomic_access = false )
{}
virtual int Opcode() const;
static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
#ifndef PRODUCT
}
#endif
};
//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
public:
virtual int Opcode() const;
};
//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
public:
virtual int Opcode() const;
};
//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
public:
virtual int Opcode() const;
};
//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
public:
virtual int Opcode() const;
};
//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
public:
virtual int Opcode() const;
// depends_only_on_test is almost always true, and needs to be almost always
// true to enable key hoisting & commoning optimizations. However, for the
// special case of RawPtr loads from TLS top & end, the control edge carries
// the dependence preventing hoisting past a Safepoint instead of the memory
// edge. (An unfortunate consequence of having Safepoints not set Raw
// Memory; itself an unfortunate consequence of having Nodes which produce
// results (new raw memory state) inside of loops preventing all manner of
// other optimizations). Basically, it's ugly but so is the alternative.
// See comment in macro.cpp, around line 125 expand_allocate_common().
};
//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
public:
virtual int Opcode() const;
// depends_only_on_test is almost always true, and needs to be almost always
// true to enable key hoisting & commoning optimizations. However, for the
// special case of RawPtr loads from TLS top & end, the control edge carries
// the dependence preventing hoisting past a Safepoint instead of the memory
// edge. (An unfortunate consequence of having Safepoints not set Raw
// Memory; itself an unfortunate consequence of having Nodes which produce
// results (new raw memory state) inside of loops preventing all manner of
// other optimizations). Basically, it's ugly but so is the alternative.
// See comment in macro.cpp, around line 125 expand_allocate_common().
};
//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
public:
virtual int Opcode() const;
virtual bool depends_only_on_test() const { return true; }
// Polymorphic factory method:
};
//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
public:
virtual int Opcode() const;
virtual bool depends_only_on_test() const { return true; }
};
//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value
protected:
virtual bool depends_only_on_test() const { return false; }
public:
}
}
// Polymorphic factory method:
// If the store is to Field memory and the pointer is non-null, we can
// zero out the control input.
// Compute a new Type for this node. Basically we just do the pre-check,
// then call the virtual add() to set the type.
// Check for identity function on memory (Load then Store at same address)
// Do not match memory edge
// Map a store opcode to its corresponding own opcode, trivially.
// have all possible loads of the value stored been optimized away?
};
//------------------------------StoreBNode-------------------------------------
// Store byte to memory
public:
StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
virtual int Opcode() const;
};
//------------------------------StoreCNode-------------------------------------
public:
StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
virtual int Opcode() const;
};
//------------------------------StoreINode-------------------------------------
// Store int to memory
public:
StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
virtual int Opcode() const;
};
//------------------------------StoreLNode-------------------------------------
// Store long to memory
}
public:
bool require_atomic_access = false )
{}
virtual int Opcode() const;
static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
#ifndef PRODUCT
}
#endif
};
//------------------------------StoreFNode-------------------------------------
// Store float to memory
public:
StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
virtual int Opcode() const;
};
//------------------------------StoreDNode-------------------------------------
// Store double to memory
public:
StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
virtual int Opcode() const;
};
//------------------------------StorePNode-------------------------------------
// Store pointer to memory
public:
StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
virtual int Opcode() const;
};
//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
public:
StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
virtual int Opcode() const;
};
//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
private:
}
public:
StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
"bad oop alias idx");
}
virtual int Opcode() const;
};
//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
public:
virtual int Opcode() const;
virtual bool depends_only_on_test() const { return true; }
};
//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
public:
virtual int Opcode() const;
virtual bool is_CFG() const { return false; }
#ifndef PRODUCT
#endif
};
//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
private:
public:
LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
virtual bool depends_only_on_test() const { return false; }
virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
virtual const class TypePtr *adr_type() const { return _adr_type; } // returns bottom_type of address
bool result_not_used() const;
};
public:
enum {
};
};
//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked. Sets flags for success or failure of the store.
public:
StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
virtual int Opcode() const;
// Produces flags
};
//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked. Sets flags for success or failure of the store.
public:
StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
virtual int Opcode() const;
// Produces flags
};
//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked. Sets flags for success or failure of the store.
public:
StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
virtual int Opcode() const;
// Produces flags
};
//------------------------------CompareAndSwapLNode---------------------------
public:
CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
virtual int Opcode() const;
};
//------------------------------CompareAndSwapINode---------------------------
public:
CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
virtual int Opcode() const;
};
//------------------------------CompareAndSwapPNode---------------------------
public:
CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
virtual int Opcode() const;
};
//------------------------------CompareAndSwapNNode---------------------------
public:
CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
virtual int Opcode() const;
};
//------------------------------GetAndAddINode---------------------------
public:
GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
virtual int Opcode() const;
};
//------------------------------GetAndAddLNode---------------------------
public:
GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
virtual int Opcode() const;
};
//------------------------------GetAndSetINode---------------------------
public:
GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
virtual int Opcode() const;
};
//------------------------------GetAndSetLNode---------------------------
public:
GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
virtual int Opcode() const;
};
//------------------------------GetAndSetPNode---------------------------
public:
GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
virtual int Opcode() const;
};
//------------------------------GetAndSetNNode---------------------------
public:
GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
virtual int Opcode() const;
};
//------------------------------ClearArray-------------------------------------
public:
}
virtual int Opcode() const;
// ClearArray modifies array elements, and so affects only the
// array memory addressed by the bottom_type of its base address.
// Clear the given area of an object or array.
// The start offset must always be aligned mod BytesPerInt.
// The end offset must always be aligned mod BytesPerLong.
// Return the new memory.
// Return allocation input memory edge if it is different instance
// or itself if it is the one we are looking for.
};
//------------------------------StrIntrinsic-------------------------------
// Base class for Ideal nodes used in String intrinsic code.
public:
}
}
}
virtual bool depends_only_on_test() const { return false; }
};
//------------------------------StrComp-------------------------------------
public:
virtual int Opcode() const;
};
//------------------------------StrEquals-------------------------------------
public:
virtual int Opcode() const;
};
//------------------------------StrIndexOf-------------------------------------
public:
virtual int Opcode() const;
};
//------------------------------AryEq---------------------------------------
public:
virtual int Opcode() const;
};
//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model. Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them. We insert a MemBar-Acquire after a FastLock or
// volatile-load. Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them. We insert a MemBar-Release
// before a FastUnlock or volatile-store. All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
// Memory type this node is serializing. Usually either rawptr or bottom.
public:
enum {
};
virtual int Opcode() const = 0;
// Factory method. Builds a wide or narrow membar.
// Optional 'precedent' becomes an extra edge if not null.
};
// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache). Requires multi-cpu
// visibility. Inserted after a volatile load.
public:
virtual int Opcode() const;
};
// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load). Requires
// multi-cpu visibility. Inserted before a volatile store.
public:
virtual int Opcode() const;
};
// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache). Requires multi-cpu
// visibility. Inserted after a FastLock.
public:
virtual int Opcode() const;
};
// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load). Requires
// multi-cpu visibility. Inserted before a FastUnLock.
public:
virtual int Opcode() const;
};
public:
}
virtual int Opcode() const;
};
// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
public:
virtual int Opcode() const;
};
// Ordering within the same CPU. Used to order unsafe memory references
// inside the compiler when we lack alias info. Not needed "outside" the
// compiler because the CPU does all the ordering for us.
public:
virtual int Opcode() const;
};
// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
friend class AllocateNode;
enum {
Incomplete = 0,
};
int _is_complete;
bool _does_not_escape;
public:
enum {
};
virtual int Opcode() const;
// Manage incoming memory edges via a MergeMem on in(Memory):
// The raw memory edge coming directly from the Allocation.
// The contents of this memory are *always* all-zero-bits.
// Return the corresponding allocation for this initialization (or null if none).
// (Note: Both InitializeNode::allocation and AllocateNode::initialization
// are defined in graphKit.cpp, which sets up the bidirectional relation.)
// Anything other than zeroing in this init?
bool is_non_zero();
// An InitializeNode must be completed before macro expansion is done.
// Completion requires that the AllocateNode must be followed by
// initialization of the new memory to zero, then to any initializers.
// Mark complete. (Must not yet be complete.)
#ifdef ASSERT
// ensure all non-degenerate stores are ordered and non-overlapping
#endif //ASSERT
// See if this store can be captured; return offset where it initializes.
// Return 0 if the store cannot be moved (any sort of problem).
// Capture another store; reformat it to write my internal raw memory.
// Return the captured copy, else NULL if there is some sort of problem.
// Find captured store which corresponds to the range [start..start+size).
// Return my own memory projection (meaning the initial zero bits)
// if there is no such store. Return NULL if there is a problem.
// Called when the associated AllocateNode is expanded into CFG.
private:
void remove_extra_zeroes();
// Find out where a captured store should be placed (or already is placed).
};
//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
friend class MergeMemStream;
public:
// If the input is a whole memory state, clone it with all its slices intact.
// Otherwise, make a new memory state with just that base memory input.
// In either case, the result is a newly created MergeMem.
virtual int Opcode() const;
virtual const RegMask &out_RegMask() const;
// sparse accessors
// Fetch the previously stored "set_memory_at", or else the base memory.
// (Caller should clone it if it is a phi-nest.)
// set the memory, regardless of its previous value
// the "base" is the memory that provides the non-finite support
// warning: setting the base can implicitly set any of the other slices too
// sentinel value which denotes a copy of the base memory:
bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
// hook for the iterator, to perform any necessary setup
// push sentinels until I am at least as long as the other (semantic no-op)
#ifndef PRODUCT
#endif
};
private:
int _idx;
int _cnt;
int _cnt2;
// subsume_node will break sparseness at times, whenever a memory slice
// folds down to a copy of the base ("fat") memory. In such a case,
// the raw edge will update to base, although it should be top.
// This iterator will recognize either top or base_memory as an
// "empty" slice. See is_empty, is_empty2, and next below.
//
// The sparseness property is repaired in MergeMemNode::Ideal.
// As long as access to a MergeMem goes through this iterator
// or the memory_at accessor, flaws in the sparseness will
// never be observed.
//
// Also, iteration_setup repairs sparseness.
}
#ifdef ASSERT
if (at_base_memory())
return _mm->base_memory();
else
return _mm_base;
}
}
#endif
void assert_synch() const {
"no side-effects except through the stream");
}
public:
// expected usages:
// for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
// for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }
// iterate over one merge
mm->iteration_setup();
}
// iterate in parallel over two merges
// only iterates through non-empty elements of mm2
}
#ifdef ASSERT
~MergeMemStream() {
assert_synch();
}
#endif
return _mm;
}
return _mm_base;
}
return _mm2;
}
bool at_base_memory() const {
}
int alias_idx() const {
return _idx;
}
}
return C->get_adr_type(alias_idx());
}
bool is_empty() const {
}
bool is_empty2() const {
}
assert_synch();
return _mem;
}
// get the current memory, regardless of empty or non-empty status
// Use _mm_base to defend against updates to _mem->base_memory().
return mem;
}
return _mem2;
}
if (at_base_memory()) {
// Note that this does not change the invariant _mm_base.
} else {
}
assert_synch();
}
// Recover from a side effect to the MergeMemNode.
void set_memory() {
}
// next_non_empty2 can yield states where is_empty() is true
private:
// find the next item, which might be empty
assert_synch();
// Note: This iterator allows _mm to be non-sparse.
// It behaves the same whether _mem is top or base_memory.
if (have_mm2)
return true;
}
return false;
}
// find the next non-empty item
if (!is_empty()) {
// make sure _mem2 is filled in sensibly
return true;
return true; // is_empty() == true
}
}
return false;
}
};
//------------------------------Prefetch---------------------------------------
// Non-faulting prefetch load. Prefetch for many reads.
public:
virtual int Opcode() const;
};
// Non-faulting prefetch load. Prefetch for many reads & many writes.
public:
virtual int Opcode() const;
};
// Allocation prefetch which may fault, TLAB size have to be adjusted.
public:
virtual int Opcode() const;
virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};
#endif // SHARE_VM_OPTO_MEMNODE_HPP