/*
* Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
#include "incls/_precompiled.incl"
#include "incls/_macro.cpp.incl"
//
// Replace any references to "oldref" in inputs to "use" with "newref".
// Returns the number of replacements made.
//
int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
  int nreplacements = 0;
  uint req = use->req();
  for (uint j = 0; j < use->len(); j++) {
    Node *uin = use->in(j);
    if (uin == oldref) {
      if (j < req)
        use->set_req(j, newref);
      else
        use->set_prec(j, newref);
      nreplacements++;
    } else if (j >= req && uin == NULL) {
      break;
    }
  }
  return nreplacements;
}
// Copy debug information and adjust JVMState information
void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode *newcall) {
  uint old_dbg_start = oldcall->tf()->domain()->cnt();
  uint new_dbg_start = newcall->tf()->domain()->cnt();
  int jvms_adj = new_dbg_start - old_dbg_start;
  Dict* sosn_map = new Dict(cmpkey, hashkey);
  for (uint i = old_dbg_start; i < oldcall->req(); i++) {
    Node* old_in = oldcall->in(i);
    // Clone old SafePointScalarObjectNodes, adjusting their field contents.
    if (old_in != NULL && old_in->is_SafePointScalarObject()) {
      uint old_unique = C->unique();
      Node* new_in = old_in->as_SafePointScalarObject()->clone(jvms_adj, sosn_map);
      if (old_unique != C->unique()) {
        new_in = transform_later(new_in); // Register new node.
      }
      old_in = new_in;
    }
    newcall->add_req(old_in);
  }
  newcall->set_jvms(oldcall->jvms());
  for (JVMState *jvms = newcall->jvms(); jvms != NULL; jvms = jvms->caller()) {
    jvms->set_map(newcall);
    jvms->set_locoff(jvms->locoff() + jvms_adj);
    jvms->set_stkoff(jvms->stkoff() + jvms_adj);
    jvms->set_monoff(jvms->monoff() + jvms_adj);
    jvms->set_scloff(jvms->scloff() + jvms_adj);
    jvms->set_endoff(jvms->endoff() + jvms_adj);
  }
}
//------------------------------opt_iff----------------------------------------
// Optimize the fast-check IfNode.  Set the fast-path region slot 2.
// Return slow-path control.
Node* PhaseMacroExpand::opt_iff(Node* region, Node* iff) {
  IfNode *opt_iff = transform_later(iff)->as_If();

  // Fast path taken; set region slot 2
  Node *fast_taken = transform_later( new (C, 1) IfFalseNode(opt_iff) );
  region->init_req(2, fast_taken); // Capture fast-path

  // Fast path not-taken, i.e. slow path
  Node *slow_taken = transform_later( new (C, 1) IfTrueNode(opt_iff) );
  return slow_taken;
}
//--------------------copy_predefined_input_for_runtime_call--------------------
void PhaseMacroExpand::copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call) {
  // Set fixed predefined input arguments
  call->init_req( TypeFunc::Control,   ctrl );
  call->init_req( TypeFunc::I_O,       oldcall->in( TypeFunc::I_O) );
  call->init_req( TypeFunc::Memory,    oldcall->in( TypeFunc::Memory ) ); // may gc ptrs
  call->init_req( TypeFunc::ReturnAdr, oldcall->in( TypeFunc::ReturnAdr ) );
  call->init_req( TypeFunc::FramePtr,  oldcall->in( TypeFunc::FramePtr ) );
}
//------------------------------make_slow_call---------------------------------
CallNode* PhaseMacroExpand::make_slow_call(CallNode *oldcall, const TypeFunc* slow_call_type, address slow_call, const char* leaf_name, Node* slow_path, Node* parm0, Node* parm1) {
  // Slow-path call
  int size = slow_call_type->domain()->cnt();
  CallNode *call = leaf_name
    ? (CallNode*)new (C, size) CallLeafNode      ( slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM )
    : (CallNode*)new (C, size) CallStaticJavaNode( slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), oldcall->jvms()->bci(), TypeRawPtr::BOTTOM );

  // Slow path call has no side-effects, uses few values
  copy_predefined_input_for_runtime_call(slow_path, oldcall, call);
  if (parm0 != NULL)  call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != NULL)  call->init_req(TypeFunc::Parms+1, parm1);
  copy_call_debug_info(oldcall, call);
  call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
  _igvn.hash_delete(oldcall);
  _igvn.subsume_node(oldcall, call);
  transform_later(call);
  return call;
}
void PhaseMacroExpand::extract_call_projections(CallNode *call) {
  _fallthroughproj = NULL;
  _fallthroughcatchproj = NULL;
  _ioproj_fallthrough = NULL;
  _ioproj_catchall = NULL;
  _catchallcatchproj = NULL;
  _memproj_fallthrough = NULL;
  _memproj_catchall = NULL;
  _resproj = NULL;
  for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
    ProjNode *pn = call->fast_out(i)->as_Proj();
    switch (pn->_con) {
      case TypeFunc::Control:
      {
        // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
        _fallthroughproj = pn;
        DUIterator_Fast jmax, j = pn->fast_outs(jmax);
        const Node *cn = pn->fast_out(j);
        if (cn->is_Catch()) {
          ProjNode *cpn = NULL;
          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
            cpn = cn->fast_out(k)->as_Proj();
            assert(cpn->is_CatchProj(), "must be a CatchProjNode");
            if (cpn->_con == CatchProjNode::fall_through_index)
              _fallthroughcatchproj = cpn;
            else {
              assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
              _catchallcatchproj = cpn;
            }
          }
        }
        break;
      }
      case TypeFunc::I_O:
        if (pn->_is_io_use)
          _ioproj_catchall = pn;
        else
          _ioproj_fallthrough = pn;
        break;
      case TypeFunc::Memory:
        if (pn->_is_io_use)
          _memproj_catchall = pn;
        else
          _memproj_fallthrough = pn;
        break;
      case TypeFunc::Parms:
        _resproj = pn;
        break;
      default:
        assert(false, "unexpected projection from allocation node.");
    }
  }
}
//---------------------------set_eden_pointers-------------------------
void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) {
  if (UseTLAB) {                // Private allocation: load from TLS
    Node* thread = transform_later(new (C, 1) ThreadLocalNode());
    int tlab_top_offset = in_bytes(JavaThread::tlab_top_offset());
    int tlab_end_offset = in_bytes(JavaThread::tlab_end_offset());
    eden_top_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_top_offset);
    eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_end_offset);
  } else {                      // Shared allocation: load from globals
    CollectedHeap* ch = Universe::heap();
    address top_adr = (address)ch->top_addr();
    address end_adr = (address)ch->end_addr();
    eden_top_adr = makecon(TypeRawPtr::make(top_adr));
    eden_end_adr = basic_plus_adr(eden_top_adr, end_adr - top_adr);
  }
}
Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
  Node* adr = basic_plus_adr(base, offset);
  const TypePtr* adr_type = adr->bottom_type()->is_ptr();
  Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt);
  transform_later(value);
  return value;
}
Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) {
  Node* adr = basic_plus_adr(base, offset);
  const TypePtr* adr_type = adr->bottom_type()->is_ptr();
  mem = StoreNode::make(_igvn, ctl, mem, adr, adr_type, value, bt);
  transform_later(mem);
  return mem;
}
//=============================================================================
//
// A L L O C A T I O N
//
// Allocation attempts to be fast in the case of frequent small objects.
// It breaks down like this:
//
// 1) Size in doublewords is computed. This is a constant for objects and
// variable for most arrays. Doubleword units are used to avoid size
// overflow of huge doubleword arrays. We need doublewords in the end for
// rounding.
//
// 2) Size is checked for being 'too large'. Too-large allocations will go
// the slow path into the VM. The slow path can throw any required
// exceptions, and does all the special checks for very large arrays. The
// size test can constant-fold away for objects. For objects with
// finalizers it constant-folds the other way: you always go slow with
// finalizers.
//
// 3) If NOT using TLABs, this is the contended loop-back point.
// Load-Locked the heap top. If using TLABs normal-load the heap top.
//
// 4) Check that heap top + size*8 < max. If we fail go the slow route.
// NOTE: "top+size*8" cannot wrap the 4Gig line! Here's why: for largish
// "size*8" we always enter the VM, where "largish" is a constant picked small
// enough that there's always space between the eden max and 4Gig (old space is
// there so it's quite large) and large enough that the cost of entering the VM
// is dwarfed by the cost to initialize the space.
//
// 5) If NOT using TLABs, Store-Conditional the adjusted heap top back
// down. If contended, repeat at step 3. If using TLABs normal-store
// adjusted heap top back down; there is no contention.
//
// 6) If !ZeroTLAB then Bulk-clear the object/array. Fill in klass & mark
// fields.
//
// 7) Merge with the slow-path; cast the raw memory pointer to the correct
// oop flavor.
//
//=============================================================================
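// The seven steps above, restated as plain C++ rather than as C2 IR: purely
// an illustrative sketch of the fast path, not VM code. The names
// heap_top_addr, heap_end, cas_ptr and slow_allocate_in_vm are hypothetical
// stand-ins; only FastAllocateSizeLimit is a real flag.
#if 0
extern char** heap_top_addr;   // stand-in: address of the eden/TLAB top
extern char*  heap_end;        // stand-in: end of the allocation region
static void* fast_allocate(size_t size_in_dwords) {
  size_t size_in_bytes = size_in_dwords * 8;       // 1) size in doublewords
  if (size_in_dwords >= FastAllocateSizeLimit)     // 2) too large?  go slow
    return slow_allocate_in_vm(size_in_bytes);     //    (finalizers always do)
  for (;;) {                                       // 3) contended loop-back point
    char* old_top = *heap_top_addr;                //    (load-locked) heap top
    char* new_top = old_top + size_in_bytes;
    if (new_top >= heap_end)                       // 4) top + size*8 < max?
      return slow_allocate_in_vm(size_in_bytes);   //    no room: need a GC
    if (cas_ptr(heap_top_addr, old_top, new_top))  // 5) store-conditional it back
      return old_top;                              // 6) caller clears, sets headers
  }                                                // 7) and merges with slow path
}
#endif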
// FastAllocateSizeLimit value is in DOUBLEWORDS.
// Allocations bigger than this always go the slow route.
// This value must be small enough that allocation attempts that need to
// trigger exceptions go the slow route. Also, it must be small enough so
// that heap_top + size_in_bytes does not wrap around the 4Gig limit.
//=============================================================================
//
// %%% Here is an old comment from parseHelper.cpp; is it outdated?
// The allocator will coalesce int->oop copies away. See comment in
// coalesce.cpp about how this works. It depends critically on the exact
// code shape produced here, so if you are changing this code shape
// make sure the GC info for the heap-top is correct in and around the
// slow-path call.
//
void PhaseMacroExpand::expand_allocate_common(
            AllocateNode* alloc,            // allocation node to be expanded
            Node* length,                   // array length for an array allocation
            const TypeFunc* slow_call_type, // Type of slow call
            address slow_call_address       // Address of slow call
    )
{
// Load Eden::end. Loop invariant and hoisted.
//
// Note: We set the control input on "eden_end" and "old_eden_top" when using
// a TLAB to work around a bug where these values were being moved across
// a safepoint. These are not oops, so they cannot be included in the oop
// map, but they can be changed by a GC. The proper way to fix this would
// be to set the raw memory state when generating a SafepointNode. However
// this will require extensive changes to the loop optimization in order to
// prevent a degradation of the optimization.
// See comment in memnode.hpp, around line 227 in class LoadPNode.
// We need a Region and corresponding Phi's to merge the slow-path and fast-path results.
// they will not be used if "always_slow" is set
// The initial slow comparison is a size check, the comparison
// we want to do is a BoolTest::gt
bool always_slow = false;
int tv = _igvn.find_int_con(initial_slow_test, -1);
if (tv >= 0) {
  always_slow = (tv == 1);
  initial_slow_test = NULL;
} else {
  initial_slow_test = BoolNode::make_predicate(initial_slow_test, &_igvn);
}
if (DTraceAllocProbes) {
// Force slow-path allocation
always_slow = true;
}
// generate the initial test if necessary
if (initial_slow_test != NULL ) {
// Now make the initial failure test. Usually a too-big test but
// might be a TRUE for finalizers or a fancy class check for
// newInstance0.
// Plug the failing-too-big test into the slow-path region
} else { // No initial test, just fall into next case
toobig_false = ctrl;
}
// generate the fast allocation code unless we know that the initial test will always go slow
if (!always_slow) {
// allocate the Region and Phi nodes for the result
// We need a Region for the loop-back contended case.
if( UseTLAB ) {
} else {
// Now handle the passing-too-big test. We fall into the contended
// loop-back merge point.
}
// Load(-locked) the heap top.
// See note above concerning the control input when using a TLAB
Node *old_eden_top = UseTLAB
  ? new (C, 3) LoadPNode      ( ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM )
  : new (C, 3) LoadPLockedNode( contended_region, contended_phi_rawmem, eden_top_adr );
transform_later(old_eden_top);
// Add to heap top to get a new heap top
// Check for needing a GC; compare against heap end
IfNode *needgc_iff = new (C, 2) IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN );
// Plug the failing-heap-space-need-gc test into the slow-path region
if( initial_slow_test ) {
// This completes all paths into the slow merge point
} else { // No initial slow path needed!
// Just fall from the need-GC path straight into the VM call.
}
// No need for a GC. Setup for the Store-Conditional
// Grab regular I/O before optional prefetch may change it.
// Slow-path does no I/O so just set it to the original I/O.
// Store (-conditional) the modified eden top back down.
// StorePConditional produces flags for a test PLUS a modified raw
// memory state.
Node *store_eden_top;
if( UseTLAB ) {
  store_eden_top = new (C, 4) StorePNode( needgc_false, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, new_eden_top );
  transform_later(store_eden_top);
} else {
  store_eden_top = new (C, 5) StorePConditionalNode( needgc_false, contended_phi_rawmem, eden_top_adr, new_eden_top, old_eden_top );
  transform_later(store_eden_top);
  Node *contention_check = new (C, 2) BoolNode( store_eden_top, BoolTest::ne );
  transform_later(contention_check);
  store_eden_top = new (C, 1) SCMemProjNode(store_eden_top);
  transform_later(store_eden_top);
  // If not using TLABs, check to see if there was contention.
  IfNode *contention_iff = new (C, 2) IfNode ( needgc_false, contention_check, PROB_MIN, COUNT_UNKNOWN );
  transform_later(contention_iff);
// If contention, loopback and try again.
// Fast-path succeeded with no contention!
}
// Rename successful fast-path variables to make meaning more obvious
if (ExtendedDTraceProbes) {
  // Slow-path call
  int size = TypeFunc::Parms + 2;
  CallLeafNode *call = new (C, size) CallLeafNode(OptoRuntime::dtrace_object_alloc_Type(),
                                                  CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc_base),
                                                  "dtrace_object_alloc",
                                                  TypeRawPtr::BOTTOM);
  // Get base of thread-local storage area
  Node* thread = new (C, 1) ThreadLocalNode();
  transform_later(thread);
}
// Plug in the successful fast-path into the result merge point
} else {
slow_region = ctrl;
}
// Generate slow-path call
}
// Copy debug information and adjust JVMState information, then replace
// allocate node with the call
if (!always_slow) {
}
// Identify the output projections from the allocate node and
// adjust any references to them.
// The control and io projections look like:
//
// v---Proj(ctrl) <-----+ v---CatchProj(ctrl)
// Allocate Catch
// ^---Proj(io) <-------+ ^---CatchProj(io)
//
// We are interested in the CatchProj nodes.
//
// An allocate node has separate memory projections for the uses on the control and i_o paths
// Replace uses of the control memory projection with result_phi_rawmem (unless we are only generating a slow call)
// back up iterator
--i;
}
}
// Now change uses of _memproj_catchall to use _memproj_fallthrough and delete _memproj_catchall so
// we end up with a call that has only 1 memory projection
if (_memproj_catchall != NULL ) {
if (_memproj_fallthrough == NULL) {
  _memproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::Memory);
  transform_later(_memproj_fallthrough);
}
// back up iterator
--i;
}
}
// An allocate node has separate i_o projections for the uses on the control and i_o paths
// Replace uses of the control i_o projection with result_phi_i_o (unless we are only generating a slow call)
if (_ioproj_fallthrough == NULL) {
  _ioproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::I_O);
  transform_later(_ioproj_fallthrough);
} else if (!always_slow) {
// back up iterator
--i;
}
}
// Now change uses of _ioproj_catchall to use _ioproj_fallthrough and delete _ioproj_catchall so
// we end up with a call that has only 1 i_o projection
if (_ioproj_catchall != NULL ) {
// back up iterator
--i;
}
}
// if we generated only a slow call, we are done
if (always_slow)
return;
if (_fallthroughcatchproj != NULL) {
  ctrl = _fallthroughcatchproj->clone();
  transform_later(ctrl);
  _igvn.hash_delete(_fallthroughcatchproj);
  _igvn.subsume_node(_fallthroughcatchproj, result_region);
} else {
  ctrl = top();
}
Node *slow_result;
if (_resproj == NULL) {
  // no uses of the allocation result
  slow_result = top();
} else {
  slow_result = _resproj->clone();
  transform_later(slow_result);
  _igvn.hash_delete(_resproj);
  _igvn.subsume_node(_resproj, result_phi_rawoop);
}
// Plug slow-path into result merge point
// This completes all paths into the result merge point
}
// Helper for PhaseMacroExpand::expand_allocate_common.
// Initializes the newly-allocated storage.
Node*
PhaseMacroExpand::initialize_object(AllocateNode* alloc,
                                    Node* control, Node* rawmem, Node* object,
                                    Node* klass_node, Node* length,
                                    Node* size_in_bytes) {
// Store the klass & mark bits
Node* mark_node = NULL;
// For now only enable fast locking for non-array types
if (UseBiasedLocking && (length == NULL)) {
  mark_node = make_load(NULL, rawmem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeRawPtr::BOTTOM, T_ADDRESS);
} else {
  mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
}
rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_OBJECT);
int header_size = alloc->minimum_header_size();  // conservatively small

// Array length
if (length != NULL) {         // Arrays need length field
  rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
  // conservatively small header size:
  header_size = sizeof(arrayOopDesc);
  ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass();
  if (k->is_array_klass()) // we know the exact header size in most cases:
    header_size = Klass::layout_helper_header_size(k->layout_helper());
}
// Clear the object body, if necessary.
InitializeNode* init = alloc->initialization();
if (init == NULL) {
  // The init has somehow disappeared; be cautious and clear everything.
//
// This can happen if a node is allocated but an uncommon trap occurs
// immediately. In this case, the Initialize gets associated with the
// trap, and may be placed in a different (outer) loop, if the Allocate
// is in a loop. If (this is rare) the inner loop gets unrolled, then
// there can be two Allocates to one Initialize. The answer in all these
// edge cases is safety first. It is always safe to clear immediately
// within an Allocate, and then (maybe or maybe not) clear some more later.
  if (!ZeroTLAB)
    rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
                                          header_size, size_in_bytes,
                                          &_igvn);
} else {
  if (!init->is_complete()) {
    // Try to win by zeroing only what the init does not store.
    // We can also try to do some peephole optimizations,
    // such as combining some adjacent subword stores.
    rawmem = init->complete_stores(control, rawmem, object,
                                   header_size, size_in_bytes, &_igvn);
  }
  // We have no more use for this link, since the AllocateNode goes away:
  // (If we keep the link, it just confuses the register allocator,
  // who thinks he sees a real use of the address by the membar.)
  init->set_req(InitializeNode::RawAddress, top());
}
return rawmem;
}
// Generate prefetch instructions for next allocations.
Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
                                            Node*& contended_phi_rawmem,
                                            Node* old_eden_top, Node* new_eden_top,
                                            Node* length) {
   if( UseTLAB && AllocatePrefetchStyle == 2 ) {
      // Generate prefetch allocation with watermark check.
// As an allocation hits the watermark, we will prefetch starting
// at a "distance" away from watermark.
TypeRawPtr::BOTTOM );
// I/O is used for Prefetch
// check against new_eden_top
// true node, add prefetchdistance
// adding prefetches
}
i_o = pf_phi_abio;
} else if( AllocatePrefetchStyle > 0 ) {
// Insert a prefetch for each allocation only on the fast-path
// Generate several prefetch instructions only for arrays.
// Do not let it float too high, since if eden_top == eden_end,
// both might be null.
if( i == 0 ) { // Set control for first prefetch, next follows it
}
}
}
return i_o;
}
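// The watermark scheme above (AllocatePrefetchStyle == 2), restated as plain
// C++ purely as an illustrative sketch. pf_watermark is a hypothetical
// stand-in for the per-thread prefetch watermark, and __builtin_prefetch
// stands in for the PrefetchAllocation node; the AllocatePrefetch* names are
// the real flags.
#if 0
static void prefetch_past_watermark(char** pf_watermark, char* new_eden_top) {
  if (new_eden_top < *pf_watermark)
    return;                                       // below watermark: no prefetch
  char* pf = new_eden_top + AllocatePrefetchDistance;  // start "distance" away
  for (intx i = 0; i < AllocatePrefetchLines; i++) {
    __builtin_prefetch(pf, 1 /*write*/);          // prefetch one cache line
    pf += AllocatePrefetchStepSize;
  }
  *pf_watermark = pf;                             // push the watermark forward
}
#endif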
//---------------------------expand_allocate----------------------------------
void PhaseMacroExpand::expand_allocate(AllocateNode *alloc) {
  expand_allocate_common(alloc, NULL,
                         OptoRuntime::new_instance_Type(),
                         OptoRuntime::new_instance_Java());
}

//---------------------------expand_allocate_array----------------------------
void PhaseMacroExpand::expand_allocate_array(AllocateArrayNode *alloc) {
  Node* length = alloc->in(AllocateNode::ALength);
  expand_allocate_common(alloc, length,
                         OptoRuntime::new_array_Type(),
                         OptoRuntime::new_array_Java());
}
//---------------------------eliminate_locking_node---------------------------
// If we have determined that this lock/unlock can be eliminated, we simply
// eliminate the node without expanding it.
//
// Note:  The membars associated with the lock/unlock are currently not
//        eliminated.  This should be investigated as a future enhancement.
//
bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
  if (!alock->is_eliminated()) {
    return false;
  }
  // Mark the box lock as eliminated if all correspondent locks are eliminated
  // to construct correct debug info.
  BoxLockNode* box = alock->box_node()->as_BoxLock();
  if (!box->is_eliminated()) {
    bool eliminate = true;
    for (DUIterator_Fast imax, i = box->fast_outs(imax); i < imax; i++) {
      Node *lck = box->fast_out(i);
      if (lck->is_Lock() && !lck->as_AbstractLock()->is_eliminated()) {
        eliminate = false;
        break;
      }
    }
    if (eliminate)
      box->set_eliminated();
  }
#ifndef PRODUCT
  if (PrintEliminateLocks) {
    if (alock->is_Lock()) {
      tty->print_cr("++++ Eliminating: %d Lock", alock->_idx);
    } else {
      tty->print_cr("++++ Eliminating: %d Unlock", alock->_idx);
    }
  }
#endif
// There are 2 projections from the lock. The lock node will
// be deleted when its last use is subsumed below.
extract_call_projections(alock);
assert(alock->outcnt() == 2 && _fallthroughproj != NULL &&
       _memproj_fallthrough != NULL, "Unexpected projections from Lock/Unlock");
// The input to a Lock is merged memory, so extract its RawMem input
// (unless the MergeMem has been optimized away.)
// Search for MemBarAcquire node and delete it also.
}
// Search for MemBarRelease node and delete it also.
}
return true;
}
//------------------------------expand_lock_node----------------------
void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
  if (eliminate_locking_node(lock)) {
    return;
  }
  Node* obj = lock->obj_node();
  Node* box = lock->box_node();
  Node* flock = lock->fastlock_node();
// Make the merge point
// Optimize test; set region slot 2
// Make slow path call
CallNode *call = make_slow_call( (CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(), OptoRuntime::complete_monitor_locking_Java(), NULL, slow_path, obj, box );
// Slow path can only throw asynchronous exceptions, which are always
// de-opted. So the compiler thinks the slow-call can never throw an
// exception. If it DOES throw an exception we would need the debug
// info removed first (since if it throws there is no monitor).
// Capture slow path
// disconnect fall-through projection from call and create a new one
// hook up users of fall-through projection to region
// region inputs are now complete
// create a Phi for the memory state
}
//------------------------------expand_unlock_node----------------------
void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
  if (eliminate_locking_node(unlock)) {
    return;
  }
  Node* obj = unlock->obj_node();
  Node* box = unlock->box_node();
// No need for a null check on unlock
// Make the merge point
// Optimize test; set region slot 2
CallNode *call = make_slow_call( (CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), "complete_monitor_unlocking_C", slow_path, obj, box );
// No exceptions for unlocking
// Capture slow path
// disconnect fall-through projection from call and create a new one
// hook up users of fall-through projection to region
// region inputs are now complete
// create a Phi for the memory state
}
//------------------------------expand_macro_nodes----------------------
// Returns true if a failure occurred.
bool PhaseMacroExpand::expand_macro_nodes() {
if (C->macro_count() == 0)
return false;
// Make sure expansion will not cause node limit to be exceeded. Worst case is a
// macro node gets expanded into about 50 nodes. Allow 50% more for optimization
if (C->check_node_count(C->macro_count() * 75, "out of nodes before macro expansion"))
  return true;
// expand "macro" nodes
// nodes are removed from the macro list as they are processed
while (C->macro_count() > 0) {
Node * n = C->macro_node(0);
if (_igvn.type(n) == Type::TOP || n->in(0)->is_top()) {
  // node is unreachable, so don't try to expand it
  C->remove_macro_node(n);
  continue;
}
switch (n->class_id()) {
case Node::Class_Allocate:
expand_allocate(n->as_Allocate());
break;
case Node::Class_AllocateArray:
  expand_allocate_array(n->as_AllocateArray());
  break;
case Node::Class_Lock:
expand_lock_node(n->as_Lock());
break;
case Node::Class_Unlock:
expand_unlock_node(n->as_Unlock());
break;
default:
assert(false, "unknown node type in macro list");
}
if (C->failing()) return true;
}
return false;
}