/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "opto/addnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"
//----------------------------GraphKit-----------------------------------------
// Main utility constructor.
_gvn(*C->initial_gvn())
{
}
// Private constructor for parser.
_gvn(*C->initial_gvn())
{
_exceptions = NULL;
}
//---------------------------clean_stack---------------------------------------
// Clear away rubbish from the stack area of the JVM state.
// This destroys any arguments that may be waiting on the stack.
}
}
}
//--------------------------------sync_jvms-----------------------------------
// Make sure our current jvms agrees with our parse state.
return jvms;
}
//--------------------------------sync_jvms_for_reexecute---------------------
// Make sure our current jvms agrees with our parse state. This version
// uses the reexecute_sp for reexecuting bytecodes.
return jvms;
}
#ifdef ASSERT
return true;
}
return true;
}
// Local helper checks for special internal merge points
// used to accumulate and merge exception states.
// They are marked by the region's in(0) edge being the map itself.
// Such merge points must never "escape" into the parser at large,
// until they have been handed to gvn.transform.
}
}
}
}
#endif
//---------------------------stop_and_kill_map---------------------------------
// Set _map to NULL, signalling a stop to further bytecode execution.
// First smash the current map's control to a constant, to mark it dead.
}
}
//--------------------------------stopped--------------------------------------
// Tell if _map is NULL, or control is top.
else return false;
}
//-----------------------------has_ex_handler----------------------------------
// Tell if this method or any caller method has exception handlers.
return true;
}
}
return false;
}
//------------------------------save_ex_oop------------------------------------
// Save an exception without blowing stack contents or other JVM state.
}
return ex_oop;
}
//-----------------------------saved_ex_oop------------------------------------
// Recover a saved exception from its map.
return common_saved_ex_oop(ex_map, false);
}
//--------------------------clear_saved_ex_oop---------------------------------
// Erase a previously saved exception from its map.
return common_saved_ex_oop(ex_map, true);
}
#ifdef ASSERT
//---------------------------has_saved_ex_oop----------------------------------
// Tell whether a previously saved exception is present in the map.
}
#endif
//-------------------------make_exception_state--------------------------------
// Turn the current JVM state into an exception state, appending the ex_oop.
sync_jvms();
return ex_map;
}
//--------------------------add_exception_state--------------------------------
// Add an exception to my list of exceptions.
return;
}
#ifdef ASSERT
if (has_exceptions()) {
assert(ex_map->jvms()->same_calls_as(_exceptions->jvms()), "all collected exceptions must come from the same place");
}
#endif
// If there is already an exception of exactly this type, merge with it.
// In particular, null-checks and other low-level exceptions common up here.
// No action needed.
return;
}
// We check sp also because call bytecodes can generate exceptions
// both before and after arguments are popped!
return;
}
}
// No pre-existing exception of the same type. Chain it on the list.
}
//-----------------------add_exception_states_from-----------------------------
}
}
}
//-----------------------transfer_exceptions_into_jvms-------------------------
// We need a JVMS to carry the exceptions, but the map has gone away.
// Create a scratch JVMS, cloned from any of the exception states...
if (has_exceptions()) {
_map = _exceptions;
debug_only(verify_map());
} else {
// ...or created from scratch
set_all_memory(top());
}
// (This is a kludge, in case you didn't notice.)
set_control(top());
}
return jvms;
}
}
}
}
//-----------------------combine_exception_states------------------------------
// This helper function combines exception states by building phis on a
// specially marked state-merging region. These regions and phis are
// untransformed, and can build up gradually. The region is marked by
// having a control input of its exception map, rather than NULL. Such
// regions do not appear except in this function, and in use_exception_state.
if (failing()) return; // dying anyway...
// The control input is not (yet) a specially-marked region in phi_map.
// Make it so, and build some phis.
}
}
// Either or both of phi_map and ex_map might already be converted into phis.
// if there is special marking on ex_map also, we add multiple edges from src
// how wide was the destination phi_map, originally?
if (add_multiple) {
} else {
// ex_map has no merges, so we just add single edges everywhere
}
// get a copy of the base memory, and patch some inputs into it
// Prepare to append interesting stuff onto the newly sliced phi:
}
// Append stuff from ex_map:
if (add_multiple) {
} else {
}
}
// Skip everything in the JVMS after tos. (The ex_oop follows.)
// Prepare to append interesting stuff onto the new phi:
} else {
}
// Both are phis.
} else {
}
}
}
}
}
}
//--------------------------use_exception_state--------------------------------
// Special marking for internal ex-states. Process the phis now.
// Note: Setting the jvms also sets the bci and sp.
}
}
}
}
}
} else {
}
return ex_oop;
}
//---------------------------------java_bc-------------------------------------
else
}
bool must_throw) {
// if the exception capability is set, then we will generate code
// to check the JavaThread.should_post_on_exceptions flag to see
// if we actually need to report exception events (for this
// thread). If we don't need to report exception events, we will
// take the normal fast path provided by add_exception_events. If
// exception event reporting is enabled for this thread, we will
// take the uncommon_trap in the BuildCutout below.
// first must access the should_post_on_exceptions_flag in this thread's JavaThread
Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, false);
// Test the should_post_on_exceptions_flag vs. 0
// Branch to slow_path if should_post_on_exceptions_flag was true
// Do not try anything fancy if we're notifying the VM on every throw.
// Cf. case Bytecodes::_athrow in parse2.cpp.
}
}
//------------------------------builtin_throw----------------------------------
bool must_throw = true;
if (env()->jvmti_can_post_on_exceptions()) {
// check if we must post exception events, take uncommon trap if so
// here if should_post_on_exceptions is false
// continue on with the normal codegen
}
// If this particular condition has not yet happened at this
// bytecode, then use the uncommon trap mechanism, and allow for
// a future recompilation if several traps occur here.
// If the throw is hot, try to use a more complicated inline mechanism
// which keeps execution inside the compiled code.
bool treat_throw_as_hot = false;
if (ProfileTraps) {
if (too_many_traps(reason)) {
treat_throw_as_hot = true;
}
// (If there is no MDO at all, assume it is early in
// execution, and that any deopts are part of the
// startup transient, and don't need to be remembered.)
// Also, if there is a local exception handler, treat all throws
// as hot if there has been at least one in this method.
if (C->trap_count(reason) != 0
&& has_ex_handler()) {
treat_throw_as_hot = true;
}
}
// If this throw happens frequently, an uncommon trap might cause
// a performance pothole. If there is a local exception handler,
// and if this particular bytecode appears to be deoptimizing often,
// let us handle the throw inline, with a preconstructed instance.
// Note: If the deopt count has blown up, the uncommon trap
// runtime is going to flush this nmethod, no matter what.
&& (!StackTraceInThrowable || OmitStackTraceInFastThrow)) {
// If the throw is local, we use a pre-existing instance and
// punt on the backtrace. This would lead to a missing backtrace
// (a repeat of 4292742) if the backtrace object is ever asked
// for its backtrace.
// Fixing this remaining case of 4292742 requires some flavor of
// escape analysis. Leave that for the future.
switch (reason) {
case Deoptimization::Reason_null_check:
break;
case Deoptimization::Reason_div0_check:
break;
case Deoptimization::Reason_range_check:
break;
case Deoptimization::Reason_class_check:
} else {
}
break;
}
// Cheat with a preallocated exception object.
// Clear the detail message of the preallocated exception object.
// Weblogic sometimes mutates the detail message of exceptions
// using reflection.
return;
}
}
// %%% Maybe add entry to OptoRuntime which directly throws the exc.?
// It won't be much cheaper than bailing to the interp., since we'll
// have to pass up all the debug-info, and the runtime will have to
// create the stack trace.
// Usual case: Bail to interpreter.
// Reserve the right to recompile if we haven't seen anything yet.
|| C->too_many_traps(reason))) {
// We cannot afford to take more traps here. Suffer in the interpreter.
C->trap_count(reason));
}
// "must_throw" prunes the JVM state to include only the stack, if there
// are no local exception handlers. This should cut down on register
// allocation time and code size, by drastically reducing the number
// of in-edges on the call to the uncommon trap.
}
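// Illustrative sketch of the caller side (names are placeholders): a failing
// check path typically hands off to builtin_throw and then treats its own
// path as dead, e.g.
//
//   if (stopped())  return;      // path already dead, nothing to throw
//   builtin_throw(Deoptimization::Reason_null_check);
//   // builtin_throw has either set up an uncommon trap or an inline throw of
//   // a preallocated exception, as described above; either way this control
//   // path does not fall through.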
//----------------------------PreserveJVMState---------------------------------
#ifdef ASSERT
#endif
}
#ifdef ASSERT
#endif
}
//-----------------------------BuildCutout-------------------------------------
{
}
BuildCutout::~BuildCutout() {
}
//---------------------------PreserveReexecuteState----------------------------
}
}
//------------------------------clone_map--------------------------------------
// Implementation of PreserveJVMState
//
// Only clone_map(...) here. If this function is only used in the
// PreserveJVMState class we may want to get rid of this extra
// function eventually and do it all there.
// Clone the memory edge first
return clonemap;
}
//-----------------------------set_map_clone-----------------------------------
_map = m;
debug_only(verify_map());
}
//----------------------------kill_dead_locals---------------------------------
// Detect any locals which are known to be dead, and force them to top.
// Consult the liveness information for the locals. If any
// of them are unused, then they can be replaced by top(). This
// should help register allocation time and cut down on the size
// of the deoptimization information.
// This call is made from many of the bytecode handling
// subroutines called from the Big Switch in do_one_bytecode.
// Every bytecode which might include a slow path is responsible
// for killing its dead locals. The more consistent we
// are about killing deads, the fewer useless phis will be
// constructed for them at various merge points.
// bci can be -1 (InvocationEntryBci). We return the entry
// liveness for the method.
// We are building a graph for a call to a native method.
// All locals are live.
return;
}
// Consult the liveness information for the locals. If any
// of them are unused, then they can be replaced by top(). This
// should help register allocation time and cut down on the size
// of the deoptimization information.
}
}
}
#ifdef ASSERT
//-------------------------dead_locals_are_killed------------------------------
// Return true if all dead locals are set to top in the map.
// Used to assert "clean" debug info at various points.
// No locals need to be dead, so all is as it should be.
return true;
}
// Make sure somebody called kill_dead_locals upstream.
}
// This method is trivial, or is poisoned by a breakpoint.
return true;
}
return false;
}
}
}
return true;
}
#endif //ASSERT
// Helper function for enforcing certain bytecodes to reexecute if
// deoptimization happens
// Reexecute _multianewarray bytecode which was replaced with
// sequence of [a]newarray. See Parse::do_multianewarray().
//
// Note: interpreter should not have it set since this optimization
// is limited by dimensions and guarded by flag so in some cases
// multianewarray() runtime calls will be generated and
// the bytecode should not be reexecuted (stack will not be reset).
} else
return false;
}
// Helper function for adding JVMState and debug information to node
// Add the safepoint edges to the call (or other safepoint).
// Make sure dead locals are set to top. This
// should help register allocation time and cut down on the size
// of the deoptimization information.
// Walk the inline list to fill in the correct set of JVMState's
// Also fill in the associated edges for each JVMState.
// If the bytecode needs to be reexecuted we need to put
// the arguments back on the stack.
// NOTE: set_bci (called from sync_jvms) might reset the reexecute bit to
// undefined if the bci is different. This is normal for Parse but it
// should not happen for LibraryCallKit because only one bci is processed.
"in LibraryCallKit the reexecute bit should not change");
// If we are guaranteed to throw, we can prune everything but the
// input to the current bytecode.
bool can_prune_locals = false;
if (must_throw) {
can_prune_locals = true;
}
}
if (env()->jvmti_can_access_local_variables()) {
// At any safepoint, this method can get breakpointed, which would
// then require an immediate deoptimization.
can_prune_locals = false; // do not prune locals
}
// do not scribble on the input jvms
// For a known set of bytecodes, the interpreter should reexecute them if
// deoptimization happens. We set the reexecute state for them here
}
// Presize the call:
// Set up edges so that the call looks like this:
// Call [state:] ctl io mem fptr retadr
// [parms:] parm0 ... parmN
// [root:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
// [...mid:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
// [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
// Note that caller debug info precedes callee debug info.
// Fill pointer walks backwards from "young:" to "root:" in the diagram above:
// Loop over the map input edges associated with jvms, add them
// to the call node, & reset all offsets to match call node array.
uint j, k, l;
if (can_prune_locals) {
// If the current throw can reach an exception handler in this JVMS,
// then we must keep everything live that can reach that handler.
// As a quick and dirty approximation, we look for any handlers at all.
can_prune_locals = false;
}
}
// Add the Locals
out_jvms->set_locoff(p);
if (!can_prune_locals) {
for (j = 0; j < l; j++)
} else {
p += l; // already set to top above by add_req_batch
}
// Add the Expression Stack
out_jvms->set_stkoff(p);
if (!can_prune_locals) {
for (j = 0; j < l; j++)
} else if (can_prune_locals && stack_slots_not_pruned != 0) {
// Divide stack into {S0,...,S1}, where S0 is set to top.
stack_slots_not_pruned = 0; // for next iteration
p += s0; // skip the tops preinstalled by add_req_batch
for (j = s0; j < l; j++)
} else {
p += l; // already set to top above by add_req_batch
}
// Add the Monitors
out_jvms->set_monoff(p);
for (j = 0; j < l; j++)
// Copy any scalar object fields.
out_jvms->set_scloff(p);
for (j = 0; j < l; j++)
// Finish the new jvms.
out_jvms->set_endoff(p);
// Update the two tail pointers in parallel.
}
// Test the correctness of JVMState::debug_xxx accessors:
}
}
int rsize = 0;
if (rtype < T_CONFLICT)
}
switch (code) {
return false;
inputs = 0;
break;
case Bytecodes::_getstatic:
case Bytecodes::_putstatic:
{
bool ignored_will_link;
if (is_get) {
} else {
}
}
break;
case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
case Bytecodes::_invokedynamic:
case Bytecodes::_invokeinterface:
{
bool ignored_will_link;
ciMethod* ignored_callee = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
}
break;
case Bytecodes::_multianewarray:
{
}
break;
break;
inputs = 0;
break;
default:
// bytecode produces a typed result
break;
}
#ifdef ASSERT
// spot check
switch (code) {
}
#endif //ASSERT
return true;
}
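// Worked example for the bookkeeping above: for a getfield of an int field,
// the popped inputs are just the receiver, so inputs == 1, the pushed result
// is one stack word, and the net change in stack depth is 1 - 1 == 0.  For an
// invokevirtual of int foo(int), inputs == 2 (receiver plus one argument) and
// the result is one word, so the depth change is 1 - 2 == -1.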
//------------------------------basic_plus_adr---------------------------------
// short-circuit a common case
}
// short-circuit a common case
}
}
// short-circuit a common case
return intcon((int) offset_con);
}
}
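// Illustrative sketch (the offset name is a placeholder): basic_plus_adr
// always produces the AddP shape used for field and array addressing,
//
//   AddP(base, ptr, offset)
//
// with the short-circuits above returning 'ptr' unchanged for a constant zero
// offset.  A typical use for a field at a known byte offset from an oop:
//
//   Node* adr = basic_plus_adr(obj, off_in_bytes);   // AddP(obj, obj, #off)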
//-------------------------load_object_klass-----------------------------------
// Special-case a fresh allocation to avoid building nodes:
}
//-------------------------load_array_length-----------------------------------
// Special-case a fresh allocation to avoid building nodes:
} else {
}
}
return alen;
}
//------------------------------do_null_check----------------------------------
// Helper function to do a NULL pointer check. Returned value is
// the incoming address with NULL casted away. You are allowed to use the
// not-null value only if you are control dependent on the test.
extern int explicit_null_checks_inserted,
// optional arguments for variations:
bool assert_null,
Node* *null_control) {
// For some performance testing, we may wish to suppress null checking.
return value;
}
// Construct NULL check
switch(type) {
case T_ARRAY : // fall through
case T_OBJECT : {
// Only for do_null_check, not any of its siblings:
// Usually, any field access or invocation on an unloaded oop type
// will simply fail to link, since the statically linked class is
// likely also to be unloaded. However, in -Xcomp mode, sometimes
// the static class is loaded but the sharper oop type is not.
// Rather than checking for this obscure case in lots of places,
// we simply observe that a null check on an unloaded class
// will always be followed by a nonsense operation, so we
// can just issue the uncommon trap here.
// Our access to the unloaded class will only be correct
// after it has been loaded and initialized, which requires
// a trip through the interpreter.
#ifndef PRODUCT
#endif
return top();
}
if (assert_null) {
// See if the type is contained in NULL_PTR.
// If so, then the value is already null.
return value; // Elided null assert quickly!
}
} else {
// See if mixing in the NULL pointer changes type.
// If so, then the NULL pointer was not allowed in the original
// type. In other words, "value" was not-null.
// same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
return value; // Elided null check quickly!
}
}
break;
}
default:
}
//-----------
// if peephole optimizations occurred, a prior test existed.
// If a prior test existed, maybe it dominates as we can avoid this test.
// At this point we want to scan up the CFG to see if we can
// find an identical test (and so avoid this test altogether).
int depth = 0;
// Found prior test. Use "cast_not_null" to construct an identical
// CastPP (and hence hash to) as already exists for the prior test.
// Return that casted value.
if (assert_null) {
return null(); // do not issue the redundant test
}
return res;
}
depth++;
}
}
//-----------
// Branch to failure if null
if (assert_null)
else
// %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
// ciMethodData::has_trap_at will return a conservative -1 if any
// must-be-null assertion has failed. This could cause performance
// problems for a method after its first do_null_assert failure.
// Consider using 'Reason_class_check' instead?
// To cause an implicit null check, we set the not-null probability
// to the maximum (PROB_MAX). For an explicit check the probability
// is set to a smaller value.
// probability is less likely
} else if (!assert_null &&
(ImplicitNullCheckThreshold > 0) &&
>= (uint)ImplicitNullCheckThreshold)) {
}
if (null_control != NULL) {
(*null_control) = null_true;
} else {
// Check for optimizer eliding test at parse time
if (stopped()) {
// Failure not possible; do not bother making uncommon trap.
} else if (assert_null) {
NULL, "assert_null");
} else {
}
}
// Must throw exception, fall-thru not possible?
if (stopped()) {
return top(); // No result
}
if (assert_null) {
// Cast obj to null on this path.
}
// Cast obj to not-null on this path, if there is no null_control.
// (If there is a null_control, a non-null value may come back to haunt us.)
}
return value;
}
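// Illustrative sketch (assumes the usual null_check() wrapper over this
// helper; 'receiver' is a placeholder): the common caller pattern is
//
//   receiver = null_check(receiver);   // null is cast away on the live path
//   if (stopped())  return;            // only the null path remained
//
// after which 'receiver' may be used as a not-null value, provided the user
// stays control-dependent on the test, as noted above.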
//------------------------------cast_not_null----------------------------------
// Cast obj to not-null on this path
// Object is already not-null?
if( t == t_not_null ) return obj;
// Scan for instances of 'obj' in the current JVM mapping.
// These instances are known to be not-null after the test.
if (do_replace_in_map)
return cast; // Return casted value
}
//--------------------------replace_in_map-------------------------------------
// Note: This operation potentially replaces any edge
// on the map. This includes locals, stack, and monitors
// of the current (innermost) JVM state.
// We can consider replacing in caller maps.
// The idea would be that an inlined function's null checks
// can be shared with the entire inlining tree.
// The expense of doing this is that the PreserveJVMState class
// would have to preserve caller states too, with a deep copy.
}
//=============================================================================
//--------------------------------memory---------------------------------------
return p;
}
//-----------------------------reset_memory------------------------------------
// do not use this node for any more parsing!
}
//------------------------------set_all_memory---------------------------------
}
//------------------------------set_all_memory_call----------------------------
}
//=============================================================================
//
// parser factory methods for MemNodes
//
// These are layered on top of the factory methods in LoadNode and StoreNode,
// and integrate with the parser's memory state and _gvn engine.
//
// factory methods in "int adr_idx"
int adr_idx,
bool require_atomic_access) {
} else {
}
}
int adr_idx,
bool require_atomic_access) {
} else {
}
// Back-to-back stores can only remove intermediate store with DU info
// so push on worklist for optimizer.
return st;
}
const TypeOopPtr* val_type,
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
break;
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableExtension:
case BarrierSet::ModRef:
break;
case BarrierSet::Other:
default :
}
}
bool use_precise) {
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
break;
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableExtension:
break;
case BarrierSet::ModRef:
break;
case BarrierSet::Other:
default :
}
}
const TypeOopPtr* val_type,
bool use_precise) {
// Transformation of a value which could be NULL pointer (CastPP #NULL)
// could be delayed during Parse (for example, in adjust_map_after_if()).
// Execute transformation here to avoid barrier generation in such case.
pre_barrier(true /* do_load */,
NULL /* pre_val */,
bt);
return store;
}
// Could be an array or object we don't know at compile time (unsafe ref.)
if (adr_type->isa_instptr()) {
// known field. This code is a copy of the do_put_xxx logic.
} else {
}
}
} else if (adr_type->isa_aryptr()) {
}
}
}
//-------------------------array_element_address-------------------------
// short-circuit a common case (saves lots of confusing waste motion)
if (idx_con >= 0) {
}
// must be correct type for alignment purposes
#ifdef _LP64
// The scaled index operand to AddP must be a clean 64-bit value.
// Java allows a 32-bit int to be incremented to a negative
// value, which appears in a 64-bit register as a large
// positive number. Using that large positive number as an
// operand in pointer arithmetic has bad consequences.
// On the other hand, 32-bit overflow is rare, and the possibility
// can often be excluded, if we annotate the ConvI2L node with
// a type assertion that its value is known to be a small positive
// number. (The prior range check has ensured this.)
// This assertion is used by ConvI2LNode::Ideal.
#endif
}
//-------------------------load_array_element-------------------------
return ld;
}
//-------------------------set_arguments_for_java_call-------------------------
// Arguments (pre-popped from the stack) are taken from the JVMS.
// Add the call arguments:
}
}
//---------------------------set_edges_for_java_call---------------------------
// Connect a newly created call into the current JVMS.
// A return value node (if any) is returned from set_edges_for_java_call.
void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
// Add the predefined inputs:
set_control(top());
return;
}
// Re-use the current map to produce the result.
//return xcall; // no need, caller already has it
}
// Capture the return value, if any.
// Note: Since any out-of-line call can produce an exception,
// we always insert an I_O projection from the call into the result.
if (separate_io_proj) {
// The caller requested separate projections be used by the fall
// through and exceptional paths, so replace the projections for
// the fall through path.
}
return ret;
}
//--------------------set_predefined_input_for_runtime_call--------------------
// Reading and setting the memory state is way conservative here.
// The real problem is that I am not doing real Type analysis on memory,
// so I cannot distinguish card mark stores from other stores. Across a GC
// point the Store Barrier and the card mark memory has to agree. I cannot
// have a card mark store and its barrier split across the GC point from
// either above or below. Here I get that to happen by reading ALL of memory.
// A better answer would be to separate out card marks from other memory.
// For now, return the input memory state, so that it can be reused
// after the call, if this call has restricted memory effects.
// Set fixed predefined input arguments
return memory;
}
//-------------------set_predefined_output_for_runtime_call--------------------
// Set control and memory (not i_o) from the call.
// If keep_mem is not NULL, use it for the output state,
// except for the RawPtr output of the call, if hook_mem is TypeRawPtr::BOTTOM.
// If hook_mem is NULL, this call produces no memory effects at all.
// If hook_mem is a Java-visible memory slice (such as arraycopy operands),
// then only that memory slice is taken from the call.
// In the last case, we must put an appropriate memory barrier before
// the call, so as to create the correct anti-dependencies on loads
// preceding the call.
// no i/o
if (keep_mem) {
// First clone the existing memory state
// Make memory for the call
// We also use hook_mem to extract specific effects from arraycopy stubs.
}
// ...else the call has NO memory effects.
// Make sure the call advertises its memory effects precisely.
// This lets us build accurate anti-dependences in gcm.cpp.
"call node must be constructed correctly");
} else {
// This is not a "slow path" call; all memory comes from the call.
}
}
// Replace the call with the current state of the kit.
if (has_exceptions()) {
}
// Find all the needed outputs of this call
// Replace all the old call edges with the edges from the inlining result
}
}
}
// Replace the result with the new result if it exists and is used
}
// No exception edges to simply kill off those paths
}
}
}
// Replace the old exception object with top
}
} else {
// Load my combined exception state into the kit, with all phis transformed:
}
}
}
// Replace the old exception object with the newly created one
}
}
// Disconnect the call from the graph
// Clean up any MergeMems that feed other MergeMems since the
// optimizer doesn't like that.
if (final_mem->is_MergeMem()) {
}
}
}
}
}
//------------------------------increment_counter------------------------------
// for statistics: increment a VM counter by 1
}
}
//------------------------------uncommon_trap----------------------------------
// Bail out to the interpreter in mid-method. Implemented by calling the
// uncommon_trap blob. This helper function inserts a runtime call with the
// right debug info.
bool must_throw,
bool keep_exact_action) {
if (stopped()) return; // trap reachable?
// Note: If ProfileTraps is true, and if a deopt. actually
// occurs here, the runtime will make sure an MDO exists. There is
// no need to call method()->ensure_method_data() at this point.
// Set the stack pointer to the right value for reexecution:
set_sp(reexecute_sp());
#ifdef ASSERT
if (!must_throw) {
// Make sure the stack has at least enough depth to execute
// the current bytecode.
}
}
#endif
switch (action) {
case Deoptimization::Action_reinterpret:
// Temporary fix for 6529811 to allow virtual calls to be sure they
// get the chance to go from mono->bi->mega
if (!keep_exact_action &&
// This BCI is causing too many recompilations.
} else {
C->set_trap_can_recompile(true);
}
break;
C->set_trap_can_recompile(true);
break;
#ifdef ASSERT
case Deoptimization::Action_none:
break;
default:
break;
#endif
}
if (TraceOptoParse) {
trap_request), bci());
}
trap_request));
}
// Make sure any guarding test views this path as very unlikely
if (f > PROB_UNLIKELY_MAG(4))
} else {
if (f < PROB_LIKELY_MAG(4))
}
}
// Clear out dead values from the debug info.
// Now insert the uncommon trap subroutine call
// Pass the index of the class to be loaded
(must_throw ? RC_MUST_THROW : 0),
"must extract request correctly from the graph");
// The debug info is the only real input to this call.
// Halt-and-catch fire here. The above call should never return!
}
//--------------------------just_allocated_object------------------------------
// Report the object that was just allocated.
// It must be the case that there are no intervening safepoints.
// We use this to determine if an object is so "fresh" that
// it does not require card marks.
if (C->recent_alloc_ctl() == current_control)
return C->recent_alloc_obj();
return NULL;
}
// (Note: TypeFunc::make has a cache that makes this fast.)
for (int j = 0; j < nargs; j++) {
// If any parameters are doubles, they must be rounded before
// the call, dstore_rounding does gvn.transform
set_argument(j, arg);
}
}
}
// A non-strict method may return a double value which has an extended
// exponent, but this must not be visible in a caller which is 'strict'
// If a strict caller invokes a non-strict callee, round a double result
// Destination method's return value is on top of stack
// dstore_rounding() does gvn.transform
}
}
// rounding for strict float precision conformance
: n;
}
// rounding for strict double precision conformance
: n;
}
// rounding for non-strict double stores
&& UseSSE <= 1
: n;
}
//=============================================================================
// [foo] indicates that 'foo' is a parameter
//
// [in] NULL
// \ /
// CmpP
// Bool ne
// If
// / \
// True False-<2>
// / |
// / cast_not_null
// Load | | ^
// [fast_test] | |
// gvn to opt_test | |
// / \ | <1>
// True False |
// | \\ |
// [slow_call] \[fast_result]
// Ctl Val \ \
// | \ \
// Catch <1> \ \
// / \ ^ \ \
// Ex No_Ex | \ \
// | \ \ | \ <2> \
// ... \ [slow_res] | | \ [null_result]
// \ \--+--+--- | |
// \ | / \ | /
// --------Region Phi
//
//=============================================================================
// Code is structured as a series of driver functions all called 'do_XXX' that
// call a set of helper functions. Helper functions first, then drivers.
//------------------------------null_check_oop---------------------------------
// Null check oop. Set null-path control into Region in slot 3.
// Make a cast-not-nullness use the other not-null control. Return cast.
bool never_see_null) {
// Initial NULL check taken path
(*null_control) = top();
// Generate uncommon_trap:
// If we see an unexpected null at a check-cast we record it and force a
// recompile; the offending check-cast will be compiled to handle NULLs.
// If we see more than one offending BCI, then all checkcasts in the
// method will be compiled to handle NULLs.
PreserveJVMState pjvms(this);
}
// Cast away null-ness on the result
return cast;
}
//------------------------------opt_iff----------------------------------------
// Optimize the fast-check IfNode. Set the fast-path region slot 2.
// Return slow-path control.
// Fast path taken; set region slot 2
// Fast path not-taken, i.e. slow path
return slow_taken;
}
//-----------------------------make_runtime_call-------------------------------
const char* call_name,
// The following parms are all optional.
// The first NULL ends the list.
// Slow-path call
}
if (!is_leaf) {
} else {
}
// The following is similar to set_edges_for_java_call,
// except that the memory effects of the call are restricted to AliasIdxRaw.
// Slow path call has no side-effects, uses few values
if (wide_in) {
} else {
prev_mem = reset_memory();
}
// Hook each parm in order. Stop looking at the first NULL.
/* close each nested if ===> */ } } } } } } } }
if (!is_leaf) {
// Non-leaves can block and take safepoints:
}
// Non-leaves can throw exceptions:
if (has_io) {
}
if (flags & RC_UNCOMMON) {
// Set the count to a tiny probability. Cf. Estimate_Block_Frequency.
// (An "if" probability corresponds roughly to an unconditional count.
// Sort of.)
}
if (wide_out) {
// Slow path call has full side-effects.
} else {
}
if (has_io) {
}
return call;
}
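// Illustrative sketch of a typical leaf call built with the helper above
// (the entry point, TypeFunc and argument names are placeholders):
//
//   Node* call = make_runtime_call(RC_LEAF | RC_NARROW_MEM,   // leaf; raw memory only
//                                  call_type,                  // TypeFunc* for the C signature
//                                  CAST_FROM_FN_PTR(address, some_c_entry),
//                                  "some_c_entry",
//                                  TypeRawPtr::BOTTOM,         // adr_type of the memory effect
//                                  arg0, arg1);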
//------------------------------merge_memory-----------------------------------
// Merge memory from one path into the current memory state.
#ifdef ASSERT
// Caller is responsible for ensuring that any pre-existing
// phis are already aware of old memory.
#endif
} else {
}
}
}
}
//------------------------------make_slow_call_ex------------------------------
// Make the exception handler hookups for the slow call
if (stopped()) return;
// Make a catch node with just two handlers: fall-through and catch-all
Node* norm = _gvn.transform( new (C) CatchProjNode(catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci) );
Node* excp = _gvn.transform( new (C) CatchProjNode(catc, CatchProjNode::catch_all_index, CatchProjNode::no_handler_bci) );
{ PreserveJVMState pjvms(this);
// Create an exception state also.
// Use an exact type if the caller has specified a specific exception.
const Type* ex_type = TypeOopPtr::make_from_klass_unique(ex_klass)->cast_to_ptr_type(TypePtr::NotNull);
}
}
// Get the no-exception control from the CatchNode.
}
//-------------------------------gen_subtype_check-----------------------------
// Generate a subtyping check. Takes as input the subtype and supertype.
// Returns 2 values: sets the default control() to the true path and returns
// the false path. Only reads invariant memory; sets no (visible) memory.
// The PartialSubtypeCheckNode sets the hidden 1-word cache in the encoding
// but that's not exposed to the optimizer. This call also doesn't take in an
// Object; if you wish to check an Object you need to load the Object's class
// prior to coming here.
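// Illustrative sketch of the contract above (klass node names are
// placeholders):
//
//   Node* not_subtype_ctrl = gen_subtype_check(obj_klass, superklass);
//   // control() now follows the succeeding ("is a subtype") path;
//   // not_subtype_ctrl is the control to use when the check fails.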
// Fast check for identical types, perhaps identical constants.
// The types can even be identical non-constants, in cases
// involving Array.newInstance, Object.clone, etc.
if (subklass == superklass)
return top(); // false path is dead; no test needed.
// In the common case of an exact superklass, try to fold up the
// test before generating code. You may ask, why not just generate
// the code and then let it fold up? The answer is that the generated
// code will necessarily include null checks, which do not always
// completely fold away. If they are also needless, then they turn
// into a performance loss. Example:
// Foo[] fa = blah(); Foo x = fa[0]; fa[1] = x;
// Here, the type of 'fa' is often exact, so the store check
// of fa[1]=x will fold up, without testing the nullness of x.
case SSC_always_false:
{
set_control(top());
return always_fail;
}
case SSC_always_true:
return top();
case SSC_easy_test:
{
// Just do a direct pointer compare and be done.
}
case SSC_full_test:
break;
default:
}
}
// %%% Possible further optimization: Even if the superklass is not exact,
// if the subklass is the unique subtype of the superklass, the check
// will always succeed. We could leave a dependency behind to ensure this.
// First load the super-klass's check-offset
Node *chk_off = _gvn.transform( new (C) LoadINode( NULL, memory(p1), p1, _gvn.type(p1)->is_ptr() ) );
// Load from the sub-klass's super-class display list, or a 1-word cache of
// the secondary superclass list, or a failing value with a sentinel offset
// if the super-klass is an interface or exceptionally deep in the Java
// hierarchy and we have to scan the secondary superclass list the hard way.
// Worst-case type is a little odd: NULL is allowed as a result (usually
// klass loads can never produce a NULL).
// For some types like interfaces the following loadKlass is from a 1-word
// cache which is mutable so can't use immutable memory. Other
// types load from the super-class display table which is immutable.
Node *nkls = _gvn.transform( LoadKlassNode::make( _gvn, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL ) );
// Compile speed common case: ARE a subtype and we canNOT fail
if( superklass == nkls )
return top(); // false path is dead; no test needed.
// See if we get an immediate positive hit. Happens roughly 83% of the
// time. Test to see if the value loaded just previously from the subklass
// is exactly the superklass.
// Compile speed common case: Check for being deterministic right now. If
// chk_off is a constant and not equal to cacheoff then we are NOT a
// subklass. In this case we need exactly the 1 test above and we can
// return those results immediately.
if (!might_be_cache) {
return not_subtype_ctrl;
}
// Gather the various success & failures here
// Check for immediate negative hit. Happens roughly 11% of the time (which
// is roughly 63% of the remaining cases). Test to see if the loaded
// check-offset points into the subklass display list or the 1-element
// cache. If it points to the display (and NOT the cache) and the display
// missed then it's not a subtype.
// Check for self. Very rare to get here, but it is taken 1/3 the time.
// No performance impact (too rare) but allows sharing of secondary arrays
// which has some footprint reduction.
// -- Roads not taken here: --
// We could also have chosen to perform the self-check at the beginning
// of this code sequence, as the assembler does. This would not pay off
// the same way, since the optimizer, unlike the assembler, can perform
// static type analysis to fold away many successful self-checks.
// Non-foldable self checks work better here in second position, because
// the initial primary superclass check subsumes a self-check for most
// types. An exception would be a secondary type like array-of-interface,
// which does not appear in its own primary supertype display.
// Finally, we could have chosen to move the self-check into the
// PartialSubtypeCheckNode, and from there out-of-line in a platform
// dependent manner. But it is worthwhile to have the check here,
// where it can perhaps be optimized.  The cost in code space is
// small (register compare, branch).
// Now do a linear scan of the secondary super-klass array. Again, no real
// performance impact (too rare) but it's gotta be done.
// Since the code is rarely used, there is no penalty for moving it
// out of line, and it can only improve I-cache density.
// The decision to inline or out-of-line this final check is platform
// dependent, and is found in the AD file definition of PartialSubtypeCheck.
// Return false path; set default control to true path.
}
//----------------------------static_subtype_check-----------------------------
// Shortcut important common cases when superklass is exact:
// (0) superklass is java.lang.Object (can occur in reflective code)
// (1) subklass is already limited to a subtype of superklass => always ok
// (2) subklass does not overlap with superklass => always fail
// (3) superklass has NO subtypes and we can check with a simple compare.
if (StressReflectiveCode) {
return SSC_full_test; // Let caller generate the general case.
}
return SSC_always_true; // (0) this test cannot fail
}
if (superelem->is_array_klass())
return SSC_always_true; // (1) false path dead; no dynamic test needed
}
return SSC_always_false;
}
}
// If casting to an instance klass, it must have no subtypes
if (superk->is_interface()) {
// Cannot trust interfaces yet.
// %%% S.B. superk->nof_implementors() == 1
} else if (superelem->is_instance_klass()) {
// Add a dependency if there is a chance of a later subclass.
}
return SSC_easy_test; // (3) caller can do a simple ptr comparison
}
} else {
// A primitive array type has no subtypes.
return SSC_easy_test; // (3) caller can do a simple ptr comparison
}
return SSC_full_test;
}
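// Worked examples for the cases above: a superklass of java.lang.Object gives
// SSC_always_true (case 0); so does casting a value statically known to be a
// String to java.lang.String (case 1).  Checking that String value against an
// unrelated final class gives SSC_always_false (case 2).  A final superklass
// with no subtypes, such as java.lang.String, can be decided by the caller
// with a single klass pointer compare, SSC_easy_test (case 3).  Interfaces
// always fall back to SSC_full_test, since they cannot be trusted statically.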
// Profile-driven exact type check:
float prob,
Node* *casted_receiver) {
// Subsume downstream occurrences of receiver with a cast to
// recv_xtype, since now we know what the type will be.
// (User must make the replace_in_map call.)
return fail;
}
//------------------------------seems_never_null-------------------------------
// Use null_seen information if it is available from the profile.
// If we see an unexpected null at a type check we record it and force a
// recompile; the offending check will be recompiled to handle NULLs.
// If we see several offending BCIs, then all checks in the
// method will be recompiled.
if (UncommonNullCast // Cutout for this technique
) {
// Edge case: no mature data. Be optimistic here.
return true;
// If the profile has not seen a null, assume it won't happen.
}
return false;
}
//------------------------maybe_cast_profiled_receiver-------------------------
// If the profile has seen exactly one type, narrow to exactly that type.
// Subsequent type checks will always fold up.
ciKlass* require_klass) {
// Make sure we haven't already deoptimized from this tactic.
return NULL;
// (No, this isn't a call, but it's enough like a virtual call
// to use the same ciMethod accessor to get the profile info...)
profile.has_receiver(0) &&
if (require_klass == NULL ||
// If we narrow the type to match what the type profile sees,
// we can then remove the rest of the cast.
// This is a win, even if the exact_kls is very specific,
// because downstream operations, such as method calls,
// will often benefit from the sharper type.
&exact_obj);
{ PreserveJVMState pjvms(this);
}
return exact_obj;
}
// assert(ssc == SSC_always_true)... except maybe the profile lied to us.
}
return NULL;
}
//-------------------------------gen_instanceof--------------------------------
// Generate an instance-of idiom. Used by both the instance-of bytecode
// and the reflective instance-of call.
kill_dead_locals(); // Benefit all the uncommon traps
"must check for not-null not-dead klass in callers");
// Make the merge point
C->set_has_split_ifs(true); // Has chance for split-if optimization
}
// Null check; get casted pointer; set region slot 3
// If not_null_obj is dead, only null-path is taken
if (stopped()) { // Doing instance-of on a NULL?
return intcon(0);
}
// Do this eagerly, so that pattern matches like is_diamond_phi
// will work even during parsing.
}
if (stopped()) { // Profile disagrees with this path.
return intcon(0);
}
}
// Load the object's klass
// Generate the subtype check
// Plug in the success path to the general merge in slot 1.
// Plug in the failing path to the general merge in slot 2.
// Return final merged results
}
//-------------------------------gen_checkcast---------------------------------
// Generate a checkcast idiom. Used by both the checkcast bytecode and the
// array store bytecode. Stack must be as-if BEFORE doing the bytecode so the
// uncommon-trap paths work. Adjust stack after this call.
// If failure_control is supplied and not null, it is filled in with
// the control edge for the cast failure. Otherwise, an appropriate
// uncommon trap or exception is thrown.
Node* *failure_control) {
kill_dead_locals(); // Benefit all the uncommon traps
// Fast cutout: Check the case that the cast is vacuously true.
// This detects the common cases where the test will short-circuit
// away completely. We do this before we perform the null check,
// because if the test is going to turn into zero code, we don't
// want a residual null check left around. (Causes a slowdown,
// for example, in some objArray manipulations, such as a[i]=a[j].)
case SSC_always_true:
return obj;
case SSC_always_false:
// It needs a null check because a null will *pass* the cast check.
// A non-null value will always produce an exception.
return null_assert(obj);
}
}
}
"interpreter profiles type checks only for these BCs");
}
// Make the merge point
C->set_has_split_ifs(true); // Has chance for split-if optimization
// Use null-cast information if it is available
// Null check; get casted pointer; set region slot 3
// If not_null_obj is dead, only null-path is taken
if (stopped()) { // Doing instance-of on a NULL?
return null();
}
// Do this eagerly, so that pattern matches like is_diamond_phi
// will work even during parsing.
}
// Counter has never been decremented (due to cast failure).
// ...This is a reasonable thing to expect. It is true of
// all casts inserted by javac to implement generic types.
(*failure_control) = top();
// adjust the type of the phi to the exact klass:
}
}
// Load the object's klass
// Generate the subtype check
// Plug in success path into the merge
not_null_obj, toop));
// Failure path ends in uncommon trap (or may be dead - failure impossible)
if (failure_control == NULL) {
PreserveJVMState pjvms(this);
}
} else {
}
}
// A merge of NULL or Casted-NotNull obj
// Note I do NOT always 'replace_in_map(obj,result)' here.
// if( tk->klass()->can_be_primary_super() )
// This means that if I successfully store an Object into an array-of-String
// I 'forget' that the Object is really now known to be a String. I have to
// do this because we don't have true union types for interfaces - if I store
// a Baz into an array-of-Interface and then tell the optimizer it's an
// Interface, I forget that it's also a Baz and cannot do Baz-like field
// references to it. FIX THIS WHEN UNION TYPES APPEAR!
// replace_in_map( obj, res );
// Return final merged results
return res;
}
//------------------------------next_monitor-----------------------------------
// What number should be given to the next monitor?
// Keep the toplevel high water mark current:
return current;
}
//------------------------------insert_mem_bar---------------------------------
// Memory barrier to avoid floating things around
// The membar serves as a pinch point between both control and all memory slices.
return membar;
}
//-------------------------insert_mem_bar_volatile----------------------------
// Memory barrier to avoid floating things around
// The membar serves as a pinch point between both control and memory(alias_idx).
// If you want to make a pinch point on all memory slices, do not use this
// function (even with AliasIdxBot); use insert_mem_bar() instead.
// When Parse::do_put_xxx updates a volatile field, it appends a series
// of MemBarVolatile nodes, one for *each* volatile field alias category.
// The first membar is on the same memory slice as the field store opcode.
// This forces the membar to follow the store. (Bug 6500685 broke this.)
// All the other membars (for other volatile slices, including AliasIdxBot,
// which stands for all unknown volatile slices) are control-dependent
// on the first membar. This prevents later volatile loads or stores
// from sliding up past the just-emitted store.
} else {
}
} else {
}
return membar;
}
//------------------------------shared_lock------------------------------------
// Emit locking code.
// bci is either a monitorenter bc or InvocationEntryBci
// %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
if( !GenerateSynchronizationCode )
return NULL; // Not locking things?
if (stopped()) // Dead monitor?
return NULL;
// Box the stack location
// Create the counters for this fast lock.
}
// Add monitor to debug info for the slow path. If we block inside the
// slow path and de-opt, we need the monitor hanging around
// lock has no side-effects, sets few values
// Add this to the worklist so that the lock can be eliminated
#ifndef PRODUCT
if (PrintLockStatistics) {
// Update the counter for this lock. Don't bother using an atomic
// operation since we don't require absolute accuracy.
}
#endif
return flock;
}
//------------------------------shared_unlock----------------------------------
// Emit unlocking code.
// bci is either a monitorexit bc or InvocationEntryBci
// %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
if( !GenerateSynchronizationCode )
return;
if (stopped()) { // Dead monitor?
return;
}
// Memory barrier to avoid floating things down past the locked region
// unlock has no side-effects, sets few values
// Kill monitor from debug info
map()->pop_monitor( );
}
//-------------------------------get_layout_helper-----------------------------
// If the given klass is a constant or known to be an array,
// fetch the constant layout helper value into constant_value
// and return (Node*)NULL. Otherwise, load the non-constant
// layout helper value, and return the node which represents it.
// This two-faced routine is useful because allocation sites
// almost always feature constant types.
}
}
}
}
// We just put in an allocate/initialize with a big raw-memory effect.
// Hook selected additional alias categories on the initialization.
Node* init_out_raw) {
}
//---------------------------set_output_for_allocation-------------------------
const TypeOopPtr* oop_type) {
// create memory projection for i_o
// create a memory projection as for the normal control path
// a normal slow-call doesn't change i_o, but an allocation does
// we create a separate i_o projection for the normal control path
// put in an initialization barrier
rawoop)->as_Initialize();
{
// Extract memory strands which may participate in the new object's
// initialization, and source them from the new InitializeNode.
// This will allow us to observe initializations when they occur,
// and link them properly (as a group) to the InitializeNode.
if (oop_type->isa_aryptr()) {
} else if (oop_type->isa_instptr()) {
continue; // do not bother to track really large numbers of fields
// Find (or create) the alias category for this field:
}
}
}
// Cast raw oop to the real thing...
#ifdef ASSERT
{ // Verify that the AllocateNode::Ideal_allocation recognizers work:
"Ideal_allocation works");
"Ideal_allocation works");
if (alloc->is_AllocateArray()) {
"Ideal_allocation works");
"Ideal_allocation works");
} else {
}
}
#endif //ASSERT
return javaoop;
}
//---------------------------new_instance--------------------------------------
// This routine takes a klass_node which may be constant (for a static type)
// or may be non-constant (for reflective code). It will work equally well
// for either, and the graph will fold nicely if the optimizer later reduces
// the type to a constant.
// The optional arguments are for specialized use by intrinsics:
// - If 'extra_slow_test' is not null, it is an extra condition for the slow path.
// - If 'return_size_val', report the total object size to the caller.
Node* *return_size_val) {
// Compute size in doublewords
// The size is always an integral number of doublewords, represented
// as a positive bytewise size stored in the klass's layout_helper.
// The layout_helper also encodes (in a low bit) the need for a slow path.
// Generate the initial go-slow test. It's either ALWAYS (return a
// Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
// case) a computed value derived from the layout_helper.
if (layout_is_con) {
} else { // reflective case
// This reflective path is used by Unsafe.allocateInstance.
// (It may be stress-tested by specifying StressReflectiveCode.)
// Basically, we want to get into the VM if there's an illegal argument.
if (extra_slow_test != intcon(0)) {
}
// (Macro-expander will further convert this to a Bool, if necessary.)
}
// Find the size in bytes. This is easy; it's the layout_helper.
// The size value must be valid even if the slow path is taken.
if (layout_is_con) {
} else { // reflective case
// This reflective path is used by clone and Unsafe.allocateInstance.
// Clear the low bits to extract layout_helper_size_in_bytes:
}
if (return_size_val != NULL) {
(*return_size_val) = size;
}
// This is a precise notnull oop of the klass.
// (Actually, it need not be precise if this is a reflective allocation.)
// It's what we cast the result to.
// Now generate allocation code
// The entire memory state is needed for slow path of the allocation
// since GC and deoptimization can happen.
}
//-------------------------------new_array-------------------------------------
// helper for both newarray and anewarray
// The 'length' parameter is (obviously) the length of the array.
// See comments on new_instance for the meaning of the other arguments.
int nargs, // number of arguments to push back for uncommon trap
Node* *return_size_val) {
if (!layout_is_con && !StressReflectiveCode &&
// This is a reflective array creation site.
// Optimistically assume that it is a subtype of Object[],
// so that we can fold up all the address arithmetic.
}
layout_val = NULL;
layout_is_con = true;
}
// Generate the initial go-slow test. Make sure we do not overflow
// if length is huge (near 2Gig) or negative! We do not need
// exact double-words here, just a close approximation of needed
// double-words. We can't add any offset or rounding bits, lest we
// take a size -1 of bytes and make it positive. Use an unsigned
// compare, so negative sizes look hugely positive.
if (layout_is_con) {
// Increase the size limit if we have exact knowledge of array type.
}
if (initial_slow_test->is_Bool()) {
// Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
}
// --- Size Computation ---
// array_size = round_to_heap(array_header + (length << elem_shift));
// where round_to_heap(x) == round_to(x, MinObjAlignmentInBytes)
// and round_to(x, y) == ((x + y-1) & ~(y-1))
// The rounding mask is strength-reduced, if possible.
// (T_BYTE has the weakest alignment and size restrictions...)
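// Worked example (assuming a 16-byte array header and 8-byte heap alignment):
// for an int[] of length 10,
//   array_size = round_to(16 + (10 << 2), 8) = round_to(56, 8) = 56 bytes,
// and for a byte[] of length 3,
//   array_size = round_to(16 + (3 << 0), 8) = round_to(19, 8) = 24 bytes.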
if (layout_is_con) {
round_mask = 0; // strength-reduce it if it goes away completely
} else {
}
if (layout_is_con) {
if (eshift != 0)
} else {
// There is no need to mask or shift this value.
// The semantics of LShiftINode include an implicit mask to 0x1F.
}
// Transition to native address size for all offset calculations:
#ifdef _LP64
// Add a manual constraint to a positive range. Cf. array_element_address.
}
}
#endif
// Combine header size (plus rounding) and body size. Then round down.
// This computation cannot overflow, because it is used only in two
// places, one where the length is sharply limited, and the other
// after a successful allocation.
if (elem_shift != NULL)
if (round_mask != 0) {
}
// else if round_mask == 0, the size computation is self-rounding
if (return_size_val != NULL) {
// This is the size
(*return_size_val) = size;
}
// Now generate allocation code
// The entire memory state is needed for slow path of the allocation
// since GC and deoptimization can happen.
// Create the AllocateArrayNode and its result projections
length);
// Cast to correct type. Note that the klass_node may be constant or not,
// and in the latter case the actual array type will be inexact also.
// (This happens via a non-constant argument to inline_native_newArray.)
// In any case, the value of klass_node provides the desired array type.
// Try to get a better type than POS for the size
}
// Cast length on remaining path to be as narrow as possible
}
}
return javaoop;
}
// The following "Ideal_foo" functions are placed here because they recognize
// the graph shapes created by the functions immediately above.
//---------------------------Ideal_allocation----------------------------------
// Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
return NULL;
}
}
return allo->as_Allocate();
}
}
// Report failure to match.
return NULL;
}
// Fancy version which also strips off an offset (and reports it to caller).
}
// Trace Initialize <- Proj[Parm] <- Allocate
if (alloc->is_Allocate()) {
return alloc->as_Allocate();
}
}
return NULL;
}
// Trace Allocate -> Proj[Parm] -> Initialize
if (init->is_Initialize()) {
return init->as_Initialize();
}
}
return NULL;
}
// Trace Allocate -> Proj[Parm] -> MemBarStoreStore
if (storestore->is_MemBarStoreStore()) {
return storestore->as_MemBarStoreStore();
}
}
return NULL;
}
//----------------------------- loop predicates ---------------------------
//------------------------------add_predicate_impl----------------------------
// Too many traps seen?
if (too_many_traps(reason)) {
#ifdef ASSERT
if (TraceLoopPredicate) {
}
#endif
// We cannot afford to take more traps here,
// do not generate predicate.
return;
}
C->add_predicate_opaq(opq);
{
PreserveJVMState pjvms(this);
}
}
//------------------------------add_predicate---------------------------------
if (UseLoopPredicate) {
}
// loop's limit check predicate should be near the loop.
if (LoopLimitCheck) {
}
}
//----------------------------- store barriers ----------------------------
}
// Final sync IdealKit and graphKit.
}
// Insert a write-barrier store. This is to let generational GC work; we have
// to flag all oop-stores before the next GC point.
bool use_precise) {
// No store check needed if we're storing a NULL or an old object
// (latter case is probably a string constant). The concurrent
// mark sweep garbage collector, however, needs to have all nonNull
// oop updates flagged via card-marks.
// must be either an oop or NULL
// stores of null never (?) need barriers
return;
// no store barrier needed, because no old-to-new ref created
return;
}
// We can skip marks on a freshly-allocated object in Eden.
// Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
// That routine informs GC to take appropriate compensating steps,
// upon a slow-path allocation, so as to make this card-mark
// elision safe.
return;
}
if (!use_precise) {
// All card marks for a (non-array) instance are in one place:
}
// (Else it's an array (or unknown), and we want more precise card marks.)
// Convert the pointer to an int prior to doing math on it
// Divide by card size
"Only one we handle so far.");
// Combine card table base and card offset
// Get the alias_index for raw card-mark memory
if (UseCondCardMark) {
// The classic GC reference write barrier is typically implemented
// as a store into the global card mark table. Unfortunately
// unconditional stores can result in false sharing and excessive
// coherence traffic as well as false transactional aborts.
// UseCondCardMark enables MP "polite" conditional card mark
// stores. In theory we could relax the load from ctrl() to
// no_ctrl, but that doesn't buy much latitude.
}
// Smash zero into card
if( !UseConcMarkSweepGC ) {
} else {
// Specialized path for CM store barrier
}
if (UseCondCardMark) {
}
// Final sync IdealKit and GraphKit.
}
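// Worked example: with the usual 512-byte cards (card_shift == 9), the card
// for a store to address A is the byte at byte_map_base + (A >> 9), and the
// barrier above marks it dirty by storing zero into that byte.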
const TypeOopPtr* val_type,
// Some sanity checks
// Note: val is unused in this routine.
if (do_load) {
// We need to generate the load of the previous value
} else {
// In this case both val_type and alias_idx are unused.
}
assert(in_bytes(PtrQueue::byte_width_of_active()) == 4 || in_bytes(PtrQueue::byte_width_of_active()) == 1, "flag width");
// Offsets into the thread
// Now the actual pointers into the thread
// Now some of the values
// if (!marking)
assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
if (do_load) {
// load original value
// alias_idx correct??
}
// if (pre_val != NULL)
Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
// is the queue for this thread full?
// decrement the index
// Now get the buffer location we will log the previous value into and store it
// update the index
// logging buffer is full, call the runtime
__ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
// Final sync IdealKit and GraphKit.
}
//
// Update the card table and add card address to the queue
//
// Smash zero into card. MUST BE ORDERED WRT TO STORE
// Now do the queue work
__ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
}
bool use_precise) {
// If we are writing a NULL then we need no post barrier
// Must be NULL
// No post barrier if writing NULL
return;
}
if (!use_precise) {
// All card marks for a (non-array) instance are in one place:
}
// (Else it's an array (or unknown), and we want more precise card marks.)
// Get the alias_index for raw card-mark memory
// Offsets into the thread
// Pointers into the thread
// Now some values
// Use ctrl to avoid hoisting these values past a safepoint, which could
// potentially reset these fields in the JavaThread.
Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
// Convert the store obj pointer to an int prior to doing math on it
// Must use ctrl to prevent "integerized oop" existing across safepoint
// Divide pointer by card size
// Combine card table base and card offset
// If we know the value being stored does it cross regions?
// Does the store cause us to cross regions?
// Should be able to do an unsigned compare of region_size instead of
// an extra shift. Do we have an unsigned compare??
// Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
Node* xor_res = __ URShiftX ( __ XorX( cast, __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));
// if (xor_res == 0) same region so skip
// No barrier if we are storing a NULL
// Ok must mark the card if not already dirty
// load the original value of the card
} else {
// Object.clone() intrinsic uses this path.
}
// Final sync IdealKit and GraphKit.
}
if (java_lang_String::has_offset_field()) {
false, NULL, 0);
} else {
return intcon(0);
}
}
if (java_lang_String::has_count_field()) {
false, NULL, 0);
} else {
}
}
false, NULL, 0);
}
false, NULL, 0);
}
false, NULL, 0);
}
false, NULL, 0);
}