// parse1.cpp, revision 1264
/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
#include "incls/_precompiled.incl"
#include "incls/_parse1.cpp.incl"
// Static array so we can figure out which bytecodes stop us from compiling
// the most. Some of the non-static variables are needed in bytecodeInfo.cpp
// and eventually should be encapsulated in a proper class (gri 8/18/98).
int nodes_created = 0;
int methods_parsed = 0;
int methods_seen = 0;
int blocks_parsed = 0;
int blocks_seen = 0;
int explicit_null_checks_inserted = 0;
int explicit_null_checks_elided = 0;
int all_null_checks_found = 0, implicit_null_checks = 0;
int implicit_null_throws = 0;
int reclaim_idx = 0;
int reclaim_in = 0;
int reclaim_node = 0;
#ifndef PRODUCT
#endif
//------------------------------print_statistics-------------------------------
#ifndef PRODUCT
void Parse::print_statistics() {
  if (methods_seen != methods_parsed)
    tty->print_cr("Reasons for parse failures (NOT cumulative):");
  if (explicit_null_checks_inserted)
    tty->print_cr("%d original NULL checks - %d elided (%2d%%); optimizer leaves %d,", explicit_null_checks_inserted, explicit_null_checks_elided, (100*explicit_null_checks_elided)/explicit_null_checks_inserted, all_null_checks_found);
  if (all_null_checks_found)
    tty->print_cr("%d made implicit (%2d%%)", implicit_null_checks, (100*implicit_null_checks)/all_null_checks_found);
  if (implicit_null_throws)
    tty->print_cr("%d implicit null exceptions at runtime", implicit_null_throws);
}
#endif
//------------------------------ON STACK REPLACEMENT---------------------------
// Construct a node which can be used to get incoming state for
// on stack replacement.
Node* Parse::fetch_interpreter_state(int index,
                                     BasicType bt,
                                     Node* local_addrs,
                                     Node* local_addrs_base) {
  Node* mem = memory(Compile::AliasIdxRaw);
  Node* adr = basic_plus_adr(local_addrs_base, local_addrs, -index*wordSize);

  // Very similar to LoadNode::make, except we handle un-aligned longs and
  // doubles on Sparc.  Intel can handle them just fine directly.
  Node *l;
  switch( bt ) {                // Signature is flattened
  case T_INT:     l = new (C, 3) LoadINode( 0, mem, adr, TypeRawPtr::BOTTOM ); break;
  case T_FLOAT:   l = new (C, 3) LoadFNode( 0, mem, adr, TypeRawPtr::BOTTOM ); break;
  case T_ADDRESS: l = new (C, 3) LoadPNode( 0, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM  ); break;
  case T_OBJECT:  l = new (C, 3) LoadPNode( 0, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break;
  case T_LONG:
  case T_DOUBLE: {
    // Since arguments are in reverse order, the argument address 'adr'
    // refers to the back half of the long/double.  Recompute adr.
    adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
    if( Matcher::misaligned_doubles_ok ) {
      l = (bt == T_DOUBLE) ? (Node*)new (C, 3) LoadDNode( 0, mem, adr, TypeRawPtr::BOTTOM )
                           : (Node*)new (C, 3) LoadLNode( 0, mem, adr, TypeRawPtr::BOTTOM );
    } else {
      l = (bt == T_DOUBLE) ? (Node*)new (C, 3) LoadD_unalignedNode( 0, mem, adr, TypeRawPtr::BOTTOM )
                           : (Node*)new (C, 3) LoadL_unalignedNode( 0, mem, adr, TypeRawPtr::BOTTOM );
    }
    break;
  }
  default: ShouldNotReachHere();
  }
  return _gvn.transform(l);
}
// Helper routine to prevent the interpreter from handing
// unexpected typestate to an OSR method.
// The Node l is a value newly dug out of the interpreter frame.
// The type is the type predicted by ciTypeFlow.  Note that it is
// not a general type, but can only come from Type::get_typeflow_type.
// The safepoint is a map which will feed an uncommon trap.
Node* Parse::check_interpreter_type(Node* l, const Type* type,
                                    SafePointNode* &bad_type_exit) {
  const TypeOopPtr* tp = type->isa_oopptr();

  // TypeFlow may assert null-ness if a type appears unloaded.
  if (type == TypePtr::NULL_PTR ||
      (tp != NULL && !tp->klass()->is_loaded())) {
    // Value must be null, not a real oop.
    Node* chk = _gvn.transform( new (C, 3) CmpPNode(l, null()) );
    Node* tst = _gvn.transform( new (C, 2) BoolNode(chk, BoolTest::eq) );
    IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
    set_control(_gvn.transform( new (C, 1) IfTrueNode(iff) ));
    Node* bad_type = _gvn.transform( new (C, 1) IfFalseNode(iff) );
    bad_type_exit->control()->add_req(bad_type);
    l = null();
  }
  // Typeflow can also cut off paths from the CFG, based on
  // types which appear unloaded, or call sites which appear unlinked.
  // When paths are cut off, values at later merge points can rise
  // toward more specific classes.  Make sure these specific classes
  // are still in effect.
  if (tp != NULL && tp->klass() != C->env()->Object_klass()) {
    // TypeFlow asserted a specific object type.  Value must have that type.
    Node* bad_type_ctrl = NULL;
    l = gen_checkcast(l, makecon(TypeKlassPtr::make(tp->klass())), &bad_type_ctrl);
    bad_type_exit->control()->add_req(bad_type_ctrl);
  }
  assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
  return l;
}
// Helper routine which sets up elements of the initial parser map when
// performing a parse for on stack replacement. Add values into map.
// The only parameter contains the address of the interpreter's arguments.
int index;
// Mismatch between method and jvms can occur since map briefly held
// an OSR entry state (which takes up one RawPtr word).
// Find the start block.
// Set initial BCI.
// Set initial stack depth.
// Check bailouts. We currently do not perform on stack replacement
// of loops in catch blocks or loops which branch with a non-empty stack.
if (sp() != 0) {
C->record_method_not_compilable("OSR starts with non-empty stack");
return;
}
  // Do not OSR inside finally clauses:
  if (osr_block->has_trap_at(osr_block->start())) {
    C->record_method_not_compilable("OSR starts with an immediate trap");
return;
}
// Commute monitors from interpreter frame to compiler frame.
// Make a BoxLockNode for the monitor.
// Displaced headers and locked objects are interleaved in the
// temp OSR buffer. We only copy the locked objects out here.
// Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
// Try and copy the displaced header to the BoxNode
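    // A minimal sketch of the two fetches described above; 'monitors_addr' and
    // 'box' are assumed to have been set up earlier in this monitor loop.
    Node* lock_object   = fetch_interpreter_state(index*2,     T_OBJECT,  monitors_addr, osr_buf);
    Node* displaced_hdr = fetch_interpreter_state(index*2 + 1, T_ADDRESS, monitors_addr, osr_buf);
    store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw);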
// Build a bogus FastLockNode (no code will be generated) and push the
// monitor into our debug info.
const FastLockNode *flock = _gvn.transform(new (C, 3) FastLockNode( 0, lock_object, box ))->as_FastLock();
    // If the lock is our method synchronization lock, tuck it away in
    // _synch_lock for return and rethrow exit paths.
    if (index == 0 && method()->is_synchronized()) {
      _synch_lock = flock;
    }
}
}
// Use the raw liveness computation to make sure that unexpected
// values don't propagate into the OSR frame.
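  // A minimal sketch of that computation (ciMethod::liveness_at_bci at the OSR entry point):
  MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());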
if (!live_locals.is_valid()) {
// Degenerate or breakpointed method.
C->record_method_not_compilable("OSR in empty or breakpointed method");
return;
}
// Extract the needed locals from the interpreter frame.
// find all the locals that the interpreter thinks contain live oops
continue;
}
// 6403625: Verify that the interpreter oopMap thinks that the oop is live
// else we might load a stale oop if the MethodLiveness disagrees with the
// result of the interpreter. If the interpreter says it is dead we agree
// by making the value go to top.
//
}
// and ignore it for the loads
continue;
}
}
// Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
continue;
}
// If the type falls to bottom, then this must be a local that
// is mixing ints and oops or some such. Forcing it to top
// makes it go dead.
continue;
}
// Construct code to access the appropriate local.
}
// Extract the needed stack entries from the interpreter frame.
// Currently the compiler bails out when attempting to on stack replace
// at a bci with a non-empty stack. We should not reach here.
}
}
// End the OSR migration
osr_buf);
// Now that the interpreter state is loaded, make sure it will match
// at execution time what the compiler is expecting now:
if (stopped()) break;
if (l->is_top()) continue; // nothing here
// skip type check for dead oops
continue;
}
}
// Skip type check for dead address locals
continue;
}
}
if (stopped()) break;
if (l->is_top()) continue; // nothing here
}
// Build an uncommon trap here, if any inputs can be unexpected.
// The unexpected type happens because a new edge is active
// in the CFG, which typeflow had previously ignored.
// E.g., Object x = coldAtFirst() && notReached()? "str": new Integer(123).
// This x will be typed as Integer if notReached is not yet linked.
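    // A minimal sketch of the trap: if any of the collected bad-type paths are
    // reachable, deoptimize back to the interpreter ('bad_type_exit' state assumed).
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret);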
}
}
//------------------------------Parse------------------------------------------
// Main parser constructor.
{
// Init some variables
_wrote_final = false;
#ifndef PRODUCT
if (PrintCompilation || PrintOpto) {
// Make sure I have an inline tree, so I can print messages about it.
}
_max_switch_depth = 0;
_est_switch_depth = 0;
#endif
}
#ifndef PRODUCT
if (_flow->has_irreducible_entry()) {
C->set_parsed_irreducible_loop(true);
}
#endif
if (_expected_uses <= 0) {
_prof_factor = 1;
} else {
if (prof_total <= _expected_uses) {
_prof_factor = 1;
} else {
}
}
}
}
// Accumulate deoptimization counts.
// (The range_check and store_check counts are checked elsewhere.)
if (md_count != 0) {
total_count += md_count;
// Saturate the add if it overflows.
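      // A minimal sketch of the saturation: with unsigned counters, overflow of
      // the add above shows up as the sum being smaller than the addend.
      if (total_count < md_count)
        total_count = (uint)-1;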
}
}
// Accumulate total sum of decompilations, also.
}
// Always register dependence if JVMTI is enabled, because
// either breakpoint setting or hotswapping of methods may
// cause deoptimization.
if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
}
methods_seen++;
// Do some special top-level things.
_entry_bci = C->entry_bci();
#ifndef PRODUCT
if (Verbose) {
method()->print_codes();
}
}
#endif
}
}
#ifdef ASSERT
if (depth() == 1) {
"Must invalidate if TypeFuncs differ");
}
} else {
}
#endif
#ifndef PRODUCT
// add method size here to guarantee that inlined methods are added too
if (TimeCompiler)
#endif
if (failing()) {
return;
}
// Import the results of the ciTypeFlow.
init_blocks();
// Merge point for all normal exits
build_exits();
// Setup the initial JVM state map.
// Check for bailouts during map initialization
return;
}
// Collect debug info for inlined calls unless -XX:-DebugInlinedCalls.
}
if (is_osr_parse()) {
} else {
}
// Check for bailouts during method entry.
if (failing()) {
return;
}
// We begin parsing as if we have just encountered a jump to the
// method entry.
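  // A minimal sketch of that jump; 'entry_map' is the map built by create_entry_map().
  Block* entry_block = start_block();
  set_map_clone(entry_map);
  merge_common(entry_block, entry_block->next_path_num());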
#ifndef PRODUCT
BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
#endif
// Parse all the basic blocks.
// Check for bailouts during conversion to graph
if (failing()) {
return;
}
// Fix up all exiting control flow.
do_exits();
}
//---------------------------do_all_blocks-------------------------------------
void Parse::do_all_blocks() {
// Walk over all blocks in Reverse Post-Order.
while (true) {
bool progress = false;
// Dead block, no state reaches this block
continue;
}
// Prepare to parse this block.
if (stopped()) {
// Block is dead.
continue;
}
progress = true;
// Not all preds have been parsed. We must build phis everywhere.
// (Note that dead locals do not get phis built, ever.)
// Leave behind an undisturbed copy of the map, for future merges.
}
// In the absence of irreducible loops, the Region and Phis
// associated with a merge that doesn't involve a backedge can
// be simplified now since the RPO parsing order guarantees
// that any path which was supposed to reach here has already
// been parsed or must be dead.
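        // A minimal sketch of that simplification; 'c' and 'result' feed the trace below.
        Node* c = control();
        Node* result = _gvn.transform_no_reclaim(c);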
if (c != result && TraceOptoParse) {
}
}
}
// Parse the block.
do_one_block();
// Check for bailouts.
if (failing()) return;
}
  // With irreducible loops, multiple passes might be necessary to parse everything.
if (!has_irreducible || !progress) {
break;
}
}
blocks_seen += block_count();
#ifndef PRODUCT
// Make sure there are no half-processed blocks remaining.
// Every remaining unprocessed block is dead and may be ignored now.
if (TraceOptoParse) {
}
}
}
#endif
}
//-------------------------------build_exits----------------------------------
// Build normal and exceptional exit merge points.
void Parse::build_exits() {
// make a clone of caller to prevent sharing of side-effects
// Note: iophi and memphi are not transformed until do_exits.
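  // A minimal sketch of the exit merge point described above: a Region plus
  // untransformed i/o and memory Phis parked on the _exits kit.
  RegionNode* region = new (C, 1) RegionNode(1);
  record_for_igvn(region);
  gvn().set_type_bottom(region);
  _exits.set_control(region);
  Node* iophi  = new (C, region->req()) PhiNode(region, Type::ABIO);
  Node* memphi = new (C, region->req()) PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
  _exits.set_i_o(iophi);
  _exits.set_all_memory(memphi);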
// Add a return value to the exit state. (Do not push it yet.)
// Don't "bind" an unloaded return klass to the ret_phi. If the klass
// becomes loaded during the subsequent parsing, the loaded and unloaded
// types will not join when we transform and push in do_exits().
}
// Note: ret_phi is not yet pushed, until do_exits.
}
}
//----------------------------build_start_state-------------------------------
// Construct a state which contains only the incoming arguments from an
// unknown caller. The method & bci will be NULL & InvocationEntryBci.
assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
entry_jvms->set_offsets(0);
}
uint i;
// Record all these guys for later GVN.
}
}
return jvms;
}
//-----------------------------make_node_notes---------------------------------
jvms->set_offsets(0);
return nn;
}
//--------------------------return_values--------------------------------------
kit.reset_memory(),
// Add zero or 1 return values
if (ret_size > 0) {
// Note: The second dummy edge is not needed by a ReturnNode.
}
// bind it to root
}
//------------------------rethrow_exceptions-----------------------------------
// Bind all exception states in the list into a single RethrowNode.
// Load my combined exception state into the kit, with all phis transformed:
// like a return but with exception input
ex_oop);
// bind to root
}
bool Parse::can_rerun_bytecode() {
switch (bc()) {
case Bytecodes::_getstatic:
case Bytecodes::_putstatic:
case Bytecodes::_arraylength:
case Bytecodes::_checkcast:
case Bytecodes::_instanceof:
case Bytecodes::_anewarray:
case Bytecodes::_multianewarray:
return true;
break;
// Don't rerun athrow since it's part of the exception path.
case Bytecodes::_invokestatic:
case Bytecodes::_invokedynamic:
case Bytecodes::_invokespecial:
case Bytecodes::_invokevirtual:
case Bytecodes::_invokeinterface:
return false;
break;
default:
assert(false, "unexpected bytecode produced an exception");
return true;
}
}
//---------------------------do_exceptions-------------------------------------
// Process exceptions arising from the current bytecode.
// Send caught exceptions to the proper handler within this method.
// Unhandled exceptions feed into _exit.
void Parse::do_exceptions() {
if (!has_exceptions()) return;
if (failing()) {
// Pop them all off and throw them away.
while (pop_exception_state() != NULL) ;
return;
}
// Make sure we can classify this bytecode if we need to.
PreserveJVMState pjvms(this, false);
if (!method()->has_exception_handlers()) {
// Common case: Transfer control outward.
// Doing it this early allows the exceptions to common up
// even between adjacent method calls.
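      // A minimal sketch of the transfer; 'ex_map' is the popped exception state
      // (assumed from the enclosing loop over pop_exception_state()).
      throw_to_exit(ex_map);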
} else {
// Have to look at the exception first.
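      // A minimal sketch: dispatch to in-method handlers before discarding the state.
      catch_inline_exceptions(ex_map);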
stop_and_kill_map(); // we used up this exception state; kill it
}
}
// We now return to our regularly scheduled program:
}
//---------------------------throw_to_exit-------------------------------------
// Merge the given map into an exception exit from this method.
// The exception exit will handle any unlocking of receiver.
// The ex_oop must be saved within the ex_map, unlike merge_exception.
// Pop the JVMS to (a copy of) the caller.
// Copy out the standard machine state:
}
// ...and the exception:
// Finally, collect the new exception state in my exits:
}
//------------------------------do_exits---------------------------------------
// Now peephole on the return bits
if (wrote_final()) {
// This method (which must be a constructor by the rules of Java)
// wrote a final. The effects of all initializations must be
// committed to memory before any code after the constructor
    // publishes the reference to the newly constructed object.
// Rather than wait for the publication, we simply block the
// writes here. Rather than put a barrier on only those writes
// which are required to complete, we force all writes to complete.
//
// "All bets are off" unless the first publication occurs after a
// normal return from the constructor. We do not attempt to detect
// such unusual early publications. But no barrier is needed on
// exceptional returns, since they cannot publish normally.
//
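    // One way to realize the barrier described above (a release barrier on the exit kit):
    _exits.insert_mem_bar(Op_MemBarRelease);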
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name();
      tty->print_cr(" writes finals and needs a memory barrier");
    }
#endif
}
// transform each slice of the original memphi:
}
assert(_exits.control()->is_top() || !_gvn.type(ret_phi)->empty(), "return value must be well defined");
}
// Note: Logic for creating and optimizing the ReturnNode is in Compile.
// Unlock along the exceptional paths.
// This is done late so that we can common up equivalent exceptions
// (e.g., null checks) arising from multiple points within this method.
// See GraphKit::add_exception_state, which performs the commoning.
// record exit from a method if compiled while Dtrace is turned on.
// First move the exception list out of _exits:
// Now re-collect the exceptions into _exits:
// Force the exiting JVM state to have this method at InvocationEntryBci.
// The exiting JVM state is otherwise a copy of the calling JVMS.
if (do_synch) {
// Unlock!
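      // A minimal sketch of the unlock; '_synch_lock' is assumed to have been
      // set up by do_method_entry for synchronized methods.
      shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());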
}
if (C->env()->dtrace_method_probes()) {
}
// Done with exception-path processing.
// Pop the last vestige of this method:
}
}
{
// Capture very early exceptions (receiver null checks) from caller JVMS
}
}
}
//-----------------------------create_entry_map-------------------------------
// Initialize our parser map to contain the types at method entry.
// For OSR, the map contains a single RawPtr parameter.
// Initial monitor locking for sync. methods is performed by do_method_entry.
  // Check for really stupid bail-out cases.
  uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
  if (len >= 32760) {
C->record_method_not_compilable_all_tiers("too many local variables");
return NULL;
}
// If this is an inlined method, we may have to do a receiver null check.
return NULL;
}
}
// Create an initial safepoint to hold JVM state during parsing
record_for_igvn(map());
uint i;
// Pass thru the predefined input parameters.
}
if (depth() == 1) {
// Insert the memory aliasing node
}
// Now add the locals which are initially bound to arguments:
}
// Clear out the rest of the map (locals and stack)
}
return entry_map;
}
//-----------------------------do_method_entry--------------------------------
// Emit any code needed in the pseudo-block before BCI zero.
// The main thing to do is lock the receiver of a synchronized method.
void Parse::do_method_entry() {
set_sp(0); // Java Stack Pointer
if (C->env()->dtrace_method_probes()) {
}
// If the method is synchronized, we need to construct a lock node, attach
// it to the Start node, and pin it there.
if (method()->is_synchronized()) {
// Insert a FastLockNode right after the Start which takes as arguments
// the current thread pointer, the "this" pointer & the address of the
// stack slot pair used for the lock. The "this" pointer is a projection
// off the start node, but the locking spot has to be constructed by
// creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
// becomes the second argument to the FastLockNode call. The
// FastLockNode becomes the new control parent to pin it to the start.
// Setup Object Pointer
} else { // Else pass the "this" pointer,
}
// Clear out dead values from the debug info.
// Build the FastLockNode
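    // A minimal sketch; 'lock_obj' is assumed to be the object selected above.
    _synch_lock = shared_lock(lock_obj);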
}
if (depth() == 1) {
}
}
//------------------------------init_blocks------------------------------------
void Parse::init_blocks() {
// Create the blocks.
int rpo;
// Initialize the structs.
}
// Collect predecessor and successor information.
block->init_graph(this);
}
}
//-------------------------------init_node-------------------------------------
_pred_count = 0;
_preds_parsed = 0;
_count = 0;
// entry point has additional predecessor
}
//-------------------------------init_graph------------------------------------
// Create the successor list for this parser block.
int p = 0;
_successors[i] = block2;
// Accumulate pred info for the other block, too.
if (i < ns) {
block2->_pred_count++;
} else {
block2->_is_handler = true;
}
#ifdef ASSERT
// A block's successors must be distinguishable by BCI.
// That is, no bytecode is allowed to branch to two different
// clones of the same code location.
for (int j = 0; j < i; j++) {
}
#endif
}
// Note: We never call next_path_num along exception paths, so they
// never get processed as "ready". Also, the input phis of exception
// handlers get specially processed, so that
}
//---------------------------successor_for_bci---------------------------------
for (int i = 0; i < all_successors(); i++) {
}
// We can actually reach here if ciTypeFlow traps out a block
// due to an unloaded class, and concurrently with compilation the
// class is then loaded, so that a later phase of the parser is
// able to see more of the bytecode CFG. Or, the flow pass and
// the parser can have a minor difference of opinion about executability
// of bytecodes. For example, "obj.field = null" is executable even
// if the field's type is an unloaded class; the flow pass used to
// make a trap for such code.
return NULL;
}
//-----------------------------stack_type_at-----------------------------------
}
//-----------------------------local_type_at-----------------------------------
// Make dead locals fall to bottom.
if (_live_locals.size() == 0) {
// This bitmap can be zero length if we saw a breakpoint.
// In such cases, pretend they are all live.
}
}
#ifndef PRODUCT
//----------------------------name_for_bc--------------------------------------
// helper method for BytecodeParseHistogram
static const char* name_for_bc(int i) {
  return Bytecodes::is_defined(i) ? Bytecodes::name(Bytecodes::cast(i)) : "xxxunusedxxx";
}
//----------------------------BytecodeParseHistogram------------------------------------
_parser = p;
_compiler = c;
}
//----------------------------current_count------------------------------------
switch( bph_type ) {
default: { ShouldNotReachHere(); return 0; }
}
}
//----------------------------initialized--------------------------------------
//----------------------------reset--------------------------------------------
int i = Bytecodes::number_of_codes;
while (i-- > 0) { _bytecodes_parsed[i] = 0; _nodes_constructed[i] = 0; _nodes_transformed[i] = 0; _new_values[i] = 0; }
}
//----------------------------set_initial_state--------------------------------
// Record info when starting to parse one bytecode
}
}
//----------------------------record_change--------------------------------
// Record results of parsing one bytecode
}
}
//----------------------------print--------------------------------------------
// print profile
int total = 0;
int i = 0;
int abs_sum = 0;
if( total == 0 ) { return; }
while (--i > 0) {
int abs = _bytecodes_parsed[i];
tty->print_cr("%10d %7.2f%% %6.1f %6.2f %6.1f %6.1f %s", abs, rel, nodes, rnodes, xforms, values, name_for_bc(i));
}
}
}
#endif
//----------------------------load_state_from----------------------------------
// load the block's JVM state:
}
//-----------------------------record_state------------------------------------
set_start_map(p->stop());
}
//------------------------------do_one_block-----------------------------------
void Parse::do_one_block() {
if (TraceOptoParse) {
int ns = b->num_successors();
int nt = b->all_successors();
for (int i = 0; i < nt; i++) {
}
}
block()->mark_parsed();
// Set iterator to start of block.
// Parse bytecodes
// Learn the current bci from the iterator:
// insert a predicate if it falls through to a loop head block
if (should_add_predicate(bci())){
}
// Do not walk into the next block until directed by do_all_blocks.
break;
}
// Output an optional context marker, to help place actions
// that occur during parsing of this BC. If there is no log
// output until the next context string, this context string
// will be silently ignored.
}
// We must respect the flow pass's traps, because it will refuse
// to produce successors for trapping blocks.
break;
}
#ifdef ASSERT
#endif //ASSERT
assert(!have_se || stopped() || failing() || (sp() - pre_bc_sp) == depth, "correct depth prediction");
// Fall into next bytecode. Each bytecode normally has 1 sequential
// successor which is typically made ready by visiting this bytecode.
// If the successor has several predecessors, then it is a merge
// point, starts a new basic block, and is handled like other basic blocks.
}
}
//------------------------------set_parse_bci---------------------------------
// Collect debug info for inlined calls unless -XX:-DebugInlinedCalls.
return;
}
// Update the JVMS annotation, if present.
// Update the JVMS.
}
}
//------------------------------merge------------------------------------------
// Merge the current mapping into the basic block starting at bci
}
//-------------------------merge_new_path--------------------------------------
// Merge the current mapping into the basic block, using a new path
}
//-------------------------merge_exception-------------------------------------
// Merge the current mapping into the basic block starting at bci
// The ex_oop must be pushed on the stack, unlike throw_to_exit.
}
//--------------------handle_missing_successor---------------------------------
#ifndef PRODUCT
tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
#endif
}
//--------------------------merge_common---------------------------------------
if (TraceOptoParse) {
}
// Zap extra stack slots to top
clean_stack(sp());
// If this path is dead, do not bother capturing it as a merge.
// It is "as if" we had 1 fewer predecessors from the beginning.
if (stopped()) {
return;
}
// Record that a new block has been merged.
// Make a region if we know there are multiple or unpredictable inputs.
// (Also, if this is a plain fall-through, we might see another region,
// which must not be allowed into this block's map.)
// Add a Region to start the new basic block. Phis will be added
// later lazily.
record_for_igvn(r);
// zap all inputs to NULL for debugging (done in Node(uint) constructor)
// for (int j = 1; j < edges+1; j++) { r->init_req(j, NULL); }
set_control(r);
}
// Convert the existing Parser mapping into a mapping at this bci.
} else { // Prior mapping at this bci
// We must not manufacture more phis if the target is already parsed.
// Iterate over my current mapping and the old mapping.
// Where different, insert Phi functions.
// Use any existing Phi functions.
// Compute where to merge into
// Merge incoming control path
if (r != result && TraceOptoParse) {
}
}
record_for_igvn(r);
}
// Update all the non-control inputs to map:
else
if (m != n) { // Different; must merge
switch (j) {
// Frame pointer and Return Address never changes
break;
continue;
default: // All normal stuff
}
}
break;
}
}
// At this point, n might be top if:
// - there is no phi (because TypeFlow detected a conflict), or
// - the corresponding control edges is top (a dead incoming path)
// It is a bug if we create a phi which sees a garbage value on a live path.
// Last merge for this Phi.
// So far, Phis have had a reasonable type from ciTypeFlow.
// Now _gvn will join that with the meet of current inputs.
// BOTTOM is never permissible here, 'cause pessimistically
// Phis of pointers cannot lose the basic pointer type.
}
}
} // End of for all values to be merged
!r->in(0)) { // The occasional useless Region
set_control(r->nonnull_req());
}
// newin has been subsumed into the lazy merge, and is now dead.
stop(); // done with this guy, for now
}
if (TraceOptoParse) {
}
// Done with this parser state.
}
//--------------------------merge_memory_edges---------------------------------
// (nophi means we must not create phis, because we already parsed here)
// Merge the inputs to the MergeMems
MergeMemNode* m = merged_memory();
// Trouble: No new splits allowed after a loop body is parsed.
// Instead, wire the new split into a MergeMem on the backedge.
// The optimizer will sort it out, slicing the phi.
}
continue;
}
if (p != q) {
} else {
else
}
// Insert q into local phi
p = phi;
if (mms.at_base_memory()) {
} else if (pnum == 1) {
}
}
}
// Transform base last, in case we must fiddle with remerging.
}
}
//------------------------ensure_phis_everywhere-------------------------------
void Parse::ensure_phis_everywhere() {
// Ensure a phi on all currently known memories.
}
// Note: This is our only chance to create phis for memory slices.
// If we miss a slice that crops up later, it will have to be
// merged into the base-memory phi that we are building here.
// Later, the optimizer will comb out the knot, and build separate
// phi-loops for each memory slice that matters.
// Monitors must nest nicely and not get confused amongst themselves.
// Phi-ify everything up to the monitors, though.
ensure_phi(i);
}
}
// Even monitors need Phis, though they are well-structured.
// This is true for OSR methods, and also for the rare cases where
// a monitor object is the subject of a replace_in_map operation.
// See bugs 4426707 and 5043395.
for (uint m = 0; m < nof_monitors; m++) {
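    // A minimal sketch of the loop body: give each monitor's object slot a phi.
    ensure_phi(map()->jvms()->monitor_obj_offset(m));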
}
}
//-----------------------------add_new_path------------------------------------
// Add a previously unaccounted predecessor to this block.
// If there is no map, return the lowest unused path number.
// Add new path to the region.
// Ensure a phi on all currently known memories.
}
}
} else {
}
}
}
return pnum;
}
//------------------------------ensure_phi-------------------------------------
// Turn the idx'th entry of the current map into a Phi
return o->as_Phi();
}
// Now use a Phi here for merging
const Type* t;
t = o->bottom_type(); // Type::RETURN_ADDRESS or such-like.
} else {
assert(false, "no type information for this phi");
}
// If the type falls to bottom, then this must be a local that
// is mixing ints and oops or some such. Forcing it to top
// makes it go dead.
return NULL;
}
// Do not create phis for top either.
  // A top on a non-null control flow must be an unused value even after the phi.
return NULL;
}
return phi;
}
//--------------------------ensure_memory_phi----------------------------------
// Turn the idx'th slice of the current memory into a Phi
// clone the shared base memory phi to make a new memory split
}
return phi;
}
// Now use a Phi here for merging
const Type* t = o->bottom_type();
else
return phi;
}
//------------------------------call_register_finalizer-----------------------
// Check the klass of the receiver and call register_finalizer if the
// class needs finalization.
void Parse::call_register_finalizer() {
"must have non-null instance type");
// The type isn't known exactly so see if CHA tells us anything.
// No finalizable subclasses so skip the dynamic check.
return;
}
}
// Insert a dynamic test for whether the instance needs
// finalization. In general this will fold up since the concrete
// class is often visible so the access flags are constant.
Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );
Node* access_flags_addr = basic_plus_adr(klass, klass, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc));
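  // A minimal sketch of the dynamic test described above: load the access flags
  // and check JVM_ACC_HAS_FINALIZER.
  Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT);
  Node* mask  = _gvn.transform(new (C, 3) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER)));
  Node* check = _gvn.transform(new (C, 3) CmpINode(mask, intcon(0)));
  Node* test  = _gvn.transform(new (C, 2) BoolNode(check, BoolTest::ne));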
if (stopped()) {
// There is no slow path.
} else {
receiver);
    // These two phis are pre-filled with copies of the fast IO and Memory
}
}
//------------------------------return_current---------------------------------
// Append current _map to _exit_return
  if (RegisterFinalizersAtInit &&
      method()->intrinsic_id() == vmIntrinsics::_Object_init) {
    call_register_finalizer();
  }
// Do not set_parse_bci, so that return goo is credited to the return insn.
}
if (C->env()->dtrace_method_probes()) {
}
// get a copy of the base memory, and patch just this one input
}
}
// frame pointer is always same, already captured
// If returning oops to an interface-return, there is a silent free
// cast from oop to interface allowed by the Verifier. Make it explicit
// here.
// sharpen the type eagerly; this eases certain assert checking
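        // A minimal sketch; 'value' is the return value and 'tr' the interface
        // return type, both assumed from the surrounding (elided) code.
        value = _gvn.transform(new (C, 2) CheckCastPPNode(0, value, tr));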
}
}
}
stop_and_kill_map(); // This CFG path dies here
}
//------------------------------add_safepoint----------------------------------
void Parse::add_safepoint() {
// See if we can avoid this safepoint. No need for a SafePoint immediately
// after a Call (except Leaf Call) or another SafePoint.
}
return;
return;
}
}
// Clear out dead values from the debug info.
// Clone the JVM State
// Capture memory state BEFORE a SafePoint. Since we can block at a
// SafePoint we need our GC state to be safe; i.e. we need all our current
// write barriers (card marks) to not float down after the SafePoint so we
// must read raw memory. Likewise we need all oop stores to match the card
// marks. If deopt can happen, we need ALL stores (we need the correct JVM
// state on a deopt).
// We do not need to WRITE the memory state after a SafePoint. The control
// edge will keep card-marks and oop-stores from floating up from below a
// SafePoint and our true dependency added here will keep them from floating
// down below a SafePoint.
// Clone the current memory state
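  // A minimal sketch: snapshot the current memory state into a fresh MergeMem.
  Node* mem = MergeMemNode::make(C, map()->memory());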
// Pass control through the safepoint
// Fix edges normally used by a call
// Create a node for the polling address
if( add_poll_param ) {
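    // A minimal sketch; 'sfpnt' is assumed to be the SafePointNode built above.
    Node* polladr = ConPNode::make(C, (address)os::get_polling_page());
    sfpnt->init_req(TypeFunc::Parms+0, _gvn.transform(polladr));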
}
// Fix up the JVM State edges
// Provide an edge from root to safepoint. This makes the safepoint
// appear useful until the parse has completed.
}
}
//------------------------------should_add_predicate--------------------------
if (!UseLoopPredicate) return false;
target->is_loop_head() &&
return true;
}
return false;
}
//------------------------------add_predicate---------------------------------
void Parse::add_predicate() {
C->add_predicate_opaq(opq);
{
PreserveJVMState pjvms(this);
}
}
#ifndef PRODUCT
//------------------------show_parse_info--------------------------------------
void Parse::show_parse_info() {
}
if (PrintCompilation && Verbose) {
if (depth() == 1) {
if( ilt->count_inlines() ) {
ilt->count_inline_bcs());
}
} else {
// Check this is not the final compiled version
if (C->trap_can_recompile()) {
} else {
}
method()->print_short_name();
if (is_osr_parse()) {
}
if (ilt->count_inlines()) {
ilt->count_inline_bcs());
}
}
}
// Print that we succeeded; suppress this message on the first osr parse.
// Check this is not the final compiled version
} else {
}
method()->print_short_name();
if (is_osr_parse()) {
}
}
if (ilt->count_inlines()) {
ilt->count_inline_bcs());
}
}
}
//------------------------------dump-------------------------------------------
// Dump information associated with the bytecodes of current _method
// Iterate over bytecodes
}
}
}
// Dump information associated with a byte code index, 'bci'
// Output info on merge-points, cloning, and within _jsr..._ret
// NYI
}
#endif