/*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
# include "incls/_precompiled.incl"
# include "incls/_nmethod.cpp.incl"
#ifdef DTRACE_ENABLED
// Only bother with this argument setup if dtrace is available
HS_DTRACE_PROBE_DECL8(hotspot, compiled__method__load,
  const char*, int, const char*, int, const char*, int, void*, size_t);
HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
  char*, int, char*, int, char*, int);

#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
  {                                                                       \
    methodOop m = (method);                                               \
    if (m != NULL) {                                                      \
      HS_DTRACE_PROBE6(hotspot, compiled__method__unload,                 \
        m->klass_name()->bytes(), m->klass_name()->utf8_length(),         \
        m->name()->bytes(), m->name()->utf8_length(),                     \
        m->signature()->bytes(), m->signature()->utf8_length());          \
    }                                                                     \
  }
#else // ndef DTRACE_ENABLED
#define DTRACE_METHOD_UNLOAD_PROBE(method)
#endif
bool nmethod::is_compiled_by_c1() const {
  if (is_native_method()) return false;
  assert(compiler() != NULL, "must be");
  return compiler()->is_c1();
}
bool nmethod::is_compiled_by_c2() const {
  if (is_native_method()) return false;
  assert(compiler() != NULL, "must be");
  return compiler()->is_c2();
}
//---------------------------------------------------------------------------------
// NMethod statistics
// They are printed under various flags, including:
// PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
// (In the latter two cases, like other stats, they are printed only to the log.)
#ifndef PRODUCT
// These variables are put into one block to reduce relocations
// and make it simpler to print from the debugger.
static
struct nmethod_stats_struct {
int nmethod_count;
int total_size;
int relocation_size;
int code_size;
int stub_size;
int consts_size;
int scopes_data_size;
int scopes_pcs_size;
int dependencies_size;
int handler_table_size;
int nul_chk_table_size;
int oops_size;
  void note_nmethod(nmethod* nm) {
    nmethod_count += 1;
    total_size += nm->size();
  }
  void print_nmethod_stats() {
    if (nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count);
    if (total_size != 0)  tty->print_cr(" total in heap = %d", total_size);
  }
int native_nmethod_count;
int native_total_size;
int native_code_size;
int native_oops_size;
  void note_native_nmethod(nmethod* nm) {
    native_nmethod_count += 1;
    native_total_size += nm->size();
  }
  void print_native_nmethod_stats() {
    if (native_nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
  }
int pc_desc_resets; // number of resets (= number of caches)
int pc_desc_queries; // queries to nmethod::find_pc_desc
  int pc_desc_approx;   // number of queries with approximate == true
int pc_desc_repeats; // number of _last_pc_desc hits
int pc_desc_hits; // number of LRU cache hits
int pc_desc_tests; // total number of PcDesc examinations
int pc_desc_searches; // total number of quasi-binary search steps
  int pc_desc_adds;     // number of LRU cache insertions
  void print_pc_stats() {
    tty->print_cr("PcDesc Statistics:  %d queries, %.2f comparisons per query",
                  pc_desc_queries,
                  (double)(pc_desc_tests + pc_desc_searches) / pc_desc_queries);
  }
} nmethod_stats;
#endif //PRODUCT
//---------------------------------------------------------------------------------
// The _unwind_handler is a special marker address, which says that
// for given exception oop and address, the frame should be removed
// as the tuple cannot be caught in the nmethod
ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
  assert(pc != NULL, "Must be non null");
  assert(exception.not_null(), "Must be non null");
  assert(handler != NULL, "Must be non null");
  _count = 0;
  _exception_type = exception->klass();
  _next = NULL;
  add_address_and_handler(pc, handler);
}

address ExceptionCache::match(Handle exception, address pc) {
  assert(pc != NULL, "Must be non null");
  assert(exception.not_null(), "Must be non null");
  if (exception->klass() == exception_type()) {
    return (test_address(pc));
  }
  return NULL;
}

bool ExceptionCache::match_exception_with_space(Handle exception) {
  assert(exception.not_null(), "Must be non null");
  if (exception->klass() == exception_type() && count() < cache_size) {
    return true;
  }
  return false;
}

address ExceptionCache::test_address(address addr) {
  for (int i = 0; i < count(); i++) {
    if (pc_at(i) == addr) {
      return handler_at(i);
    }
  }
  return NULL;
}
bool ExceptionCache::add_address_and_handler(address addr, address handler) {
  if (test_address(addr) == handler) return true;
  if (count() < cache_size) {
    set_pc_at(count(), addr);
    set_handler_at(count(), handler);
    increment_count();
    return true;
}
return false;
}
// private methods for handling the exception cache
// These methods are private, and used to manipulate the exception cache
// directly.
ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
}
return NULL;
}
//-----------------------------------------------------------------------------
// Helper used by both find_pc_desc methods.
static bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
  if (!approximate)
    return pc->pc_offset() == pc_offset;
  else
    return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
}
void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
  if (initial_pc_desc == NULL) {  // native method
    _last_pc_desc = NULL;
    return;
  }
  NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
  // reset the cache by filling it with benign (non-null) values
  assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
  _last_pc_desc = initial_pc_desc + 1;  // first valid one is after sentinel
for (int i = 0; i < cache_size; i++)
_pc_descs[i] = initial_pc_desc;
}
PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
  NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx);
  // In order to prevent race conditions, do not load cache elements
  // repeatedly, but use a local copy:
  PcDesc* res;
  // Step one: Check the most recently returned value.
  res = _last_pc_desc;
  if (res == NULL)  return NULL;  // clearing it is a sign of being reset
  if (match_desc(res, pc_offset, approximate)) {
    NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
    return res;
  }
// Step two: Check the LRU cache.
  for (int i = 0; i < cache_size; i++) {
    res = _pc_descs[i];
    if (res == NULL)  break;  // optimization: skip empty cache
    if (match_desc(res, pc_offset, approximate)) {
      NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
      _last_pc_desc = res;  // record this cache hit in case of repeat
      return res;
    }
  }
// Report failure.
return NULL;
}
void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
  // Update the LRU cache by shifting pc_desc forward:
  for (int i = 0; i < cache_size; i++) {
    PcDesc* next = _pc_descs[i];
    _pc_descs[i] = pc_desc;
    pc_desc = next;
  }
// Note: Do not update _last_pc_desc. It fronts for the LRU cache.
}
// adjust pcs_size so that it is a multiple of both oopSize and
// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
// of oopSize, then 2*sizeof(PcDesc) is)
static int adjust_pcs_size(int pcs_size) {
  int nsize = round_to(pcs_size, oopSize);
  if ((nsize % sizeof(PcDesc)) != 0) {
    nsize = pcs_size + sizeof(PcDesc);
  }
  assert((nsize % oopSize) == 0, "correct alignment");
  return nsize;
}
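// Worked example (illustrative, assuming oopSize == 8 and sizeof(PcDesc) == 12,
// so that 2*sizeof(PcDesc) == 24 is a multiple of oopSize): an odd record count
// gives pcs_size == 36; round_to(36, 8) == 40 is not a multiple of 12, so we
// return 36 + 12 == 48, which is a multiple of both 8 and 12.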
//-----------------------------------------------------------------------------
if (exception_cache() != NULL) {
}
}
void nmethod::remove_from_exception_cache(ExceptionCache* ec) {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();
  assert(curr != NULL, "nothing to remove");
  // find the previous and next entry of ec
  while (curr != ec) {
    prev = curr;
    curr = curr->next();
    assert(curr != NULL, "ExceptionCache not found");
  }
  // now: curr == ec
  ExceptionCache* next = curr->next();
  if (prev == NULL) {
    set_exception_cache(next);
  } else {
    prev->set_next(next);
  }
  delete curr;
}
// public methods for accessing the exception cache
// These are the public access methods.
// We never grab a lock to read the exception cache, so we may
// have false negatives. This is okay, as it can only happen during
// the first few exception lookups for a given nmethod.
address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
return NULL;
}
// There are potential race conditions during exception cache updates, so we
// must own the ExceptionCache_lock before doing ANY modifications. Because
// we don't lock during reads, it is possible to have several threads attempt
// to update the cache with the same data. We need to check for already inserted
// copies of the current data before adding it.
void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}
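// Note the pairing with the unlocked reader above (handler_for_exception_and_pc):
// a racing reader either sees a fully constructed entry or harmlessly misses it;
// the lock only serializes writers, so two threads recording the same
// (exception, pc) pair collapse onto a single cache entry.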
//-------------end of code for ExceptionCache--------------
void nmFlags::clear() {
  assert(sizeof(nmFlags) == sizeof(int), "using more than one word for nmFlags");
  *(jint*)this = 0;
}
int nmethod::total_size() const {
return
code_size() +
stub_size() +
consts_size() +
scopes_data_size() +
    scopes_pcs_size() +
    handler_table_size() +
    nul_chk_table_size();
}
const char* nmethod::compile_kind() const {
if (is_native_method()) return "c2n";
if (is_osr_method()) return "osr";
return NULL;
}
// %%% This variable is no longer used?
int vep_offset,
int frame_complete,
int frame_size,
// create nmethod
{
nm = new (native_nmethod_size)
oop_maps);
}
// verify nmethod
nm->log_new_nmethod();
}
return nm;
}
int compile_id,
int entry_bci,
int orig_pc_offset,
int comp_level
)
{
// create nmethod
int nmethod_size =
nm = new (nmethod_size)
// To make dependency checking during class loading fast, record
// the nmethod dependencies in the classes it is dependent on.
// This allows the dependency checking code to simply walk the
// class hierarchy above the loaded class, checking only nmethods
// which are dependent on those classes. The slow way is to
// check every nmethod for dependencies which makes it linear in
// the number of methods compiled. For applications with a lot of
// classes the slow way is too slow.
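// For example: when class C is loaded, only the dependent-nmethod lists of C
// and the classes above it in the hierarchy are walked, instead of scanning
// every nmethod in the code cache.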
      for (Dependencies::DepStream deps(nm); deps.next(); ) {
        klassOop klass = deps.context_type();
        if (klass == NULL)  continue;  // ignore things like evol_method

        // record this nmethod as dependent on this klass
        instanceKlass::cast(klass)->add_dependent_nmethod(nm);
}
}
}
// verify nmethod
nm->log_new_nmethod();
}
// done
return nm;
}
// For native wrappers
int nmethod_size,
int frame_size,
{
{
NOT_PRODUCT(_has_debug_info = false; )
    // We have no exception handler or deopt handler; make the
    // values something that will never match a pc like the nmethod vtable entry
_exception_offset = 0;
_deoptimize_offset = 0;
_orig_pc_offset = 0;
_stub_offset = data_offset();
_compile_id = 0; // default
_lock_count = 0;
code_buffer->copy_oops_to(this);
VTune::create_nmethod(this);
}
// This output goes directly to the tty, not the compiler log.
// To enable tools to match it up with the compilation activity,
// be sure to tag this tty output with the compile ID.
}
// print the header part first
print();
// then print the requested information
if (PrintNativeNMethods) {
print_code();
}
if (PrintRelocations) {
}
}
}
}
}
int nmethod_size,
int compile_id,
int entry_bci,
int orig_pc_offset,
int frame_size,
int comp_level
)
{
{
NOT_PRODUCT(_has_debug_info = false; )
// Exception handler and deopt handler are in the stub section
_consts_offset = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start());
_unload_reported = false; // jvmti state
_lock_count = 0;
// Copy contents of ScopeDescRecorder to nmethod
code_buffer->copy_oops_to(this);
debug_info->copy_to(this);
dependencies->copy_to(this);
VTune::create_nmethod(this);
// Copy contents of ExceptionHandlerTable to nmethod
handler_table->copy_to(this);
nul_chk_table->copy_to(this);
    // We use the entry-point information to find out whether a method is
    // static or non-static.
    assert(method()->is_static() == (entry_point() == _verified_entry_point),
           " entry points must be same for static methods and vice versa");
}
if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
}
  // Note: Do not verify in here as the CodeCache_lock is
  // taken, which would conflict with the CompiledIC_lock
  // which is taken during the verification of call sites.
  // (was bug - gri 10/25/99)
}
// Print a short set of xml attributes to identify this nmethod. The
// output should be embedded in some other element.
const char* nm_kind = compile_kind();
}
#ifdef TIERED
#endif // TIERED
}
void nmethod::log_new_nmethod() const {
instructions_begin(), size());
}
}
// Print out more verbose output usually for a newly created nmethod.
// Print a little tag line that looks like +PrintCompilation output:
compile_id(),
is_osr_method() ? '%' :
title);
#ifdef TIERED
#endif // TIERED
if (is_osr_method())
}
}
}
#ifndef PRODUCT
}
// print the header part first
print();
// then print the requested information
if (printmethod) {
print_code();
print_pcs();
}
if (PrintDebugInfo) {
print_scopes();
}
if (PrintRelocations) {
}
if (PrintDependencies) {
}
if (PrintExceptionHandlers) {
}
}
}
#endif
void nmethod::set_version(int v) {
}
pd->obj_decode_offset());
}
void nmethod::clear_inline_caches() {
if (is_zombie()) {
return;
}
RelocIterator iter(this);
}
}
void nmethod::cleanup_inline_caches() {
!CompiledIC_lock->is_locked() &&
// If the method is not entrant or zombie then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
// not-entrant methods.
if (!is_in_use()) {
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
// This means that the low_boundary is going to be a little too high.
// This shouldn't matter, since oops of non-entrant methods are never used.
// In fact, why are we bothering to look at oops in a non-entrant method??
}
  // Find all calls in an nmethod, and clear the ones that point to zombie methods
case relocInfo::virtual_call_type:
case relocInfo::opt_virtual_call_type: {
// Ok, to lookup references to zombies here
// Clean inline caches pointing to both zombie and not_entrant methods
}
break;
}
case relocInfo::static_call_type: {
// Clean inline caches pointing to both zombie and not_entrant methods
}
break;
}
}
}
}
void nmethod::mark_as_seen_on_stack() {
}
// Tell if a non-entrant method can be converted to a zombie (i.e., there are no activations on the stack)
bool nmethod::can_not_entrant_be_converted() {
// Since the nmethod sweeper only does partial sweep the sweeper's traversal
// count can be greater than the stack traversal count before it hits the
// nmethod for the second time.
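  // (Roughly: the sweeper must have started at least two new traversals since
  // this nmethod was last observed on a stack.)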
  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count();
}
void nmethod::inc_decompile_count() {
// Could be gated by ProfileTraps, but do not bother...
  methodDataOop mdo = method()->method_data();
  if (mdo == NULL)  return;
  // There is a benign race here. See comments in methodDataOop.hpp.
  mdo->inc_decompile_count();
}
// Since this nmethod is being unloaded, make sure that dependencies
// recorded in instanceKlasses get flushed and pass non-NULL closure to
// indicate that this work is being done during a GC.
// A non-NULL is_alive closure indicates that this is being called during GC.
// Break cycle between nmethod & method
if (TraceClassUnloading && WizardMode) {
" unloadable], methodOop(" INTPTR_FORMAT
}
// If _method is already NULL the methodOop is about to be unloaded,
// so we don't have to break the cycle. Note that it is possible to
// have the methodOop live here, in case we unload the nmethod because
// it is pointing to some oop (other than the methodOop) being unloaded.
// OSR methods point to the methodOop, but the methodOop does not
// point back!
}
inc_decompile_count(); // Last chance to make a mark on the MDO
}
// Make the class unloaded - i.e., change state and notify sweeper
if (is_in_use()) {
// Transitioning directly from live to unloaded -- so
// we need to force a cache clean-up; remember this
// for later on.
CodeCache::set_needs_cache_clean(true);
}
// The methodOop is gone at this point
NMethodSweeper::notify(this);
}
void nmethod::invalidate_osr_method() {
  assert(_entry_bci != InvalidOSREntryBci, "Race condition: should only be called on running nmethods");
  if (_entry_bci != InvalidOSREntryBci)
    inc_decompile_count();
  // Remove from list of active nmethods
  if (method() != NULL)
    instanceKlass::cast(method()->method_holder())->remove_osr_nmethod(this);
  // Set entry as invalid
  _entry_bci = InvalidOSREntryBci;
}
if (LogCompilation) {
os::current_thread_id());
}
}
if (PrintCompilation) {
}
}
// Common functionality for both make_not_entrant and make_zombie
// Code for an on-stack-replacement nmethod is removed when a class gets unloaded.
// They never become zombie/non-entrant, so the nmethod sweeper will never remove
// them. Instead the entry_bci is set to InvalidOSREntryBci, so the osr nmethod
// will never be used anymore. That the nmethods only get removed when class
// unloading happens makes life much simpler, since the nmethods are not just
// going to disappear out of the blue.
if (is_osr_only_method()) {
if (osr_entry_bci() != InvalidOSREntryBci) {
// only log this once
}
return;
}
// If the method is already zombie or set to the state we want, nothing to do
return;
}
// Make sure the nmethod is not flushed in case of a safepoint in code below.
nmethodLocker nml(this);
{
// Enter critical section. Does not block for safepoint.
// The caller can be calling the method statically or through an inline
// cache call.
if (!is_not_entrant()) {
}
// When the nmethod becomes zombie it is no longer alive so the
// dependencies must be flushed. nmethods in the not_entrant
// state will be flushed later when the transition to zombie
// happens or they get unloaded.
} else {
}
// Change state
} // leave critical region under Patching_lock
if (state == not_entrant) {
} else {
}
if (TraceCreateZombies) {
tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
}
// Make sweeper aware that there is a zombie method that needs to be removed
NMethodSweeper::notify(this);
// not_entrant only stuff
if (state == not_entrant) {
}
// It's a true state change, so mark the method as decompiled.
// zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
// and it hasn't already been reported for this nmethod then report it now.
// (the event may have been reported earlier if the GC marked it for unloading).
!unload_reported()) {
{
}
}
}
// Zombie only stuff
VTune::delete_nmethod(this);
}
  // Check whether method got unloaded at a safepoint before this;
  // if so, we can skip the flushing steps below.
// Remove nmethod from method.
// We need to check if both the _code and _from_compiled_code_entry_point
// refer to this nmethod because there is a race in setting these two fields
// in methodOop as seen in bugid 4947125.
// If the vep() points to the zombie nmethod, the memory for the nmethod
// could be flushed and the compiler and vtable stubs could still call
// through it.
method()->clear_code();
}
}
#ifndef PRODUCT
void nmethod::check_safepoint() {
  assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
}
#endif
// Note that there are no valid oops in the nmethod anymore.
assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
// completely deallocate this method
if (PrintMethodFlushing) {
}
// We need to deallocate any ExceptionCache data.
// Note that we do not need to grab the nmethod lock for this, it
// better be thread safe if we're disposing of it!
  ExceptionCache* ec = exception_cache();
  set_exception_cache(NULL);
  while (ec != NULL) {
    ExceptionCache* next = ec->next();
    delete ec;
    ec = next;
  }
}
//
// Notify all classes this nmethod is dependent on that it is no
// longer dependent. This should only be called in two situations.
// First, when an nmethod transitions to a zombie all dependents need
// to be cleared. Since zombification happens at a safepoint there's no
// synchronization issues. The second place is a little more tricky.
// During phase 1 of mark sweep class unloading may happen and as a
// result some nmethods may get unloaded. In this case the flushing
// of dependencies must happen during phase 1 since after GC any
// dependencies in the unloaded nmethod won't be updated, so
// traversing the dependency information is unsafe. In that case this
// function is called with a non-NULL argument and this function only
// notifies instanceKlasses that are reachable
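// For example: if phase 1 unloads an instanceKlass K, an nmethod compiled
// against K is unloaded with it, and its dependency records must be flushed
// now, while K's dependent-nmethod list can still be walked safely.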
"is_alive is non-NULL if and only if we are called during GC");
  if (!has_flushed_dependencies()) {
    set_has_flushed_dependencies();
    for (Dependencies::DepStream deps(this); deps.next(); ) {
      klassOop klass = deps.context_type();
      if (klass == NULL)  continue;  // ignore things like evol_method
      // During GC the is_alive closure is non-NULL, and is used to
      // determine liveness of dependees that need to be updated.
      if (is_alive == NULL || is_alive->do_object_b(klass)) {
        instanceKlass::cast(klass)->remove_dependent_nmethod(this);
      }
}
}
}
// If this oop is not live, the nmethod can be unloaded.
return false;
}
if (obj->is_compiledICHolder()) {
if (is_alive->do_object_b(
// The oop should be kept alive
return false;
}
}
if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
// Cannot do this test if verification of the UseParallelOldGC
// code using the PSMarkSweep code is being done.
}
return true;
}
// ------------------------------------------------------------------
// post_compiled_method_load_event
// new method for install_code() path
// Transfer information from compilation to jvmti
void nmethod::post_compiled_method_load_event() {
code_begin(), code_size());
if (JvmtiExport::should_post_compiled_method_load()) {
JvmtiExport::post_compiled_method_load(this);
}
}
void nmethod::post_compiled_method_unload() {
// If a JVMTI agent has enabled the CompiledMethodUnload event then
// post the event. Sometime later this nmethod will be made a zombie by
// the sweeper but the methodOop will not be valid at that point.
}
// The JVMTI CompiledMethodUnload event can be enabled or disabled at
// any time. As the nmethod is being unloaded now we mark it as
// having the unload event reported - this will ensure that we don't
// attempt to report the event in the unlikely scenario where the
// event is enabled at the time the nmethod is made a zombie.
}
// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.
  // Make sure the oops are ready to receive visitors
"should not call follow on zombie or unloaded nmethod");
// If the method is not entrant then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
// not-entrant methods.
if (is_not_entrant()) {
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
// (See comment above.)
}
// The RedefineClasses() API can cause the class unloading invariant
// to no longer be true. See jvmtiExport.hpp for details.
// Also, leave a debugging breadcrumb in local flag.
if (a_class_was_redefined) {
// This set of the unloading_occurred flag is done before the
// call to post_compiled_method_unload() so that the unloading
// of this nmethod is reported.
unloading_occurred = true;
}
// Follow methodOop
return;
}
// Exception cache
}
}
// If class unloading occurred we first iterate over all inline caches and
// clear ICs where the cached oop is referring to an unloaded klass or method.
// The remaining live cached oops will be traversed in the relocInfo::oop_type
// iteration below.
if (unloading_occurred) {
// The only exception is compiledICHolder oops which may
// yet be marked below. (We check this further below).
if (ic_oop->is_compiledICHolder()) {
if (is_alive->do_object_b(
continue;
}
}
ic->set_to_clean();
}
}
}
}
// Compiled code
// In this loop, we must only traverse those oops directly embedded in
// the code. Other oops (oop_index>0) are seen as part of scopes_oops.
"oop must be found in exactly one place");
return;
}
}
}
}
// Scopes
return;
}
}
#ifndef PRODUCT
// This nmethod was not unloaded; check below that all CompiledICs
// refer to marked oops.
{
"Found unmarked ic_oop in reachable nmethod");
}
}
}
#endif // !PRODUCT
}
  // Make sure the oops are ready to receive visitors
"should not call follow on zombie or unloaded nmethod");
// If the method is not entrant or zombie then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
// not-entrant methods.
if (is_not_entrant()) {
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
// (See comment above.)
}
// Compiled code
}
// In this loop, we must only follow those oops directly embedded in
// the code. Other oops (oop_index>0) are seen as part of scopes_oops.
assert(1 == (r->oop_is_immediate()) + (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()), "oop must be found in exactly one place");
}
}
}
// Scopes
f->do_oop(p);
}
}
// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke
}
}
}
return NULL;
}
}
#ifdef ASSERT
// must be sorted and unique; we do a binary search in find_pc_desc()
"must start with a sentinel");
for (int i = 1; i < count; i++) {
}
"must end with a sentinel");
#endif //ASSERT
// Adjust the final sentinel downward.
// Fill any rounding gaps with copies of the last record.
}
// The following assert could fail if sizeof(PcDesc) is not
// an integral multiple of oopSize (the rounding term).
// If it fails, change the logic to always allocate a multiple
// of sizeof(PcDesc), and fill unused words with copies of *last_pc.
}
}
#ifdef ASSERT
static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
  PcDesc* lower = nm->scopes_pcs_begin();
  PcDesc* upper = nm->scopes_pcs_end();
  lower += 1; // exclude initial sentinel
  PcDesc* res = NULL;
  for (PcDesc* p = lower; p < upper; p++) {
    if (match_desc(p, pc_offset, approximate)) {
      if (res == NULL)
        res = p;
      else
        res = (PcDesc*) badAddress;
    }
  }
return res;
}
#endif
// Finds a PcDesc with real-pc equal to "pc"
PcDesc* nmethod::find_pc_desc(address pc, bool approximate) {
  address base_address = instructions_begin();
  if ((pc < base_address) ||
      (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
    return NULL;  // PC is wildly out of range
  }
  int pc_offset = (int) (pc - base_address);
// Check the PcDesc cache if it contains the desired PcDesc
  // (This has an almost 100% hit rate.)
  PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
  if (res != NULL) {
    assert(res == linear_search(this, pc_offset, approximate), "cache ok");
    return res;
  }
// Fallback algorithm: quasi-linear search for the PcDesc
// Find the last pc_offset less than the given offset.
// The successor must be the required match, if there is a match at all.
// (Use a fixed radix to avoid expensive affine pointer arithmetic.)
  PcDesc* lower = scopes_pcs_begin();
  PcDesc* upper = scopes_pcs_end();
  upper -= 1; // exclude final sentinel
  if (lower >= upper)  return NULL;  // native method; no PcDescs at all

#define assert_LU_OK \
  /* invariant on lower..upper during the following search: */ \
  assert(lower->pc_offset() <  pc_offset, "sanity"); \
  assert(upper->pc_offset() >= pc_offset, "sanity")
  assert_LU_OK;
  // Use the last successful return as a split point.
  PcDesc* mid = _pc_desc_cache.last_pc_desc();
  NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
  if (mid->pc_offset() < pc_offset) {
    lower = mid;
  } else {
    upper = mid;
  }
  // Take giant steps at first (4096, then 256, then 16, then 1)
  const int LOG2_RADIX = 4;
  const int RADIX = (1 << LOG2_RADIX);
  for (int step = (RADIX << (LOG2_RADIX*2)); step > 1; step >>= LOG2_RADIX) {
    while ((mid = lower + step) < upper) {
      assert_LU_OK;
      if (mid->pc_offset() < pc_offset) {
        lower = mid;
      } else {
        upper = mid;
        break;
      }
    }
  }
  // Sneak up on the value with a linear search of length ~16.
  while (true) {
    mid = lower + 1;
    if (mid->pc_offset() < pc_offset) {
      lower = mid;
    } else {
      upper = mid;
      break;
    }
  }
#undef assert_LU_OK
  if (match_desc(upper, pc_offset, approximate)) {
    _pc_desc_cache.add_pc_desc(upper);  // record this hit for next time
    return upper;
  } else {
    return NULL;
  }
}
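// Note: the decreasing-radix search above does at most three rounds of <= RADIX
// probes plus a final linear scan of <= RADIX steps over the sorted PcDesc
// array -- logarithmic work, versus the O(n) debug-only linear_search defined
// above that cross-checks the cache.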
bool nmethod::check_all_dependencies() {
bool found_check = false;
  // wholesale check of all dependencies
  for (Dependencies::DepStream deps(this); deps.next(); ) {
    if (deps.check_dependency() != NULL) {
      found_check = true;
      NOT_DEBUG(break);
}
}
return found_check; // tell caller if we found anything
}
// What has happened:
// 1) a new class dependee has been added
// 2) dependee and all its super classes have been marked
bool nmethod::check_dependency_on(DepChange& changes) {
  bool found_check = false;  // set true if we are upset
  // Evaluate only relevant dependencies.
  for (Dependencies::DepStream deps(this); deps.next(); ) {
    if (deps.spot_check_dependency_at(changes) != NULL) {
      found_check = true;
      NOT_DEBUG(break);
}
}
return found_check;
}
for (int j = 0; j < dependee_methods->length(); j++) {
// RC_TRACE macro has an embedded ResourceMark
RC_TRACE(0x01000000,
("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
if (TraceDependencies || LogCompilation)
return true;
}
}
}
}
return false;
}
// Called from mark_for_deoptimization, when dependee is invalidated.
continue;
}
return false;
}
if (is_zombie()) {
// a zombie may never be patched
return false;
}
return true;
}
// Exception happened outside inline-cache check code => we are inside
// an active nmethod => use cpc to determine a return address
#ifdef ASSERT
if (cont_offset == 0) {
print();
method()->print_codes();
print_code();
print_pcs();
}
#endif
return instructions_begin() + cont_offset;
}
void nmethod_init() {
  // make sure you didn't forget to adjust the filler fields
  assert(sizeof(nmFlags) <= 4, "nmFlags occupies more than a word");
}
//-------------------------------------------------------------------------------------------
// QQQ might we make this work from a frame??
}
}
}
return ret;
}
// -----------------------------------------------------------------------------
// Verification
// Hmm: that OSR methods can be deopted but not marked as zombie or not_entrant
// seems odd.
if( is_zombie() || is_not_entrant() )
return;
// Make sure all the entry points are correctly aligned for patching.
}
if(is_native_method() )
return;
if (nm != this) {
}
if (! p->verify(this)) {
}
}
}
  // This code does not work in release mode since
  // owns_lock is only available in debug mode.
} else {
}
pd->obj_decode_offset());
}
}
void nmethod::verify_scopes() {
if( !method() ) return; // Runtime stubs have no scope
  // iterate through all interrupt points
// and verify the debug information is valid.
case relocInfo::virtual_call_type:
break;
case relocInfo::opt_virtual_call_type:
break;
case relocInfo::static_call_type:
//verify_interrupt_point(iter.addr());
break;
case relocInfo::runtime_call_type:
// Right now there is no way to find out which entries support
// an interrupt point. It would be nice if we had this
// information in a table.
break;
}
}
}
// -----------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT
void nmethod::check_store() {
// Make sure all oops in the compiled code are tenured
RelocIterator iter(this);
fatal("must be permanent oop in compiled code");
}
}
}
}
// Printing operations
if (is_compiled_by_c1()) {
} else if (is_compiled_by_c2()) {
} else {
}
if (WizardMode) {
}
(address)this,
size());
relocation_size());
code_begin(),
code_end(),
code_size());
stub_begin(),
stub_end(),
stub_size());
consts_begin(),
consts_end(),
consts_size());
if (scopes_data_size () > 0) tty->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
scopes_data_size());
scopes_pcs_size());
if (dependencies_size () > 0) tty->print_cr(" dependencies [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
if (handler_table_size() > 0) tty->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
oops_begin(),
oops_end(),
oops_size());
}
void nmethod::print_scopes() {
  // Find the first pc desc for all scopes in the code and print it.
  ResourceMark rm;
  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
    if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
      continue;
    ScopeDesc* sd = scope_desc_at(p->real_pc(this));
    sd->print_on(tty);
}
}
void nmethod::print_dependencies() {
}
}
}
}
void nmethod::print_code() {
ResourceMark m;
Disassembler().decode(this);
}
void nmethod::print_relocations() {
ResourceMark m; // in case methods get printed via the debugger
RelocIterator iter(this);
if (UseRelocIndex) {
if (index_size > 0) {
ip[0],
ip[1],
header_end()+ip[0],
}
}
}
ResourceMark m; // in case methods get printed via debugger
p->print(this);
}
}
bool have_one = false;
have_one = true;
}
}
}
}
// Return the last scope in (begin..end]
return new ScopeDesc(this, p->scope_decode_offset(),
p->obj_decode_offset());
}
return NULL;
}
// First, find an oopmap in (begin, end].
// We use the odd half-closed interval so that oop maps and scope descs
// which are tied to the byte after a call are printed with the call itself.
} else {
}
}
break;
}
}
}
} else {
} else {
switch (bc) {
case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
case Bytecodes::_invokeinterface:
{
else
break;
}
case Bytecodes::_getstatic:
case Bytecodes::_putstatic:
{
else
}
}
}
}
// Print all scopes
} else {
}
if (lineno != -1) {
} else {
}
}
}
// Print relocation information
}
if (cont_offset != 0) {
st->print("; implicit exception: dispatches to " INTPTR_FORMAT, instructions_begin() + cont_offset);
}
}
}
RelocIterator iter(this);
case relocInfo::virtual_call_type:
case relocInfo::opt_virtual_call_type: {
break;
}
case relocInfo::static_call_type:
break;
}
}
}
void nmethod::print_handler_table() {
ExceptionHandlerTable(this).print();
}
void nmethod::print_nul_chk_table() {
  ImplicitExceptionTable(this).print(instructions_begin());
}
void nmethod::print_statistics() {
}
#endif // PRODUCT