/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/verifier.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/oopMapCache.hpp"
#include "interpreter/rewriter.hpp"
#include "jvmtifiles/jvmti.h"
#include "memory/genOopClosures.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/permGen.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/objArrayKlassKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/threadService.hpp"
#include "utilities/dtrace.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif
#ifndef SERIALGC
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#endif
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif
#ifdef DTRACE_ENABLED
#ifndef USDT2
{ \
int len = 0; \
} \
}
{ \
int len = 0; \
} \
}
#else /* USDT2 */
{ \
int len = 0; \
} \
}
{ \
int len = 0; \
} \
}
#endif /* USDT2 */
#else // ndef DTRACE_ENABLED
#endif // ndef DTRACE_ENABLED
return !is_initialized();
}
}
return new klassItable(as_klassOop());
}
if (!EagerInitialization) return;
if (this->is_not_initialized()) {
// abort if the class has a class initializer
if (this->class_initializer() != NULL) return;
// abort if it is java.lang.Object (initialization is handled in genesis)
// abort if the super class should be initialized
// call body to expose the this pointer
}
}
// abort if someone beat us to the initialization
if (HAS_PENDING_EXCEPTION) {
// Abort if linking the class throws an exception.
// Use a test to avoid redundantly resetting the state if there's
// no change. Set_init_state() asserts that state changes make
// progress, whereas here we might just be spinning in place.
} else {
// linking successful, mark class as initialized
// trace
if (TraceClassInitialization) {
}
}
}
// See "The Virtual Machine Specification" section 2.16.5 for a detailed explanation of the class initialization
// process. The step comments refers to the procedure described in that section.
// Note: implementation moved to static method to expose the this pointer.
if (this->should_be_initialized()) {
// Note: at this point the class may be initialized
// OR it may be in the state of being initialized
// in case of recursive initialization!
} else {
}
}
// 1) Verify the bytecodes
}
// Used exclusively by the shared spaces dump mechanism to prevent
// classes mapped into the shared regions in new VMs from appearing linked.
}
if (!is_linked()) {
}
}
// Called to verify that a class can link during initialization, without
// throwing a VerifyError.
if (!is_linked()) {
}
return is_linked();
}
// check for error state
if (this_oop->is_in_error_state()) {
this_oop->external_name(), false);
}
// return if already verified
return true;
}
// Timing
// timer handles recursion
// link super class before linking this class
"class %s has interface %s as super class",
);
return false;
}
}
// link all interfaces implemented by this class before linking this class
}
// in case the class is linked in the process of linking its superclasses
return true;
}
// trace only the link time for this klass that includes
// the verification time
// verification & rewriting
{
// rewritten will have been set if loader constraint error found
// on an earlier link attempt
// don't verify or rewrite if already rewritten
if (!this_oop->is_rewritten()) {
{
// Timer includes any side effects of class verification (resolution,
// etc), but not recursive entry into verify_code().
if (!verify_ok) {
return false;
}
}
// Just in case a side-effect of verify linked this class already
// (which can sometimes happen since the verifier loads classes
// using custom class loaders, which are free to initialize things)
return true;
}
// also sets rewritten
}
// relocate jsrs and link methods after they are all rewritten
// Initialize the vtable and interface table after
// methods have been rewritten since rewrite may
// fabricate new methodOops.
// also does loader constraint checking
}
#ifdef ASSERT
else {
// In case itable verification is ever added.
// this_oop->itable()->verify(tty, true);
}
#endif
if (JvmtiExport::should_post_class_prepare()) {
}
}
}
return true;
}
// Rewrite the byte codes of all of the methods of a class.
// The rewriter must be called exactly once. Rewriting must happen after
// verification but before the first method of the class is executed.
if (this_oop->is_rewritten()) {
return;
}
}
// Now relocate and link method entry points after class is rewritten.
// This is outside is_rewritten flag. In case of an exception, it can be
// executed more than once.
}
// Make sure klass is linked (verified) before initialization
// A class could already be verified, since it has been reflected upon.
bool wait = false;
// refer to the JVM book page 47 for description of steps
// Step 1
// Step 2
// If we were to use wait() instead of waitInterruptibly() then
// that aren't expected to throw. This would wreak havoc. See 6320309.
wait = true;
}
// Step 3
return;
}
// Step 4
if (this_oop->is_initialized()) {
return;
}
// Step 5
if (this_oop->is_in_error_state()) {
// Out of memory: can't create detailed error message
} else {
}
}
// Step 6
}
// Step 7
if (super_klass != NULL && !this_oop->is_interface() && Klass::cast(super_klass)->should_be_initialized()) {
if (HAS_PENDING_EXCEPTION) {
{
this_oop->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, set state, and notify all waiting threads
CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, superclass initialization error is thrown below
}
THROW_OOP(e());
}
}
// Step 8
{
// Timer includes any side effects of class initialization (resolution,
// etc), but not recursive entry into call_class_initializer().
}
// Step 9
if (!HAS_PENDING_EXCEPTION) {
}
}
else {
// Step 10 and 11
{
CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, class initialization error is thrown below
}
THROW_OOP(e());
} else {
&args);
}
}
}
// Note: implementation moved to static method to expose the this pointer.
}
// Sets this klass's initialization state and wakes any threads waiting on
// the initialization monitor.
// NOTE(review): the body appears empty in this view — presumably elided by
// extraction; confirm against the full source before assuming it is a no-op.
void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
}
// The embedded _implementor field can only record one implementor.
// When there are more than one implementors, the _implementor field
// is set to the interface klassOop itself. Following are the possible
// values for the _implementor field:
// NULL - no implementor
// implementor klassOop - one implementor
// self - more than one implementor
//
// The _implementor field only exists for interfaces.
// Filter out my subinterfaces.
// (Note: Interfaces are never on the subklass list.)
// Filter out subclasses whose supers already implement me.
// (Note: CHA must walk subclasses of direct implementors
// in order to locate indirect implementors.)
// We only need to check one immediate superclass, since the
// implements_interface query looks at transitive_interfaces.
// Any supers of the super have the same (or fewer) transitive_interfaces.
return;
set_implementor(k);
} else if (ik != this->as_klassOop()) {
// There is already an implementor. Use itself as an indicator of
// more than one implementors.
set_implementor(this->as_klassOop());
}
// The implementor also implements the transitive_interfaces
}
}
if (is_interface()) {
}
}
// link this class into the implementors list of every interface it implements
}
}
if (is_interface())
return false;
else
return Klass::can_be_primary_super_slow();
}
// The secondaries are the implemented interfaces.
if (num_secondaries == 0) {
return Universe::the_empty_system_obj_array();
} else if (num_extra_slots == 0) {
return interfaces();
} else {
// a mix of both
for (int i = 0; i < interfaces->length(); i++) {
}
return secondaries;
}
}
return implements_interface(k);
} else {
return Klass::compute_is_subtype_of(k);
}
}
if (as_klassOop() == k) return true;
for (int i = 0; i < transitive_interfaces()->length(); i++) {
if (transitive_interfaces()->obj_at(i) == k) {
return true;
}
}
return false;
}
report_java_out_of_memory("Requested array size exceeds VM limit");
}
objArrayOop o =
return o;
}
if (TraceFinalizerRegistration) {
i->print_value_on(tty);
}
// Pass the handle as argument, JavaCalls::call expects oop as jobjects
return h_i();
}
instanceOop i;
if (has_finalizer_flag && !RegisterFinalizersAtInit) {
i = register_finalizer(i, CHECK_NULL);
}
return i;
}
// Finalizer registration occurs in the Object.<init> constructor
// and constructors normally aren't run when allocating perm
// instances so simply disallow finalizable perm objects. This can
// be relaxed if a need for it is found.
instanceOop i = (instanceOop)
return i;
}
if (is_interface() || is_abstract()) {
}
}
}
}
klassOop instanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) {
{
// Atomic creation of array_klasses
// Check if update has already taken place
}
}
}
// _this will always be set at this point
if (or_null) {
return oak->array_klass_or_null(n);
}
}
}
}
return clinit;
}
return NULL;
}
if (TraceClassInitialization) {
tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_oop());
}
}
}
// Dirty read, then double-check under a lock.
if (_oop_map_cache == NULL) {
// Otherwise, allocate a new one.
// First time use. Allocate a cache in C heap
if (_oop_map_cache == NULL) {
_oop_map_cache = new OopMapCache();
}
}
// _oop_map_cache is constant after init; lookup below does its own locking.
}
return true;
}
}
return false;
}
}
}
klassOop instanceKlass::find_interface_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
const int n = local_interfaces()->length();
for (int i = 0; i < n; i++) {
// search for field in current interface
return intf1;
}
// search for field in direct superinterfaces
}
// otherwise field lookup fails
return NULL;
}
// search order according to newest JVM spec (5.4.3.2, p.167).
// 1) search for field in current klass
return as_klassOop();
}
// 2) search for field recursively in direct superinterfaces
}
// 3) apply field lookup recursively if superclass exists
}
// 4) otherwise field lookup fails
return NULL;
}
klassOop instanceKlass::find_field(Symbol* name, Symbol* sig, bool is_static, fieldDescriptor* fd) const {
// search order according to newest JVM spec (5.4.3.2, p.167).
// 1) search for field in current klass
}
// 2) search for field recursively in direct superinterfaces
if (is_static) {
}
// 3) apply field lookup recursively if superclass exists
}
// 4) otherwise field lookup fails
return NULL;
}
bool instanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
}
}
return false;
}
return true;
}
}
return false;
}
f(m);
}
}
}
}
}
}
void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
}
}
}
// Comparator for sorting (offset, index) pairs by field offset.
// a and b each point at a pair whose first element is the field offset.
// Returns <0, 0, or >0 as a's offset is less than, equal to, or greater
// than b's. Uses a three-way compare instead of "a[0] - b[0]" because the
// subtraction is undefined behavior (signed overflow) for values of
// opposite sign with large magnitude; only the sign matters to callers.
static int compare_fields_by_offset(int* a, int* b) {
  return (a[0] > b[0]) - (a[0] < b[0]);
}
}
// In DebugInfo nonstatic fields are sorted by offset.
int j = 0;
for (int i = 0; i < length; i += 1) {
fields_sorted[j + 1] = i;
j += 2;
}
}
if (j > 0) {
length = j;
// _sort_Fn is defined in growableArray.hpp.
for (int i = 0; i < length; i += 2) {
}
}
}
if (array_klasses() != NULL)
}
f(as_klassOop());
array_klasses_do(f);
}
#ifdef ASSERT
return index;
}
}
return -1;
}
#endif
}
// methods are sorted, so do binary search
int l = 0;
int h = len - 1;
while (l <= h) {
if (res == 0) {
// found matching name; do linear search to find matching signature
// first, quick check for common case
// search downwards through overloaded methods
int i;
for (i = mid - 1; i >= l; i--) {
}
// search upwards
for (i = mid + 1; i <= h; i++) {
}
// not found
#ifdef ASSERT
#endif
return NULL;
} else if (res < 0) {
l = mid + 1;
} else {
h = mid - 1;
}
}
#ifdef ASSERT
#endif
return NULL;
}
}
return NULL;
}
// lookup a method in all the interfaces that this class implements
for (int i = 0; i < num_ifs; i++) {
if (m != NULL) {
return m;
}
}
return NULL;
}
/* jni_id_for_impl for jfieldIds only */
// Retry lookup after we got the lock
// Slow case, allocate new static field identifier
}
return probe;
}
/* jni_id_for for jfieldIds only */
}
return probe;
}
if (inner_class_list == NULL) {
return 0;
}
if (length % inner_class_next_offset == 0) {
return 0;
} else {
}
}
u2 method_index) {
}
}
// Lookup or create a jmethodID.
// This code is called by the VMThread and JavaThreads so the
// locking has to be done very carefully to avoid deadlocks
//
// We use a double-check locking idiom here because this cache is
// performance sensitive. In the normal system, this cache only
// transitions from NULL to non-NULL which is safe because we use
// release_set_methods_jmethod_ids() to advertise the new cache.
// A partially constructed cache should never be seen by a racing
// thread. We also use release_store_ptr() to save a new jmethodID
// in the cache so a partially constructed jmethodID should never be
// seen either. Cache reads of existing jmethodIDs proceed without a
// lock, but cache writes of a new jmethodID requires uniqueness and
// creation of the cache itself requires no leaks so a lock is
// generally acquired in those two cases.
//
// If the RedefineClasses() API has been used, then this cache can
// grow and we'll have transitions from non-NULL to bigger non-NULL.
// Cache creation requires no leaks and we require safety between all
// cache accesses and freeing of the old cache so a lock is generally
// acquired when the RedefineClasses() API has been used.
// the cache already exists
if (!ik_h->idnum_can_increment()) {
// the cache can't grow so we can just get the current values
} else {
// cache can grow so we have to be more careful
if (Threads::number_of_threads() == 0 ||
// we're single threaded or at a safepoint - no locking needed
} else {
}
}
}
// implied else:
// we need to allocate a cache so default length and id values are good
// This function can be called by the VMThread so we have to do all
// things that might block on a safepoint before grabbing the lock.
// Otherwise, we can deadlock with the VMThread or have a cache
// consistency issue. These vars keep track of what we might have
// to free after the lock is dropped.
// may not allocate new_jmeths or use it if we allocate it
// allocate a new cache that might be used
// cache size is stored in element[0], other elements offset by one
}
// allocate a new jmethodID that might be used
// The method passed in is old (but not obsolete), we need to use the current version
} else {
// It is the current version of the method or an obsolete method,
// use the version passed in
}
if (Threads::number_of_threads() == 0 ||
// we're single threaded or at a safepoint - no locking needed
} else {
}
// The lock has been dropped so we can free resources.
// Free up either the old cache or the new cache if we allocated one.
if (to_dealloc_jmeths != NULL) {
}
// free up the new ID since it wasn't needed
if (to_dealloc_id != NULL) {
}
}
return id;
}
// Common code to fetch the jmethodID from the cache or update the
// cache with the new jmethodID. This function should never do anything
// that causes the caller to go to a safepoint or we can deadlock with
// the VMThread or have cache consistency issues.
//
// reacquire the cache - we are locked, single threaded or at a safepoint
// copy any existing entries from the old cache
}
}
} else {
// fetch jmethodID (if any) from the existing cache
}
// No matching jmethodID in the existing cache or we have a new
// cache or we just grew the cache. This cache write is done here
// by the first thread to win the foot race because a jmethodID
// needs to be unique once it is generally available.
// The jmethodID cache can be read while unlocked so we have to
// make sure the new jmethodID is complete before installing it
// in the cache.
} else {
}
return id;
}
// Common code to get the jmethodID cache length and the jmethodID
// value at index idnum if there is one.
//
// cache size is stored in element[0], other elements offset by one
} else {
}
}
// Lookup a jmethodID, NULL if not found. Do no blocking, no allocations, no handles
}
return id;
}
// Cache an itable index
// We use a double-check locking idiom here because this cache is
// performance sensitive. In the normal system, this cache only
// transitions from NULL to non-NULL which is safe because we use
// release_set_methods_cached_itable_indices() to advertise the
// new cache. A partially constructed cache should never be seen
// by a racing thread. Cache reads and writes proceed without a
// lock, but creation of the cache itself requires no leaks so a
// lock is generally acquired in that case.
//
// If the RedefineClasses() API has been used, then this cache can
// grow and we'll have transitions from non-NULL to bigger non-NULL.
// Cache creation requires no leaks and we require safety between all
// cache accesses and freeing of the old cache so a lock is generally
// acquired when the RedefineClasses() API has been used.
// we need a cache or the cache can grow
// reacquire the cache to see if another thread already did the work
// cache size is stored in element[0], other elements offset by one
new_indices[0] = (int)size;
// copy any existing entries
size_t i;
for (i = 0; i < length; i++) {
}
// Set all the rest to -1
}
// We have an old cache to delete so save it for after we
// drop the lock.
}
}
if (idnum_can_increment()) {
// this cache can grow so we have to write to it safely
}
} else {
}
if (!idnum_can_increment()) {
// The cache cannot grow and this JNI itable index value does not
// have to be unique like a jmethodID. If there is a race to set it,
// it doesn't matter.
}
if (to_dealloc_indices != NULL) {
// we allocated a new cache so free the old one
}
}
// Retrieve a cached itable index
// indices exist and are long enough, retrieve possible cached
}
return -1;
}
//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
// deoptimization. Returns the number of nmethods found.
//
int found = 0;
nmethodBucket* b = _dependencies;
while (b != NULL) {
// since dependencies aren't removed until an nmethod becomes a zombie,
// the dependency list may contain nmethods which aren't alive.
if (TraceDependencies) {
nm->print_dependencies();
}
found++;
}
b = b->next();
}
return found;
}
//
// Add an nmethodBucket to the list of dependencies for this nmethod.
// It's possible that an nmethod has multiple dependencies on this klass
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent.
//
nmethodBucket* b = _dependencies;
while (b != NULL) {
if (nm == b->get_nmethod()) {
b->increment();
return;
}
b = b->next();
}
}
//
// Decrement count of the nmethod in the dependency list and remove
// the bucket completely when the count goes to 0. This method must
// find a corresponding bucket otherwise there's a bug in the
// recording of dependencies.
//
nmethodBucket* b = _dependencies;
while (b != NULL) {
if (nm == b->get_nmethod()) {
if (b->decrement() == 0) {
_dependencies = b->next();
} else {
}
delete b;
}
return;
}
last = b;
b = b->next();
}
#ifdef ASSERT
#endif // ASSERT
}
#ifndef PRODUCT
nmethodBucket* b = _dependencies;
int idx = 0;
while (b != NULL) {
if (!verbose) {
} else {
nm->print_dependencies();
}
b = b->next();
}
}
nmethodBucket* b = _dependencies;
while (b != NULL) {
if (nm == b->get_nmethod()) {
return true;
}
b = b->next();
}
return false;
}
#endif //PRODUCT
#ifdef ASSERT
template <class T> void assert_is_in(T *p) {
}
}
template <class T> void assert_is_in_closed_subset(T *p) {
}
}
template <class T> void assert_is_in_reserved(T *p) {
}
}
// No-op checker for iterations that intentionally skip pointer verification.
template <class T> void assert_nothing(T *p) {}
#else
// Non-ASSERT builds: all pointer-verification helpers compile to no-ops so
// the oop-iteration macros below can invoke them unconditionally at no cost.
template <class T> void assert_is_in(T *p) {}
template <class T> void assert_is_in_closed_subset(T *p) {}
template <class T> void assert_is_in_reserved(T *p) {}
template <class T> void assert_nothing(T *p) {}
#endif // ASSERT
//
// Macros that iterate over areas of oops which are specialized on type of
// oop pointer either narrow or wide, depending on UseCompressedOops
//
// Parameters are:
// T - type of oop to point to (either oop or narrowOop)
// start_p - starting pointer for region to iterate over
// count - number of oops or narrowOops to iterate over
// do_oop - action to perform on each oop (it's arbitrary C code which
// makes it more efficient to put in a macro rather than making
// it a template function)
// assert_fn - assert function which is template function because performance
// doesn't matter when enabled.
#define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
assert_fn) \
{ \
T* p = (T*)(start_p); \
while (p < end) { \
(assert_fn)(p); \
do_oop; \
++p; \
} \
}
assert_fn) \
{ \
while (start < p) { \
--p; \
(assert_fn)(p); \
do_oop; \
} \
}
{ \
T* const l = (T*)(low); \
T* const h = (T*)(high); \
"bounded region must be properly aligned"); \
T* p = (T*)(start_p); \
if (p < l) p = l; \
while (p < end) { \
(assert_fn)(p); \
do_oop; \
++p; \
} \
}
// The following macros call specialized macros, passing either oop or
// narrowOop as the specialization type. These test the UseCompressedOops
// flag.
{ \
/* Compute oopmap block range. The common case \
is nonstatic_oop_map_size == 1. */ \
if (UseCompressedOops) { \
++map; \
} \
} else { \
++map; \
} \
} \
}
{ \
if (UseCompressedOops) { \
--map; \
} \
} else { \
--map; \
} \
} \
}
assert_fn) \
{ \
/* Compute oopmap block range. The common case is \
nonstatic_oop_map_size == 1, so we accept the \
usually non-existent extra overhead of examining \
all the maps. */ \
if (UseCompressedOops) { \
++map; \
} \
} else { \
++map; \
} \
} \
}
obj->follow_header();
obj, \
MarkSweep::mark_and_push(p), \
}
#ifndef SERIALGC
obj, \
}
#endif // SERIALGC
// closure's do_header() method dictates whether the given closure should be
// applied to the klass ptr in the object header.
\
/* header */ \
} \
obj, \
return size_helper(); \
}
#ifndef SERIALGC
\
OopClosureType* closure) { \
/* header */ \
} \
/* instance variables */ \
obj, \
return size_helper(); \
}
#endif // !SERIALGC
\
} \
return size_helper(); \
}
#ifndef SERIALGC
#endif // !SERIALGC
obj, \
MarkSweep::adjust_pointer(p), \
obj->adjust_header();
return size;
}
#ifndef SERIALGC
obj, \
if (PSScavenge::should_scavenge(p)) { \
pm->claim_or_forward_depth(p); \
}, \
}
obj, \
PSParallelCompact::adjust_pointer(p), \
return size_helper();
}
#endif // SERIALGC
// Subklass and sibling links are handled by Klass::follow_weak_klass_links
if (is_interface()) {
if (ClassUnloading) {
// remove this guy
*adr_implementor() = NULL;
}
}
} else {
}
}
}
}
m->clear_all_breakpoints();
}
// Deallocate oop map cache
if (_oop_map_cache != NULL) {
delete _oop_map_cache;
}
// Deallocate JNI identifiers for jfieldIDs
}
}
// release dependencies
nmethodBucket* b = _dependencies;
while (b != NULL) {
delete b;
b = next;
}
// Deallocate breakpoint records
if (breakpoints() != 0x0) {
}
// deallocate information about previous versions
if (_previous_versions != NULL) {
delete pv_node;
}
delete _previous_versions;
}
// deallocate the cached class file
if (_cached_class_file_bytes != NULL) {
}
// Decrement symbol reference counts associated with the unloaded class.
// unreference array name derived from this class name (arrays of an unloaded
// class can't be referenced anymore).
// walk constant pool and decrement symbol reference counts
}
_source_file_name = n;
}
} else {
// Adding one to the attribute length in order to store a null terminator
// character could cause an overflow because the attribute length is
// already coded with an u4 in the classfile, but in practice, it's
// unlikely to happen.
for (int i = 0; i < length; i++) {
}
}
}
return (address)(offset + instanceMirrorKlass::offset_of_static_fields() + (intptr_t)java_mirror());
}
int src_index = 0;
int dest_index = 0;
while (src_index < src_length) {
}
return dest;
}
// different versions of is_same_class_package
}
} else {
classloader2 = NULL;
}
}
}
// return true if two classes are in the same package, classloader
// and classname information is enough to determine a class's package
if (class_loader1 != class_loader2) {
return false;
} else if (class_name1 == class_name2) {
return true; // skip painful bytewise comparison
} else {
// The Symbol*'s are in UTF8 encoding. Since we only need to check explicitly
// for ASCII characters ('/', 'L', '['), we can keep them in UTF8 encoding.
// Otherwise, we just compare jbyte values between the strings.
// One of the two doesn't have a package. Only return true
// if the other one also doesn't have a package.
return last_slash1 == last_slash2;
} else {
// Skip over '['s
if (*name1 == '[') {
do {
name1++;
} while (*name1 == '[');
if (*name1 != 'L') {
// Something is terribly wrong. Shouldn't be here.
return false;
}
}
if (*name2 == '[') {
do {
name2++;
} while (*name2 == '[');
if (*name2 != 'L') {
// Something is terribly wrong. Shouldn't be here.
return false;
}
}
// Check that package part is identical
}
}
}
// Returns true iff super_method can be overridden by a method in targetclassname
// See JSL 3rd edition 8.4.6.1
// Assumes name-signature match
// "this" is instanceKlass of super_method which must exist
// note that the instanceKlass of the method in the targetclassname has not always been created yet
bool instanceKlass::is_override(methodHandle super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS) {
  // Private methods can not be overridden
  if (super_method->is_private()) {
    return false;
  }
  // If super method is accessible, then override
  if ((super_method->is_protected()) ||
      (super_method->is_public())) {
    return true;
  }
  // Package-private methods are not inherited outside of package, so the
  // method is overridable only when the target class lives in the same
  // runtime package (same loader + same package name) as this klass.
  // BUG FIX: control previously fell off the end of this non-void function
  // (undefined behavior) — return the same-package check explicitly.
  return is_same_class_package(targetclassloader(), targetclassname);
}
/* defined for now in jvm.cpp, for historical reasons *--
klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle self,
Symbol*& simple_name_result, TRAPS) {
...
}
*/
// tell if two classes have the same enclosing class (at package level)
// must be in same package before we try anything else
return false;
// As long as there is an outer1.getEnclosingClass,
// shift the search outward.
for (;;) {
// As we walk along, look for equalities between outer1 and class2.
// Eventually, the walks will terminate as outer1 stops
// at the top-level class around the original class.
bool ignore_inner_is_member;
}
// Now do the same for class2.
for (;;) {
bool ignore_inner_is_member;
// Might as well check the new outer against all available values.
}
// If by this point we have not found an equality between the
// two classes, we know they are in separate package members.
return false;
}
klassOop k = as_klassOop();
// But check if it happens to be member class.
// Inner class attribute can be zero, skip it.
// Strange but true: JVM spec. allows null inner class refs.
if (ioff == 0) continue;
// only look at classes that are already loaded
// since we are looking for the flags for our self.
// This is really a member class.
break;
}
}
// Remember to strip ACC_SUPER bit
}
if (is_linked()) {
}
if (is_initialized()) {
}
if (is_in_error_state()) {
}
return result;
}
/ itableOffsetEntry::size();
// If the interface isn't implemented by the receiver class,
// the VM should throw IncompatibleClassChangeError.
if (cnt >= nof_interfaces) {
}
}
if (m == NULL) {
}
return m;
}
// On-stack replacement stuff
// only one compilation can be active
// This is a short non-blocking critical region, so the no safepoint check is ok.
n->set_osr_link(osr_nmethods_head());
// Raise the highest osr level if necessary
if (TieredCompilation) {
}
// Remember to unlock again
OsrList_lock->unlock();
// Get rid of the osr methods for the same bci that have lower levels.
if (TieredCompilation) {
for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) {
inv->make_not_entrant();
}
}
}
}
// This is a short non-blocking critical region, so the no safepoint check is ok.
// Search for match
if (TieredCompilation) {
// Find max level before n
}
}
if (cur == n) {
// Remove first element
} else {
}
}
n->set_osr_link(NULL);
if (TieredCompilation) {
// Find max level after n
}
}
// Remember to unlock again
OsrList_lock->unlock();
}
nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci, int comp_level, bool match_level) const {
// This is a short non-blocking critical region, so the no safepoint check is ok.
// There can be a time when a c1 osr method exists but we are waiting
// for a c2 version. When c2 completes its osr nmethod we will trash
// the c1 version and only be able to find the c2 version. However
// while we overflow in the c1 code at back branches we don't want to
// try and switch to the same code as we are already running
if (match_level) {
// Found a match - return it.
OsrList_lock->unlock();
return osr;
}
} else {
// Found the best possible - return it.
OsrList_lock->unlock();
return osr;
}
}
}
}
}
OsrList_lock->unlock();
return best;
}
return NULL;
}
// -----------------------------------------------------------------------------------------------------
#ifndef PRODUCT
// Printing
} else {
}
}
value->is_typeArray() &&
if (!WizardMode) return; // that is enough
}
}
st->print_cr(BULLET"fake entry for static_oop_field_count: %d", java_lang_Class::static_oop_field_count(obj));
}
}
}
#endif //PRODUCT
if (k != NULL) {
k->print_value_on(st);
} else {
}
}
} else {
}
}
}
return external_name();
}
// Verification
protected:
template <class T> void do_oop_work(T* p) {
if (!obj->is_oop_or_null()) {
guarantee(false, "boom");
}
}
public:
};
}
// JNIid class for jfieldIDs only
// Note to reviewers:
// These JNI functions are just moved over to column 1 and not changed
// in the compressed oops workspace.
debug_only(_is_static_field_id = false;)
}
}
return NULL;
}
}
}
delete current;
}
}
int end_field_offset;
end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
#ifdef ASSERT
if (current->is_static_field_id()) {
}
#endif
}
}
#ifdef ASSERT
: (_init_state < state);
}
#endif
// RedefineClasses() support for previous versions:
// Add an information node that contains weak references to the
// interesting parts of the previous version of the_class.
// This is also where we clean out any unused weak references.
// Note that while we delete nodes from the _previous_versions
// array, we never delete the array itself until the klass is
// unloaded. The has_been_redefined() query depends on that fact.
//
"only VMThread can add previous versions");
if (_previous_versions == NULL) {
// This is the first previous version so make some space.
// Start with 2 elements under the assumption that the class
// won't be redefined much.
}
// RC_TRACE macro has an embedded ResourceMark
// a shared ConstantPool requires a regular reference; a weak
// reference would be collectible
} else {
}
if (emcp_method_count == 0) {
// non-shared ConstantPool gets a weak reference
RC_TRACE(0x00000400,
("add: all methods are obsolete; flushing any EMCP weak refs"));
} else {
int local_count = 0;
for (int i = 0; i < old_methods->length(); i++) {
if (emcp_methods->at(i)) {
// this old method is EMCP so save a weak ref
if (++local_count >= emcp_method_count) {
// no more EMCP methods so bail out now
break;
}
}
}
// non-shared ConstantPool gets a weak reference
}
// Using weak references allows the interesting parts of previous
// classes to be GC'ed when they are no longer needed. Since the
// caller is the VMThread and we are at a safepoint, this is a good
// time to clear out unused weak references.
_previous_versions->length()));
// skip the last entry since we just added it
// check the previous versions array for a GC'ed weak refs
delete pv_node;
// Since we are traversing the array backwards, we don't have to
// do anything special with the index.
continue; // robustness
}
// this entry has been GC'ed so remove it
delete pv_node;
// Since we are traversing the array backwards, we don't have to
// do anything special with the index.
continue;
} else {
}
if (method_refs != NULL) {
method_refs->length()));
if (method_ref == NULL) {
method_refs->remove_at(j);
// Since we are traversing the array backwards, we don't have to
// do anything special with the index.
continue; // robustness
}
// This method entry has been GC'ed or the current
// RedefineClasses() call has made all methods obsolete
// so remove it.
method_refs->remove_at(j);
} else {
// RC_TRACE macro has an embedded ResourceMark
RC_TRACE(0x00000400,
("add: %s(%s): previous method @%d in version @%d is alive",
j, i));
}
}
}
}
if (emcp_method_count != 0 && obsolete_method_count != 0 &&
// We have a mix of obsolete and EMCP methods. If there is more
// than the previous version that we just added, then we have to
// clear out any matching EMCP method entries the hard way.
int local_count = 0;
for (int i = 0; i < old_methods->length(); i++) {
if (!emcp_methods->at(i)) {
// only obsolete methods are interesting
// skip the last entry since we just added it
// check the previous versions array for a GC'ed weak refs
delete pv_node;
// Since we are traversing the array backwards, we don't have to
// do anything special with the index.
continue; // robustness
}
// this entry has been GC'ed so remove it
delete pv_node;
// Since we are traversing the array backwards, we don't have to
// do anything special with the index.
continue;
}
if (method_refs == NULL) {
// We have run into a PreviousVersion generation where
// all methods were made obsolete during that generation's
// RedefineClasses() operation. At the time of that
// operation, all EMCP methods were flushed so we don't
// have to go back any further.
//
// A NULL method_refs is different than an empty method_refs.
// We cannot infer any optimizations about older generations
// from an empty method_refs for the current generation.
break;
}
"weak method ref was unexpectedly cleared");
if (method_ref == NULL) {
method_refs->remove_at(k);
// Since we are traversing the array backwards, we don't
// have to do anything special with the index.
continue; // robustness
}
// this method entry has been GC'ed so skip it
method_refs->remove_at(k);
continue;
}
// The current RedefineClasses() call has made all EMCP
// versions of this method obsolete so mark it as obsolete
// and remove the weak ref.
RC_TRACE(0x00000400,
("add: %s(%s): flush obsolete method @%d in version @%d",
method_refs->remove_at(k);
break;
}
}
// The previous loop may not find a matching EMCP method, but
// that doesn't mean that we can optimize and not go any
// further back in the PreviousVersion generations. The EMCP
// method for this generation could have already been GC'ed,
// but there still may be an older EMCP method that has not
// been GC'ed.
}
if (++local_count >= obsolete_method_count) {
// no more obsolete methods so bail out now
break;
}
}
}
}
} // end add_previous_version()
// Determine if instanceKlass has a previous version.
if (_previous_versions == NULL) {
// no previous versions array so answer is easy
return false;
}
// Check the previous versions array for an info node that hasn't
// been GC'ed
continue; // robustness
}
// we have at least one previous version
return true;
}
// We don't have to check the method refs. If the constant pool has
// been GC'ed then so have the methods.
}
// all of the underlying nodes' info has been GC'ed
return false;
} // end has_previous_version()
}
if (m->method_idnum() == idnum) {
return m;
}
}
}
return m;
}
// Set the annotation at 'idnum' to 'anno'.
// We don't want to create or extend the array if 'anno' is NULL, since that is the
// default value. However, if the array exists and is long enough, we must set NULL values.
// create the array
// copy the existing entries
}
}
} // if no array and idnum isn't included there is nothing to do
}
// Construct a PreviousVersionNode entry for the array hung off
// the instanceKlass.
}
// Destroy a PreviousVersionNode
if (_prev_constant_pool != NULL) {
if (_prev_cp_is_weak) {
} else {
}
}
if (_prev_EMCP_methods != NULL) {
if (method_ref != NULL) {
}
}
delete _prev_EMCP_methods;
}
}
// Construct a PreviousVersionInfo entry
return; // robustness
}
// Weak reference has been GC'ed. Since the constant pool has been
// GC'ed, the methods have also been GC'ed.
return;
}
// make the constantPoolOop safe to return
if (method_refs == NULL) {
// the instanceKlass did not have any EMCP methods
return;
}
for (int i = 0; i < n_methods; i++) {
if (method_ref == NULL) {
continue; // robustness
}
// this entry has been GC'ed so skip it
continue;
}
// make the methodOop safe to return
}
}
// Destroy a PreviousVersionInfo
// Since _prev_EMCP_method_handles is not C-heap allocated, we
// don't have to delete it.
}
// Construct a helper for walking the previous versions array
_current_index = 0;
// _hm needs no initialization
_current_p = NULL;
}
// Destroy a PreviousVersionWalker
// Delete the current info just in case the caller didn't walk to
// the end of the previous versions list. No harm if _current_p is
// already NULL.
delete _current_p;
// When _hm is destroyed, all the Handles returned in
// PreviousVersionInfo objects will be destroyed.
// Also, after this destructor is finished it will be
// safe to delete the GrowableArray allocated in the
// PreviousVersionInfo objects.
}
// Return the interesting information for the next previous version
// of the klass. Returns NULL if there are no more previous versions.
if (_previous_versions == NULL) {
// no previous versions so nothing to return
return NULL;
}
delete _current_p; // cleanup the previous info for the caller
while (_current_index < length) {
delete pv_info;
// The underlying node's info has been GC'ed so try the next one.
// We don't have to check the methods. If the constant pool has
// GC'ed then so have the methods.
continue;
}
// Found a node with non GC'ed info so return it. The caller will
// need to delete pv_info when they are done with it.
return pv_info;
}
// all of the underlying nodes' info has been GC'ed
return NULL;
} // end next_previous_version()