/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
#define SHARE_VM_OOPS_OOP_INLINE_HPP
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/barrierSet.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/compactingPermGenGen.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generation.hpp"
#include "memory/permGen.hpp"
#include "memory/specialized_oop_closures.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/klassOop.hpp"
#include "oops/markOop.inline.hpp"
#include "runtime/atomic.hpp"
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "bytes_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "bytes_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "bytes_ppc.hpp"
#endif
// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references
}
}
if (UseCompressedOops) {
} else {
}
}
// can be NULL in CMS
if (UseCompressedOops) {
} else {
}
}
if (UseCompressedOops) {
} else {
}
}
// Byte offset of the 32-bit "klass gap" that pads the object header when
// compressed klass pointers are in use.
// NOTE(review): the body of this function appears to have been truncated --
// a value-returning function that flows off the end is undefined behavior.
// It presumably returns the offset just past the narrow klass field; restore
// the computation from the original source before use.
inline int oopDesc::klass_gap_offset_in_bytes() {
}
// Only used internally and with CMS and will not work with
// UseCompressedOops
}
}
// since klasses are promoted no store check is needed
if (UseCompressedOops) {
} else {
}
}
return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}
// Store v into the 32-bit gap that follows the compressed klass pointer in
// the object header.  With uncompressed oops there is no gap, so the call
// is a no-op.
inline void oopDesc::set_klass_gap(int v) {
  if (!UseCompressedOops) {
    return;
  }
  int* const gap_addr = (int*)((intptr_t)this + klass_gap_offset_in_bytes());
  *gap_addr = v;
}
// This is only to be used during GC, for from-space objects, so no
// barrier is needed.
if (UseCompressedOops) {
} else {
}
}
// True when this object's klass identifies it as a constant-pool cache.
inline bool oopDesc::is_constantPoolCache() const {
  const bool is_cp_cache = blueprint()->oop_is_constantPoolCache();
  return is_cp_cache;
}
// Typed-address accessors: each returns a pointer to a field of this object
// located at the given byte offset (field_base(offset) supplies the raw
// address; the cast fixes the element type).
template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
inline jboolean* oopDesc::bool_field_addr(int offset) const { return (jboolean*)field_base(offset); }
inline jdouble* oopDesc::double_field_addr(int offset) const { return (jdouble*) field_base(offset); }
inline address* oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }
// Functions for getting and setting oops within instance objects.
// If the oops are compressed, the type passed to these overloaded functions
// is narrowOop. All functions are overloaded so they can be called by
// template functions without conditionals (the compiler instantiates via
// the right type and inlines the appopriate code).
// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
// offset from the heap base. Saving the check for null can save instructions
// in inner GC loops so these are separated.
}
}
}
return result;
}
return result;
}
}
}
// Load an oop out of the Java heap as is without decoding.
// Called by GC to check for null before decoding.
// Load and decode an oop out of the Java heap into a wide oop.
return decode_heap_oop_not_null(*p);
}
// Load and decode an oop out of the heap accepting null
return decode_heap_oop(*p);
}
// Store already encoded heap oop into the heap.
// Encode and store a heap oop.
*p = encode_heap_oop_not_null(v);
}
// Encode and store a heap oop allowing for null.
*p = encode_heap_oop(v);
}
// Store heap oop as is for volatile fields.
OrderAccess::release_store_ptr(p, v);
}
narrowOop v) {
OrderAccess::release_store(p, v);
}
inline void oopDesc::release_encode_store_heap_oop_not_null(
// heap oop is not pointer sized.
}
inline void oopDesc::release_encode_store_heap_oop_not_null(
OrderAccess::release_store_ptr(p, v);
}
oop v) {
OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_encode_store_heap_oop(
}
// These functions are only used to exchange oop fields in instances,
// not headers.
if (UseCompressedOops) {
// encode exchange value from oop to T
// decode old from T to oop
return decode_heap_oop(old);
} else {
}
}
oop compare_value) {
if (UseCompressedOops) {
// encode exchange and compare value from oop to T
// decode old from T to oop
return decode_heap_oop(old);
} else {
}
}
// In order to put or get a field out of an instance, must first check
// if the field has been compressed and uncompress it.
return UseCompressedOops ?
}
OrderAccess::acquire();
return value;
}
}
}
OrderAccess::release();
OrderAccess::fence();
}
// Plain (unordered) primitive field accessors: read or write a primitive
// value at a byte offset within this object.  The (jint) casts on the
// sub-int stores mirror the original code; the value is narrowed again by
// the typed store through the *_field_addr pointer.
inline void oopDesc::byte_field_put(int offset, jbyte contents) { *byte_field_addr(offset) = (jint) contents; }
inline jboolean oopDesc::bool_field(int offset) const { return (jboolean) *bool_field_addr(offset); }
inline void oopDesc::bool_field_put(int offset, jboolean contents) { *bool_field_addr(offset) = (jint) contents; }
inline void oopDesc::char_field_put(int offset, jchar contents) { *char_field_addr(offset) = (jint) contents; }
inline void oopDesc::int_field_put(int offset, jint contents) { *int_field_addr(offset) = contents; }
inline void oopDesc::short_field_put(int offset, jshort contents) { *short_field_addr(offset) = (jint) contents;}
inline void oopDesc::long_field_put(int offset, jlong contents) { *long_field_addr(offset) = contents; }
inline void oopDesc::float_field_put(int offset, jfloat contents) { *float_field_addr(offset) = contents; }
inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; }
inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }
return UseCompressedOops ?
: decode_heap_oop((oop)
}
}
// Ordered primitive field accessors: OrderAccess::load_acquire /
// release_store pairs provide acquire-on-read and release-on-write
// semantics for each primitive field type (used for volatile Java fields).
inline jbyte oopDesc::byte_field_acquire(int offset) const { return OrderAccess::load_acquire(byte_field_addr(offset)); }
inline void oopDesc::release_byte_field_put(int offset, jbyte contents) { OrderAccess::release_store(byte_field_addr(offset), contents); }
inline jboolean oopDesc::bool_field_acquire(int offset) const { return OrderAccess::load_acquire(bool_field_addr(offset)); }
inline void oopDesc::release_bool_field_put(int offset, jboolean contents) { OrderAccess::release_store(bool_field_addr(offset), contents); }
inline jchar oopDesc::char_field_acquire(int offset) const { return OrderAccess::load_acquire(char_field_addr(offset)); }
inline void oopDesc::release_char_field_put(int offset, jchar contents) { OrderAccess::release_store(char_field_addr(offset), contents); }
inline jint oopDesc::int_field_acquire(int offset) const { return OrderAccess::load_acquire(int_field_addr(offset)); }
inline void oopDesc::release_int_field_put(int offset, jint contents) { OrderAccess::release_store(int_field_addr(offset), contents); }
inline jshort oopDesc::short_field_acquire(int offset) const { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
inline void oopDesc::release_short_field_put(int offset, jshort contents) { OrderAccess::release_store(short_field_addr(offset), contents); }
inline jlong oopDesc::long_field_acquire(int offset) const { return OrderAccess::load_acquire(long_field_addr(offset)); }
inline void oopDesc::release_long_field_put(int offset, jlong contents) { OrderAccess::release_store(long_field_addr(offset), contents); }
inline jfloat oopDesc::float_field_acquire(int offset) const { return OrderAccess::load_acquire(float_field_addr(offset)); }
inline void oopDesc::release_float_field_put(int offset, jfloat contents) { OrderAccess::release_store(float_field_addr(offset), contents); }
inline jdouble oopDesc::double_field_acquire(int offset) const { return OrderAccess::load_acquire(double_field_addr(offset)); }
inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }
inline address oopDesc::address_field_acquire(int offset) const { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
inline void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }
int s;
// lh is now a value computed at class initialization that may hint
// at the size. For instances, this is positive and equal to the
// size. For arrays, this is negative and provides log2 of the
// array element size. For other oops, it is zero and thus requires
// a virtual call.
//
// We go to all this trouble because the size computation is at the
// heart of phase 2 of mark-compaction, and called for every object,
// alive or dead. So the speed here is equal in importance to the
// speed of allocation.
} else {
}
// The most common case is instances; fall through if so.
// Second most common case is arrays. We have to fetch the
// length of the array, shift (multiply) it appropriately,
// up to wordSize, add the header, and align to object size.
#ifdef _M_IA64
// The Windows Itanium Aug 2002 SDK hoists this load above
// the check for s < 0. An oop at the end of the heap will
// cause an access violation if this load is performed on a non
// array oop. Making the reference volatile prohibits this.
// (%%% please explain by what magic the length is actually fetched!)
volatile int *array_length;
array_length = (volatile int *)( (intptr_t)this +
// Put into size_t to avoid overflow.
#else
#endif
// This code could be simplified, but by keeping array_header_in_bytes
// in units of bytes and doing it this way we can round up just once,
// skipping the intermediate round to HeapWordSize. Cast the result
// of round_to to size_t to guarantee unsigned division == right shift.
// UseParNewGC, UseParallelGC and UseG1GC can change the length field
// of an "old copy" of an object array in the young gen so it indicates
// the grey portion of an already copied array. This will cause the first
// disjunct below to fail if the two comparands are computed across such
// a concurrent change.
// UseParNewGC also runs with promotion labs (which look like int
// filler arrays) which are subject to changing their declared size
// when finally retiring a PLAB; this also can cause the first disjunct
// to fail for another worker thread that is concurrently walking the block
// offset table. Both these invariant failures are benign for their
// current uses; we relax the assertion checking to cover these two cases below:
// is_objArray() && is_forwarded() // covers first scenario above
// || is_typeArray() // covers second scenario above
// technique, we will need to suitably modify the assertion.
((is_typeArray() && UseParNewGC) ||
"wrong array object size");
} else {
// Must be zero, so bite the bullet and take the virtual call.
}
}
assert(s > 0, "Bad size calculated");
return s;
}
return size_given_klass(blueprint());
}
// Ask the klass whether this object is parsable, i.e. whether a heap
// walker can safely step over it.
inline bool oopDesc::is_parsable() {
  const bool parsable = blueprint()->oop_is_parsable(this);
  return parsable;
}
// Ask the klass whether this object is safe for concurrent access
// (delegates the decision to oop_is_conc_safe).
inline bool oopDesc::is_conc_safe() {
  const bool conc_safe = blueprint()->oop_is_conc_safe(this);
  return conc_safe;
}
// Post-store barrier hook for an oop store into slot p.
// NOTE(review): the body appears truncated/empty -- a real implementation
// must notify the heap's barrier set of the updated slot; restore from the
// original source before use.
inline void update_barrier_set(void* p, oop v) {
}
// Pre-store barrier hook for an oop store into slot p.
// NOTE(review): the body appears truncated/empty -- presumably it forwards
// to the barrier set's pre-write notification; restore from the original
// source before use.
template <class T> inline void update_barrier_set_pre(T* p, oop v) {
}
if (always_do_update_barrier) {
oop_store((volatile T*)p, v);
} else {
update_barrier_set_pre(p, v);
oopDesc::encode_store_heap_oop(p, v);
update_barrier_set((void*)p, v); // cast away type
}
}
update_barrier_set_pre((T*)p, v); // cast away volatile
// Used by release_obj_field_put, so use release_store_ptr.
oopDesc::release_encode_store_heap_oop(p, v);
update_barrier_set((void*)p, v); // cast away type
}
// Store v into *p without performing the usual store (card-mark) check.
// NOTE(review): this block is syntactically broken -- the assert statement
// whose message is "oop store without store check failed" has lost its
// head; only the message string remains on its own line.  Restore the full
// assertion from the original source before compiling.
template <class T> inline void oop_store_without_check(T* p, oop v) {
// XXX YSR FIX ME!!!
if (always_do_update_barrier) {
oop_store(p, v);
} else {
"oop store without store check failed");
oopDesc::encode_store_heap_oop(p, v);
}
}
// When it absolutely has to get there.
// Volatile-slot variant: store v into *p without the store check, using a
// release-ordered encode+store.
// NOTE(review): syntactically broken -- the assert statement whose message
// is "oop store without store check failed" has lost its head; only the
// message string remains.  Restore the full assertion from the original
// source before compiling.
template <class T> inline void oop_store_without_check(volatile T* p, oop v) {
// XXX YSR FIX ME!!!
if (always_do_update_barrier) {
oop_store(p, v);
} else {
"oop store without store check failed");
oopDesc::release_encode_store_heap_oop(p, v);
}
}
// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
// (without having to remember the function name this calls).
if (UseCompressedOops) {
} else {
}
}
// Used only for markSweep, scavenging
// True when the GC has marked this object (used by markSweep/scavenging).
// NOTE(review): the body appears truncated -- a bool function that flows
// off the end is undefined behavior.  It presumably consults the mark word;
// restore from the original source before use.
inline bool oopDesc::is_gc_marked() const {
}
}
// True when this object's mark word carries the unlocked bit pattern.
inline bool oopDesc::is_unlocked() const {
  const bool unlocked = mark()->is_unlocked();
  return unlocked;
}
// True when this object's mark word carries the biased-locking pattern.
inline bool oopDesc::has_bias_pattern() const {
  const bool biased = mark()->has_bias_pattern();
  return biased;
}
// used only for asserts
if (!check_obj_alignment(obj)) return false;
// obj is aligned and accessible in heap
// try to find metaclass cycle safely without seg faulting on bad input
// we should reach klassKlassObj by following klass link at most 3 times
for (int i = 0; i < 3; i++) {
// klass should be aligned and in permspace
if (!check_obj_alignment(obj)) return false;
}
// During a dump, the _klassKlassObj moved to a shared space.
return true;
}
return false;
}
// Header verification: the mark is typically non-NULL. If we're
// at a safepoint, it must not be null.
// Outside of a safepoint, the header could be changing (for example,
// another thread could be inflating a lock on this object).
if (ignore_mark_word) {
return true;
}
return true;
}
return !SafepointSynchronize::is_at_safepoint();
}
// used only for asserts
}
#ifndef PRODUCT
// used only for asserts
// Debug-build helper (see enclosing #ifndef PRODUCT): reports whether the
// mark word shows the unlocked pattern, same check as is_unlocked().
inline bool oopDesc::is_unlocked_oop() const {
  const bool unlocked = mark()->is_unlocked();
  return unlocked;
}
#endif // PRODUCT
// Mark-sweep: process the klass reference held in this object's header,
// with separate paths for compressed and uncompressed klass pointers.
// NOTE(review): both branch bodies appear truncated/empty; each presumably
// pushes the (narrow or full-width) klass slot onto the marking stack --
// restore from the original source before use.
inline void oopDesc::follow_header() {
if (UseCompressedOops) {
} else {
}
}
// Mark-sweep marking: delegate tracing of this object's fields to its
// klass, which knows the object's layout.
inline void oopDesc::follow_contents(void) {
blueprint()->oop_follow_contents(this);
}
// Used by scavengers
// Used by scavengers: true when this object has been forwarded (copied)
// and its mark word records the forwarding pointer.
// NOTE(review): the body appears truncated -- a bool function that flows
// off the end is undefined behavior; restore the mark-word test from the
// original source before use.
inline bool oopDesc::is_forwarded() const {
// The extra heap check is needed since the obj might be locked, in which case the
// mark would point to a stack location and have the sentinel bit cleared
}
// Used by scavengers
"forwarding to something not aligned");
"forwarding to something not in heap");
set_mark(m);
}
// Used by parallel scavengers
"forwarding to something not aligned");
"forwarding to something not in heap");
}
// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
}
// True when this object's real mark word is stored elsewhere (displaced),
// as the mark word's helper reports.
inline bool oopDesc::has_displaced_mark() const {
  const bool displaced = mark()->has_displaced_mark_helper();
  return displaced;
}
return mark()->displaced_mark_helper();
}
mark()->set_displaced_mark_helper(m);
}
// The following method needs to be MT safe.
if (has_displaced_mark()) {
return displaced_mark()->age();
} else {
}
}
if (has_displaced_mark()) {
} else {
}
}
// Fast case; if the object is unlocked and the hash value is set, no locking is needed
// Note: The mark must be read into local variable to avoid concurrent updates.
} else {
return slow_identity_hash();
}
}
if (UseCompressedOops) {
} else {
}
}
if (UseCompressedOops) {
}
} else {
}
}
// Mark-compact: have the klass adjust (re-point) every oop field in this
// object; returns the value oop_adjust_pointers reports.
inline int oopDesc::adjust_pointers() {
  return blueprint()->oop_adjust_pointers(this);
}
// Mark-compact: adjust the klass reference in this object's header, with
// separate paths for compressed and uncompressed klass pointers.
// NOTE(review): both branch bodies appear truncated/empty; each presumably
// adjusts the corresponding klass slot -- restore from the original source
// before use.
inline void oopDesc::adjust_header() {
if (UseCompressedOops) {
} else {
}
}
\
} \
\
}
#ifndef SERIALGC
\
}
#endif // !SERIALGC
return CompactingPermGenGen::is_shared(this);
}
// True when this object lives in the read-only region of the shared
// (CDS) permanent generation.
inline bool oopDesc::is_shared_readonly() const {
  const bool in_ro_space = CompactingPermGenGen::is_shared_readonly(this);
  return in_ro_space;
}
// True when this object lives in the read-write region of the shared
// (CDS) permanent generation.
inline bool oopDesc::is_shared_readwrite() const {
  const bool in_rw_space = CompactingPermGenGen::is_shared_readwrite(this);
  return in_rw_space;
}
#endif // SHARE_VM_OOPS_OOP_INLINE_HPP