/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif // SERIALGC
#include "memory/allocation.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/reflection.hpp"
#include "runtime/synchronizer.hpp"
#include "services/threadService.hpp"
#include "trace/tracing.hpp"
#include "utilities/dtrace.hpp"
/*
* Implementation of class sun.misc.Unsafe
*/
#ifndef USDT2
#endif /* !USDT2 */
#define MAX_OBJECT_SIZE \
// Can't use UNSAFE_LEAF because it has the signature of a straight
// call into the runtime (just like JVM_LEAF, funny that) but it's
// called like a Java Native and thus the wrapper built for it passes
// arguments like a JNI call. It expects those arguments to be popped
// from the stack on Intel like all good JNI args are, and adjusts the
// stack according. Since the JVM_LEAF call expects no extra
// arguments the stack isn't popped in the C code, is pushed by the
// wrapper and we get sick.
//#define UNSAFE_LEAF(result_type, header) \
// JVM_LEAF(result_type, header)
// This assert fails in a variety of ways on 32-bit systems.
// It is impossible to predict whether native code that converts
// pointers to longs will sign-extend or zero-extend the addresses.
//assert(addr == (uintptr_t)addr, "must not be odd high bits");
}
// NOTE(review): the lines above/below are the tails of two address-conversion
// helpers (presumably addr_from_java(jlong) and addr_to_java(void*)) whose
// signatures and earlier statements are missing from this chunk -- confirm
// against the full source before editing.
return (uintptr_t)p;
}
// Note: The VM's obj_field and related accessors use byte-scaled
// ("unscaled") offsets, just as the unsafe methods do.
// However, the method Unsafe.fieldOffset explicitly declines to
// guarantee this. The field offset values manipulated by the Java user
// through the Unsafe API are opaque cookies that just happen to be byte
// offsets. We represent this state of affairs by passing the cookies
// through conversion functions when going between the VM and the Unsafe API.
// The conversion functions just happen to be no-ops at present.
// NOTE(review): the signatures of the four conversion helpers below are
// missing from this chunk -- only their return statements remain. Judging by
// the "Externally callable versions" further down, they are presumably the
// field_offset <-> byte_offset and method slot <-> invocation key converters;
// TODO confirm against the full source before editing.
return field_offset;
}
return byte_offset;
}
return slot;
}
return key;
}
// Don't allow unsafe to be used to read or write the header word of oops
#ifdef ASSERT
if (p != NULL) {
"raw [ptr+disp] must be consistent with oop::field_base");
}
assert(byte_offset < p_size, err_msg("Unsafe access: offset " INT64_FORMAT " > object's size " INT64_FORMAT, byte_offset, p_size));
}
#endif
if (sizeof(char*) == sizeof(jint)) // (this constant folds!)
else
return (address)p + byte_offset;
}
// Externally callable versions:
// (Use these in compiler intrinsics which emulate unsafe primitives.)
return field_offset;
}
return byte_offset;
}
return invocation_key_from_method_slot(slot);
}
return invocation_key_to_method_slot(key);
}
///// Data in the Java heap.
volatile type_name v = OrderAccess::load_acquire((volatile type_name*)index_oop_from_field_offset_long(p, offset));
OrderAccess::release_store_fence((volatile type_name*)index_oop_from_field_offset_long(p, offset), x);
// Macros for oops that check UseCompressedOops
oop v; \
if (UseCompressedOops) { \
v = oopDesc::decode_heap_oop(n); \
} else { \
}
// The xxx140 variants for backward compatibility do not allow a full-width offset.
// NOTE(review): this fragment appears to be the body of the 1.4.0-era
// Unsafe_GetObject140 entry point; its UNSAFE_ENTRY line, the load of `ret`,
// the conditions that decide `needs_barrier`, and the G1 pre-barrier call
// inside `if (needs_barrier)` are all missing from this chunk -- confirm
// against the full source before editing.
UnsafeWrapper("Unsafe_GetObject");
#ifndef SERIALGC
// We could be accessing the referent field in a reference
// object. If G1 is enabled then we need to register a non-null
// referent with the SATB barrier.
if (UseG1GC) {
bool needs_barrier = false;
needs_barrier = true;
}
}
}
if (needs_barrier) {
}
}
#endif // SERIALGC
return ret;
UNSAFE_ENTRY(void, Unsafe_SetObject140(JNIEnv *env, jobject unsafe, jobject obj, jint offset, jobject x_h))
UnsafeWrapper("Unsafe_SetObject");
//SET_FIELD(obj, offset, oop, x);
if (UseCompressedOops) {
if (x != NULL) {
// If there is a heap base pointer, we are obliged to emit a store barrier.
} else {
}
} else {
if (x != NULL) {
// If there is a heap base pointer, we are obliged to emit a store barrier.
} else {
}
}
// The normal variants allow a null base pointer with an arbitrary address.
// But if the base pointer is non-null, the offset should make some sense.
// That is, it should be in the range [0, MAX_OBJECT_SIZE].
UnsafeWrapper("Unsafe_GetObject");
#ifndef SERIALGC
// We could be accessing the referent field in a reference
// object. If G1 is enabled then we need to register a non-null
// referent with the SATB barrier.
if (UseG1GC) {
bool needs_barrier = false;
needs_barrier = true;
}
}
}
if (needs_barrier) {
}
}
#endif // SERIALGC
return ret;
UNSAFE_ENTRY(void, Unsafe_SetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h))
UnsafeWrapper("Unsafe_SetObject");
if (UseCompressedOops) {
} else {
}
UNSAFE_ENTRY(jobject, Unsafe_GetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset))
UnsafeWrapper("Unsafe_GetObjectVolatile");
volatile oop v;
if (UseCompressedOops) {
v = oopDesc::decode_heap_oop(n);
} else {
}
OrderAccess::acquire();
UNSAFE_ENTRY(void, Unsafe_SetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h))
UnsafeWrapper("Unsafe_SetObjectVolatile");
OrderAccess::release();
if (UseCompressedOops) {
} else {
}
OrderAccess::fence();
// Sparc and X86 have atomic jlong (8 bytes) instructions
#else
// Keep old code for platforms which may not have atomic jlong (8 bytes) instructions
// Volatile long versions must use locks if !VM_Version::supports_cx8().
// support_cx8 is a surrogate for 'supports atomic long memory ops'.
UnsafeWrapper("Unsafe_GetLongVolatile");
{
if (VM_Version::supports_cx8()) {
return v;
}
else {
return value;
}
}
UNSAFE_ENTRY(void, Unsafe_SetLongVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong x))
UnsafeWrapper("Unsafe_SetLongVolatile");
{
if (VM_Version::supports_cx8()) {
}
else {
*addr = x;
}
}
#endif // not SPARC and not X86
\
UNSAFE_ENTRY(jboolean, Unsafe_Get##Boolean##140(JNIEnv *env, jobject unsafe, jobject obj, jint offset)) \
return v; \
\
UNSAFE_ENTRY(void, Unsafe_Set##Boolean##140(JNIEnv *env, jobject unsafe, jobject obj, jint offset, jboolean x)) \
\
UNSAFE_ENTRY(jboolean, Unsafe_Get##Boolean(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) \
return v; \
\
UNSAFE_ENTRY(void, Unsafe_Set##Boolean(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jboolean x)) \
\
// END DEFINE_GETSETOOP.
\
UNSAFE_ENTRY(jboolean, Unsafe_Get##Boolean##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) \
return v; \
\
UNSAFE_ENTRY(void, Unsafe_Set##Boolean##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jboolean x)) \
\
// END DEFINE_GETSETOOP_VOLATILE.
// Sparc and X86 have atomic jlong (8 bytes) instructions
#endif
// The non-intrinsified versions of setOrdered just use setVolatile
UNSAFE_ENTRY(void, Unsafe_SetOrderedInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint x))
UnsafeWrapper("Unsafe_SetOrderedInt");
UNSAFE_ENTRY(void, Unsafe_SetOrderedObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h))
UnsafeWrapper("Unsafe_SetOrderedObject");
OrderAccess::release();
if (UseCompressedOops) {
} else {
}
OrderAccess::fence();
UNSAFE_ENTRY(void, Unsafe_SetOrderedLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong x))
UnsafeWrapper("Unsafe_SetOrderedLong");
// Sparc and X86 have atomic jlong (8 bytes) instructions
#else
// Keep old code for platforms which may not have atomic long (8 bytes) instructions
{
if (VM_Version::supports_cx8()) {
}
else {
*addr = x;
}
}
#endif
////// Data in the C heap.
// Note: These do not throw NullPointerException for bad pointers.
// They just crash. Only an oop base pointer can generate a NullPointerException.
//
\
void* p = addr_from_java(addr); \
t->set_doing_unsafe_access(true); \
java_type x = *(volatile native_type*)p; \
t->set_doing_unsafe_access(false); \
return x; \
\
t->set_doing_unsafe_access(true); \
void* p = addr_from_java(addr); \
*(volatile native_type*)p = x; \
t->set_doing_unsafe_access(false); \
\
// END DEFINE_GETSETNATIVE.
// no long -- handled specially
// NOTE(review): the four fragments below look like the bodies of
// Unsafe_GetNativeLong / Unsafe_SetNativeLong / Unsafe_GetNativeAddress /
// Unsafe_SetNativeAddress; their UNSAFE_ENTRY/UNSAFE_END lines, the
// JavaThread* `t`, and the declaration of `acc` (presumably a jlong/jint
// accessor union filled by two 32-bit loads/stores -- TODO confirm) are
// missing from this chunk. Confirm against the full source before editing.
UnsafeWrapper("Unsafe_GetNativeLong");
// We do it this way to avoid problems with access to heap using 64
// bit loads, as jlong in heap could be not 64-bit aligned, and on
// some CPUs (SPARC) it leads to SIGBUS.
t->set_doing_unsafe_access(true);
void* p = addr_from_java(addr);
jlong x;
if (((intptr_t)p & 7) == 0) {
// jlong is aligned, do a volatile access
x = *(volatile jlong*)p;
} else {
// unaligned: value assembled via `acc` (declaration elided here)
x = acc.long_value;
}
t->set_doing_unsafe_access(false);
return x;
UnsafeWrapper("Unsafe_SetNativeLong");
// see comment for Unsafe_GetNativeLong
t->set_doing_unsafe_access(true);
void* p = addr_from_java(addr);
if (((intptr_t)p & 7) == 0) {
// jlong is aligned, do a volatile access
*(volatile jlong*)p = x;
} else {
// unaligned: value stored via `acc` (declaration and the two
// 32-bit stores elided here)
acc.long_value = x;
}
t->set_doing_unsafe_access(false);
// Read a pointer-sized word at the raw native address `addr`.
UnsafeWrapper("Unsafe_GetNativeAddress");
void* p = addr_from_java(addr);
return addr_to_java(*(void**)p);
// Write a pointer-sized word (the Java-supplied address `x`) at `addr`.
UnsafeWrapper("Unsafe_SetNativeAddress");
void* p = addr_from_java(addr);
*(void**)p = addr_from_java(x);
////// Allocation requests
UnsafeWrapper("Unsafe_AllocateInstance");
{
}
UnsafeWrapper("Unsafe_AllocateMemory");
}
if (sz == 0) {
return 0;
}
if (x == NULL) {
}
//Copy::fill_to_words((HeapWord*)x, sz / HeapWordSize);
return addr_to_java(x);
UnsafeWrapper("Unsafe_ReallocateMemory");
void* p = addr_from_java(addr);
}
if (sz == 0) {
return 0;
}
if (x == NULL) {
}
return addr_to_java(x);
UnsafeWrapper("Unsafe_FreeMemory");
void* p = addr_from_java(addr);
if (p == NULL) {
return;
}
UNSAFE_ENTRY(void, Unsafe_SetMemory(JNIEnv *env, jobject unsafe, jlong addr, jlong size, jbyte value))
UnsafeWrapper("Unsafe_SetMemory");
}
char* p = (char*) addr_from_java(addr);
UNSAFE_ENTRY(void, Unsafe_SetMemory2(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong size, jbyte value))
UnsafeWrapper("Unsafe_SetMemory");
}
UNSAFE_ENTRY(void, Unsafe_CopyMemory(JNIEnv *env, jobject unsafe, jlong srcAddr, jlong dstAddr, jlong size))
UnsafeWrapper("Unsafe_CopyMemory");
if (size == 0) {
return;
}
}
UNSAFE_ENTRY(void, Unsafe_CopyMemory2(JNIEnv *env, jobject unsafe, jobject srcObj, jlong srcOffset, jobject dstObj, jlong dstOffset, jlong size))
UnsafeWrapper("Unsafe_CopyMemory");
if (size == 0) {
return;
}
}
// NYI: This works only for non-oop arrays at present.
// Generalizing it would be reasonable, but requires card marking.
// Also, autoboxing a Long from 0L in copyMemory(x,y, 0L,z, n) would be bad.
}
////// Random queries
// See comment at file start about UNSAFE_LEAF
//UNSAFE_LEAF(jint, Unsafe_AddressSize())
UnsafeWrapper("Unsafe_AddressSize");
return sizeof(void*);
// See comment at file start about UNSAFE_LEAF
//UNSAFE_LEAF(jint, Unsafe_PageSize())
UnsafeWrapper("Unsafe_PageSize");
return os::vm_page_size();
}
if (must_be_static >= 0) {
if (must_be_static != really_is_static) {
}
}
return field_offset_from_byte_offset(offset);
}
UnsafeWrapper("Unsafe_ObjectFieldOffset");
UnsafeWrapper("Unsafe_StaticFieldOffset");
UnsafeWrapper("Unsafe_StaticFieldBase");
// Note: In this VM implementation, a field address is always a short
// offset from the base of a klass metaobject. Thus, the full dynamic
// range of the return type is never used. However, some implementations
// might put the static field inside an array shared by many classes,
// or even at a fixed address, in which case the address could be quite
// large. In that last case, this function would return NULL, since
// the address would operate alone, without any base pointer.
if ((modifiers & JVM_ACC_STATIC) == 0) {
}
//@deprecated
UnsafeWrapper("Unsafe_FieldOffset");
// tries (but fails) to be polymorphic between static and non-static:
//@deprecated
UnsafeWrapper("Unsafe_StaticFieldBase");
}
UnsafeWrapper("Unsafe_EnsureClassInitialized");
}
k->initialize(CHECK);
}
}
UnsafeWrapper("Unsafe_ShouldBeInitialized");
}
return true;
}
return false;
}
}
} else if (k->klass_part()->oop_is_objArray()) {
scale = heapOopSize;
} else if (k->klass_part()->oop_is_typeArray()) {
assert(base == arrayOopDesc::base_offset_in_bytes(tak->element_type()), "array_header_size semantics ok");
} else {
}
}
UnsafeWrapper("Unsafe_ArrayBaseOffset");
return field_offset_from_byte_offset(base);
UnsafeWrapper("Unsafe_ArrayIndexScale");
// This VM packs both fields and array elements down to the byte.
// But watch out: If this changes, so that array references for
// a given primitive type (say, T_BOOLEAN) use different memory units
// than fields, this method MUST return zero for such arrays.
// For example, the VM used to store sub-word sized fields in full
// words in the object layout, so that accessors like getByte(Object,int)
// did not really do what one might expect for arrays. Therefore,
// this function used to report a zero scale factor, so that the user
// would know not to attempt to access sub-word array elements.
// // Code for unpacked fields:
// if (scale < wordSize) return 0;
// The following allows for a pretty general fieldOffset cookie scheme,
// but requires it to be linear in byte offset.
char buf[100];
}
static jclass Unsafe_DefineClass(JNIEnv *env, jstring name, jbyteArray data, int offset, int length, jobject loader, jobject pd) {
{
// Code lifted from JDK 1.3 ClassLoader.c
char *utfName;
char buf[128];
if (UsePerfData) {
}
return 0;
}
/* Work around 4153825. malloc crashes on Solaris when passed a
* negative size.
*/
if (length < 0) {
return 0;
}
if (body == 0) {
return 0;
}
if (env->ExceptionOccurred())
goto free_body;
goto free_body;
}
} else {
}
//VerifyFixClassname(utfName);
}
} else {
}
return result;
}
}
UNSAFE_ENTRY(jclass, Unsafe_DefineClass0(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length))
UnsafeWrapper("Unsafe_DefineClass");
{
int depthFromDefineClass0 = 1;
}
UNSAFE_ENTRY(jclass, Unsafe_DefineClass1(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length, jobject loader, jobject pd))
UnsafeWrapper("Unsafe_DefineClass");
{
}
// define a class but do not make it known to the class loader or system dictionary
// - host_class: supplies context for linkage, access control, protection domain, and class loader
// - data: bytes of a class file, a raw memory address (length gives the number of bytes)
// - cp_patches: where non-null entries exist, they replace corresponding CP entries in data
// When you load an anonymous class U, it works as if you changed its name just before loading,
// to a name that you will never use again. Since the name is lost, no other class can directly
// link to any member of U. Just after U is loaded, the only way to use it is reflectively,
// through java.lang.Class methods like Class.newInstance.
// Access checks for linkage sites within U continue to follow the same rules as for named classes.
// The package of an anonymous class is given by the package qualifier on the name under which it was loaded.
// An anonymous class also has special privileges to access any member of its host class.
// This is the main reason why this loading operation is unsafe. The purpose of this is to
// allow language implementations to simulate "open classes"; a host class in effect gets
// new code when an anonymous class is loaded alongside it. A less convenient but more
// standard way to do this is with reflection, which can also be set to ignore access
// restrictions.
// Access into an anonymous class is possible only through reflection. Therefore, there
// are no special access rules for calling into an anonymous class. The relaxed access
// rule for the host class is applied in the opposite direction: A host class reflectively
// may reflectively access one of its anonymous classes.
// If you load the same bytecodes twice, you get two different classes. You can reload
// the same bytecodes with or without varying CP patches.
// By using the CP patching array, you can have a new anonymous class U2 refer to an older one U1.
// The bytecodes for U2 should refer to U1 by a symbolic name (doesn't matter what the name is).
// The CONSTANT_Class entry for that name can be patched to refer directly to U1.
// This allows, for example, U2 to use U1 as a superclass or super-interface, or as
// an outer class (so that U2 is an anonymous inner class of anonymous U1).
// It is not possible for a named class, or an older anonymous class, to refer by
// name (via its CP) to a newer anonymous class.
// CP patching may also be used to modify (i.e., hack) the names of methods, classes,
// or type descriptors used in the loaded anonymous class.
// Finally, CP patching may be used to introduce "live" objects into the constant pool,
// instead of "dead" strings. A compiled statement like println((Object)"hello") can
// be changed to println(greeting), where greeting is an arbitrary object created before
// the anonymous class is loaded. This is useful in dynamic languages, in which
// various kinds of metaobjects must be introduced as constants into bytecode.
// Note the cast (Object), which tells the verifier to expect an arbitrary object,
// not just a literal string. For such ldc instructions, the verifier uses the
// type Object instead of String, if the loaded constant is not in fact a String.
static oop
TRAPS) {
if (UsePerfData) {
}
}
}
// caller responsible to free it:
(*temp_alloc) = body;
{
}
int class_bytes_length = (int) length;
if (class_bytes_length < 0) class_bytes_length = 0;
if (class_bytes == NULL
|| host_class == NULL
|| length != class_bytes_length)
if (cp_patches_jh != NULL) {
if (!p->is_objArray())
}
KlassHandle host_klass(THREAD, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(host_class)));
if (cp_patches_h.not_null()) {
for (int i = alen-1; i >= 0; i--) {
if (p != NULL) {
if (cp_patches == NULL)
}
}
}
{
}
// let caller initialize it as needed...
return anon_klass->java_mirror();
}
UNSAFE_ENTRY(jclass, Unsafe_DefineAnonymousClass(JNIEnv *env, jobject unsafe, jclass host_class, jbyteArray data, jobjectArray cp_patches_jh))
{
UnsafeWrapper("Unsafe_DefineAnonymousClass");
&temp_alloc, THREAD);
}
if (temp_alloc != NULL) {
}
}
UnsafeWrapper("Unsafe_MonitorEnter");
{
}
}
UnsafeWrapper("Unsafe_TryMonitorEnter");
{
}
}
UnsafeWrapper("Unsafe_MonitorExit");
{
}
}
UnsafeWrapper("Unsafe_ThrowException");
{
}
// JSR166 ------------------------------------------------------------------
UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h))
UnsafeWrapper("Unsafe_CompareAndSwapObject");
if (UseCompressedOops) {
} else {
}
if (success)
update_barrier_set((void*)addr, x);
return success;
UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x))
UnsafeWrapper("Unsafe_CompareAndSwapInt");
UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x))
UnsafeWrapper("Unsafe_CompareAndSwapLong");
if (VM_Version::supports_cx8())
else {
return success;
}
UnsafeWrapper("Unsafe_Park");
#ifndef USDT2
#else /* USDT2 */
#endif /* USDT2 */
#ifndef USDT2
#else /* USDT2 */
#endif /* USDT2 */
if (event.should_commit()) {
}
UnsafeWrapper("Unsafe_Unpark");
if (java_thread != NULL) {
if (lp != 0) {
// This cast is OK even though the jlong might have been read
// non-atomically on 32bit systems, since there, one word will
// always be zero anyway and the value set is always the same
} else {
// Grab lock if apparently null or using older version of library
if (java_thread != NULL) {
if (p != NULL) { // Bind to Java thread for next time.
}
}
}
}
}
}
if (p != NULL) {
#ifndef USDT2
#else /* USDT2 */
(uintptr_t) p);
#endif /* USDT2 */
p->unpark();
}
// NOTE(review): fragment of the Unsafe_Loadavg entry point. Its UNSAFE_ENTRY
// line, the nelem argument validation (which presumably triggers the
// `return -1`), the os::loadavg() call, the switch cases, and the copy of the
// samples back into the Java double array are missing from this chunk --
// confirm against the full source before editing.
UnsafeWrapper("Unsafe_Loadavg");
const int max_nelem = 3;
return -1;
}
// if successful, ret is the number of samples actually retrieved.
switch(ret) {
}
return ret;
UnsafeWrapper("Unsafe_PrefetchRead");
void* addr = index_oop_from_field_offset_long(p, 0);
UnsafeWrapper("Unsafe_PrefetchWrite");
void* addr = index_oop_from_field_offset_long(p, 0);
/// JVM_RegisterUnsafeMethods
#define ADR "J"
#define CC (char*) /*cast a literal from (const char*)*/
#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
// define deprecated accessors for compatibility with 1.4.0
#define DECLARE_GETSETOOP_140(Boolean, Z) \
// Note: In 1.4.1, getObject and kin take both int and long offsets.
#define DECLARE_GETSETOOP_141(Boolean, Z) \
// Note: In 1.5.0, there are volatile versions too
#define DECLARE_GETSETOOP(Boolean, Z) \
#define DECLARE_GETSETNATIVE(Byte, B) \
// %%% These are temporarily supported until the SDK sources
// contain the necessarily updated Unsafe.java.
static JNINativeMethod methods_140[] = {
DECLARE_GETSETOOP_140(Int, I),
DECLARE_GETSETNATIVE(Byte, B),
DECLARE_GETSETNATIVE(Char, C),
DECLARE_GETSETNATIVE(Int, I),
DECLARE_GETSETNATIVE(Long, J),
// {CC"setMemory", CC"("ADR"JB)V", FN_PTR(Unsafe_SetMemory)},
// {CC"copyMemory", CC"("ADR ADR"J)V", FN_PTR(Unsafe_CopyMemory)},
};
// These are the old methods prior to the JSR 166 changes in 1.5.0
static JNINativeMethod methods_141[] = {
DECLARE_GETSETOOP_141(Int, I),
DECLARE_GETSETNATIVE(Byte, B),
DECLARE_GETSETNATIVE(Char, C),
DECLARE_GETSETNATIVE(Int, I),
DECLARE_GETSETNATIVE(Long, J),
// {CC"setMemory", CC"("ADR"JB)V", FN_PTR(Unsafe_SetMemory)},
// {CC"copyMemory", CC"("ADR ADR"J)V", FN_PTR(Unsafe_CopyMemory)},
};
// These are the old methods prior to the JSR 166 changes in 1.6.0
static JNINativeMethod methods_15[] = {
DECLARE_GETSETOOP(Boolean, Z),
DECLARE_GETSETOOP(Byte, B),
DECLARE_GETSETOOP(Short, S),
DECLARE_GETSETOOP(Char, C),
DECLARE_GETSETOOP(Int, I),
DECLARE_GETSETOOP(Long, J),
DECLARE_GETSETOOP(Float, F),
DECLARE_GETSETOOP(Double, D),
DECLARE_GETSETNATIVE(Byte, B),
DECLARE_GETSETNATIVE(Char, C),
DECLARE_GETSETNATIVE(Int, I),
DECLARE_GETSETNATIVE(Long, J),
// {CC"setMemory", CC"("ADR"JB)V", FN_PTR(Unsafe_SetMemory)},
// {CC"copyMemory", CC"("ADR ADR"J)V", FN_PTR(Unsafe_CopyMemory)},
};
// These are the correct methods, moving forward:
static JNINativeMethod methods[] = {
DECLARE_GETSETOOP(Boolean, Z),
DECLARE_GETSETOOP(Byte, B),
DECLARE_GETSETOOP(Short, S),
DECLARE_GETSETOOP(Char, C),
DECLARE_GETSETOOP(Int, I),
DECLARE_GETSETOOP(Long, J),
DECLARE_GETSETOOP(Float, F),
DECLARE_GETSETOOP(Double, D),
DECLARE_GETSETNATIVE(Byte, B),
DECLARE_GETSETNATIVE(Char, C),
DECLARE_GETSETNATIVE(Int, I),
DECLARE_GETSETNATIVE(Long, J),
// {CC"setMemory", CC"("ADR"JB)V", FN_PTR(Unsafe_SetMemory)},
// {CC"copyMemory", CC"("ADR ADR"J)V", FN_PTR(Unsafe_CopyMemory)},
// {CC"getLoadAverage", CC"([DI)I", FN_PTR(Unsafe_Loadavg)},
// {CC"prefetchRead", CC"("OBJ"J)V", FN_PTR(Unsafe_PrefetchRead)},
// {CC"prefetchWrite", CC"("OBJ"J)V", FN_PTR(Unsafe_PrefetchWrite)}
// {CC"prefetchReadStatic", CC"("OBJ"J)V", FN_PTR(Unsafe_PrefetchRead)},
// {CC"prefetchWriteStatic",CC"("OBJ"J)V", FN_PTR(Unsafe_PrefetchWrite)}
};
};
};
};
};
JNINativeMethod anonk_methods[] = {
};
JNINativeMethod lform_methods[] = {
};
// This one function is exported, used by NativeLookup.
// The Unsafe_xxx functions above are called only from the interpreter.
// The optimizer looks at names and signatures to recognize
// individual functions.
UnsafeWrapper("JVM_RegisterUnsafeMethods");
{
{
if (env->ExceptionOccurred()) {
}
env->ExceptionClear();
}
}
{
env->RegisterNatives(unsafecls, prefetch_methods, sizeof(prefetch_methods)/sizeof(JNINativeMethod));
if (env->ExceptionOccurred()) {
}
env->ExceptionClear();
}
}
{
if (env->ExceptionOccurred()) {
}
env->ExceptionClear();
env->RegisterNatives(unsafecls, memcopy_methods_15, sizeof(memcopy_methods_15)/sizeof(JNINativeMethod));
if (env->ExceptionOccurred()) {
}
env->ExceptionClear();
}
}
}
if (EnableInvokeDynamic) {
if (env->ExceptionOccurred()) {
}
env->ExceptionClear();
}
}
if (EnableInvokeDynamic) {
if (env->ExceptionOccurred()) {
}
env->ExceptionClear();
}
}
if (env->ExceptionOccurred()) {
}
env->ExceptionClear();
// %%% For now, be backward compatible with an older class:
}
if (env->ExceptionOccurred()) {
}
env->ExceptionClear();
// %%% For now, be backward compatible with an older class:
}
if (env->ExceptionOccurred()) {
}
env->ExceptionClear();
// %%% For now, be backward compatible with an older class:
}
}