/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "interpreter/bytecodes.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/rewriter.hpp"
#include "memory/gcLocker.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/generateOopMap.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodComparator.hpp"
#include "prims/methodHandles.hpp"
// Computes a CPC map (new_index -> original_index) for constant pool entries
// that are referred to by the interpreter at runtime via the constant pool cache.
// Also computes a CP map (original_index -> new_index).
// Marks entries in CP which require additional processing.
void Rewriter::compute_index_maps() {
  const int length = _pool->length();
  init_cp_map(length);   // set up the (initially empty) CP <-> CPC index maps
  bool saw_mh_symbol = false;
  for (int i = 0; i < length; i++) {
    int tag = _pool->tag_at(i).value();
    switch (tag) {
      case JVM_CONSTANT_Fieldref      : // fall through
      case JVM_CONSTANT_Methodref     : // fall through
      case JVM_CONSTANT_MethodHandle  : // fall through
      case JVM_CONSTANT_MethodType    : // fall through
      case JVM_CONSTANT_InvokeDynamic : // fall through
        add_cp_cache_entry(i);
        break;
      case JVM_CONSTANT_Utf8:
        if (_pool->symbol_at(i) == vmSymbols::java_lang_invoke_MethodHandle())
          saw_mh_symbol = true;
        break;
    }
  }

  guarantee((int)_cp_cache_map.length() - 1 <= (int)((u2)-1),
            "all cp cache indexes fit in a u2");

  if (saw_mh_symbol)
    _method_handle_invokers.initialize(length, (int)0);
}
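// Illustrative sketch (not part of the original file): one way to keep the two
// maps described above in sync using plain containers. cp_map_sketch holds
// original_index -> new_index (-1 if the entry has no cache slot yet) and
// cp_cache_map_sketch holds new_index -> original_index. All names here are
// assumptions, not the VM's own types.
#include <vector>

static int add_cp_cache_entry_sketch(std::vector<int>& cp_map_sketch,
                                     std::vector<int>& cp_cache_map_sketch,
                                     int original_index) {
  if (cp_map_sketch[original_index] == -1) {
    // First reference to this constant pool entry: append a cache slot.
    cp_map_sketch[original_index] = (int)cp_cache_map_sketch.size();
    cp_cache_map_sketch.push_back(original_index);
  }
  return cp_map_sketch[original_index];   // the new (cache) index
}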
// Unrewrite the bytecodes if an error occurs.
void Rewriter::restore_bytecodes() {
  int len = _methods->length();

  for (int i = len-1; i >= 0; i--) {
    methodOop method = (methodOop)_methods->obj_at(i);
    scan_method(method, true);
  }
}
// Creates a constant pool cache given a CPC map
void Rewriter::make_constant_pool_cache(TRAPS) {
  // (allocates the cache from _cp_cache_map and attaches it to _pool)
}
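// Illustrative sketch (not part of the original file): materializing a cache
// from the CPC map, one entry per map slot, where each entry remembers the
// original constant pool index it was derived from. Plain containers only;
// the struct and function names are assumptions.
#include <cstddef>
#include <vector>

struct CpCacheEntrySketch {
  int original_cp_index;   // new_index -> original_index, straight from the map
  bool resolved;           // resolution state would live here in a real cache
};

static std::vector<CpCacheEntrySketch> make_cp_cache_sketch(const std::vector<int>& cp_cache_map_sketch) {
  std::vector<CpCacheEntrySketch> cache(cp_cache_map_sketch.size());
  for (std::size_t i = 0; i < cp_cache_map_sketch.size(); i++) {
    cache[i].original_cp_index = cp_cache_map_sketch[i];
    cache[i].resolved = false;
  }
  return cache;
}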
// The new finalization semantics says that registration of
// finalizable objects must be performed on successful return from the
// Object.<init> constructor. We could implement this trivially if
// <init> were never rewritten but since JVMTI allows this to occur, a
// more complicated solution is required. A special return bytecode
// is used only by Object.<init> to signal the finalization
// registration point. Additionally, local 0 must be preserved so it's
// available to pass to the registration function. For simplicity we
// require that local 0 is never overwritten, so it's available as an
// argument for registration.
void Rewriter::rewrite_Object_init(methodHandle method, TRAPS) {
  RawBytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.raw_next();
    switch (opcode) {
      case Bytecodes::_return: *bcs.bcp() = Bytecodes::_return_register_finalizer; break;

      case Bytecodes::_istore:
      case Bytecodes::_lstore:
      case Bytecodes::_fstore:
      case Bytecodes::_dstore:
      case Bytecodes::_astore:
        if (bcs.get_index() != 0)  continue;
        // fall through
      case Bytecodes::_istore_0:
      case Bytecodes::_lstore_0:
      case Bytecodes::_fstore_0:
      case Bytecodes::_dstore_0:
      case Bytecodes::_astore_0:
        THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(),
                  "can't overwrite local 0 in Object.<init>");
        break;
    }
  }
}
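// Illustrative sketch (not part of the original file): the rewrite described
// above, applied to a pre-decoded list of opcodes (one entry per instruction,
// so operand bytes cannot be confused with opcodes). RETURN_OPCODE and
// ASTORE_0_OPCODE are the standard JVM values; REGISTER_FINALIZER_RETURN is an
// assumed placeholder for the VM-internal return bytecode mentioned above.
#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <vector>

static void rewrite_object_init_sketch(std::vector<uint8_t>& opcodes) {
  const uint8_t RETURN_OPCODE             = 0xb1;  // 'return'
  const uint8_t ASTORE_0_OPCODE           = 0x4b;  // 'astore_0'
  const uint8_t REGISTER_FINALIZER_RETURN = 0xe5;  // assumed internal opcode value
  for (std::size_t i = 0; i < opcodes.size(); i++) {
    if (opcodes[i] == ASTORE_0_OPCODE) {
      // Local 0 must stay intact so it can be passed to the registration function.
      throw std::runtime_error("can't overwrite local 0 in Object.<init>");
    }
    if (opcodes[i] == RETURN_OPCODE) {
      opcodes[i] = REGISTER_FINALIZER_RETURN;      // mark the registration point
    }
  }
}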
// Rewrite a classfile-order CP index into a native-order CPC index.
void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) {
  address p = bcp + offset;
  if (!reverse) {
    int cp_index = Bytes::get_Java_u2(p);                     // classfile (big-endian) order
    Bytes::put_native_u2(p, cp_entry_to_cp_cache(cp_index));  // interpreter-friendly order
    if (!_method_handle_invokers.is_empty())  maybe_rewrite_invokehandle(p - 1, cp_index, reverse);
  } else {
    int pool_index = cp_cache_entry_pool_index(Bytes::get_native_u2(p));
    Bytes::put_Java_u2(p, pool_index);                        // restore the original CP index
    if (!_method_handle_invokers.is_empty())  maybe_rewrite_invokehandle(p - 1, pool_index, reverse);
  }
}
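// Illustrative sketch (not part of the original file): the forward and reverse
// operand patch described above, using plain byte manipulation instead of the
// VM's Bytes:: helpers. The classfile ("Java") order is big-endian; the cache
// index is stored in whatever order the host CPU uses. All names are assumptions.
#include <cstdint>
#include <cstring>

static void patch_member_ref_operand_sketch(uint8_t* operand, bool reverse,
                                            uint16_t cache_index, uint16_t original_cp_index) {
  if (!reverse) {
    // Overwrite the big-endian CP index with a native-order cache index.
    std::memcpy(operand, &cache_index, sizeof cache_index);
  } else {
    // Restore the classfile-order (big-endian) constant pool index.
    operand[0] = (uint8_t)(original_cp_index >> 8);
    operand[1] = (uint8_t)(original_cp_index & 0xff);
  }
}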
// Adjust the invocation bytecode for a signature-polymorphic method (MethodHandle.invoke, etc.)
void Rewriter::maybe_rewrite_invokehandle(address opc, int cp_index, bool reverse) {
  if (!reverse) {
    if ((*opc) == (u1)Bytecodes::_invokevirtual ||
        // allow invokespecial as an alias, although it would be very odd:
        (*opc) == (u1)Bytecodes::_invokespecial) {
      // Determine whether this is a signature-polymorphic method.
      if (cp_index >= _method_handle_invokers.length())  return;
      int status = _method_handle_invokers.at(cp_index);
      assert(status >= -1 && status <= +1, "oob tristate");
      if (status == 0) {
        if (_pool->klass_ref_at_noresolve(cp_index) == vmSymbols::java_lang_invoke_MethodHandle() &&
            MethodHandles::is_signature_polymorphic_name(_pool->name_ref_at(cp_index))) {
          status = +1;
        } else {
          status = -1;
        }
        _method_handle_invokers.at_put(cp_index, status);
      }
      // We use a special internal bytecode for such methods (if non-static).
      // The basic reason for this is that such methods need an extra "appendix" argument
      // to transmit the call site's intended call type.
      if (status > 0) {
        (*opc) = (u1)Bytecodes::_invokehandle;
      }
    }
  } else {
    // Do not need to look at cp_index.
    if ((*opc) == (u1)Bytecodes::_invokehandle) {
      (*opc) = (u1)Bytecodes::_invokevirtual;
      // Ignore corner case of original _invokespecial instruction.
      // This is safe because (a) the signature polymorphic method was final, and
      // (b) the implementation of MethodHandle will not call invokespecial on it.
    }
  }
}
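// Illustrative sketch (not part of the original file): caching the
// "is this CP entry a signature-polymorphic invoker?" answer as the tristate
// used above (+1 yes, -1 no, 0 not decided yet). The predicate argument stands
// in for the real holder/name check and is an assumption.
#include <functional>
#include <vector>

static bool should_rewrite_to_invokehandle_sketch(std::vector<int>& invoker_status_sketch,
                                                  int cp_index,
                                                  const std::function<bool(int)>& is_sig_poly_sketch) {
  int status = invoker_status_sketch[cp_index];
  if (status == 0) {
    // Not decided yet: compute once and remember the answer.
    status = is_sig_poly_sketch(cp_index) ? +1 : -1;
    invoker_status_sketch[cp_index] = status;
  }
  return status > 0;   // positive means: use the special internal bytecode
}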
void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
  address p = bcp + offset;
  if (!reverse) {
    int cp_index = Bytes::get_Java_u2(p);
    // Allocate a fresh cache entry for this call site (assumed helper name; the
    // real allocation also reserves the adjacent secondary entry noted below).
    // The second secondary entry is required to store the MethodType and
    // must be the next entry.
    int cache_index = add_invokedynamic_cp_cache_entry(cp_index);

    // Replace the trailing four bytes with a CPC index for the dynamic
    // call site. Unlike other CPC entries, there is one per bytecode,
    // not just one per distinct CP entry. In other words, the
    // CPC-to-CP relation is many-to-one for invokedynamic entries.
    // This means we must use a larger index size than u2 to address
    // all these entries. That is the main reason invokedynamic
    // must have a five-byte instruction format. (Of course, other JVM
    // implementations can use the bytes for other purposes.)
    // Note: We use native_u4 format exclusively for 4-byte indexes.
    Bytes::put_native_u4(p, cache_index);
  } else {
    int cache_index = (int)Bytes::get_native_u4(p);
    int pool_index  = cp_cache_entry_pool_index(cache_index);
    // zero out 4 bytes
    Bytes::put_Java_u4(p, 0);
    // restore the classfile-order constant pool index
    Bytes::put_Java_u2(p, pool_index);
  }
}
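// Illustrative sketch (not part of the original file): storing and reading the
// 4-byte, native-order cache index that replaces invokedynamic's operand
// bytes, using memcpy instead of the VM's Bytes:: helpers.
#include <cstdint>
#include <cstring>

static void put_native_u4_sketch(uint8_t* p, uint32_t cache_index) {
  std::memcpy(p, &cache_index, sizeof cache_index);   // host byte order, alignment-safe
}

static uint32_t get_native_u4_sketch(const uint8_t* p) {
  uint32_t cache_index;
  std::memcpy(&cache_index, p, sizeof cache_index);
  return cache_index;
}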
// Rewrite some ldc bytecodes to _fast_aldc
void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
                                 bool reverse) {
  if (!reverse) {
    address p = bcp + offset;
    int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
    constantTag tag = _pool->tag_at(cp_index);
    if (tag.is_method_handle() || tag.is_method_type()) {
      int cache_index = cp_entry_to_cp_cache(cp_index);
      if (is_wide) {
        (*bcp) = Bytecodes::_fast_aldc_w;
        Bytes::put_native_u2(p, cache_index);
      } else {
        (*bcp) = Bytecodes::_fast_aldc;
        (*p) = (u1)cache_index;
      }
    }
  } else {
    Bytecodes::Code rewritten_bc = (is_wide ? Bytecodes::_fast_aldc_w : Bytecodes::_fast_aldc);
    if ((*bcp) == rewritten_bc) {
      address p = bcp + offset;
      int cache_index = is_wide ? Bytes::get_native_u2(p) : (u1)(*p);
      int pool_index = cp_cache_entry_pool_index(cache_index);
      if (is_wide) {
        (*bcp) = Bytecodes::_ldc_w;
        Bytes::put_Java_u2(p, pool_index);
      } else {
        (*bcp) = Bytecodes::_ldc;
        (*p) = (u1)pool_index;
      }
    }
  }
}
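// Illustrative sketch (not part of the original file): the narrow/wide split
// above. A narrow ldc has a single operand byte, so the cache index must fit
// in a u1; the wide form gets a 2-byte index. Plain C++, names are assumptions.
#include <cassert>
#include <cstdint>
#include <cstring>

static void patch_ldc_operand_sketch(uint8_t* operand, bool is_wide, uint16_t cache_index) {
  if (is_wide) {
    std::memcpy(operand, &cache_index, sizeof cache_index);  // 2-byte index, native order
  } else {
    assert(cache_index <= 0xff && "narrow ldc can only address 256 cache entries");
    operand[0] = (uint8_t)cache_index;                       // 1-byte operand
  }
}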
// Rewrites a method given the index_map information
void Rewriter::scan_method(methodOop method, bool reverse) {

  int nof_jsrs = 0;
  bool has_monitor_bytecodes = false;

  {
    // We cannot tolerate a GC in this block, because we've
    // cached the bytecodes in 'code_base'. If the methodOop
    // moves, the bytecodes will also move.
    No_Safepoint_Verifier nsv;

    Bytecodes::Code c;
    const address code_base = method->code_base();
    const int code_length = method->code_size();

    // Bytecodes and their length
    int bc_length;
    for (int bci = 0; bci < code_length; bci += bc_length) {
      address bcp = code_base + bci;
      int prefix_length = 0;
      c = (Bytecodes::Code)(*bcp);

      // Since we have the code, see if we can get the length
      // directly. Some more complicated bytecodes will report
      // a length of zero, meaning we need to make another method
      // call to calculate the length.
      bc_length = Bytecodes::length_for(c);
      if (bc_length == 0) {
        bc_length = Bytecodes::length_at(method, bcp);

        // length_at will put us at the bytecode after the one modified
        // by 'wide'. We don't currently examine any of the bytecodes
        // modified by wide, but in case we do in the future...
        if (c == Bytecodes::_wide) {
          prefix_length = 1;
          c = (Bytecodes::Code)bcp[1];
        }
      }

      switch (c) {
        case Bytecodes::_lookupswitch : {
#ifndef CC_INTERP
          Bytecode_lookupswitch bc(method, bcp);
          (*bcp) = (
            bc.number_of_pairs() < BinarySwitchThreshold
            ? Bytecodes::_fast_linearswitch
            : Bytecodes::_fast_binaryswitch
          );
#endif
          break;
        }
        case Bytecodes::_fast_linearswitch:
        case Bytecodes::_fast_binaryswitch: {
#ifndef CC_INTERP
          (*bcp) = Bytecodes::_lookupswitch;
#endif
          break;
        }
        case Bytecodes::_getstatic      : // fall through
        case Bytecodes::_putstatic      : // fall through
        case Bytecodes::_getfield       : // fall through
        case Bytecodes::_putfield       : // fall through
        case Bytecodes::_invokevirtual  : // fall through
        case Bytecodes::_invokespecial  : // fall through
        case Bytecodes::_invokestatic   :
        case Bytecodes::_invokeinterface:
          rewrite_member_reference(bcp, prefix_length+1, reverse);
          break;
        case Bytecodes::_invokedynamic:
          rewrite_invokedynamic(bcp, prefix_length+1, reverse);
          break;
        case Bytecodes::_ldc:
        case Bytecodes::_fast_aldc:
          maybe_rewrite_ldc(bcp, prefix_length+1, false, reverse);
          break;
        case Bytecodes::_ldc_w:
        case Bytecodes::_fast_aldc_w:
          maybe_rewrite_ldc(bcp, prefix_length+1, true, reverse);
          break;
        case Bytecodes::_jsr            : // fall through
        case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
        case Bytecodes::_monitorenter   : // fall through
        case Bytecodes::_monitorexit    : has_monitor_bytecodes = true; break;
      }
    }
  }

  // Update access flags
  if (has_monitor_bytecodes) {
    method->set_has_monitor_bytecodes();
  }

  // The presence of a jsr bytecode implies that the method might potentially
  // have to be rewritten, so we run the oopMapGenerator on the method
  if (nof_jsrs > 0) {
    method->set_has_jsrs();
    // Second pass will revisit this method.
  }
}
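// Illustrative sketch (not part of the original file): the two-step length
// computation described above. length_for_sketch() answers for fixed-length
// bytecodes and returns 0 otherwise; length_at_sketch() walks the code for the
// variable-length ones. Both function pointers are assumed stand-ins for the
// VM's Bytecodes::length_for / length_at.
#include <cstdint>

static int bytecode_length_sketch(const uint8_t* bcp,
                                  int (*length_for_sketch)(uint8_t),
                                  int (*length_at_sketch)(const uint8_t*),
                                  int* prefix_length_out) {
  const uint8_t WIDE_OPCODE = 0xc4;            // 'wide'
  *prefix_length_out = 0;
  int bc_length = length_for_sketch(*bcp);     // fast path: fixed-length bytecode
  if (bc_length == 0) {
    bc_length = length_at_sketch(bcp);         // slow path: variable-length bytecode
    if (*bcp == WIDE_OPCODE) {
      *prefix_length_out = 1;                  // the modified bytecode starts one byte later
    }
  }
  return bc_length;
}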
// After constant pool is created, revisit methods containing jsrs.
methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) {
  ResourceMark rm(THREAD);
  ResolveOopMapConflicts romc(method);
  methodHandle original_method = method;
  method = romc.do_potential_rewrite(CHECK_(methodHandle()));
  if (method() != original_method()) {
    // Insert invalid bytecode into original methodOop and set
    // interpreter entrypoint, so that executing this method
    // will manifest itself in an easily recognizable form.
    address bcp = original_method->bcp_from(0);
    *bcp = (u1)Bytecodes::_shouldnotreachhere;
    int kind = Interpreter::method_kind(original_method);
    original_method->set_interpreter_kind(kind);
  }
  // Update monitor matching info.
  if (romc.monitor_safe()) {
    method->set_guaranteed_monitor_matching();
  }
  return method;
}
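// Illustrative sketch (not part of the original file): making a stale copy of
// a method's bytecodes fail fast if it is ever executed, as described above.
// INVALID_OPCODE_SKETCH is an assumed placeholder value; the VM has its own
// designated "should not reach here" bytecode for this purpose.
#include <cstddef>
#include <cstdint>

static void poison_stale_code_sketch(uint8_t* code, std::size_t code_size) {
  const uint8_t INVALID_OPCODE_SKETCH = 0xcb;   // assumed placeholder value
  for (std::size_t i = 0; i < code_size; i++) {
    code[i] = INVALID_OPCODE_SKETCH;            // any execution now trips over an invalid opcode
  }
}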
void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
  ResourceMark rm(THREAD);
  Rewriter rw(klass, klass->constants(), klass->methods(), CHECK);
  // (That's all, folks.)
}
void Rewriter::rewrite(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS) {
  ResourceMark rm(THREAD);
  Rewriter rw(klass, cpool, methods, CHECK);
  // (That's all, folks.)
}
Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS)
  : _klass(klass),
    _pool(cpool),
    _methods(methods)
{
  assert(_pool->cache() == NULL, "constant pool cache must not be set yet");
  // determine index maps for methodOop rewriting
  compute_index_maps();
  if (RegisterFinalizersAtInit && _klass->name() == vmSymbols::java_lang_Object()) {
    bool did_rewrite = false;
    int i = methods->length();
    while (i-- > 0) {
      methodOop method = (methodOop)methods->obj_at(i);
      if (method->intrinsic_id() == vmIntrinsics::_Object_init) {
        // rewrite the return bytecodes of Object.<init> to register the
        // object for finalization if needed.
        methodHandle m(THREAD, method);
        rewrite_Object_init(m, CHECK);
        did_rewrite = true;
        break;
      }
    }
    assert(did_rewrite, "must find Object::<init> to rewrite it");
  }
  // rewrite methods, in two passes
  int len = _methods->length();

  for (int i = len-1; i >= 0; i--) {
    methodOop method = (methodOop)_methods->obj_at(i);
    scan_method(method, false);
  }

  // allocate constant pool cache, now that we've seen all the bytecodes
  make_constant_pool_cache(THREAD);

  // Restore bytecodes to their unrewritten state if there are exceptions
  // rewriting bytecodes or allocating the cpCache
  if (HAS_PENDING_EXCEPTION) {
    restore_bytecodes();
    return;
  }
}
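// Illustrative sketch (not part of the original file): the apply-then-undo
// protocol described above, expressed with C++ exceptions. Each patch knows
// how to apply itself and how to reverse itself; if anything fails after some
// patches have been applied, the whole set is rolled back. Running the reverse
// pass over a patch that was never applied is assumed to be a no-op, as the
// reverse-direction checks in the functions above suggest. Names are assumptions.
#include <cstddef>
#include <functional>
#include <vector>

static void rewrite_all_or_nothing_sketch(std::vector<std::function<void(bool)> >& patches) {
  try {
    for (std::size_t i = 0; i < patches.size(); i++) {
      patches[i](false);            // forward pass: apply the patch
    }
  } catch (...) {
    for (std::size_t i = 0; i < patches.size(); i++) {
      patches[i](true);             // error: run every patch again in reverse mode
    }
    throw;                          // re-raise so the caller still sees the failure
  }
}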
// Relocate jsr/rets in a method.  This can't be done with the rewriter
// stage because it can throw other exceptions, leaving the bytecodes
// pointing at constant pool cache entries.
// Link and check jvmti dependencies while we're iterating over the methods.
// JSR292 code calls with a different set of methods, so two entry points.
void Rewriter::relocate_and_link(instanceKlassHandle this_oop, TRAPS) {
  objArrayHandle methods(THREAD, this_oop->methods());
  relocate_and_link(this_oop, methods, THREAD);
}
void Rewriter::relocate_and_link(instanceKlassHandle this_oop, objArrayHandle methods, TRAPS) {
  int len = methods->length();
  for (int i = len-1; i >= 0; i--) {
    methodHandle m(THREAD, (methodOop)methods->obj_at(i));
    if (m->has_jsrs()) {
m = rewrite_jsrs(m, CHECK);
// Method might have gotten rewritten.
methods->obj_at_put(i, m());
}
    // Set up method entry points for compiler and interpreter.
m->link_method(m, CHECK);
// This is for JVMTI and unrelated to relocator but the last thing we do
#ifdef ASSERT
if (StressMethodComparator) {
static int nmc = 0;
for (int j = i; j >= 0 && j >= i-4; j--) {
        bool z = MethodComparator::methods_EMCP(m(),
                   (methodOop)methods->obj_at(j));
if (j == i && !z) {
assert(z, "method must compare equal to itself");
}
}
}
#endif //ASSERT
}
}