// rewriter.cpp revision 856
/*
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
# include "incls/_precompiled.incl"
# include "incls/_rewriter.cpp.incl"
// Computes a CPC map (new_index -> original_index) for constant pool entries
// that are referred to by the interpreter at runtime via the constant pool cache.
// Also computes a CP map (original_index -> new_index).
// Marks entries in CP which require additional processing.
void Rewriter::compute_index_maps() {
// NOTE(review): this chunk appears truncated -- the declarations of
// 'length' and 'tag' (presumably the constant pool length and the tag of
// entry i) are missing from the visible source. TODO: confirm against the
// full file before editing.
for (int i = 0; i < length; i++) {
// Only Fieldref/Methodref (and, presumably, other member-ref) tags are
// interesting here; visible cases fall through to a common break.
switch (tag) {
case JVM_CONSTANT_Fieldref : // fall through
case JVM_CONSTANT_Methodref : // fall through
break;
}
}
// NOTE(review): the dangling message text below looks like the tail of a
// guarantee()/assert() call whose opening line is missing from this chunk.
"all cp cache indexes fit in a u2");
}
// Hack: We put it on the map as an encoded value.
// The only place that consumes this is ConstantPoolCacheEntry::set_initial_state
// NOTE(review): the closing brace below has no visible opening -- it most
// likely terminates a separate method whose signature was lost in this chunk.
}
// Creates a constant pool cache given a CPC map
// This creates the constant pool cache initially in a state
// that is unsafe for concurrent GC processing but sets it to
// a safe mode before the constant pool cache is returned.
// NOTE(review): the function signature and body that these comments describe
// are missing from this chunk; only the closing brace below remains.
}
// The new finalization semantics says that registration of
// finalizable objects must be performed on successful return from the
// Object.<init> constructor. We could implement this trivially if
// <init> were never rewritten but since JVMTI allows this to occur, a
// more complicated solution is required. A special return bytecode
// is used only by Object.<init> to signal the finalization
// registration point. Additionally local 0 must be preserved so it's
// available to pass to the registration function. For simplicity we
// require that local 0 is never overwritten so it's available as an
// argument for registration.
// NOTE(review): the enclosing function signature and the declarations of
// 'bcs' (presumably a bytecode stream) and 'opcode' are missing from this
// chunk -- confirm against the full file.
while (!bcs.is_last_bytecode()) {
switch (opcode) {
// fall through
// NOTE(review): the dangling message below looks like the tail of an
// assert() whose opening line is missing from this chunk.
"can't overwrite local 0 in Object.<init>");
break;
}
}
}
// Rewrite a classfile-order CP index into a native-order CPC index.
// NOTE(review): the enclosing function signature and the code that computes
// (or rewrites in place) 'cp_index' are missing from this chunk; only the
// return statement survives.
return cp_index;
}
// Replace the trailing four bytes with a CPC index for the dynamic
// call site. Unlike other CPC entries, there is one per bytecode,
// not just one per distinct CP entry. In other words, the
// CPC-to-CP relation is many-to-one for invokedynamic entries.
// This means we must use a larger index size than u2 to address
// all these entries. That is the main reason invokedynamic
// must have a five-byte instruction format. (Of course, other JVM
// implementations can use the bytes for other purposes.)
// Note: We use native_u4 format exclusively for 4-byte indexes.
// NOTE(review): the function signature and body that these comments describe
// are missing from this chunk; only the closing brace below remains.
}
// Rewrites a method given the index_map information
// NOTE(review): the enclosing function signature is missing from this chunk,
// as are the declarations of 'code_base', 'c', and 'method' used below --
// confirm against the full file before editing.
int nof_jsrs = 0;
bool has_monitor_bytecodes = false;
{
// We cannot tolerate a GC in this block, because we've
// cached the bytecodes in 'code_base'. If the methodOop
// moves, the bytecodes will also move.
// Bytecodes and their length
// NOTE(review): 'bc_length' is read uninitialized below; the assignment
// that computes it has evidently been dropped from this chunk.
int bc_length;
int prefix_length = 0;
// Since we have the code, see if we can get the length
// directly. Some more complicated bytecodes will report
// a length of zero, meaning we need to make another method
// call to calculate the length.
if (bc_length == 0) {
// length_at will put us at the bytecode after the one modified
// by 'wide'. We don't currently examine any of the bytecodes
// modified by wide, but in case we do in the future...
prefix_length = 1;
}
}
// Dispatch on the current bytecode; most case bodies have been stripped
// from this chunk (e.g. the lookupswitch rewrite for the template
// interpreter, guarded by CC_INTERP).
switch (c) {
case Bytecodes::_lookupswitch : {
#ifndef CC_INTERP
// NOTE(review): dangling ')' below is the tail of a call whose opening
// lines are missing from this chunk.
);
#endif
break;
}
case Bytecodes::_invokestatic :
case Bytecodes::_invokeinterface:
break;
case Bytecodes::_invokedynamic:
break;
}
}
}
// Update access flags
if (has_monitor_bytecodes) {
// NOTE(review): body stripped; presumably sets a monitor-related access
// flag on the method -- confirm against the full file.
}
// The presence of a jsr bytecode implies that the method might potentially
// have to be rewritten, so we run the oopMapGenerator on the method
if (nof_jsrs > 0) {
method->set_has_jsrs();
// Second pass will revisit this method.
}
}
// After constant pool is created, revisit methods containing jsrs.
// NOTE(review): the enclosing function signature is missing from this chunk,
// as are the declarations of 'method', 'original_method', and 'romc'
// (presumably a ResolveOopMapConflicts-style rewriter) -- confirm against
// the full file.
if (method() != original_method()) {
// Insert invalid bytecode into original methodOop and set
// interpreter entrypoint, so that executing this method
// will manifest itself in an easily recognizable form.
}
// Update monitor matching info.
if (romc.monitor_safe()) {
// NOTE(review): body stripped from this chunk.
}
return method;
}
// (That's all, folks.)
// NOTE(review): the closing brace below has no visible opening -- it likely
// terminates another function whose signature was lost in this chunk.
}
// gather starting points
// NOTE(review): the enclosing function/constructor signature is missing from
// this chunk, as are the declarations of 'i', 'len', 'm', and '_methods' and
// the loop conditions that select Object.<init> -- confirm against the full
// file before editing.
{
// determine index maps for methodOop rewriting
bool did_rewrite = false;
while (i-- > 0) {
// rewrite the return bytecodes of Object.<init> to register the
// object for finalization if needed.
rewrite_Object_init(m, CHECK);
did_rewrite = true;
break;
}
}
}
// rewrite methods, in two passes
for (i = len; --i >= 0; ) {
// NOTE(review): first-pass body (per-method scan) stripped from this chunk.
}
// allocate constant pool cache, now that we've seen all the bytecodes
for (i = len; --i >= 0; ) {
if (m->has_jsrs()) {
m = rewrite_jsrs(m, CHECK);
// Method might have gotten rewritten.
_methods->obj_at_put(i, m());
}
// Set up method entry points for compiler and interpreter.
m->link_method(m, CHECK);
}
}