/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/rewriter.hpp"
#include "memory/universe.inline.hpp"
#include "oops/cpCacheOop.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
// Implementation of ConstantPoolCacheEntry
}
}
int option_bits,
int field_index_or_method_params) {
// Preserve existing flag bit values
// The low bits are a field offset, or else the method parameter size.
#ifdef ASSERT
"inconsistent cpCache flags state");
#endif
return (_flags | f) ;
}
#ifdef ASSERT
// Read once.
#endif
// Need to flush pending stores here before bytecode is written.
}
#ifdef ASSERT
// Read once.
#endif
// Need to flush pending stores here before bytecode is written.
}
// Sets f1, ordering with previous writes.
// Use barriers as in oop_store
}
// Sets flags, but only if the value was previously zero.
return (result == 0);
}
#ifdef ASSERT
// It is possible to have two different dummy methodOops created
// when the resolve code for invoke interface executes concurrently
// Hence the assertion below is weakened a bit for the invokeinterface
// case.
}
#endif
// Note that concurrent update of both bytecodes can leave one of them
// reset to zero. This is harmless; the interpreter will simply re-resolve
// the damaged entry. More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
int field_index,
int field_offset,
bool is_final,
bool is_volatile) {
"field index does not fit in low flag bits");
}
// This routine is called only in corner cases where the CPCE is not yet initialized.
// See AbstractInterpreter::deopt_continue_after_entry.
// Setting the parameter size by itself is only safe if the
// current value of _flags is 0, otherwise another thread may have
// updated it and we don't want to overwrite that value. Don't
// bother trying to update it once it's nonzero but always make
// sure that the final parameter size agrees with what was passed.
if (_flags == 0) {
}
}
int vtable_index) {
int byte_no = -1;
bool change_to_virtual = false;
switch (invoke_code) {
case Bytecodes::_invokeinterface:
// We get here from InterpreterRuntime::resolve_invoke when an invokeinterface
// instruction somehow links to a non-interface method (in Object).
// In that case, the method has no itable index and must be invoked as a virtual.
// Set a flag to keep track of this corner case.
change_to_virtual = true;
// ...and fall through as if we were handling invokevirtual:
case Bytecodes::_invokevirtual:
{
if (method->can_be_statically_bound()) {
// set_f2_as_vfinal_method checks if is_vfinal flag is true.
( 1 << is_vfinal_shift) |
method()->size_of_parameters());
} else {
method()->size_of_parameters());
}
byte_no = 2;
break;
}
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
// Note: Read and preserve the value of the is_vfinal flag on any
// invokevirtual bytecode shared with this constant pool cache entry.
// It is cheap and safe to consult is_vfinal() at all times.
// Once is_vfinal is set, it must stay that way, lest we get a dangling oop.
method()->size_of_parameters());
byte_no = 1;
break;
default:
break;
}
// Note: byte_no also appears in TemplateTable::resolve.
if (byte_no == 1) {
} else if (byte_no == 2) {
if (change_to_virtual) {
// NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
//
// Workaround for the case where we encounter an invokeinterface, but we
// should really have an _invokevirtual since the resolved method is a
// virtual method in java.lang.Object. This is a corner case in the spec
// but is presumably legal. javac does not generate this code.
//
// We set bytecode_1() to _invokeinterface, because that is the
// bytecode # used by the interpreter to see if it is resolved.
// We set bytecode_2() to _invokevirtual.
// See also interpreterRuntime.cpp. (8/25/2000)
// Only set resolved for the invokeinterface case if method is public.
// Otherwise, the method needs to be reresolved with caller for each
// interface call.
} else {
}
// set up for invokevirtual, even if linking for invokeinterface also:
} else {
}
}
assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
0, // no option bits
method()->size_of_parameters());
}
}
}
// NOTE: This CPCE can be the subject of data races.
// There are three words to update: flags, f2, f1 (in that order).
// Writers must store all other values before f1.
// Readers must test f1 first for non-null before reading other fields.
// Competing writers must acquire exclusive access via a lock.
// A losing writer waits on the lock until the winner writes f1 and leaves
// the lock, so that when the losing writer returns, he can use the linked
// cache entry.
if (!is_f1_null()) {
return;
}
if (!has_appendix) {
// The extra argument is not used, but we need a non-null value to signify linkage state.
// Set it to something benign that will never leak memory.
}
// Write the flags.
( 1 << is_vfinal_shift) |
( 1 << is_final_shift),
if (TraceInvokeDynamic) {
tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method_type="PTR_FORMAT"%s method="PTR_FORMAT" ",
}
// Method handle invokes and invokedynamic sites use both cp cache words.
// f1, if not null, contains a value passed as a trailing argument to the adapter.
// In the general case, this could be the call site's MethodType,
// for use with java.lang.Invokers.checkExactType, or else a CallSite object.
// f2 contains the adapter method which manages the actual call.
// In the general case, this is a compiled LambdaForm.
// (The Java code is free to optimize these calls by binding other
// sorts of methods and appendices to call sites.)
// JVM-level linking is via f2, as if for invokevfinal, and signatures are erased.
// The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits.
// In principle this means that the method (with appendix) could take up to 256 parameter slots.
//
// This means that given a call site like (List)mh.invoke("foo"),
// the fact that String and List are involved is encoded in the MethodType in f1.
// This allows us to create fewer method oops, while keeping type safety.
//
// Store MethodType, if any.
if (has_method_type) {
// Write the flags.
( 1 << is_vfinal_shift) |
( 1 << is_final_shift),
}
if (!is_secondary_entry()) {
// The interpreter assembly code does not check byte_2,
// but it is used by is_resolved, method_if_resolved, etc.
}
if (TraceInvokeDynamic) {
}
}
if (is_secondary_entry()) {
if (!is_f1_null())
return f2_as_vfinal_method();
return NULL;
}
// Decode the action of set_method and set_interface_call
switch (invoke_code) {
case Bytecodes::_invokeinterface:
case Bytecodes::_invokestatic:
case Bytecodes::_invokespecial:
}
}
}
invoke_code = bytecode_2();
switch (invoke_code) {
case Bytecodes::_invokevirtual:
if (is_vfinal()) {
// invokevirtual
methodOop m = f2_as_vfinal_method();
return m;
} else {
}
}
break;
case Bytecodes::_invokehandle:
case Bytecodes::_invokedynamic:
return f2_as_vfinal_method();
}
}
return NULL;
}
if (is_f1_null() || !has_appendix())
return NULL;
return f1_appendix();
}
if (is_f1_null() || !has_method_type())
return NULL;
return f1_as_instance();
}
private:
public:
};
LocalOopClosure blk(f);
oop_iterate(&blk);
}
// field[1] is always oop or NULL
if (is_vfinal()) {
}
}
// field[1] is always oop or NULL
if (is_vfinal()) {
}
}
// field[1] is always oop or NULL
if (is_vfinal()) {
}
}
#ifndef SERIALGC
// field[1] is always oop or NULL
if (is_vfinal()) {
}
}
#endif // SERIALGC
// field[1] is always oop or NULL
if (is_vfinal()) {
}
}
#ifndef SERIALGC
// field[1] is always oop or NULL
if (is_vfinal()) {
}
}
#endif // SERIALGC
// RedefineClasses() API support:
// If this constantPoolCacheEntry refers to old_method then update it
// to refer to new_method.
if (is_vfinal()) {
// virtual and final so _f2 contains method ptr instead of vtable index
if (f2_as_vfinal_method() == old_method) {
// match old_method so need an update
// NOTE: can't use set_f2_as_vfinal_method as it asserts on different values
if (!(*trace_name_printed)) {
// RC_TRACE_MESG macro has an embedded ResourceMark
RC_TRACE_MESG(("adjust: name=%s",
*trace_name_printed = true;
}
// RC_TRACE macro has an embedded ResourceMark
}
return true;
}
// f1() is not used with virtual entries so bail out
return false;
}
// NULL f1() means this is a virtual entry so bail out
// We are assuming that the vtable index does not need change.
return false;
}
_f1 = new_method;
if (!(*trace_name_printed)) {
// RC_TRACE_MESG macro has an embedded ResourceMark
RC_TRACE_MESG(("adjust: name=%s",
*trace_name_printed = true;
}
// RC_TRACE macro has an embedded ResourceMark
}
return true;
}
return false;
}
// a constant pool cache entry should never contain old or obsolete methods
if (is_vfinal()) {
// virtual and final so _f2 contains method ptr instead of vtable index
// Return false if _f2 refers to an old or an obsolete method.
// _f2 == NULL || !m->is_method() are just as unexpected here.
// _f1 == NULL || !_f1->is_method() are OK here
return true;
}
// return false if _f1 refers to an old or an obsolete method
return (!m->is_old() && !m->is_obsolete());
}
if (!is_method_entry()) {
// not a method entry so not interesting by default
return false;
}
if (is_vfinal()) {
// virtual and final so _f2 contains method ptr instead of vtable index
m = f2_as_vfinal_method();
} else if (is_f1_null()) {
// NULL _f1 means this is a virtual entry so also not interesting
return false;
} else {
// _f1 can also contain a klassOop for an interface
return false;
}
m = f1_as_method();
}
// robustness for above sanity checks or method is not in
// the interesting class
return false;
}
// the method is in the interesting class so the entry is interesting
return true;
}
// print separator
// print entry
if (is_secondary_entry())
else
}
// not implemented yet
}
// Implementation of ConstantPoolCache
for (int i = 0; i < length(); i++) {
ConstantPoolCacheEntry* e = entry_at(i);
} else {
}
}
}
// RedefineClasses() API support:
// If any entry of this constantPoolCache points to any of
// old_methods, replace it with the corresponding new_method.
void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods,
int methods_length, bool * trace_name_printed) {
if (methods_length == 0) {
// nothing to do if there are no methods
return;
}
// get shorthand for the interesting class
for (int i = 0; i < length(); i++) {
// skip uninteresting methods
continue;
}
// The constantPoolCache contains entries for several different
// things, but we only care about methods. In fact, we only care
// about methods in the same class as the one that contains the
// old_methods. At this point, we have an interesting entry.
for (int j = 0; j < methods_length; j++) {
// current old_method matched this entry and we updated it so
// break out and get to the next interesting entry if there is one
break;
}
}
}
}
// the constant pool cache should never contain old or obsolete methods
for (int i = 1; i < length(); i++) {
!entry_at(i)->check_no_old_or_obsolete_entries()) {
return false;
}
}
return true;
}
for (int i = 1; i < length(); i++) {
}
}
}