// cpCacheOop.cpp revision 1059
0N/A/*
844N/A * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
0N/A * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
0N/A *
0N/A * This code is free software; you can redistribute it and/or modify it
0N/A * under the terms of the GNU General Public License version 2 only, as
0N/A * published by the Free Software Foundation.
0N/A *
0N/A * This code is distributed in the hope that it will be useful, but WITHOUT
0N/A * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
0N/A * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
0N/A * version 2 for more details (a copy is included in the LICENSE file that
0N/A * accompanied this code).
0N/A *
0N/A * You should have received a copy of the GNU General Public License version
0N/A * 2 along with this work; if not, write to the Free Software Foundation,
0N/A * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
0N/A *
0N/A * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
0N/A * CA 95054 USA or visit www.sun.com if you need additional information or
0N/A * have any questions.
0N/A *
0N/A */
0N/A
0N/A#include "incls/_precompiled.incl"
0N/A#include "incls/_cpCacheOop.cpp.incl"
0N/A
0N/A
// Implementation of ConstantPoolCacheEntry
0N/A
// Initialize a primary cache entry: the original constant pool index is
// stored in the low 16 bits of _indices (the bytecode bytes above it
// start out zero, i.e. unresolved).  Index 0 is excluded, and the index
// must fit in 16 bits.
void ConstantPoolCacheEntry::initialize_entry(int index) {
  assert(0 < index && index < 0x10000, "sanity check");
  _indices = index;
  assert(constant_pool_index() == index, "");
}
0N/A
// Initialize a secondary cache entry: the index of the corresponding
// main entry is stored in the high 16 bits of _indices.  Unlike primary
// entries, a main index of 0 is legal here.
void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) {
  assert(0 <= main_index && main_index < 0x10000, "sanity check");
  _indices = (main_index << 16);
  assert(main_entry_index() == main_index, "");
}
0N/A
// Pack the given attributes into a flags word and merge it with the
// flag bits already present in _flags (existing bits are preserved by
// the final OR).  Resulting layout, high to low: TosState, is_final,
// is_vfinal, is_volatile, is_method_interface, is_method, followed by
// hotSwapBit low bits (used for e.g. the field/parameter index).
int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
                    bool is_vfinal, bool is_volatile,
                    bool is_method_interface, bool is_method) {
  int f = state;

  assert( state < number_of_states, "Invalid state in as_flags");

  // Shift the state up one bit at a time, appending each flag bit.
  f <<= 1;
  if (is_final) f |= 1;
  f <<= 1;
  if (is_vfinal) f |= 1;
  f <<= 1;
  if (is_volatile) f |= 1;
  f <<= 1;
  if (is_method_interface) f |= 1;
  f <<= 1;
  if (is_method) f |= 1;
  f <<= ConstantPoolCacheEntry::hotSwapBit;
  // Preserve existing flag bit values
#ifdef ASSERT
  // Any TosState already stored in _flags must agree with the new one.
  int old_state = ((_flags >> tosBits) & 0x0F);
  assert(old_state == 0 || old_state == state,
         "inconsistent cpCache flags state");
#endif
  return (_flags | f) ;
}
0N/A
// Install the first resolved bytecode in bits 16..23 of _indices.
// The release store ensures that f1/f2/flags written earlier become
// visible to other processors before the non-zero bytecode does (a
// non-zero bytecode is what marks the entry as resolved; see the
// ordering comment above set_field below).
void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  // Legal transitions: unresolved -> code, re-store of same code, or
  // reset to 0; anything else indicates an inconsistent update.
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 16));
}
0N/A
// Install the second resolved bytecode in bits 24..31 of _indices.
// Same publication discipline as set_bytecode_1: the release store
// flushes f1/f2/flags before the bytecode becomes visible.
void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  // Legal transitions: unresolved -> code, re-store of same code, or
  // reset to 0.
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 24));
}
0N/A
#ifdef ASSERT
// It is possible to have two different dummy methodOops created
// when the resolve code for invoke interface executes concurrently.
// Hence the assertion below is weakened a bit for the invokeinterface
// case: two methodOops are considered "the same" if they are the
// identical oop, or if they agree on BOTH name and signature (the
// dummy-method case above).
// Fix: this previously used '||' between the name and signature
// comparisons, which accepted any two methods that merely shared a
// signature (e.g. "()V"), making the check nearly vacuous.
bool ConstantPoolCacheEntry::same_methodOop(oop cur_f1, oop f1) {
  return (cur_f1 == f1 ||
          (((methodOop)cur_f1)->name()      == ((methodOop)f1)->name() &&
           ((methodOop)cur_f1)->signature() == ((methodOop)f1)->signature()));
}
#endif
0N/A
0N/A// Note that concurrent update of both bytecodes can leave one of them
0N/A// reset to zero. This is harmless; the interpreter will simply re-resolve
0N/A// the damaged entry. More seriously, the memory synchronization is needed
0N/A// to flush other fields (f1, f2) completely to memory before the bytecodes
0N/A// are updated, lest other processors see a non-zero bytecode but zero f1/f2.
0N/Avoid ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
0N/A Bytecodes::Code put_code,
0N/A KlassHandle field_holder,
0N/A int orig_field_index,
0N/A int field_offset,
0N/A TosState field_type,
0N/A bool is_final,
0N/A bool is_volatile) {
0N/A set_f1(field_holder());
0N/A set_f2(field_offset);
0N/A // The field index is used by jvm/ti and is the index into fields() array
0N/A // in holder instanceKlass. This is scaled by instanceKlass::next_offset.
0N/A assert((orig_field_index % instanceKlass::next_offset) == 0, "wierd index");
0N/A const int field_index = orig_field_index / instanceKlass::next_offset;
0N/A assert(field_index <= field_index_mask,
0N/A "field index does not fit in low flag bits");
0N/A set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) |
0N/A (field_index & field_index_mask));
0N/A set_bytecode_1(get_code);
0N/A set_bytecode_2(put_code);
0N/A NOT_PRODUCT(verify(tty));
0N/A}
0N/A
// Recover the jvm/ti field index from the low flag bits, re-applying
// the instanceKlass::next_offset scaling undone in set_field.
int ConstantPoolCacheEntry::field_index() const {
  return (_flags & field_index_mask) * instanceKlass::next_offset;
}
0N/A
// Resolve this entry for a method invocation.  Depending on the invoke
// bytecode, either f1 (methodOop, byte_no 1) or f2 (methodOop if
// statically bindable, else vtable index; byte_no 2) identifies the
// target.  Flags encode the result TosState, finality, vfinal and the
// parameter size; the resolved state is published last via the
// release-storing bytecode setters.
void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                        methodHandle method,
                                        int vtable_index) {

  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache");
  // invokeinterface resolving to an Object method is dispatched as
  // invokevirtual (see the HACK note below).
  bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface);

  int byte_no = -1;
  bool needs_vfinal_flag = false;
  switch (invoke_code) {
    case Bytecodes::_invokedynamic:
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
      if (method->can_be_statically_bound()) {
        // vfinal: f2 holds the methodOop itself instead of a vtable index.
        set_f2((intptr_t)method());
        needs_vfinal_flag = true;
      } else {
        assert(vtable_index >= 0, "valid index");
        set_f2(vtable_index);
      }
      byte_no = 2;
      break;
    }
    case Bytecodes::_invokespecial:
      // Preserve the value of the vfinal flag on invokevirtual bytecode
      // which may be shared with this constant pool cache entry.
      needs_vfinal_flag = is_resolved(Bytecodes::_invokevirtual) && is_vfinal();
      // fall through
    case Bytecodes::_invokestatic:
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  // Parameter size goes into the low (hotSwapBit) flag bits.
  set_flags(as_flags(as_TosState(method->result_type()),
                     method->is_final_method(),
                     needs_vfinal_flag,
                     false,
                     change_to_virtual,
                     true)|
            method()->size_of_parameters());

  // Note: byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2) {
    if (change_to_virtual) {
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if method is public.
      // Otherwise, the method needs to be reresolved with caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
      set_bytecode_2(Bytecodes::_invokevirtual);
    } else {
      set_bytecode_2(invoke_code);
    }
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}
0N/A
0N/A
// Resolve this entry as an invokeinterface call site: f1 holds the
// interface klass, f2 the method index within it (presumably the
// itable method index -- confirm against callers).
void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
  klassOop interf = method->method_holder();
  assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
  set_f1(interf);
  set_f2(index);
  // Parameter size goes into the low (hotSwapBit) flag bits.
  set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | method()->size_of_parameters());
  // Publish last: the non-zero bytecode marks the entry resolved.
  set_bytecode_1(Bytecodes::_invokeinterface);
}
0N/A
0N/A
// Resolve this (secondary) entry for an invokedynamic call site:
// f1 holds the CallSite oop, f2 the caller-supplied extra_data.
// Racing resolvers are arbitrated with a compare-and-swap on _f1.
void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, int extra_data) {
  methodOop method = (methodOop) java_dyn_CallSite::vmmethod(call_site());
  assert(method->is_method(), "must be initialized properly");
  int param_size = method->size_of_parameters();
  assert(param_size >= 1, "method argument size must include MH.this");
  param_size -= 1;  // do not count MH.this; it is not stacked for invokedynamic
  if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
    // racing threads might be trying to install their own favorites
    // NOTE(review): the cmpxchg already installed call_site() when we
    // get here; the extra set_f1 presumably re-runs set_f1's store /
    // barrier side effects -- confirm against set_f1's definition.
    set_f1(call_site());
  }
  set_f2(extra_data);
  set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | param_size);
  // do not do set_bytecode on a secondary CP cache entry
  //set_bytecode_1(Bytecodes::_invokedynamic);
}
726N/A
726N/A
0N/Aclass LocalOopClosure: public OopClosure {
0N/A private:
0N/A void (*_f)(oop*);
0N/A
0N/A public:
0N/A LocalOopClosure(void f(oop*)) { _f = f; }
0N/A virtual void do_oop(oop* o) { _f(o); }
113N/A virtual void do_oop(narrowOop *o) { ShouldNotReachHere(); }
0N/A};
0N/A
0N/A
// Apply f to every oop location in this entry by wrapping it in a
// temporary OopClosure adapter and delegating to oop_iterate.
void ConstantPoolCacheEntry::oops_do(void f(oop*)) {
  LocalOopClosure blk(f);
  oop_iterate(&blk);
}
0N/A
0N/A
// Iterate over the oop fields of this entry.  _f1 always holds an oop
// or NULL; _f2 holds an oop (the target methodOop) only when the
// vfinal flag is set -- otherwise it is a plain int (vtable index,
// field offset, ...) and must not be visited.
void ConstantPoolCacheEntry::oop_iterate(OopClosure* blk) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    blk->do_oop((oop*)&_f2);
  }
}
0N/A
0N/A
// Like oop_iterate, but only visits oop locations that lie within the
// given memory region mr.
void ConstantPoolCacheEntry::oop_iterate_m(OopClosure* blk, MemRegion mr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  if (mr.contains((oop *)&_f1)) blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    // _f2 is a methodOop only in the vfinal case.
    if (mr.contains((oop *)&_f2)) blk->do_oop((oop*)&_f2);
  }
}
0N/A
0N/A
// Mark-sweep marking: push the entry's oop fields onto the marking
// stack (same field layout rules as oop_iterate).
void ConstantPoolCacheEntry::follow_contents() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::mark_and_push((oop*)&_f1);
  if (is_vfinal()) {
    // _f2 is a methodOop only in the vfinal case.
    MarkSweep::mark_and_push((oop*)&_f2);
  }
}
0N/A
0N/A#ifndef SERIALGC
// Parallel-compact marking variant of follow_contents, driven by the
// given per-thread compaction manager.
void ConstantPoolCacheEntry::follow_contents(ParCompactionManager* cm) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::mark_and_push(cm, (oop*)&_f1);
  if (is_vfinal()) {
    // _f2 is a methodOop only in the vfinal case.
    PSParallelCompact::mark_and_push(cm, (oop*)&_f2);
  }
}
0N/A#endif // SERIALGC
0N/A
// Mark-sweep compaction: update the entry's oop fields to point to the
// objects' new locations (same field layout rules as oop_iterate).
void ConstantPoolCacheEntry::adjust_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    // _f2 is a methodOop only in the vfinal case.
    MarkSweep::adjust_pointer((oop*)&_f2);
  }
}
0N/A
0N/A#ifndef SERIALGC
// Parallel-compact variant of adjust_pointers: forward the entry's oop
// fields to their post-compaction locations.
void ConstantPoolCacheEntry::update_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    // _f2 is a methodOop only in the vfinal case.
    PSParallelCompact::adjust_pointer((oop*)&_f2);
  }
}
0N/A
// Bounded variant of update_pointers: only adjusts oops that point
// into [beg_addr, end_addr).
void ConstantPoolCacheEntry::update_pointers(HeapWord* beg_addr,
                                             HeapWord* end_addr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1, beg_addr, end_addr);
  if (is_vfinal()) {
    // _f2 is a methodOop only in the vfinal case.
    PSParallelCompact::adjust_pointer((oop*)&_f2, beg_addr, end_addr);
  }
}
0N/A#endif // SERIALGC
0N/A
0N/A// RedefineClasses() API support:
0N/A// If this constantPoolCacheEntry refers to old_method then update it
0N/A// to refer to new_method.
// Returns true iff this entry referred to old_method and was updated.
// *trace_name_printed tracks whether the class-name trace header has
// been emitted yet, so it is printed at most once per class.
bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method,
       methodOop new_method, bool * trace_name_printed) {

  if (is_vfinal()) {
    // virtual and final so f2() contains method ptr instead of vtable index
    if (f2() == (intptr_t)old_method) {
      // match old_method so need an update
      _f2 = (intptr_t)new_method;
      if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
        if (!(*trace_name_printed)) {
          // RC_TRACE_MESG macro has an embedded ResourceMark
          RC_TRACE_MESG(("adjust: name=%s",
            Klass::cast(old_method->method_holder())->external_name()));
          *trace_name_printed = true;
        }
        // RC_TRACE macro has an embedded ResourceMark
        RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)",
          new_method->name()->as_C_string(),
          new_method->signature()->as_C_string()));
      }

      return true;
    }

    // f1() is not used with virtual entries so bail out
    return false;
  }

  if ((oop)_f1 == NULL) {
    // NULL f1() means this is a virtual entry so bail out
    // We are assuming that the vtable index does not need change.
    return false;
  }

  if ((oop)_f1 == old_method) {
    // non-vfinal method entry: f1 holds the methodOop directly
    _f1 = new_method;
    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
      if (!(*trace_name_printed)) {
        // RC_TRACE_MESG macro has an embedded ResourceMark
        RC_TRACE_MESG(("adjust: name=%s",
          Klass::cast(old_method->method_holder())->external_name()));
        *trace_name_printed = true;
      }
      // RC_TRACE macro has an embedded ResourceMark
      RC_TRACE(0x00400000, ("cpc entry update: %s(%s)",
        new_method->name()->as_C_string(),
        new_method->signature()->as_C_string()));
    }

    return true;
  }

  return false;
}
0N/A
0N/Abool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) {
0N/A if (!is_method_entry()) {
0N/A // not a method entry so not interesting by default
0N/A return false;
0N/A }
0N/A
0N/A methodOop m = NULL;
0N/A if (is_vfinal()) {
0N/A // virtual and final so _f2 contains method ptr instead of vtable index
0N/A m = (methodOop)_f2;
0N/A } else if ((oop)_f1 == NULL) {
0N/A // NULL _f1 means this is a virtual entry so also not interesting
0N/A return false;
0N/A } else {
0N/A if (!((oop)_f1)->is_method()) {
0N/A // _f1 can also contain a klassOop for an interface
0N/A return false;
0N/A }
0N/A m = (methodOop)_f1;
0N/A }
0N/A
0N/A assert(m != NULL && m->is_method(), "sanity check");
0N/A if (m == NULL || !m->is_method() || m->method_holder() != k) {
0N/A // robustness for above sanity checks or method is not in
0N/A // the interesting class
0N/A return false;
0N/A }
0N/A
0N/A // the method is in the interesting class so the entry is interesting
0N/A return true;
0N/A}
0N/A
0N/Avoid ConstantPoolCacheEntry::print(outputStream* st, int index) const {
0N/A // print separator
0N/A if (index == 0) tty->print_cr(" -------------");
0N/A // print entry
726N/A tty->print_cr("%3d (%08x) ", index, this);
726N/A if (is_secondary_entry())
726N/A tty->print_cr("[%5d|secondary]", main_entry_index());
726N/A else
726N/A tty->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
0N/A tty->print_cr(" [ %08x]", (address)(oop)_f1);
0N/A tty->print_cr(" [ %08x]", _f2);
0N/A tty->print_cr(" [ %08x]", _flags);
0N/A tty->print_cr(" -------------");
0N/A}
0N/A
// Verify the entry's invariants; invoked from the set_* methods in
// non-product builds (via NOT_PRODUCT(verify(tty))).
void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}
0N/A
0N/A// Implementation of ConstantPoolCache
0N/A
0N/Avoid constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) {
0N/A assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
1059N/A for (int i = 0; i < length(); i++) {
1059N/A ConstantPoolCacheEntry* e = entry_at(i);
1059N/A int original_index = inverse_index_map[i];
1059N/A if ((original_index & Rewriter::_secondary_entry_tag) != 0) {
1059N/A int main_index = (original_index - Rewriter::_secondary_entry_tag);
1059N/A assert(!entry_at(main_index)->is_secondary_entry(), "valid main index");
1059N/A e->initialize_secondary_entry(main_index);
1059N/A } else {
1059N/A e->initialize_entry(original_index);
1059N/A }
1059N/A assert(entry_at(i) == e, "sanity");
1059N/A }
0N/A}
0N/A
0N/A// RedefineClasses() API support:
0N/A// If any entry of this constantPoolCache points to any of
0N/A// old_methods, replace it with the corresponding new_method.
// For each cache entry that resolves to one of old_methods (all of
// which share the same holder class), rewrite it to the corresponding
// new_method.  *trace_name_printed is threaded through so the class
// name is traced at most once.
void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods,
                                                     int methods_length, bool * trace_name_printed) {

  if (methods_length == 0) {
    // nothing to do if there are no methods
    return;
  }

  // get shorthand for the interesting class
  klassOop old_holder = old_methods[0]->method_holder();

  for (int i = 0; i < length(); i++) {
    if (!entry_at(i)->is_interesting_method_entry(old_holder)) {
      // skip uninteresting methods
      continue;
    }

    // The constantPoolCache contains entries for several different
    // things, but we only care about methods. In fact, we only care
    // about methods in the same class as the one that contains the
    // old_methods. At this point, we have an interesting entry.

    for (int j = 0; j < methods_length; j++) {
      methodOop old_method = old_methods[j];
      methodOop new_method = new_methods[j];

      if (entry_at(i)->adjust_method_entry(old_method, new_method,
          trace_name_printed)) {
        // current old_method matched this entry and we updated it so
        // break out and get to the next interesting entry if there is one
        break;
      }
    }
  }
}