/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/oopFactory.hpp"
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"
// Every time a compiled IC is changed or its type is being accessed,
// either the CompiledIC_lock must be set or we must be at a safe point.
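//
// State machine overview: an inline cache is a patchable call site that
// starts out "clean" (calling a SharedRuntime resolve stub), is upgraded to
// "monomorphic" once one receiver has been seen (calling a single callee
// entry directly, with the expected klass cached next to the call), and
// becomes "megamorphic" (dispatching through a vtable/itable stub) when
// multiple receivers show up. Transitions that cannot be patched atomically
// while other threads may be running through the call site are staged in
// the InlineCacheBuffer; see is_in_transition_state() below.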
//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.
void CompiledIC::set_cached_oop(oop cache) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized virtual call does not have a cached oop");

  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print_cr(" changing oop to " INTPTR_FORMAT, (address)cache);
  }

  // A NULL receiver is represented by the distinguished non-oop word, so GC
  // never sees a raw NULL in the instruction stream.
  if (cache == NULL)  cache = (oop)Universe::non_oop_word();

  *_oop_addr = cache;
  // fix up the relocations
  RelocIterator iter = _oops;
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* r = iter.oop_reloc();
      if (r->oop_addr() == _oop_addr)
        r->fix_oop_relocation();
    }
  }
  return;
}

oop CompiledIC::cached_oop() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized virtual call does not have a cached oop");

  if (!is_in_transition_state()) {
    oop data = *_oop_addr;
    // If we let the oop value here be initialized to zero...
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC oops, because of patching races");
    return (data == (oop)Universe::non_oop_word()) ? (oop)NULL : data;
  } else {
    // In mid-transition the pending oop is parked in the ICStub.
    return InlineCacheBuffer::cached_oop_for((CompiledIC *)this);
  }
}

void CompiledIC::set_ic_destination(address entry_point) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print_cr(" changing destination to " INTPTR_FORMAT, entry_point);
  }
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
  assert(cb != NULL && cb->is_nmethod(), "must be compiled");
#endif
  _ic_call->set_destination_mt_safe(entry_point);
}

address CompiledIC::ic_destination() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (!is_in_transition_state()) {
    return _ic_call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}

bool CompiledIC::is_in_transition_state() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return InlineCacheBuffer::contains(_ic_call->destination());
}

// Returns native address of 'call' instruction in inline-cache. Used by
// the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _ic_call->destination();
}
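
// While a site is in transition, the call instruction points into the
// InlineCacheBuffer: the ICStub there holds both the pending cached oop and
// the final destination. That is why cached_oop() and ic_destination()
// above consult the InlineCacheBuffer rather than the not-yet-patched call
// site itself.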
//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.
void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  methodHandle method = call_info->selected_method();
  bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  address entry;
  if (is_invoke_interface) {
    int index = klassItable::compute_itable_index(call_info->resolved_method()());
    entry = VtableStubs::create_stub(false, index, method());
    assert(entry != NULL, "entry not computed");
    klassOop k = call_info->resolved_method()->method_holder();
    assert(Klass::cast(k)->is_interface(), "sanity check");
    InlineCacheBuffer::create_transition_stub(this, k, entry);
  } else {
    // Can be different than method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    entry = VtableStubs::create_stub(true, vtable_index, method());
    InlineCacheBuffer::create_transition_stub(this, method(), entry);
  }

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                  instruction_address(), method->print_value_string(), entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
}

// true if destination is megamorphic stub
bool CompiledIC::is_megamorphic() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");
  // Cannot rely on cached_oop. It is either an interface or a method.
  return VtableStubs::is_entry_point(ic_destination());
}

bool CompiledIC::is_call_to_compiled() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_oop is a klass for non-optimized monomorphic calls
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to vep without using the inline cache (i.e., cached_oop == NULL)
#ifdef ASSERT
#ifdef TIERED
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_method = caller->is_compiled_by_c1();
#else
#ifdef COMPILER1
  bool is_c1_method = true;
#else
  bool is_c1_method = false;
#endif // COMPILER1
#endif // TIERED
  assert( is_c1_method ||
         !is_monomorphic ||
          is_optimized() ||
          (cached_oop() != NULL && cached_oop()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}

bool CompiledIC::is_call_to_interpreted() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Call to interpreter if destination is either calling to a stub (if it
  // is optimized), or calling to an I2C blob
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // must use unsafe because the destination can be a zombie (and we're cleaning)
    // and the print_compiled_ic code wants to know if the site (in the non-zombie)
    // is to the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (cached_oop() != NULL && cached_oop()->is_compiledICHolder()), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
    address dest = ic_destination();
#ifdef ASSERT
    {
      CodeBlob* db = CodeCache::find_blob_unsafe(dest);
      assert(!db->is_adapter_blob(), "must use stub!");
    }
#endif /* ASSERT */
    is_call_to_interpreted = cb->contains(dest);
  }
  return is_call_to_interpreted;
}

void CompiledIC::set_to_clean() {
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked(), "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", instruction_address());
    print();
  }

  address entry;
  if (is_optimized()) {
    entry = SharedRuntime::get_resolve_opt_virtual_call_stub();
  } else {
    entry = SharedRuntime::get_resolve_virtual_call_stub();
  }

  // A zombie transition will always be safe, since the oop has already been set to NULL, so
  // we only need to patch the destination
  bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    // Kill any leftover stub we might have too
    if (is_in_transition_state()) {
      ICStub* old_stub = ICStub_from_destination_address(stub_address());
      old_stub->clear();
    }
    if (!is_optimized()) set_cached_oop(NULL);
    set_ic_destination(entry);
  } else {
    // Unsafe transition - create stub.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
}

bool CompiledIC::is_clean() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == SharedRuntime::get_resolve_opt_virtual_call_stub() ||
             dest == SharedRuntime::get_resolve_virtual_call_stub();
  assert(!is_clean || is_optimized() || cached_oop() == NULL, "sanity check");
  return is_clean;
}
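
// Note that "clean" means the destination is one of the SharedRuntime
// resolve stubs: the next execution of the site traps into the runtime,
// which resolves the callee and re-arms the site via set_to_monomorphic()
// or set_to_megamorphic().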
void CompiledIC::set_to_monomorphic(const CompiledICInfo& info) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target and these
  // transitions are mt_safe

  Thread* thread = Thread::current();
  if (info._to_interpreter) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as static call
      // Call via stub
      assert(info.cached_oop().not_null() && info.cached_oop()->is_method(), "sanity check");
      CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
      methodHandle method (thread, (methodOop)info.cached_oop()());
      csc->set_to_interpreted(method, info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
                      instruction_address(),
                      method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      assert(info.cached_oop().not_null(), "must be set");
      InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter via mkh", instruction_address());
      }
    }
  } else {
    // Call to compiled code
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert(cb != NULL && cb->is_nmethod(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());
    } else {
      set_ic_destination(info.entry());
      if (!is_optimized()) set_cached_oop(info.cached_oop()());
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_oop().is_null() || info.cached_oop()->is_klass(), "must be");
      tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to compiled %s",
                    instruction_address(), (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}

// is_optimized: Compiler has generated an optimized call (i.e., no inline
// cache). static_bound: The call can be statically bound (i.e., no need to
// use an inline cache).
void CompiledIC::compute_monomorphic_entry(methodHandle method,
                                           KlassHandle receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           CompiledICInfo& info,
                                           TRAPS) {
  info._is_optimized = is_optimized;

  nmethod* method_code = method->code();
  address entry = NULL;
  if (method_code != NULL) {
    // Call to compiled code
    if (static_bound || is_optimized) {
      entry = method_code->verified_entry_point();
    } else {
      entry = method_code->entry_point();
    }
  }
  if (entry != NULL) {
    // Call to compiled code
    info._entry = entry;
    if (static_bound || is_optimized) {
      info._cached_oop = Handle(THREAD, (oop)NULL);
    } else {
      info._cached_oop = receiver_klass;
    }
    info._to_interpreter = false;
  } else {
    // Note: the following problem exists with Compiler1:
    // - at compile time we may or may not know if the destination is final
    // - if we know that the destination is final, we will emit an optimized
    //   virtual call (no inline cache), and need a methodOop to make a call
    //   to the interpreter
    // - if we do not know if the destination is final, we emit a standard
    //   virtual call, and use CompiledICHolder to call interpreted code
    //   (no static call stub has been generated)
    //   However in that case we will now notice it is static_bound
    //   and convert the call into what looks to be an optimized
    //   virtual call. This causes problems in verifying the IC because
    //   it looks vanilla but is optimized. Code in is_call_to_interpreted
    //   is aware of this and weakens its asserts.
    info._to_interpreter = true;
    // static_bound should imply is_optimized -- otherwise we have a
    // performance bug (statically-bindable method is called via
    // dynamically-dispatched call). note: the reverse implication isn't
    // necessarily true -- the call may have been optimized based on compiler
    // analysis (static_bound is only based on "final" etc.)
#ifdef COMPILER2
#ifdef TIERED
#if defined(ASSERT)
    // can't check the assert because we don't have the CompiledIC with which to
    // find the address of the call instruction.
    //
    // CodeBlob* cb = find_blob_unsafe(instruction_address());
    // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // ASSERT
#else
    assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // TIERED
#endif // COMPILER2
    if (is_optimized) {
      // Use stub entry
      info._entry      = method()->get_c2i_entry();
      info._cached_oop = method;
    } else {
      // Use mkh entry
      oop holder = oopFactory::new_compiledICHolder(method, receiver_klass, CHECK);
      info._cached_oop = Handle(THREAD, holder);
      info._entry      = method()->get_c2i_unverified_entry();
    }
  }
}
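
// Typical use: SharedRuntime's call-resolution path (resolve_sub_helper)
// fills a CompiledICInfo via compute_monomorphic_entry() and then, holding
// CompiledIC_lock, applies it to the site with set_to_monomorphic().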
inline static RelocIterator parse_ic(nmethod* nm, address ic_call, oop* &_oop_addr, bool *is_optimized) {
  address first_oop = NULL;
  // Mergers please note: Sun SC5.x CC insists on an lvalue for a reference parameter.
  nmethod* code = nm;
  return virtual_call_Relocation::parse_ic(code, ic_call, first_oop, _oop_addr, is_optimized);
}

CompiledIC::CompiledIC(NativeCall* ic_call)
  : _ic_call(ic_call),
    _oops(parse_ic(NULL, ic_call->instruction_address(), _oop_addr, &_is_optimized))
{
}

CompiledIC::CompiledIC(Relocation* ic_reloc)
  : _ic_call(nativeCall_at(ic_reloc->addr())),
    _oops(parse_ic(ic_reloc->code(), ic_reloc->addr(), _oop_addr, &_is_optimized))
{
  assert(ic_reloc->type() == relocInfo::virtual_call_type ||
         ic_reloc->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
}

// ----------------------------------------------------------------------------
void CompiledStaticCall::set_to_clean() {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(this);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());

  // Do not reset stub here: It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}

bool CompiledStaticCall::is_clean() const {
  return destination() == SharedRuntime::get_resolve_static_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}

bool CompiledStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted, if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call
  nmethod* nm = CodeCache::find_nmethod(instruction_address());
  return nm->stub_contains(destination());
}

void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
  address stub = find_stub();
  guarantee(stub != NULL, "stub not found");

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                  instruction_address(), callee->name_and_sig_as_C_string());
  }

  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), "a) MT-unsafe modification of inline cache");
  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, "b) MT-unsafe modification of inline cache");

  // Update stub
  method_holder->set_data((intptr_t)callee());
  jump->set_jump_destination(entry);

  // Update jump to call
  set_destination_mt_safe(stub);
}
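
// The static-call stub patched above consists of a NativeMovConstReg that
// materializes the callee methodOop in the register the c2i adapter expects,
// followed by a NativeJump to the interpreter entry. The stub is fully
// written before the call itself is redirected to it, so a racing thread
// never executes a half-initialized stub.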
void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
  } else {
    if (TraceICs) {
      ResourceMark rm;
      tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                    instruction_address(), info.entry());
    }
    // Call to compiled code
    assert(CodeCache::contains(info.entry()), "wrong entry point");
    set_destination_mt_safe(info.entry());
  }
}

// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
  nmethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL) {
    info._to_interpreter = false;
    info._entry = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code. In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    info._to_interpreter = true;
    info._entry = m()->get_c2i_entry();
  }
}

void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset stub
  address stub = static_stub->addr();
  assert(stub != NULL, "stub not found");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
  method_holder->set_data(0);
  jump->set_jump_destination((address)-1);
}

address CompiledStaticCall::find_stub() {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction_address());
  while (iter.next()) {
    if (iter.addr() == instruction_address()) {
      switch (iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub();
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub();
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}
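
// The stub located above was emitted into the nmethod's stub section at
// code-generation time and is described by a static_stub relocation.
// Optimized virtual calls reuse the same mechanism, hence the two accepted
// cases in the switch.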
//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT
void CompiledIC::verify() {
  // make sure code pattern is actually a call imm32 instruction
  _ic_call->verify();
  if (os::is_MP()) {
    _ic_call->verify_alignment();
  }
}

void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}

void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT,
             instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination());
}

void CompiledStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", instruction_address());
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

void CompiledStaticCall::verify() {
  // Verify call
  NativeCall::verify();
  if (os::is_MP()) {
    verify_alignment();
  }

  // Verify stub
  address stub = find_stub();
  assert(stub != NULL, "no stub found for static call");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  // Verify state
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}

#endif