// codeCache.cpp revision 2223
/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/dependencies.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodOop.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "utilities/xmlstream.hpp"
// Helper class for printing in CodeCache
//
// CodeBlob_sizes accumulates per-category byte counts (header, code, stubs,
// relocation info, and -- for nmethods -- scope oops/data/pcs) over a set of
// CodeBlobs so a one-line percentage summary can be printed.
//
// NOTE(review): the method bodies between total() and the closing brace are
// truncated in this copy -- the headers of the print and add methods and most
// of their statements are missing, so the class cannot compile as-is.
// Restore from the upstream codeCache.cpp before building.
class CodeBlob_sizes {
private:
int count;            // number of blobs accumulated so far
int total_size;       // sum of all blob sizes, in bytes
int header_size;      // bytes in blob headers
int code_size;        // bytes of generated instructions
int stub_size;        // bytes of stub code
int relocation_size;  // bytes of relocation information
int scopes_oop_size;  // bytes of scope oops (nmethods only)
int scopes_data_size; // bytes of scope data (nmethods only)
int scopes_pcs_size;  // bytes of scope pc descriptors (nmethods only)
public:
// Start with every counter at zero.
CodeBlob_sizes() {
count = 0;
total_size = 0;
header_size = 0;
code_size = 0;
stub_size = 0;
relocation_size = 0;
scopes_oop_size = 0;
scopes_data_size = 0;
scopes_pcs_size = 0;
}
int total() { return total_size; }
// NOTE(review): truncated fragment of a print method -- its header and the
// remaining print_cr arguments are missing here.
tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, data %d%%, pcs %d%%])",
total() / K,
}
// NOTE(review): truncated fragment of an add(CodeBlob* cb) method -- the
// header and the per-category accumulation statements are missing here.
count++;
if (cb->is_nmethod()) {
} else {
}
}
};
// CodeCache implementation
// Cache-wide statistics, updated as blobs are allocated and freed.
int CodeCache::_number_of_blobs = 0;       // all live CodeBlobs
int CodeCache::_number_of_adapters = 0;    // subset that are adapter blobs
int CodeCache::_number_of_nmethods = 0;    // subset that are nmethods
// Set when inline caches need cleaning; consulted in gc_epilogue() and
// cleared there via set_needs_cache_clean(false).
bool CodeCache::_needs_cache_clean = false;
}
}
return cb;
}
}
}
}
}
}
// Do not seize the CodeCache lock here--if the caller has not
// already done so, we are going to lose bigtime, since the code
// cache will contain a garbage CodeBlob until the caller can
// run the constructor for the CodeBlob subclass he is busy
// instantiating.
while (true) {
// Expansion failed
return NULL;
}
if (PrintCodeCacheExtension) {
}
}
return cb;
}
if (cb->is_nmethod()) {
}
}
if (cb->is_adapter_blob()) {
}
}
// this is called by nmethod::nmethod, which must already own CodeCache_lock
if (cb->is_nmethod()) {
}
}
if (cb->is_adapter_blob()) {
}
// flush the hardware I-cache
}
}
// Iteration over CodeBlobs
// Convenience iterators over the code cache that visit only live blobs /
// live nmethods.  Iteration ends when the filtered cursor becomes NULL.
// NOTE(review): the exact semantics of alive()/alive_nmethod() (skip past a
// dead blob vs. stop at one) are defined elsewhere in this file and are not
// visible in this chunk -- confirm before relying on them.
#define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
// It should be ok to call contains without holding a lock
}
// This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
// We could potentially look up non_entrant methods
guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
return result;
}
}
FOR_ALL_BLOBS(p) {
f(p);
}
}
FOR_ALL_BLOBS(nm) {
}
}
// Byte alignment that the underlying code heap applies to allocations.
// Pure delegation to _heap; narrowed to int for callers.
int CodeCache::alignment_unit() {
return static_cast<int>(_heap->alignment_unit());
}
// Alignment offset of allocations in the underlying code heap.
// Pure delegation to _heap; narrowed to int for callers.
int CodeCache::alignment_offset() {
return static_cast<int>(_heap->alignment_offset());
}
// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
bool unloading_occurred) {
}
}
f->do_code_blob(cb);
#ifdef ASSERT
if (cb->is_nmethod())
#endif //ASSERT
}
}
// Walk the list of methods which might contain non-perm oops.
#ifndef PRODUCT
if (TraceScavenge) {
}
#endif //PRODUCT
if (is_live) {
// Perform cur->oops_do(f), maybe just once per nmethod.
f->do_code_blob(cur);
}
}
// Check for stray marks.
}
}
else set_scavenge_root_nmethods(next);
return;
}
}
assert(false, "should have been on list");
}
void CodeCache::prune_scavenge_root_nmethods() {
&& cur->detect_scavenge_root_oops()) {
// Keep it. Advance 'last' to prevent deletion.
} else {
// Prune it from the list, so we don't have to look at it any more.
else set_scavenge_root_nmethods(next);
}
}
// Check for stray marks.
}
#ifndef PRODUCT
// While we are here, verify the integrity of the list.
}
}
// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
if (cb->is_nmethod()) {
if (nm->on_scavenge_root_list())
}
}
}
// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods is gone.
if (cb->is_nmethod()) {
if (nm->on_scavenge_root_list())
call_f = false; // don't show this one to the client
} else {
call_f = false; // not an nmethod
}
}
}
#endif //PRODUCT
} else {
}
saved->set_speculatively_disconnected(false);
if (PrintMethodFlushing) {
}
}
return saved;
}
}
return NULL;
}
// For conc swpr this will be called with CodeCache_lock taken by caller
} else {
}
}
return;
}
}
}
assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
if (PrintMethodFlushing) {
}
}
nm->set_speculatively_disconnected(true);
}
// GC support: hook invoked at the start of a garbage collection cycle
// (paired with gc_epilogue() below).
// NOTE(review): the body is empty in this copy; upstream revisions perform
// a sanity assertion here.  Confirm against the original before building.
void CodeCache::gc_prologue() {
}
void CodeCache::gc_epilogue() {
if (cb->is_nmethod()) {
if (needs_cache_clean()) {
}
}
}
set_needs_cache_clean(false);
}
void CodeCache::verify_oops() {
if (cb->is_nmethod()) {
}
}
}
}
}
// Forward declaration: defined in the platform icache code.
void icache_init();
// One-time setup of the code cache: validate CodeCacheSegmentSize against
// the required alignments, reserve the backing memory (truncated here),
// and initialize the instruction-cache flush mechanism.
void CodeCache::initialize() {
assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
// This was originally just a check of the alignment, causing failure, instead, round
// the code cache to the page size. In particular, Solaris is moving to a larger
// default page size.
// NOTE(review): the reservation code that precedes this failure branch is
// missing from this copy -- only the error exit of the failed-reserve path
// remains.
vm_exit_during_initialization("Could not reserve enough space for code cache");
}
// Initialize ICache flush mechanism
// This service is needed for os::register_code_area
icache_init();
// Give OS a chance to register generated code area.
// This is used on Windows 64 bit platforms to register
// Structured Exception Handlers for our generated code.
// NOTE(review): the os::register_code_area(...) call itself appears to be
// missing here.
}
// Module initialization hook: builds the code cache by delegating to
// CodeCache::initialize().
void codeCache_init() {
CodeCache::initialize();
}
//------------------------------------------------------------------------------------------------
}
void CodeCache::clear_inline_caches() {
}
}
#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;  // cumulative elapsed time
static int dependentCheckCount = 0;      // number of checks timed
#endif // PRODUCT
#ifndef PRODUCT
#endif // PRODUCT
int number_of_marked_CodeBlobs = 0;
// search the hierarchy looking for nmethods which are affected by the loading of this class
// then search the interfaces this class implements looking for nmethods
// which might be dependent of the fact that an interface only had one
// implementor.
}
}
if (VerifyDependencies) {
// Turn off dependency tracing while actually testing deps.
if (!nm->is_marked_for_deoptimization() &&
nm->check_all_dependencies()) {
nm->print_dependencies();
}
}
}
#ifndef PRODUCT
#endif // PRODUCT
return number_of_marked_CodeBlobs;
}
#ifdef HOTSWAP
int number_of_marked_CodeBlobs = 0;
// Deoptimize all methods of the evolving class itself
for (int i = 0; i < old_methods->length(); i++) {
}
}
if (nm->is_marked_for_deoptimization()) {
// ...Already marked in the previous pass; don't count it again.
} else {
// flush caches in case they refer to a redefined methodOop
}
}
return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP
// Deoptimize all methods
}
}
int number_of_marked_CodeBlobs = 0;
}
}
return number_of_marked_CodeBlobs;
}
// Turn every nmethod previously marked for deoptimization into a zombie,
// or -- when it cannot be converted yet -- into a non-entrant method that
// will be zombied once it is no longer on any stack.
// NOTE(review): the FOR_ALL_ALIVE_NMETHODS iteration header and the
// can-convert test are missing from this copy (brace balance is off);
// restore from upstream before building.
void CodeCache::make_marked_nmethods_zombies() {
if (nm->is_marked_for_deoptimization()) {
// If the nmethod has already been made non-entrant and it can be converted
// then zombie it now. Otherwise make it non-entrant and it will eventually
// be zombied when it is no longer seen on the stack. Note that the nmethod
// might be "entrant" and not on the stack and so could be zombied immediately
// but we can't tell because we don't track it on stack until it becomes
// non-entrant.
nm->make_zombie();
} else {
nm->make_not_entrant();
}
}
}
}
// Make every nmethod that is marked for deoptimization non-entrant.
// NOTE(review): the FOR_ALL_ALIVE_NMETHODS iteration header is missing from
// this copy (brace balance is off by one); restore before building.
void CodeCache::make_marked_nmethods_not_entrant() {
if (nm->is_marked_for_deoptimization()) {
nm->make_not_entrant();
}
}
}
FOR_ALL_ALIVE_BLOBS(p) {
p->verify();
}
}
//------------------------------------------------------------------------------------------------
// Non-product version
#ifndef PRODUCT
// Verify the code cache when the -XX:+VerifyCodeCacheOften flag is set.
// NOTE(review): the body of the if is empty in this copy -- upstream this
// performs the heap verification call.  Confirm against the original file.
void CodeCache::verify_if_often() {
if (VerifyCodeCacheOften) {
}
}
if (PrintCodeCache2) { // Need to add a new flag
}
}
// Print detailed statistics about the contents of the code cache: blob
// counts by kind, nmethod state counts, and a size histogram of Java
// nmethods.
// NOTE(review): large parts of this function are truncated in this copy --
// the blob iteration headers, most counter increments, the histogram setup
// (bucketLimit/buckets allocation) and the tty output statements are
// missing.  Restore from upstream before building.
void CodeCache::print_internals() {
// Counters per blob kind.
int nmethodCount = 0;
int runtimeStubCount = 0;
int adapterCount = 0;
int deoptimizationStubCount = 0;
int uncommonTrapStubCount = 0;
int bufferBlobCount = 0;
int total = 0;
// Counters per nmethod state.
int nmethodAlive = 0;
int nmethodNotEntrant = 0;
int nmethodZombie = 0;
int nmethodUnloaded = 0;
int nmethodJava = 0;
int nmethodNative = 0;
int maxCodeSize = 0;
// First pass: classify every blob.  (Iteration header missing here.)
total++;
if (cb->is_nmethod()) {
}
nmethodCount++;
nmethodJava++;
}
}
} else if (cb->is_runtime_stub()) {
} else if (cb->is_deoptimization_stub()) {
} else if (cb->is_uncommon_trap_stub()) {
} else if (cb->is_adapter_blob()) {
adapterCount++;
} else if (cb->is_buffer_blob()) {
}
}
// Second pass: histogram of Java nmethod code sizes in 512-byte buckets.
int bucketSize = 512;
if (cb->is_nmethod()) {
if(nm->is_java_method()) {
}
}
}
// Emit the non-empty histogram buckets, then release the bucket array.
for(int i=0; i<bucketLimit; i++) {
if(buckets[i] != 0) {
}
}
FREE_C_HEAP_ARRAY(int, buckets);
}
FOR_ALL_BLOBS(p) {
if (!p->is_alive()) {
} else {
}
}
}
}
if (Verbose) {
// print the oop_map usage
int code_size = 0;
int number_of_blobs = 0;
int number_of_oop_maps = 0;
int map_size = 0;
FOR_ALL_BLOBS(p) {
if (p->is_alive()) {
}
}
}
}
}
#endif // PRODUCT
_heap->low_boundary(),
_heap->high_boundary());
" largest_free_block=" SIZE_FORMAT,
unallocated_capacity()/K, largest_free_block());
}
}