/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/compactPermGen.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/permGen.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"
#include "utilities/workgroup.hpp"
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#endif // SERIALGC
// The set of potentially parallel tasks in strong root scanning.
// We probably want to parallelize both of these internally, but for now...
enum GCH_process_strong_roots_tasks {
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  if (_gen_process_strong_tasks == NULL ||
      !_gen_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  assert(policy != NULL, "Sanity check");
  _preloading_shared_classes = false;
}
jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  int i;
// While there are no constraints in the GC code that HeapWordSize
// be any particular value, there are multiple other areas in the
// system which believe this to be true (e.g. oop->object_size in some
// cases incorrectly returns the size in wordSize units rather than
// HeapWordSize).
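  // The guard motivated by the comment above (a minimal sketch; the exact
  // message string is an assumption):
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");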
  // The heap must be at least as aligned as generations.
  size_t alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();
  PermanentGenerationSpec *perm_gen_spec =
                                collector_policy()->permanent_generation();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(alignment);
  }
  perm_gen_spec->align(alignment);
// If we are dumping the heap, then allocate a wasted block of address
// space in order to push the heap to a lower address. This extra
// address range allows for other (or larger) libraries to be loaded
// without them occupying the space required for the shared spaces.
if (DumpSharedSpaces) {
while (reserved < SharedDummyBlockSize) {
reserved += block_size;
}
}
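  // A hedged sketch of the loop above: the dummy block is built up by
  // reserving fixed-size chunks of address space until SharedDummyBlockSize
  // is covered ('block_size' and the reservation call shown here are
  // assumptions, not the original code):
  //
  //   uintx reserved   = 0;
  //   uintx block_size = 64*1024*1024;
  //   while (reserved < SharedDummyBlockSize) {
  //     char* dummy = os::reserve_memory(block_size);
  //     reserved += block_size;
  //   }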
  // Allocate space for the heap.
  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs(0);

  heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (UseSharedSpaces) {
    if (!heap_rs.is_reserved()) {
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      mapinfo->fail_continue("Unable to reserve shared region.");
      allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
               &heap_rs);
    }
  }

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
                                           - perm_gen_spec->misc_code_size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
_gch = this;
for (i = 0; i < _n_gens; i++) {
// tag generations in JavaHeap
}
// tag PermGen
#ifndef SERIALGC
// If we are running CMS, create the collector responsible
// for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
}
#endif // SERIALGC
return JNI_OK;
}
char* GenCollectedHeap::allocate(size_t alignment,
                                 PermanentGenerationSpec* perm_gen_spec,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs) {
// Now figure out the total size.
int n_covered_regions = 0;
for (int i = 0; i < _n_gens; i++) {
}
// Add the size of the data area which shares the same reserved area
// as the heap, but which is not actually part of the heap.
if (UseLargePages) {
}
// Calculate the address at which the heap must reside in order for
// the shared data to be at the required address.
char* heap_address;
if (UseSharedSpaces) {
// Calculate the address of the first word beyond the heap.
// Calculate the address of the first word of the heap.
} else {
if (UseCompressedOops) {
// Failed to reserve at specified address - the requested memory
// region is taken already, for example, by 'java' launcher.
      // Try again to reserve the heap at a higher address.
// Failed to reserve at specified address again - give up.
}
}
return heap_address;
}
}
return heap_address;
}
"Wrong generation kind");
"Wrong generation kind");
}
for (int i = 0; i < _n_gens; i++) {
_gens[i]->ref_processor_init();
}
}
for (int i = 0; i < _n_gens; i++) {
}
return res;
}
for (int i = 0; i < _n_gens; i++) {
}
return res;
}
// Save the "used_region" for generations level and lower,
// and, if perm is true, for perm gen.
for (int i = level; i >= 0; i--) {
_gens[i]->save_used_region();
}
if (perm) {
perm_gen()->save_used_region();
}
}
for (int i = 0; i < _n_gens; i++) {
}
return res;
}
// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}
// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}
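// A hedged sketch of how a concurrent collector might use this counter to
// wait for an in-progress stop-world full collection to finish; the accessor
// names on the waiting side are assumptions, not code from this file:
//
//   unsigned int starting_count = gch->total_full_collections();
//   {
//     MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
//     while (gch->full_collections_completed() < starting_count) {
//       ml.wait(Mutex::_no_safepoint_check_flag);
//     }
//   }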
#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
// generational collectors) this means that those many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif
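// A hedged sketch of how a collector would opt in to the header skipping
// described above (the CMS-side helper shown here is an assumption):
//
//   NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)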
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
// Otherwise...
return NULL;
}
HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               false /* is_tlab */,
                                               gc_overhead_limit_was_exceeded);
}
bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return UseConcMarkSweepGC &&
         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}
void GenCollectedHeap::do_collection(bool   full,
                                     bool   clear_all_soft_refs,
                                     size_t size,
                                     bool   is_tlab,
                                     int    max_level) {
  bool prepared_for_verification = false;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
if (GC_locker::check_active_before_gc()) {
return; // GC is disabled (e.g. JNI GetXXXCritical operation)
}
  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();
{
int starting_level = 0;
if (full) {
// Search for the oldest generation which will collect all younger
// generations, and start collection loop there.
for (int i = max_level; i >= 0; i--) {
if (_gens[i]->full_collects_younger_generations()) {
starting_level = i;
break;
}
}
}
bool must_restore_marks_for_biased_locking = false;
for (int i = starting_level; i <= max_level; i++) {
if (!complete) {
// The full_collections increment was missed above.
}
}
// Timer for individual generations. Last argument is false: no CR
// FIXME: We should try to start the timing earlier to cover more of the GC pause
// Must be done anew before each collection because
// a previous collection will do mangling and will
// change top of some spaces.
i,
}
if (VerifyBeforeGC && i >= VerifyGCLevel &&
total_collections() >= VerifyGCStartAt) {
        if (!prepared_for_verification) {
          prepare_for_verify();
          prepared_for_verification = true;
}
}
      if (!must_restore_marks_for_biased_locking &&
          _gens[i]->performs_in_place_marking()) {
        // We perform this mark word preservation work lazily
        // because it's only at this point that we know whether we
        // absolutely have to do it; we want to avoid doing it for
        // scavenge-only collections where it's unnecessary.
        must_restore_marks_for_biased_locking = true;
        BiasedLocking::preserve_marks();
      }
// Do collection work
{
// Note on ref discovery: For what appear to be historical reasons,
      // GCH enables and disables (by enqueueing) refs discovery.
// In the future this should be moved into the generation's
// collect method so that ref discovery and enqueueing concerns
// are local to a generation. The collect method could return
// an appropriate indication in the case that notification on
// the ref lock was needed. This will make the treatment of
// weak refs more uniform (and indeed remove such concerns
// from GCH). XXX
save_marks(); // save marks for all gens
// We want to discover references, but not process them yet.
// This mode is disabled in process_discovered_references if the
// generation does some collection work, or in
// enqueue_discovered_references if the generation returns
// without doing any work.
        ReferenceProcessor* rp = _gens[i]->ref_processor();
        // If the discovery of ("weak") refs in this generation is
        // atomic wrt other collectors in this configuration, we
        // are guaranteed to have empty discovered ref lists.
        if (rp->discovery_is_atomic()) {
          rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
          rp->setup_policy(do_clear_all_soft_refs);
        } else {
          // collect() below will enable discovery as appropriate
        }
        _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
        if (!rp->enqueuing_is_done()) {
          rp->enqueue_discovered_references();
        } else {
          rp->set_enqueuing_is_done(false);
        }
}
max_level_collected = i;
// Determine if allocation request was met.
if (size > 0) {
size = 0;
}
}
}
update_gc_stats(i, full);
if (VerifyAfterGC && i >= VerifyGCLevel &&
total_collections() >= VerifyGCStartAt) {
}
if (PrintGCDetails) {
}
}
}
// Update "complete" boolean wrt what actually transpired --
// for instance, a promotion failure could have led to
// a whole heap collection.
if (complete) { // We did a "major" collection
// FIXME: See comment at pre_full_gc_dump call
}
if (PrintGCDetails) {
// Print perm gen info for full GC with PrintGCDetails flag.
if (complete) {
}
}
for (int j = max_level_collected; j >= 0; j -= 1) {
// Adjust generation sizes.
_gens[j]->compute_new_size();
}
if (complete) {
// Ask the permanent generation to adjust size for full collections
perm()->compute_new_size();
}
// Track memory usage and detect low memory after GC finishes
}
}
#ifdef TRACESPINNING
#endif
}
}
}
void GenCollectedHeap::
bool younger_gens_as_roots,
bool activate_scope,
bool collecting_perm_gen,
bool do_code_roots,
// General strong roots.
if (!do_code_roots) {
} else {
}
if (younger_gens_as_roots) {
for (int i = 0; i < level; i++) {
}
}
}
// When collection is parallel, all threads get to cooperate to do
// older-gen scanning.
}
}
// "Local" "weak" refs
for (int i = 0; i < _n_gens; i++) {
}
}
void GenCollectedHeap:: \
OopClosureType* cur, \
OopClosureType* older) { \
} \
}
bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
}
return perm_gen()->no_allocs_since_save_marks();
}
bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}
}
}
return _gens[0]->unsafe_max_alloc_nogc();
}
// public collection interfaces
if (should_do_concurrent_full_gc(cause)) {
#ifndef SERIALGC
// mostly concurrent full collection
#else // SERIALGC
#endif // SERIALGC
} else {
#ifdef ASSERT
// minor collection only
} else {
// Stop-the-world full collection
}
#else
// Stop-the-world full collection
#endif
}
}
void GenCollectedHeap::collect(GCCause::Cause cause) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause);
}
// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
switch (cause) {
case GCCause::_heap_inspection:
case GCCause::_heap_dump: {
do_full_collection(false, // don't clear all soft refs
n_gens() - 1);
break;
}
default: // XXX FIX ME
ShouldNotReachHere(); // Unexpected use of this function
}
}
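// A hedged usage sketch for collect_as_vm_thread(): a VM operation already
// executing in the VM thread (e.g. a heap dumper) would invoke it as:
//
//   GenCollectedHeap::heap()->collect_as_vm_thread(GCCause::_heap_dump);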
void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}
// this is the private collection interface
// The Heap_lock is expected to be held on entry.
if (_preloading_shared_classes) {
}
// Read the GC count while holding the Heap_lock
{
}
}
#ifndef SERIALGC
"Unexpected generation kinds");
// Skip two header words in the block content verification
if (collector) {
delete collector; // Be nice in embedded situation
}
vm_shutdown_during_initialization("Could not create CMS collector");
return false;
}
return true; // success
}
// Read the GC counts while holding the Heap_lock
{
}
}
#endif // SERIALGC
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }
do_collection(true /* full */,
clear_all_soft_refs /* clear_all_soft_refs */,
0 /* size */,
false /* is_tlab */,
local_max_level /* max_level */);
// Hack XXX FIX ME !!!
// A scavenge may not have been attempted, or may have
// been attempted and failed, because the old gen was too full
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
                  n_gens() - 1         /* max_level */);
  }
}
return result;
}
// Returns "TRUE" iff "p" points into the committed areas of the heap.
#ifndef ASSERT
#endif
// This might be sped up with a cache of the last generation that
// answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  if (_perm_gen->as_gen()->is_in(p)) return true;
// Otherwise...
return false;
}
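// A hedged sketch of the last-generation cache suggested in the comment
// above; '_last_gen_hit' is a hypothetical member, not part of this class:
//
//   if (_last_gen_hit != NULL && _last_gen_hit->is_in(p)) return true;
//   for (int i = 0; i < _n_gens; i++) {
//     if (_gens[i]->is_in(p)) {
//       _last_gen_hit = _gens[i];
//       return true;
//     }
//   }
//   return false;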
#ifdef ASSERT
// Don't implement this by using is_in_young(). This method is used
// in some cases to check that is_in_young() is correct.
"Does not work if address is non-null and outside of the heap");
// The order of the generations is young (low addr), old, perm (high addr)
}
#endif
for (int i = 0; i < _n_gens; i++) {
}
}
for (int i = 0; i < _n_gens; i++) {
}
}
for (int i = 0; i < _n_gens; i++) {
}
}
for (int i = 0; i < _n_gens; i++) {
}
}
for (int i = 0; i < _n_gens; i++) {
}
}
for (int i = 0; i < _n_gens; i++) {
}
// Otherwise...
assert(false, "Could not find containing space");
return NULL;
}
for (int i = 0; i < _n_gens; i++) {
"addr should be in allocated part of generation");
}
}
"addr should be in allocated part of perm gen");
}
assert(false, "Some generation should contain the address");
return NULL;
}
for (int i = 0; i < _n_gens; i++) {
"addr should be in allocated part of generation");
}
}
"addr should be in allocated part of perm gen");
}
assert(false, "Some generation should contain the address");
return 0;
}
for (int i = 0; i < _n_gens; i++) {
}
}
}
assert(false, "Some generation should contain the address");
return false;
}
bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
if (_gens[i]->supports_tlab_allocation()) {
return true;
}
}
return false;
}
size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
}
return result;
}
size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
}
return result;
}
HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  return collector_policy()->mem_allocate_work(size /* size */,
                                               true /* is_tlab */,
                                               &gc_overhead_limit_was_exceeded);
}
// Requires "*prev_ptr" to be non-NULL. Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;  // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr = NULL, *smallest = NULL;
  ScratchBlock *cur = *prev_ptr;
  while (cur) {
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = cur->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur      = cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}
// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next = sorted;
    sorted         = smallest;
  }
  list = sorted;
}
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
return res;
}
void GenCollectedHeap::release_scratch() {
  for (int i = 0; i < _n_gens; i++) {
_gens[i]->reset_scratch();
}
}
class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);   // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
generation_iterate(&blk, false);
perm_gen()->prepare_for_verify();
}
void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
}
}
}
for (int i = 0; i < _n_gens; i++) {
}
}
bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {  // skip perm gen
if (!_gens[i]->is_maximal_no_gc()) {
return false;
}
}
return true;
}
void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
_gens[i]->save_marks();
}
perm_gen()->save_marks();
}
void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
  for (int i = 0; i <= collectedGen; i++) {
_gens[i]->compute_new_size();
}
}
GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  return _gch;
}
// Start by compacting into same gen.
while (scanning_gen != NULL) {
}
}
}
if (!silent) {
}
for (int i = _n_gens-1; i >= 0; i--) {
Generation* g = _gens[i];
if (!silent) {
}
g->verify();
}
if (!silent) {
}
}
for (int i = 0; i < _n_gens; i++) {
}
}
}
#ifndef SERIALGC
if (UseConcMarkSweepGC) {
}
#endif // SERIALGC
}
#ifndef SERIALGC
if (UseParNewGC) {
}
if (UseConcMarkSweepGC) {
}
#endif // SERIALGC
}
if (TraceGen0Time) {
get_gen(0)->print_summary_info();
}
  if (TraceGen1Time) {
    get_gen(1)->print_summary_info();
  }
}
void GenCollectedHeap::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}
// New method to print perm gen info with the PrintGCDetails flag.
}
private:
bool _full;
public:
}
};
always_do_update_barrier = false;
// Fill TLAB's and such
ensure_parsability(true); // retire TLABs
// Call allocation profiler
// Walk generations
};
private:
bool _full;
public:
}
};
#ifdef COMPILER2
#endif /* COMPILER2 */
  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }
};
#ifndef PRODUCT
private:
public:
gen->record_spaces_top();
}
};
void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);
    perm_gen()->record_spaces_top();
}
}
#endif // not PRODUCT
public:
}
};
void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
perm_gen()->ensure_parsability();
}
// First give each higher generation a chance to allocate the promoted object.
do {
}
// Then give gen and higher generations a chance to expand and allocate the
// object.
do {
}
}
}
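// A hedged sketch of the two-phase fallback described by the comments in
// handle_failed_promotion() above ('next_gen' and the allocation calls are
// assumptions about the surrounding code):
//
//   HeapWord* result = NULL;
//   // Phase 1: plain allocation in each higher generation.
//   for (Generation* g = next_gen(gen); g != NULL && result == NULL;
//        g = next_gen(g)) {
//     result = g->allocate(obj_size, false /* is_tlab */);
//   }
//   // Phase 2: allow expansion, starting at 'gen' itself.
//   for (Generation* g = gen; g != NULL && result == NULL; g = next_gen(g)) {
//     result = g->expand_and_allocate(obj_size, false /* is_tlab */);
//   }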
public:
}
};
jlong GenCollectedHeap::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  GenTimeOfLastGCClosure tolgc_cl(now);
  // Iterate over generations getting the oldest
  // time that a generation was collected.
  generation_iterate(&tolgc_cl, false);
  tolgc_cl.do_generation(perm_gen());
// javaTimeNanos() is guaranteed to be monotonically non-decreasing
// provided the underlying platform provides such a time source
// (and it is bug free). So we still have to guard against getting
// back a time later than 'now'.
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(warning("time warp: "INT64_FORMAT, retVal);)
return 0;
}
return retVal;
}