/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"
// statics
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
// relinquishes the CMS token if it were holding that token
// when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
// should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
private:
bool _is_cms_thread;
public:
"Incorrect argument to constructor");
}
~CMSTokenSync() {
"Incorrect state");
}
};
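// The following is an illustrative sketch only (guarded out by the
// hypothetical CMS_DOC_SKETCHES macro, and using hypothetical helper names
// rather than ConcurrentMarkSweepThread::[de]synchronize()): it shows the
// RAII shape of the token guard above -- claim the CMS token on entry,
// relinquish it on every exit path.
#ifdef CMS_DOC_SKETCHES
static inline void sketch_acquire_cms_token(bool /* is_cms_thread */) { /* stand-in */ }
static inline void sketch_release_cms_token(bool /* is_cms_thread */) { /* stand-in */ }

class SketchCMSTokenGuard {
 private:
  bool _is_cms_thread;
 public:
  explicit SketchCMSTokenGuard(bool is_cms_thread) : _is_cms_thread(is_cms_thread) {
    sketch_acquire_cms_token(_is_cms_thread);   // claim the CMS token
  }
  ~SketchCMSTokenGuard() {
    sketch_release_cms_token(_is_cms_thread);   // give it back on scope exit
  }
};
#endif // CMS_DOC_SKETCHES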
// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
private:
// Note: locks are acquired in textual declaration order
// and released in the opposite order
public:
{ }
};
// Wrapper class to temporarily disable icms during a foreground cms collection.
public:
// The ctor disables icms and wakes up the thread so it notices the change;
// the dtor re-enables icms. Note that the CMSCollector methods will check
// CMSIncrementalMode.
};
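// Illustrative sketch only (hypothetical names, guarded out by
// CMS_DOC_SKETCHES): the ctor/dtor pairing described above expressed as a
// scoped object, so incremental mode is re-enabled even on early returns.
#ifdef CMS_DOC_SKETCHES
static inline void sketch_set_icms_enabled(bool /* on */) { /* stand-in */ }

class SketchICMSDisabler {
 public:
  SketchICMSDisabler()  { sketch_set_icms_enabled(false); } // also wake the CMS thread
  ~SketchICMSDisabler() { sketch_set_icms_enabled(true);  }
};
#endif // CMS_DOC_SKETCHES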
//////////////////////////////////////////////////////////////////
// Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////
// This struct contains per-thread things necessary to support parallel
// young-gen collection.
public:
// Constructor.
}
};
{
_numObjectsPromoted = 0;
_numWordsPromoted = 0;
_numObjectsAllocated = 0;
_numWordsAllocated = 0;
)
"CompactibleFreeListSpace allocation failure");
}
_gc_stats = new CMSGCStats();
// Verify the assumption that FreeChunk::_prev and OopDesc::_klass
// offsets match. The ability to tell free chunks from objects
// depends on this property.
"Offset of FreeChunk::_prev within FreeChunk must match"
" that of OopDesc::_klass within OopDesc");
)
if (CollectedHeap::use_parallel_gc_threads()) {
if (_par_gc_thread_states == NULL) {
vm_exit_during_initialization("Could not allocate par gc structs");
}
for (uint i = 0; i < ParallelGCThreads; i++) {
if (_par_gc_thread_states[i] == NULL) {
vm_exit_during_initialization("Could not allocate par gc structs");
}
}
} else {
}
_incremental_collection_failed = false;
// The "dilatation_factor" is the expansion that can occur on
// account of the fact that the minimum object size in the CMS
// generation may be larger than that in, say, a contiguous young
// generation.
// Ideally, in the calculation below, we'd compute the dilatation
// factor as: MinChunkSize/(promoting_gen's min object size)
// Since we do not have such a general query interface for the
// promoting generation, we'll instead just use the minimum
// object size (which today is a header's worth of space);
// note that all arithmetic is in units of HeapWords.
}
// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle. Unless explicitly specified
// via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
// is calculated by:
//
// Let "f" be MinHeapFreeRatio in
//
// _initiating_occupancy = 100-f +
// f * (CMSTrigger[Perm]Ratio/100)
// where CMSTrigger[Perm]Ratio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
if (io >= 0) {
} else {
/ 100.0;
}
}
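// Worked sketch of the formula above (illustrative only; guarded out by the
// hypothetical CMS_DOC_SKETCHES macro -- the collector computes this in the
// code above, not via this helper).
#ifdef CMS_DOC_SKETCHES
static inline double sketch_initiating_occupancy(double f  /* MinHeapFreeRatio, percent */,
                                                 double tr /* CMSTrigger[Perm]Ratio, percent */) {
  // Assume the heap is at its desired maximum occupancy (100 - f) at the end
  // of a collection, and allow tr percent of the remaining free space to be
  // consumed before a new cycle starts.
  return ((100.0 - f) + f * (tr / 100.0)) / 100.0;
  // e.g. f = 40, tr = 80  =>  (60 + 32) / 100 = 0.92, i.e. trigger at 92%.
}
#endif // CMS_DOC_SKETCHES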
collector()->ref_processor_init();
}
if (_ref_processor == NULL) {
// Allocate and initialize a reference processor
(int) ParallelGCThreads, // mt processing degree
&_is_alive_closure, // closure for liveness info
false); // next field updates do not need write barrier
// Initialize the _ref_processor field of CMSGen
// Allocate a dummy ref processor for perm gen.
vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
}
}
}
"Wrong type of heap");
"Wrong type of size policy");
return sp;
}
"Wrong gc policy counter kind");
return results;
}
// Generation Counters - generation 1, 1 subspace
this, _gen_counters);
}
{
// Initialize the alphas to the bootstrap value of 100.
_gc0_duration = 0.0;
_gc0_period = 0.0;
_gc0_promoted = 0;
_cms_duration = 0.0;
_cms_period = 0.0;
_cms_allocated = 0;
_cms_used_at_gc0_end = 0;
_allow_duty_cycle_reduction = false;
_valid_bits = 0;
}
// TBD: CR 6909490
return 1.0;
}
}
// If promotion failure handling is on, use
// the padded average size of the promotion for each
// young generation collection.
if (cms_free > expected_promotion) {
// Start a cms collection if there isn't enough space to promote
// for the next minor collection. Use the padded average as
// a safety factor.
// Adjust by the safety factor.
// Apply a further correction factor which tries to adjust
// for recent occurrence of concurrent mode failures.
if (PrintGCDetails && Verbose) {
}
// Add 1 in case the consumption rate goes to zero.
}
return 0.0;
}
// Compare the duration of the cms collection to the
// time remaining before the cms generation is empty.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used. This
// has been tried and some applications experienced
// promotion failures early in execution. This was
// possibly because the averages were not accurate
// enough at the beginning.
// We add "gc0_period" to the "work" calculation
// below because this query is done (mostly) at the
// end of a scavenge, so we need to conservatively
// account for that much possible delay
// in the query so as to avoid concurrent mode failures
// due to starting the collection just a wee bit too
// late.
// If a concurrent mode failure occurred recently, we want to be
// more conservative and halve our expected time_until_cms_gen_full()
if (Verbose && PrintGCDetails) {
" CMSCollector: collect because of anticipated promotion "
"before full %3.7f + %3.7f > %3.7f ", cms_duration(),
}
return 0.0;
}
}
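// Illustrative sketch only (hypothetical helper, guarded out by
// CMS_DOC_SKETCHES): the decision described above condensed into one test --
// start a cycle if the expected collection time, padded by one young-gen
// period because the query is made at the end of a scavenge, exceeds the
// predicted time until the CMS generation is full.
#ifdef CMS_DOC_SKETCHES
static inline bool sketch_should_start_cms_cycle(double cms_duration_s,
                                                 double gc0_period_s,
                                                 double time_until_cms_gen_full_s) {
  const double work = cms_duration_s + gc0_period_s;   // conservative padding
  return work > time_until_cms_gen_full_s;
}
#endif // CMS_DOC_SKETCHES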
// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
// amount of change to prevent wild oscillation.
unsigned int new_duty_cycle) {
// Note: use subtraction with caution since it may underflow (values are
// unsigned). Addition is safe since we're in the range 0-100.
if (new_duty_cycle < old_duty_cycle) {
}
} else if (new_duty_cycle > old_duty_cycle) {
}
}
if (CMSTraceIncrementalPacing) {
}
return damped_duty_cycle;
}
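// Illustrative sketch only (hypothetical helper, guarded out by
// CMS_DOC_SKETCHES): damping as described above -- step from the old duty
// cycle toward the new one by at most max_step percentage points, so one
// noisy sample cannot swing the duty cycle wildly.
#ifdef CMS_DOC_SKETCHES
static inline unsigned int sketch_damped_duty_cycle(unsigned int old_dc,
                                                    unsigned int new_dc,
                                                    unsigned int max_step) {
  if (new_dc > old_dc) {
    unsigned int delta = new_dc - old_dc;             // no underflow: new > old
    return old_dc + (delta < max_step ? delta : max_step);
  }
  if (new_dc < old_dc) {
    unsigned int delta = old_dc - new_dc;             // no underflow: old > new
    return old_dc - (delta < max_step ? delta : max_step);
  }
  return old_dc;                                      // unchanged
}
#endif // CMS_DOC_SKETCHES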
"should be handled in icms_update_duty_cycle()");
// Avoid division by 0.
if (new_duty_cycle > _icms_duty_cycle) {
// Avoid very small duty cycles (1 or 2); 0 is allowed.
if (new_duty_cycle > 2) {
}
} else if (_allow_duty_cycle_reduction) {
// The duty cycle is reduced only once per cms cycle (see record_cms_end()).
// Respect the minimum duty cycle.
}
if (PrintGCDetails || CMSTraceIncrementalPacing) {
}
_allow_duty_cycle_reduction = false;
return _icms_duty_cycle;
}
#ifndef PRODUCT
cms_period(), cms_allocated());
if (CMSIncrementalMode) {
}
if (valid()) {
}
}
#endif // #ifndef PRODUCT
_abort_preclean(false),
_start_sampling(false),
_between_prologue_and_epilogue(false),
-1 /* lock-free */, "No_lock" /* dummy */),
// Adjust my span to cover old (cms) gen and perm gen
// Construct the is_alive_closure with _span & markBitMap
_eden_chunk_capacity(0), // -- ditto --
_eden_chunk_index(0), // -- ditto --
_survivor_chunk_capacity(0), // -- ditto --
_survivor_chunk_index(0), // -- ditto --
_ser_kac_ovflw(0),
_par_kac_ovflw(0),
#ifndef PRODUCT
_num_par_pushes(0),
#endif
_verifying(false),
_completed_initialization(false),
_should_unload_classes(false),
_cms_start_registered(false)
{
ExplicitGCInvokesConcurrent = true;
}
// Now expand the span and allocate the collection support structures
// (MUT, marking bit map etc.) to cover both generations subject to
// collection.
// First check that _permGen is adjacent to _cmsGen and above it.
"generations should not be of zero size");
"_cmsGen and _permGen should not overlap");
"_cmsGen->end() different from _permGen->start()");
// For use by dirty card to oop closures.
// Allocate MUT and marking bit map
{
warning("Failed to allocate CMS Bit Map");
return;
}
}
{
}
warning("Failed to allocate CMS Marking Stack");
return;
}
warning("Failed to allocate CMS Revisit Stack");
return;
}
// Support for multi-threaded concurrent phases
if (CMSConcurrentMTEnabled) {
if (FLAG_IS_DEFAULT(ConcGCThreads)) {
// just for now
}
if (ConcGCThreads > 1) {
ConcGCThreads, true);
if (_conc_workers == NULL) {
"forcing -CMSConcurrentMTEnabled");
CMSConcurrentMTEnabled = false;
} else {
}
} else {
CMSConcurrentMTEnabled = false;
}
}
if (!CMSConcurrentMTEnabled) {
ConcGCThreads = 0;
} else {
// Turn off CMSCleanOnEnter optimization temporarily for
// the MT case where it's not fixed yet; see 6178663.
CMSCleanOnEnter = false;
}
"Inconsistency");
// Parallel task queues; these are shared for the
// concurrent and stop-world phases of CMS, but
// are not shared with parallel scavenge (ParNew).
{
uint i;
&& num_queues > 0) {
if (_task_queues == NULL) {
warning("task_queues allocation failure.");
return;
}
if (_hash_seed == NULL) {
warning("_hash_seed array allocation failure");
return;
}
for (i = 0; i < num_queues; i++) {
PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
if (q == NULL) {
warning("work_queue allocation failure.");
return;
}
_task_queues->register_queue(i, q);
}
for (i = 0; i < num_queues; i++) {
}
}
}
// Clip CMSBootstrapOccupancy between 0 and 100.
/(double)100;
// Now tell CMS generations the identity of their collector
// Create & start a CMS thread for this CMS collector
"CMS Thread should refer to this gen");
// Support for parallelizing young gen rescan
if (gch->supports_inline_contig_alloc()) {
_eden_chunk_index = 0;
if (_eden_chunk_array == NULL) {
_eden_chunk_capacity = 0;
}
}
// Support for parallelizing survivor space rescan
if (_survivor_plab_array != NULL) {
}
if (_survivor_chunk_array != NULL) {
}
}
} else {
for (uint i = 0; i < ParallelGCThreads; i++) {
warning("Failed to allocate survivor plab array");
for (int j = i; j > 0; j--) {
}
break;
} else {
}
}
}
}
&& _survivor_chunk_array != NULL)
|| ( _survivor_chunk_capacity == 0
&& _survivor_chunk_index == 0),
"Error");
// Choose what strong roots should be scanned depending on verification options
// and perm gen collection mode.
if (!CMSClassUnloadingEnabled) {
// If class unloading is disabled we want to include all classes into the root set.
} else {
}
_completed_initialization = true;
}
return "concurrent mark-sweep generation";
}
if (UsePerfData) {
}
}
// This is an optimized version of update_counters(). It takes the
// used value as a parameter rather than computing it.
//
if (UsePerfData) {
}
}
Generation::print();
}
#ifndef PRODUCT
cmsSpace()->printFLCensus(0);
}
#endif
if (PrintGCDetails) {
if (Verbose) {
} else {
}
}
if (Verbose) {
} else {
}
}
// dld proposes an improvement in precision here. If the committed
// part of the space ends in a free block we should add that to
// uncommitted size in the calculation below. Will make this
// change later, staying with the approximation below for the
// time being. -- ysr.
}
}
}
bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
if (Verbose && PrintGCDetails) {
}
return res;
}
// At a promotion failure dump information on block layout in heap
// (cms old generation).
if (CMSDumpAtPromotionFailure) {
}
}
return _cmsSpace;
}
// Clear the promotion information. These pointers can be adjusted
// along with all the other pointers into the heap but
// compaction is expected to be a rare event with
// a heap using cms so don't do it without seeing the need.
if (CollectedHeap::use_parallel_gc_threads()) {
for (uint i = 0; i < ParallelGCThreads; i++) {
}
}
}
}
// If incremental collection failed, we just want to expand
// to the limit.
if (incremental_collection_failed()) {
return;
}
// compute expansion delta needed for reaching desired free percentage
if (free_percentage < desired_free_percentage) {
}
if (expand_bytes > 0) {
if (PrintGCDetails && Verbose) {
desired_capacity/1000);
if (prev_level >= 0) {
prev_size/1000);
}
unsafe_max_alloc_nogc()/1000);
contiguous_available()/1000);
}
// safe if expansion fails
if (PrintGCDetails && Verbose) {
}
}
}
return cmsSpace()->freelistLock();
}
bool tlab) {
}
bool tlab /* ignored */) {
// Allocate the object live (grey) if the background collector has
// started marking. This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
// We may block here with an uninitialized object with
// its mark-bit or P-bits not yet set. Such objects need
// to be safely navigable by block_start().
// allocation counters
_numWordsAllocated += (int)adjustedSize;
)
}
return res;
}
// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
if (_collectorState >= Marking) {
// [see comments preceding SweepClosure::do_blk() below for details]
// 1. need to mark the object as live so it isn't collected
// 2. need to mark the 2nd bit to indicate the object may be uninitialized
// 3. need to mark the end of the object so marking, precleaning or sweeping
// can skip over uninitialized or unparsable objects. An allocated
// object is considered uninitialized for our purposes as long as
// its klass word is NULL. (Unparsable objects are those which are
// initialized in the sense just described, but whose sizes can still
// not be correctly determined. Note that the class of unparsable objects
// can only occur in the perm gen. All old gen objects are parsable
// as soon as they are initialized.)
// mark end of object
}
// check that oop looks uninitialized
}
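// Illustrative sketch only (hypothetical types and helpers, guarded out by
// CMS_DOC_SKETCHES; not the CMSBitMap API): the three marking steps listed
// above for an object allocated directly while marking is in progress.
#ifdef CMS_DOC_SKETCHES
struct SketchBitMap {
  void mark(const void* /* word_addr */) { /* set the bit for this word */ }
};

static inline void sketch_mark_direct_allocation(SketchBitMap& bm,
                                                 void** start, size_t word_sz) {
  bm.mark(start);                 // 1. live: the sweeper must not reclaim it
  bm.mark(start + 1);             // 2. second bit: "may be uninitialized"
  bm.mark(start + word_sz - 1);   // 3. end: scanners can skip past it safely
}
#endif // CMS_DOC_SKETCHES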
// See comment in direct_allocated() about when objects should
// be allocated live.
if (_collectorState >= Marking) {
// we already hold the marking bit map lock, taken in
// the prologue
if (par) {
} else {
}
// We don't need to mark the object as uninitialized (as
// in direct_allocated above) because this is being done with the
// world stopped and the object will be initialized by the
// time the marking, precleaning or sweeping get to look at it.
// But see the code for copying objects into the CMS generation,
// where we need to ensure that concurrent readers of the
// block offset table are able to safely navigate a block that
// is in flux from being free to being allocated (and in
// transition while being copied into) and subsequently
"expect promotion only at safepoints");
if (_collectorState < Sweeping) {
// Mark the appropriate cards in the modUnionTable, so that
// this object gets scanned before the sweep. If this is
// not done, CMS generation references in the object might
// not get marked.
// For the case of arrays, which are otherwise precisely
// marked, we need to dirty the entire array, not just its head.
if (is_obj_array) {
// The [par_]mark_range() method expects mr.end() below to
// be aligned to the granularity of a bit's representation
// in the heap. In the case of the MUT below, that's a
// card size.
if (par) {
} else {
}
} else { // not an obj array; we can just mark the head
if (par) {
} else {
}
}
}
}
}
{
}
{
if (CMSTraceIncrementalPacing) {
}
if (duty_cycle != 0) {
// The duty_cycle is a percentage between 0 and 100; convert to words and
// then compute the offset from the endpoints of the space.
// The limits may be adjusted (shifted to the right) by
// CMSIncrementalOffset, to allow the application more mutator time after a
// young gen gc (when all mutators were stopped) and before CMS starts and
// takes away one or more cpus.
if (CMSIncrementalOffset != 0) {
}
}
}
}
// Install the new start limit.
if (CMSTraceIncrementalMode) {
if (Verbose) {
}
}
}
// Any changes here should try to maintain the invariant
// that if this method is called with _icms_start_limit
// and _icms_stop_limit both NULL, then it should return NULL
// and not notify the icms thread.
{
// A start_limit equal to end() means the duty cycle is 0, so treat that as a
// nop.
if (top <= _icms_start_limit) {
if (CMSTraceIncrementalMode) {
gclog_or_tty->stamp();
", new limit=" PTR_FORMAT
}
return _icms_stop_limit;
}
// The allocation will cross both the _start and _stop limits, so do the
// stop notification also and return end().
if (CMSTraceIncrementalMode) {
gclog_or_tty->stamp();
", new limit=" PTR_FORMAT
}
}
if (top <= _icms_stop_limit) {
if (CMSTraceIncrementalMode) {
gclog_or_tty->stamp();
", new limit=" PTR_FORMAT
}
}
if (CMSTraceIncrementalMode) {
gclog_or_tty->stamp();
", new limit=" PTR_FORMAT,
}
}
return NULL;
}
// allocate, copy and if necessary update promoinfo --
// delegate to underlying space.
#ifndef PRODUCT
return NULL;
}
#endif // #ifndef PRODUCT
// expand and retry
// Since there's currently no next generation, we don't try to promote
// into a more senior generation.
"is made to pass on a possibly failing "
"promotion to next generation");
}
// See comment in allocate() about when objects should
// be allocated live.
// promotion counters
)
}
return res;
}
{
}
// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
// one of three states. A free block (FREE), an allocated
// object (OBJECT) whose size() method reports the correct size,
// and an intermediate state (TRANSIENT) in which its size cannot
// be accurately determined.
// STATE IDENTIFICATION: (32 bit and 64 bit w/o COOPS)
// -----------------------------------------------------
// FREE: klass_word & 1 == 1; mark_word holds block size
//
// OBJECT: klass_word installed; klass_word != 0 && klass_word & 1 == 0;
// obj->size() computes correct size
// [Perm Gen objects need to be "parsable" before they can be navigated]
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
// STATE IDENTIFICATION: (64 bit+COOPS)
// ------------------------------------
// FREE: mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
//
// OBJECT: klass_word installed; klass_word != 0;
// obj->size() computes correct size
// [Perm Gen comment above continues to hold]
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
//
// STATE TRANSITION DIAGRAM
//
// mut / parnew mut / parnew
// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
// ^ |
// |------------------------ DEAD <------------------------------------|
// sweep mut
//
// While a block is in TRANSIENT state its size cannot be determined
// so readers will either need to come back later or stall until
// the size can be determined. Note that for the case of direct
// allocation, P-bits, when available, may be used to determine the
// size of an object that may not yet have been initialized.
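// Illustrative sketch only (hypothetical helper, guarded out by
// CMS_DOC_SKETCHES): the 32-bit / 64-bit-without-compressed-oops portion of
// the state table above as a classifier over the raw klass word.
#ifdef CMS_DOC_SKETCHES
enum SketchBlockState { SKETCH_FREE, SKETCH_OBJECT, SKETCH_TRANSIENT };

static inline SketchBlockState sketch_classify_block(uintptr_t klass_word) {
  if ((klass_word & 1) != 0) {   // FREE: low bit set (mark word holds block size)
    return SKETCH_FREE;
  }
  if (klass_word != 0) {         // OBJECT: klass pointer installed, size() is valid
    return SKETCH_OBJECT;
  }
  return SKETCH_TRANSIENT;       // klass word still NULL: size indeterminate
}
#endif // CMS_DOC_SKETCHES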
// Things to support parallel young-gen collection.
#ifndef PRODUCT
return NULL;
}
#endif // #ifndef PRODUCT
// if we are tracking promotions, then first ensure space for
// promotion (including spooling space for saving header if necessary).
// then allocate and copy, then track promoted info if needed.
// When tracking (see PromotionInfo::track()), the mark word may
// be displaced and in this case restoration of the mark word
// occurs in the (oop_since_save_marks_)iterate phase.
// Out of space for allocating spooling buffers;
// try expanding and allocating spooling buffers.
return NULL;
}
}
return NULL;
}
}
// IMPORTANT: See note on object initialization for CMS above.
// Otherwise, copy the object. Here we must be careful to insert the
// klass pointer last, since this marks the block as an allocated object.
// Except with compressed oops it's the mark word.
// Restore the mark word copied above.
if (UseCompressedOops) {
// Copy gap missed by (aligned) header size calculation below
}
}
// Now we can track the promoted object, if necessary. We take care
// to delay the transition from uninitialized to full object
// (i.e., insertion of klass pointer) until after, so that it
// atomically becomes a promoted object.
}
// Finally, install the klass pointer (this should be volatile).
// We should now be able to calculate the right size for this object
assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
)
return obj;
}
void
// CMS does not support promotion undo.
}
void
par_promote_alloc_done(int thread_num) {
}
void
}
// XXXPERM
bool tlab)
{
// We allow a STW collection only if a full
// collection was requested.
// This and promotion failure handling are connected at the
// hip and should be fixed by untying them.
}
bool CMSCollector::shouldConcurrentCollect() {
if (_full_gc_requested) {
if (Verbose && PrintGCDetails) {
" gc request (or gc_locker)");
}
return true;
}
// For debugging purposes, change the type of collection.
// If the rotation is not on the concurrent collection
// type, don't start a concurrent collection.
if (RotateCMSCollectionTypes &&
(_cmsGen->debug_collection_type() !=
"Bad cms collection type");
return false;
}
)
FreelistLocker x(this);
// ------------------------------------------------------------------
// Print out lots of information which affects the initiation of
// a collection.
gclog_or_tty->stamp();
stats().time_until_cms_gen_full());
}
// ------------------------------------------------------------------
// If the estimated time to complete a cms collection (cms_duration())
// is less than the estimated time remaining until the cms generation
// is full, start a collection.
if (!UseCMSInitiatingOccupancyOnly) {
return true;
}
} else {
// We want to conservatively collect somewhat early in order
// this branch will not fire after the first successful CMS
// collection because the stats should then be valid.
if (Verbose && PrintGCDetails) {
" CMSCollector: collect for bootstrapping statistics:"
}
return true;
}
}
}
// Otherwise, we start a collection cycle if either the perm gen or
// old gen want a collection cycle started. Each may use
// an appropriate criterion for making this decision.
// XXX We need to make sure that the gen expansion
// criterion dovetails well with this. XXX NEED TO FIX THIS
if (_cmsGen->should_concurrent_collect()) {
if (Verbose && PrintGCDetails) {
}
return true;
}
// We start a collection if we believe an incremental collection may fail;
// this is not likely to be productive in practice because it's probably too
// late anyway.
"You may want to check the correctness of the following");
if (Verbose && PrintGCDetails) {
}
return true;
}
bool res = update_should_unload_classes();
if (res) {
if (Verbose && PrintGCDetails) {
}
return true;
}
}
return false;
}
// Clear _expansion_cause fields of constituent generations
void CMSCollector::clear_expansion_cause() {
}
// We should be conservative in starting a collection cycle. To
// start too eagerly runs the risk of collecting too often in the
// extreme. To collect too rarely falls back on full collections,
// which works, even if not optimum in terms of concurrent work.
// As a work around for too eagerly collecting, use the flag
// UseCMSInitiatingOccupancyOnly. This also has the advantage of
// giving the user an easily understandable way of controlling the
// collections.
// We want to start a new collection cycle if any of the following
// conditions hold:
// . our current occupancy exceeds the configured initiating occupancy
// for this generation, or
// . we recently needed to expand this space and have not, since that
// expansion, done a collection of this generation, or
// . the underlying space believes that it may be a good idea to initiate
// a concurrent collection (this may be based on criteria such as the
// following: the space uses linear allocation and linear allocation is
// going to fail, or there is believed to be excessive fragmentation in
// the generation, etc... or ...
// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
// the case of the old generation, not the perm generation; see CR 6543076):
// we may be approaching a point at which allocation requests may fail because
// we will be out of sufficient free space given allocation rate estimates.]
bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
if (occupancy() > initiating_occupancy()) {
if (PrintGCDetails && Verbose) {
}
return true;
}
return false;
}
if (PrintGCDetails && Verbose) {
short_name());
}
return true;
}
if (_cmsSpace->should_concurrent_collect()) {
if (PrintGCDetails && Verbose) {
short_name());
}
return true;
}
return false;
}
bool clear_all_soft_refs,
bool tlab)
{
}
bool clear_all_soft_refs,
bool tlab)
{
// For debugging purposes skip the collection if the state
// is not currently idle
if (TraceCMSState) {
}
return;
}
// The following "if" branch is present for defensive reasons.
// In the current uses of this interface, it can be replaced with:
// assert(!GC_locker.is_active(), "Can't be called otherwise");
// But I am not placing that assert here to allow future
// generality in invoking this interface.
// A consistency test for GC_locker
// Skip this foreground collection, instead
// expanding the heap if necessary.
// Need the free list locks for the call to free() in compute_new_size()
return;
}
}
if (gc_count == full_gc_count) {
_full_gc_requested = true;
} else {
}
}
bool CMSCollector::is_external_interruption() {
}
if (is_external_interruption()) {
if (PrintGCDetails) {
}
} else {
if (PrintGCDetails) {
}
}
}
// The foreground and background collectors need to coordinate in order
// to make sure that they do not mutually interfere with CMS collections.
// When a background collection is active,
// the foreground collector may need to take over (preempt) and
// synchronously complete an ongoing collection. Depending on the
// frequency of the background collections and the heap usage
// of the application, this preemption can be seldom or frequent.
// There are only certain
// points in the background collection that the "collection-baton"
// can be passed to the foreground collector.
//
// The foreground collector will wait for the baton before
// starting any part of the collection. The foreground collector
// will only wait at one location.
//
// The background collector will yield the baton before starting a new
// phase of the collection (e.g., before initial marking, marking from roots,
// precleaning, final re-mark, sweep etc.) This is normally done at the head
// of the loop which switches the phases. The background collector does some
// of the phases (initial mark, final re-mark) with the world stopped.
// Because of locking involved in stopping the world,
// the foreground collector should not block waiting for the background
// collector when it is doing a stop-the-world phase. The background
// collector will yield the baton at an additional point just before
// it enters a stop-the-world phase. Once the world is stopped, the
// background collector checks the phase of the collection. If the
// phase has not changed, it proceeds with the collection. If the
// phase has changed, it skips that phase of the collection. See
// the comments on the use of the Heap_lock in collect_in_background().
//
// Variable used in baton passing.
// _foregroundGCIsActive - Set to true by the foreground collector when
// it wants the baton. The foreground clears it when it has finished
// the collection.
// _foregroundGCShouldWait - Set to true by the background collector
// when it is running. The foreground collector waits while
// _foregroundGCShouldWait is true.
// CGC_lock - monitor used to protect access to the above variables
// and to notify the foreground and background collectors.
// _collectorState - current state of the CMS collection.
//
// The foreground collector
// acquires the CGC_lock
// sets _foregroundGCIsActive
// waits on the CGC_lock for _foregroundGCShouldWait to be false
// various locks acquired in preparation for the collection
// are released so as not to block the background collector
// that is in the midst of a collection
// proceeds with the collection
// clears _foregroundGCIsActive
// returns
//
// The background collector in a loop iterating on the phases of the
// collection
// acquires the CGC_lock
// sets _foregroundGCShouldWait
// if _foregroundGCIsActive is set
// clears _foregroundGCShouldWait, notifies _CGC_lock
// waits on _CGC_lock for _foregroundGCIsActive to become false
// and exits the loop.
// otherwise
// proceed with that phase of the collection
// if the phase is a stop-the-world phase,
// yield the baton once more just before enqueueing
// the stop-world CMS operation (executed by the VM thread).
// returns after all phases of the collection are done
//
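// Illustrative sketch only (guarded out by CMS_DOC_SKETCHES, and using
// standard-library primitives purely for illustration -- the VM code below
// uses CGC_lock/MutexLockerEx instead): the foreground side of the baton
// protocol described above.
#ifdef CMS_DOC_SKETCHES
#include <mutex>
#include <condition_variable>

struct SketchBatonState {
  std::mutex              cgc_lock;
  std::condition_variable cgc_cv;
  bool foreground_gc_is_active;
  bool foreground_gc_should_wait;
  SketchBatonState() : foreground_gc_is_active(false),
                       foreground_gc_should_wait(false) {}
};

static void sketch_foreground_collect(SketchBatonState& s) {
  std::unique_lock<std::mutex> x(s.cgc_lock);
  s.foreground_gc_is_active = true;          // request the baton
  while (s.foreground_gc_should_wait) {      // background collector still running
    s.cgc_cv.wait(x);                        // woken when it yields the baton
  }
  x.unlock();
  // ... the foreground collection proceeds here, holding the baton ...
  x.lock();
  s.foreground_gc_is_active = false;         // hand the baton back
  s.cgc_cv.notify_all();                     // let the background collector resume
  // cgc_lock is released when x goes out of scope
}
#endif // CMS_DOC_SKETCHES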
bool clear_all_soft_refs) {
"shouldn't try to acquire control from self!");
// Start the protocol for acquiring control of the
// collection from the background collector (aka CMS thread).
"VM thread should have CMS token");
// Remember the possibly interrupted state of an ongoing
// concurrent collection
// Signal to a possibly ongoing concurrent collection that
// we want to do a foreground collection.
_foregroundGCIsActive = true;
// Disable incremental mode during a foreground collection.
// release locks and wait for a notify from the background collector
// releasing the locks is only necessary for phases which
// do yields to improve the granularity of the collection.
// We need to lock the Free list lock for the space that we are
// currently collecting.
bitMapLock()->unlock();
{
if (_foregroundGCShouldWait) {
// We are going to be waiting for action for the CMS thread;
// it had better not be gone (for instance at shutdown)!
"CMS thread must be running");
// Wait here until the background collector gives us the go-ahead
// Get a possibly blocked CMS thread going:
// Note that we set _foregroundGCIsActive true above,
// without protection of the CGC_lock.
"Possible deadlock");
while (_foregroundGCShouldWait) {
// wait for notification
// Possibility of delay/starvation here, since CMS token does
// not know to give priority to the VM thread? Actually, I think
// there wouldn't be any delay/starvation, but the proof of
// that "fact" (?) appears non-trivial. XXX 20011219YSR
}
}
}
// The CMS_token is already held. Get back the other locks.
"VM thread should have CMS token");
if (TraceCMSState) {
}
// Check if we need to do a compaction, or if not, whether
// we need to start the mark-sweep from scratch.
bool should_compact = false;
bool should_start_over = false;
if (RotateCMSCollectionTypes) {
if (_cmsGen->debug_collection_type() ==
should_compact = true;
} else if (_cmsGen->debug_collection_type() ==
should_compact = false;
}
}
)
if (first_state > Idling) {
}
if (should_compact) {
// If the collection is being acquired from the background
// collector, there may be references on the discovered
// references lists that have NULL referents (being those
// that were concurrently cleared by a mutator) or
// that are no longer active (having been enqueued concurrently
// by the mutator).
// Scrub the list of those references because Mark-Sweep-Compact
// code assumes referents are not NULL and that all discovered
// Reference objects are active.
if (first_state > Idling) {
}
// Has the GC time limit been exceeded?
_cmsGen->max_capacity(),
full,
gch->collector_policy());
} else {
}
// Reset the expansion cause, now that we just completed
// a collection cycle.
_foregroundGCIsActive = false;
return;
}
// Resize the perm generation and the tenured generation
// after obtaining the free list locks for the
// two generations.
void CMSCollector::compute_new_size() {
FreelistLocker z(this);
}
// A work method used by foreground collection to determine
// what type of collection (compacting or not, continuing or fresh)
// it should do.
// NOTE: the intent is to make UseCMSCompactAtFullCollection
// and CMSCompactWhenClearAllSoftRefs the default in the future
// and do away with the flags after a suitable period.
bool clear_all_soft_refs, bool* should_compact,
bool* should_start_over) {
// Normally, we'll compact only if the UseCMSCompactAtFullCollection
// flag is set, and we have either requested a System.gc() or
// the number of full gc's since the last concurrent cycle
// has exceeded the threshold set by CMSFullGCsBeforeCompaction,
// or if an incremental collection has failed
"You may want to check the correctness of the following");
// Inform cms gen if this was due to partial collection failing.
// The CMS gen may use this fact to determine its expansion policy.
"Should have been noticed, reacted to and cleared");
}
*should_start_over = false;
if (clear_all_soft_refs && !*should_compact) {
// We are about to do a last ditch collection attempt
// so it would normally make sense to do a compaction
// to reclaim as much space as possible.
// Default: The rationale is that in this case either
// we are past the final marking phase, in which case
// we'd have to start over, or so little has been done
// that there's little point in saving that work. Compaction
// appears to be the sensible choice in either case.
*should_compact = true;
} else {
// We have been asked to clear all soft refs, but not to
// compact. Make sure that we aren't past the final checkpoint
// phase, for that is where we process soft refs. If we are already
// past that phase, we'll need to redo the refs discovery phase and
// if necessary clear soft refs that weren't previously
// cleared. We do so by remembering the phase in which
// we came in, and if we are past the refs processing
// phase, we'll choose to just redo the mark-sweep
// collection from scratch.
if (_collectorState > FinalMarking) {
// We are past the refs processing phase;
// start over and do a fresh synchronous CMS cycle
reset(false /* == !asynch */);
*should_start_over = true;
} // else we can continue a possibly ongoing current cycle
}
}
}
// A work method used by the foreground collector to do
// a mark-sweep-compact.
"collections passed to foreground collector", _full_gcs_since_conc_gc);
}
// Sample collection interval time and reset for collection pause.
if (UseAdaptiveSizePolicy) {
}
// Temporarily widen the span of the weak reference processing to
// the entire heap.
// Temporarily, clear the "is_alive_non_header" field of the
// reference processor.
// Temporarily make reference _processing_ single threaded (non-MT).
// Temporarily make refs discovery atomic
// Temporarily make reference _discovery_ single threaded (non-MT)
ref_processor()->set_enqueuing_is_done(false);
// If an asynchronous collection finishes, the _modUnionTable is
// all clear. If we are assuming the collection from an asynchronous
// collection, clear the _modUnionTable.
"_modUnionTable should be clear if the baton was not passed");
// We must adjust the allocation statistics being maintained
// in the free list space. We do so by reading and clearing
// the sweep timer and updating the block flux rate estimates below.
if (_inter_sweep_timer.is_active()) {
// Note that we do not use this sample to update the _inter_sweep_estimate.
}
#ifdef ASSERT
* HeapWordSize,
"All the free space should be compacted into one chunk at top");
cms_space->totalSizeInIndexedFreeLists() == 0,
"All the free space should be in a single chunk");
"There should be at most 2 free chunks after compaction");
#endif // ASSERT
"Should have been NULL'd before baton was passed");
reset(false /* == !asynch */);
if (verifying() && !should_unload_classes()) {
}
// Clear any data recorded in the PLAB chunk arrays.
if (_survivor_plab_array != NULL) {
}
// Adjust the per-size allocation stats for the next epoch.
// Restart the "inter sweep timer" for the next epoch.
// Sample collection pause time and reset for collection interval.
if (UseAdaptiveSizePolicy) {
}
// For a mark-sweep-compact, compute_new_size() will be called
// in the heap's do_collection() method.
}
// A work method used by the foreground collector to do
// a mark-sweep, after taking over from a possibly on-going
// concurrent mark-sweep collection.
"collector with count %d",
}
switch (_collectorState) {
case Idling:
// The background GC was not active, or should be
// restarted from scratch; start the cycle.
}
// If first_state was not Idling, then a background GC
// was in progress and has now finished. No need to do it
// again. Leave the state as Idling.
break;
case Precleaning:
// In the foreground case don't do the precleaning since
// it is not done concurrently and there is extra work
// required.
}
// For a mark-sweep, compute_new_size() will be called
// in the heap's do_collection() method.
}
void CMSCollector::getFreelistLocks() const {
// Get locks for all free lists in all generations that this
// collector is responsible for
}
void CMSCollector::releaseFreelistLocks() const {
// Release locks for all free lists in all generations that this
// collector is responsible for
}
bool CMSCollector::haveFreelistLocks() const {
// Check locks for all free lists in all generations that this
// collector is responsible for
return true;
}
// A utility class that is used by the CMS collector to
// temporarily "release" the foreground collector from its
// usual obligation to wait for the background collector to
// complete an ongoing phase before proceeding.
class ReleaseForegroundGC: public StackObj {
private:
public:
// allow a potentially blocked foreground collector to proceed
_c->_foregroundGCShouldWait = false;
if (_c->_foregroundGCIsActive) {
}
"Possible deadlock");
}
~ReleaseForegroundGC() {
_c->_foregroundGCShouldWait = true;
}
};
// There are separate collect_in_background and collect_in_foreground because of
// the different locking requirements of the background collector and the
// foreground collector. There was originally an attempt to share
// one "collect" method between the background collector and the foreground
// collector but the if-then-else required made it cleaner to have
// separate methods.
"A CMS asynchronous collection is only allowed on a CMS thread.");
{
FreelistLocker fll(this);
if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
// The foreground collector is active or we're
// not using asynchronous collections. Skip this
// background collection.
return;
} else {
// Reset the expansion cause, now that we are about to begin
// a new cycle.
}
// Decide if we want to enable class unloading as part of the
// ensuing concurrent GC cycle.
_full_gc_requested = false; // acks all outstanding full gc requests
// Signal that we are about to start a collection
}
// Used for PrintGC
}
// The change of the collection state is normally done at this level;
// the exceptions are phases that are executed while the world is
// stopped. For those phases the change of state is done while the
// world is stopped. For baton passing purposes this allows the
// background collector to finish the phase and change state atomically.
// The foreground collector cannot wait on a phase that is done
// while the world is stopped because the foreground collector already
// has the world stopped and would deadlock.
while (_collectorState != Idling) {
if (TraceCMSState) {
}
// The foreground collector
// holds the Heap_lock throughout its collection.
// holds the CMS token (but not the lock)
// except while it is waiting for the background collector to yield.
//
// The foreground collector should be blocked (not for long)
// if the background collector is about to start a phase
// executed with world stopped. If the background
// collector has already started such a phase, the
// foreground collector is blocked waiting for the
// Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
// are executed in the VM thread.
//
// The locking order is
// PendingListLock (PLL) -- if applicable (FinalMarking)
// Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
// CMS token (claimed in
// stop_world_and_do() -->
// safepoint_synchronize() -->
// CMSThread::synchronize())
{
// Check if the FG collector wants us to yield.
CMSTokenSync x(true); // is cms thread
if (waitForForegroundGC()) {
// We yielded to a foreground GC, nothing more to be
// done this round.
"waitForForegroundGC()");
if (TraceCMSState) {
" exiting collection CMS state %d",
}
return;
} else {
// The background collector can run but check to see if the
// foreground collector has done a collection while the
// background collector was waiting to get the CGC_lock
// above. If yes, break so that _foregroundGCShouldWait
// is cleared before returning.
if (_collectorState == Idling) {
break;
}
}
}
"should be waiting");
switch (_collectorState) {
case InitialMarking:
{
ReleaseForegroundGC x(this);
stats().record_cms_begin();
}
// The collector state may be any legal state at this point
// since the background collector may have yielded to the
// foreground collector.
break;
case Marking:
// initial marking in checkpointRootsInitialWork has been completed
if (markFromRoots(true)) { // we were successful
"have changed");
} else {
}
break;
case Precleaning:
if (UseAdaptiveSizePolicy) {
}
// marking from roots in markFromRoots has been completed
preclean();
if (UseAdaptiveSizePolicy) {
}
"Collector state should have changed");
break;
case AbortablePreclean:
if (UseAdaptiveSizePolicy) {
}
if (UseAdaptiveSizePolicy) {
}
"have changed");
break;
case FinalMarking:
{
ReleaseForegroundGC x(this);
}
break;
case Sweeping:
if (UseAdaptiveSizePolicy) {
}
// final marking in checkpointRootsFinal has been completed
sweep(true);
"to Resizing must be done under the free_list_lock");
// Stop the timers for adaptive size policy for the concurrent phases
if (UseAdaptiveSizePolicy) {
}
case Resizing: {
// Sweeping has been completed...
// At this point the background collection has completed.
// Don't move the call to compute_new_size() down
// into code that might be executed if the background
// collection was preempted.
{
ReleaseForegroundGC x(this); // unblock FG collection
CMSTokenSync z(true); // not strictly needed.
if (_collectorState == Resizing) {
} else {
" because the foreground collector has finished the collection");
}
}
break;
}
case Resetting:
// CMS heap resizing has been completed
reset(true);
"have changed");
stats().record_cms_end();
// Don't move the concurrent_phases_end() and compute_new_size()
// calls to here because a preempted background collection
// has its state set to "Resetting".
break;
case Idling:
default:
break;
}
if (TraceCMSState) {
}
}
// Should this be in gc_epilogue?
{
// Clear _foregroundGCShouldWait and, in the event that the
// foreground collector is waiting, notify it, before
// returning.
_foregroundGCShouldWait = false;
if (_foregroundGCIsActive) {
}
"Possible deadlock");
}
if (TraceCMSState) {
" exiting collection CMS state %d",
}
}
}
if (!_cms_start_registered) {
}
}
_cms_start_registered = true;
}
void CMSCollector::register_gc_end() {
if (_cms_start_registered) {
_cms_start_registered = false;
}
}
void CMSCollector::save_heap_summary() {
}
}
"Foreground collector should be waiting, not executing");
"may only be done by the VM Thread with the world stopped");
"VM thread should have CMS token");
true, NULL);)
if (UseAdaptiveSizePolicy) {
}
if (VerifyBeforeGC &&
}
// Snapshot the soft reference policy to be used in this collection cycle.
bool init_mark_was_synchronous = false; // until proven otherwise
while (_collectorState != Idling) {
if (TraceCMSState) {
}
switch (_collectorState) {
case InitialMarking:
init_mark_was_synchronous = true; // fact to be exploited in re-mark
checkpointRootsInitial(false);
" within checkpointRootsInitial()");
break;
case Marking:
// initial marking in checkpointRootsInitialWork has been completed
if (VerifyDuringGC &&
}
{
bool res = markFromRoots(false);
"have changed");
break;
}
case FinalMarking:
if (VerifyDuringGC &&
}
"have changed within checkpointRootsFinal()");
break;
case Sweeping:
// final marking in checkpointRootsFinal has been completed
if (VerifyDuringGC &&
}
sweep(false);
break;
case Resizing: {
// Sweeping has been completed; the actual resize in this case
// is done separately; nothing to be done in this state.
break;
}
case Resetting:
// The heap has been resized.
if (VerifyDuringGC &&
}
reset(false);
"have changed");
break;
case Precleaning:
case AbortablePreclean:
// Elide the preclean phase
break;
default:
}
if (TraceCMSState) {
}
}
if (UseAdaptiveSizePolicy) {
}
if (VerifyAfterGC &&
}
if (TraceCMSState) {
" exiting collection CMS state %d",
}
}
bool CMSCollector::waitForForegroundGC() {
bool res = false;
"CMS thread should have CMS token");
// Block the foreground collector until the
// background collectors decides whether to
// yield.
_foregroundGCShouldWait = true;
if (_foregroundGCIsActive) {
// The background collector yields to the
// foreground collector and returns a value
// indicating that it has yielded. The foreground
// collector can proceed.
res = true;
_foregroundGCShouldWait = false;
// Get a possibly blocked foreground thread going
if (TraceCMSState) {
}
while (_foregroundGCIsActive) {
}
}
if (TraceCMSState) {
}
return res;
}
// Because of the need to lock the free lists and other structures in
// the collector, common to all the generations that the collector is
// collecting, we need the gc_prologues of individual CMS generations
// delegate to their collector. It may have been simpler had the
// current infrastructure allowed one to call a prologue on a
// collector. In the absence of that we have the generation's
// prologue delegate to the collector, which delegates back
// some "local" work to a worker method in the individual generations
// that it's responsible for collecting, while itself doing any
// work common to all generations it's responsible for. A similar
// comment applies to the gc_epilogue()'s.
// The role of the variable _between_prologue_and_epilogue is to
// enforce the invocation protocol.
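// Illustrative sketch only (hypothetical type, guarded out by
// CMS_DOC_SKETCHES): the invocation protocol described above -- the first
// generation to delegate its prologue does the shared work and sets the
// flag; later delegations in the same GC are ignored until the epilogue
// clears the flag.
#ifdef CMS_DOC_SKETCHES
struct SketchSharedCollector {
  bool _between_prologue_and_epilogue;
  SketchSharedCollector() : _between_prologue_and_epilogue(false) {}

  void gc_prologue() {
    if (_between_prologue_and_epilogue) return;   // already done for this GC
    _between_prologue_and_epilogue = true;
    // ... claim shared locks, do work common to all constituent generations ...
  }
  void gc_epilogue() {
    if (!_between_prologue_and_epilogue) return;  // delegation already handled
    // ... release shared locks, per-GC cleanup ...
    _between_prologue_and_epilogue = false;       // ready for the next cycle
  }
};
#endif // CMS_DOC_SKETCHES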
// Call gc_prologue_work() for each CMSGen and PermGen that
// we are responsible for.
// The following locking discipline assumes that we are only called
// when the world is stopped.
// The CMSCollector prologue must call the gc_prologues for the
// "generations" (including PermGen if any) that it's responsible
// for.
"Incorrect thread type for prologue execution");
// We have already been invoked; this is a gc_prologue delegation
// from yet another CMS generation that we are responsible for, just
// ignore it since all relevant work has already been done.
return;
}
// set a bit saying prologue has been called; cleared in epilogue
_between_prologue_and_epilogue = true;
// Claim locks for common data structures, then call gc_prologue_work()
// for each CMSGen and PermGen that we are responsible for.
getFreelistLocks(); // gets free list locks on constituent spaces
// Should call gc_prologue_work() for all cms gens we are responsible for
&& _collectorState < Sweeping;
: &_modUnionClosure;
if (!full) {
stats().record_gc0_begin();
}
}
// Delegate to the CMSCollector, which knows how to coordinate between
// this and any other CMS generations that it is responsible for
// collecting.
}
// This is a "private" interface for use by this generation's CMSCollector.
// Not to be called directly by any other entity (for instance,
// GenCollectedHeap, which calls the "public" gc_prologue method above).
"Should be NULL");
if (registerClosure) {
}
cmsSpace()->gc_prologue();
// Clear stat counters
SIZE_FORMAT" bytes concurrently",
}
_numObjectsAllocated = 0;
_numWordsAllocated = 0;
)
}
// The following locking discipline assumes that we are only called
// when the world is stopped.
"world is stopped assumption");
// Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
// if linear allocation blocks need to be appropriately marked to allow
// the blocks to be parsable. We also check here whether we need to nudge the
// CMS collector thread to start a new cycle (if it's not already active).
"Incorrect thread type for epilogue execution");
if (!_between_prologue_and_epilogue) {
// We have already been invoked; this is a gc_epilogue delegation
// from yet another CMS generation that we are responsible for, just
// ignore it since all relevant work has already been done.
return;
}
// in case sampling was not already enabled, enable it
_start_sampling = true;
}
// reset _eden_chunk_array so sampling starts afresh
_eden_chunk_index = 0;
// update performance counters - this uses a special version of
// update_counters() that allows the utilization to be passed as a
// parameter, avoiding multiple calls to used().
//
if (CMSIncrementalMode) {
}
bitMapLock()->unlock();
if (!CleanChunkPoolAsync) {
}
_between_prologue_and_epilogue = false; // ready for next cycle
}
// Also reset promotion tracking in par gc thread states.
if (CollectedHeap::use_parallel_gc_threads()) {
for (uint i = 0; i < ParallelGCThreads; i++) {
}
}
}
cmsSpace()->gc_epilogue();
// Print stat counters
SIZE_FORMAT" bytes",
}
_numObjectsPromoted = 0;
_numWordsPromoted = 0;
)
// The call down the chain in contiguous_available needs the freelistLock,
// so print this out before releasing the freeListLock.
}
}
#ifndef PRODUCT
bool CMSCollector::have_cms_token() {
if (thr->is_VM_thread()) {
return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
} else if (thr->is_ConcurrentGC_thread()) {
} else if (thr->is_GC_task_thread()) {
return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
}
return false;
}
#endif
// Check reachability of the given heap address in CMS generation,
// treating all other generations as roots.
// We could "guarantee" below, rather than assert, but i'll
// leave these as "asserts" so that an adventurous debugger
// could try this in the product build provided some subset of
// the conditions were met, provided they were interested in the
// results and knew that the computation below wouldn't interfere
// with other concurrent computations mutating the structures
// being read or written.
"Else mutations in object graph will make answer suspect");
// Clear the marking bit map array before starting, but, just
// for kicks, first report if the given address is already marked
if (verify_after_remark()) {
return result;
} else {
return false;
}
}
////////////////////////////////////////////////////////
// CMS Verification Support
////////////////////////////////////////////////////////
// Following the remark phase, the following invariant
// should hold -- each object in the CMS heap which is
// marked in markBitMap() should be marked in the verification_mark_bm().
class VerifyMarkedClosure: public BitMapClosure {
bool _failed;
public:
_failed = true;
}
return true;
}
};
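// Illustrative sketch only (hypothetical helper over plain bool arrays,
// guarded out by CMS_DOC_SKETCHES): the invariant the closure above checks --
// every bit set in the verification bitmap must also be set in the
// collector's mark bitmap, otherwise the remark missed a live object.
#ifdef CMS_DOC_SKETCHES
static inline bool sketch_verify_marks_subset(const bool* verification_bm,
                                              const bool* cms_bm,
                                              size_t num_bits) {
  for (size_t i = 0; i < num_bits; i++) {
    if (verification_bm[i] && !cms_bm[i]) {
      return false;    // marked by the verification pass but not by CMS
    }
  }
  return true;
}
#endif // CMS_DOC_SKETCHES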
bool CMSCollector::verify_after_remark() {
static bool init = false;
"Else mutations in object graph will make answer suspect");
"Else there may be mutual interference in use of "
" verification data structures");
"Else marking info checked here may be obsolete");
// Allocate marking bit map if not already allocated
if (!init) { // first time
return false;
}
init = true;
}
// Turn off refs discovery -- so we will be tracing through refs.
// This is as intended, because by this time
// GC must already have cleared any refs that need to be cleared,
// and traced those that need to be marked; moreover,
// the marking done here is not going to interfere in any
// way with the marking information used by GC.
// Clear any marks from a previous round
// Update the saved marks which may affect the root scans.
gch->save_marks();
if (CMSRemarkVerifyVariant == 1) {
// In this first variant of verification, we complete
// all marking, then check if the new marks-vector is
// a subset of the CMS marks-vector.
} else if (CMSRemarkVerifyVariant == 2) {
// In this second variant of verification, we flag an error
// (i.e. an object reachable in the new marks-vector not reachable
// in the CMS marks-vector) immediately, also indicating the
// identity of an object (A) that references the unmarked object (B) --
} else {
warning("Unrecognized value %d for CMSRemarkVerifyVariant",
}
return true;
}
void CMSCollector::verify_after_remark_work_1() {
// Mark from roots one level into CMS
true, // younger gens are roots
true, // activate StrongRootsScope
true, // collecting perm gen
¬Older,
true, // walk code active on stacks
NULL);
// Now mark from the roots
false /* don't yield */, true /* verifying */);
while (_restart_addr != NULL) {
// Deal with stack overflow: by restarting at the indicated
// address.
}
// Should reset the revisit stack above, since no class tree
// surgery is forthcoming.
// Marking completed -- now verify that each bit marked in
// verification_mark_bm() is also marked in markBitMap(); flag all
// errors by printing corresponding objects.
fatal("CMS: failed marking verification after remark");
}
}
void CMSCollector::verify_after_remark_work_2() {
// Mark from roots one level into CMS
markBitMap());
true, // younger gens are roots
true, // activate StrongRootsScope
true, // collecting perm gen
¬Older,
true, // walk code active on stacks
NULL);
// Now mark from the roots
while (_restart_addr != NULL) {
// Deal with stack overflow: by restarting at the indicated
// address.
}
// Should reset the revisit stack above, since no class tree
// surgery is forthcoming.
// Marking completed -- now verify that each bit marked in
// verification_mark_bm() is also marked in markBitMap(); flag all
// errors by printing corresponding objects.
}
void ConcurrentMarkSweepGeneration::save_marks() {
// delegate to CMS space
cmsSpace()->save_marks();
for (uint i = 0; i < ParallelGCThreads; i++) {
}
}
return cmsSpace()->no_allocs_since_save_marks();
}
\
void ConcurrentMarkSweepGeneration:: \
cl->set_generation(this); \
cl->reset_generation(); \
save_marks(); \
}
void
{
// Not currently implemented; need to do the following. -- ysr.
// dld -- I think that is used for some sort of allocation profiler. So it
// really means the objects allocated by the mutator since the last
// GC. We could potentially implement this cheaply by recording only
// the direct allocations in a side data structure.
//
// I think we probably ought not to be required to support these
// iterations at any arbitrary point; I think there ought to be some
// call to enable/disable allocation profiling in a generation/space,
// and the iterator ought to return the objects allocated in the
// will probably be at a GC.) That way, for gens like CM&S that would
// require some extra data structure to support this, we only pay the
// cost when it's in use...
}
void
cl->set_generation(this);
cl->reset_generation();
}
void
if (freelistLock()->owned_by_self()) {
} else {
}
}
void
if (freelistLock()->owned_by_self()) {
} else {
}
}
void
if (freelistLock()->owned_by_self()) {
} else {
}
}
void
if (freelistLock()->owned_by_self()) {
} else {
}
}
void
}
void
}
void
// Fix the linear allocation blocks to look like free blocks.
// Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
// are not called when the heap is verified during universe initialization and
// at vm shutdown.
if (freelistLock()->owned_by_self()) {
cmsSpace()->prepare_for_verify();
} else {
cmsSpace()->prepare_for_verify();
}
}
void
// Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
// are not called when the heap is verified during universe initialization and
// at vm shutdown.
if (freelistLock()->owned_by_self()) {
} else {
}
}
void CMSCollector::verify() {
}
#ifndef PRODUCT
bool CMSCollector::overflow_list_is_empty() const {
if (_overflow_list == NULL) {
}
return _overflow_list == NULL;
}
// The methods verify_work_stacks_empty() and verify_overflow_empty()
// merely consolidate assertion checks that appear to occur together frequently.
void CMSCollector::verify_work_stacks_empty() const {
}
void CMSCollector::verify_overflow_empty() const {
}
#endif // PRODUCT
// Decide if we want to enable class unloading as part of the
// ensuing concurrent GC cycle. We will collect the perm gen and
// unload classes if it's the case that:
// (1) an explicit gc request has been made and the flag
// ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
// (2) (a) class unloading is enabled at the command line, and
// (b) (i) perm gen threshold has been crossed, or
// (ii) old gen is getting really full, or
// (iii) the previous N CMS collections did not collect the
// perm gen
// NOTE: Provided there is no change in the state of the heap between
// calls to this method, it should have idempotent results. Moreover,
// its results should be monotonically increasing (i.e. going from 0 to 1,
// but not 1 to 0) between successive calls between which the heap was
// not collected. For the implementation below, it must thus rely on
// the property that concurrent_cycles_since_last_unload()
// will not decrease unless a collection cycle happened and that
// _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
// themselves also monotonic in that sense. See check_monotonicity()
// below.
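// Schematic form of the decision described above (predicate names are
// illustrative stand-ins for the actual queries):
//
//   _should_unload_classes =
//        (explicit_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses)  // (1)
//     || (CMSClassUnloadingEnabled                                                // (2.a)
//         && (perm_gen_threshold_crossed()                                        // (2.b.i)
//             || old_gen_is_too_full()                                            // (2.b.ii)
//             || cycles_since_last_unload() >= some_max_interval));               // (2.b.iii)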
bool CMSCollector::update_should_unload_classes() {
_should_unload_classes = false;
// Condition 1 above
_should_unload_classes = true;
} else if (CMSClassUnloadingEnabled) { // Condition 2.a above
// Disjuncts 2.b.(i,ii,iii) above
|| _cmsGen->is_too_full();
}
return _should_unload_classes;
}
bool ConcurrentMarkSweepGeneration::is_too_full() const {
bool res = should_concurrent_collect();
return res;
}
|| VerifyBeforeExit;
if (should_unload_classes()) { // Should unload classes this cycle
return; // Nothing else needs to be done at this time
}
// Not unloading classes this cycle
// We were not verifying, or we _were_ unloading classes in the last cycle,
// AND some verification options are enabled this cycle; in this case,
// we must make sure that the deadness map is allocated if not already so,
// and cleared (if already allocated previously --
// CMSBitMap::sizeInBits() is used to determine if it's allocated).
if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
"permanent generation verification disabled");
return; // Note that we leave verification disabled, so we'll retry this
// allocation next cycle. We _could_ remember this failure
// and skip further attempts and permanently disable verification
// attempts if that is considered more desirable.
}
"_perm_gen_ver_bit_map inconsistency?");
} else {
}
// Include symbols, strings and code cache elements to prevent their resurrection.
set_verifying(true);
} else if (verifying() && !should_verify) {
// We were verifying, but some verification flags got disabled.
set_verifying(false);
// Exclude symbols, strings and code cache elements from root scanning to
// reduce IM and RM pauses.
}
}
#ifndef PRODUCT
} else {
"Inconsistent _span?");
}
}
return NULL;
}
#endif
bool tlab,
bool parallel) {
if (GCExpandToAllocateDelayMillis > 0) {
}
}
// OneContigSpaceCardGeneration, which makes me wonder if we should move this
// to CardGeneration and share it...
}
{
// remember why we expanded; this information is used
// by shouldConcurrentCollect() when making decisions on whether to start
// a new CMS cycle.
if (success) {
if (PrintGCDetails && Verbose) {
}
}
}
HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
while (true) {
// Expansion by some other thread might make alloc OK now:
// If there's not enough expansion space available, give up.
return NULL;
}
// Otherwise, we try expansion.
// Now go around the loop and try alloc again;
// A competing par_promote might beat us to the expansion space,
// so we may go around the loop again if promotion fails again.
if (GCExpandToAllocateDelayMillis > 0) {
}
}
}
PromotionInfo* promo) {
while (true) {
// Expansion by some other thread might make alloc OK now:
if (promo->ensure_spooling_space()) {
"Post-condition of successful ensure_spooling_space()");
return true;
}
// If there's not enough expansion space available, give up.
return false;
}
// Otherwise, we try expansion.
// Now go around the loop and try alloc again;
// A competing allocation might beat us to the expansion space,
// so we may go around the loop again if allocation fails again.
if (GCExpandToAllocateDelayMillis > 0) {
}
}
}
if (size > 0) {
}
}
if (result) {
// Hmmmm... why doesn't CFLS::set_end verify locking?
// This is quite ugly; FIX ME XXX
// update the space and generation capacity counters
if (UsePerfData) {
}
}
}
return result;
}
bool success = true;
if (remaining_bytes > 0) {
}
return success;
}
// XXX Fix when compaction is implemented.
warning("Shrinking of CMS not yet implemented");
return;
}
// phases.
class CMSPhaseAccounting: public StackObj {
public:
const char *phase,
bool print_cr = true);
private:
const char *_phase;
bool _print_cr;
public:
// Not MT-safe; so do not pass around these StackObj's
// where they may be accessed by other threads.
return ret;
}
};
const char *phase,
bool print_cr) :
if (PrintCMSStatistics != 0) {
}
if (PrintGCDetails && PrintGCTimeStamps) {
gclog_or_tty->stamp();
}
_collector->resetTimer();
_wallclock.start();
_collector->startTimer();
}
_collector->stopTimer();
_wallclock.stop();
if (PrintGCDetails) {
if (_print_cr) {
}
if (PrintCMSStatistics != 0) {
_collector->yields());
}
}
}
// CMS work
// Checkpoint the roots into this generation from outside
// this generation. [Note this initial checkpoint need only
// be approximate -- we'll do a catch up phase subsequently.]
if (asynch) {
// acquire locks for subsequent manipulations
MutexLockerEx x(bitMapLock(),
// enable ("weak") refs discovery
} else {
// (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
// which recognizes if we are a CMS generation, and doesn't try to turn on
// discovery; verify that they aren't meddling.
"incorrect setting of discovery predicate");
"ref discovery for this generation kind");
// already have locks
// now enable ("weak") refs discovery
}
}
// If there has not been a GC[n-1] since last GC[n] cycle completed,
// precede our marking with a collection of all
// younger generations to keep floating garbage to a minimum.
// XXX: we won't do this for now -- it's an optimization to be done later.
// already have locks
// Setup the verification and class unloading state for this
// CMS collection cycle.
if (UseAdaptiveSizePolicy) {
}
// Reset all the PLAB chunk arrays if necessary.
}
// In the case of a synchronous collection, we will elide the
// remark step, so it's important to catch all the nmethod oops
// in this step.
// The final 'true' flag to gen_process_strong_roots will ensure this.
// If 'async' is true, we can relax the nmethod tracing.
// Update the saved marks which may affect the root scans.
gch->save_marks();
// weak reference processing has not started yet.
ref_processor()->set_enqueuing_is_done(false);
{
// This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
true, // younger gens are roots
true, // activate StrongRootsScope
true, // collecting perm gen
¬Older,
true, // walk all of code cache if (so & SO_CodeCache)
NULL);
}
// Clear mod-union table; it will be dirtied in the prologue of
// CMS generation per each younger generation collection.
"Was cleared in most recent final checkpoint phase"
" or no bits are set in the gc_prologue before the start of the next "
"subsequent marking phase.");
// Save the end of the used_region of the constituent generations
// to be used to limit the extent of sweep in each generation.
if (UseAdaptiveSizePolicy) {
}
}
// we might be tempted to assert that:
// assert(asynch == !SafepointSynchronize::is_at_safepoint(),
// "inconsistent argument?");
// However that wouldn't be right, because it's possible that
// a safepoint is indeed in progress as a younger generation
// stop-the-world GC happens even as we mark in this generation.
bool res;
if (asynch) {
// Start the timers for adaptive size policy for the concurrent phases
// Do it here so that the foreground MS can use the concurrent
// timer since a foreground MS might have the sweep done concurrently
// or STW.
if (UseAdaptiveSizePolicy) {
}
// Weak ref discovery note: We may be discovering weak
// refs in this generation concurrent (but interleaved) with
// weak ref discovery by a younger generation collector.
if (res) {
} else { // We failed and a foreground collection wants to take over
if (PrintGCDetails) {
}
}
if (UseAdaptiveSizePolicy) {
}
} else {
"inconsistent with asynch == false");
if (UseAdaptiveSizePolicy) {
}
// already have locks
if (UseAdaptiveSizePolicy) {
}
}
return res;
}
// iterate over marked bits in bit map, doing a full scan and mark
// from these roots using the following algorithm:
// . if oop is to the right of the current scan pointer,
// mark corresponding bit (we'll process it later)
// . else (oop is to left of current scan pointer)
// push oop on marking stack
// . drain the marking stack
// Note that when we do a marking step we need to hold the
// bit map lock -- recall that direct allocation (by mutators)
// and promotion (by younger generation collectors) is also
// marking the bit map. [the so-called allocate live policy.]
// Because the implementation of bit map marking is not
// robust wrt simultaneous marking of bits in the same word,
// we need to make sure that there is no such interference
// between concurrent such updates.
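// Schematic form of the scan-pointer discipline described above (helper names
// are illustrative):
//
//   for each marked bit b encountered while scanning left to right:
//     for each reference p in the object at b:
//       if (in_cms_gen(p) && !bit_map->isMarked(p)) {
//         bit_map->mark(p);                 // done under the bit map lock
//         if (p < scan_pointer) {
//           marking_stack->push(p);         // behind the scan; trace explicitly
//         }                                 // ahead of the scan; it will be reached
//       }
//     drain the marking stack before advancing past b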
// already have locks
// Clear the revisit stack, just in case there are any
// obsolete contents from a short-circuited previous CMS cycle.
bool result = false;
if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
} else {
}
return result;
}
// Forward decl
class CMSConcMarkingTask;
class CMSConcMarkingTerminator: public ParallelTaskTerminator {
public:
virtual void yield();
// "n_threads" is the number of threads to be terminated.
// "queue_set" is a set of work queues of other threads.
// "collector" is the CMS collector associated with this task terminator.
// "yield" indicates whether we need the gang as a whole to yield.
_collector(collector) { }
}
};
class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
public:
bool should_exit_termination();
}
};
// MT Concurrent Marking Task
class CMSConcMarkingTask: public YieldingFlexibleGangTask {
int _n_workers; // requested/desired # workers
bool _asynch;
bool _result;
char _pad_back[64];
// Exposed here for yielding support
Mutex* const _bit_map_lock;
// The per thread work queues, available here for stealing
// Termination (and yielding) support
public:
bool asynch,
YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
{
_term_term.set_task(this);
"Finger incorrectly initialized below");
}
virtual void set_for_termination(int active_workers) {
}
bool should_yield() {
return ConcurrentMarkSweepThread::should_yield()
&& !_collector->foregroundGCIsActive()
&& _asynch;
}
virtual void coordinator_yield(); // stuff done by coordinator
}
private:
void do_work_steal(int i);
void bump_global_finger(HeapWord* f);
};
// Note that we do not need the disjunct || _task->should_yield() above
// because we want terminating threads to yield only if the task
// is already in the midst of yielding, which happens only after at least one
// thread has yielded.
}
void CMSConcMarkingTerminator::yield() {
if (_task->should_yield()) {
} else {
}
}
////////////////////////////////////////////////////////////////
// Concurrent Marking Algorithm Sketch
////////////////////////////////////////////////////////////////
// Until all tasks exhausted (both spaces):
// -- claim next available chunk
// -- bump global finger via CAS
// -- find first object that starts in this chunk
// and start scanning bitmap from that position
// -- scan marked objects for oops
// -- CAS-mark target, and if successful:
// . if target oop is above global finger (volatile read)
// nothing to do
// . if target oop is in chunk and above local finger
// then nothing to do
// . else push on work-queue
// -- Deal with possible overflow issues:
// . local work-queue overflow causes stuff to be pushed on
// global (common) overflow queue
// . always first empty local work queue
// . then get a batch of oops from global work queue if any
// . then do work stealing
// -- When all tasks claimed (both spaces)
// and local work queue empty,
// then in a loop do:
// . check global overflow stack; steal a batch of oops and trace
// . try to steal from other threads if GOS is empty
// . if neither is available, offer termination
// -- Terminate and return result
//
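// Schematic per-worker loop for the sketch above (helper names are
// illustrative):
//
//   while (claim_next_chunk(&chunk)) {              // CAS-bump global finger
//     scan_marked_objects_in(chunk);                // grey oops pushed per rules above
//     drain_local_work_queue();
//   }
//   while (true) {                                  // all chunks claimed, queue empty
//     if (take_batch_from_global_overflow_stack()) { drain_local_work_queue(); continue; }
//     if (steal_from_other_workers())              { drain_local_work_queue(); continue; }
//     if (terminator()->offer_termination())         break;
//   }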
// Before we begin work, our work queue should be empty
// Scan the bitmap covering _cms_space, tracing through grey objects.
if (PrintCMSStatistics != 0) {
}
// ... do the same for the _perm_space
if (PrintCMSStatistics != 0) {
}
// ... do work stealing
if (PrintCMSStatistics != 0) {
}
// Note that under the current task protocol, the
// following assertion is true even if the spaces
// expanded since the completion of the concurrent
// marking. XXX This will likely change under a strict
// ABORT semantics.
"All tasks have been completed");
}
while (f > read) {
// our cas succeeded
break;
}
}
}
// This is really inefficient, and should be redone by
// using (not yet available) block-read and -write interfaces to the
// stack and the work_queue. XXX FIX ME !!!
OopTaskQueue* work_q) {
// Fast lock-free check
return false;
}
// Grab up to 1/4 the size of the work queue
for (int i = (int) num; i > 0; i--) {
}
return num > 0;
}
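// Schematic form of the transfer performed above (illustrative only): move a
// bounded batch of oops from the shared overflow stack into the local queue.
//
//   size_t num = MIN2(ovflw_stk->length(), work_q->max_elems() / 4);
//   for (size_t i = 0; i < num; i++) {
//     oop obj = ovflw_stk->pop();          // under the overflow stack's lock
//     bool ok = work_q->push(obj);         // fits by construction of "num"
//     assert(ok, "local work queue should have had room");
//   }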
// We allow that there may be no tasks to do here because
// we are restarting after a stack overflow.
// Align down to a card boundary for the start of 0th task
// for this space.
}
// Having claimed the nth task in this space,
// compute the chunk that it corresponds to:
// Try and bump the global finger via a CAS;
// note that we need to do the global finger bump
// _before_ taking the intersection below, because
// the task corresponding to that region will be
// deemed done even if the used_region() expands
// because of allocation -- as it almost certainly will
// during start-up while the threads yield in the
// closure below.
// There are null tasks here corresponding to chunks
// beyond the "top" address of the space.
"Inconsistency");
if (nth_task == 0) {
// For the 0th task, we'll not need to compute a block_start.
// In the case of a restart because of stack overflow,
// we might additionally skip a chunk prefix.
} else {
}
} else {
// We want to skip the first object because
// the protocol is to scan any object in its entirety
// that _starts_ in this span; a fortiori, any
// object starting in an earlier span is scanned
// as part of an earlier claimed task.
// Below we use the "careful" version of block_start
// so we do not try to navigate uninitialized objects.
// Below we use a variant of block_size that uses the
// Printezis bits to avoid waiting for allocated
// objects to become initialized/parsable.
if (sz > 0) {
} else {
// In this case we may end up doing a bit of redundant
// scanning, but that appears unavoidable, short of
// locking the free list locks; see bug 6324141.
break;
}
}
}
// Do the marking work within a non-empty span --
// the last argument to the constructor indicates whether the
// iteration should be incremental with periodic yields.
work_queue(i),
_asynch);
} // else nothing to do for this task
} // else nothing to do for this task
}
// We'd be tempted to assert here that since there are no
// more tasks left to claim in this space, the global_finger
// must exceed space->top() and a fortiori space->end(). However,
// that would not quite be correct because the bumping of
// global_finger occurs strictly after the claiming of a task,
// so by the time we reach here the global finger may not yet
// have been bumped up by the thread that claimed the last
// task.
}
class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure {
private:
protected:
public:
{ }
void do_yield_check() {
if (_task->should_yield()) {
}
}
};
// Grey object scanning during work stealing phase --
// the salient assumption here is that any references
// that are in these stolen objects being scanned must
// already have been initialized (else they would not have
// been published), so we do not need to check for
// uninitialized objects before pushing here.
// Check if oop points into the CMS generation
// and is not marked
// a white object ...
// If we manage to "claim" the object, by being the
// first thread to mark it, then we push it on our
// marking stack
// push on work queue (grey set)
bool simulate_overflow = false;
if (CMSMarkStackOverflowALot &&
_collector->simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
if (simulate_overflow ||
// stack overflow
if (PrintCMSStatistics != 0) {
}
// We cannot assert that the overflow stack is full because
// it may have been emptied since.
"Else push should have succeeded");
}
} // Else, some other thread got there first
}
}
}
}
}
// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
// We need to do this under a mutex to prevent other
// workers from interfering with the work done below.
// Remember the least grey address discarded
}
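// Schematic form of the bookkeeping described above (lock and field names are
// illustrative): keep the least address whose trace was discarded so that
// marking can later restart from it.
//
//   MutexLockerEx ml(overflow_lock, Mutex::_no_safepoint_check_flag);
//   if (_restart_addr == NULL || lost < _restart_addr) {
//     _restart_addr = lost;
//   }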
void CMSConcMarkingTask::do_work_steal(int i) {
while (true) {
cl.trim_queue(0);
// Can't assert below because the work obtained from the
// overflow stack may already have been stolen from us.
// assert(work_q->size() > 0, "Work from overflow stack");
continue;
break;
} else if (yielding() || should_yield()) {
yield();
}
}
}
// This is run by the CMS (coordinator) thread.
void CMSConcMarkingTask::coordinator_yield() {
"CMS thread should hold CMS token");
// First give up the locks, then yield, then re-lock
// We should probably use a constructor/destructor idiom to
// serve our purpose. XXX
_bit_map_lock->unlock();
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
}
_collector->icms_wait();
// It is possible for whichever thread initiated the yield request
// not to get a chance to wake up and take the bitmap lock between
// this thread releasing it and reacquiring it. So, while the
// should_yield() flag is on, let's sleep for a bit to give the
// other thread a chance to wake up. The limit imposed on the number
// of iterations is defensive, to avoid any unforeseen circumstances
// putting us into an infinite loop. Since it's always been this
// (coordinator_yield()) method that was observed to cause the
// problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
// which is by default non-zero. For the other seven methods that
// also perform the yield operation, we are using a different
// parameter (CMSYieldSleepCount) which is by default zero. This way we
// can enable the sleeping for those methods too, if necessary.
// See 6442774.
//
// We really need to reconsider the synchronization between the GC
// thread and the yield-requesting threads in the future and find a better
// way of doing this type of interaction. Additionally, we should
// consolidate the eight nearly identical methods that do the yield operation
// into one for better maintainability and readability. See 6445193.
//
// Tony 2006.06.29
for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
!CMSCollector::foregroundGCIsActive(); ++i) {
}
ConcurrentMarkSweepThread::synchronize(true);
_collector->startTimer();
}
conc_workers()->total_workers(),
conc_workers()->active_workers(),
CMSConcMarkingTask tsk(this,
conc_workers(),
task_queues());
// Since the actual number of workers we get may be different
// from the number we requested above, do we need to do anything different
// below? In particular, maybe we need to subclass the SequentialSubTasksDone
// class?? XXX
// Refs discovery is already non-atomic.
}
// If the task was aborted, _restart_addr will be non-NULL
while (_restart_addr != NULL) {
// XXX For now we do not make use of ABORTED state and have not
// yet implemented the right abort semantics (even in the original
// single-threaded CMS case). That needs some more investigation
// and is deferred for now; see CR# TBF. 07252005YSR. XXX
// If _restart_addr is non-NULL, a marking stack overflow
// occurred; we need to do a fresh marking iteration from the
// indicated restart address.
if (_foregroundGCIsActive && asynch) {
// We may be running into repeated stack overflows, having
// reached the limit of the stack size, while making very
// slow forward progress. It may be best to bail out and
// let the foreground collector do its job.
// Clear _restart_addr, so that foreground GC
// works from scratch. This avoids the headache of
// a "rescan" which would otherwise be needed because
// of the dirty mod union table & card table.
return false;
}
// Adjust the task to restart from _restart_addr
// Get the workers going again
}
}
return true;
}
// Temporarily make refs discovery single threaded (non-MT)
// the last argument to iterate indicates whether the iteration
// should be incremental with periodic yields.
// If _restart_addr is non-NULL, a marking stack overflow
// occurred; we need to do a fresh iteration from the
// indicated restart address.
while (_restart_addr != NULL) {
if (_foregroundGCIsActive && asynch) {
// We may be running into repeated stack overflows, having
// reached the limit of the stack size, while making very
// slow forward progress. It may be best to bail out and
// let the foreground collector do its job.
// Clear _restart_addr, so that foreground GC
// works from scratch. This avoids the headache of
// a "rescan" which would otherwise be needed because
// of the dirty mod union table & card table.
return false; // indicating failure to complete marking
}
// Deal with stack overflow:
// we restart marking from _restart_addr
}
return true;
}
void CMSCollector::preclean() {
_abort_preclean = false;
if (CMSPrecleaningEnabled) {
_eden_chunk_index = 0;
// Don't start sampling unless we will get sufficiently
// many samples.
_start_sampling = true;
} else {
_start_sampling = false;
}
}
CMSTokenSync x(true); // is cms thread
if (CMSPrecleaningEnabled) {
sample_eden();
} else {
}
}
// Try and schedule the remark such that young gen
// occupancy is CMSScheduleRemarkEdenPenetration %.
void CMSCollector::abortable_preclean() {
// If Eden's current occupancy is below this threshold,
// immediately schedule the remark; else preclean
// past the next scavenge in an effort to
// schedule the pause as described above. By choosing
// CMSScheduleRemarkEdenSizeThreshold >= max eden size
// we will never do an actual abortable preclean cycle.
if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
// We need more smarts in the abortable preclean
// loop below to deal with cases where allocation
// in young gen is very very slow, and our precleaning
// is running a losing race against a horde of
// mutators intent on flooding us with CMS updates
// (dirty cards).
// One, admittedly dumb, strategy is to give up
// after a certain number of abortable precleaning loops
// or after a certain maximum time. We want to make
// this smarter in the next iteration.
// XXX FIX ME!!! YSR
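// Schematic shape of the bounded loop that follows (illustrative; the caps
// come from the CMSMaxAbortablePreclean* style flags mentioned above):
//
//   while (!should_abort_preclean() && !converged) {
//     workdone = one_abortable_preclean_pass();
//     cumworkdone += workdone; loops++;
//     if (max_loops_reached() || max_time_exceeded()) break;
//     if (workdone is small) sleep_briefly();   // let mutator work accumulate
//   }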
while (!(should_abort_preclean() ||
cumworkdone += workdone;
loops++;
// Voluntarily terminate abortable preclean phase if we have
// been at it for too long.
if ((CMSMaxAbortablePrecleanLoops != 0) &&
if (PrintGCDetails) {
}
break;
}
if (PrintGCDetails) {
}
break;
}
// If we are doing little work each iteration, we should
// take a short break.
// Sleep for some time, waiting for work to accumulate
stopTimer();
startTimer();
waited++;
}
}
if (PrintCMSStatistics > 0) {
}
}
CMSTokenSync x(true); // is cms thread
if (_collectorState != Idling) {
"Spontaneous state transition?");
} // Else, a foreground collection completed this CMS cycle.
return;
}
// Respond to an Eden sampling opportunity
void CMSCollector::sample_eden() {
// Make sure a young gc cannot sneak in between our
// reading and recording of a sample.
"Only the cms thread may collect Eden samples");
"Should collect samples while holding CMS token");
if (!_start_sampling) {
return;
}
if (_eden_chunk_array) {
if (_eden_chunk_index < _eden_chunk_capacity) {
"Unexpected state of Eden");
// We'd like to check that what we just sampled is an oop-start address;
// however, we cannot do that here since the object may not yet have been
// initialized. So we'll instead do the check when we _use_ this sample
// later.
if (_eden_chunk_index == 0 ||
>= CMSSamplingGrain)) {
_eden_chunk_index++; // commit sample
}
}
}
_abort_preclean = true;
}
}
}
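// Schematic form of the sampling step above (illustrative): record Eden's
// current top() only if it has advanced by at least CMSSamplingGrain since the
// previous sample.
//
//   HeapWord* top = eden_space->top();
//   if (_eden_chunk_index == 0 ||
//       pointer_delta(top, _eden_chunk_array[_eden_chunk_index - 1]) >= CMSSamplingGrain) {
//     _eden_chunk_array[_eden_chunk_index++] = top;   // commit the sample
//   }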
// Precleaning is currently not MT but the reference processor
// may be set for MT. Disable it temporarily here.
// Do one pass of scrubbing the discovered reference lists
// to remove any reference objects with strongly-reachable
// referents.
if (clean_refs) {
true /* preclean */);
&keep_alive, true /* preclean */);
// We don't want this step to interfere with a young
// collection because we don't want to take CPU
// or memory bandwidth away from the young GC threads
// (which may be as many as there are CPUs).
// Note that we don't need to protect ourselves from
// interference with mutators because they can't
// manipulate the discovered reference lists nor affect
// the computed reachability of the referents, the
// only properties manipulated by the precleaning
// of these reference lists.
stopTimer();
CMSTokenSyncWithLocks x(true /* is cms thread */,
bitMapLock());
startTimer();
sample_eden();
// The following will yield to allow foreground
// collection to proceed promptly. XXX YSR:
// The code in this method may need further
// tweaking for better performance and some restructuring
// for cleaner interfaces.
}
if (clean_survivor) { // preclean the active survivor space(s)
"incorrect type for cast");
true /* precleaning phase */);
stopTimer();
bitMapLock());
startTimer();
unsigned int before_count =
}
true /* precleaning phase */);
// CAUTION: The following closure has persistent state that may need to
// be reset upon a decrease in the sequence of addresses it
// processes.
// Preclean dirty cards in ModUnionTable and CardTable using
// appropriate convergence criterion;
// repeat CMSPrecleanIter times unless we find that
// we are losing.
"Bad convergence multiplier");
"Unreasonably low CMSPrecleanThreshold");
if (CMSPermGenPrecleaningEnabled) {
}
if (Verbose && PrintGCDetails) {
}
// Either there are very few dirty cards, so re-mark
// pause will be small anyway, or our pre-cleaning isn't
// that much faster than the rate at which cards are being
// dirtied, so we might as well stop and re-mark since
// precleaning won't improve our re-mark time by much.
if (curNumCards <= CMSPrecleanThreshold ||
    (numIter > 0 &&
     (curNumCards * CMSPrecleanDenominator >
      lastNumCards * CMSPrecleanNumerator))) {
numIter++;
break;
}
}
if (CMSPermGenPrecleaningEnabled) {
}
if (PrintGCDetails && PrintCMSStatistics != 0) {
}
return cumNumCards; // as a measure of useful work done
}
// PRECLEANING NOTES:
// Precleaning involves:
// . reading the bits of the modUnionTable and clearing the set bits.
// . For the cards corresponding to the set bits, we scan the
// objects on those cards. This means we need the free_list_lock
// so that we can safely iterate over the CMS space when scanning
// for oops.
// . When we scan the objects, we'll be both reading and setting
// marks in the marking bit map, so we'll need the marking bit map.
// . For protecting _collector_state transitions, we take the CGC_lock.
// Note that any races in the reading of card table entries by the
// CMS thread on the one hand and the clearing of those entries by the
// VM thread or the setting of those entries by the mutator threads on the
// other are quite benign. However, for efficiency it makes sense to keep
// the VM thread from racing with the CMS thread while the latter is
// transferring dirty card info to the modUnionTable. We therefore also use the
// CGC_lock to protect the reading of the card table and the mod union
// table by the CM thread.
// . We run concurrently with mutator updates, so scanning
// needs to be done carefully -- we should not try to scan
// potentially uninitialized objects.
//
// Locking strategy: While holding the CGC_lock, we scan over and
// reset a maximal dirty range of the mod union / card tables, then lock
// the free_list_lock and bitmap lock to do a full marking, then
// release these locks; and repeat the cycle. This allows for a
// certain amount of fairness in the sharing of these locks between
// the CMS collector on the one hand, and the VM thread and the
// mutators on the other.
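// Schematic form of one round of the locking strategy above (illustrative):
//
//   {
//     CMSTokenSync ts(true);                          // CGC_lock / CMS token held
//     dirtyRegion = get_and_clear_dirty_range(lastAddr, endAddr);
//   }                                                 // released before heavier work
//   if (!dirtyRegion.is_empty()) {
//     CMSTokenSyncWithLocks tsl(true, freelistLock(), bitMapLock());
//     scan_objects_on_dirty_cards(dirtyRegion);       // full marking for this range
//   }                                                 // locks dropped; loop and repeat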
// NOTE: preclean_mod_union_table() and preclean_card_table()
// further below are largely identical; if you need to modify
// one of these methods, please check the other method too.
// Turn off checking for this method but turn it back on
// selectively. There are yield points in this method
// but it is difficult to turn the checking off just around
// the yield points. It is simpler to selectively turn
// it on.
// strategy: starting with the first card, accumulate contiguous
// ranges of dirty cards; clear these cards, then scan the region
// covered by these cards.
// Since all of the MUT is committed ahead, we can just use
// that, in case the generations expand while we are precleaning.
// It might also be fine to just use the committed part of the
// generation, but we might potentially miss cards when the
// generation is rapidly expanding while we are in the midst
// of precleaning.
for (cumNumDirtyCards = numDirtyCards = 0,
{
stopTimer();
// Potential yield point
CMSTokenSync ts(true);
startTimer();
sample_eden();
// Get dirty region starting at nextOffset (inclusive),
// simultaneously clearing it.
"returned region inconsistent?");
}
// Remember where the next search should begin.
// The returned region (if non-empty) is a right open interval,
// so lastOffset is obtained from the right end of that
// interval.
// Should do something more transparent and less hacky XXX
// We'll scan the cards in the dirty region (with periodic
// yields for foreground GC as needed).
if (!dirtyRegion.is_empty()) {
stopTimer();
// Potential yield point
bitMapLock());
startTimer();
{
sample_eden();
}
if (stop_point != NULL) {
// The careful iteration stopped early either because it found an
// uninitialized object, or because we were in the midst of an
// "abortable preclean", which should now be aborted. Redirty
// the bits corresponding to the partially-scanned or unscanned
// cards. We'll either restart at the next block boundary or
// abort the preclean.
"Unparsable objects should only be in perm gen.");
if (should_abort_preclean()) {
break; // out of preclean loop
} else {
// Compute the next address at which preclean should pick up;
// might need bitMapLock in order to read P-bits.
}
}
} else {
break;
}
}
return cumNumDirtyCards;
}
// NOTE: preclean_mod_union_table() above and preclean_card_table()
// below are largely identical; if you need to modify
// one of these methods, please check the other method too.
// strategy: it's similar to precleanModUnionTable above, in that
// we accumulate contiguous ranges of dirty cards, mark these cards
// precleaned, then scan the region covered by these cards.
for (cumNumDirtyCards = numDirtyCards = 0,
{
// See comments in "Precleaning notes" above on why we
// do this locking. XXX Could the locking overheads be
// too high when dirty cards are sparse? [I don't think so.]
stopTimer();
CMSTokenSync x(true); // is cms thread
startTimer();
sample_eden();
// Get and clear dirty region from card table
true,
"returned region inconsistent?");
}
if (!dirtyRegion.is_empty()) {
stopTimer();
startTimer();
sample_eden();
if (stop_point != NULL) {
// The careful iteration stopped early because it found an
// uninitialized object. Redirty the bits corresponding to the
// partially-scanned or unscanned cards, and start again at the
// next block boundary.
"Unparsable objects should only be in perm gen.");
if (should_abort_preclean()) {
break; // out of preclean loop
} else {
// Compute the next address at which preclean should pick up.
}
}
} else {
break;
}
}
return cumNumDirtyCards;
}
bool clear_all_soft_refs, bool init_mark_was_synchronous) {
// world is stopped at this checkpoint
"world should be stopped");
if (PrintGCDetails) {
_young_gen->used() / K,
_young_gen->capacity() / K);
}
if (asynch) {
if (CMSScavengeBeforeRemark) {
// Temporarily set flag to false, GCH->do_collection will
// expect it to be false and set to true
if (level >= 0) {
false, // !clear_all_soft_refs
0, // size
false, // is_tlab
level // max_level
);
}
}
FreelistLocker x(this);
MutexLockerEx y(bitMapLock(),
} else {
// already have all the locks
}
}
bool clear_all_soft_refs, bool init_mark_was_synchronous) {
if (UseAdaptiveSizePolicy) {
}
if (should_unload_classes()) {
CodeCache::gc_prologue();
}
if (!init_mark_was_synchronous) {
// We might assume that we need not fill TLAB's when
// CMSScavengeBeforeRemark is set, because we may have just done
// a scavenge which would have filled all TLAB's -- and besides
// Eden would be empty. This however may not always be the case --
// for instance although we asked for a scavenge, it may not have
// happened because of a JNI critical section. We probably need
// a policy for deciding whether we can in that case wait until
// the critical section releases and then do the remark following
// the scavenge, and skip it here. In the absence of that policy,
// or of an indication of whether the scavenge did indeed occur,
// we cannot rely on TLAB's having been filled and must do
// so here just in case a scavenge did not happen.
// Update the saved marks which may affect the root scans.
gch->save_marks();
{
// Note on the role of the mod union table:
// Since the marker in "markFromRoots" marks concurrently with
// mutators, it is possible for some reachable objects not to have been
// scanned. For instance, an only reference to an object A was
// placed in object B after the marker scanned B. Unless B is rescanned,
// A would be collected. Such updates to references in marked objects
// are detected via the mod union table which is the set of all cards
// dirtied since the first checkpoint in this GC cycle and prior to
// the most recent young generation GC, minus those cleaned up by the
// concurrent precleaning.
} else {
}
}
} else {
// The initial mark was stop-world, so there's no rescanning to
// do; go straight on to the next step below.
}
{
}
if (should_unload_classes()) {
CodeCache::gc_epilogue();
}
// If we encountered any (marking stack / work queue) overflow
// events during the current CMS cycle, take appropriate
// remedial measures, where possible, so as to try and avoid
// recurrence of that condition.
if (ser_ovflw > 0) {
if (PrintCMSStatistics != 0) {
}
_markStack.expand();
_ser_kac_ovflw = 0;
}
if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
if (PrintCMSStatistics != 0) {
}
_par_kac_ovflw = 0;
}
if (PrintCMSStatistics != 0) {
if (_markStack._hit_limit > 0) {
}
if (_markStack._failed_double > 0) {
" current capacity "SIZE_FORMAT,
_markStack.capacity());
}
}
_markStack._hit_limit = 0;
_markStack._failed_double = 0;
// Check that all the klasses have been checked
if ((VerifyAfterGC || VerifyDuringGC) &&
}
// Change under the freelistLocks.
// Call isAllClear() under bitMapLock
" final marking");
if (UseAdaptiveSizePolicy) {
}
}
// Parallel remark task
class CMSParRemarkTask: public AbstractGangTask {
int _n_workers;
// The per-thread work queues, available here for stealing.
public:
// A value of 0 passed to n_workers will cause the number of
// workers to be taken from the active workers in the work gang.
AbstractGangTask("Rescan roots and grey objects in parallel"),
int n_workers() { return _n_workers; }
private:
// Work method in support of parallel rescan ... of young gen spaces
// ... of dirty cards in old space
// ... work stealing for the above
};
// work_queue(i) is passed to the closure
// Par_MarkRefsIntoAndScanClosure. The "i" parameter
// also is passed to do_dirty_card_rescan_tasks() and to
// do_work_steal() to select the i-th task_queue.
// ---------- rescan from roots --------------
&(_collector->_markBitMap),
// Rescan young gen roots first since these are likely
// coarsely partitioned and may, on that account, constitute
// the critical path; thus, it's best to start off that
// work first.
// ---------- young gen roots --------------
{
if (PrintCMSStatistics != 0) {
"Finished young gen rescan work in %dth thread: %3.3f sec",
}
}
// ---------- remaining roots --------------
false, // yg was scanned above
false, // this is parallel code
true, // collecting perm gen
true, // walk all of code cache if (so & SO_CodeCache)
NULL);
"if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
if (PrintCMSStatistics != 0) {
"Finished remaining root rescan work in %dth thread: %3.3f sec",
}
// ---------- rescan dirty cards ------------
// Do the rescan tasks for each of the two spaces
// (cms_space and perm_space) in turn.
// "worker_id" is passed to select the task_queue for "worker_id"
if (PrintCMSStatistics != 0) {
"Finished dirty card rescan work in %dth thread: %3.3f sec",
}
// ---------- steal work from other threads ...
// ---------- ... and drain overflow list.
if (PrintCMSStatistics != 0) {
"Finished work stealing in %dth thread: %3.3f sec",
}
}
// Note that parameter "i" is not used.
void
// Until all tasks completed:
// . claim an unclaimed task
// . compute region boundaries corresponding to task claimed
// using chunk_array
// . par_oop_iterate(cl) over that region
// We claimed task # nth_task; compute its boundaries.
if (chunk_top == 0) { // no samples were taken
} else if (nth_task == 0) {
} else {
}
// Verify that mr is in space
"Should be in space");
// Verify that "start" is an object boundary
"Should be an oop");
}
}
void
CompactibleFreeListSpace* sp, int i,
// Until all tasks completed:
// . claim an unclaimed task
// . compute region boundaries corresponding to task claimed
// . transfer dirty bits ct->mut for that region
// . apply rescanclosure to dirty mut bits for that region
// CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
// CAUTION: This closure has state that persists across calls to
// the work method dirty_range_iterate_clear() in that it has
// embedded in it a (subtype of) UpwardsObjectClosure. The
// use of that state in the embedded UpwardsObjectClosure instance
// assumes that the cards are always iterated (even if in parallel
// by several threads) in monotonically increasing order per each
// thread. This is true of the implementation below which picks
// card ranges (chunks) in monotonically increasing order globally
// and, a-fortiori, in monotonically increasing order per thread
// (the latter order being a subsequence of the former).
// If the work code below is ever reorganized into a more chaotic
// work-partitioning form than the current "sequential tasks"
// paradigm, the use of that persistent state will have to be
// revisited and modified appropriately. See also related
// bug 4756801 work on which should examine this code to make
// sure that the changes there do not run counter to the
// assumptions made here and necessary for correctness and
// efficiency. Note also that this code might yield inefficient
// behaviour in the case of very large objects that span one or
// more work chunks. Such objects would potentially be scanned
// several times redundantly. Work on 4756801 should try and
// address that performance anomaly if at all possible. XXX
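// Schematic form of the chunked rescan whose ordering assumption is discussed
// above (illustrative): each worker claims whole, aligned chunks in strictly
// increasing order.
//
//   while (pst->is_task_claimed(/* reference */ nth_task)) {
//     HeapWord* start = start_addr + nth_task * chunk_size;   // card/MUT-word aligned
//     MemRegion span(start, MIN2(start + chunk_size, end_addr));
//     transfer_dirty_cards_to_mod_union(span);   // disjoint, aligned: no extra sync
//     rescan_marked_objects_on_dirty_cards(span);
//   }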
start_addr, "Check alignment");
chunk_size, "Check alignment");
// Having claimed the nth_task, compute corresponding mem-region,
// which is a-fortiori aligned correctly (i.e. at a MUT boundary).
// The alignment restriction ensures that we do not need any
// synchronization with other gang-workers while setting or
// clearing bits in this chunk of the MUT.
// The last chunk's end might be way beyond end of the
// used region. In that case pull back appropriately.
}
// Iterate over the dirty cards covering this chunk, marking them
// precleaned, and setting the corresponding bits in the mod union
// table. Since we have been careful to partition at Card and MUT-word
// boundaries no synchronization is needed between parallel threads.
// Having transferred these marks into the modUnionTable,
// rescan the marked objects on the dirty cards in the modUnionTable.
// Even if this is at a synchronous collection, the initial marking
// may have been done during an asynchronous collection so there
// may be dirty bits in the mod-union table.
}
}
// . see if we can share work_queues with ParNew? XXX
void
int* seed) {
NOT_PRODUCT(int num_steals = 0;)
while (true) {
// Completely finish any left over work from (an) earlier round(s)
cl->trim_queue(0);
// Now check if there's any work in the overflow list
// Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
// only affects the number of attempts made to get work from the
// overflow list and does not affect the number of workers. Just
// pass ParallelGCThreads so this behavior is unchanged.
// found something in global overflow list;
// not yet ready to go stealing work from others.
// We'd like to assert(work_q->size() != 0, ...)
// because we just took work from the overflow list,
// but of course we can't since all of that could have
// been already stolen from us.
// "He giveth and He taketh away."
continue;
}
// Verify that we have no work before we resort to stealing
// Try to steal from other queues that have work
// Do scanning work
// Loop around, finish this work, and try to steal some more
} else if (terminator()->offer_termination()) {
break; // nirvana from the infinite cycle
}
}
if (PrintCMSStatistics != 0) {
}
)
"Else our work is not yet done");
}
// Return a thread-local PLAB recording array, as appropriate.
if (_survivor_plab_array != NULL &&
return (void*) ca;
} else {
return NULL;
}
}
// Reset all the thread-local PLAB recording arrays
void CMSCollector::reset_survivor_plab_arrays() {
for (uint i = 0; i < ParallelGCThreads; i++) {
_survivor_plab_array[i].reset();
}
}
// Merge the per-thread plab arrays into the global survivor chunk
// array which will provide the partitioning of the survivor space
// for CMS rescan.
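// Schematic form of the k-way merge performed below (illustrative): the
// per-thread PLAB arrays are individually sorted; each round selects the
// thread whose current entry is the least address and advances its cursor.
//
//   while (true) {
//     int min_tid = -1; HeapWord* min_val = NULL;
//     for (int j = 0; j < no_of_gc_threads; j++) {
//       if (_cursor[j] < plab_length(j) &&
//           (min_tid < 0 || plab_entry(j, _cursor[j]) < min_val)) {
//         min_val = plab_entry(j, _cursor[j]); min_tid = j;
//       }
//     }
//     if (min_tid < 0) break;                 // all arrays exhausted
//     _survivor_chunk_array[i++] = min_val;
//     _cursor[min_tid]++;
//   }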
int no_of_gc_threads) {
for (int j = 0; j < no_of_gc_threads; j++) {
_cursor[j] = 0;
}
size_t i;
for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries
for (int j = 0; j < no_of_gc_threads; j++) {
continue;
}
min_tid = j;
} else {
}
}
// At this point min_val and min_tid are respectively
// the least address in _survivor_plab_array[j]->nth(_cursor[j])
// and the thread (j) that witnesses that address.
// We record this address in the _survivor_chunk_array[i]
// and increment _cursor[min_tid] prior to the next round i.
break;
}
_survivor_chunk_array[i] = min_val;
}
// We are all done; record the size of the _survivor_chunk_array
_survivor_chunk_index = i; // exclusive: [0, i)
if (PrintCMSStatistics > 0) {
}
// Verify that we used up all the recorded entries
#ifdef ASSERT
for (int j = 0; j < no_of_gc_threads; j++) {
}
// Check that the merged array is in sorted order
if (total > 0) {
if (PrintCMSStatistics > 0) {
i, _survivor_chunk_array[i]);
}
"Not sorted");
}
}
#endif // ASSERT
}
// Set up the space's par_seq_tasks structure for work claiming
// for parallel rescan of young gen.
// See ParRescanTask where this is currently used.
void
// Eden space
{
// Each valid entry in [0, _eden_chunk_index) represents a task.
// Sets the condition for completion of the subtask (how many threads
// need to finish in order to be done).
}
// Merge the survivor plab arrays into _survivor_chunk_array
if (_survivor_plab_array != NULL) {
} else {
}
// To space
{
// Sets the condition for completion of the subtask (how many threads
// need to finish in order to be done).
}
// From space
{
// Sets the condition for completion of the subtask (how many threads
// need to finish in order to be done).
}
}
// Parallel version of remark
void CMSCollector::do_remark_parallel() {
// Choose to use the number of GC workers most recently set
// into "active_workers". If active_workers is not set, set it
// to ParallelGCThreads.
if (n_workers == 0) {
}
CMSParRemarkTask tsk(this,
// Set up for parallel process_strong_roots work.
// We won't be iterating over the cards in the card table updating
// the younger_gen cards, so we shouldn't call the following else
// the verification code as well as subsequent younger_refs_iterate
// code would get confused. XXX
// gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
// The young gen rescan work will not be done as part of
// process_strong_roots (which currently doesn't know how to
// parallelize such a scan), but rather will be broken up into
// a set of parallel tasks (via the sampling that the [abortable]
// preclean phase did of EdenSpace, plus the [two] tasks of
// scanning the [two] survivor spaces. Further fine-grain
// parallelization of the scanning of the survivor spaces
// themselves, and of precleaning of the younger gen itself
// is deferred to the future.
// The dirty card rescan work is broken up into a "sequence"
// of parallel tasks (per constituent space) that are dynamically
// claimed by the parallel threads.
// It turns out that even when we're using 1 thread, doing the work in a
// separate thread causes wide variance in run times. We can't help this
// in the multi-threaded case, but we special-case n=1 here to get
// repeatable measurements of the 1-thread overhead of the parallel code.
if (n_workers > 1) {
// Make refs discovery MT-safe, if it isn't already: it may not
// necessarily be so, since it's possible that we are doing
// ST marking.
} else {
}
// restore, single-threaded for now, any preserved marks
// as a result of work_q overflow
}
// Non-parallel version of remark
void CMSCollector::do_remark_non_parallel() {
&_markStack, &_revisitStack, this,
false /* should_yield */, false /* not precleaning */);
markFromDirtyCardsClosure(this, _span,
NULL, // space is set further below
&mrias_cl);
{
// Iterate over the dirty cards, setting the corresponding bits in the
// mod union table.
{
_cmsGen->used_region(),
_permGen->used_region(),
}
// Having transferred these marks into the modUnionTable, we just need
// to rescan the marked objects on the dirty cards in the modUnionTable.
// The initial marking may have been done during an asynchronous
// collection so there may be dirty bits in the mod-union table.
const int alignment =
{
// ... First handle dirty cards in CMS gen
if (PrintCMSStatistics != 0) {
}
}
{
// .. and then repeat for dirty cards in perm gen
if (PrintCMSStatistics != 0) {
}
}
}
if (VerifyDuringGC &&
}
{
true, // younger gens as roots
false, // use the local StrongRootsScope
true, // collecting perm gen
&mrias_cl,
true, // walk code active on stacks
NULL);
"if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
}
// Restore evacuated mark words, if any, used for overflow list links
if (!CMSOverflowEarlyRestoration) {
}
}
////////////////////////////////////////////////////////
// Parallel Reference Processing Task Proxy Class
////////////////////////////////////////////////////////
class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
public:
// XXX Should superclass AGTWOQ also know about AWG since it knows
// about the task_queues used by the AWG? Then it could initialize
// the terminator() object. See 6984287. The set_for_termination()
// below is a temporary band-aid for the regression in 6984287.
AbstractGangTaskWOopQueues("Process referents by policy in parallel",
{
"Inconsistency in _span");
}
void do_work_steal(int i,
int* seed);
};
if (_task.marks_oops_alive()) {
}
}
class CMSRefEnqueueTaskProxy: public AbstractGangTask {
public:
: AbstractGangTask("Enqueue reference objects in parallel"),
{ }
{
}
};
{ }
// . see if we can share work_queues with ParNew? XXX
void CMSRefProcTaskProxy::do_work_steal(int i,
int* seed) {
NOT_PRODUCT(int num_steals = 0;)
while (true) {
// Completely finish any left over work from (an) earlier round(s)
drain->trim_queue(0);
// Now check if there's any work in the overflow list
// Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
// only affects the number of attempts made to get work from the
// overflow list and does not affect the number of workers. Just
// pass ParallelGCThreads so this behavior is unchanged.
// Found something in global overflow list;
// not yet ready to go stealing work from others.
// We'd like to assert(work_q->size() != 0, ...)
// because we just took work from the overflow list,
// but of course we can't, since all of that might have
// been already stolen from us.
continue;
}
// Verify that we have no work before we resort to stealing
// Try to steal from other queues that have work
// Do scanning work
// Loop around, finish this work, and try to steal some more
} else if (terminator()->offer_termination()) {
break; // nirvana from the infinite cycle
}
}
if (PrintCMSStatistics != 0) {
}
)
}
{
}
{
}
// Process weak references.
false /* !preclean */);
&cmsKeepAliveClosure, false /* !preclean */);
{
if (rp->processing_is_mt()) {
// Set the degree of MT here. If the discovery is done MT, there
// may have been a different number of threads doing the discovery
// and a different number of discovered lists may have Ref objects.
// That is OK as long as the Reference lists are balanced (see
// balance_all_queues() and balance_queues()).
int active_workers = ParallelGCThreads;
// The expectation is that active_workers will have already
// been set to a reasonable value. If it has not been set,
// investigate.
}
CMSRefProcTaskExecutor task_executor(*this);
} else {
NULL,
}
}
if (should_unload_classes()) {
{
// Follow SystemDictionary roots and unload classes
// Follow CodeCache roots and unload any methods marked for unloading
// Update subklass/sibling/implementor links in KlassKlass descendants
oop k;
}
assert(!ClassUnloading ||
"Should not have found new reachable objects");
}
{
// Clean up unreferenced symbols in symbol table.
SymbolTable::unlink();
}
}
if (should_unload_classes() || !JavaObjectsInPerm) {
// Now clean up stale oops in StringTable
}
// Restore any preserved marks as a result of mark stack or
// work queue overflow
restore_preserved_marks_if_any(); // done single-threaded for now
rp->set_enqueuing_is_done(true);
if (rp->processing_is_mt()) {
rp->balance_all_queues();
CMSRefProcTaskExecutor task_executor(*this);
} else {
}
}
#ifndef PRODUCT
void CMSCollector::check_correct_thread_executing() {
// Only the VM thread or the CMS thread should be here.
"Unexpected thread type");
// If this is the vm thread, the foreground process
// should not be waiting. Note that _foregroundGCIsActive is
// true while the foreground collector is waiting.
if (_foregroundGCShouldWait) {
// We cannot be the VM thread
assert(t->is_ConcurrentGC_thread(),
"Should be CMS thread");
} else {
// We can be the CMS thread only if we are in a stop-world
// phase of CMS collection.
if (t->is_ConcurrentGC_thread()) {
"Should be a stop-world phase");
// The CMS thread should be holding the CMS_token.
"Potential interference with concurrently "
"executing VM thread");
}
}
}
#endif
// PermGen verification support: If perm gen sweeping is disabled in
// this cycle, we preserve the perm gen object "deadness" information
// in the perm_gen_verify_bit_map. In order to do that we traverse
// all blocks in perm gen and mark all dead objects.
if (verifying() && !should_unload_classes()) {
"Should have already been allocated");
if (asynch) {
bitMapLock());
} else {
// In the case of synchronous sweep, we already have
}
}
if (asynch) {
// First sweep the old gen then the perm gen
{
bitMapLock());
}
// Now repeat for perm gen
if (should_unload_classes()) {
bitMapLock());
}
// Update Universe::_heap_*_at_gc figures.
// We need all the free list locks to make the abstract state
// transition from Sweeping to Resetting. See detailed note
// further below.
{
_permGen->freelistLock());
// Update heap occupancy information which is used as
// input to soft ref clearing policy at the next gc.
}
} else {
// already have needed locks
if (should_unload_classes()) {
}
// Update heap occupancy information which is used as
// input to soft ref clearing policy at the next gc.
}
// We need to use a monotonically non-decreasing time in ms
// or we will see time-warp warnings and os::javaTimeMillis()
// does not guarantee monotonicity.
// NOTE on abstract state transitions:
// based on the state of the collection. The former is done in
// the interval [Marking, Sweeping] and the latter in the interval
// [Marking, Sweeping). Thus the transitions into the Marking state
// and out of the Sweeping state must be synchronously visible
// globally to the mutators.
// The transition into the Marking state happens with the world
// stopped so the mutators will globally see it. Sweeping is
// done asynchronously by the background collector so the transition
// from the Sweeping state to the Resizing state must be done
// under the freelistLock (as is the check for whether to
// allocate-live and whether to dirty the mod-union table).
" Resizing must be done under the freelistLocks (plural)");
// Now that sweeping has been completed, we clear
// the incremental_collection_failed flag,
// thus inviting a younger gen collection to promote into
// this generation. If such a promotion may still fail,
// the flag will be set again when a young collection is
// attempted.
}
// FIX ME!!! Looks like this belongs in CFLSpace, with
// CMSGen merely delegating to it.
if (largestAddr == NULL) {
// The dictionary appears to be empty. In this case
// try to coalesce at the end of the heap.
}
if (PrintFLSStatistics != 0) {
}
}
}
return _cmsSpace->find_chunk_at_end();
}
bool full) {
// The next lower level has been collected. Gather any statistics
// that are of interest at this point.
// Gather statistics on the young generation collection.
}
}
"Wrong type of heap");
"Wrong type of size policy");
return sp;
}
if (PrintGCDetails && Verbose) {
}
if (PrintGCDetails && Verbose) {
}
}
bool asynch) {
// We iterate over the space(s) underlying this generation,
// checking the mark bit map to see if the bits corresponding
// to specific blocks are marked or not. Blocks that are
// marked are live and are not swept up. All remaining blocks
// are swept up, with coalescing on-the-fly as we sweep up
// We need to ensure that the sweeper synchronizes with allocators
// and stop-the-world collectors. In particular, the following
// locks are used:
// . CMS token: if this is held, a stop the world collection cannot occur
// . freelistLock: if this is held no allocation can occur from this
// generation by another thread
// . bitMapLock: if this is held, no other thread can access or update
//
// Note that we need to hold the freelistLock if we use
// block iterate below; else the iterator might go awry if
// a mutator (or promotion) causes block contents to change
// (for instance if the allocator divvies up a block).
// If we hold the free list lock, for all practical purposes
// young generation GC's can't occur (they'll usually need to
// promote), so we might as well prevent all young generation
// GC's while we do a sweeping step. For the same reason, we might
// as well take the bit map lock for the entire duration of the sweep.
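// Purely as an illustration of the locking discipline described above, here
// is a self-contained sketch in standard C++. The mutexes below are
// hypothetical stand-ins for the collector's freelistLock and bitMapLock,
// not the real HotSpot lock types:
#include <mutex>
static std::mutex sketch_free_list_lock;   // stands in for freelistLock
static std::mutex sketch_bit_map_lock;     // stands in for bitMapLock
static void sweep_step_sketch() {
  // Hold both locks, acquired in a fixed order, for the duration of one
  // bounded sweeping step so that neither allocation nor bit map updates
  // can race with the sweeper.
  std::lock_guard<std::mutex> fl(sketch_free_list_lock);
  std::lock_guard<std::mutex> bm(sketch_bit_map_lock);
  // ... sweep a bounded portion of the space here ...
}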
// check that we hold the requisite locks
"Should possess CMS token to sweep");
{
// co-terminal free run. This is done in the SweepClosure
// destructor; so, do not remove this scope, else the
// end-of-sweep-census below will be off by a little bit.
}
if (should_unload_classes()) { // unloaded classes this cycle,
_concurrent_cycles_since_last_unload = 0; // ... reset count
} else { // did not unload classes,
_concurrent_cycles_since_last_unload++; // ... increment count
}
}
// Reset CMS data structures (for now just the marking bit map)
// preparatory for the next cycle.
if (asynch) {
// If the state is not "Resetting", the foreground thread
// has done a collection and the resetting.
if (_collectorState != Resetting) {
" because the foreground collector has finished the collection");
return;
}
// Clear the mark bitmap (no grey objects to start with)
// for the next cycle.
if (ConcurrentMarkSweepThread::should_yield() &&
!foregroundGCIsActive() &&
CMSYield) {
"CMS thread should hold CMS token");
bitMapLock()->unlock();
stopTimer();
if (PrintCMSStatistics != 0) {
}
icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
!CMSCollector::foregroundGCIsActive(); ++i) {
}
ConcurrentMarkSweepThread::synchronize(true);
startTimer();
}
}
// A successful mostly concurrent collection has been done.
// Because only the full (i.e., concurrent mode failure) collections
// are being measured for gc overhead limits, clean the "near" flag
// and count.
} else {
// already have the lock
}
// Stop incremental mode after a cycle completes, so that any future cycles
// are triggered by allocation.
stop_icms();
if (RotateCMSCollectionTypes) {
}
)
}
switch (op) {
case CMS_op_checkpointRootsInitial: {
checkpointRootsInitial(true); // asynch
if (PrintGC) {
}
break;
}
case CMS_op_checkpointRootsFinal: {
checkpointRootsFinal(true, // asynch
false, // !clear_all_soft_refs
false); // !init_mark_was_synchronous
if (PrintGC) {
}
break;
}
default:
fatal("No such CMS_op");
}
}
#ifndef PRODUCT
return FreeChunk::header_size();
}
// Try and collect here conditions that should hold when
// CMS thread is exiting. The idea is that the foreground GC
// thread should not be blocked if it wants to terminate
// the CMS thread and yet continue to run the VM for a while
// after that.
void CMSCollector::verify_ok_to_terminate() const {
"should be called by CMS thread");
// We could check here that all the various low-level locks
// are not held by the CMS thread, but that is overkill; see
// also CMSThread::verify_ok_to_terminate() where the CGC_lock
// is checked.
}
#endif
"missing Printezis mark?");
"alignment problem");
return size;
}
// A variant of the above (block_size_using_printezis_bits()) except
// that we return 0 if the P-bits are not yet set.
"alignment problem");
return size;
}
return 0;
}
} else {
}
"must be different cards");
return next_card;
}
// CMS Bit Map Wrapper /////////////////////////////////////////
// Construct a CMS bit map infrastructure, but don't create the
// bit vector itself. That is done by a separate call CMSBitMap::allocate()
// further below.
_bm(),
{
_bmStartWord = 0;
_bmWordSize = 0;
}
if (!brs.is_reserved()) {
warning("CMS bit map allocation failure");
return false;
}
// For now we'll just commit all of the bit map up front.
// Later on we'll try to be more parsimonious with swap.
warning("CMS bit map backing store failure");
return false;
}
"didn't reserve backing store for all of CMS bit map?");
_bmWordSize, "inconsistency in bit map sizing");
// bm.clear(); // can we rely on getting zero'd memory? verify below
assert(isAllClear(),
"Expected zero'd memory from ReservedSpace constructor");
"consistency check");
return true;
}
// XXX assert that start and end are appropriately aligned
if (!dirty_region.is_empty()) {
} else {
return;
}
}
}
#ifndef PRODUCT
void CMSBitMap::assert_locked() const {
}
// assert(_bm.map() == _virtual_space.low(), "map inconsistency");
"size inconsistency");
}
}
// verify that there are no 1 bits in the interval [left, right)
}
{
// mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
// convert address range into offset range
// Make sure that end() is appropriately aligned
"Misaligned mr.end()");
}
#endif
// allocate a stack of the requisite depth
if (!rs.is_reserved()) {
warning("CMSMarkStack allocation failure");
return false;
}
warning("CMSMarkStack backing store failure");
return false;
}
"didn't reserve backing store for all of CMS stack?");
_index = 0;
NOT_PRODUCT(_max_depth = 0);
return true;
}
// XXX FIX ME !!! In the MT case we come in here holding a
// leaf lock. For printing we need to take a further lock
// which has lower rank. We need to recalibrate the two
// lock-ranks involved in order to be able to print the
// messages below. (Or defer the printing to the caller.
// For now we take the expedient path of just disabling the
// messages for the problematic case.)
void CMSMarkStack::expand() {
if (_capacity == MarkStackSizeMax) {
// We print a warning message only once per CMS cycle.
}
return;
}
// Double capacity if possible
// Do not give up existing stack until we have managed to
// get the double capacity that we desired.
new_capacity * sizeof(oop)));
if (rs.is_reserved()) {
// Release the backing store associated with old stack
// Reinitialize virtual space for new stack
fatal("Not enough swap for expanded marking stack");
}
_index = 0;
// Failed to double capacity, continue;
// we print a detail message only once per CMS cycle.
SIZE_FORMAT"K",
_capacity / K, new_capacity / K);
}
}
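// A minimal sketch of the expansion policy described above, in standard
// C++ with malloc/free standing in for the VirtualSpace reserve/release
// machinery (all names here are hypothetical): the old backing store is
// given up only after the larger one has actually been obtained.
#include <algorithm>
#include <cstdlib>
static bool expand_stack_sketch(void**& base, size_t& capacity,
                                size_t& index, size_t max_capacity) {
  if (capacity >= max_capacity) {
    return false;                    // already at the configured maximum
  }
  size_t new_capacity = std::min(2 * capacity, max_capacity);
  void** new_base = (void**) malloc(new_capacity * sizeof(void*));
  if (new_base == NULL) {
    return false;                    // keep the old backing store intact
  }
  free(base);                        // safe to release the old store now
  base = new_base;
  capacity = new_capacity;
  index = 0;                         // expansion happens with an empty stack
  return true;
}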
// Closures
// XXX: there seems to be a lot of code duplication here;
// should refactor and consolidate common code.
// This closure is used to mark refs into the CMS generation in
// the CMS bit map. Called at the first checkpoint. This closure
// assumes that we do not need to re-mark dirty cards; if the CMS
// generation on which this is used is not an oldest (modulo perm gen)
// generation then this will lose younger_gen cards!
{
}
// if p points into _span, then mark corresponding bit in _markBitMap
// this should be made more efficient
}
}
// A variant of the above, used for CMS marking verification.
{
}
// if p points into _span, then mark corresponding bit in _markBitMap
fatal("... aborting");
}
}
}
//////////////////////////////////////////////////
// MarkRefsIntoAndScanClosure
//////////////////////////////////////////////////
bool should_yield,
bool concurrent_precleaning):
{
_ref_processor = rp;
}
// This closure is used to mark refs into the CMS generation at the
// second (final) checkpoint, and to scan and transitively follow
// the unmarked oops. It is also used during the concurrent precleaning
// phase while scanning objects on dirty cards in the CMS generation.
// The marks are made in the marking bit map and the marking stack is
// used for keeping the (newly) grey objects during the scan.
// The parallel version (Par_...) appears further below.
"overflow list should be empty");
// mark bit map (object is now grey)
// push on marking stack (stack should be empty), and drain the
// stack by applying this closure to the oops in the oops popped
// from the stack (i.e. blacken the grey objects)
do {
"only grey objects on this stack");
// iterate over the oops in this oop, marking and pushing
// the ones in CMS heap (i.e. in _span).
// check if it's time to yield
} while (!_mark_stack->isEmpty() ||
// if marking stack is empty, and we are not doing this
// during precleaning, then check the overflow list
}
"overflow list was drained above");
// We could restore evacuated mark words, if any, used for
// overflow list links here because the overflow list is
// provably empty here. That would reduce the maximum
// size requirements for preserved_{oop,mark}_stack.
// But we'll just postpone it until we are all done
// so we can just stream through.
}
"All preserved marks should have been restored above");
}
}
void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
void MarkRefsIntoAndScanClosure::do_yield_work() {
"CMS thread should hold CMS token");
// relinquish the free_list_lock and bitMaplock()
_freelistLock->unlock();
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
}
_collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0;
i < CMSYieldSleepCount &&
++i) {
}
ConcurrentMarkSweepThread::synchronize(true);
_collector->startTimer();
}
///////////////////////////////////////////////////////////
// Par_MarkRefsIntoAndScanClosure: a parallel version of
// MarkRefsIntoAndScanClosure
///////////////////////////////////////////////////////////
{
_ref_processor = rp;
}
// This closure is used to mark refs into the CMS generation at the
// second (final) checkpoint, and to scan and transitively follow
// the unmarked oops. The marks are made in the marking bit map and
// the work_queue is used for keeping the (newly) grey objects during
// the scan phase whence they are also available for stealing by parallel
// threads. Since the marking bit map is shared, updates are
// synchronized (via CAS).
// Ignore mark word because this could be an already marked oop
// that may be chained at the end of the overflow list.
// mark bit map (object will become grey):
// It is possible for several threads to be
// trying to "claim" this object concurrently;
// the unique thread that succeeds in marking the
// object first will do the subsequent push on
// to the work queue (or overflow list).
// push on work_queue (which may not be empty), and trim the
// queue to an appropriate length by applying this closure to
// the oops in the oops popped from the stack (i.e. blacken the
// grey objects)
} // Else, another thread claimed the object
}
}
}
void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper.
// check if it's time to yield
if (do_yield_check()) {
// We yielded for some foreground stop-world work,
// and we have been asked to abort this ongoing preclean cycle.
return 0;
}
// it's marked; is it potentially uninitialized?
if (p->klass_or_null() != NULL) {
// If is_conc_safe is false, the object may be undergoing
// change by the VM outside a safepoint. Don't try to
// scan it, but rather leave it for the remark phase.
if (CMSPermGenPrecleaningEnabled &&
(!p->is_conc_safe() || !p->is_parsable())) {
// Signal precleaning to redirty the card since
// the klass pointer is already installed.
} else {
// an initialized object; ignore mark word in verification below
// since we are running concurrent with mutators
if (p->is_objArray()) {
// objArrays are precisely marked; restrict scanning
// to dirty cards only.
} else {
// A non-array may have been imprecisely marked; we need
// to scan object in its entirety.
p->oop_iterate(_scanningClosure));
}
#ifdef DEBUG
} else {
"inconsistent Printezis mark");
}
#endif // DEBUG
}
} else {
// an uninitialized object
"alignment problem");
// Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
// will dirty the card when the klass pointer is installed in the
// object (signalling the completion of initialization).
}
} else {
// Either a not yet marked object or an uninitialized object
// An uninitialized object, skip to the next card, since
// we may not be able to read its P-bits yet.
} else {
// An object not (yet) reached by marking: we merely need to
// compute its size so as to go look at the next block.
}
}
return size;
}
"CMS thread should hold CMS token");
// relinquish the free_list_lock and bitMaplock()
_freelistLock->unlock();
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
}
_collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
!CMSCollector::foregroundGCIsActive(); ++i) {
}
ConcurrentMarkSweepThread::synchronize(true);
_collector->startTimer();
}
//////////////////////////////////////////////////////////////////
// SurvivorSpacePrecleanClosure
//////////////////////////////////////////////////////////////////
// This (single-threaded) closure is used to preclean the oops in
// the survivor spaces.
// an initialized object; ignore mark word in verification below
// since we are running concurrent with mutators
// Note that we do not yield while we iterate over
// the interior oops of p, pushing the relevant ones
// on our marking stack.
// Observe that below, we do not abandon the preclean
// phase as soon as we should; rather we empty the
// marking stack before returning. This is to satisfy
// some existing assertions. In general, it may be a
// good idea to abort immediately and complete the marking
// from the grey objects at a later time.
while (!_mark_stack->isEmpty()) {
"only grey objects on this stack");
// iterate over the oops in this oop, marking and pushing
// the ones in CMS heap (i.e. in _span).
// check if it's time to yield
}
unsigned int after_count =
}
void SurvivorSpacePrecleanClosure::do_yield_work() {
"CMS thread should hold CMS token");
// Relinquish the bit map lock
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
}
_collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
!CMSCollector::foregroundGCIsActive(); ++i) {
}
ConcurrentMarkSweepThread::synchronize(true);
_collector->startTimer();
}
// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper. In the parallel
// case, although the bitMap is shared, we do a single read so the
// isMarked() query is "safe".
// Ignore mark word because we are running concurrent with mutators
bool is_obj_array = false;
#ifdef DEBUG
if (!_parallel) {
"overflow list should be empty");
}
#endif // DEBUG
// Obj arrays are precisely marked, non-arrays are not;
// so we scan objArrays precisely and non-arrays in their
// entirety.
if (p->is_objArray()) {
is_obj_array = true;
if (_parallel) {
} else {
}
} else {
if (_parallel) {
} else {
p->oop_iterate(_scan_closure);
}
}
}
#ifdef DEBUG
if (!_parallel) {
"overflow list should be empty");
}
#endif // DEBUG
return is_obj_array;
}
bool should_yield, bool verifying):
_skipBits(0)
{
}
}
// Should revisit to see if this should be restructured for
// greater efficiency.
if (_skipBits > 0) {
_skipBits--;
return true;
}
// convert offset into a HeapWord*
"address out of range");
// this is an allocated but not yet initialized object
DEBUG_ONLY(if (!_verifying) {)
// We re-dirty the cards on which this object lies and increase
// the _threshold so that we'll come back to scan this object
// during the preclean or remark phase. (CMSCleanOnEnter)
if (CMSCleanOnEnter) {
// Bump _threshold to end_card_addr; note that
// _threshold cannot possibly exceed end_card_addr, anyhow.
// This prevents future clearing of the card as the scan proceeds
// to the right.
"Because we are just scanning into this object");
if (_threshold < end_card_addr) {
}
if (p->klass_or_null() != NULL) {
// Redirty the range of cards...
} // ...else the setting of klass will dirty the card anyway.
}
DEBUG_ONLY(})
return true;
}
}
return true;
}
// We take a break if we've been at this for a while,
// so as to avoid monopolizing the locks involved.
void MarkFromRootsClosure::do_yield_work() {
// First give up the locks, then yield, then re-lock
// We should probably use a constructor/destructor idiom to
// serve our purpose. XXX
"CMS thread should hold CMS token");
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
}
_collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
!CMSCollector::foregroundGCIsActive(); ++i) {
}
ConcurrentMarkSweepThread::synchronize(true);
_collector->startTimer();
}
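// The various do_yield_work() methods in this file share one outline: drop
// the low-level locks, stop the timer, surrender the CMS token so the VM
// thread can run a stop-world phase, sleep for a bounded number of short
// intervals, then re-acquire everything and resume. A minimal sketch of
// that shape in standard C++ (the lock and flag are hypothetical stand-ins,
// not the collector's bitMapLock or foreground-GC flag):
#include <atomic>
#include <chrono>
#include <mutex>
#include <thread>
static void yield_protocol_sketch(std::mutex& bit_map_lock,
                                  const std::atomic<bool>& foreground_gc_active) {
  bit_map_lock.unlock();             // caller holds the lock on entry
  // Give the other thread a bounded window in which to take over.
  for (unsigned i = 0; i < 10 && !foreground_gc_active.load(); ++i) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
  bit_map_lock.lock();               // caller expects the lock held on return
}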
"should drain stack to limit stack usage");
// convert ptr to an oop preparatory to scanning
// Ignore mark word in verification below, since we
// may be running concurrent with mutators.
// advance the finger to right end of this object
// On large heaps, it may take us some time to get through
// the marking phase (especially if running iCMS). During
// this time it's possible that a lot of mutations have
// accumulated in the card table and the mod union table --
// these mutation records are redundant until we have
// actually traced into the corresponding card.
// Here, we check whether advancing the finger would make
// us cross into a new card, and if so clear corresponding
// cards in the MUT (preclean them in the card-table in the
// future).
DEBUG_ONLY(if (!_verifying) {)
// The clean-on-enter optimization is disabled by default,
// until we fix 6178663.
// [_threshold, _finger) represents the interval
// of cards to be cleared in MUT (or precleaned in card table).
// The set of cards to be cleared is all those that overlap
// with the interval [_threshold, _finger); note that
// _threshold is always kept card-aligned but _finger isn't
// always card-aligned.
"_threshold should always be card-aligned");
// XXX When _finger crosses from old gen into perm gen
// we may be doing unnecessary cleaning; do better in the
// future by detecting that condition and clearing fewer
}
DEBUG_ONLY(})
// Note: the finger doesn't advance while we drain
// the stack below.
_finger, this);
while (!_markStack->isEmpty()) {
// Skip verifying header mark word below because we are
// running concurrent with mutators.
// now scan this oop's oops
}
}
bool should_yield):
_skip_bits(0),
{
}
// Should revisit to see if this should be restructured for
// greater efficiency.
if (_skip_bits > 0) {
_skip_bits--;
return true;
}
// convert offset into a HeapWord*
"address out of range");
// this is an allocated object that might not yet be initialized
// in the case of Clean-on-Enter optimization, redirty card
// and avoid clearing card by increasing the threshold.
return true;
}
}
return true;
}
// Should we assert that our work queue is empty or
// below some drain limit?
"should drain stack to limit stack usage");
// convert ptr to an oop preparatory to scanning
// Ignore mark word in verification below, since we
// may be running concurrent with mutators.
// advance the finger to right end of this object
// On large heaps, it may take us some time to get through
// the marking phase (especially if running iCMS). During
// this time it's possible that a lot of mutations have
// accumulated in the card table and the mod union table --
// these mutation records are redundant until we have
// actually traced into the corresponding card.
// Here, we check whether advancing the finger would make
// us cross into a new card, and if so clear corresponding
// cards in the MUT (preclean them in the card-table in the
// future).
// The clean-on-enter optimization is disabled by default,
// until we fix 6178663.
// [_threshold, _finger) represents the interval
// of cards to be cleared in MUT (or precleaned in card table).
// The set of cards to be cleared is all those that overlap
// with the interval [_threshold, _finger); note that
// _threshold is always kept card-aligned but _finger isn't
// always card-aligned.
"_threshold should always be card-aligned");
// XXX When _finger crosses from old gen into perm gen
// we may be doing unnecessary cleaning; do better in the
// future by detecting that condition and clearing fewer
}
// Note: the local finger doesn't advance while we drain
// the stack below, but the global finger sure can and will.
gfa, this);
while (true) {
// We emptied our work_queue; check if there's stuff that can
// be gotten from the overflow stack.
continue;
} else { // done
break;
}
}
// Skip verifying header mark word below because we are
// running concurrent with mutators.
// now scan this oop's oops
}
}
// Yield in response to a request from VM Thread or
// from mutators.
void Par_MarkFromRootsClosure::do_yield_work() {
}
// A variant of the above used for verifying CMS marking work.
{
}
}
// Should revisit to see if this should be restructured for
// greater efficiency.
// convert offset into a HeapWord*
"address out of range");
"should drain stack to limit stack usage");
// convert addr to an oop preparatory to scanning
// advance the finger to right end of this object
// Note: the finger doesn't advance while we drain
// the stack below.
while (!_mark_stack->isEmpty()) {
// now scan this oop's oops
}
return true;
}
{ }
// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
// Remember the least grey address discarded
}
// Oop lies in _span and isn't yet grey or black
addr);
fatal("... aborting");
}
if (PrintCMSStatistics != 0) {
}
}
// anything including and to the right of _finger
// will be scanned as we iterate over the remainder of the
// bit map
}
}
{ }
{ }
// Assumes thread-safe access by callers, who are
// responsible for mutual exclusion.
if (_restart_addr == NULL) {
_restart_addr = low;
} else {
}
}
// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
// Remember the least grey address discarded
}
// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
// We need to do this under a mutex to prevent other
// workers from interfering with the work done below.
// Remember the least grey address discarded
}
// Ignore mark word because we are running concurrent with mutators.
// Oop lies in _span and isn't yet grey or black
// the bit map iteration has already either passed, or
// sampled, this bit in the bit map; we'll need to
// use the marking stack to scan this oop's oops.
bool simulate_overflow = false;
if (CMSMarkStackOverflowALot &&
_collector->simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
if (PrintCMSStatistics != 0) {
}
}
}
// anything including and to the right of _finger
// will be scanned as we iterate over the remainder of the
// bit map
}
}
// Ignore mark word because we are running concurrent with mutators.
// Oop lies in _span and isn't yet grey or black
// We read the global_finger (volatile read) strictly after marking oop
// Should we push this marked oop on our stack?
// -- if someone else marked it, nothing to do
// -- if target oop is above global finger nothing to do
// -- if target oop is in chunk and above local finger
// then nothing to do
// -- else push on work queue
if ( !res // someone else marked it, they will deal with it
return;
}
// the bit map iteration has already either passed, or
// sampled, this bit in the bit map; we'll need to
// use the marking stack to scan this oop's oops.
bool simulate_overflow = false;
if (CMSMarkStackOverflowALot &&
_collector->simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
if (simulate_overflow ||
// stack overflow
if (PrintCMSStatistics != 0) {
}
// We cannot assert that the overflow stack is full because
// it may have been emptied since.
"Else push should have succeeded");
}
}
}
OopClosure(rp),
bool concurrent_precleaning):
{
}
// Grey object rescan during pre-cleaning and second checkpoint phases --
// the non-parallel version (the parallel version appears further below.)
// Ignore mark word verification. If during concurrent precleaning,
// the object monitor may be locked. If during the checkpoint
// phases, the object may already have been reached by a different
// path and may be at the end of the global overflow list (so
// the mark word may be NULL).
"expected an oop or NULL");
// Check if oop points into the CMS generation
// and is not marked
// a white object ...
// push on the marking stack (grey set)
bool simulate_overflow = false;
if (CMSMarkStackOverflowALot &&
_collector->simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
if (_concurrent_precleaning) {
// During precleaning we can just dirty the appropriate card(s)
// in the mod union table, thus ensuring that the object remains
// in the grey set and continue. In the case of object arrays
// we need to dirty all of the cards that the object spans,
// since the rescan of object arrays will be limited to the
// dirty cards.
// Note that no one can be interfering with us in this action
// of dirtying the mod union table, so no locking or atomics
// are required.
if (obj->is_objArray()) {
} else {
}
} else {
// During the remark phase, we need to remember this oop
// in the overflow list.
}
}
}
}
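// Keeping an overflowed object grey during precleaning amounts to dirtying
// every card its storage spans (see the comment above). The card arithmetic
// is simple; here is a self-contained sketch over a byte-per-card table.
// The 512-byte card size and the value 0 for "dirty" match HotSpot's usual
// configuration but are assumptions here, as are all the names:
#include <cstddef>
#include <cstdint>
static const size_t sketch_card_size = 512;   // bytes per card (assumed)
static void dirty_spanned_cards_sketch(unsigned char* card_table,
                                       uintptr_t heap_base,
                                       uintptr_t obj_addr,
                                       size_t obj_size_in_bytes) {
  size_t first_card = (obj_addr - heap_base) / sketch_card_size;
  size_t last_card  = (obj_addr + obj_size_in_bytes - 1 - heap_base)
                      / sketch_card_size;
  // An object array needs every spanned card dirtied because its rescan is
  // limited to dirty cards; a non-array only needs its header's card.
  for (size_t i = first_card; i <= last_card; ++i) {
    card_table[i] = 0;               // 0 denotes "dirty" (assumed)
  }
}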
{
}
// Grey object rescan during second checkpoint phase --
// the parallel version.
// In the assert below, we ignore the mark word because
// this oop may point to an already visited object that is
// on the overflow stack (in which case the mark word has
// been hijacked for chaining into the overflow stack --
// if this is the last object in the overflow stack then
// its mark word will be NULL). Because this object may
// have been subsequently popped off the global overflow
// stack, and the mark word possibly restored to the prototypical
// value, by the time we get to examine this failing assert in
// the debugger, is_oop_or_null(false) may subsequently start
// to hold.
"expected an oop or NULL");
// Check if oop points into the CMS generation
// and is not marked
// a white object ...
// If we manage to "claim" the object, by being the
// first thread to mark it, then we push it on our
// marking stack
// push on work queue (grey set)
bool simulate_overflow = false;
if (CMSMarkStackOverflowALot &&
// simulate a stack overflow
simulate_overflow = true;
}
)
}
} // Else, some other thread got there first
}
}
// TBD
}
// TBD
}
void CMSPrecleanRefsYieldClosure::do_yield_work() {
"CMS thread should hold CMS token");
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
}
_collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
!CMSCollector::foregroundGCIsActive(); ++i) {
}
ConcurrentMarkSweepThread::synchronize(true);
_collector->startTimer();
}
bool CMSPrecleanRefsYieldClosure::should_return() {
if (ConcurrentMarkSweepThread::should_yield()) {
}
return _collector->foregroundGCIsActive();
}
"mr should be aligned to start at a card boundary");
// We'd like to assert:
// assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
// "mr should be a range of cards");
// However, that would be too strong in one case -- the last
// partition ends at _unallocated_block which, in general, can be
// an arbitrary boundary, not necessarily card aligned.
if (PrintCMSStatistics != 0) {
}
}
_g(g),
_inFreeRange(false), // No free range at beginning of sweep
_freeRangeInFreeLists(false), // No free range at beginning of sweep
_lastFreeRangeCoalesced(false),
{
_numObjectsFreed = 0;
_numWordsFreed = 0;
_numObjectsLive = 0;
_numWordsLive = 0;
_numWordsAlreadyFree = 0;
)
"sweep _limit out of bounds");
if (CMSTraceSweeper) {
_limit);
}
}
}
#ifndef PRODUCT
// Assertion checking only: no useful work in product mode --
// however, if any of the flags below become product flags,
// you may need to review this code to see if it needs to be
// enabled in product mode.
SweepClosure::~SweepClosure() {
"sweep _limit out of bounds");
if (inFreeRange()) {
warning("inFreeRange() should have been reset; dumping state of SweepClosure");
print();
}
SIZE_FORMAT" bytes "
* sizeof(HeapWord);
if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
}
}
if (CMSTraceSweeper) {
_limit);
}
}
#endif // PRODUCT
bool freeRangeInFreeLists) {
if (CMSTraceSweeper) {
}
set_inFreeRange(true);
set_lastFreeRangeCoalesced(false);
if (CMSTestInFreeList) {
if (freeRangeInFreeLists) {
}
}
}
// Note that the sweeper runs concurrently with mutators. Thus,
// it is possible for direct allocation in this generation to happen
// in the middle of the sweep. Note that the sweeper also coalesces
// contiguous free blocks. Thus, unless the sweeper and the allocator
// synchronize appropriately, freshly allocated blocks may get swept up.
// This is accomplished by the sweeper locking the free lists while
// it is sweeping. Thus blocks that are determined to be free are
// indeed free. There is however one additional complication:
// blocks that have been allocated since the final checkpoint and
// mark, will not have been marked and so would be treated as
// unreachable and swept up. To prevent this, the allocator marks
// the bit map when allocating during the sweep phase. This leads,
// however, to a further complication -- objects may have been allocated
// but not yet initialized -- in the sense that the header isn't yet
// installed. The sweeper can not then determine the size of the block
// in order to skip over it. To deal with this case, we use a technique
// (due to Printezis) to encode such uninitialized block sizes in the
// bit map. Since the bit map uses a bit per every HeapWord, but the
// CMS generation has a minimum object size of 3 HeapWords, it follows
// that "normal marks" won't be adjacent in the bit map (there will
// always be at least two 0 bits between successive 1 bits). We make use
// of these "unused" bits to represent uninitialized blocks -- the bit
// corresponding to the start of the uninitialized object and the next
// bit are both set. Finally, a 1 bit marks the end of the object that
// started with the two consecutive 1 bits to indicate its potentially
// uninitialized state.
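// A self-contained sketch of reading back the encoding just described:
// given one bit per HeapWord, two consecutive set bits at the start of a
// block mean "uninitialized", and the next set bit beyond them marks the
// block's last word. This is only an illustration over a plain bit vector,
// not the collector's CMSBitMap API.
#include <cstddef>
#include <vector>
static size_t printezis_block_size_sketch(const std::vector<bool>& bits,
                                          size_t start) {
  if (!(bits[start] && bits[start + 1])) {
    return 0;                        // not a Printezis-marked block
  }
  // Scan for the bit that marks the last word of the block.
  size_t end = start + 2;
  while (end < bits.size() && !bits[end]) {
    end++;
  }
  return end - start + 1;            // size in HeapWords, including last word
}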
// Check if we are done sweeping. Below we check "addr >= _limit" rather
// than "addr == _limit" because although _limit was a block boundary when
// we started the sweep, it may no longer be one because heap expansion
// may have caused us to coalesce the block ending at the address _limit
// with a newly expanded chunk (this happens when _limit was set to the
// previous _end of the space), so we may have stepped past _limit:
// see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
"sweep _limit out of bounds");
// Flush any free range we might be holding as a single
// coalesced chunk to the appropriate free list.
if (inFreeRange()) {
if (CMSTraceSweeper) {
}
}
// help the iterator loop finish
}
// check if we should yield
// Chunk that is already free
// If we flush the chunk at hand in lookahead_and_flush()
// and it's coalesced with a preceding chunk, then the
// process of "mangling" the payload of the coalesced block
// will cause erasure of the size information from the
// (erstwhile) header of all the coalesced blocks but the
// first, so the first disjunct in the assert will not hold
// in that specific case (in which case the second disjunct
// will hold).
"Otherwise the size info doesn't change at this step");
)
// Chunk is fresh garbage
_numWordsFreed += res;
)
} else {
// Chunk that is alive.
_numWordsLive += res;
)
}
return res;
}
// For the smart allocation, record following
// split deaths - a free chunk is removed from its free list because
// it is being split into two or more chunks.
// split birth - a free chunk is being added to its free list because
// a larger free chunk has been split and resulted in this free chunk.
// coal death - a free chunk is being removed from its free list because
// it is being coalesced into a large free chunk.
// coal birth - a free chunk is being added to its free list because
// it was created when two or more free chunks were coalesced into
// this free chunk.
//
// These statistics are used to determine the desired number of free
// chunks of a given size. The desired number is chosen to be relative
// to the end of a CMS sweep. The desired number at the end of a sweep
// is the
// count-at-end-of-previous-sweep (an amount that was enough)
// - count-at-beginning-of-current-sweep (the excess)
// + split-births (gains in this size during interval)
// - split-deaths (demands on this size during interval)
// where the interval is from the end of one sweep to the end of the
// next.
//
// When sweeping the sweeper maintains an accumulated chunk which is
// the chunk that is made up of chunks that have been coalesced. That
// will be termed the left-hand chunk. A new chunk of garbage that
// is being considered for coalescing will be referred to as the
// right-hand chunk.
//
// When making a decision on whether to coalesce a right-hand chunk with
// the current left-hand chunk, the current count vs. the desired count
// of the left-hand chunk is considered. Also if the right-hand chunk
// is near the large chunk at the end of the heap (see
// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
// left-hand chunk is coalesced.
//
// When making a decision about whether to split a chunk, the desired count
// vs. the current count of the candidate to be split is also considered.
// If the candidate is underpopulated (currently fewer chunks than desired)
// a chunk of an overpopulated (currently more chunks than desired) size may
// be chosen. The "hint" associated with a free list, if non-null, points
// to a free list which may be overpopulated.
//
// Chunks that cannot be coalesced are not in the
// free lists.
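// The desired-count rule above is simple arithmetic; a small worked sketch
// follows (hypothetical names, not the allocator's real statistics types).
struct SizeClassStatsSketch {
  long prev_sweep_count;   // count at end of the previous sweep
  long begin_sweep_count;  // count at the beginning of the current sweep
  long split_births;       // gains of this size during the interval
  long split_deaths;       // demands on this size during the interval
};
static long desired_count_sketch(const SizeClassStatsSketch& s) {
  long d = s.prev_sweep_count - s.begin_sweep_count
         + s.split_births - s.split_deaths;
  return d > 0 ? d : 0;    // clamp: never ask for a negative number of chunks
}
// Example: 40 chunks at the end of the previous sweep, 25 on hand when this
// sweep began, 10 split births and 5 split deaths since then give a desired
// count of 40 - 25 + 10 - 5 = 20.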
"free chunk should be in free lists");
}
// a chunk that is already free, should not have been
// marked in the bit map
// Verify that the bit map has no bits marked between
// addr and purported end of this block.
// Some chunks cannot be coalesced under any circumstances.
// See the definition of cantCoalesce().
if (!fc->cantCoalesce()) {
// This chunk can potentially be coalesced.
if (_sp->adaptive_freelists()) {
// All the work is done in
} else { // Not adaptive free lists
// this is a free chunk that can potentially be coalesced by the sweeper;
if (!inFreeRange()) {
// if the next chunk is a free block that can't be coalesced
// it doesn't make sense to remove this chunk from the free lists
// nothing to do
} else {
// Potentially the start of a new free range:
// Don't eagerly remove it from the free lists.
// No need to remove it if it will just be put
// back again. (Also from a pragmatic point of view
// if it is a free block in a region that is beyond
// any allocated blocks, an assertion will fail)
// Remember the start of a free run.
initialize_free_range(addr, true);
// end - can coalesce with next chunk
}
} else {
// the midst of a free range, we are coalescing
if (CMSTraceSweeper) {
}
// remove it from the free lists
set_lastFreeRangeCoalesced(true);
// If the chunk is being coalesced and the current free range is
// in the free lists, remove the current free range so that it
// will be returned to the free lists in its entirety - all
// the coalesced pieces included.
if (freeRangeInFreeLists()) {
"Size of free range is inconsistent with chunk size.");
if (CMSTestInFreeList) {
"free range is not in free lists");
}
set_freeRangeInFreeLists(false);
}
}
}
// Note that if the chunk is not coalescable (the else arm
// below), we unconditionally flush, without needing to do
// a "lookahead," as we do below.
} else {
// Code path common to both original and adaptive free lists.
// can't coalesce with previous block; this should be treated
// as the end of a free run if any
if (inFreeRange()) {
// we kicked some butt; time to pick up the garbage
}
// else, nothing to do, just continue
}
}
// This is a chunk of garbage. It is not in any free list.
// Add it to a free list or let it possibly be coalesced into
// a larger chunk.
if (_sp->adaptive_freelists()) {
// Verify that the bit map has no bits marked between
// addr and purported end of just dead object.
} else {
if (!inFreeRange()) {
// start of a new free range
initialize_free_range(addr, false);
} else {
// this will be swept up when we hit the end of the
// free range
if (CMSTraceSweeper) {
}
// If the chunk is being coalesced and the current free range is
// in the free lists, remove the current free range so that it
// will be returned to the free lists in its entirety - all
// the coalesced pieces included.
if (freeRangeInFreeLists()) {
"Size of free range is inconsistent with chunk size.");
if (CMSTestInFreeList) {
"free range is not in free lists");
}
set_freeRangeInFreeLists(false);
}
set_lastFreeRangeCoalesced(true);
}
// this will be swept up when we hit the end of the free range
// Verify that the bit map has no bits marked between
// addr and purported end of just dead object.
}
"A freshly garbage chunk can't possibly straddle over _limit");
return size;
}
// The sweeper has just found a live object. Return any accumulated
// left hand chunk to the free lists.
if (inFreeRange()) {
}
// This object is live: we'd normally expect this to be
// an oop, and like to assert the following:
// assert(oop(addr)->is_oop(), "live block should be an oop");
// However, as we commented above, this may be an object whose
// header hasn't yet been initialized.
// Determine the size from the bit map, rather than trying to
// compute it from the object header.
"alignment problem");
#ifdef DEBUG
// Ignore mark word because we are running concurrent with mutators
// is_conc_safe is checked before performing this assertion
// because an object that is not is_conc_safe may yet have
// the return from size() correct.
"P-mark and computed size do not agree");
}
#endif
} else {
// This should be an initialized object that's alive.
"Should be an initialized object");
// Note that there are objects used during class redefinition,
// e.g. merge_cp in VM_RedefineClasses::merge_cp_and_rewrite(),
// which are discarded with their is_conc_safe state still
// false. These objects may be floating garbage so may be
// seen here. If they are floating garbage their size
// should be attainable from their klass. Note that
// is_conc_safe() is true for oop(addr).
// Ignore mark word because we are running concurrent with mutators
// Verify that the bit map has no bits marked between
// addr and purported end of this block.
}
return size;
}
// do_post_free_or_garbage_chunk() should only be called in the case
// of the adaptive free list allocator.
if (CMSTestInFreeList && fcInFreeLists) {
}
if (CMSTraceSweeper) {
}
bool coalesce;
switch (FLSCoalescePolicy) {
// numeric value forms a coalescing aggressiveness metric
case 0: { // never coalesce
coalesce = false;
break;
}
case 1: { // coalesce if left & right chunks on overpopulated lists
break;
}
case 2: { // coalesce if left chunk on overpopulated list (default)
break;
}
case 3: { // coalesce if left OR right chunk on overpopulated list
break;
}
case 4: { // always coalesce
coalesce = true;
break;
}
default:
}
// Should the current free range be coalesced?
// If the chunk is in a free range and either we decided to coalesce above
// or the chunk is near the large block at the end of the heap
// (isNearLargestChunk() returns true), then coalesce this chunk.
const bool doCoalesce = inFreeRange()
if (doCoalesce) {
// Coalesce the current free range on the left with the new
// chunk on the right. If either is on a free list,
// it must be removed from the list and stashed in the closure.
if (freeRangeInFreeLists()) {
"Size of free range is inconsistent with chunk size.");
if (CMSTestInFreeList) {
"Chunk is not in free lists");
}
set_freeRangeInFreeLists(false);
}
if (fcInFreeLists) {
"The chunk has the wrong size or is not in the free lists");
}
set_lastFreeRangeCoalesced(true);
// Return the current free range and start a new one.
if (inFreeRange()) {
// In a free range but cannot coalesce with the right hand chunk.
// Put the current free range into the free lists.
}
// Set up for new free range. Pass along whether the right hand
// chunk is in the free lists.
}
}
// Lookahead flush:
// If we are tracking a free range, and this is the last chunk that
// we'll look at because its end crosses past _limit, we'll preemptively
// flush it along with any free range we may be holding on to. Note that
// this can be the case only for an already free or freshly garbage
// chunk. If this block is an object, it can never straddle
// over _limit. The "straddling" occurs when _limit is set at
// the previous end of the space when this cycle started, and
// a subsequent heap expansion caused the previously co-terminal
// free block to be coalesced with the newly expanded portion,
// thus rendering _limit a non-block-boundary making it dangerous
// for the sweeper to step over and examine.
assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
if (CMSTraceSweeper) {
}
// Return the storage we are tracking back into the free lists.
if (CMSTraceSweeper) {
}
}
}
"A zero sized chunk cannot be added to the free lists.");
if (!freeRangeInFreeLists()) {
if (CMSTestInFreeList) {
"chunk should not be in free lists yet");
}
if (CMSTraceSweeper) {
}
// A new free range is going to be starting. The current
// free range has not been added to the free lists yet or
// was removed so add it back.
// If the current free range was coalesced, then the death
// of the free range was recorded. Record a birth now.
if (lastFreeRangeCoalesced()) {
}
} else if (CMSTraceSweeper) {
}
set_inFreeRange(false);
set_freeRangeInFreeLists(false);
}
// We take a break if we've been at this for a while,
// so as to avoid monopolizing the locks involved.
// Return current free chunk being used for coalescing (if any)
// to the appropriate freelist. After yielding, the next
// free block encountered will start a coalescing range of
// free blocks. If the next free block is adjacent to the
// chunk just flushed, they will need to wait for the next
// sweep to be coalesced.
if (inFreeRange()) {
}
// First give up the locks, then yield, then re-lock.
// We should probably use a constructor/destructor idiom to
// serve our purpose. XXX
"CMS thread should hold CMS token");
_freelistLock->unlock();
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
}
_collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
!CMSCollector::foregroundGCIsActive(); ++i) {
}
ConcurrentMarkSweepThread::synchronize(true);
_freelistLock->lock();
_collector->startTimer();
}
#ifndef PRODUCT
// This is actually very useful in a product build if it can
// be called from the debugger. Compile it into the product
// as needed.
}
#endif
if (CMSTraceSweeper) {
}
}
// CMSIsAliveClosure
}
}
// CMSKeepAliveClosure: the serial version
bool simulate_overflow = false;
if (CMSMarkStackOverflowALot &&
_collector->simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
if (_concurrent_precleaning) {
// We dirty the overflown object and let the remark
// phase deal with it.
// In the case of object arrays, we need to dirty all of
// the cards that the object spans. No locking or atomics
// are needed since no one else can be mutating the mod union
// table.
if (obj->is_objArray()) {
} else {
}
} else {
}
}
}
}
// CMSParKeepAliveClosure: a parallel version of the above.
// The work queues are private to each closure (thread),
// but (may be) available for stealing by other threads.
// In general, during recursive tracing, several threads
// may be concurrently getting here; the first one to
// "tag" it, claims it.
// Do a recursive trim in the hope that this will keep
// stack usage lower, but leave some oops for potential stealers
} // Else, another thread got there first
}
}
"no white objects on this stack!");
// iterate over the oops in this oop, marking and pushing
// the ones in CMS heap (i.e. in _span).
}
}
}
_work_queue(work_queue) { }
bool simulate_overflow = false;
if (CMSMarkStackOverflowALot &&
// simulate a stack overflow
simulate_overflow = true;
}
)
}
} // Else another thread got there already
}
}
void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
//////////////////////////////////////////////////////////////////
// CMSExpansionCause /////////////////////////////
//////////////////////////////////////////////////////////////////
switch (cause) {
case _no_expansion:
return "No expansion";
case _satisfy_free_ratio:
return "Free ratio";
case _satisfy_promotion:
return "Satisfy promotion";
case _satisfy_allocation:
return "allocation";
case _allocate_par_lab:
return "Par LAB";
return "Par Spooling Space";
case _adaptive_size_policy:
return "Ergonomics";
default:
return "unknown";
}
}
void CMSDrainMarkingStackClosure::do_void() {
// the max number to take from overflow list at a time
"Overflow list should be NULL during concurrent phases");
while (!_mark_stack->isEmpty() ||
// if stack is empty, check the overflow list
}
}
void CMSParDrainMarkingStackClosure::do_void() {
// drain queue
trim_queue(0);
}
// Trim our work_queue so its length is below max at return
"no white objects on this stack!");
// iterate over the oops in this oop, marking and pushing
// the ones in CMS heap (i.e. in _span).
}
}
}
////////////////////////////////////////////////////////////////////
// Support for Marking Stack Overflow list handling and related code
////////////////////////////////////////////////////////////////////
// Much of the following code is similar in shape and spirit to the
// code used in ParNewGC. We should try and share that code
// as much as possible in the future.
#ifndef PRODUCT
// Debugging support for CMSStackOverflowALot
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool CMSCollector::simulate_overflow() {
if (_overflow_counter-- <= 0) { // just being defensive
return true;
} else {
return false;
}
}
bool CMSCollector::par_simulate_overflow() {
return simulate_overflow();
}
#endif
// Single-threaded
NOT_PRODUCT(ssize_t n = 0;)
NOT_PRODUCT(n++;)
}
#ifndef PRODUCT
_num_par_pushes -=n;
#endif
}
// (MT-safe) Get a prefix of at most "num" from the list.
// The overflow list is chained through the mark word of
// each object in the list. We fetch the entire list,
// break off a prefix of the right size and return the
// remainder. If other threads try to take objects from
// the overflow list at that time, they will wait for
// some time to see if data becomes available. If (and
// only if) another thread places one or more object(s)
// on the global list before we have returned the suffix
// to the global list, we will walk down our local list
// to find its end and append the global list to
// our suffix before returning it. This suffix walk can
// prove to be expensive (quadratic in the amount of traffic)
// when there are many objects in the overflow list and
// there is much producer-consumer contention on the list.
// *NOTE*: The overflow list manipulation code here and
// in ParNewGeneration:: are very similar in shape,
// except that in the ParNew case we use the old (from-space)
// copy of the object to thread the list via its klass word.
// Because of the common code, if you make any changes in
// the code below, please check the ParNew version to see if
// similar changes might be needed.
// CR 6797058 has been filed to consolidate the common code.
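// The shape of the protocol described above can be sketched with an
// ordinary intrusive list and std::atomic. The real code threads the list
// through object mark words and parks a BUSY sentinel in the list head
// while it works; both details are omitted here, so this is only an
// illustration with hypothetical names:
#include <atomic>
#include <cstddef>
struct OverflowNodeSketch { OverflowNodeSketch* next; };
static std::atomic<OverflowNodeSketch*> sketch_overflow_list(NULL);
static OverflowNodeSketch* take_prefix_sketch(size_t num) {
  // Detach the entire list atomically.
  OverflowNodeSketch* head = sketch_overflow_list.exchange(NULL);
  if (head == NULL) {
    return NULL;                     // nothing to take
  }
  // Walk down at most num elements; everything past that is the suffix.
  OverflowNodeSketch* tail = head;
  for (size_t i = 1; i < num && tail->next != NULL; i++) {
    tail = tail->next;
  }
  OverflowNodeSketch* suffix = tail->next;
  tail->next = NULL;                 // the prefix now belongs to the caller
  if (suffix != NULL) {
    // Other threads may have pushed in the meantime, so splice the suffix
    // back in front of whatever the list now holds, using a CAS loop.
    OverflowNodeSketch* suffix_tail = suffix;
    while (suffix_tail->next != NULL) {
      suffix_tail = suffix_tail->next;
    }
    OverflowNodeSketch* cur = sketch_overflow_list.load();
    do {
      suffix_tail->next = cur;
    } while (!sketch_overflow_list.compare_exchange_weak(cur, suffix));
  }
  return head;
}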
int no_of_gc_threads) {
if (_overflow_list == NULL) {
return false;
}
// Grab the entire list; we'll put back a suffix
// Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
// set to ParallelGCThreads.
// If the list is busy, we spin for a short while,
// sleeping between attempts to get the list.
if (_overflow_list == NULL) {
// Nothing left to take
return false;
} else if (_overflow_list != BUSY) {
// Try and grab the prefix
}
}
// If the list was found to be empty, or we spun long
// enough, we give up and return empty-handed. If we leave
// the list in the BUSY state below, it must be the case that
// some other thread holds the overflow list and will set it
// to a non-BUSY state in the future.
// Nothing to take or waited long enough
// Write back the NULL in case we overwrote it with BUSY above
// and it is still the same value.
}
return false;
}
// Walk down the first "num" objects, unless we reach the end.
// We have "num" or fewer elements in the list, so there
// is nothing to return to the global list.
// Write back the NULL in lieu of the BUSY we wrote
// above, if it is still the same value.
if (_overflow_list == BUSY) {
}
} else {
// Chop off the suffix and return it to the global list.
// It's possible that the list is still in the empty(busy) state
// we left it in a short while ago; in that case we may be
// able to place back the suffix without incurring the cost
// of a walk down the list.
bool attached = false;
if (cur_overflow_list == observed_overflow_list) {
attached = true;
break;
} else cur_overflow_list = observed_overflow_list;
}
if (!attached) {
// Too bad, someone else sneaked in (at least) an element; we'll need
// to do a splice. Find tail of suffix so we can prepend suffix to global
// list.
"Tautology");
do {
if (cur_overflow_list != BUSY) {
// Do the splice ...
} else { // cur_overflow_list == BUSY
}
// ... and try to place spliced list back on overflow_list ...
} while (cur_overflow_list != observed_overflow_list);
// ... until we have succeeded in doing so.
}
}
// Push the prefix elements on work_q
NOT_PRODUCT(ssize_t n = 0;)
NOT_PRODUCT(n++;)
}
#ifndef PRODUCT
#endif
return true;
}
// Single-threaded
_overflow_list = p;
}
// Multi-threaded; use CAS to prepend to overflow list
do {
if (cur_overflow_list != BUSY) {
} else {
}
} while (cur_overflow_list != observed_overflow_list);
}
// Single threaded
// General Note on GrowableArray: pushes may silently fail
// because we are (temporarily) out of C-heap for expanding
// the stack. The problem is quite ubiquitous and affects
// a lot of code in the JVM. The prudent thing for GrowableArray
// to do (for now) is to exit with an error. However, that may
// be too draconian in some cases because the caller may be
// able to recover without much harm. For such cases, we
// should probably introduce a "soft_push" method which returns
// an indication of success or failure with the assumption that
// the caller may be able to recover from a failure; code in
// the VM can then be changed, incrementally, to deal with such
// failures where possible, thus, incrementally hardening the VM
// in such low resource situations.
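// A sketch of the "soft_push" idea suggested above (hypothetical API, not
// GrowableArray's): attempt the push and report failure to the caller
// instead of exiting, so callers that can degrade gracefully get the chance.
#include <new>
#include <vector>
template <typename T>
static bool soft_push_sketch(std::vector<T>& a, const T& elem) {
  try {
    a.push_back(elem);
    return true;
  } catch (const std::bad_alloc&) {
    return false;                    // caller decides how to recover
  }
}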
"bijection");
}
// Single threaded
if (m->must_be_preserved(p)) {
preserve_mark_work(p, m);
}
}
if (m->must_be_preserved(p)) {
// Even though we read the mark word without holding
// the lock, we are assured that it will not change
// because we "own" this oop, so no other thread can
// be trying to push it on the overflow list; see
// the assertion in preserve_mark_work() that checks
// that m == p->mark().
preserve_mark_work(p, m);
}
}
// We should be able to do this multi-threaded,
// a chunk of stack being a task (this is
// correct because each oop only ever appears
// once in the overflow list. However, it's
// not very easy to completely overlap this with
// other operations, so will generally not be done
// until all work's been completed. Because we
// expect the preserved oop stack (set) to be small,
// it's probably fine to do this single-threaded.
// We can explore cleverer concurrent/overlapped/parallel
// processing of preserved marks if we feel the
// need for this in the future. Stack overflow should
// be so rare in practice and, when it happens, its
// effect on performance so great that this will
// likely just be in the noise anyway.
void CMSCollector::restore_preserved_marks_if_any() {
"world should be stopped");
"should be single-threaded");
"bijection");
while (!_preserved_oop_stack.is_empty()) {
"Set when taken from overflow list");
p->set_mark(m);
}
"stacks were cleared above");
}
#ifndef PRODUCT
bool CMSCollector::no_preserved_marks() const {
}
#endif
{
"Wrong type for size policy");
return size_policy;
}
if (cur_promo_size < desired_promo_size) {
if (PrintAdaptiveSizePolicy && Verbose) {
}
} else if (desired_promo_size < cur_promo_size) {
if (PrintAdaptiveSizePolicy && Verbose) {
}
}
}
"Wrong kind of counters");
return counters;
}
if (UsePerfData) {
"Wrong gc statistics type");
}
}
if (UsePerfData) {
"Wrong gc statistics type");
}
}
// The desired expansion delta is computed so that:
// . desired free percentage or greater is used
// If incremental collection failed, we just want to expand
// to the limit.
if (incremental_collection_failed()) {
return;
}
"Wrong type of heap");
"Wrong type of young generation");
cur_eden);
// Record the new size of the space in the cms generation
// that is available for promotions. This is temporary.
// It should be the desired promo size.
if (UsePerfData) {
}
}
if (chunk_at_end == NULL) {
// No room to shrink
if (PrintGCDetails && Verbose) {
" chunk_at_end " PTR_FORMAT,
}
return;
} else {
// Find the chunk at the end of the space and determine
// how much it can be shrunk.
"Inconsistent chunk at end of space");
// Shrink the underlying space
if (PrintGCDetails && Verbose) {
" desired_bytes " SIZE_FORMAT
" shrinkable_size_in_bytes " SIZE_FORMAT
" aligned_shrinkable_size_in_bytes " SIZE_FORMAT
" bytes " SIZE_FORMAT,
" unallocated_start " SIZE_FORMAT,
}
// If the space did shrink (shrinking is not guaranteed),
// shrink the chunk at the end by the appropriate amount.
// Have to remove the chunk from the dictionary because it is changing
// size and might be someplace elsewhere in the dictionary.
// Get the chunk at end, shrink it, and put it
// back.
"Shrink is too large");
// update the space and generation capacity counters
if (UsePerfData) {
}
if (Verbose && PrintGCDetails) {
}
}
"Inconsistency at end of space");
"Shrinking is inconsistent");
return;
}
}
// Transfer some number of overflown objects to usual marking
// stack. Return true if some objects were transferred.
"If list is not empty, we should have taken something");
"If we took something, it should now be on our stack");
return res;
}
// It can't have been dead in a previous cycle
} else {
}
}
// Could be 0, if the block size could not be computed without stalling.
return res;
}
TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
switch (phase) {
case CMSCollector::InitialMarking:
initialize(true /* fullGC */ ,
cause /* cause of the GC */,
true /* recordGCBeginTime */,
true /* recordPreGCUsage */,
false /* recordPeakUsage */,
false /* recordPostGCusage */,
true /* recordAccumulatedGCTime */,
false /* recordGCEndTime */,
false /* countCollection */ );
break;
case CMSCollector::FinalMarking:
initialize(true /* fullGC */ ,
cause /* cause of the GC */,
false /* recordGCBeginTime */,
false /* recordPreGCUsage */,
false /* recordPeakUsage */,
false /* recordPostGCusage */,
true /* recordAccumulatedGCTime */,
false /* recordGCEndTime */,
false /* countCollection */ );
break;
case CMSCollector::Sweeping:
initialize(true /* fullGC */ ,
cause /* cause of the GC */,
false /* recordGCBeginTime */,
false /* recordPreGCUsage */,
true /* recordPeakUsage */,
true /* recordPostGCusage */,
false /* recordAccumulatedGCTime */,
true /* recordGCEndTime */,
true /* countCollection */ );
break;
default:
}
}