/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gSpaceCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/generation.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/taskqueue.hpp"
#include "utilities/yieldingWorkgroup.hpp"
// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
// style. We assume, for now, that this generation is always the
// seniormost generation (modulo the PermGeneration), and for simplicity
// in the first implementation, that this generation is a single compactible
// space. Neither of these restrictions appears essential, and will be
// relaxed in the future when more time is available to implement the
// greater generality (and there's a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.
class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class CMSTracer;
class ConcurrentGCTimer;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class SerialOldTracer;
// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
// we have _shifter == 0. and for the mod union table we have
// shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
friend class VMStructs;
public:
public:
// constructor
// allocates the actual storage for the map
// field getter
// locking verifier convenience function
// inquiries
// the following is one past the last word in space
// reading marks
bool isAllClear() const;
// writing marks
// For marking by parallel GC threads;
// returns true if we did, false if another thread did
void clear_all();
void clear_all_incrementally(); // Not yet implemented!!
// checks the memory region for validity
)
// iteration
}
// auxiliary support for iteration
// conversion utilities
// debugging
// is this address range covered by the bit-map?
)
};
// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
//
friend class CMSCollector; // to get at expasion stats further below
//
protected:
public:
CMSMarkStack():
_hit_limit(0),
_failed_double(0) {}
if (!isEmpty()) {
}
return NULL;
}
if (isFull()) {
return false;
} else {
return true;
}
}
bool isFull() const {
}
// "Parallel versions" of some of the above
// lock and pop
return pop();
}
// lock and push
}
// Forcibly reset the stack, losing all of its contents.
// Forcibly reset the stack, losing all of its contents:
// just rewind the top-of-stack index; storage is untouched.
void reset() { _index = 0; }
// Expand the stack, typically in response to an overflow condition
void expand();
// Compute the least valued stack element.
}
return least;
}
// Exposed here to allow stack expansion in || case
};
class CardTableRS;
class CMSParGCThreadState;
protected:
public:
};
public:
};
// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
public:
return _index;
} // exclusive
return _array[n];
}
void reset() {
_index = 0;
}
_overflows = 0;
}
// For now we do not do anything with the size
} else {
++_overflows;
"): out of bounds at overflow#" SIZE_FORMAT,
}
}
};
//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing. Most statistics are exponential averages.
//
private:
// The following are exponential averages with factor alpha:
// avg = (100 - alpha) * avg + alpha * cur_sample
//
// The durations measure: end_time[n] - start_time[n]
// The periods measure: start_time[n] - start_time[n-1]
//
// The cms period and duration include only concurrent collections; time spent
// in foreground cms collections due to System.gc() or because of a failure to
// keep up are not included.
//
// There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the
// real value, but is used only after the first period. A value of 100 is
// used for the first sample so it gets the entire weight.
unsigned int _gc0_alpha;
unsigned int _cms_alpha;
double _gc0_duration;
double _gc0_period;
double _cms_duration;
double _cms_duration_per_mb;
double _cms_period;
// Timers.
// Snapshots of the amount used in the CMS generation.
// Used to prevent the duty cycle from being reduced in the middle of a cms
// cycle.
enum {
};
unsigned int _valid_bits;
protected:
// Return a duty cycle that avoids wild oscillations, by limiting the amount
// of change between old_duty_cycle and new_duty_cycle (the latter is treated
// as a recommended value).
static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
unsigned int new_duty_cycle);
unsigned int icms_update_duty_cycle_impl();
// In support of adjusting of cms trigger ratios based on history
// of concurrent mode failure.
public:
unsigned int alpha = CMSExpAvgFactor);
// Whether or not the statistics contain valid data; higher level statistics
// cannot be called until this returns true (they require at least one young
// gen and one cms cycle to have completed).
bool valid() const;
// Record statistics.
void record_gc0_begin();
void record_cms_begin();
void record_cms_end();
// yield points.
// Basic statistics; units are seconds or bytes.
// Seconds since the last background cms cycle began or ended.
double cms_time_since_begin() const;
double cms_time_since_end() const;
// Higher level statistics--caller must check that valid() returns true before
// calling.
// Returns bytes promoted per second of wall clock time.
double promotion_rate() const;
// Returns bytes directly allocated per second of wall clock time.
double cms_allocation_rate() const;
// Rate at which space in the cms generation is being consumed (sum of the
// above two).
double cms_consumption_rate() const;
// Returns an estimate of the number of seconds until the cms generation will
// fill up, assuming no collection work is done.
double time_until_cms_gen_full() const;
// Returns an estimate of the number of seconds remaining until
// the cms generation collection should start.
double time_until_cms_start() const;
// End of higher level statistics.
// Returns the cms incremental mode duty cycle, as a percentage (0-100).
// Update the duty cycle and return the new value.
unsigned int icms_update_duty_cycle();
// Debugging.
};
// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
friend class CMSCollector;
public:
}
assert(false, "not to be invoked");
}
};
// Implements AbstractRefProcTaskExecutor for CMS.
public:
{ }
// Executes a task using worker threads.
private:
};
friend class VMStructs;
friend class ConcurrentMarkSweepThread;
friend class ConcurrentMarkSweepGeneration;
friend class CompactibleFreeListSpace;
friend class CMSParRemarkTask;
friend class CMSConcMarkingTask;
friend class CMSRefProcTaskProxy;
friend class CMSRefProcTaskExecutor;
friend class ScanMarkedObjectsAgainCarefullyClosure; // for sampling eden
friend class SurvivorSpacePrecleanClosure; // --- ditto -------
friend class PushOrMarkClosure; // to access _restart_addr
friend class Par_PushOrMarkClosure; // to access _restart_addr
friend class MarkFromRootsClosure; // -- ditto --
// ... and for clearing cards
friend class Par_MarkFromRootsClosure; // to access _restart_addr
// ... and for clearing cards
friend class Par_ConcMarkingClosure; // to access _restart_addr etc.
friend class MarkFromRootsVerifyClosure; // to access _restart_addr
friend class PushAndMarkVerifyClosure; // -- ditto --
friend class MarkRefsIntoAndScanClosure; // to access _overflow_list
friend class PushAndMarkClosure; // -- ditto --
friend class Par_PushAndMarkClosure; // -- ditto --
friend class CMSKeepAliveClosure; // -- ditto --
friend class CMSDrainMarkingStackClosure; // -- ditto --
friend class CMSInnerParMarkAndPushClosure; // -- ditto --
friend class ReleaseForegroundGC; // to access _foregroundGCShouldWait
friend class VM_CMS_Operation;
friend class VM_CMS_Initial_Mark;
friend class VM_CMS_Final_Remark;
friend class TraceCMSMemoryManagerStats;
private:
}
// Overflow list of grey objects, threaded through mark-word
// Manipulated with CAS in the parallel/multi-threaded case.
// The following array-pair keeps track of mark words
// displaced for accommodating overflow list above.
// This code will likely be revisited under RFE#4922830.
int* _hash_seed;
// In support of multi-threaded concurrent phases
// Performance Counters
// Initialization Errors
// In support of ExplicitGCInvokesConcurrent
static bool _full_gc_requested;
unsigned int _collection_count_start;
// Should we unload classes this concurrent cycle?
bool _should_unload_classes;
unsigned int _concurrent_cycles_since_last_unload;
unsigned int concurrent_cycles_since_last_unload() const {
}
// Did we (allow) unload classes in the previous concurrent cycle?
// Did we (allow) class unloading in the most recent concurrent cycle?
// A zero cycles-since-last-unload count means exactly that.
bool unloaded_classes_last_cycle() const {
  return 0 == concurrent_cycles_since_last_unload();
}
// Root scanning options for perm gen
// Verification support
void verify_after_remark_work_1();
void verify_after_remark_work_2();
// true if any verification flag is on.
bool _verifying;
// Collector policy
// XXX Move these to CMSStats ??? FIX ME !!!
// padded decaying average estimates of the above
bool _cms_start_registered;
void register_gc_end();
void save_heap_summary();
protected:
// CMS marking support structures
// to revisit
// Counters in support of marking stack / work queue overflow handling:
// a non-zero value indicates certain types of overflow events during
// the current CMS cycle and could lead to stack resizing efforts at
// an opportune future time.
// ("Weak") Reference processing support
// keep this textually after _markBitMap and _span; c'tor dependency
// CMS abstract state machine
// initial_state: Idling
// next_state(Idling) = {Marking}
// next_state(Marking) = {Precleaning, Sweeping}
// next_state(Precleaning) = {AbortablePreclean, FinalMarking}
// next_state(AbortablePreclean) = {FinalMarking}
// next_state(FinalMarking) = {Sweeping}
// next_state(Sweeping) = {Resizing}
// next_state(Resizing) = {Resetting}
// next_state(Resetting) = {Idling}
// The numeric values below are chosen so that:
// . _collectorState <= Idling == post-sweep && pre-mark
// . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
// precleaning || abortablePreclean
public:
enum CollectorState {
Resizing = 0,
};
protected:
// Signalling/State related to coordination between fore- and background GC
// Note: When the baton has been passed from background GC to foreground GC,
// _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
// wants to go active
// yet passed the baton to the foreground GC
// Support for CMSScheduleRemark (abortable preclean)
bool _abort_preclean;
bool _start_sampling;
int _numYields;
// number of full gc's since the last concurrent gc.
// occupancy used for bootstrapping stats
double _bootstrap_occupancy;
// timer
// Timing, allocation and promotion statistics, used for scheduling.
// Allocation limits installed in the young gen, used only in
// CMSIncrementalMode. When an allocation in the young gen would cross one of
// these limits, the cms generation is notified and the cms thread is started
// or stopped, respectively.
enum CMS_op_type {
};
// Support for parallelizing Eden rescan in CMS remark phase
void sample_eden(); // ... sample Eden space top
private:
// Support for parallelizing young gen rescan in CMS remark phase
// Support for parallelizing survivor space rescan
// Support for marking stack overflow handling
int no_of_gc_threads);
void push_on_overflow_list(oop p);
void par_push_on_overflow_list(oop p);
// the following is, obviously, not, in general, "MT-stable"
bool overflow_list_is_empty() const;
void preserve_mark_if_necessary(oop p);
void par_preserve_mark_if_necessary(oop p);
void restore_preserved_marks_if_any();
NOT_PRODUCT(bool no_preserved_marks() const;)
// in support of testing overflow code
NOT_PRODUCT(int _overflow_counter;)
// CMS work methods
// a return value of false indicates failure due to stack overflow
public: // FIX ME!!! only for testing
private:
// concurrent precleaning work
// Does precleaning work, returning a quantity indicative of
// the amount of "useful work" done.
void abortable_preclean(); // Preclean while looking for possible abort
void initialize_sequential_subtasks_for_young_gen_rescan(int i);
// Helper function for above; merge-sorts the per-thread plab samples
// Resets (i.e. clears) the per-thread plab sample vectors
void reset_survivor_plab_arrays();
// final (second) checkpoint work
bool init_mark_was_synchronous);
// work routine for parallel version of remark
void do_remark_parallel();
// work routine for non-parallel version of remark
void do_remark_non_parallel();
// reference processing work routine (during second checkpoint)
// concurrent sweeping work
// (concurrent) resetting of support data structures
// Clear _expansion_cause fields of constituent generations
void clear_expansion_cause();
// An auxiliary method used to record the ends of
// used regions of each generation to limit the extent of sweep
void save_sweep_limits();
// Resize the generations included in the collector.
void compute_new_size();
// A work method used by foreground collection to determine
// what type of collection (compacting or not, continuing or fresh)
// it should do.
bool* should_compact, bool* should_start_over);
// A work method used by the foreground collector to do
// a mark-sweep-compact.
void do_compaction_work(bool clear_all_soft_refs);
// A work method used by the foreground collector to do
// a mark-sweep, after taking over from a possibly on-going
// concurrent mark-sweep collection.
void do_mark_sweep_work(bool clear_all_soft_refs,
// Work methods for reporting concurrent mode interruption or failure
bool is_external_interruption();
// If the background GC is active, acquire control from the background
// GC and do the collection.
// For synchronizing passing of control from background to foreground
// GC. waitForForegroundGC() is called by the background
// collector. If it had to wait for a foreground collection,
// it returns true and the background collection should assume
// that the collection was finished by the foreground
// collector.
bool waitForForegroundGC();
// Incremental mode triggering: recompute the icms duty cycle and set the
// allocation limits in the young gen.
void icms_update_allocation_limits();
public:
void ref_processor_init();
bool should_abort_preclean() const; // Whether preclean should be aborted.
size_t get_eden_used() const;
size_t get_eden_capacity() const;
// locking checks
NOT_PRODUCT(static bool have_cms_token();)
// XXXPERM bool should_collect(bool full, size_t size, bool tlab);
bool shouldConcurrentCollect();
bool clear_all_soft_refs,
bool tlab);
// In support of ExplicitGCInvokesConcurrent
// Should we unload classes in a particular concurrent cycle?
// Accessor: whether class unloading was selected for this concurrent cycle.
bool should_unload_classes() const { return _should_unload_classes; }
bool update_should_unload_classes();
// Object is dead if not marked and current phase is sweeping.
// After a promotion (of "start"), do any necessary marking.
// If "par", then it's being done by a parallel GC thread.
// The last two args indicate if we need precise marking
// and if so the size of the object so it can be dirtied
// in its entirety.
void getFreelistLocks() const;
void releaseFreelistLocks() const;
bool haveFreelistLocks() const;
// GC prologue and epilogue
void gc_prologue(bool full);
void gc_epilogue(bool full);
if (_collectorState <= Idling) {
// gc not in progress
return _time_of_last_gc;
} else {
// collection in progress
return now;
}
}
// Support for parallel remark of survivor space
void* get_data_recorder(int thr_num);
// main CMS steps and related support
void checkpointRootsInitial(bool asynch);
// due to stack overflow
void preclean();
bool init_mark_was_synchronous);
// Check that the currently executing thread is the expected
// one (foreground collector or background collector).
// XXXPERM void print_statistics() PRODUCT_RETURN;
// Performance Counter Support
// timer stuff
// Convenience methods that check whether CMSIncrementalMode is enabled and
// forward to the corresponding methods in ConcurrentMarkSweepThread.
static void start_icms();
static void stop_icms(); // Called at the end of the cms cycle.
static void disable_icms(); // Called before a foreground collection.
static void enable_icms(); // Called after a foreground collection.
void icms_wait(); // Called at yield points.
// Adaptive size policy
// debugging
void verify();
bool verify_after_remark();
// convenience methods in support of debugging
// accessors
// Get the bit map with a perm gen "deadness" information.
// Initialization errors
};
public:
enum Cause {
};
// Return a string describing the cause of the expansion.
};
friend class VMStructs;
friend class ConcurrentMarkSweepThread;
friend class ConcurrentMarkSweep;
friend class CMSCollector;
protected:
// Performance Counters
// Words directly allocated, used by CMSStats.
// Non-product stat counters
)
// Used for sizing decisions
// Accessor: has an incremental collection failed?
bool incremental_collection_failed() { return _incremental_collection_failed; }
// Record that an incremental collection has failed.
void set_incremental_collection_failed() { _incremental_collection_failed = true; }
// Reset the incremental-collection-failed indicator.
void clear_incremental_collection_failed() { _incremental_collection_failed = false; }
// accessors
private:
// For parallel young-gen GC support.
// Reason generation was expanded
// In support of MinChunkSize being larger than min object size
const double _dilatation_factor;
enum CollectionTypes {
};
// Fraction of current occupancy at which to start a CMS collection which
// will collect this generation (at least).
double _initiating_occupancy;
protected:
// Shrink generation by specified size (returns false if unable to shrink)
// Update statistics for GC
// Maximum available space in the generation (including uncommitted)
// space.
size_t max_available() const;
// getter and initializer for _initiating_occupancy field.
public:
bool use_adaptive_freelists,
// Accessors
}
Mutex* freelistLock() const;
// Adaptive size policy
bool refs_discovery_is_atomic() const { return false; }
// Reference discovery is multi-threaded exactly when more than one
// concurrent GC thread is configured.
bool refs_discovery_is_mt() const {
// Note: CMS does MT-discovery during the parallel-remark
// phases. Use ReferenceProcessorMTMutator to make refs
// discovery MT-safe during such phases or other parallel
// discovery phases in the future. This may all go away
// eventually if, as suspected, it is sufficiently
// rare that the cost of the CAS's involved is in the
// noise. That's a measurement that should be done, and
// the code simplified if that turns out to be the case.
return ConcGCThreads > 1;
}
// Override
virtual void ref_processor_init();
// Grow generation by specified size (returns false if unable to grow)
// Grow generation to reserved size.
bool grow_to_reserved();
// Space enquiries
size_t contiguous_available() const;
size_t unsafe_max_alloc_nogc() const;
// over-rides
MemRegion used_region() const;
MemRegion used_region_at_save_marks() const;
// Does a "full" (forced) collection invoked on this generation collect
// all younger generations as well? Note that the second conjunct is a
// hack to allow the collection of the younger gen first if the flag is
// set. This is better than using the policy's should_collect_gen0_first()
// since that causes us to do an extra unnecessary pair of restart-&-stop-world.
virtual bool full_collects_younger_generations() const {
  // Younger gens are collected as part of a full collection only when
  // compacting at full collections, and then only if the young gen is
  // not being collected first by explicit request.
  if (!UseCMSCompactAtFullCollection) return false;
  return !CollectGen0First;
}
// Support for compaction
CompactibleSpace* first_compaction_space() const;
// Adjust quantites in the generation affected by
// the compaction.
void reset_after_compaction();
// Allocation support
}
// Incremental mode triggering.
// Used by CMSStats to track direct allocation. The value is sampled and
// reset after each young gen collection.
// Overrides for parallel promotion.
// This one should not be called for CMS.
virtual void par_promote_alloc_undo(int thread_num,
virtual void par_promote_alloc_done(int thread_num);
virtual void par_oop_since_save_marks_iterate_done(int thread_num);
// Inform this (non-young) generation that a promotion failure was
// encountered during a collection of a younger generation that
// promotes into this generation.
virtual void promotion_failure_occurred();
virtual bool should_concurrent_collect() const;
virtual bool is_too_full() const;
bool clear_all_soft_refs,
bool tlab);
bool tlab,
bool parallel = false);
// GC prologue and epilogue
void gc_prologue(bool full);
void gc_epilogue(bool full);
void gc_epilogue_work(bool full);
// Time since last GC of this generation
}
}
// Allocation failure
// Iteration support and related enquiries
void save_marks();
bool no_allocs_since_save_marks();
// Iteration support specific to CMS generations
void save_sweep_limit();
// More iteration support
// Need to declare the full complement of closures, whether we'll
// override them or not, or get message from the compiler:
// oop_since_save_marks_iterate_nv hides virtual function...
// Smart allocation XXX -- move to CFLSpace?
void setNearLargestChunk();
// Get the chunk at the end of the space. Delegates to
// the space.
// Overriding of unused functionality (sharing not yet supported with CMS)
void pre_adjust_pointers();
void post_compact();
// Debugging
void prepare_for_verify();
void verify();
// Performance Counters support
virtual void update_counters();
void initialize_performance_counters();
// Support for parallel remark of survivor space
//Delegate to collector
}
// Printing
const char* name() const;
void print() const;
void printOccupancy(const char* s);
bool must_be_youngest() const { return false; }
bool must_be_oldest() const { return true; }
void compute_new_size();
void rotate_debug_collection_type();
};
// Return the size policy from the heap's collector
// policy casted to CMSAdaptiveSizePolicy*.
CMSAdaptiveSizePolicy* cms_size_policy() const;
// Resize the generation based on the adaptive size
// policy.
// Return the GC counters from the collector policy
public:
virtual void compute_new_size();
bool use_adaptive_freelists,
virtual void update_counters();
};
//
// Closures of various sorts used by CMS to accomplish its work
//
// This closure is used to check that a certain set of oops is empty.
public:
};
// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
bool _yield;
int _skipBits;
DEBUG_ONLY(bool _verifying;)
public:
bool should_yield, bool verifying = false);
inline void do_yield_check();
private:
void do_yield_work();
};
// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of The serial version
// above, but i have not had the time to refactor things cleanly.
// That will be done for Dolphin.
bool _yield;
int _skip_bits;
public:
bool should_yield);
inline void do_yield_check();
private:
void do_yield_work();
bool get_work_from_overflow_stack();
};
// The following closures are used to do certain kinds of verification of
// CMS marking.
protected:
template <class T> inline void do_oop_work(T *p) {
}
public:
// Deal with a stack overflow condition
};
public:
};
// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
public:
guarantee(false, "Should not have a 1 bit");
return true;
}
};
// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via
// MarkFromDirtyCardsClosure below. It uses either
// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
// declared in genOopClosures.hpp to accomplish some of its work.
// In the parallel case the bitMap is shared, so access to
// it needs to be suitably synchronized for updates by embedded
// closures that update it; however, this closure itself only
// reads the bit_map and because it is idempotent, is immune to
// reading stale values.
#ifdef ASSERT
union {
};
#endif // ASSERT
bool _parallel;
union {
};
public:
#ifdef ASSERT
#endif // ASSERT
_parallel(false),
_scan_closure(cl) { }
#ifdef ASSERT
#endif // ASSERT
_parallel(true),
_par_scan_closure(cl) { }
guarantee(false, "Call do_object_b(oop, MemRegion) instead");
}
guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
return false;
}
};
// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It invokes
// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
// In the parallel case, the bit map is shared and requires
// synchronized access.
public:
_num_dirty_cards(0),
_num_dirty_cards(0),
};
// This closure is used in the non-product build to check
// that there are no MemRegions with a certain property.
guarantee(false, "Should never be here");
}
};
// This closure is used during the precleaning phase
// to "carefully" rescan marked objects on dirty cards.
// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
// to accomplish some of its work.
bool _yield;
public:
bool should_yield):
}
guarantee(false, "call do_object_careful instead");
}
guarantee(false, "Unexpected caller");
return 0;
}
_freelistLock = m;
}
private:
inline bool do_yield_check();
void do_yield_work();
};
bool _yield;
unsigned int _before_count;
public:
unsigned int before_count,
bool should_yield):
{ }
guarantee(false, "call do_object_careful instead");
}
guarantee(false, "Unexpected caller");
return 0;
}
private:
inline void do_yield_check();
void do_yield_work();
};
// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
// left hand chunk (LHC) - block of one or more chunks currently being
// coalesced. The LHC is available for coalescing with a new chunk.
// right hand chunk (RHC) - block that is currently being swept that is
// free or garbage that can be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC
// because we do not expect newly garbage blocks
// eligible for sweeping past that address.
// generation)
// midst of a free run
bool _freeRangeInFreeLists;
// Often, we have just found
// a free chunk and started
// a new free range; we do not
// eagerly remove this chunk from
// the free lists unless there is
// a possibility of coalescing.
// When true, this flag indicates
// that the _freeFinger below
// points to a potentially free chunk
// that may still be in the free lists
bool _lastFreeRangeCoalesced;
// free range contains chunks
// coalesced
bool _yield;
// Whether sweeping should be
// done with yields. For instance
// when done by the foreground
// collector we shouldn't yield.
// pointer to the "left hand
// chunk"
// When _inFreeRange is set, this
// indicates the accumulated size
// of the "left hand chunk"
)
private:
// Code that is common to a free chunk or garbage when
// encountered during sweeping.
// Process a free chunk during sweeping.
// Work method called when processing an already free or a
// freshly garbage chunk to do a lookahead and possibly a
// preemptive flush if crossing over _limit.
// Process a garbage chunk during sweeping.
// Process a live chunk during sweeping.
// Accessors.
// Initialize a free range.
// Return this chunk to the free lists.
// Check if we should yield and do so when necessary.
// Yield
public:
};
// Closures related to weak references processing
// During CMS' weak reference processing, this is a
// work-routine/closure used to complete transitive
// marking of objects as live after a certain point
// in which an initial set has been completely accumulated.
// This closure is currently used both during the final
// remark stop-world phase, as well as during the concurrent
// precleaning of the discovered reference lists.
bool _concurrent_precleaning;
public:
bool cpc):
"Mismatch");
}
void do_void();
};
// A parallel version of CMSDrainMarkingStackClosure above.
public:
public:
void do_void();
};
// Allow yielding or short-circuiting of reference list
// precleaning work.
void do_yield_work();
public:
_collector(collector) {}
virtual bool should_return();
};
// Convenience class that locks free list locks for given CMS collector
private:
public:
}
~FreelistLocker() {
}
};
// Mark all dead objects in a given space.
public:
const CompactibleFreeListSpace* sp,
};
public:
};
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP