/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/g1/bufferingOopClosure.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1EvacFailure.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/vmThread.hpp"
// turn it on so that the contents of the young list (scan-only /
// to-be-collected) are printed at "strategic" points before / during
// / after the collection --- this is useful for debugging
#define YOUNG_LIST_VERBOSE 0
// CURRENT STATUS
// This file is under construction. Search for "FIXME".
// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
// serialized by acquiring the HeapLock. This happens in mem_allocate
// and allocate_new_tlab, which are the "entry" points to the
// allocation code from the rest of the JVM. (Note that this does not
// apply to TLAB allocation, which is not part of this interface: it
// is done by clients of this interface.)
// Notes on implementation of parallelism in different tasks.
//
// G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
// The number of GC workers is passed to heap_region_par_iterate_chunked().
// It does use run_task() which sets _n_workers in the task.
// G1ParTask executes g1_process_strong_roots() ->
// SharedHeap::process_strong_roots() which calls eventually to
// CardTableModRefBS::par_non_clean_card_iterate_work() which uses
// SequentialSubTasksDone. SharedHeap::process_strong_roots() also
// directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
//
// Local to this file.
bool _concurrent;
public:
{}
// This path is executed by the concurrent refine or mutator threads,
// concurrently, and so we do not care if card_ptr contains references
// that point into the collection set.
// Caller will actually yield.
return false;
}
// Otherwise, we finished successfully; return true.
return true;
}
};
int _calls;
public:
_calls(0)
{
for (int i = 0; i < 256; i++) _histo[i] = 0;
}
_calls++;
*card_ptr = -1;
}
return true;
}
void print_histo() {
for (int i = 0; i < 256; i++) {
if (_histo[i] != 0) {
}
}
}
};
int _calls;
public:
_calls(0)
{
}
_calls++;
*card_ptr = 0;
}
return true;
}
};
public:
return true;
}
};
}
++_length;
}
if (_survivor_head == NULL) {
_survivor_tail = hr;
}
_survivor_head = hr;
}
list->set_not_young();
}
}
_length = 0;
_survivor_length = 0;
}
bool ret = true;
"incorrectly tagged (y: %d, surv: %d)",
ret = false;
}
++length;
}
if (!ret) {
}
return ret;
}
bool ret = true;
if (_length != 0) {
_length);
ret = false;
}
if (check_sample && _last_sampled_rs_lengths != 0) {
ret = false;
}
ret = false;
}
if (!ret) {
}
return ret;
}
void
_sampled_rs_lengths = 0;
}
bool
}
void
// The current region may not yet have been added to the
// incremental collection set (it gets added when it is
// retired as the current allocation region).
if (_curr->in_collection_set()) {
// Update the collection set policy information for this region
}
// gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
}
}
void
// Add survivor regions to SurvRateGroup.
int young_index_in_cset = 0;
// The region is a non-empty survivor so let's add it to
// the incremental collection set for the next evacuation
// pause.
young_index_in_cset += 1;
}
if (_survivor_head != NULL) {
}
// Don't clear the survivor list handles until the start of
// the next evacuation pause - we need it in order to re-tag
// the survivor regions from this evacuation pause as 'young'
// at the start of the next.
}
}
}
}
{
// Claim the right to put the region on the dirty cards region list
// by installing a self pointer.
NULL);
do {
// Put the region to the dirty cards region list.
next = (HeapRegion*)
"hr->get_next_dirty_cards_region() != hr");
// The last region in the list points to itself.
} else {
}
}
}
}
}
{
do {
return NULL;
}
// The last region.
}
head);
return hr;
}
}
#ifdef ASSERT
// A region is added to the collection set as it is retired
// so an address p can point to a region which will be in the
// collection set but has not yet been retired. This method
// therefore is only accurate during a GC pause after all
// regions have been retired. It is used for debugging
// to check if an nmethod has references to objects that can
// be moved during a partial collection. Though it can be
// inaccurate, it is sufficient for G1 because the conservative
// implementation of is_scavengable() for G1 will indicate that
// all nmethods must be scanned during a partial collection.
}
#endif
// Returns true if the reference points to an object that
// can move in an incremental collection.
// perm gen (or null)
return false;
} else {
return !hr->isHumongous();
}
}
// Count the dirty cards at the start.
// First clear the logged cards.
dcqs.iterate_closure_all_threads(false);
clear.print_histo();
// Now ensure that there's no dirty cards.
if (count2.n() != 0) {
count2.n(), orig_count);
}
dcqs.iterate_closure_all_threads(false);
"Or else mechanism is broken.");
if (count3.n() != orig_count) {
orig_count, count3.n());
}
}
// Private class members.
// Private methods.
if (!_secondary_free_list.is_empty()) {
if (G1ConcRegionFreeingVerbose) {
"secondary_free_list has %u entries",
}
// It looks as if there are free regions available on the
// secondary_free_list. Let's move them to the free_list and try
// again to allocate from it.
"empty we should have moved at least one entry to the free_list");
if (G1ConcRegionFreeingVerbose) {
}
return res;
}
// Wait here until we get notified either when (a) there are no
// more free regions coming or (b) some regions have been moved on
// the secondary_free_list.
}
if (G1ConcRegionFreeingVerbose) {
"could not allocate from secondary_free_list");
}
return NULL;
}
"the only time we use this to allocate a humongous region is "
"when we are allocating a single humongous region");
if (G1StressConcRegionFreeing) {
if (!_secondary_free_list.is_empty()) {
if (G1ConcRegionFreeingVerbose) {
"forced to look at the secondary_free_list");
}
return res;
}
}
}
if (G1ConcRegionFreeingVerbose) {
"res == NULL, trying the secondary_free_list");
}
}
// Currently, only attempts to allocate GC alloc regions set
// do_expand to true. So, we should only reach here during a
// safepoint. If this assumption changes we might have to
// reconsider the use of _expand_heap_after_alloc_failure.
"attempt heap expansion",
ergo_format_reason("region allocation request failed")
ergo_format_byte("allocation request"),
// Given that expand() succeeded in expanding the heap, and we
// always expand the heap by an amount aligned to the heap
// region size, the free list should in theory not be empty. So
// it would probably be OK to use remove_head(). But the extra
// check for NULL is unlikely to be a performance issue here (we
// just expanded the heap!) so let's just be conservative and
// use remove_head_or_null().
} else {
_expand_heap_after_alloc_failure = false;
}
}
return res;
}
if (num_regions == 1) {
// Only one region to allocate, no need to go through the slower
// path. The caller will attempt the expansion if this fails, so
// let's not try to expand here too.
} else {
}
} else {
// We can't allocate humongous regions while cleanupComplete() is
// running, since some of the regions we find to be empty might not
// yet be added to the free list and it is not straightforward to
// know which list they are on so that we can remove them. Note
// that we only need to do this if we need to allocate more than
// one region to satisfy the current humongous allocation
// request. If we are only allocating one region we use the common
// region allocation code (see above).
if (free_regions() >= num_regions) {
if (first != G1_NULL_HRS_INDEX) {
hr->set_pending_removal(true);
}
}
}
}
return first;
}
// Index of last region in the series + 1.
// We need to initialize the region(s) we just discovered. This is
// a bit tricky given that it can happen concurrently with
// refinement threads refining cards on these regions and
// potentially wanting to refine the BOT as they are scanning
// those cards (this can happen shortly after a cleanup; see CR
// 6991377). So we have to set up the region(s) carefully and in
// a specific order.
// The word size sum of all the regions we will allocate.
// This will be the "starts humongous" region.
// The header of the new object will be placed at the bottom of
// the first region.
// This will be the new end of the first region in the series that
// should also match the end of the last region in the series.
// This will be the new top of the first region that will reflect
// this allocation.
// First, we need to zero the header of the space that we will be
// allocating. When we update top further down, some refinement
// threads might try to scan the region. By zeroing the header we
// ensure that any thread that will try to scan the region will
// come across the zero klass word and bail out.
//
// NOTE: It would not have been correct to have used
// CollectedHeap::fill_with_object() and make the space look like
// an int array. The thread that is doing the allocation will
// later update the object header to a potentially different array
// type and, for a very short period of time, the klass and length
// fields will be inconsistent. This could cause a refinement
// thread to calculate the object size incorrectly.
// We will set up the first region as "starts humongous". This
// will also update the BOT covering all the regions to reflect
// that there is a single object that starts at the bottom of the
// first region.
// Then, if there are any, we will set up the "continues
// humongous" regions.
}
// If we have "continues humongous" regions (hr != NULL), then the
// end of the last one should match new_end.
// Up to this point no concurrent thread would have been able to
// do any scanning on any region in this series. All the top
// fields still point to bottom, so the intersection between
// [bottom,top] and [card_start,card_end] will be empty. Before we
// update the top fields, we'll do a storestore to make sure that
// no thread sees the update to top before the zeroing of the
// object header and the BOT initialization.
// Now that the BOT and the object header have been initialized,
// we can update top of the "starts humongous" region.
"new_top should be in this region");
if (_hr_printer.is_active()) {
// the series has a single humongous region
} else {
// the series has more than one humongous regions
}
}
// Now, we will update the top fields of the "continues humongous"
// regions. The reason we need to do this is that, otherwise,
// these regions would look empty and this will confuse parts of
// G1. For example, the code that looks for a consecutive number
// of empty regions will consider them empty and try to
// re-allocate them. We can extend is_empty() to also include
// !continuesHumongous(), but it is easier to just update the top
// fields here. The way we set top for all regions (i.e., top ==
// end for all regions but the last one, top == new_top for the
// last one) is actually used when we will free up the humongous
// region in free_humongous_region().
if ((i + 1) == last) {
// last continues humongous region
"new_top should fall on this region");
} else {
// not last one
}
}
// If we have continues humongous regions (hr != NULL), then the
// end of the last one should match new_end and its top should
// match new_top.
return new_obj;
}
// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
if (first == G1_NULL_HRS_INDEX) {
// The only thing we can do now is attempt expansion.
// If the number of regions we're trying to allocate for this
// object is at most the number of regions in the free suffix,
// then the call to humongous_obj_allocate_find_first() above
// should have succeeded and we wouldn't be here.
//
// We should only be trying to expand when the free suffix is
// not sufficient for the object _and_ we have some expansion
// room available.
"attempt heap expansion",
ergo_format_reason("humongous allocation request failed")
ergo_format_byte("allocation request"),
// Even though the heap was expanded, it might not have
// reached the desired size. So, we cannot assume that the
// allocation will succeed.
}
}
}
if (first != G1_NULL_HRS_INDEX) {
result =
// A successful humongous object allocation changes the used space
// information of the old generation so we need to recalculate the
// sizes and update the jstat counters here.
g1mm()->update_sizes();
}
return result;
}
unsigned int dummy_gc_count_before;
}
bool* gc_overhead_limit_was_exceeded) {
// Loop until the allocation is satisfied, or unsatisfied after GC.
unsigned int gc_count_before;
if (!isHumongous(word_size)) {
} else {
}
return result;
}
// Create the garbage collection operation...
// ...and get the VM thread to execute it.
// If the operation was successful we'll return the result even
// if it is NULL. If the allocation attempt failed immediately
// after a Full GC, it's unlikely we'll be able to allocate now.
// Allocations that take place on VM operations do not do any
// card dirtying and we have to do it here. We only have to do
// this for non-humongous allocations, though.
}
return result;
} else {
"the result should be NULL if the VM op did not succeed");
}
// Give a warning if we seem to be looping forever.
if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
}
}
return NULL;
}
unsigned int *gc_count_before_ret) {
// Make sure you read the note in attempt_allocation_humongous().
"be called for humongous allocation requests");
// We should only get here after the first-level allocation attempt
// (attempt_allocation()) failed to allocate.
// We will loop until a) we manage to successfully perform the
// allocation or b) we successfully schedule a collection which
// fails to perform the allocation. b) is the only case when we'll
// return NULL.
bool should_try_gc;
unsigned int gc_count_before;
{
false /* bot_updates */);
return result;
}
// If we reach here, attempt_allocation_locked() above failed to
// allocate a new region. So the mutator alloc region should be NULL.
if (GC_locker::is_active_and_needs_gc()) {
if (g1_policy()->can_expand_young_list()) {
// No need for an ergo verbose message here,
// can_expand_young_list() does this when it returns true.
false /* bot_updates */);
return result;
}
}
should_try_gc = false;
} else {
// The GCLocker may not be active but the GCLocker initiated
// GC may not yet have been performed (GCLocker::needs_gc()
// returns true). In this case we do not try this GC and
// wait until the GCLocker initiated GC is performed, and
// then retry the allocation.
should_try_gc = false;
} else {
// Read the GC count while still holding the Heap_lock.
should_try_gc = true;
}
}
}
if (should_try_gc) {
bool succeeded;
return result;
}
if (succeeded) {
// If we get here we successfully scheduled a collection which
// failed to allocate. No point in trying to allocate
// further. We'll just return NULL.
return NULL;
}
} else {
// The GCLocker is either active or the GCLocker initiated
// GC has not yet been performed. Stall until it is and
// then retry the allocation.
}
// We can reach here if we were unsuccessful in scheduling a
// collection (because another thread beat us to it) or if we were
// stalled due to the GC locker. In either case we should retry the
// allocation attempt in case another thread successfully
// performed a collection and reclaimed enough space. We do the
// first attempt (without holding the Heap_lock) here and the
// follow-on attempt will be at the start of the next loop
// iteration (after taking the Heap_lock).
false /* bot_updates */);
return result;
}
// Give a warning if we seem to be looping forever.
if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
warning("G1CollectedHeap::attempt_allocation_slow() "
"retries %d times", try_count);
}
}
return NULL;
}
unsigned int * gc_count_before_ret) {
// The structure of this method has a lot of similarities to
// attempt_allocation_slow(). The reason these two were not merged
// into a single one is that such a method would require several "if
// allocation is not humongous do this, otherwise do that"
// conditional paths which would obscure its flow. In fact, an early
// version of this code did use a unified method which was harder to
// follow and, as a result, it had subtle bugs that were hard to
// track down. So keeping these two methods separate allows each to
// be more readable. It will be good to keep these two in sync as
// much as possible.
"should only be called for humongous allocations");
// Humongous objects can exhaust the heap quickly, so we should check if we
// need to start a marking cycle at each humongous object allocation. We do
// the check before we do the actual allocation. The reason for doing it
// before the allocation is that we avoid having to keep track of the newly
// allocated memory while we do a GC.
word_size)) {
}
// We will loop until a) we manage to successfully perform the
// allocation or b) we successfully schedule a collection which
// fails to perform the allocation. b) is the only case when we'll
// return NULL.
bool should_try_gc;
unsigned int gc_count_before;
{
// Given that humongous objects are not allocated in young
// regions, we'll first try to do the allocation without doing a
// collection hoping that there's enough space in the heap.
return result;
}
if (GC_locker::is_active_and_needs_gc()) {
should_try_gc = false;
} else {
// The GCLocker may not be active but the GCLocker initiated
// GC may not yet have been performed (GCLocker::needs_gc()
// returns true). In this case we do not try this GC and
// wait until the GCLocker initiated GC is performed, and
// then retry the allocation.
should_try_gc = false;
} else {
// Read the GC count while still holding the Heap_lock.
should_try_gc = true;
}
}
}
if (should_try_gc) {
// If we failed to allocate the humongous object, we should try to
// do a collection pause (if we're allowed) in case it reclaims
// enough space for the allocation to succeed after the pause.
bool succeeded;
return result;
}
if (succeeded) {
// If we get here we successfully scheduled a collection which
// failed to allocate. No point in trying to allocate
// further. We'll just return NULL.
return NULL;
}
} else {
// The GCLocker is either active or the GCLocker initiated
// GC has not yet been performed. Stall until it is and
// then retry the allocation.
}
// We can reach here if we were unsuccessful in scheduling a
// collection (because another thread beat us to it) or if we were
// stalled due to the GC locker. In either case we should retry the
// allocation attempt in case another thread successfully
// performed a collection and reclaimed enough space. Give a
// warning if we seem to be looping forever.
if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
warning("G1CollectedHeap::attempt_allocation_humongous() "
"retries %d times", try_count);
}
}
return NULL;
}
bool expect_null_mutator_alloc_region) {
assert_at_safepoint(true /* should_be_vm_thread */);
"the current alloc region was unexpectedly found to be non-NULL");
if (!isHumongous(word_size)) {
false /* bot_updates */);
} else {
}
return result;
}
}
public:
if (r->continuesHumongous()) {
return false;
}
_g1h->reset_gc_time_stamps(r);
// You might think here that we could clear just the cards
// corresponding to the used region. But no: if we leave a dirty card
// in a region we might allocate into, then it would prevent that card
// from being enqueued, and cause it to be missed.
// Re: the performance cost: we shouldn't be doing full GC anyway!
return false;
}
};
}
int _worker_i;
public:
{ }
if (!r->continuesHumongous()) {
r->oop_iterate(&_cl);
}
return false;
}
};
public:
: AbstractGangTask("ParRebuildRSTask"),
{ }
}
};
private:
public:
// We only generate output for non-empty regions.
if (!hr->isHumongous()) {
} else if (hr->startsHumongous()) {
// single humongous region
} else {
}
} else {
}
}
return false;
}
: _hr_printer(hr_printer) { }
};
}
}
return verify_time_ms;
}
}
}
bool clear_all_soft_refs,
assert_at_safepoint(true /* should_be_vm_thread */);
if (GC_locker::check_active_before_gc()) {
return false;
}
{
// Timing
{
// Note: When we have a more flexible GC logging framework that
// allows us to add optional attributes to a GC log record we
// could consider timing and reporting how long we wait in the
// following two methods.
// If we start the compaction before the CM threads finish
// scanning the root regions we might trip them over as we'll
// be moving objects / updating references. So let's wait until
// they are done. By telling them to abort, they should complete
// early.
gc_prologue(true);
increment_total_collections(true /* full gc */);
// Disable discovery and empty the discovered lists
// for the CM ref processor.
// Abandon current iterations of concurrent marking and concurrent
// refinement, if any are in progress. We have to do this before
// wait_until_scan_finished() below.
concurrent_mark()->abort();
// Make sure we'll choose a new allocation region afterwards.
g1_rem_set()->cleanupHRRS();
// We should call this after we retire any currently active alloc
// regions so that all the ALLOC / RETIRE events are generated
// before the start GC event.
// We may have added regions to the current incremental collection
// set between the last GC or pause and now. We need to clear the
// incremental collection set and then start rebuilding it afresh
// after this full GC.
tear_down_region_sets(false /* free_list_only */);
g1_policy()->set_gcs_are_young(true);
// See the comments in g1CollectedHeap.hpp and
// G1CollectedHeap::ref_processing_init() about
// how reference processing currently works in G1.
// Temporarily make discovery by the STW ref processor single threaded (non-MT).
// Temporarily clear the STW ref processor's _is_alive_non_header field.
// Do collection work
{
}
rebuild_region_sets(false /* free_list_only */);
// Enqueue any discovered reference objects that have
// not been removed from the discovered lists.
// Note: since we've just done a full GC, concurrent
// marking is no longer active. Therefore we need not
// re-enable reference discovery for the CM ref processor.
// That will be done at the start of the next marking cycle.
// Since everything potentially moved, we will clear all remembered
// sets, and clear all cards. Later we will rebuild remembered
// sets. We will also reset the GC time stamps of the regions.
// Resize the heap if necessary.
if (_hr_printer.is_active()) {
// We should do this after we potentially resize the heap so
// that all the COMMIT / UNCOMMIT events are generated before
// the end GC event.
}
if (hot_card_cache->use_cache()) {
}
// Rebuild remembered sets of all regions.
if (G1CollectedHeap::use_parallel_gc_threads()) {
workers()->active_workers(),
"If not dynamic should be using all the workers");
// Set parallel threads in the heap (_n_par_threads) only
// before a parallel phase and always reset it to 0 after
// the phase so that the number of parallel threads does
// not get carried forward to a serial phase where there
// may be code that is "possibly_parallel".
ParRebuildRSTask rebuild_rs_task(this);
"Unless dynamic should use total workers");
// Use the most recent number of active workers
"Active workers not properly set");
set_par_threads(0);
} else {
}
if (true) { // FIXME
// Ask the permanent generation to adjust size for full collections
perm()->compute_new_size();
}
#ifdef TRACESPINNING
#endif
// Discard all rset updates
|| (G1DeferredRSUpdate &&
// At this point there should be no regions in the
// entire heap tagged as young.
"young list should be empty at this point");
// Update the number of full collections that have been completed.
increment_old_marking_cycles_completed(false /* concurrent */);
// Start a new incremental collection set for the next pause
// Clear the _cset_fast_test bitmap in anticipation of adding
// regions to the incremental collection set for the next
// evacuation pause.
}
// We must call G1MonitoringSupport::update_sizes() in the same scoping level
// as an active TraceMemoryManagerStats object (i.e. before the destructor for the
// TraceMemoryManagerStats is called) so that the G1 memory pools are updated
// before any GC notifications are raised.
g1mm()->update_sizes();
gc_epilogue(true);
}
}
}
return true;
}
// do_collection() will return whether it succeeded in performing
// the GC. Currently, there is no facility on the
// do_full_collection() API to notify the caller that the collection
// did not succeed (e.g., because it was locked out by the GC
// locker). So, right now, we'll ignore the return value.
0 /* word_size */);
}
// This code is mostly copied from TenuredGeneration.
void
// Include the current allocation, if any, and bytes that will be
// pre-allocated to support collections, as "used".
// This is enforced in arguments.cpp.
"otherwise the code below doesn't make sense");
// We don't have floating point command-line arguments
// We have to be careful here as these two calculations can overflow
// 32-bit size_t's.
// Let's make sure that they are both under the max heap size, which
// by default will make them fit into a size_t.
// We can now safely turn them into size_t's.
// This assert only makes sense here, before we adjust them
// with respect to the min and max heap size.
"maximum_desired_capacity = "SIZE_FORMAT,
// Should not be greater than the heap max size. No need to adjust
// it with respect to the heap min size as it's a lower bound (i.e.,
// we'll try to make the capacity larger than it, not smaller).
// Should not be less than the heap min size. No need to adjust it
// with respect to the heap max size as it's an upper bound (i.e.,
// we'll try to make the capacity smaller than it, not greater).
// Don't expand unless it's significant
"attempt heap expansion",
ergo_format_reason("capacity lower than "
"min desired capacity after Full GC")
ergo_format_byte("capacity")
ergo_format_byte("occupancy")
ergo_format_byte_perc("min desired capacity"),
minimum_desired_capacity, (double) MinHeapFreeRatio);
// No expansion, now see if we want to shrink
} else if (capacity_after_gc > maximum_desired_capacity) {
// Capacity too large, compute shrinking size
"attempt heap shrinking",
ergo_format_reason("capacity higher than "
"max desired capacity after Full GC")
ergo_format_byte("capacity")
ergo_format_byte("occupancy")
ergo_format_byte_perc("max desired capacity"),
maximum_desired_capacity, (double) MaxHeapFreeRatio);
}
}
bool* succeeded) {
assert_at_safepoint(true /* should_be_vm_thread */);
*succeeded = true;
// Let's attempt the allocation first.
false /* expect_null_mutator_alloc_region */);
return result;
}
// In a G1 heap, we're supposed to keep allocation from failing by
// incremental pauses. Therefore, at least for now, we'll favor
// expansion over collection. (This might change in the future if we can
// do something smarter than full collection to satisfy a failed alloc.)
return result;
}
// Expansion didn't work, we'll try to do a Full GC.
false, /* clear_all_soft_refs */
if (!gc_succeeded) {
*succeeded = false;
return NULL;
}
// Retry the allocation
true /* expect_null_mutator_alloc_region */);
return result;
}
// Then, try a Full GC that will collect all soft references.
true, /* clear_all_soft_refs */
if (!gc_succeeded) {
*succeeded = false;
return NULL;
}
// Retry the allocation once more
true /* expect_null_mutator_alloc_region */);
return result;
}
"Flag should have been handled and cleared prior to this point");
// What else? We might try synchronous finalization later. If the total
// space available is large enough for the allocation, then a more
// complete compaction phase than we've tried so far might be
// appropriate.
return NULL;
}
// Attempting to expand the heap sufficiently
// to support an allocation of the given "word_size". If
// successful, perform the allocation and return the address of the
// allocated block, or else "NULL".
assert_at_safepoint(true /* should_be_vm_thread */);
"attempt heap expansion",
ergo_format_reason("allocation request failed")
ergo_format_byte("allocation request"),
if (expand(expand_bytes)) {
false /* expect_null_mutator_alloc_region */);
}
return NULL;
}
// Update the committed mem region.
// Tell the card table about the update.
// Tell the BOT about the update.
// Tell the hot card cache about the update
}
"expand the heap",
ergo_format_byte("requested expansion amount")
ergo_format_byte("attempted expansion amount"),
// First commit the memory.
if (successful) {
// Then propagate this update to the necessary data structures.
// mr might be a smaller region than what was requested if
// expand_by() was unable to allocate the HeapRegion instances
"post-condition");
if (actual_expand_bytes < aligned_expand_bytes) {
// We could not expand _hrs to the desired size. In this case we
// need to shrink the committed space accordingly.
// First uncommit the memory.
// Then propagate this update to the necessary data structures.
}
if (_hr_printer.is_active()) {
}
}
} else {
"did not expand the heap",
ergo_format_reason("heap expansion operation failed"));
// The expansion of the virtual storage space was unsuccessful.
// Let's see if it was because we ran out of swap.
if (G1ExitOnExpansionFailure &&
// We had head room...
}
}
return successful;
}
"shrink the heap",
ergo_format_byte("requested shrinking amount")
ergo_format_byte("aligned shrinking amount")
ergo_format_byte("attempted shrinking amount"),
if (_hr_printer.is_active()) {
}
}
} else {
"did not shrink the heap",
ergo_format_reason("heap shrinking operation failed"));
}
}
// We should only reach here at the end of a Full GC which means we
// should not be holding on to any GC alloc regions. The method
// below will make sure of that and do any remaining clean up.
// Instead of tearing down / rebuilding the free lists here, we
// could instead use the remove_all_pending() method on free_list to
// remove only the ones that we need to remove.
tear_down_region_sets(true /* free_list_only */);
rebuild_region_sets(true /* free_list_only */);
}
// Public methods.
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#endif // _MSC_VER
_dirty_card_queue_set(false),
_into_cset_dirty_card_queue_set(false),
_is_alive_closure_cm(this),
_is_alive_closure_stw(this),
_mark_in_progress(false),
_full_collection(false),
_free_list("Master Free List"),
_secondary_free_list("Secondary Free List"),
_old_set("Old Set"),
_humongous_set("Master Humongous Set"),
_free_regions_coming(false),
_young_list(new YoungList(this)),
_gc_time_stamp(0),
_concurrent_cycle_started(false),
_g1h = this;
vm_exit_during_initialization("Failed necessary allocation.");
}
for (int i = 0; i < n_queues; i++) {
RefToScanQueue* q = new RefToScanQueue();
q->initialize();
_task_queues->register_queue(i, q);
iter_arr[i] = new HeapRegionRemSetIterator();
::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
}
// Initialize the G1EvacuationFailureALot counters and flags.
}
os::enable_vtime();
// Necessary to satisfy locking discipline assertions.
MutexLocker x(Heap_lock);
// We have to initialize the printer before committing the heap, as
// it will be used then.
// While there are no constraints in the GC code that HeapWordSize
// be any particular value, there are multiple other areas in the
// system which believe this to be true (e.g. oop->object_size in some
// cases incorrectly returns the size in wordSize units rather than
// HeapWordSize).
// Ensure that the sizes are properly aligned.
_cg1r = new ConcurrentG1Refine(this);
// Reserve the maximum.
// Includes the perm-gen.
// When compressed oops are enabled, the preferred heap base
// is calculated by subtracting the requested size from the
// 32Gb boundary and using the result as the base address for
// heap reservation. If the requested size is not aligned to
// HeapRegion::GrainBytes (i.e. the alignment that is passed
// into the ReservedHeapSpace constructor) then the actual
// base of the reserved heap may end up differing from the
// address that was requested (i.e. the preferred heap base).
// If this happens then we could end up using a non-optimal
// compressed oops mode.
// Since max_byte_size is aligned to the size of a heap region (checked
// above), we also need to align the perm gen size as it might not be.
if (UseCompressedOops) {
// Failed to reserve at specified address - the requested memory
// region is taken already, for example, by 'java' launcher.
// Try again to reserve the heap higher.
// Failed to reserve at specified address again - give up.
} else {
}
}
}
if (!heap_rs.is_reserved()) {
vm_exit_during_initialization("Could not reserve enough space for object heap");
return JNI_ENOMEM;
}
// It is important to do this in a way such that concurrent readers can't
// temporarily think something is in the heap. (I've actually seen this
// happen in asserts: DLD.)
// Create the gen rem set (and barrier set) for the entire reserved region.
} else {
vm_exit_during_initialization("G1 requires a mod ref bs.");
return JNI_ENOMEM;
}
// Also create a G1 rem set.
} else {
vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
return JNI_ENOMEM;
}
// Carve out the G1 part of the heap.
// Do later initialization work for concurrent refinement.
// 6843694 - ensure that the maximum region index can fit
// in the remembered set structures.
"too many cards per region");
_g1h = this;
// We're biasing _in_cset_fast_test to avoid subtracting the
// beginning of the heap every time we want to index; basically
// it's the same with what we do with the card table.
// Clear the _cset_fast_test bitmap in anticipation of adding
// regions to the incremental collection set for the first
// evacuation pause.
// Create the ConcurrentMark data structure and thread.
// (Must do this late, so that "max_regions" is defined.)
// Initialize the from_card cache structure of HeapRegionRemSet.
// Now expand into the initial heap size.
if (!expand(init_byte_size)) {
vm_exit_during_initialization("Failed to allocate initial heap.");
return JNI_ENOMEM;
}
// Perform any initialization actions delegated to the policy.
g1_rem_set(),
concurrent_g1_refine()->red_zone(),
if (G1DeferredRSUpdate) {
-1, // never trigger processing
-1, // no limit on length
}
// Initialize the card queue set used to hold cards containing
// references into the collection set.
-1, // never trigger processing
-1, // no limit on length
// In case we're keeping closure specialization stats, initialize those
// counts and that mechanism.
// Here we allocate the dummy full region that is required by the
// G1AllocRegion class. If we don't pass an address in the reserved
// space here, lots of asserts fire.
_g1_reserved.start());
// We'll re-use the same region whether the alloc region will
// require BOT updates or not and, if it doesn't, then a non-young
// region will complain that it cannot support allocations without
// BOT updates. So we'll tag the dummy region as young to avoid that.
// Make sure it's full.
// Do create of the monitoring and management support so that
// values in the heap have been properly initialized.
_g1mm = new G1MonitoringSupport(this);
return JNI_OK;
}
// Reference processing in G1 currently works as follows:
//
// * There are two reference processor instances. One is
// used to record and process discovered references
// during concurrent marking; the other is used to
// record and process references during STW pauses
// (both full and incremental).
// * Both ref processors need to 'span' the entire heap as
// the regions in the collection set may be dotted around.
//
// * For the concurrent marking ref processor:
// * Reference discovery is enabled at initial marking.
// * Reference discovery is disabled and the discovered
// references processed etc during remarking.
// * Reference discovery is MT (see below).
// * Reference discovery requires a barrier (see below).
// * Reference processing may or may not be MT
// (depending on the value of ParallelRefProcEnabled
// and ParallelGCThreads).
// * A full GC disables reference discovery by the CM
// ref processor and abandons any entries on it's
// discovered lists.
//
// * For the STW processor:
// * Non MT discovery is enabled at the start of a full GC.
// * Processing and enqueueing during a full GC is non-MT.
// * During a full GC, references are processed after marking.
//
// * Discovery (may or may not be MT) is enabled at the start
// of an incremental evacuation pause.
// * References are processed near the end of a STW evacuation pause.
// * For both types of GC:
// * Discovery is atomic - i.e. not concurrent.
// * Reference discovery will not need a barrier.
// Concurrent Mark ref processor
// mt processing
(int) ParallelGCThreads,
// degree of mt processing
// mt discovery
// degree of mt discovery
false,
// Reference discovery is not atomic
// is alive closure
// (for efficiency/performance)
true);
// Setting next fields of discovered
// lists requires a barrier.
// STW ref processor
// mt processing
// degree of mt processing
(ParallelGCThreads > 1),
// mt discovery
// degree of mt discovery
true,
// Reference discovery is atomic
// is alive closure
// (for efficiency/performance)
false);
// Setting next fields of discovered
// lists requires a barrier.
}
return _g1_committed.byte_size();
}
if (hr->startsHumongous()) {
}
}
}
#ifndef PRODUCT
private:
unsigned _gc_time_stamp;
bool _failures;
public:
if (_gc_time_stamp != region_gc_time_stamp) {
_failures = true;
}
return false;
}
};
}
#endif // PRODUCT
bool concurrent,
int worker_i) {
// Clean cards in the hot card cache
int n_completed_buffers = 0;
}
}
// Computes the sum of the storage used by the various regions.
"Should be owned on this thread's behalf.");
// Read only once in case it is set to NULL concurrently
return result;
}
return result;
}
public:
if (!r->continuesHumongous()) {
}
return false;
}
};
}
// otherwise, is there space in the current allocation region?
// We need to store the current allocation region in a local variable
// here. The problem is that this method doesn't take any locks and
// there may be other threads which overwrite the current allocation
// region field. attempt_allocation(), for example, sets it to NULL
// and this can happen *after* the NULL check here but before the call
// to free(), resulting in a SIGSEGV. Note that this doesn't appear
// to be a problem in the optimized build, since the two loads of the
// current allocation region field are optimized away.
return 0;
}
}
switch (cause) {
case GCCause::_g1_humongous_allocation: return true;
default: return false;
}
}
#ifndef PRODUCT
// Let's fill up most of the region
// And as a result the region we'll allocate will be humongous.
for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
// Let's use the existing mechanism for the allocation
} else {
// If we can't allocate once, we probably cannot allocate
// again. Let's get out of the loop.
break;
}
}
}
#endif // !PRODUCT
err_msg("Wrong marking cycle count (started: %d, completed: %d)",
}
// We assume that if concurrent == true, then the caller is a
// concurrent thread that has joined the Suspendible Thread
// Set. If there's ever a cheap way to check this, we should add an
// assert here.
// Given that this method is called at the end of a Full GC or of a
// concurrent cycle, and those can be nested (i.e., a Full GC can
// interrupt a concurrent cycle), the number of full collections
// completed should be either one (in the case where there was no
// nesting) or two (when a Full GC interrupted a concurrent cycle)
// behind the number of full collections started.
// This is the case for the inner caller, i.e. a Full GC.
assert(concurrent ||
err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
"is inconsistent with _old_marking_cycles_completed = %u",
// This is the case for the outer caller, i.e. the concurrent cycle.
assert(!concurrent ||
err_msg("for outer caller (concurrent cycle): "
"_old_marking_cycles_started = %u "
"is inconsistent with _old_marking_cycles_completed = %u",
// We need to clear the "in_progress" flag in the CM thread before
// we wake up any waiters (especially when ExplicitInvokesConcurrent
// is set) so that if a waiter requests another System.gc() it doesn't
// incorrectly see that a marking cycle is still in progress.
if (concurrent) {
}
// This notify_all() will ensure that a thread that called
// System.gc() with (with ExplicitGCInvokesConcurrent set or not)
// and it's waiting for a full GC to finish will be woken up. It is
// waiting in VM_G1IncCollectionPause::doit_epilogue().
}
_concurrent_cycle_started = true;
}
if (_concurrent_cycle_started) {
if (_cm->has_aborted()) {
}
_concurrent_cycle_started = false;
}
}
if (_concurrent_cycle_started) {
}
}
if (is_initial_mark) {
return InitialMark;
} else if (is_during_mark) {
return DuringMark;
} else if (is_young) {
return Normal;
} else {
return Mixed;
}
}
assert_at_safepoint(true /* should_be_vm_thread */);
switch (cause) {
case GCCause::_heap_inspection:
case GCCause::_heap_dump: {
do_full_collection(false); // don't clear all soft refs
break;
}
default: // XXX FIX ME
ShouldNotReachHere(); // Unexpected use of this function
}
}
unsigned int gc_count_before;
unsigned int old_marking_count_before;
bool retry_gc;
do {
retry_gc = false;
{
// Read the GC count while holding the Heap_lock
}
if (should_do_concurrent_full_gc(cause)) {
// Schedule an initial-mark evacuation pause that will start a
// concurrent cycle. We're setting word_size to 0 which means that
// we are not requesting a post-GC allocation.
0, /* word_size */
true, /* should_initiate_conc_mark */
g1_policy()->max_pause_time_ms(),
cause);
if (!op.pause_succeeded()) {
} else {
// A Full GC happened while we were trying to schedule the
// initial-mark GC. No point in starting a new cycle given
// that the whole heap was collected anyway.
}
if (retry_gc) {
if (GC_locker::is_active_and_needs_gc()) {
}
}
}
} else {
// Schedule a standard evacuation pause. We're setting word_size
// to 0 which means that we are not requesting a post-GC allocation.
0, /* word_size */
false, /* should_initiate_conc_mark */
g1_policy()->max_pause_time_ms(),
cause);
} else {
// Schedule a Full GC.
}
}
} while (retry_gc);
}
if (_g1_committed.contains(p)) {
// Given that we know that p is in the committed space,
// heap_region_containing_raw() should successfully
// return the containing region.
} else {
}
}
// Iteration functions.
// Iterates an OopClosure over all ref-containing fields of objects
// within a HeapRegion.
public:
if (!r->continuesHumongous()) {
r->oop_iterate(_cl);
}
return false;
}
};
if (do_perm) {
}
}
if (do_perm) {
}
}
// Iterates an ObjectClosure over all objects within a HeapRegion.
public:
if (! r->continuesHumongous()) {
r->object_iterate(_cl);
}
return false;
}
};
if (do_perm) {
}
}
// FIXME: is this right?
guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
}
// Calls a SpaceClosure on a HeapRegion.
public:
return false;
}
};
}
}
void
jint claim_value) {
1);
"Non dynamic should use fixed number of workers");
// try to spread out the starting points of the workers
// each worker will actually look at all regions
// we'll ignore "continues humongous" regions (we'll process them
// when we come across their corresponding "start humongous"
// region) and regions already claimed
continue;
}
// OK, try to claim it
if (r->claimHeapRegion(claim_value)) {
// success!
if (r->startsHumongous()) {
// If the region is "starts humongous" we'll iterate over its
// "continues humongous" first; in fact we'll do them
// first. The order is important. In one case, calling the
// closure on the "starts humongous" region might de-allocate
// and clear all its "continues humongous" regions and, as a
// result, we might end up processing them twice. So, we'll do
// them first (notice: most closures will ignore them anyway) and
// then we'll do the "starts humongous" region.
// if the region has already been claimed or it's not
// "continues humongous" we're done
!chr->continuesHumongous()) {
break;
}
// No one should have claimed it directly. We can, given
// that we claimed its "starts humongous" region.
// we should always be able to claim it; no one else should
// be trying to claim this region
// Right now, this holds (i.e., no closure that actually
// does something with "continues humongous" regions
// clears them). We might have to weaken it in the future,
// but let's leave these two asserts here for extra safety.
} else {
guarantee(false, "we should not reach here");
}
}
}
}
}
}
public:
return false;
}
};
}
}
#ifdef ASSERT
// This checks whether all regions in the heap have the correct claim
// value. I also piggy-backed on this a check to ensure that the
// humongous_start_region() information on "continues humongous"
// regions is correct.
private:
public:
if (r->claim_value() != _claim_value) {
"claim value = %d, should be %d",
HR_FORMAT_PARAMS(r),
r->claim_value(), _claim_value);
++_failures;
}
if (!r->isHumongous()) {
_sh_region = NULL;
} else if (r->startsHumongous()) {
_sh_region = r;
} else if (r->continuesHumongous()) {
if (r->humongous_start_region() != _sh_region) {
HR_FORMAT_PARAMS(r),
r->humongous_start_region(),
++_failures;
}
}
return false;
}
};
}
private:
public:
"claim value = %d, should be %d",
_failures += 1;
}
return false;
}
};
}
#endif // ASSERT
// Clear the cached CSet starting regions and (more importantly)
// the time stamps. Called when we reset the GC time stamp.
for (int i = 0; i < n_queues; i++) {
}
}
// Given the id of a worker, obtain or calculate a suitable
// starting region for iterating over the current collection set.
// Cached starting region for current worker was set
// during the current pause - so it's valid.
// Note: the cached starting heap region may be NULL
// (when the collection set is empty).
return result;
}
// The cached entry was not valid so let's calculate
// a suitable starting heap region for this worker.
// We want the parallel threads to start their collection
// set iteration at different collection set regions to
// avoid contention.
// If we have:
// n collection set regions
// p threads
// Then thread t will start at region floor ((t * n) / p)
if (G1CollectedHeap::use_parallel_gc_threads()) {
"Unless dynamic should use total workers");
if (worker_i > 0 &&
// Previous workers starting region is valid
// so let's iterate from there
}
}
}
// Note: the calculated starting heap region may be NULL
// (when the collection set is empty).
"should be updated only once per pause");
return result;
}
"Non dynamic should use fixed number of workers");
return region_at(start_index);
}
while (r != NULL) {
if (cl->doHeapRegion(r)) {
cl->incomplete();
return;
}
r = next;
}
}
HeapRegionClosure *cl) {
if (r == NULL) {
// The CSet is empty so there's nothing to do.
return;
}
assert(r->in_collection_set(),
"Start region must be a member of the collection set.");
cl->incomplete();
return;
}
}
while (cur != r) {
cl->incomplete();
return;
}
}
}
}
return res;
}
}
return NULL;
}
}
}
return true;
}
return HeapRegion::GrainBytes;
}
// Return the remaining space in the cur alloc region, but not less than
// the min TLAB size.
// Also, this value can be at most the humongous object threshold,
// since we can't allow tlabs to grow big enough to accommodate
// humongous objects.
return max_tlab_size;
} else {
}
}
return _g1_reserved.byte_size();
}
// assert(false, "NYI");
return 0;
}
ensure_parsability(false);
}
g1_rem_set()->prepare_for_verify();
}
VerifyOption vo) {
switch (vo) {
return false;
default:
}
return false; // keep some compilers happy
}
switch (vo) {
case VerifyOption_G1UseMarkWord: return NULL;
default: ShouldNotReachHere();
}
return NULL; // keep some compilers happy
}
switch (vo) {
default: ShouldNotReachHere();
}
return false; // keep some compilers happy
}
switch (vo) {
case VerifyOption_G1UsePrevMarking: return "PTAMS";
case VerifyOption_G1UseNextMarking: return "NTAMS";
case VerifyOption_G1UseMarkWord: return "NONE";
default: ShouldNotReachHere();
}
return NULL; // keep some compilers happy
}
public:
{ }
template <class T> void do_oop_work(T *p) {
"Dead object referenced by a not dead object");
}
};
private:
public:
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
}
// If the object is alive according to the mark word,
// then verify that the marking information agrees.
// Note we can't verify the contra-positive of the
// above: if the object is dead (according to the mark
// word), it may not be marked, or may have been marked
// but has since became dead, or may have been allocated
// since the last marking.
if (_vo == VerifyOption_G1UseMarkWord) {
}
o->oop_iterate(&isLive);
if (!_hr->obj_allocated_since_prev_marking(o)) {
}
}
}
};
public:
};
if (o != NULL) {
" isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
(void*) o, word_sz,
_g1->isMarkedPrev(o),
_g1->isMarkedNext(o),
int *val;
}
}
}
};
private:
bool _par;
bool _failures;
public:
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
_failures(false) {}
bool failures() {
return _failures;
}
if (!r->continuesHumongous()) {
bool failures = false;
if (failures) {
_failures = true;
} else {
if (_vo != VerifyOption_G1UseNextMarking) {
"< calculated "SIZE_FORMAT,
r->max_live_bytes(),
_failures = true;
}
} else {
// When vo == UseNextMarking we cannot currently do a sanity
// check on the live bytes as the calculation has not been
// finalized yet.
}
}
}
return false; // stop the region iteration if we hit a failure
}
};
private:
bool _failures;
public:
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
_failures(false) { }
template <class T> void do_oop_nv(T* p) {
if (_vo == VerifyOption_G1UseMarkWord) {
}
_failures = true;
}
}
}
};
// This is the task used for parallel heap verification.
private:
bool _failures;
public:
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
AbstractGangTask("Parallel verify task"),
_failures(false) { }
bool failures() {
return _failures;
}
_failures = true;
}
}
};
}
VerifyOption vo) {
if (SafepointSynchronize::is_at_safepoint()) {
"Expected to be executed serially by the VM thread at this point");
// We apply the relevant closures to all the oops in the
// system dictionary, the string table and the code cache.
process_strong_roots(true, // activate StrongRootsScope
true, // we set "collecting perm gen" to true,
// so we don't reset the dirty cards in the perm gen.
&rootsCl,
&blobsCl,
&rootsCl);
// If we're verifying after the marking phase of a Full GC then we can't
// treat the perm gen as roots into the G1 heap. Some of the objects in
// the perm gen may be dead and hence not marked. If one of these dead
// objects is considered to be a root then we may end up with a false
// "Root location <x> points to dead obj <y>" failure.
if (vo != VerifyOption_G1UseMarkWord) {
// Since we used "collecting_perm_gen" == true above, we will not have
// checked the refs from perm into the G1-collected heap. We check those
// references explicitly below. Whether the relevant cards are dirty
// is checked further below in the rem set verification.
}
if (vo != VerifyOption_G1UseMarkWord) {
// If we're verifying during a full GC then the region sets
// will have been torn down at the start of the GC. Therefore
// verifying the region sets will fail. So we only verify
// the region sets when not in a full GC.
}
"sanity check");
"If not dynamic should be using all the workers");
set_par_threads(0);
failures = true;
}
// Checks that the expected amount of parallel work was done.
// The implication is that n_workers is > 0.
"sanity check");
"sanity check");
} else {
failures = true;
}
}
if (failures) {
// It helps to have the per-region information in the output to
// help us track down what went wrong. This is why we call
// print_extended_on() instead of print_on().
#ifndef PRODUCT
if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
vo, false /* all */);
}
#endif
gclog_or_tty->flush();
}
} else {
if (!silent)
}
}
public:
return false;
}
};
capacity()/K, used_unlocked()/K);
_g1_storage.high(),
}
// Print the per-region information.
"HS=humongous(starts), HC=humongous(continues), "
"CS=collection set, F=free, TS=gc time stamp, "
"PTAMS=previous top-at-mark-start, "
"NTAMS=next top-at-mark-start)");
}
if (G1CollectedHeap::use_parallel_gc_threads()) {
}
}
if (G1CollectedHeap::use_parallel_gc_threads()) {
}
}
// We'll overload this to mean "trace GC pause statistics."
if (TraceGen0Time || TraceGen1Time) {
// The "G1CollectorPolicy" is keeping track of these stats, so delegate
// to that.
g1_policy()->print_tracing_info();
}
if (G1SummarizeRSetStats) {
g1_rem_set()->print_summary_info();
}
if (G1SummarizeConcMark) {
}
}
#ifndef PRODUCT
// Helpful for debugging RSet issues.
private:
const char* _msg;
public:
HR_FORMAT_PARAMS(r));
if (occupied == 0) {
} else {
}
return false;
}
gclog_or_tty->cr();
gclog_or_tty->cr();
}
~PrintRSetsClosure() {
gclog_or_tty->cr();
}
};
}
}
#endif // PRODUCT
"not a garbage-first heap");
return _g1h;
}
// always_do_update_barrier = false;
// Call allocation profiler
// Fill TLAB's and such
ensure_parsability(true);
}
// FIXME: what is this about?
// I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
// is set.
"derived pointer present"));
// always_do_update_barrier = true;
// We have just completed a GC. Update the soft reference
// policy with the new heap occupancy
}
unsigned int gc_count_before,
bool* succeeded) {
false, /* should_initiate_conc_mark */
g1_policy()->max_pause_time_ms(),
"the result should be NULL if the VM did not succeed");
return result;
}
void
if (!_cmThread->in_progress()) {
_cmThread->set_started();
}
}
}
// PtrQueueSet::buffer_size() and PtrQueue:size() return sizes
// in bytes - not the number of 'entries'. We need to convert
// into a number of cards.
}
return g1_rem_set()->cardsScanned();
}
void
if (_surviving_young_words == NULL) {
"Not enough space for young surv words summary.");
}
#ifdef ASSERT
for (uint i = 0; i < array_length; ++i) {
}
#endif // !ASSERT
}
void
for (uint i = 0; i < array_length; ++i) {
_surviving_young_words[i] += surv_young_words[i];
}
}
void
}
#ifdef ASSERT
public:
// Here we check that the CSet region's RSet is ready for parallel
// iteration. The fields that we'll verify are only manipulated
// when the region is part of a CSet and is collected. Afterwards,
// we reset these fields when we clear the region's RSet (when the
// region is freed) so they are ready when the region is
// re-allocated. The only exception to this is if there's an
// evacuation failure and instead of freeing the region we leave
// it in the heap. In that case, we reset these fields during
// evacuation failure handling.
// Here's a good place to add any other checks we'd like to
// perform on CSet regions.
return false;
}
};
#endif // ASSERT
#if TASKQUEUE_STATS
}
for (int i = 0; i < n; ++i) {
}
}
for (int i = 0; i < n; ++i) {
}
}
#endif // TASKQUEUE_STATS
return;
}
}
return;
}
if (evacuation_failed()) {
}
} else {
if (evacuation_failed()) {
}
}
gclog_or_tty->flush();
}
bool
assert_at_safepoint(true /* should_be_vm_thread */);
if (GC_locker::check_active_before_gc()) {
return false;
}
// This call will decide whether this pause is an initial-mark
// pause. If it is, during_initial_mark_pause() will return true
// for the duration of this pause.
// We do not allow initial-mark to be piggy-backed on a mixed GC.
// We also do not allow mixed GCs during marking.
// Record whether this pause is an initial mark. When the current
// thread has completed its logging output and it's safe to signal
// the CM thread, the flag's value in the policy has been reset.
// Inner scope for scope based logging, timers, and stats collection
{
if (g1_policy()->during_initial_mark_pause()) {
// We are about to start a marking cycle, so we increment the
// full collection counter.
}
// If the secondary_free_list is not empty, append it to the
// free_list. No need to wait for the cleanup operation to finish;
// the region allocation code will check the secondary_free_list
// and wait if necessary. If the G1StressConcRegionFreeing flag is
// set, skip this step so that the region allocation code has to
// get entries from the secondary_free_list.
if (!G1StressConcRegionFreeing) {
}
"young list should be well formed");
// Don't dynamically change the number of GC threads this early. A value of
// 0 is used to indicate serial work. When parallel work is done,
// it will be set.
{ // Call to jvmpi::post_class_unload_events must occur outside of active GC
gc_prologue(false);
increment_total_collections(false /* full gc */);
// Please see comment in g1CollectedHeap.hpp and
// G1CollectedHeap::ref_processing_init() to see how
// reference processing currently works in G1.
// Enable discovery in the STW reference processor
true /*verify_no_refs*/);
{
// We want to temporarily turn off discovery by the
// CM ref processor, if necessary, and turn it back on
// again later if we do. Using a scoped
// NoRefDiscovery object will do this.
// Forget the current alloc region (we might even choose it to be part
// of the collection set!).
// We should call this after we retire the mutator alloc
// region(s) so that all the ALLOC / RETIRE events are generated
// before the start GC event.
// This timing is only used by the ergonomics to handle our pause target.
// It is unclear why this should not include the full pause. We will
// investigate this in CR 7178365.
//
// Preserving the old comment here if that helps the investigation:
//
// The elapsed time induced by the start time below deliberately elides
// the possible verification above.
_young_list->print();
#endif // YOUNG_LIST_VERBOSE
// We have to wait until the CM threads finish scanning the
// root regions as it's the only way to ensure that all the
// objects on them have been correctly scanned before we start
// moving them during the GC.
if (waited) {
}
_young_list->print();
#endif // YOUNG_LIST_VERBOSE
if (g1_policy()->during_initial_mark_pause()) {
}
perm_gen()->save_marks();
_young_list->print();
#endif // YOUNG_LIST_VERBOSE
_cm->note_start_of_gc();
// We should not verify the per-thread SATB buffers given that
// we have not filtered them yet (we'll do so during the
// GC). We also call this after finalize_cset() to
// ensure that the CSet has been finalized.
true /* verify_enqueued_buffers */,
false /* verify_thread_buffers */,
true /* verify_fingers */);
if (_hr_printer.is_active()) {
} else if (hr->is_survivor()) {
} else {
}
}
}
#ifdef ASSERT
#endif // ASSERT
// Initialize the GC alloc regions.
// Actually do the work...
// We do this to mainly verify the per-thread SATB buffers
// (which have been filtered by now) since we didn't verify
// them earlier. No point in re-checking the stacks / enqueued
// buffers given that the CSet has not changed since last time
// we checked.
false /* verify_enqueued_buffers */,
true /* verify_thread_buffers */,
true /* verify_fingers */);
// Start a new incremental collection set for the next pause.
// Clear the _cset_fast_test bitmap in anticipation of adding
// regions to the incremental collection set for the next
// evacuation pause.
// Don't check the whole heap at this point as the
// GC alloc regions from this pause have been tagged
// as survivors and moved on to the survivor list.
// Survivor regions will fail the !is_young() check.
"young list should be empty");
_young_list->print();
#endif // YOUNG_LIST_VERBOSE
if (evacuation_failed()) {
if (_evacuation_failed_info_array[i].has_failed()) {
}
}
} else {
// The "used" of the collection set has already been subtracted
// when they were freed. Add in the bytes evacuated.
}
if (g1_policy()->during_initial_mark_pause()) {
// We have to do this before we notify the CM threads that
// they can start working to make sure that all the
// appropriate initialization is done on the CM object.
// Note that we don't actually trigger the CM thread at
// this point. We do that later when we're sure that
// the current thread has completed its logging output.
}
_young_list->print();
#endif // YOUNG_LIST_VERBOSE
{
if (expand_bytes > 0) {
// No need for an ergo verbose message here,
// expansion_amount() does this when it returns a value > 0.
if (!expand(expand_bytes)) {
// We failed to expand the heap so let's verify that
// committed/uncommitted amount match the backing store
}
}
}
// We redo the verification but now wrt to the new CSet which
// has just got initialized after the previous CSet was freed.
true /* verify_enqueued_buffers */,
true /* verify_thread_buffers */,
true /* verify_fingers */);
_cm->note_end_of_gc();
// This timing is only used by the ergonomics to handle our pause target.
// It is unclear why this should not include the full pause. We will
// investigate this in CR 7178365.
// In prepare_for_verify() below we'll need to scan the deferred
// update buffers to bring the RSets up-to-date if
// G1HRRSFlushLogBuffersOnVerify has been set. While scanning
// the update buffers we'll probably need to scan cards on the
// regions we just allocated to (i.e., the GC alloc
// regions). However, during the last GC we called
// set_saved_mark() on all the GC alloc regions, so card
// scanning might skip the [saved_mark_word()...top()] area of
// those regions (i.e., the area we allocated objects into
// during the last GC). But it shouldn't. Given that
// saved_mark_word() is conditional on whether the GC time stamp
// on the region is current or not, by incrementing the GC time
// stamp here we invalidate all the GC time stamps on all the
// regions and saved_mark_word() will simply return top() for
// all the regions. This is a nicer way of ensuring this rather
// than iterating over the regions and fixing them. In fact, the
// GC time stamp increment here also ensures that
// saved_mark_word() will return top() between pauses, i.e.,
// during concurrent refinement. So we don't need the
// is_gc_active() check to decided which top to use when
// scanning cards (see CR 7039627).
// CM reference discovery will be re-enabled if necessary.
}
// We should do this after we potentially expand the heap so
// that all the COMMIT events are generated before the end GC
// event, and after we retire the GC alloc regions so that all
// RETIRE events are generated before the end GC event.
if (mark_in_progress()) {
}
#ifdef TRACESPINNING
#endif
gc_epilogue(false);
}
// Print the remainder of the GC log output.
// It is not yet to safe to tell the concurrent mark to
// start as we have some optional output below. We don't want the
// output from the concurrent mark thread interfering with this
// logging output either.
// We must call G1MonitoringSupport::update_sizes() in the same scoping level
// as an active TraceMemoryManagerStats object (i.e. before the destructor for the
// TraceMemoryManagerStats is called) so that the G1 memory pools are updated
// before any GC notifications are raised.
g1mm()->update_sizes();
}
if (G1SummarizeRSetStats &&
(G1SummarizeRSetStatsPeriod > 0) &&
(total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
g1_rem_set()->print_summary_info();
}
// It should now be safe to tell the concurrent mark thread to start
// without its logging output interfering with the logging output
// that came from the pause.
if (should_start_conc_mark) {
// CAUTION: after the doConcurrentMark() call below,
// the concurrent marking thread(s) could be running
// concurrently with us. Make sure that anything after
// this point does not assume that we are the only GC thread
// running. Note: of course, the actual marking work will
// not start until the safepoint itself is released in
// ConcurrentGCThread::safepoint_desynchronize().
}
return true;
}
{
switch (purpose) {
case GCAllocForSurvived:
break;
case GCAllocForTenured:
break;
default:
assert(false, "unknown GCAllocPurpose");
break;
}
// Prevent humongous PLAB sizes for two reasons:
// * PLABs are allocated using a similar paths as oops, but should
// never be in a humongous region
// * Allowing humongous PLABs needlessly churns the region free lists
}
}
}
assert_at_safepoint(true /* should_be_vm_thread */);
// We will discard the current GC alloc region if:
// a) it's in the collection set (it can happen!),
// b) it's already full (no point in using it),
// c) it's empty (this means that it was emptied during
// a cleanup and it should be on the free list now), or
// d) it's humongous (this means that it was emptied
// during a cleanup and was added to the free list, but
// has been subsequently used to allocate a humongous
// object that may be less than the region size).
if (retained_region != NULL &&
!retained_region->in_collection_set() &&
!retained_region->is_empty() &&
!retained_region->isHumongous()) {
// The retained region was added to the old region set when it was
// retired. We have to remove it now, since we don't allow regions
// we allocate to in the region sets. We'll re-add it later, when
// it's retired again.
}
}
void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
// If we have an old GC alloc region to release, we'll save it in
// _retained_old_gc_alloc_region. If we don't
// _retained_old_gc_alloc_region will become NULL. This is what we
// want either way so no reason to check explicitly for either
// condition.
if (ResizePLAB) {
}
}
}
_drain_in_progress = false;
}
_evac_failure_scan_stack->length() == 0,
"Postcondition");
delete _evac_failure_scan_stack;
}
if (G1CollectedHeap::use_parallel_gc_threads()) {
set_par_threads(0);
} else {
}
// Reset the claim values in the regions in the collection set.
// Now restore saved marks, if any.
while (!_objs_with_preserved_marks.is_empty()) {
}
_objs_with_preserved_marks.clear(true);
_preserved_marks_of_objs.clear(true);
}
}
while (_evac_failure_scan_stack->length() > 0) {
}
}
if (forward_ptr == NULL) {
// Forward-to-self succeeded.
_evacuation_failed = true;
if (_evac_failure_closure != cl) {
"Should only be true while someone holds the lock.");
// Set the global evac-failure closure to the current thread's.
// Now do the common part.
// Reset to NULL.
} else {
// The lock is already held, and this is recursive.
}
return old;
} else {
// Forward-to-self failed. Either someone else managed to allocate
// space for this object (old != forward_ptr) or they beat us in
// self-forwarding it (old == forward_ptr).
"should not be in the CSet",
return forward_ptr;
}
}
if (!r->evacuation_failed()) {
r->set_evacuation_failed(true);
}
if (!_drain_in_progress) {
// prevent recursion in copy_to_survivor_space()
_drain_in_progress = true;
_drain_in_progress = false;
}
}
// We want to call the "for_promotion_failure" version only in the
// case of a promotion failure.
if (m->must_be_preserved_for_promotion_failure(obj)) {
}
}
if (purpose == GCAllocForSurvived) {
return result;
} else {
// Let's try to allocate in the old gen in case we can fit the
// object there.
return old_attempt_allocation(word_size);
}
} else {
return result;
} else {
// Let's try to allocate in the survivors in case we can fit the
// object there.
return survivor_attempt_allocation(word_size);
}
}
// Trying to keep some compilers happy.
return NULL;
}
_term_attempts(0),
_age_table(false),
_strong_roots_time(0), _term_time(0),
_alloc_buffer_waste(0), _undo_waste(0) {
// we allocate G1YoungSurvRateNumRegions plus one entries, since
// we "sacrifice" entry 0 to keep track of surviving bytes for
// non-young regions (where the age is -1)
// We also add a few elements at the beginning and at the end in
// an attempt to eliminate cache contention
if (_surviving_young_words_base == NULL)
"Not enough space for young surv histo.");
}
void
{
" ------waste (KiB)------");
" total alloc undo");
" ------- ------- -------");
}
void
outputStream* const st) const
{
alloc_buffer_waste() * HeapWordSize / K,
undo_waste() * HeapWordSize / K);
}
#ifdef ASSERT
return true;
}
if (has_partial_array_mask(ref)) {
// Must be in the collection set--it's already been copied.
} else {
}
return true;
}
} else {
}
}
#endif // ASSERT
do {
// Drain the overflow stack first, so other threads can steal.
}
}
}
#ifdef ASSERT
#endif // ASSERT
// We know that the object is not moving so it's safe to read its size.
}
#ifdef ASSERT
#endif // ASSERT
// The object might be in the process of being copied by another
// worker so we cannot trust that its to-space image is
// well-formed. So we have to read its size from its from-space
// image which we know should not be changing.
}
// +1 to make the -1 indexes valid...
: m->age();
word_sz);
#ifndef PRODUCT
// Should this evacuation fail?
if (_g1->evacuation_should_fail()) {
}
}
#endif // !PRODUCT
// This will either forward-to-self, or detect that someone else has
// installed a forwarding pointer.
}
// We're going to allocate linearly, so might as well prefetch ahead.
if (forward_ptr == NULL) {
// We could simply do obj->incr_age(). However, this causes a
// performance issue. obj->incr_age() will first check whether
// the object has a displaced mark by checking its mark word;
// getting the mark word from the new location of the object
// stalls. So, given that we already have the mark word and we
// are about to install it anyway, it's better to increase the
// age on the mark word, when the object does not have a
// displaced mark word. We're not expecting many objects to have
// a displaced marked word, so that case is not optimized
// further (it could be...) and we simply call obj->incr_age().
if (m->has_displaced_mark_helper()) {
// in this case, we have to install the mark word first,
// otherwise obj looks to be forwarded (the old mark word,
// which contains the forward pointer, was copied)
} else {
m = m->incr_age();
}
} else {
}
// We keep track of the next start index in the length field of
// the to-space object. The actual length can be found in the
// length field of the from-space object.
} else {
// No point in using the slower heap_region_containing() method,
// given that we know obj is in the heap.
}
} else {
obj = forward_ptr;
}
return obj;
}
template <class T>
::do_oop_work(T* p) {
"Precondition: G1BarrierRS implies obj is non-NULL");
// here the null check is implicit in the cset_fast_test() test
if (obj->is_forwarded()) {
} else {
}
// If the object is self-forwarded we don't need to explicitly
// mark it, the evacuation failure protocol will do so.
}
// When scanning the RS, we only care about objs in CS.
if (barrier == G1BarrierRS) {
}
} else {
// The object is not in collection set. If we're a root scanning
// closure during an initial mark pause (i.e. do_mark_object will
// be true) then attempt to mark the object.
}
}
}
par_do_barrier(p);
}
}
// The from-space object contains the real length.
// We keep track of the next start index in the length field of the
// to-space object.
// We'll try not to push a range that's smaller than ParGCArrayScanChunk.
// Push the remainder before we process the range in case another
// worker has run out of things to do and can steal it.
} else {
// We'll process the final range for this object. Restore the length
// so that the heap remains parsable in case of evacuation failure.
}
// Process indexes [start,end). It will also process the header
// along with the first chunk (i.e., the chunk with start == 0).
// Note that at this point the length field of to_obj_array is not
// correct given that we are using it to keep track of the next
// start index. oop_iterate_range() (thankfully!) ignores the length
// field and only relies on the start / end parameters. It does
// however return the size of the object which will be incorrect. So
// we have to ignore it even if we wanted to use it.
}
protected:
public:
void do_void();
private:
inline bool offer_termination();
};
pss->start_term_time();
pss->end_term_time();
return res;
}
pss->trim_queue();
do {
if (stolen_task.is_narrow()) {
} else {
}
// We've just processed a reference and we might have made
// available new entries on the queues. So we have to make sure
// we drain the queues as necessary.
pss->trim_queue();
}
} while (!offer_termination());
}
protected:
}
public:
: AbstractGangTask("G1 collection"),
_terminator(0, _queues),
{}
}
// This task calls set_n_termination() in par_non_clean_card_iterate_work()
// in the young space (_par_seq_tasks) in the G1 heap
// for SequentialSubTasksDone.
// This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
// both of which need setting by set_n_termination().
}
{
// We also need to mark copied objects.
}
{
}
if (ParallelGCVerbose) {
MutexLocker x(stats_lock());
}
// Close the inner scope so that the ResourceMark and HandleMark
// destructors are executed here and are included as part of the
// "GC Worker Time".
}
}
};
// *** Common G1 Evacuation Stuff
// Closures that support the filtering of CodeBlobs scanned during
// external root scanning.
// Closure applied to reference fields in code blobs (specifically nmethods)
// to determine whether an nmethod contains references that point into
// the collection set. Used as a predicate when walking code roots so
// that only nmethods that point into the collection set are added to the
// 'marked' list.
bool _points_into_cs;
public:
template <class T>
void do_oop_nv(T* p) {
if (!_points_into_cs) {
_points_into_cs = true;
}
}
}
};
public:
if (predicate_cl.points_into_cs()) {
// At least one of the reference fields or the oop relocations
// in the nmethod points into the collection set. We have to
// 'mark' this nmethod.
// Note: Revisit the following if CodeBlobToOopClosure::do_code_blob()
// or MarkingCodeBlobClosure::do_code_blob() change.
if (!nm->test_set_oops_do_mark()) {
}
}
}
}
};
// This method is run in a GC worker.
void
int worker_i) {
// First scan the strong roots, including the perm gen.
// Walk the code cache w/o buffering, because StarTask cannot handle
// unaligned oop locations.
process_strong_roots(false, // no scoping; this is parallel code
// Now the CM ref_processor roots.
// We need to treat the discovered reference lists of the
// concurrent mark ref processor as roots and keep entries
// (which are added by the marking threads) on them live
// until they can be processed at the end of marking.
}
// Finish up any enqueued closure apps (attributed as object copy time).
double ext_root_time_ms =
// During conc marking we have to filter the per-thread SATB buffers
// to make sure we remove any oops into the CSet (which will show up
// as implicitly live).
if (mark_in_progress()) {
}
}
// Now scan the complement of the collection set.
}
}
void
}
// Weak Reference Processing support
// An always "is_alive" closure that is used to preserve referents.
// If the object is non-null then it's alive. Used in the preservation
// of referent objects that are pointed to by reference objects
// discovered by the CM ref processor.
public:
if (p != NULL) {
return true;
}
return false;
}
};
// An object is reachable if it is outside the collection set,
// or is inside and copied.
}
// Non Copying Keep Alive closure
public:
}
}
};
// Copying Keep Alive closure - can be called from both
// serial and parallel code as long as different worker
// threads utilize different G1ParScanThreadState instances
// and different queues.
public:
{}
template <class T> void do_oop_work(T* p) {
// If the referent object has been forwarded (either copied
// to a new location or to itself in the event of an
// evacuation failure) then we need to update the reference
// field and, if both reference and referent are in the G1
// heap, update the RSet for the referent.
//
// If the referent has not been forwarded then we have to keep
// it alive by policy. Therefore we have copy the referent.
//
// If the reference field is in the G1 heap then we can push
// on the PSS queue. When the queue is drained (after each
// phase of reference processing) the object and it's followers
// will be copied, the reference field set to point to the
// new location, and the RSet updated. Otherwise we need to
// use the the non-heap or perm closures directly to copy
// the referent object and update the pointer, while avoiding
// updating the RSet.
if (_g1h->is_in_g1_reserved(p)) {
} else {
// The reference field is not in the G1 heap.
_copy_perm_obj_cl->do_oop(p);
} else {
}
}
}
}
};
// Serial drain queue closure. Called as the 'complete_gc'
// closure for each discovered list in some of the
// reference processing phases.
protected:
public:
{ }
void do_void() {
pss->trim_queue();
}
};
// Parallel Reference Processing closures
// Implementation of AbstractRefProcTaskExecutor for parallel reference
// processing during G1 evacuation pauses.
private:
int _active_workers;
public:
int n_workers) :
{
}
// Executes the given task using concurrent marking worker threads.
};
// Gang task for possibly parallel reference processing
public:
AbstractGangTask("Process reference objects in parallel"),
{}
// The reference processing task executed by a single worker.
// We also need to mark copied objects.
}
// Keep alive closure.
// Complete GC closure
// Call the reference processing task's work routine.
// Note we cannot assert that the refs array is empty here as not all
// of the processing tasks (specifically phase2 - pp2_work) execute
// the complete_gc closure (which ordinarily would drain the queue) so
// the queue may not be empty.
}
};
// Driver routine for parallel reference processing.
// Creates an instance of the ref processing gang
// task and has the worker threads execute it.
_g1h->set_par_threads(0);
}
// Gang task for parallel reference enqueueing.
public:
AbstractGangTask("Enqueue reference objects in parallel"),
{ }
}
};
// Driver routine for parallel reference enqueueing.
// Creates an instance of the ref enqueueing gang
// task and has the worker threads execute it.
_g1h->set_par_threads(0);
}
// End of weak reference support closures
// Abstract task used to preserve (i.e. copy) any referent objects
// that are in the collection set and are pointed to by reference
// objects discovered by the CM ref processor.
protected:
public:
AbstractGangTask("ParPreserveCMReferents"),
{ }
// We also need to mark copied objects.
}
// Is alive closure
// Copying keep alive closure. Applied to referent objects that need
// to be copied.
// limit is set using max_num_q() - which was set using ParallelGCThreads.
// So this must be true - but assert just in case someone decides to
// change the worker ids.
// Select discovered lists [i, i+stride, i+2*stride,...,limit)
// Since discovery is not atomic for the CM ref processor, we
// can see some null referent objects.
// This will filter nulls.
if (iter.is_referent_alive()) {
}
iter.move_to_next();
}
}
// Drain the queue - which may cause stealing
// Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
}
};
// Weak Reference processing during an evacuation pause (part 1).
// Any reference objects, in the collection set, that were 'discovered'
// by the CM ref processor should have already been copied (either by
// applying the external root copy closure to the discovered lists, or
// by following an RSet entry).
//
// But some of the referents, that are in the collection set, that these
// reference objects point to may not have been copied: the STW ref
// processor would have seen that the reference object had already
// been 'discovered' and would have skipped discovering the reference,
// but would not have treated the reference object as a regular oop.
// As a result the copy closure would not have been applied to the
// referent object.
//
// We need to explicitly copy these referent objects - the references
// will be processed at the end of remarking.
//
// We also need to do this copying before we process the reference
// objects discovered by the STW ref processor in case one of these
// referents points to another object which is also referenced by an
// object discovered by the STW ref processor.
"Need to reset active GC workers");
if (G1CollectedHeap::use_parallel_gc_threads()) {
} else {
}
set_par_threads(0);
// Closure to test whether a referent is alive.
G1STWIsAliveClosure is_alive(this);
// Even when parallel reference processing is enabled, the processing
// of JNI refs is serial and performed serially by the current thread
// rather than by a worker. The following PSS will be used for processing
// JNI refs.
// Use only a single queue for this PSS.
G1ParScanThreadState pss(this, 0);
// closures while we're actually processing the discovered
// reference objects.
// We also need to mark copied objects.
}
// Keep alive closure.
// Serial Complete GC closure
// Setup the soft refs policy...
rp->setup_policy(false);
if (!rp->processing_is_mt()) {
// Serial reference processing...
NULL,
} else {
// Parallel reference processing
}
// We have completed copying any necessary live referent objects
// (that were not copied during the actual pause) so we can
// retire any active alloc buffers
}
// Weak Reference processing during an evacuation pause (part 2).
// Now enqueue any remaining on the discovered lists on to
// the pending list.
if (!rp->processing_is_mt()) {
// Serial reference processing...
} else {
// Parallel reference enqueueing
"Need to reset active workers");
}
// FIXME
// CM's reference processing also cleans up the string and symbol tables.
// Should we do that here also? We could, but it is a serial operation
// and could significantly increase the pause time.
}
_expand_heap_after_alloc_failure = true;
_evacuation_failed = false;
// Should G1EvacuationFailureALot be in effect for this GC?
// Disable the hot card cache.
hot_card_cache->set_use_cache(false);
if (G1CollectedHeap::use_parallel_gc_threads()) {
workers()->active_workers(),
"If not dynamic should be using all the workers");
} else {
assert(n_par_threads() == 0,
"Should be the original non-parallel value");
n_workers = 1;
}
rem_set()->prepare_for_younger_refs_iterate(true);
double end_par_time_sec;
{
StrongRootsScope srs(this);
if (G1CollectedHeap::use_parallel_gc_threads()) {
// The individual threads will set their evac-failure closures.
// These tasks use ShareHeap::_process_strong_tasks
"If not dynamic should be using all the workers");
} else {
g1_par_task.work(0);
}
// Closing the inner scope will execute the destructor
// for the StrongRootsScope object. We record the current
// elapsed time before closing the scope so that time
// taken for the SRS destructor is NOT included in the
// reported parallel time.
}
double code_root_fixup_time_ms =
set_par_threads(0);
// Process any discovered reference objects - we have
// to do this _before_ we retire the GC alloc regions
// as we may have to copy some 'reachable' referent
// objects (and their reachable sub-graphs) that were
// not copied during the pause.
// Weak root processing.
// Note: when JSR 292 is enabled and code blobs can contain
// non-perm oops then we will need to process the code blobs
// here too.
{
G1STWIsAliveClosure is_alive(this);
G1KeepAliveClosure keep_alive(this);
}
// Reset and re-enable the hot card cache.
// Note the counts for the cards in the regions in the
// collection set are reset when the collection set is freed.
hot_card_cache->set_use_cache(true);
if (evacuation_failed()) {
// Reset the G1EvacuationFailureALot counters and flags
// Note: the values are reset only when an actual
// evacuation failure occurs.
}
// Enqueue any remaining references remaining on the STW
// reference processor's discovered lists. We need to do
// this after the card table is cleaned (and verified) as
// the act of enqueueing entries on to the pending list
// will log these updates (and dirty their associated
// cards). We need these updates logged to update any
// RSets.
if (G1DeferredRSUpdate) {
}
}
bool par) {
if (hr->isHumongous()) {
} else {
}
} else {
}
}
bool par) {
// Clear the card counts for this region.
// Note: we only need to do this if the region is not young
// (since we don't refine cards in young regions).
}
}
bool par) {
// We need to read this before we make the region non-humongous,
// otherwise the information will be gone.
hr->set_notHumongous();
while (i < last_index) {
i += 1;
}
*pre_used += hr_pre_used;
}
bool par) {
if (pre_used > 0) {
"should be >= pre_used: "SIZE_FORMAT,
}
}
}
}
}
public:
G1CollectedHeap* g1h) :
AbstractGangTask("G1 Par Cleanup CT Task"),
HeapRegion* r;
while (r = _g1h->pop_dirty_cards_region()) {
clear_cards(r);
}
}
// Cards of the survivors should have already been dirtied.
if (!r->is_survivor()) {
}
}
};
#ifndef PRODUCT
public:
if (r->is_survivor()) {
_g1h->verify_dirty_region(r);
} else {
}
return false;
}
};
// All of the region should be clean.
}
// We cannot guarantee that [bottom(),end()] is dirty. Threads
// dirty allocated blocks as they allocate them. The thread that
// retires each region and replaces it with a new one will do a
// maximal allocation to fill in [pre_dummy_top(),end()] but will
// not dirty that area (one less thing to have to do while holding
// a lock). So we can only verify that [bottom(),pre_dummy_top()]
// is dirty.
}
}
}
}
#endif
{
// Iterate over the dirty cards region list.
if (G1CollectedHeap::use_parallel_gc_threads()) {
set_par_threads(0);
} else {
while (_dirty_cards_region_list) {
if (_dirty_cards_region_list == r) {
// The last region.
}
}
}
#ifndef PRODUCT
if (G1VerifyCTCleanup || VerifyAfterGC) {
}
#endif
}
}
// Since the collection set is a superset of the the young list,
// all we need to do to clear the young list is clear its
// head and length, and unlink any young regions in the code below
_young_list->clear();
bool non_young = true;
if (non_young) {
non_young = false;
}
} else {
non_young = true;
}
}
cur->set_in_collection_set(false);
// At this point the we have 'popped' cur from the collection set
// (linked via next_in_collection_set()) but it is still in the
// young list (linked via next_young_region()). Clear the
// _next_young_region field.
} else {
}
"invariant" );
if (!cur->evacuation_failed()) {
// And the region is empty.
} else {
}
cur->set_not_young();
cur->set_evacuation_failed(false);
// The region is now considered to be old.
}
}
if (non_young) {
} else {
}
NULL /* old_proxy_set */,
NULL /* humongous_proxy_set */,
false /* par */);
}
// This routine is similar to the above but does not record
// any policy statistics or update free lists; we are abandoning
// the current incremental collection set in preparation of a
// full collection. After the full GC we will start to build up
// the incremental collection set again.
// This is only called when we're doing a full collection
// and is immediately followed by the tearing down of the young list.
cur->set_in_collection_set(false);
}
}
if (G1ConcRegionFreeingVerbose) {
"setting free regions coming");
}
_free_regions_coming = true;
}
{
_free_regions_coming = false;
}
if (G1ConcRegionFreeingVerbose) {
"reset free regions coming");
}
}
// Most of the time we won't have to wait, so let's do a quick test
// first before we take the lock.
if (!free_regions_coming()) {
return;
}
if (G1ConcRegionFreeingVerbose) {
"waiting for free regions");
}
{
while (free_regions_coming()) {
}
}
if (G1ConcRegionFreeingVerbose) {
"done waiting for free regions");
}
}
"the heap lock should already be held by or for this thread");
}
private:
bool _success;
public:
if (r->is_young()) {
_success = false;
}
return false;
}
};
if (check_heap) {
}
return ret;
}
private:
public:
if (r->is_empty()) {
// We ignore empty regions, we'll empty the free list afterwards
} else if (r->is_young()) {
// We ignore young regions, we'll empty the young list afterwards
} else if (r->isHumongous()) {
// We ignore humongous regions, we're not tearing down the
// humongous region set
} else {
// The rest should be old
}
return false;
}
}
};
assert_at_safepoint(true /* should_be_vm_thread */);
if (!free_list_only) {
// Need to do this after the heap iteration to be able to
// recognize the young regions and ignore them during the iteration.
}
}
private:
bool _free_list_only;
public:
if (!free_list_only) {
}
}
if (r->continuesHumongous()) {
return false;
}
if (r->is_empty()) {
// Add free regions to the free list
_free_list->add_as_tail(r);
} else if (!_free_list_only) {
if (r->isHumongous()) {
// We ignore humongous regions, we left the humongous set unchanged
} else {
// The rest should be old, add them to the old set
}
_total_used += r->used();
}
return false;
}
return _total_used;
}
};
assert_at_safepoint(true /* should_be_vm_thread */);
if (!free_list_only) {
}
err_msg("inconsistent _summary_bytes_used, "
}
}
return is_in_permanent(p);
} else {
}
}
// Methods for the mutator alloc region
bool force) {
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
"if force is true we should be able to expand the young list");
if (force || !young_list_full) {
false /* do_expand */);
if (new_alloc_region != NULL) {
return new_alloc_region;
}
}
return NULL;
}
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
// We update the eden sizes here, when the region is retired,
// instead of when it's allocated, since this is the point that its
// used space has been recored in _summary_bytes_used.
g1mm()->update_eden_size();
}
bool force) {
}
// Don't change the number of workers. Use the value previously set
// in the workgroup.
"Otherwise should be using the total number of workers");
if (n_workers == 0) {
assert(false, "Should have been set in prior evacuation pause.");
}
}
}
// Methods for the GC alloc regions
GCAllocPurpose ap) {
true /* do_expand */);
if (new_alloc_region != NULL) {
// We really only need to do this for old regions given that we
// should never scan survivors. But it doesn't hurt to do it
// for survivors too.
if (ap == GCAllocForSurvived) {
} else {
}
return new_alloc_region;
} else {
}
}
return NULL;
}
GCAllocPurpose ap) {
if (ap == GCAllocForSurvived) {
} else {
}
}
bool force) {
}
}
bool force) {
}
}
// Heap region set verification
private:
public:
_region_count += 1;
if (hr->continuesHumongous()) {
return false;
}
// TODO
} else if (hr->startsHumongous()) {
} else {
}
return false;
}
};
// This might return NULL if the allocation fails
}
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
// First, check the explicit lists.
_free_list.verify();
{
// Given that a concurrent operation might be adding regions to
// the secondary free list we have to take the lock before
// verifying it.
}
// If a concurrent region freeing operation is in progress it will
// be difficult to correctly attributed any free regions we come
// across to the correct free list given that they might belong to
// one of several (free_list, secondary_free_list, any local lists,
// etc.). So, if that's the case we will skip the rest of the
// verification operation. Alternatively, waiting for the concurrent
// operation to complete will have a non-trivial effect on the GC's
// operation (no concurrent operation will last longer than the
// interval between two calls to verification) and it might hide
// any issues that we would like to catch during testing.
if (free_regions_coming()) {
return;
}
// Make sure we append the secondary_free_list on the free_list so
// that all free regions we will come across can be safely
// attributed to the free_list.
// Finally, make sure that the region accounting in the lists is
// consistent with what we see in the heap.
}