/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/mutexLocker.hpp"
// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results
//
// NOTE(review): each table holds eight entries; presumably entry i is the
// seed value used when running with (i + 1) parallel GC threads -- confirm
// against the code that reads these defaults.

// all the same
static double rs_length_diff_defaults[] = {
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};
// per-card cost seed (ms); values decrease as the GC thread count grows
static double cost_per_card_ms_defaults[] = {
0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};
// all the same
static double young_cards_per_entry_ratio_defaults[] = {
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};
// per-RSet-entry cost seed (ms); decreases with more GC threads
static double cost_per_entry_ms_defaults[] = {
0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};
// per-byte copy cost seed (ms); decreases with more GC threads
static double cost_per_byte_ms_defaults[] = {
0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};
// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};
// per-region "other" cost seed (ms) for young regions
static double young_other_cost_per_region_ms_defaults[] = {
0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};
// per-region "other" cost seed (ms) for non-young regions
static double non_young_other_cost_per_region_ms_defaults[] = {
1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};
? ParallelGCThreads : 1),
_stop_world_start(0.0),
new TruncatedSeq(TruncatedSeqLength)),
_pause_time_target_ms((double) MaxGCPauseMillis),
_gcs_are_young(true),
_during_marking(false),
_in_marking_window(false),
_in_marking_window_im(false),
_initiate_conc_mark_if_possible(false),
_during_initial_mark_pause(false),
_last_young_gc(false),
_last_gc_was_young(false),
// Incremental CSet attributes
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#endif // _MSC_VER
// add here any more surv rate groups
_survivors_age_table(true),
_gc_overhead_perc(0.0) {
// Set up the region size and associated fields. Given that the
// policy is created before the heap, we have to set this up here,
// so it's done as soon as possible.
if (PrintAdaptiveSizePolicy) {
// Currently, we only use a single switch for all the heuristics.
G1ErgoVerbose::set_enabled(true);
// Given that we don't currently have a verboseness level
// parameter, we'll hardcode this to high. This can be easily
// changed in the future.
} else {
G1ErgoVerbose::set_enabled(false);
}
// Verify PLAB sizes
}
// Below, we might need to calculate the pause time target based on
// the pause interval. When we do so we are going to give G1 maximum
// flexibility and allow it to do pauses when it needs to. So, we'll
// arrange for the pause interval to be pause time target + 1 to
// ensure that a) the pause time target is maximized with respect to
// the pause interval and b) we maintain the invariant that pause
// time target < pause interval. If the user does not want this
// maximum flexibility, they will have to set the pause interval
// explicitly.
// First make sure that, if either parameter is set, its value is
// reasonable.
if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
if (MaxGCPauseMillis < 1) {
vm_exit_during_initialization("MaxGCPauseMillis should be "
"greater than 0");
}
}
if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
if (GCPauseIntervalMillis < 1) {
vm_exit_during_initialization("GCPauseIntervalMillis should be "
"greater than 0");
}
}
// Then, if the pause time target parameter was not set, set it to
// the default value.
if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
// The default pause time target in G1 is 200ms
} else {
// We do not allow the pause interval to be set without the
// pause time target
vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
"without setting MaxGCPauseMillis");
}
}
// Then, if the interval parameter was not set, set it according to
// the pause time target (this will also deal with the case when the
// pause time target is the default value).
if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
}
// Finally, make sure that the two parameters are consistent.
if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
"MaxGCPauseMillis (%u) should be less than "
"GCPauseIntervalMillis (%u)",
}
// Put an artificial ceiling on this so that it's not set to a silly value.
if (confidence_perc > 100) {
confidence_perc = 100;
warning("G1ConfidencePercent is set to a value that is too large, "
"it's been updated to %u", confidence_perc);
}
// start conservatively (around 50ms is about right)
// _max_survivor_regions will be calculated by
// update_young_list_target_length() during initialization.
assert(GCTimeRatio > 0,
"we should have set it to a default value set_g1_gc_flags() "
"if a user set it to 0");
// Put an artificial ceiling on this so that it's not set to a silly value.
if (reserve_perc > 50) {
reserve_perc = 50;
warning("G1ReservePercent is set to a value that is too large, "
"it's been updated to %u", reserve_perc);
}
// This will be set when the heap is expanded
// for the first time during initialization.
_reserve_regions = 0;
}
if (SurvivorRatio < 1) {
vm_exit_during_initialization("Invalid survivor ratio specified");
}
}
if (FLAG_IS_CMDLINE(NewRatio)) {
warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
} else {
_adaptive_size = false;
return;
}
}
if (FLAG_IS_CMDLINE(NewSize)) {
1U);
if (FLAG_IS_CMDLINE(MaxNewSize)) {
1U);
} else {
}
} else if (FLAG_IS_CMDLINE(MaxNewSize)) {
1U);
}
}
}
}
switch (_sizer_kind) {
case SizerDefaults:
break;
case SizerNewSizeOnly:
break;
case SizerMaxNewSizeOnly:
break;
case SizerMaxAndNewSize:
// Do nothing. Values set on the command line, don't update them at runtime.
break;
case SizerNewRatio:
break;
default:
}
assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
}
// Set aside an initial future to_space.
if (adaptive_young_list_length()) {
} else {
}
// We may immediately start allocating regions and placing them on the
// collection set list. Initialize the per-collection set info
}
// Create the jstat counters for the policy.
}
double base_time_ms,
double target_pause_time_ms) {
if (young_length >= base_free_regions) {
// end condition 1: not enough space for the young regions
return false;
}
if (pause_time_ms > target_pause_time_ms) {
// end condition 2: prediction is over the target pause time
return false;
}
// end condition 3: out-of-space (conservatively!)
return false;
}
// success!
return true;
}
// re-calculate the necessary reserve
// We use ceiling so that if reserve_regions_d is > 0.0 (but
// smaller than 1.0) we'll get 1.
}
if (adaptive_young_list_length()) {
} else {
// otherwise we don't have enough info to make the prediction
}
}
// make sure we don't go below any user-defined minimum bound
}
// Here, we might want to also take into account any additional
// constraints (i.e., user-defined minimum bound). Currently, we
// effectively don't set this bound.
return _young_gen_sizer->max_desired_young_length();
}
// if it's set to the default value (-1), we should predict it;
// otherwise, use the given value.
}
// Calculate the absolute and desired min bounds.
// This is how many young regions we already have (currently: the survivors).
// This is the absolute minimum young length, which ensures that we
// can allocate one eden region in the worst-case.
if (desired_min_length < absolute_min_length) {
}
// Calculate the absolute and desired max bounds.
// We will try our best not to "eat" into the reserve.
}
if (desired_max_length > absolute_max_length) {
}
if (adaptive_young_list_length()) {
if (gcs_are_young()) {
} else {
// Don't calculate anything and let the code below bound it to
// the desired_min_length, i.e., do the next GC as soon as
// possible to maximize how many old regions we can add to it.
}
} else {
// The user asked for a fixed young gen so we'll fix the young gen
// whether the next GC is young or mixed.
}
// Make sure we don't go over the desired max length, nor under the
// desired min length. In case they clash, desired_min_length wins
// which is why that test is second.
}
}
"we should be able to allocate at least one eden region");
}
// In case some edge-condition makes the desired max length too small...
if (desired_max_length <= desired_min_length) {
return desired_min_length;
}
// We'll adjust min_young_length and max_young_length not to include
// the already allocated young regions (i.e., so they reflect the
// min and max eden regions we'll allocate). The base_min_length
// will be reflected in the predictions by the
// survivor_regions_evac_time prediction.
double base_time_ms =
if (available_free_regions > _reserve_regions) {
}
// Here, we will make sure that the shortest young length that
// makes sense fits within the target pause time.
// The shortest young length will fit into the target pause time;
// we'll now check whether the absolute maximum number of young
// regions will fit in the target pause time. If not, we'll do
// a binary search between min_young_length and max_young_length.
// The maximum young length will fit into the target pause time.
// We are done so set min young length to the maximum length (as
// the result is assumed to be returned in min_young_length).
} else {
// The maximum possible number of young regions will not fit within
// the target pause time so we'll search for the optimal
// length. The loop invariants are:
//
// min_young_length < max_young_length
// min_young_length is known to fit into the target pause time
// max_young_length is known not to fit into the target pause time
//
// Going into the loop we know the above hold as we've just
// checked them. Every time around the loop we check whether
// the middle value between min_young_length and
// max_young_length fits into the target pause time. If it
// does, it becomes the new min. If it doesn't, it becomes
// the new max. This way we maintain the loop invariants.
while (diff > 0) {
} else {
}
}
// The results is min_young_length which, according to the
// loop invariants, should fit within the target pause time.
// These are the post-conditions of the binary search above:
"otherwise we should have discovered that max_young_length "
"fits into the pause target and not done the binary search");
"min_young_length, the result of the binary search, should "
"fit into the pause target");
"min_young_length, the result of the binary search, should be "
"optimal, so no larger length should fit into the pause target");
}
} else {
// Even the minimum length doesn't fit into the pause time
// target, return it as the result nevertheless.
}
return base_min_length + min_young_length;
}
// Sums the predicted evacuation time (ms) over the recorded survivor
// region list, starting from _recorded_survivor_head.
double G1CollectorPolicy::predict_survivor_regions_evac_time() {
double survivor_regions_evac_time = 0.0;
// NOTE(review): as it appears here the for-statement has only an init
// clause and an increment clause -- the termination condition and the
// loop body that accumulates into survivor_regions_evac_time seem to be
// missing from this view of the file; confirm against the full source
// before relying on this function.
for (HeapRegion * r = _recorded_survivor_head;
r = r->get_next_young_region()) {
}
return survivor_regions_evac_time;
}
if (rs_lengths > _rs_lengths_prediction) {
// add 10% to avoid having to recalculate often
}
}
bool is_tlab,
bool* gc_overhead_limit_was_exceeded) {
guarantee(false, "Not using this policy feature yet.");
return NULL;
}
// This method controls how a collector handles one or more
// of its generations being fully allocated.
bool is_tlab) {
guarantee(false, "Not using this policy feature yet.");
return NULL;
}
#ifndef PRODUCT
return
// also call verify_young_ages on any additional surv rate groups
}
bool
bool ret = true;
ret = false;
}
if (surv_rate_group == group) {
if (age < 0) {
ret = false;
}
ret = false;
}
}
}
return ret;
}
#endif // PRODUCT
// Release the future to-space so that it is available for compaction into.
}
// Consider this like a collection pause for the purposes of allocation
// since last pause.
// transitions and make sure we start with young GCs after the Full GC.
set_gcs_are_young(true);
_last_young_gc = false;
_in_marking_window = false;
_in_marking_window_im = false;
// also call this on any additional surv rate groups
// Reset survivors SurvRateGroup.
}
}
// We only need to do this here as the policy will only be applied
// to the GC we're about to start. So, no point in calculating this
// every time we calculate / recalculate the target young length.
_stop_world_start = 0.0;
_last_gc_was_young = false;
// do that for any other surv rate groups
}
_during_marking = true;
}
_during_marking = false;
}
}
}
_last_young_gc = true;
_in_marking_window = false;
}
if (_stop_world_start > 0.0) {
}
}
return false;
}
if (gcs_are_young()) {
"request concurrent cycle initiation",
ergo_format_reason("occupancy higher than threshold")
ergo_format_byte("occupancy")
ergo_format_byte("allocation request")
ergo_format_byte_perc("threshold")
ergo_format_str("source"),
(double) InitiatingHeapOccupancyPercent,
source);
return true;
} else {
"do not request concurrent cycle initiation",
ergo_format_reason("still doing mixed collections")
ergo_format_byte("occupancy")
ergo_format_byte("allocation request")
ergo_format_byte_perc("threshold")
ergo_format_str("source"),
(double) InitiatingHeapOccupancyPercent,
source);
}
}
return false;
}
// Anything below that is considered to be zero
void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
"otherwise, the subtraction below does not make sense");
bool last_pause_included_initial_mark = false;
#ifndef PRODUCT
if (G1YoungSurvRateVerbose) {
// do that for any other surv rate groups too
}
#endif // PRODUCT
// Note: this might have already been set, if during the last
// pause we decided to start a cycle but at the beginning of
// this pause we decided to postpone it. That's OK.
}
end_time_sec, false);
double survival_fraction =
(double)surviving_bytes/
(double)_collection_set_bytes_used_before;
if (update_stats) {
// this is where we update the allocation rate of the application
double app_time_ms =
if (app_time_ms < MIN_TIMER_GRANULARITY) {
// This usually happens due to the timer not having the required
// granularity. Some Linuxes are the usual culprits.
// We'll just set it to something (arbitrarily) small.
app_time_ms = 1.0;
}
// We maintain the invariant that all objects allocated by mutator
// threads will be allocated out of eden regions. So, we can use
// the eden region number allocated since the previous GC to
// calculate the application's allocate rate. The only exception
// to that is humongous objects that are allocated separately. But
// given that humongous object allocations do not really affect
// either the pause's duration nor when the next pause will take
// place we can safely ignore them here.
double interval_ms =
if (recent_avg_pause_time_ratio() < 0.0 ||
#ifndef PRODUCT
// Dump info to allow post-facto debugging
// In debug mode, terminate the JVM if the user wants to debug at this point.
#endif // !PRODUCT
// Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
// CR 6902692 by redoing the manner in which the ratio is incrementally computed.
if (_recent_avg_pause_time_ratio < 0.0) {
_recent_avg_pause_time_ratio = 0.0;
} else {
_recent_avg_pause_time_ratio = 1.0;
}
}
}
bool new_in_marking_window_im = false;
if (during_initial_mark_pause()) {
new_in_marking_window = true;
new_in_marking_window_im = true;
}
if (_last_young_gc) {
// This is supposed to be the "last young GC" before we start
// doing mixed GCs. Here we decide whether to start mixed GCs or not.
if (!last_pause_included_initial_mark) {
if (next_gc_should_be_mixed("start mixed GCs",
"do not start mixed GCs")) {
set_gcs_are_young(false);
}
} else {
"do not start mixed GCs",
ergo_format_reason("concurrent cycle is about to start"));
}
_last_young_gc = false;
}
if (!_last_gc_was_young) {
// This is a mixed GC. Here we decide whether to continue doing
// mixed GCs or not.
if (!next_gc_should_be_mixed("continue mixed GCs",
"do not continue mixed GCs")) {
set_gcs_are_young(true);
}
}
// do that for any other surv rate groups
if (update_stats) {
if (_pending_cards > 0) {
}
if (cards_scanned > 10) {
if (_last_gc_was_young) {
} else {
}
}
if (_max_rs_lengths > 0) {
double cards_per_entry_ratio =
(double) cards_scanned / (double) _max_rs_lengths;
if (_last_gc_was_young) {
} else {
}
}
// This is defensive. For a while _max_rs_lengths could get
// smaller than _recorded_rs_lengths which was causing
// rs_length_diff to get very large and mess up the RSet length
// predictions. The reason was unsafe concurrent updates to the
// _inc_cset_recorded_rs_lengths field which the code below guards
// against (see CR 7118202). This bug has now been fixed (see CR
// 7119027). However, I'm still worried that
// _inc_cset_recorded_rs_lengths might still end up somewhat
// inaccurate. The concurrent refinement thread calculates an
// RSet's length concurrently with other CR threads updating it
// which might cause it to calculate the length incorrectly (if,
// say, it's in mid-coarsening). So I'll leave in the defensive
// conditional below just in case.
if (_max_rs_lengths > _recorded_rs_lengths) {
}
if (copied_bytes > 0) {
if (_in_marking_window) {
} else {
}
}
if (young_cset_region_length() > 0) {
(double) young_cset_region_length());
}
if (old_cset_region_length() > 0) {
(double) old_cset_region_length());
}
if (_collection_set_bytes_used_before > 0) {
survival_ratio = (double) _bytes_copied_during_gc /
(double) _collection_set_bytes_used_before;
}
}
// Note that _mmu_tracker->max_gc_time() returns the time in seconds.
double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
}
byte_size_in_proper_unit((double)(bytes)), \
}
}
}
double update_rs_processed_buffers,
double goal_ms) {
if (G1UseAdaptiveConcRefinement) {
int g = cg1r->green_zone();
if (update_rs_time > goal_ms) {
g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
} else {
}
}
// Change the refinement threads params
cg1r->set_green_zone(g);
cg1r->yellow_zone());
// Change the barrier params
}
} else {
}
}
double
return
}
double
if (gcs_are_young()) {
} else {
}
}
else {
}
return bytes_to_copy;
}
double
bool for_young_gc) {
// Predicting the number of cards is based on which type of GC
// we're predicting for.
if (for_young_gc) {
} else {
}
double region_elapsed_time_ms =
// The prediction of the "other" time for this region is based
// upon the region type and NOT the GC type.
} else {
}
return region_elapsed_time_ms;
}
void
}
}
double elapsed_ms) {
}
if (recent_gc_overhead > threshold) {
// We will double the existing space, or take
// G1ExpandByPercentOfAvailable % of the available expansion
// space, whichever is smaller, bounded below by a minimum
// expansion (unless that's all that's left.)
"attempt heap expansion",
ergo_format_reason("recent GC overhead higher than "
"threshold after GC")
ergo_format_perc("recent GC overhead")
ergo_format_perc("threshold")
ergo_format_byte("uncommitted")
ergo_format_byte_perc("calculated expansion amount"),
expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
return expand_bytes;
} else {
return 0;
}
}
}
#ifndef PRODUCT
// add this call for any other surv rate groups
#endif // PRODUCT
}
#ifndef PRODUCT
// for debugging, bit of a hack...
static char*
return buffer;
}
#endif // PRODUCT
switch (purpose) {
case GCAllocForSurvived:
return _max_survivor_regions;
case GCAllocForTenured:
return REGIONS_UNLIMITED;
default:
return REGIONS_UNLIMITED;
};
}
if (GCLockerEdenExpansionPercent > 0) {
// We use ceiling so that if expansion_region_num_d is > 0.0 (but
// less than 1.0) we'll get 1.
} else {
}
}
// Calculates survivor space parameters.
double max_survivor_regions_d =
(double) _young_list_target_length / (double) SurvivorRatio;
// We use ceiling so that if max_survivor_regions_d is > 0.0 (but
// smaller than 1.0) we'll get 1.
}
if (!during_cycle) {
"request concurrent cycle initiation",
ergo_format_reason("requested by GC cause")
ergo_format_str("GC cause"),
return true;
} else {
"do not request concurrent cycle initiation",
ergo_format_reason("concurrent cycle already in progress")
ergo_format_str("GC cause"),
return false;
}
}
void
// We are about to decide on whether this pause will be an
// initial-mark pause.
// First, during_initial_mark_pause() should not be already set. We
// will set it here if we have to. However, it should be cleared by
// the end of the pause (it's only set for the duration of an
// initial-mark pause).
if (initiate_conc_mark_if_possible()) {
// We had noticed on a previous pause that the heap occupancy has
// gone over the initiating threshold and we should start a
// concurrent marking cycle. So we might initiate one.
if (!during_cycle) {
// The concurrent marking thread is not "during a cycle", i.e.,
// it has completed the last one. So we can go ahead and
// initiate a new cycle.
// We do not allow mixed GCs during marking.
if (!gcs_are_young()) {
set_gcs_are_young(true);
"end mixed GCs",
ergo_format_reason("concurrent cycle is about to start"));
}
// And we can now clear initiate_conc_mark_if_possible() as
// we've already acted on it.
"initiate concurrent cycle",
ergo_format_reason("concurrent cycle initiation requested"));
} else {
// The concurrent marking thread is still finishing up the
// previous cycle. If we start one right now the two cycles
// overlap. In particular, the concurrent marking thread might
// be in the process of clearing the next marking bitmap (which
// we will use for the next cycle if we start one). Starting a
// cycle now will be bad given that parts of the marking
// information might get cleared by the marking thread. And we
// cannot wait for the marking thread to finish the cycle as it
// periodically yields while clearing the next marking bitmap
// and, if it's in a yield point, it's waiting for us to
// finish. So, at this point we will not start a cycle and we'll
// let the concurrent marking thread complete the last one.
"do not initiate concurrent cycle",
ergo_format_reason("concurrent cycle already in progress"));
}
}
}
public:
// We only include humongous regions in collection
// sets when concurrent mark shows that their contained object is
// unreachable.
// Do we have any marking information for this region?
if (r->is_marked()) {
// We will skip any region that's currently used as an old GC
// alloc region (we should not consider those for collection
// before we fill them up).
_hrSorted->add_region(r);
}
}
return false;
}
};
public:
uint chunk_size) :
// Do we have any marking information for this region?
if (r->is_marked()) {
// We will skip any region that's currently used as an old GC
// alloc region (we should not consider those for collection
// before we fill them up).
}
}
return false;
}
};
public:
AbstractGangTask("ParKnownGarbageTask"),
// Back to zero for the claim value.
}
};
void
if (G1CollectedHeap::use_parallel_gc_threads()) {
// The use of MinChunkSize = 8 in the original code
// causes some assertion failures when the total number of
// region is less than 8. The code here tries to fix that.
// Should the original code also be fixed?
if (no_of_gc_threads > 0) {
} else {
assert(no_of_gc_threads > 0,
"The active gc workers should be greater than 0");
// In a product build do something reasonable to avoid a crash.
WorkUnit =
}
WorkUnit);
(int) WorkUnit);
"sanity check");
} else {
}
}
// Add the heap region at the head of the non-incremental collection set
hr->set_in_collection_set(true);
_old_cset_region_length += 1;
}
// Initialize the per-collection-set information
_inc_cset_max_finger = 0;
}
// The two "main" fields, _inc_cset_recorded_rs_lengths and
// _inc_cset_predicted_elapsed_time_ms, are updated by the thread
// that adds a new region to the CSet. Further updates by the
// concurrent refinement thread that samples the young RSet lengths
// are accumulated in the *_diffs fields. Here we add the diffs to
// the "main" fields.
if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
} else {
// This is defensive. The diff should in theory be always positive
// as RSets can only grow between GCs. However, given that we
// sample their size concurrently with other threads updating them
// it's possible that we might get the wrong size back, which
// could make the calculations somewhat inaccurate.
if (_inc_cset_recorded_rs_lengths >= diffs) {
} else {
}
}
}
// This routine is used when:
// * adding survivor regions to the incremental cset at the end of an
// evacuation pause,
// * adding the current allocation region to the incremental cset
// when it is retired, and
// * updating existing policy information for a region in the
// incremental cset via young list RSet sampling.
// Therefore this routine may be called at a safepoint by the
// VM thread, or in-between safepoints by mutator threads (when
// retiring the current allocation region) or a concurrent
// refine thread (RSet sampling).
// Cache the values we have added to the aggregated information
// in the heap region in case we have to remove this region from
// the incremental collection set, or it is updated by the
// rset sampling code
}
// Update the CSet information that is dependent on the new RS length
"should not be at a safepoint");
// We could have updated _inc_cset_recorded_rs_lengths and
// _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
// that atomically, as this code is executed by a concurrent
// refinement thread, potentially concurrently with a mutator thread
// allocating a new region and also updating the same fields. To
// avoid the atomic operations we accumulate these updates on two
// separate fields (*_diffs) and we'll just add them to the "main"
// fields at the start of a GC.
}
// information in the heap region here (before the region gets added
// to the collection set). An individual heap region's cached values
// are calculated, aggregated with the policy collection set info,
// and cached in the heap region here (initially) and (subsequently)
// by the Young List sampling code.
hr->set_in_collection_set(true);
}
// Add the region at the RHS of the incremental cset
// We should only ever be appending survivors at the end of a pause
// Do the 'common' stuff
// Now add the region at the right hand side
if (_inc_cset_tail == NULL) {
_inc_cset_head = hr;
} else {
}
_inc_cset_tail = hr;
}
// Add the region to the LHS of the incremental cset
// Survivors should be added to the RHS at the end of a pause
// Do the 'common' stuff
// Add the region at the left hand side
if (_inc_cset_head == NULL) {
_inc_cset_tail = hr;
}
_inc_cset_head = hr;
}
#ifndef PRODUCT
}
}
#endif // !PRODUCT
// Returns the given amount of reclaimable bytes (that represents
// the amount of reclaimable space still to be collected) as a
// percentage of the current heap capacity.
}
const char* false_action_str) {
if (cset_chooser->is_empty()) {
ergo_format_reason("candidate old regions not available"));
return false;
}
// Is the amount of uncollected reclaimable space above G1HeapWastePercent?
if (reclaimable_perc <= threshold) {
ergo_format_reason("reclaimable percentage not over threshold")
ergo_format_region("candidate old regions")
ergo_format_byte_perc("reclaimable")
ergo_format_perc("threshold"),
return false;
}
ergo_format_reason("candidate old regions available")
ergo_format_region("candidate old regions")
ergo_format_byte_perc("reclaimable")
ergo_format_perc("threshold"),
return true;
}
// The min old CSet region bound is based on the maximum desired
// number of mixed GCs after a cycle. I.e., even if some old regions
// look expensive, we should add them to the CSet anyway to make
// sure we go through the available old regions in no more than the
// maximum desired number of mixed GCs.
//
// The calculation is based on the number of marked regions we added
// to the CSet chooser in the first place, not how many remain, so
// that the result is the same during all mixed GCs that follow a cycle.
// emulate ceiling
result += 1;
}
}
// The max old CSet region bound is based on the threshold expressed
// as a percentage of the heap size. I.e., it should bound the
// number of old regions added to the CSet irrespective of how many
// of them are available.
// emulate ceiling
result += 1;
}
}
void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
err_msg("target_pause_time_ms = %1.6lf should be positive",
"start choosing CSet",
ergo_format_size("_pending_cards")
ergo_format_ms("predicted base time")
ergo_format_ms("remaining time")
ergo_format_ms("target pause time"),
_last_gc_was_young = gcs_are_young() ? true : false;
if (_last_gc_was_young) {
} else {
}
// The young list is laid with the survivor regions from the previous
// pause are appended to the RHS of the young list, i.e.
// [Newly Young Regions ++ Survivors from last pause].
}
// Clear the fields that point to the survivor list - they are all young now.
"add young regions to CSet",
ergo_format_region("eden")
ergo_format_region("survivors")
ergo_format_ms("predicted young region time"),
// The number of recorded young regions is the incremental
// collection set's current size
phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
// Set the start of the non-young choice time.
if (!gcs_are_young()) {
cset_chooser->verify();
if (old_cset_region_length() >= max_old_cset_length) {
// Added maximum number of old regions to the CSet.
"finish adding old regions to CSet",
ergo_format_reason("old CSet region num reached max")
ergo_format_region("old")
ergo_format_region("max"),
break;
}
// Stop adding regions if the remaining reclaimable space is
// not above G1HeapWastePercent.
if (reclaimable_perc <= threshold) {
// We've added enough old regions that the amount of uncollected
// reclaimable space is at or below the waste threshold. Stop
// adding old regions to the CSet.
"finish adding old regions to CSet",
ergo_format_reason("reclaimable percentage not over threshold")
ergo_format_region("old")
ergo_format_region("max")
ergo_format_byte_perc("reclaimable")
ergo_format_perc("threshold"),
break;
}
if (check_time_remaining) {
if (predicted_time_ms > time_remaining_ms) {
// Too expensive for the current CSet.
if (old_cset_region_length() >= min_old_cset_length) {
// We have added the minimum number of old regions to the CSet,
// we are done with this CSet.
"finish adding old regions to CSet",
ergo_format_reason("predicted time is too high")
ergo_format_ms("predicted time")
ergo_format_ms("remaining time")
ergo_format_region("old")
ergo_format_region("min"),
break;
}
// We'll add it anyway given that we haven't reached the
// minimum number of old regions.
expensive_region_num += 1;
}
} else {
if (old_cset_region_length() >= min_old_cset_length) {
// In the non-auto-tuning case, we'll finish adding regions
// to the CSet if we reach the minimum.
"finish adding old regions to CSet",
ergo_format_reason("old CSet region num reached min")
ergo_format_region("old")
ergo_format_region("min"),
break;
}
}
// We will add this region to the CSet.
}
"finish adding old regions to CSet",
ergo_format_reason("candidate old regions not available"));
}
if (expensive_region_num > 0) {
// We print the information once here at the end, predicated on
// whether we added any apparently expensive regions or not, to
// avoid generating output per region.
"added expensive regions to CSet",
ergo_format_reason("old CSet region num not reached min")
ergo_format_region("old")
ergo_format_region("expensive")
ergo_format_region("min")
ergo_format_ms("remaining time"),
}
cset_chooser->verify();
}
"finish choosing CSet",
ergo_format_region("eden")
ergo_format_region("survivors")
ergo_format_region("old")
ergo_format_ms("predicted pause time")
ergo_format_ms("target pause time"),
phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
}
if(TraceGen0Time) {
}
}
if(TraceGen0Time) {
}
}
if(TraceGen0Time) {
}
}
if(TraceGen0Time) {
}
}
if(TraceGen0Time) {
}
}
}
}
if (!TraceGen0Time) {
return;
}
if (_young_pause_num == 0 && _mixed_pause_num == 0) {
} else {
}
}
if (TraceGen1Time) {
}
}
if (!TraceGen1Time) {
return;
}
if (_all_full_gc_times.num() > 0) {
}
}