/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/mutexLocker.hpp"
#define PREDICTIONS_VERBOSE 0
// <NEW PREDICTION>
// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results
// all the same
static double rs_length_diff_defaults[] = {
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};
static double cost_per_card_ms_defaults[] = {
0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};
// all the same
static double fully_young_cards_per_entry_ratio_defaults[] = {
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};
static double cost_per_entry_ms_defaults[] = {
0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};
static double cost_per_byte_ms_defaults[] = {
0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};
// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};
static double young_other_cost_per_region_ms_defaults[] = {
0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};
static double non_young_other_cost_per_region_ms_defaults[] = {
1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};
// </NEW PREDICTION>
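// A minimal sketch (not part of the original file) of how the tables
// above are consumed: all eight arrays share the same thread-count
// bucketing, mirroring the index computation in the constructor below.
static double prediction_default(const double* defaults,
                                 unsigned int n_gc_threads) {
  int index;
  if (n_gc_threads == 0)     index = 0;  // serial: first bucket
  else if (n_gc_threads > 8) index = 7;  // clamp to the last bucket
  else                       index = (int) n_gc_threads - 1;
  return defaults[index];
}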
G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),
_n_pauses(0),
_all_pause_times_ms(new NumberSeq()),
_stop_world_start(0.0),
_all_stop_world_times_ms(new NumberSeq()),
_all_yield_times_ms(new NumberSeq()),
_all_mod_union_times_ms(new NumberSeq()),
#ifndef PRODUCT
_cur_clear_ct_time_ms(0.0),
_min_clear_cc_time_ms(-1.0),
_max_clear_cc_time_ms(-1.0),
_cur_clear_cc_time_ms(0.0),
_cum_clear_cc_time_ms(0.0),
_num_cc_clears(0L),
#endif
_aux_num(10),
_cur_aux_start_times_ms(new double[_aux_num]),
_cur_aux_times_ms(new double[_aux_num]),
_cur_aux_times_set(new bool[_aux_num]),
// <NEW PREDICTION>
new TruncatedSeq(TruncatedSeqLength)),
new TruncatedSeq(TruncatedSeqLength)),
_pause_time_target_ms((double) MaxGCPauseMillis),
// </NEW PREDICTION>
_in_young_gc_mode(false),
_full_young_gcs(true),
_during_marking(false),
_in_marking_window(false),
_in_marking_window_im(false),
_known_garbage_ratio(0.0),
_num_markings(0),
_n_marks(0),
_all_full_gc_times_ms(new NumberSeq()),
// G1PausesBtwnConcMark defaults to -1
// so the hack is to do the cast QQQ FIXME
_initiate_conc_mark_if_possible(false),
_during_initial_mark_pause(false),
_should_revert_to_full_young_gcs(false),
_last_full_young_gc(false),
// Incremental CSet attributes
_inc_cset_size(0),
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER
// add here any more surv rate groups
_survivors_age_table(true),
_gc_overhead_perc(0.0)
{
// Set up the region size and associated fields. Given that the
// policy is created before the heap, we have to set this up here,
// so it's done as soon as possible.
  // Verify PLAB sizes
  const size_t region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    // (reconstructed check; the exact message format is assumed)
    jio_snprintf(buffer, sizeof(buffer),
                 "%sPLABSize should be at most " SIZE_FORMAT,
                 OldPLABSize > region_size ? "Old" : "Young",
                 region_size);
    vm_exit_during_initialization(buffer);
  }
_par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
_par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
_par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];
_par_last_update_rs_times_ms = new double[_parallel_gc_threads];
_par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
_par_last_scan_rs_times_ms = new double[_parallel_gc_threads];
_par_last_obj_copy_times_ms = new double[_parallel_gc_threads];
_par_last_termination_times_ms = new double[_parallel_gc_threads];
_par_last_termination_attempts = new double[_parallel_gc_threads];
_par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
// start conservatively
// <NEW PREDICTION>
  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = (int) ParallelGCThreads - 1;
// </NEW PREDICTION>
// Below, we might need to calculate the pause time target based on
// the pause interval. When we do so we are going to give G1 maximum
// flexibility and allow it to do pauses when it needs to. So, we'll
// arrange for the pause interval to be pause time target + 1 to
// ensure that a) the pause time target is maximized with respect to
// the pause interval and b) we maintain the invariant that pause
// time target < pause interval. If the user does not want this
// maximum flexibility, they will have to set the pause interval
// explicitly.
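//
// For example (illustrative numbers): -XX:MaxGCPauseMillis=200 with
// GCPauseIntervalMillis left unset results in a 201ms interval, so the
// 200ms target sits just below it, exactly as the invariant requires.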
// First make sure that, if either parameter is set, its value is
// reasonable.
if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
if (MaxGCPauseMillis < 1) {
vm_exit_during_initialization("MaxGCPauseMillis should be "
"greater than 0");
}
}
if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
if (GCPauseIntervalMillis < 1) {
vm_exit_during_initialization("GCPauseIntervalMillis should be "
"greater than 0");
}
}
// Then, if the pause time target parameter was not set, set it to
// the default value.
if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
} else {
// We do not allow the pause interval to be set without the
// pause time target
vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
"without setting MaxGCPauseMillis");
}
}
// Then, if the interval parameter was not set, set it according to
// the pause time target (this will also deal with the case when the
// pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }
// Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }
// start conservatively (around 50ms is about right)
// if G1FixedSurvivorSpaceSize is 0 which means the size is not
// fixed, then _max_survivor_regions will be calculated at
// calculate_young_list_target_length during initialization
  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
}
// Increment "i", mod "len" (wrapper signature assumed; it was elided here)
static void inc_mod(int& i, int len) {
  i++; if (i == len) i = 0;
}
void G1CollectorPolicy::initialize_flags() {
if (SurvivorRatio < 1) {
vm_exit_during_initialization("Invalid survivor ratio specified");
}
}
// The easiest way to deal with the parsing of the NewSize /
// MaxNewSize / etc. parameters is to re-use the code in the
// TwoGenerationCollectorPolicy class. This is similar to what
// ParallelScavenge does with its GenerationSizer class (see
// ParallelScavengeHeap::initialize()). We might change this in the
// future, but it's a good start.
class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
private:
  // (the elided signatures below are reconstructed from context)
  size_t size_to_region_num(size_t bytes) {
    return MAX2((size_t) 1, bytes / HeapRegion::GrainBytes);
  }

public:
  G1YoungGenSizer() {
    initialize_flags();
    initialize_size_info();
  }
  size_t min_young_region_num() {
    return size_to_region_num(_min_gen0_size);
  }
  size_t initial_young_region_num() {
    return size_to_region_num(_initial_gen0_size);
  }
  size_t max_young_region_num() {
    return size_to_region_num(_max_gen0_size);
  }
};
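// Illustrative use of the sizer above (a sketch; the code that actually
// consumes these values was elided here): it exists purely to translate
// NewSize / MaxNewSize into region counts, e.g.
//
//   G1YoungGenSizer sizer;
//   size_t min_regions  = sizer.min_young_region_num();
//   size_t init_regions = sizer.initial_young_region_num();
//   size_t max_regions  = sizer.max_young_region_num();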
void G1CollectorPolicy::init() {
// Set aside an initial future to_space.
if (G1Gen) {
_in_young_gc_mode = true;
if (UseAdaptiveSizePolicy) {
} else {
set_adaptive_young_list_length(false);
}
} else {
_in_young_gc_mode = false;
}
// We may immediately start allocating regions and placing them on the
// collection set list. Initialize the per-collection set info
}
// Create the jstat counters for the policy.
{
}
if (!adaptive_young_list_length())
return;
double alloc_rate_ms = predict_alloc_rate_ms();
}
}
if (adaptive_young_list_length()) {
} else {
if (full_young_gcs())
else
}
}
if (full_young_gcs() && _free_regions_at_end_of_collection > 0) {
// we are in fully-young mode and there are free regions in the heap
    double survivor_regions_evac_time = predict_survivor_regions_evac_time();
// the result
size_t final_young_length = 0;
// if we're still under the pause target...
if (base_time_ms <= target_pause_time_ms) {
// We make sure that the shortest young length that makes sense
// fits within the target pause time.
// The shortest young length will fit within the target pause time;
// we'll now check whether the absolute maximum number of young
// regions will fit in the target pause time. If not, we'll do
// a binary search between min_young_length and max_young_length
if (max_young_length > min_young_length) {
// Let's check if the initial max young length will fit within the
// target pause. If so then there is no need to search for a maximal
// young length - we'll return the initial maximum
// The maximum young length will satisfy the target pause time.
// We are done so set min young length to this maximum length.
// The code after the loop will then set final_young_length using
// the value cached in the minimum length.
} else {
// The maximum possible number of young regions will not fit within
// the target pause time so let's search....
while (max_young_length > min_young_length) {
// The current max young length will fit within the target
// pause time. Note we do not exit the loop here. By setting
// min = max, and then increasing the max below means that
// we will continue searching for an upper bound in the
// range [max..max+diff]
}
}
// the above loop found a maximal young length that will fit
// within the target pause time.
}
}
}
}
// and we're done!
// we should have at least one region in the target young length
  // let's keep an eye on how long we spend on this calculation
  // right now, I assume that we'll print it when we need it; we
  // should really add it to the breakdown of a pause
#ifdef TRACE_CALC_YOUNG_LENGTH
// leave this in for debugging, just in case
#endif // TRACE_CALC_YOUNG_LENGTH
    // bummer; this means that, if we do a pause when the maximal
    // length dictates, we'll violate the pause spacing target (the
    // min length was calculated based on the application's current
    // alloc rate);
// so, we have to bite the bullet, and allocate the minimum
// number. We'll violate our target, but we just can't meet it.
#ifdef TRACE_CALC_YOUNG_LENGTH
// leave this in for debugging, just in case
#endif // TRACE_CALC_YOUNG_LENGTH
}
} else {
// we are in a partially-young mode or we've run out of regions (due
// to evacuation failure)
#ifdef TRACE_CALC_YOUNG_LENGTH
// leave this in for debugging, just in case
#endif // TRACE_CALC_YOUNG_LENGTH
// we'll do the pause as soon as possible by choosing the minimum
}
}
// This is used by: calculate_young_list_target_length(rs_length). It
// returns true iff:
//   the predicted pause time for the given young list will not overflow
//   the target pause time
// and:
//   the predicted amount of surviving data will not overflow the
//   amount of free space available for survivor regions.
//
bool
G1CollectorPolicy::predict_will_fit(size_t young_length,
                                    double base_time_ms,
                                    size_t init_free_regions,
                                    double target_pause_time_ms) {
if (young_length >= init_free_regions)
// end condition 1: not enough space for the young regions
return false;
double accum_surv_rate_adj = 0.0;
double accum_surv_rate =
double young_other_time_ms =
double pause_time_ms =
// end condition 2: over the target pause time
return false;
// end condition 3: out of to-space (conservatively)
return false;
// success!
return true;
}
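// The binary search in calculate_young_list_target_length() uses this
// predicate as its test, keeping the largest young_length for which it
// returns true.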
double G1CollectorPolicy::predict_survivor_regions_evac_time() {
double survivor_regions_evac_time = 0.0;
  for (HeapRegion * r = _recorded_survivor_head;
       r != NULL;  // walk the recorded survivor list to its end
       r = r->get_next_young_region()) {
survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
}
return survivor_regions_evac_time;
}
void G1CollectorPolicy::check_prediction_validity() {
if (rs_lengths > _rs_lengths_prediction) {
// add 10% to avoid having to recalculate often
}
}
HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
guarantee(false, "Not using this policy feature yet.");
return NULL;
}
// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
guarantee(false, "Not using this policy feature yet.");
return NULL;
}
#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
return
// also call verify_young_ages on any additional surv rate groups
}
bool
bool ret = true;
int prev_age = -1;
ret = false;
}
if (surv_rate_group == group) {
if (age < 0) {
ret = false;
}
ret = false;
}
}
}
return ret;
}
#endif // PRODUCT
// Release the future to-space so that it is available for compaction into.
}
void G1CollectorPolicy::record_full_collection_end() {
// Consider this like a collection pause for the purposes of allocation
// since last pause.
  // "Nuke" the heuristics that control the fully/partially young GC
  // transitions and make sure we start with fully young GCs after the
// Full GC.
set_full_young_gcs(true);
_last_full_young_gc = false;
_should_revert_to_full_young_gcs = false;
_known_garbage_bytes = 0;
_known_garbage_ratio = 0.0;
_in_marking_window = false;
_in_marking_window_im = false;
// also call this on any additional surv rate groups
// Reset survivors SurvRateGroup.
}
}
}
void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}
void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
if (PrintGCDetails) {
if (in_young_gc_mode())
}
"sanity");
_stop_world_start = 0.0;
#ifdef DEBUG
// initialise these to something well known so that we can spot
// if they are not set properly
for (int i = 0; i < _parallel_gc_threads; ++i) {
_par_last_gc_worker_start_times_ms[i] = -1234.0;
_par_last_ext_root_scan_times_ms[i] = -1234.0;
_par_last_mark_stack_scan_times_ms[i] = -1234.0;
_par_last_update_rs_times_ms[i] = -1234.0;
_par_last_update_rs_processed_buffers[i] = -1234.0;
_par_last_scan_rs_times_ms[i] = -1234.0;
_par_last_obj_copy_times_ms[i] = -1234.0;
_par_last_termination_times_ms[i] = -1234.0;
_par_last_termination_attempts[i] = -1234.0;
_par_last_gc_worker_end_times_ms[i] = -1234.0;
}
#endif
for (int i = 0; i < _aux_num; ++i) {
_cur_aux_times_ms[i] = 0.0;
_cur_aux_times_set[i] = false;
}
_satb_drain_time_set = false;
if (in_young_gc_mode())
_last_young_gc_full = false;
// do that for any other surv rate groups
}
}
}
void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
                                                    mark_init_elapsed_time_ms) {
_during_marking = true;
}
}
_during_marking = false;
}
}
}
void
}
void
if (G1PolicyVerbose > 0)
}
// The important thing about this is that it includes "os::elapsedTime".
// We did a marking, so reset the "since_last_mark" variables.
double considerConcMarkCost = 1.0;
// If there are available processors, concurrent activity is free...
os::active_processor_count()) {
considerConcMarkCost = 0.0;
}
}
void
if (in_young_gc_mode()) {
_should_revert_to_full_young_gcs = false;
_last_full_young_gc = true;
_in_marking_window = false;
if (adaptive_young_list_length())
}
}
void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _all_yield_times_ms->add(yield_ms);
  }
}
void G1CollectorPolicy::record_concurrent_pause_end() {
}
}
}
// Sums "n" entries of the "N"-slot circular buffer "sums", starting at
// index "start" (wrapper signature assumed; it was elided here).
template <class T>
static T sum_of(T* sums, int start, int n, int N) {
  T sum = (T)0;
  for (int i = 0; i < n; i++) {
    int j = (start + i) % N;
    sum += sums[j];
  }
  return sum;
}
const char* str,
double* data,
bool summary) {
double total = 0.0;
int j;
for (j = 0; j < level; ++j)
for (uint i = 0; i < ParallelGCThreads; ++i) {
}
if (summary) {
for (j = 0; j < level; ++j)
}
}
const char* str,
double* data,
bool summary) {
double total = 0.0;
int j;
for (j = 0; j < level; ++j)
for (uint i = 0; i < ParallelGCThreads; ++i) {
}
if (summary) {
for (j = 0; j < level; ++j)
}
}
const char* str,
double value) {
for (int j = 0; j < level; ++j)
}
const char* str,
int value) {
for (int j = 0; j < level; ++j)
}
if (G1CollectedHeap::use_parallel_gc_threads()) {
double ret = 0.0;
    for (uint i = 0; i < ParallelGCThreads; ++i)
      ret += data[i];
    return ret / (double) ParallelGCThreads;
} else {
return data[0];
}
}
if (G1CollectedHeap::use_parallel_gc_threads()) {
return ret;
} else {
return data[0];
}
}
if (G1CollectedHeap::use_parallel_gc_threads()) {
double sum = 0.0;
    for (uint i = 0; i < ParallelGCThreads; i++)
      sum += data[i];
    return sum;
} else {
return data[0];
}
}
double* data2) {
if (G1CollectedHeap::use_parallel_gc_threads()) {
}
}
return ret;
}
// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001
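// i.e. measured durations under 0.1us (below the resolution of some
// platform timers) are clamped rather than fed into the predictors; see
// the app_time_ms and elapsed_ms checks below.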
void G1CollectorPolicy::record_collection_pause_end() {
double elapsed_ms = _last_pause_time_ms;
bool last_pause_included_initial_mark = false;
#ifndef PRODUCT
if (G1YoungSurvRateVerbose) {
// do that for any other surv rate groups too
}
#endif // PRODUCT
if (in_young_gc_mode()) {
if (cur_used_bytes > min_used_targ &&
// Note: this might have already been set, if during the last
// pause we decided to start a cycle but at the beginning of
// this pause we decided to postpone it. That's OK.
}
}
}
end_time_sec, false);
"Negative RS size?");
// This assert is exempted when we're doing parallel collection pauses,
// because the fragmentation caused by the parallel GC allocation buffers
// can lead to more memory being used during collection than was used
// before. Best leave this out until the fragmentation problem is fixed.
// Pauses in which evacuation failed can also lead to negative
// collections, since no space is reclaimed from a region containing an
// object whose evacuation failed.
// Further, we're now always doing parallel collection. But I'm still
// leaving this here as a placeholder for a more precise assertion later.
// (DLD, 10/05.)
|| _g1->evacuation_failed()
"Negative collection");
double survival_fraction =
(double)surviving_bytes/
(double)_collection_set_bytes_used_before;
_n_pauses++;
if (update_stats) {
// We exempt parallel collection from this check because Alloc Buffer
// fragmentation can produce negative collections. Same with evac
// failure.
// Further, we're now always doing parallel collection. But I'm still
// leaving this here as a placeholder for a more precise assertion later.
    // (DLD, 10/05.)
|| _g1->evacuation_failed()
"Or else negative collection!");
// this is where we update the allocation rate of the application
double app_time_ms =
if (app_time_ms < MIN_TIMER_GRANULARITY) {
// This usually happens due to the timer not having the required
// granularity. Some Linuxes are the usual culprits.
// We'll just set it to something (arbitrarily) small.
app_time_ms = 1.0;
}
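    // The update itself was elided here; conceptually it is something
    // like (a sketch, with the field names assumed):
    //   double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    //   _alloc_rate_ms_seq->add(alloc_rate_ms);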
double interval_ms =
if (recent_avg_pause_time_ratio() < 0.0 ||
#ifndef PRODUCT
// Dump info to allow post-facto debugging
// In debug mode, terminate the JVM if the user wants to debug at this point.
#endif // !PRODUCT
// Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
// CR 6902692 by redoing the manner in which the ratio is incrementally computed.
if (_recent_avg_pause_time_ratio < 0.0) {
_recent_avg_pause_time_ratio = 0.0;
} else {
_recent_avg_pause_time_ratio = 1.0;
}
}
}
if (G1PolicyVerbose > 1) {
}
double update_rs_processed_buffers =
double parallel_other_time = _cur_collection_par_time_ms -
if (update_stats) {
if (_satb_drain_time_set)
else
if (parallel) {
}
}
if (G1PolicyVerbose > 1) {
" CH Strong: %10.6f ms (avg: %10.6f ms)\n"
" G1 Strong: %10.6f ms (avg: %10.6f ms)\n"
" Evac: %10.6f ms (avg: %10.6f ms)\n"
" ET-RS: %10.6f ms (avg: %10.6f ms)\n"
" |RS|: " SIZE_FORMAT,
rs_size);
" survival : %6.2f%% (%6.2f%% avg)",
survival_fraction*100.0,
recent_avg_survival_fraction()*100.0);
recent_avg_pause_time_ratio() * 100.0);
}
double other_time_ms = elapsed_ms;
if (_satb_drain_time_set) {
}
if (parallel) {
} else {
}
if (PrintGCDetails) {
elapsed_ms / 1000.0);
if (_satb_drain_time_set) {
}
if (_last_satb_drain_processed_buffers >= 0) {
}
if (parallel) {
_par_last_gc_worker_start_times_ms, false);
_par_last_termination_attempts, true);
_par_last_gc_worker_end_times_ms, false);
} else {
(int)update_rs_processed_buffers);
}
#ifndef PRODUCT
if (_num_cc_clears > 0) {
}
#endif
for (int i = 0; i < _aux_num; ++i) {
if (_cur_aux_times_set[i]) {
char buffer[96];
}
}
}
if (PrintGCDetails)
if (PrintGC || PrintGCDetails)
if (PrintGCDetails)
if (update_stats) {
}
for (int i = 0; i < _aux_num; ++i)
if (_cur_aux_times_set[i])
// Reset marks-between-pauses counter.
// Update the efficiency-since-mark vars.
if (elapsed_ms < MIN_TIMER_GRANULARITY) {
// This usually happens due to the timer not having the required
// granularity. Some Linuxes are the usual culprits.
// We'll just set it to something (arbitrarily) small.
proc_ms = 1.0;
}
bool new_in_marking_window_im = false;
if (during_initial_mark_pause()) {
new_in_marking_window = true;
new_in_marking_window_im = true;
}
if (in_young_gc_mode()) {
if (_last_full_young_gc) {
set_full_young_gcs(false);
_last_full_young_gc = false;
}
if ( !_last_young_gc_full ) {
if ( _should_revert_to_full_young_gcs ||
_known_garbage_ratio < 0.05 ||
set_full_young_gcs(true);
}
}
_should_revert_to_full_young_gcs = false;
if (_last_young_gc_full && !_during_marking)
}
  // do that for any other surv rate groups
// <NEW PREDICTION>
if (update_stats) {
double pause_time_ms = elapsed_ms;
if (_max_pending_cards >= _pending_cards)
double cost_per_card_ms = 0.0;
if (_pending_cards > 0) {
}
double cost_per_entry_ms = 0.0;
if (cards_scanned > 10) {
if (_last_young_gc_full)
else
}
if (_max_rs_lengths > 0) {
double cards_per_entry_ratio =
(double) cards_scanned / (double) _max_rs_lengths;
if (_last_young_gc_full)
else
}
if (rs_length_diff >= 0)
double cost_per_byte_ms = 0.0;
if (copied_bytes > 0) {
if (_in_marking_window)
else
}
double all_other_time_ms = pause_time_ms -
double young_other_time_ms = 0.0;
if (_recorded_young_regions > 0) {
(double) _recorded_young_regions);
}
double non_young_other_time_ms = 0.0;
if (_recorded_non_young_regions > 0) {
(double) _recorded_non_young_regions);
}
double constant_other_time_ms = all_other_time_ms -
double survival_ratio = 0.0;
if (_bytes_in_collection_set_before_gc > 0) {
survival_ratio = (double) bytes_in_to_space_during_gc() /
(double) _bytes_in_collection_set_before_gc;
}
double expensive_region_limit_ms =
(double) MaxGCPauseMillis - predict_constant_other_time_ms();
if (expensive_region_limit_ms < 0.0) {
      // this means that the other time was predicted to be longer than
      // the max pause time
expensive_region_limit_ms = (double) MaxGCPauseMillis;
}
if (PREDICTIONS_VERBOSE) {
"REGIONS %d %d %d "
"PENDING_CARDS %d %d "
"CARDS_SCANNED %d %d "
"RS_LENGTHS %d %d "
"RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
"SURVIVAL_RATIO %1.6lf %1.6lf "
"OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
"OTHER_YOUNG %1.6lf %1.6lf "
"OTHER_NON_YOUNG %1.6lf %1.6lf "
"VTIME_DIFF %1.6lf TERMINATION %1.6lf "
"ELAPSED %1.6lf %1.6lf ",
(!_last_young_gc_full) ? 2 :
(last_pause_included_initial_mark) ? 1 : 0,
}
if (G1PolicyVerbose > 0) {
}
}
// Note that _mmu_tracker->max_gc_time() returns the time in seconds.
double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
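  // e.g. a 200ms max GC time with G1RSetUpdatingPauseTimePercent=10
  // gives an update_rs_time_goal_ms of 20ms.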
// </NEW PREDICTION>
}
// <NEW PREDICTION>
double update_rs_processed_buffers,
double goal_ms) {
if (G1UseAdaptiveConcRefinement) {
int g = cg1r->green_zone();
if (update_rs_time > goal_ms) {
      g = (int)(g * dec_k); // Can become 0, that's OK. That would mean mutator-only processing.
} else {
}
}
// Change the refinement threads params
cg1r->set_green_zone(g);
cg1r->yellow_zone());
// Change the barrier params
}
} else {
}
}
double
if (young_num == 0)
return 0.0;
young_num += adjustment;
if (full_young_gcs())
else
double accum_yg_surv_rate =
return
}
double
if (full_young_gcs())
else
}
double
return
}
double
bool young) {
if (full_young_gcs())
else
double region_elapsed_time_ms =
if (young)
else
return region_elapsed_time_ms;
}
else {
"invariant" );
}
return bytes_to_copy;
}
void
_recorded_rs_lengths = 0;
#endif // PREDICTIONS_VERBOSE
}
void
if (!young) {
}
#endif // PREDICTIONS_VERBOSE
}
void
record_cset_region_info(hr, false);
}
void
}
#endif // PREDICTIONS_VERBOSE
}
}
}
void
// The _predicted_pause_time_ms field is referenced in code
// not under PREDICTIONS_VERBOSE. Let's initialize it.
_predicted_pause_time_ms = -1.0;
if (full_young_gcs())
else
#endif // PREDICTIONS_VERBOSE
}
void G1CollectorPolicy::check_if_region_is_too_expensive(double
// I don't think we need to do this when in young GC mode since
// marking will be initiated next time we hit the soft limit anyway...
if (!in_young_gc_mode()) {
set_full_young_gcs(true);
// We might want to do something different here. However,
// right now we don't support the non-generational G1 mode
// (and in fact we are planning to remove the associated code,
// see CR 6814390). So, let's leave it as is and this will be
// removed some time in the future
} else
// no point in doing another partial one
_should_revert_to_full_young_gcs = true;
}
}
// </NEW PREDICTION>
double elapsed_ms) {
}
double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
  if (_recent_pause_times_ms->num() == 0)
    return (double) MaxGCPauseMillis;
  else return _recent_pause_times_ms->avg();
}
double G1CollectorPolicy::recent_avg_time_for_CH_strong_ms() {
if (_recent_CH_strong_roots_times_ms->num() == 0)
return (double)MaxGCPauseMillis/3.0;
else return _recent_CH_strong_roots_times_ms->avg();
}
double G1CollectorPolicy::recent_avg_time_for_G1_strong_ms() {
if (_recent_G1_strong_roots_times_ms->num() == 0)
return (double)MaxGCPauseMillis/3.0;
else return _recent_G1_strong_roots_times_ms->avg();
}
double G1CollectorPolicy::recent_avg_time_for_evac_ms() {
  if (_recent_evac_times_ms->num() == 0)
    return (double)MaxGCPauseMillis/3.0; // default guess, mirroring the helpers above
  else return _recent_evac_times_ms->avg();
}
int G1CollectorPolicy::number_of_recent_gcs() {
return _recent_pause_times_ms->num();
}
double G1CollectorPolicy::recent_avg_survival_fraction() {
}
double G1CollectorPolicy::last_survival_fraction() {
}
double
TruncatedSeq* before) {
// We exempt parallel collection from this check because Alloc Buffer
// fragmentation can produce negative collections.
// Further, we're now always doing parallel collection. But I'm still
// leaving this here as a placeholder for a more precise assertion later.
// (DLD, 10/05.)
_g1->evacuation_failed() ||
return recent_survival_rate;
} else {
return 1.0; // Be conservative.
}
}
double
TruncatedSeq* before) {
// We exempt parallel collection from this check because Alloc Buffer
// fragmentation can produce negative collections.
// Further, we're now always doing parallel collection. But I'm still
// leaving this here as a placeholder for a more precise assertion later.
// (DLD, 10/05.)
return last_survival_rate;
} else {
return 1.0;
}
}
static const int survival_min_obs = 5;
static const double min_survival_rate = 0.1;
double
double latest) {
if (number_of_recent_gcs() < survival_min_obs) {
}
// In the parallel case, LAB fragmentation can produce "negative
// collections"; so can evac failure. Cap at 1.0
return res;
}
// We will double the existing space, or take
// G1ExpandByPercentOfAvailable % of the available expansion
// space, whichever is smaller, bounded below by a minimum
// expansion (unless that's all that's left.)
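    // A sketch of the computation just described (assuming HotSpot's
    // MIN2/MAX2 helpers; the original lines were elided here):
    //   size_t expand_bytes_via_pct =
    //     uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    //   size_t expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
    //   expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    //   expand_bytes = MIN2(expand_bytes, uncommitted_bytes);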
if (G1PolicyVerbose > 1) {
"committed = %d%s, uncommited = %d%s, via pct = %d%s.\n"
" Answer = %d.\n",
}
return expand_bytes;
} else {
return 0;
}
}
void G1CollectorPolicy::note_start_of_mark_thread() {
}
class CountCSClosure: public HeapRegionClosure {
public:
_g1_policy(g1_policy) {}
bool doHeapRegion(HeapRegion* r) {
return false;
}
};
void G1CollectorPolicy::count_CS_bytes_used() {
CountCSClosure cs_closure(this);
}
static void print_indent(int level) {
  for (int j = 0; j < level+1; ++j)
    gclog_or_tty->print("   ");
}
const char* str,
}
const char* str,
}
NumberSeq* calc_other_times_ms) const {
bool should_print = false;
if (sum_ratio > 1.1) {
should_print = true;
}
if (avg_ratio > 1.1) {
should_print = true;
}
}
}
should_print = true;
}
should_print = true;
}
if (should_print)
}
if (body_summary != NULL) {
if (parallel) {
{
NumberSeq* other_parts[] = {
};
6, other_parts);
}
} else {
}
}
{
if (body_summary != NULL) {
if (parallel) {
// parallel
NumberSeq* other_parts[] = {
};
3, other_parts);
} else {
// serial
NumberSeq* other_parts[] = {
};
6, other_parts);
}
}
}
} else {
print_indent(0);
}
}
void G1CollectorPolicy::print_tracing_info() const {
if (TraceGen0Time) {
for (int i = 0; i < _aux_num; ++i) {
if (_all_aux_times_ms[i].num() > 0) {
char buffer[96];
}
}
"Tenured %8d (%6.2lf%%)",
}
if (TraceGen1Time) {
if (_all_full_gc_times_ms->num() > 0) {
}
}
}
void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
// add this call for any other surv rate groups
#endif // PRODUCT
}
bool
bool ret;
if (G1FixedEdenSize) {
}
if (young_list_length < young_list_max_length) {
ret = true;
} else {
ret = false;
}
return ret;
}
#ifndef PRODUCT
// for debugging, bit of a hack...
static char*
region_num_to_mbs(int length) {
static char buffer[64];
return buffer;
}
#endif // PRODUCT
switch (purpose) {
case GCAllocForSurvived:
return _max_survivor_regions;
case GCAllocForTenured:
return REGIONS_UNLIMITED;
    default:
      ShouldNotReachHere();
      return REGIONS_UNLIMITED; // keep some compilers happy
};
}
// Calculates survivor space parameters.
{
if (G1FixedSurvivorSpaceSize == 0) {
} else {
}
if (G1FixedTenuringThreshold) {
} else {
}
}
bool
word_size) {
if (G1FixedEdenSize) {
}
if (in_young_gc_mode()) {
if (reached_target_length) {
"invariant" );
return true;
}
} else {
guarantee( false, "should not reach here" );
}
return false;
}
#ifndef PRODUCT
class HRSortIndexIsOKClosure: public HeapRegionClosure {
public:
bool doHeapRegion(HeapRegion* r) {
if (!r->continuesHumongous()) {
}
return false;
}
};
return true;
}
#endif
bool
if (!during_cycle) {
return true;
} else {
return false;
}
}
void
// We are about to decide on whether this pause will be an
// initial-mark pause.
// First, during_initial_mark_pause() should not be already set. We
// will set it here if we have to. However, it should be cleared by
// the end of the pause (it's only set for the duration of an
// initial-mark pause).
if (initiate_conc_mark_if_possible()) {
// We had noticed on a previous pause that the heap occupancy has
// gone over the initiating threshold and we should start a
// concurrent marking cycle. So we might initiate one.
if (!during_cycle) {
// The concurrent marking thread is not "during a cycle", i.e.,
// it has completed the last one. So we can go ahead and
      // initiate a new cycle.
      set_during_initial_mark_pause();

      // And we can now clear initiate_conc_mark_if_possible() as
      // we've already acted on it.
      clear_initiate_conc_mark_if_possible();
} else {
// The concurrent marking thread is still finishing up the
// previous cycle. If we start one right now the two cycles
// overlap. In particular, the concurrent marking thread might
// be in the process of clearing the next marking bitmap (which
// we will use for the next cycle if we start one). Starting a
// cycle now will be bad given that parts of the marking
// information might get cleared by the marking thread. And we
// cannot wait for the marking thread to finish the cycle as it
// periodically yields while clearing the next marking bitmap
// and, if it's in a yield point, it's waiting for us to
// finish. So, at this point we will not start a cycle and we'll
// let the concurrent marking thread complete the last one.
}
}
}
void
}
class NextNonCSElemFinder: public HeapRegionClosure {
public:
bool doHeapRegion(HeapRegion* r) {
if (!r->in_collection_set()) {
_res = r;
return true;
} else {
return false;
}
}
};
class KnownGarbageClosure: public HeapRegionClosure {
public:
{}
bool doHeapRegion(HeapRegion* r) {
// We only include humongous regions in collection
// sets when concurrent mark shows that their contained object is
// unreachable.
// Do we have any marking information for this region?
if (r->is_marked()) {
// We don't include humongous regions in collection
// sets because we collect them immediately at the end of a marking
// cycle. We also don't include young regions because we *must*
// include them in the next collection pause.
if (!r->isHumongous() && !r->is_young()) {
}
}
return false;
}
};
class ParKnownGarbageHRClosure: public HeapRegionClosure {
int _worker;
int _invokes;
void get_new_chunk() {
}
void add_region(HeapRegion* r) {
if (_cur_chunk_idx == _cur_chunk_end) {
}
}
public:
int worker) :
_invokes(0)
{}
bool doHeapRegion(HeapRegion* r) {
// We only include humongous regions in collection
// sets when concurrent mark shows that their contained object is
// unreachable.
_invokes++;
// Do we have any marking information for this region?
if (r->is_marked()) {
// We don't include humongous regions in collection
// sets because we collect them immediately at the end of a marking
// cycle.
// We also do not include young regions in collection sets
if (!r->isHumongous() && !r->is_young()) {
add_region(r);
}
}
return false;
}
};
class ParKnownGarbageTask: public AbstractGangTask {
public:
AbstractGangTask("ParKnownGarbageTask"),
{}
void work(int i) {
// Back to zero for the claim value.
if (G1PrintParCleanupStats) {
}
}
};
void
double start;
double clear_marked_end;
if (G1PrintParCleanupStats) {
}
if (G1CollectedHeap::use_parallel_gc_threads()) {
WorkUnit);
(int) WorkUnit);
"sanity check");
} else {
}
double known_garbage_end;
if (G1PrintParCleanupStats) {
}
double sort_end;
if (G1PrintParCleanupStats) {
}
double work2_end;
if (G1PrintParCleanupStats) {
}
}
// Add the heap region at the head of the non-incremental collection set
void G1CollectorPolicy::
if (G1PrintHeapRegions) {
}
  if (_g1->mark_in_progress())
    _g1->concurrent_mark()->registerCSetRegion(hr);
hr->set_in_collection_set(true);
}
// Initialize the per-collection-set information
_inc_cset_size = 0;
if (in_young_gc_mode()) {
}
_inc_cset_max_finger = 0;
}
// This routine is used when:
// * adding survivor regions to the incremental cset at the end of an
// evacuation pause,
// * adding the current allocation region to the incremental cset
// when it is retired, and
// * updating existing policy information for a region in the
// incremental cset via young list RSet sampling.
// Therefore this routine may be called at a safepoint by the
// VM thread, or in-between safepoints by mutator threads (when
// retiring the current allocation region) or a concurrent
// refine thread (RSet sampling).
  // Cache the values we have added to the aggregated information
// in the heap region in case we have to remove this region from
// the incremental collection set, or it is updated by the
// rset sampling code
// Record the number of bytes used in this region
  // Cache the values we have added to the aggregated information
// in the heap region in case we have to remove this region from
// the incremental collection set, or it is updated by the
// rset sampling code
#endif // PREDICTIONS_VERBOSE
}
// This routine is currently only called as part of the updating of
// existing policy information for regions in the incremental cset that
// is performed by the concurrent refine thread(s) as part of young list
// RSet sampling. Therefore we should not be at a safepoint.
// This routine removes the given heap region from the collection set info.
// Clear the values cached in the heap region
hr->set_recorded_rs_length(0);
// Subtract the number of bytes used in this region
// Clear the values cached in the heap region
#endif // PREDICTIONS_VERBOSE
}
// Update the collection set information that is dependent on the new RS length
}
// information in the heap region here (before the region gets added
// to the collection set). An individual heap region's cached values
// are calculated, aggregated with the policy collection set info,
// and cached in the heap region here (initially) and (subsequently)
// by the Young List sampling code.
hr->set_in_collection_set(true);
}
// Add the region at the RHS of the incremental cset
// We should only ever be appending survivors at the end of a pause
// Do the 'common' stuff
// Now add the region at the right hand side
if (_inc_cset_tail == NULL) {
_inc_cset_head = hr;
} else {
}
_inc_cset_tail = hr;
if (G1PrintHeapRegions) {
}
}
// Add the region to the LHS of the incremental cset
// Survivors should be added to the RHS at the end of a pause
// Do the 'common' stuff
// Add the region at the left hand side
if (_inc_cset_head == NULL) {
_inc_cset_tail = hr;
}
_inc_cset_head = hr;
if (G1PrintHeapRegions) {
}
}
#ifndef PRODUCT
"age: %4d, y: %d, surv: %d",
csr->is_survivor());
}
}
#endif // !PRODUCT
void
double target_pause_time_ms) {
// Set this here - in case we're not doing young collections.
err_msg("target_pause_time_ms = %1.6lf should be positive",
double predicted_pause_time_ms = base_time_ms;
// the 10% and 50% values are arbitrary...
_within_target = false;
} else {
_within_target = true;
}
// We figure out the number of bytes available for future to-space.
// For new regions without marking information, we must assume the
// worst-case of complete survival. If we have marking information for a
// region, we can bound the amount of live data. We can add a number of
// such regions, as long as the sum of the live data bounds does not
// exceed the available evacuation space.
_collection_set_size = 0;
// Adjust for expansion and slop.
HeapRegion* hr;
if (in_young_gc_mode()) {
if (G1PolicyVerbose > 0) {
}
_young_cset_length = 0;
    _last_young_gc_full = full_young_gcs();
if (_last_young_gc_full)
else
    // The young list is laid out so that the survivor regions from the
    // previous pause are appended to the RHS of the young list, i.e.
    //   [Newly Young Regions ++ Survivors from last pause].
}
// Clear the fields that point to the survivor list - they are
// all young now.
if (_g1->mark_in_progress())
// For young regions in the collection set, we assume the worst
// case of complete survival
// The number of recorded young regions is the incremental
// collection set's current size
#endif // PREDICTIONS_VERBOSE
if (G1PolicyVerbose > 0) {
max_live_bytes/K);
}
// We are doing young collections so reset this.
// Note we can use either _collection_set_size or
// _young_cset_length here
if (_collection_set_size > 0 && _last_young_gc_full) {
// don't bother adding more regions...
}
}
if (!in_young_gc_mode() || !full_young_gcs()) {
bool should_continue = true;
do {
if (G1PolicyVerbose > 0) {
max_live_bytes/K);
}
}
} while (should_continue);
if (!adaptive_young_list_length() &&
_should_revert_to_full_young_gcs = true;
}
}
}
}
}