/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/collectorPolicy.hpp"
#include "utilities/ostream.hpp"
#include "utilities/workgroup.hpp"
// The throughput goal is implemented as
// _throughput_goal = 1 - ( 1 / (1 + gc_cost_ratio))
// gc_cost_ratio is the ratio
// application cost / gc cost
// For example a gc_cost_ratio of 4 translates into a
// throughput goal of .80
//
// NOTE(review): the lines below are an incomplete fragment -- the
// constructor signature and most of its initializer list and body are
// missing, so this region cannot compile as written.  It appears to be
// a remnant of the AdaptiveSizePolicy constructor; restore the full
// text from version control rather than hand-patching it here.
double gc_pause_goal_sec,
_gc_overhead_limit_exceeded(false),
"No opportunity to clear SoftReferences before GC overhead limit");
// Start the timers
_young_gen_policy_is_ready = false;
}
// If the number of GC threads was set on the command line,
// use it.
// Else
// Calculate the number of GC threads based on the number of Java threads.
// Calculate the number of GC threads based on the size of the heap.
// Use the larger.
//
// NOTE(review): incomplete fragment -- the function signature (except a
// trailing parameter) and most statements are missing; unmatched braces
// and orphaned argument lines below will not compile.  Looks like a
// remnant of a "calculate default active GC workers" routine; recover
// it from version control.
const uintx min_workers,
// If the user has specifically set the number of
// GC threads, use them.
// If the user has turned off using a dynamic number of GC threads
// or the user has requested a specific number, set the active
// number of workers to all the workers.
// Always use at least min_workers but use up to
// GCThreadsPerJavaThreads * application threads.
// Choose a number of GC threads based on the current size
// of the heap. This may be complicated because the size of
// the heap depends on factors such as the throughput goal.
// Still a large heap should be collected by more GC threads.
// Limit the number of workers to the number created,
// (workers()).
(uintx) total_workers);
// Increase GC workers instantly but decrease them more
// slowly.
if (new_active_workers < prev_active_workers) {
}
// Check once more that the number of workers is within the limits.
// Assume this is debugging and jiggle the number of GC threads.
if (new_active_workers == prev_active_workers) {
if (new_active_workers < total_workers) {
} else if (new_active_workers > min_workers) {
}
}
if (new_active_workers == total_workers) {
if (_debug_perturbation) {
}
}
"Jiggled active workers too much");
}
if (TraceDynamicGCThreads) {
"active_workers(): %d new_acitve_workers: %d "
"prev_active_workers: %d\n"
" active_workers_by_JT: %d active_workers_by_heap_size: %d",
}
return new_active_workers;
}
// If the user has specifically set the number of
// GC threads, use them.
// If the user has turned off using a dynamic number of GC threads
// or the user has requested a specific number, set the active
// number of workers to all the workers.
//
// NOTE(review): two incomplete function fragments follow (missing
// signatures, conditions, and call bodies); they cannot compile as-is.
// The first appears to compute active parallel GC workers, the second
// active concurrent GC workers (it returns ConcGCThreads when dynamic
// sizing is off).  Restore both from version control.
int new_active_workers;
if (!UseDynamicNumberOfGCThreads ||
} else {
2, /* Minimum number of workers */
}
return new_active_workers;
}
if (!UseDynamicNumberOfGCThreads ||
return ConcGCThreads;
} else {
1, /* Minimum number of workers */
return no_of_gc_threads;
}
}
// NOTE(review): this region is a run of several decimated small-method
// fragments (tenuring-threshold predicate, minor-collection begin/end
// bookkeeping on _minor_timer, eden/promo size-delta accessors, and a
// _major_timer reader).  Signatures and most statements are missing;
// none of it compiles as written.  Restore from version control.
return decrement_tenuring_threshold_for_gc_cost() ||
}
// Update the interval time
_minor_timer.stop();
// Save most recent collection time
}
double minor_pause_in_ms) {
}
// Update the pause time.
_minor_timer.stop();
// Sample for performance counter
// Cost of collection (unit-less)
if ((_latest_minor_mutator_interval_seconds > 0.0) &&
(minor_pause_in_seconds > 0.0)) {
double interval_in_seconds =
// Sample for performance counter
}
// The policy does not have enough data until at least some
// minor collections have been done.
// Calculate variables used to estimate pause time vs. gen sizes
if (PrintAdaptiveSizePolicy && Verbose) {
"minor gc cost: %f average: %f", collection_cost,
}
// Calculate variable used to estimate collection cost vs. gen sizes
}
// Interval times use this timer to measure the mutator time.
// Reset the timer after the GC pause.
}
return eden_heap_delta;
}
}
return eden_heap_delta;
}
return promo_heap_delta;
}
}
return promo_heap_delta;
}
_major_timer.stop();
return result;
}
// NOTE(review): incomplete fragments of the decayed major-GC-cost
// calculations (missing signatures, assignments, and operands); will
// not compile as written.  Restore from version control.
// Linear decay of major gc cost
if(time_since_major_gc() > 0.0) {
(((double) AdaptiveSizeMajorGCDecayTimeScale) * major_interval)
/ time_since_major_gc();
}
// The decayed cost should always be smaller than the
// average cost but the vagaries of finite arithmetic could
// produce a larger value in decayed_major_gc_cost so protect
// against that.
}
// Use a value of the major gc cost that has been decayed
// by the factor
//
// average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale /
// time-since-last-major-gc
//
// if the average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale
// is less than time-since-last-major-gc.
//
// In cases where there are initial major gc's that
// are of a relatively high cost but no later major
// gc's, the total gc cost can remain high because
// the major gc cost remains unchanged (since there are no major
// gc's). In such a situation the value of the unchanging
// major gc cost can keep the mutator throughput below
// the goal when in fact the major gc cost is becoming diminishingly
// small. Use the decaying gc cost only to decide whether to
// adjust for throughput. Using it also to determine the adjustment
// to be made for throughput also seems reasonable but there is
// no test case to use to decide if it is the right thing to do
// so don't do it yet.
(AdaptiveSizeMajorGCDecayTimeScale > 0) &&
(avg_major_interval > 0.00)) {
// Decay the major gc cost?
if (time_since_last_major_gc >
((double) AdaptiveSizeMajorGCDecayTimeScale) * avg_major_interval) {
// Decay using the time-since-last-major-gc
if (PrintGCDetails && Verbose) {
" %f time since last major gc: %f",
}
}
}
return result;
}
}
// NOTE(review): incomplete fragment of the GC-overhead-limit check
// (signature reduced to a single dangling parameter; most conditions,
// calls, and printf-style argument lists are missing).  It reads the
// gc_cost()/gc_cost_limit comparison and the UseGCOverheadLimit /
// GCTimeLimit machinery visible below, but it cannot compile as
// written.  Restore the full function from version control.
bool is_full_gc,
// Ignore explicit GC's. Exiting here does not set the flag and
// does not reset the count. Updating of the averages for system
// GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
return;
}
// eden_limit is the upper limit on the size of eden based on
// the maximum size of the young generation and the sizes
// of the survivor space.
// The question being asked is whether the gc costs are high
// and the space being recovered by a collection is low.
// free_in_young_gen is the free space in the young generation
// after a collection and promo_live is the free space in the old
// generation after a collection.
//
// Use the minimum of the current value of the live in the
// young gen or the average of the live in the young gen.
// If the current value drops quickly, that should be taken
// into account (i.e., don't trigger if the amount of free
// space has suddenly jumped up). If the current is much
// higher than the average, use the average since it represents
// the longer term behavior.
max_eden_size - live_in_eden : 0;
// But don't force a promo size below the current promo size. Otherwise,
// the promo size will shrink for no good reason.
if (PrintAdaptiveSizePolicy && (Verbose ||
"PSAdaptiveSizePolicy::compute_generation_free_space limits:"
" promo_limit: " SIZE_FORMAT
" max_eden_size: " SIZE_FORMAT
" total_free_limit: " SIZE_FORMAT
" max_old_gen_size: " SIZE_FORMAT
" max_eden_size: " SIZE_FORMAT
" mem_free_limit: " SIZE_FORMAT,
(size_t) mem_free_limit);
}
bool print_gc_overhead_limit_would_be_exceeded = false;
if (is_full_gc) {
if (gc_cost() > gc_cost_limit &&
// Collections, on average, are taking too much time, and
// gc_cost() > gc_cost_limit
// we have too little space available after a full gc.
// total_free_limit < mem_free_limit
// where
// total_free_limit is the free space available in
// both generations
// total_mem is the total space available for allocation
// in both generations (survivor spaces are not included
// just as they are not included in eden_limit).
// mem_free_limit is a fraction of total_mem judged to be an
// acceptable amount that is still unused.
// The heap can ask for the value of this variable when deciding
// whether to throw an OutOfMemory error.
// Note that the gc time limit test only works for the collections
// of the young gen + tenured gen and not for collections of the
// permanent gen. That is because the calculation of the space
// freed by the collection is the free space in the young gen +
// tenured gen.
// At this point the GC overhead limit is being exceeded.
if (UseGCOverheadLimit) {
if (gc_overhead_limit_count() >=
// All conditions have been met for throwing an out-of-memory
// Avoid consecutive OOM due to the gc time limit by resetting
// the counter.
} else {
// The required consecutive collections which exceed the
// GC time limit may or may not have been reached. We
// are approaching that condition and so as not to
// throw an out-of-memory before all SoftRef's have been
// cleared, set _should_clear_all_soft_refs in CollectorPolicy.
// The clearing will be done on the next GC.
if (near_limit) {
if (PrintGCDetails && Verbose) {
"will be clearing all SoftReference");
}
}
}
}
// Set this even when the overhead limit will not
// cause an out-of-memory. Diagnostic message indicating
// that the overhead limit is being exceeded is sometimes
// printed.
} else {
// Did not exceed overhead limits
}
}
if (gc_overhead_limit_exceeded()) {
"of %d%%", GCTimeLimit);
} else if (print_gc_overhead_limit_would_be_exceeded) {
"of %d%% %d consecutive time(s)",
}
}
}
// Printing
//
// NOTE(review): two incomplete printing-routine fragments follow (an
// adaptive-size-policy action printer gated on UseAdaptiveSizePolicy,
// and a tenuring-threshold printer).  Signatures, stream arguments,
// and most conditions are missing; the region cannot compile as
// written.  Restore both functions from version control.
// Should only be used with adaptive size policy turned on.
// Otherwise, there may be variables that are undefined.
if (!UseAdaptiveSizePolicy) return false;
// Print goal for which action is needed.
bool change_for_pause = false;
if ((change_old_gen_for_maj_pauses() ==
action = (char*) " *** pause time goal ***";
change_for_pause = true;
} else if ((change_old_gen_for_throughput() ==
action = (char*) " *** throughput goal ***";
} else if (decrease_for_footprint()) {
action = (char*) " *** reduced footprint ***";
} else {
// No actions were taken. This can legitimately be the
// situation if not enough data has been gathered to make
// decisions.
return false;
}
// Pauses
// Currently the size of the old gen is only adjusted to
// change the major pause times.
if (change_young_gen_for_min_pauses() ==
} else if (change_for_pause) {
}
} else if (change_for_pause) {
}
// Throughput
"Both generations should be growing");
} else if (change_young_gen_for_throughput() ==
// Only the young generation may grow at start up (before
// enough full collections have been done to grow the old generation).
}
// Minimum footprint
if (decrease_for_footprint() != 0) {
}
return true;
}
int tenuring_threshold_arg) const {
return false;
}
// Tenuring threshold
bool tenuring_threshold_changed = true;
" survivor space overflow) = ");
} else if (decrement_tenuring_threshold_for_gc_cost()) {
" GC costs) = ");
} else if (increment_tenuring_threshold_for_gc_cost()) {
" GC costs) = ");
} else {
tenuring_threshold_changed = false;
}
if (tenuring_threshold_changed) {
}
return true;
}