/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/tenuredGeneration.hpp"
#include "oops/oop.inline.hpp"
// NOTE(review): the declarator that should precede this opening brace is
// missing from this chunk — the body resembles a generation constructor
// (presumably TenuredGeneration's); confirm against the complete file.
{
// Reset the damping factor used when shrinking the generation after GC.
_shrink_factor = 0;
// initialize performance counters
// Generation Counters -- generation 1, 1 subspace
#ifndef SERIALGC
// With parallel young-gen GC, set up one promotion buffer per GC thread.
if (UseParNewGC && ParallelGCThreads > 0) {
// Fail VM startup rather than run with a NULL buffer array.
if (_alloc_buffers == NULL)
vm_exit_during_initialization("Could not allocate alloc_buffers");
for (uint i = 0; i < ParallelGCThreads; i++) {
// NOTE(review): right-hand side of this assignment is missing (truncated
// line) — presumably a per-thread allocation-buffer construction.
_alloc_buffers[i] =
if (_alloc_buffers[i] == NULL)
vm_exit_during_initialization("Could not allocate alloc_buffers");
}
} else {
// NOTE(review): non-ParNew branch body truncated out of this chunk.
}
#endif // SERIALGC
}
// NOTE(review): enclosing signature missing — presumably the generation's
// name() accessor; returns the human-readable generation name.
return "tenured generation";
}
// NOTE(review): function signature and most statements are missing from this
// chunk. What remains matches post-GC capacity sizing logic (expand when free
// space is below MinHeapFreeRatio, shrink — damped by _shrink_factor — when
// above MaxHeapFreeRatio). The orphaned string literals below are fragments of
// PrintGC-style log statements whose surrounding calls were truncated out.
// Confirm against the complete file before any code change.
// Capture the current damping factor, then reset it; it is re-established
// below only if we actually decide to shrink.
_shrink_factor = 0;
// We don't have floating point command-line arguments
// Note: argument processing ensures that MinHeapFreeRatio < 100.
// Compute some numbers about the state of the heap.
// Don't shrink less than the initial generation size
" minimum_free_percentage: %6.2f"
" maximum_used_percentage: %6.2f",
" free_after_gc : %6.1fK"
" used_after_gc : %6.1fK"
" capacity_after_gc : %6.1fK",
free_after_gc / (double) K,
used_after_gc / (double) K,
capacity_after_gc / (double) K);
" free_percentage: %6.2f",
}
// If we have less free space than we want then expand
// Don't expand unless it's significant
if (expand_bytes >= _min_heap_delta_bytes) {
// NOTE(review): expand() call truncated out of this chunk.
}
" minimum_desired_capacity: %6.1fK"
" expand_bytes: %6.1fK"
" _min_heap_delta_bytes: %6.1fK",
minimum_desired_capacity / (double) K,
expand_bytes / (double) K,
_min_heap_delta_bytes / (double) K);
}
// Expansion path taken — no shrinking considered this cycle.
return;
}
// No expansion, now see if we want to shrink
// We would never want to shrink more than this
if (MaxHeapFreeRatio < 100) {
" maximum_free_percentage: %6.2f"
" minimum_used_percentage: %6.2f",
" _capacity_at_prologue: %6.1fK"
" minimum_desired_capacity: %6.1fK"
" maximum_desired_capacity: %6.1fK",
_capacity_at_prologue / (double) K,
minimum_desired_capacity / (double) K,
maximum_desired_capacity / (double) K);
}
"sanity check");
// Capacity too large, compute shrinking size
// We don't want shrink all the way back to initSize if people call
// System.gc(), because some programs do that between "phases" and then
// we'd just have to grow the heap up again for the next phase. So we
// damp the shrinking: 0% on the first call, 10% on the second call, 40%
// on the third call, and 100% by the fourth call. But if we recompute
// size without shrinking, it goes back to 0%.
if (current_shrink_factor == 0) {
// First shrink request: do nothing this time, shrink 10% next time.
_shrink_factor = 10;
} else {
// NOTE(review): the escalation of _shrink_factor (to 40%/100%) and the
// shrink_bytes computation were truncated out of this chunk.
}
" shrinking:"
" initSize: %.1fK"
" maximum_desired_capacity: %.1fK",
maximum_desired_capacity / (double) K);
" shrink_bytes: %.1fK"
" current_shrink_factor: %d"
" new shrink factor: %d"
" _min_heap_delta_bytes: %.1fK",
shrink_bytes / (double) K,
_min_heap_delta_bytes / (double) K);
}
}
}
// If we grew during this GC cycle, consider giving the expansion back.
if (capacity_after_gc > _capacity_at_prologue) {
// We might have expanded for promotions, in which case we might want to
// take back that expansion if there's room after GC. That keeps us from
// stretching the heap with promotions when there's plenty of room.
// We have two shrinking computations, take the largest
" aggressive shrinking:"
" _capacity_at_prologue: %.1fK"
" capacity_after_gc: %.1fK"
" expansion_for_promotion: %.1fK"
" shrink_bytes: %.1fK",
capacity_after_gc / (double) K,
_capacity_at_prologue / (double) K,
expansion_for_promotion / (double) K,
shrink_bytes / (double) K);
}
}
// Don't shrink unless it's significant
if (shrink_bytes >= _min_heap_delta_bytes) {
// NOTE(review): shrink() call truncated out of this chunk.
}
"sanity check");
}
// NOTE(review): enclosing signature missing — presumably gc_prologue().
// Records the generation's used size before collection so the epilogue /
// sizing code can compare against it.
_used_at_prologue = used();
if (VerifyBeforeGC) {
// NOTE(review): verification call truncated out of this chunk.
}
}
// NOTE(review): enclosing signature missing — presumably gc_epilogue();
// runs post-GC verification when the VerifyAfterGC flag is set.
if (VerifyAfterGC) {
// NOTE(review): verification call truncated out of this chunk.
}
}
// NOTE(review): the first line(s) of this signature are missing — presumably
// a should_collect(full, size, is_tlab)-style predicate deciding whether this
// generation wants a collection. The conditions themselves (full request,
// allocation won't fit, low free space, expansion for promotions) have been
// truncated; only the result-accumulation skeleton remains.
bool is_tlab) {
// This should be one big conditional or (||), but I want to be able to tell
// why it returns what it returns (without re-evaluating the conditionals
// in case they aren't idempotent), so I'm doing it this way.
// DeMorgan says it's okay.
bool result = false;
result = true;
" full");
}
}
result = true;
size);
}
}
// If we don't have very much free space.
// XXX: 10000 should be a percentage of the capacity!!!
result = true;
" free(): " SIZE_FORMAT,
free());
}
}
// If we had to expand to accomodate promotions from younger generations
result = true;
}
}
return result;
}
// NOTE(review): signature head and entire body missing — presumably the
// collect(full, clear_all_soft_refs, size, is_tlab) entry point delegating
// to the underlying space/collector. Confirm against the complete file.
bool clear_all_soft_refs,
bool is_tlab) {
}
// NOTE(review): signature head missing — presumably an
// update_gc_stats(level, full)-style hook; gathers promotion statistics
// after a younger-generation collection. Most statements truncated.
bool full) {
// If the next lower level(s) has been collected, gather any statistics
// that are of interest at this point.
// Calculate size of data promoted from the younger generations
// before doing the collection.
// If the younger gen collections were skipped, then the
// number of promoted bytes will be 0 and adding it to the
// average will incorrectly lessen the average. It is, however,
// also possible that no promotion was needed.
if (used_before_gc >= _used_at_prologue) {
// NOTE(review): promoted-bytes averaging truncated out of this chunk.
}
}
}
// NOTE(review): enclosing signature missing — presumably update_counters();
// refreshes perf counters only when UsePerfData is enabled.
if (UsePerfData) {
// NOTE(review): counter-update calls truncated out of this chunk.
}
}
#ifndef SERIALGC
// NOTE(review): what follows are fragments of several ParNew-support methods
// (per-thread promotion via allocation buffers, buffer retirement, and a
// verification pass over retained buffers). Signatures and most statements
// are missing from this chunk; confirm against the complete file.
bool is_lab = true;
#ifndef PRODUCT
return NULL;
}
#endif // #ifndef PRODUCT
// Slow path:
// Is small enough; abandon this buffer and start a new one.
}
}
};
// Otherwise, buffer allocation failed; try allocating object
// individually.
}
}
}
// Restore the mark word copied above.
return obj;
}
"should contain whole object");
} else {
}
}
}
// Retire each GC thread's allocation buffer (ParNew only).
if (UseParNewGC) {
for (uint i = 0; i < ParallelGCThreads; i++) {
// NOTE(review): per-buffer retire call truncated out of this chunk.
}
}
}
// Verify that any retained parallel allocation buffers do not
// intersect with dirty cards.
if (UseParNewGC) {
for (uint i = 0; i < ParallelGCThreads; i++) {
// NOTE(review): per-buffer verification call truncated out of this chunk.
}
}
}
#else // SERIALGC
#endif // SERIALGC
}
return res;
}