/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/stack.inline.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif
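// DefNewGeneration is the single-threaded ("default new") copying young
// generation: an eden space plus two survivor spaces (from/to).  A scavenge
// copies live objects from eden and from-space into to-space, or promotes
// them into the next generation, after which the survivor spaces swap roles.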
//
// DefNewGeneration functions.
// Methods of protected closure types.

void DefNewGeneration::IsAliveClosure::do_object(oop p) {
  assert(false, "Do not call.");
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");
// Compute the maximum eden and survivor space sizes. These sizes
// are computed assuming the entire reserved space is committed.
// These values are exported as performance counters.
// allocate the performance counters
// Generation counters -- generation 0, 3 subspaces
}
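// compute_space_boundaries() lays out eden and the two survivor spaces
// contiguously in the committed part of the virtual space: eden first,
// followed by from-space and to-space of equal size.  The survivor size is
// derived from the committed size (compute_survivor_size) and eden receives
// the remainder.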
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
// If the spaces are being cleared (only done at heap initialization
// currently), the survivor spaces need not be empty.
// Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    "Initialization of the survivor spaces assumes these are empty");
// Compute sizes
if (eden_size < minimum_eden_size) {
// May happen due to 64Kb rounding, if so adjust eden size back up
}
// A minimum eden size implies that there is a part of eden that
// is being used and that affects the initialization of any
// newly formed eden.
bool live_in_eden = minimum_eden_size > 0;
// If not clearing the spaces, do some checking to verify that
// the space are already mangled.
if (!clear_space) {
// Must check mangling before the spaces are reshaped. Otherwise,
// the bottom or end of one space may have moved into another
// a failure of the check may not correctly indicate which space
// is not properly mangled.
if (ZapUnusedHeapArea) {
}
}
  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
// If clear_space and live_in_eden, we will not have cleared any
// portion of eden above its top. This can cause newly
// expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}
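// After a successful scavenge the survivor spaces exchange roles: the old
// to-space, which now holds the survivors, becomes from-space, and the
// emptied from-space becomes the new to-space.  The space performance
// counters are swapped along with the spaces so they keep tracking the
// logical from/to spaces.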
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}
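// Grow the committed size of the generation.  Called when compute_new_size()
// decides the young generation should be larger; the request is bounded by
// the generation's reserved size, so no expand-to-reserve is attempted here.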
bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply that after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the reserve size.  The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary.  Also a second call to expand-to-reserve
  // value potentially can cause an undue expansion.
  // For example if the first expand fail for unknown reasons,
  // but the second succeeds and expands the heap to its maximum
  // value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}
void DefNewGeneration::compute_new_size() {
// This is called after a gc that includes the following generation
// (which is required to exist.) So from-space will normally be empty.
// Note that we check both spaces, since if scavenge failed they revert roles.
  // If not we bail out (otherwise we would have to relocate the objects)
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }
"DefNewGeneration cannot be an oldest gen");
"just checking");
// All space sizes must be multiples of Generation::GenGrain.
// Compute desired new generation size based on NewRatio and
// NewSizeThreadIncrease
// Adjust new generation size
bool changed = false;
if (desired_new_size > new_size_before) {
changed = true;
}
// If the heap failed to expand to the desired size,
// "changed" will be false. If the expansion failed
// (and at this point it was expected to succeed),
// ignore the failure (leaving "changed" as false).
}
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    _virtual_space.shrink_by(change);
    changed = true;
  }
if (changed) {
// The spaces have already been mangled at this point but
// may not have been cleared (set top = bottom) and should be.
// Mangling was done when the heap was being expanded.
SIZE_FORMAT "K [eden="
if (WizardMode) {
}
gclog_or_tty->cr();
}
}
}
void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
  // $$$ This may be wrong in case of "scavenge failure"?
  eden()->object_iterate(cl);
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}
size_t DefNewGeneration::capacity() const {
  return eden()->capacity() + from()->capacity();  // to() is only used during scavenge
}

size_t DefNewGeneration::used() const {
  return eden()->used() + from()->used();  // to() is only used during scavenge
}

size_t DefNewGeneration::free() const {
  return eden()->free() + from()->free();  // to() is only used during scavenge
}

void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}
// The last collection bailed out, we are running out of heap space,
// so we try to allocate the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):"
                        " will_fail: %s"
                        " heap_lock: %s"
                        " free: " SIZE_FORMAT,
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    }
  }
  return result;
}
HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool is_tlab,
                                                bool parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL, "This must be the youngest gen, and not the only gen");
// If the next generation is too full to accommodate promotion
// from this generation, pass on collection; let the next generation
// do it.
if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
}
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  // These can be shared for all code paths
IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");
// Not very pretty.
FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");
  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_no_gc_barrier,
                                true,   // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier);

  // "evacuate followers".
  evacuate_followers.do_void();
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, NULL, _gc_timer);
if (!_promotion_failed) {
// Swap the survivor spaces.
if (ZapUnusedHeapArea) {
// This is now done here because of the piece-meal mangling which
// can check for valid mangling at intermediate points in the
// collection(s). When a minor collection fails to collect
// sufficient space resizing of the young generation can occur
// an redistribute the spaces in the young generation. Mangle
// here so that unzapped regions don't get distributed to
// other spaces.
to()->mangle_unused_area();
}
swap_spaces();
    // Set the desired survivor size to half the real survivor space
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
} else {
    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
// Add to-space to the list of space to compact
// when a promotion failure has occurred. In that
// case there can be live objects in to-space
// as a result of a partial evacuation of eden
// and from-space.
swap_spaces(); // For uniformity wrt ParNewGeneration.
    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();
    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);
}
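// Promotion failure support.  An object that can be copied neither into
// to-space nor into the next generation is forwarded to itself, and its mark
// word is preserved if it would otherwise be lost.  After the scavenge,
// remove_forwarding_pointers() walks eden and from-space to reinitialize the
// mark words and restores the preserved marks, leaving the young generation
// consistent for the full collection that follows a failed scavenge.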
class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}
void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);
  // Now restore saved marks, if any.
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj   = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}

void DefNewGeneration::preserve_mark(oop obj, markOop m) {
  assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj), "Oversaving!");
  _objs_with_preserved_marks.push(obj);
  _preserved_marks_of_objs.push(m);
}
void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    preserve_mark(obj, m);
  }
}
void DefNewGeneration::handle_promotion_failure(oop old) {
  if (PrintPromotionFailure && !_promotion_failed) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }
  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
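// Copy policy: an object whose age is below the tenuring threshold is copied
// into to-space; if it is too old, or to-space allocation fails, it is
// promoted into the next generation.  If promotion fails as well, the object
// stays where it is and handle_promotion_failure() records the failure.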
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(), "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;
  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }
  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    Prefetch::write(obj, PrefetchCopyIntervalInBytes);
    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }
  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);
  return obj;
}
void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}
void DefNewGeneration::save_marks() {
eden()->set_saved_mark();
to()->set_saved_mark();
from()->set_saved_mark();
}
void DefNewGeneration::reset_saved_marks() {
eden()->reset_saved_mark();
to()->reset_saved_mark();
from()->reset_saved_mark();
}
bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(to()->saved_mark_at_top(), "Violated spec - alloc in to");
  return from()->saved_mark_at_top();
}
#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
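// The macro above generates an oop_since_save_marks_iterate variant for each
// closure type.  Iterating "since save marks" lets the evacuate-followers
// loop scan only objects allocated (copied or promoted) since the previous
// pass, repeating until no space has seen new allocations.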
void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");
  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */
  ContiguousSpace* to_space = to();
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}
void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}
bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: to is not empty :: ");
    }
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
    assert(_next_gen != NULL,
           "This must be the youngest gen, and not the only gen");
  }
  return _next_gen->promotion_attempt_is_safe(used());
}
void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
// Check if the heap is approaching full after a collection has
// been done. Generally the young generation is empty at
// a minimum at the end of a collection. If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
} else {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
}
} else {
#ifdef ASSERT
// It is possible that incremental_collection_failed() == true
// here, because an attempted scavenge did not succeed. The policy
// is normally expected to cause a full collection which should
// clear that condition, so we should not be here twice in a row
// with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
}
  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }
  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }
  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}
void DefNewGeneration::record_spaces_top() {
eden()->set_top_for_allocations();
to()->set_top_for_allocations();
from()->set_top_for_allocations();
}
void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}
void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}
void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}
const char* DefNewGeneration::name() const {
return "def new generation";
}
// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}
HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit,
      // there are no reasons to do an attempt to allocate
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until succeeded or the soft limit can't be adjusted
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  }
  return result;
}
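// par_allocate() is the lock-free fast path: a bump-pointer allocation in
// eden using an atomic compare-and-swap on eden's top, which is why the slow
// path above uses it as well.  Eden's "soft end" is a lower, adjustable limit
// that gives the next generation a chance to react (via
// allocation_limit_reached()) before eden is completely full.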
HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}