defNewGeneration.cpp revision 457
/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_defNewGeneration.cpp.incl"

//
// DefNewGeneration functions.

// Methods of protected closure types.

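// IsAliveClosure is used during reference processing: an oop is treated as
// live if it lies at or beyond the end of this (youngest) generation's
// reserved region, or if it has already been forwarded by the current
// scavenge.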
DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
void DefNewGeneration::IsAliveClosure::do_object(oop p) {
  assert(false, "Do not call.");
}
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_stack() == NULL
            || _gen->promo_failure_scan_stack()->length() == 0,
            "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  OopClosure(g->ref_processor()), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _objs_with_preserved_marks(NULL),
    _preserved_marks_of_objs(NULL),
    _promo_failure_scan_stack(NULL),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}

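// Recompute the boundaries of eden and the two survivor spaces for the
// current committed size; called from the constructor and again from
// compute_new_size() after the generation expands or shrinks.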
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->min_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces,
  // so check that they are empty.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another, and
    // a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

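// Exchange the roles of from-space and to-space after a scavenge, and keep
// the per-space performance counters consistent with the new roles.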
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the-reserve size.  The
  // request should properly observe the maximum size of
  // the generation, so an expand-to-reserve should be
  // unnecessary.  Also, a second call to expand to the
  // reserve size can potentially cause an undue expansion,
  // for example if the first expand fails for unknown reasons
  // but the second succeeds and expands the heap to its
  // maximum value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist).  So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If they are not empty, we bail out (otherwise we would have to relocate
  // the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
  // $$$ This may be wrong in case of "scavenge failure"?
  eden()->object_iterate(cl);
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (PrintGC && Verbose) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
                        " will_fail: %s"
                        " heap_lock: %s"
                        " free: " SIZE_FORMAT,
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

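// Perform a copying scavenge of the young generation: live objects reachable
// from the strong roots and the remembered set are evacuated into to-space or
// promoted into the next generation, discovered references are processed, and
// the survivor spaces are swapped.  On promotion failure the forwarding done
// so far is undone and the spaces are set up for a subsequent compaction.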
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
         "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_will_fail();
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any, as
                                       // strong roots.
                                false, // not collecting permanent generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_gc_barrier,
                                &fsc_with_no_gc_barrier);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    // Set the desired survivor size to half the real survivor space
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
  } else {
    assert(HandlePromotionFailure,
           "Should not be here unless promotion failure handling is on");
    assert(_promo_failure_scan_stack != NULL &&
           _promo_failure_scan_stack->length() == 0, "post condition");

    // Deallocate the stack and its elements.
    delete _promo_failure_scan_stack;
    _promo_failure_scan_stack = NULL;

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed)");
    }
    // Add to-space to the list of space to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For the sake of uniformity wrt ParNewGeneration::collect().
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}

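// Reinitializes the mark word of each object it visits; applied to eden and
// from-space when a failed scavenge has to be undone.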
class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  from()->set_next_compaction_space(NULL);
}

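// Undo the effects of a scavenge that suffered a promotion failure: reset the
// mark words of all objects in eden and from-space, then restore any marks
// that were preserved before objects were forwarded to themselves.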
void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);
  // Now restore saved marks, if any.
  if (_objs_with_preserved_marks != NULL) {
    assert(_preserved_marks_of_objs != NULL, "Both or none.");
    assert(_objs_with_preserved_marks->length() ==
           _preserved_marks_of_objs->length(), "Both or none.");
    for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
      oop obj   = _objs_with_preserved_marks->at(i);
      markOop m = _preserved_marks_of_objs->at(i);
      obj->set_mark(m);
    }
    delete _objs_with_preserved_marks;
    delete _preserved_marks_of_objs;
    _objs_with_preserved_marks = NULL;
    _preserved_marks_of_objs = NULL;
  }
}

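// Record the original mark word of an object that is about to be forwarded
// to itself because of a promotion failure, but only when
// must_be_preserved_for_promotion_failure() says the mark cannot simply be
// reinitialized afterwards.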
void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    if (_objs_with_preserved_marks == NULL) {
      assert(_preserved_marks_of_objs == NULL, "Both or none.");
      _objs_with_preserved_marks = new (ResourceObj::C_HEAP)
        GrowableArray<oop>(PreserveMarkStackSize, true);
      _preserved_marks_of_objs = new (ResourceObj::C_HEAP)
        GrowableArray<markOop>(PreserveMarkStackSize, true);
    }
    _objs_with_preserved_marks->push(obj);
    _preserved_marks_of_objs->push(m);
  }
}

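// When the next generation cannot accept a promotion, keep the object where
// it is: preserve its mark if necessary, forward it to itself so other
// references still resolve to it, and push it on the promotion-failure scan
// stack so its contents get scanned.  The stack is drained here unless a
// drain is already in progress, to avoid recursion in
// copy_to_survivor_space().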
void DefNewGeneration::handle_promotion_failure(oop old) {
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);
  _promotion_failed = true;

  push_on_promo_failure_scan_stack(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

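// Copy a live object into to-space if it is below the tenuring threshold and
// space is available; otherwise try to promote it into the next generation.
// If that also fails, the promotion-failure path leaves the object in place,
// forwarded to itself.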
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
        // is incorrectly set.  In any case, it's seriously wrong to be here!
        vm_exit_out_of_memory(s*wordSize, "promotion");
      }

      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::push_on_promo_failure_scan_stack(oop obj) {
  if (_promo_failure_scan_stack == NULL) {
    _promo_failure_scan_stack = new (ResourceObj::C_HEAP)
      GrowableArray<oop>(40, true);
  }

  _promo_failure_scan_stack->push(obj);
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  assert(_promo_failure_scan_stack != NULL, "precondition");

  while (_promo_failure_scan_stack->length() > 0) {
    oop obj = _promo_failure_scan_stack->pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

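// The macro below expands, once per specialized closure type, into a method
// that applies the closure to every oop modified since the last save_marks()
// call in eden, to-space and from-space, and then re-saves the marks.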
#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

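// Offer the unused tail of to-space as scratch memory to an older generation
// (the requestor), provided this generation has not just suffered a promotion
// failure and the free area is at least MinFreeScratchWords.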
void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

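// A scavenge is attempted only if to-space is empty and the next generation
// reports that it can absorb a worst-case promotion of the bytes currently
// used in this generation (scaled by MaxLiveObjectEvacuationRatio).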
bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
    assert(_next_gen != NULL,
           "This must be the youngest gen, and not the only gen");
  }

  // Decide if there's enough room for a full promotion
  // When using extremely large edens, we effectively lose a
  // large amount of old space.  Use the "MaxLiveObjectEvacuationRatio"
  // flag to reduce the minimum evacuation space requirements. If
  // there is not enough space to evacuate eden during a scavenge,
  // the VM will immediately exit with an out of memory error.
  // This flag has not been tested
  // with collectors other than simple mark & sweep.
  //
  // Note that with the addition of promotion failure handling, the
  // VM will not immediately exit but will undo the young generation
  // collection.  The parameter is left here for compatibility.
  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;

  // worst_case_evacuation is based on "used()".  For the case where this
  // method is called after a collection, this is still appropriate because
  // the case that needs to be detected is one in which a full collection
  // has been done and has overflowed into the young generation.  In that
  // case a minor collection will fail (the overflow of the full collection
  // means there is no space in the old generation for any promotion).
  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);

  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
                                              HandlePromotionFailure);
}

void DefNewGeneration::gc_epilogue(bool full) {
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is at least empty at
  // the end of a collection; if it is not, then the heap is
  // approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  clear_should_allocate_from_space();
  if (collection_attempt_is_safe()) {
    gch->clear_incremental_collection_will_fail();
  } else {
    gch->set_incremental_collection_will_fail();
    if (full) { // we seem to be running out of space
      set_should_allocate_from_space();
    }
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify(bool allow_dirty) {
  eden()->verify(allow_dirty);
  from()->verify(allow_dirty);
  to()->verify(allow_dirty);
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit,
      // so there is no reason to retry the allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until it succeeds or the soft limit can't be adjusted.
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}