0N/A/*
1879N/A * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
0N/A * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
0N/A *
0N/A * This code is free software; you can redistribute it and/or modify it
0N/A * under the terms of the GNU General Public License version 2 only, as
0N/A * published by the Free Software Foundation.
0N/A *
0N/A * This code is distributed in the hope that it will be useful, but WITHOUT
0N/A * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
0N/A * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
0N/A * version 2 for more details (a copy is included in the LICENSE file that
0N/A * accompanied this code).
0N/A *
0N/A * You should have received a copy of the GNU General Public License version
0N/A * 2 along with this work; if not, write to the Free Software Foundation,
0N/A * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
0N/A *
1472N/A * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
1472N/A * or visit www.oracle.com if you need additional information or have any
1472N/A * questions.
0N/A *
0N/A */
0N/A
1879N/A#include "precompiled.hpp"
1879N/A#include "gc_implementation/parallelScavenge/asPSYoungGen.hpp"
1879N/A#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
1879N/A#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
1879N/A#include "gc_implementation/parallelScavenge/psScavenge.hpp"
1879N/A#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
1879N/A#include "gc_implementation/shared/gcUtil.hpp"
1879N/A#include "gc_implementation/shared/spaceDecorator.hpp"
1879N/A#include "oops/oop.inline.hpp"
1879N/A#include "runtime/java.hpp"
0N/A
0N/AASPSYoungGen::ASPSYoungGen(size_t init_byte_size,
0N/A size_t minimum_byte_size,
0N/A size_t byte_size_limit) :
0N/A PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
0N/A _gen_size_limit(byte_size_limit) {
0N/A}
0N/A
0N/A
0N/AASPSYoungGen::ASPSYoungGen(PSVirtualSpace* vs,
0N/A size_t init_byte_size,
0N/A size_t minimum_byte_size,
0N/A size_t byte_size_limit) :
0N/A //PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
0N/A PSYoungGen(vs->committed_size(), minimum_byte_size, byte_size_limit),
0N/A _gen_size_limit(byte_size_limit) {
0N/A
0N/A assert(vs->committed_size() == init_byte_size, "Cannot replace with");
0N/A
0N/A _virtual_space = vs;
0N/A}
0N/A
0N/Avoid ASPSYoungGen::initialize_virtual_space(ReservedSpace rs,
0N/A size_t alignment) {
0N/A assert(_init_gen_size != 0, "Should have a finite size");
0N/A _virtual_space = new PSVirtualSpaceHighToLow(rs, alignment);
0N/A if (!_virtual_space->expand_by(_init_gen_size)) {
0N/A vm_exit_during_initialization("Could not reserve enough space for "
0N/A "object heap");
0N/A }
0N/A}
0N/A
0N/Avoid ASPSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
0N/A initialize_virtual_space(rs, alignment);
0N/A initialize_work();
0N/A}
0N/A
0N/Asize_t ASPSYoungGen::available_for_expansion() {
0N/A
0N/A size_t current_committed_size = virtual_space()->committed_size();
0N/A assert((gen_size_limit() >= current_committed_size),
0N/A "generation size limit is wrong");
0N/A ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
0N/A size_t result = gen_size_limit() - current_committed_size;
0N/A size_t result_aligned = align_size_down(result, heap->young_gen_alignment());
0N/A return result_aligned;
0N/A}
0N/A
0N/A// Return the number of bytes the young gen is willing give up.
0N/A//
0N/A// Future implementations could check the survivors and if to_space is in the
0N/A// right place (below from_space), take a chunk from to_space.
0N/Asize_t ASPSYoungGen::available_for_contraction() {
0N/A
0N/A size_t uncommitted_bytes = virtual_space()->uncommitted_size();
0N/A if (uncommitted_bytes != 0) {
0N/A return uncommitted_bytes;
0N/A }
0N/A
0N/A if (eden_space()->is_empty()) {
0N/A // Respect the minimum size for eden and for the young gen as a whole.
0N/A ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
13N/A const size_t eden_alignment = heap->intra_heap_alignment();
0N/A const size_t gen_alignment = heap->young_gen_alignment();
0N/A
0N/A assert(eden_space()->capacity_in_bytes() >= eden_alignment,
0N/A "Alignment is wrong");
0N/A size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
0N/A eden_avail = align_size_down(eden_avail, gen_alignment);
0N/A
0N/A assert(virtual_space()->committed_size() >= min_gen_size(),
0N/A "minimum gen size is wrong");
0N/A size_t gen_avail = virtual_space()->committed_size() - min_gen_size();
0N/A assert(virtual_space()->is_aligned(gen_avail), "not aligned");
0N/A
0N/A const size_t max_contraction = MIN2(eden_avail, gen_avail);
0N/A // See comment for ASPSOldGen::available_for_contraction()
0N/A // for reasons the "increment" fraction is used.
0N/A PSAdaptiveSizePolicy* policy = heap->size_policy();
0N/A size_t result = policy->eden_increment_aligned_down(max_contraction);
0N/A size_t result_aligned = align_size_down(result, gen_alignment);
0N/A if (PrintAdaptiveSizePolicy && Verbose) {
0N/A gclog_or_tty->print_cr("ASPSYoungGen::available_for_contraction: %d K",
0N/A result_aligned/K);
0N/A gclog_or_tty->print_cr(" max_contraction %d K", max_contraction/K);
0N/A gclog_or_tty->print_cr(" eden_avail %d K", eden_avail/K);
0N/A gclog_or_tty->print_cr(" gen_avail %d K", gen_avail/K);
0N/A }
0N/A return result_aligned;
0N/A
0N/A }
0N/A
0N/A return 0;
0N/A}
0N/A
0N/A// The current implementation only considers to the end of eden.
0N/A// If to_space is below from_space, to_space is not considered.
0N/A// to_space can be.
0N/Asize_t ASPSYoungGen::available_to_live() {
0N/A ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
13N/A const size_t alignment = heap->intra_heap_alignment();
0N/A
0N/A // Include any space that is committed but is not in eden.
0N/A size_t available = pointer_delta(eden_space()->bottom(),
0N/A virtual_space()->low(),
0N/A sizeof(char));
0N/A
0N/A const size_t eden_capacity = eden_space()->capacity_in_bytes();
0N/A if (eden_space()->is_empty() && eden_capacity > alignment) {
0N/A available += eden_capacity - alignment;
0N/A }
0N/A return available;
0N/A}
0N/A
0N/A// Similar to PSYoungGen::resize_generation() but
0N/A// allows sum of eden_size and 2 * survivor_size to exceed _max_gen_size
0N/A// expands at the low end of the virtual space
0N/A// moves the boundary between the generations in order to expand
0N/A// some additional diagnostics
0N/A// If no additional changes are required, this can be deleted
0N/A// and the changes factored back into PSYoungGen::resize_generation().
0N/Abool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
0N/A const size_t alignment = virtual_space()->alignment();
0N/A size_t orig_size = virtual_space()->committed_size();
0N/A bool size_changed = false;
0N/A
0N/A // There used to be a guarantee here that
0N/A // (eden_size + 2*survivor_size) <= _max_gen_size
0N/A // This requirement is enforced by the calculation of desired_size
0N/A // below. It may not be true on entry since the size of the
0N/A // eden_size is no bounded by the generation size.
0N/A
0N/A assert(max_size() == reserved().byte_size(), "max gen size problem?");
0N/A assert(min_gen_size() <= orig_size && orig_size <= max_size(),
0N/A "just checking");
0N/A
0N/A // Adjust new generation size
0N/A const size_t eden_plus_survivors =
0N/A align_size_up(eden_size + 2 * survivor_size, alignment);
0N/A size_t desired_size = MAX2(MIN2(eden_plus_survivors, gen_size_limit()),
0N/A min_gen_size());
0N/A assert(desired_size <= gen_size_limit(), "just checking");
0N/A
0N/A if (desired_size > orig_size) {
0N/A // Grow the generation
0N/A size_t change = desired_size - orig_size;
263N/A HeapWord* prev_low = (HeapWord*) virtual_space()->low();
0N/A if (!virtual_space()->expand_by(change)) {
0N/A return false;
0N/A }
263N/A if (ZapUnusedHeapArea) {
263N/A // Mangle newly committed space immediately because it
263N/A // can be done here more simply that after the new
263N/A // spaces have been computed.
263N/A HeapWord* new_low = (HeapWord*) virtual_space()->low();
263N/A assert(new_low < prev_low, "Did not grow");
263N/A
263N/A MemRegion mangle_region(new_low, prev_low);
263N/A SpaceMangler::mangle_region(mangle_region);
263N/A }
0N/A size_changed = true;
0N/A } else if (desired_size < orig_size) {
0N/A size_t desired_change = orig_size - desired_size;
0N/A
0N/A // How much is available for shrinking.
0N/A size_t available_bytes = limit_gen_shrink(desired_change);
0N/A size_t change = MIN2(desired_change, available_bytes);
0N/A virtual_space()->shrink_by(change);
0N/A size_changed = true;
0N/A } else {
0N/A if (Verbose && PrintGC) {
0N/A if (orig_size == gen_size_limit()) {
0N/A gclog_or_tty->print_cr("ASPSYoung generation size at maximum: "
0N/A SIZE_FORMAT "K", orig_size/K);
0N/A } else if (orig_size == min_gen_size()) {
0N/A gclog_or_tty->print_cr("ASPSYoung generation size at minium: "
0N/A SIZE_FORMAT "K", orig_size/K);
0N/A }
0N/A }
0N/A }
0N/A
0N/A if (size_changed) {
0N/A reset_after_change();
0N/A if (Verbose && PrintGC) {
0N/A size_t current_size = virtual_space()->committed_size();
0N/A gclog_or_tty->print_cr("ASPSYoung generation size changed: "
0N/A SIZE_FORMAT "K->" SIZE_FORMAT "K",
0N/A orig_size/K, current_size/K);
0N/A }
0N/A }
0N/A
0N/A guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
0N/A virtual_space()->committed_size() == max_size(), "Sanity");
0N/A
0N/A return true;
0N/A}
0N/A
// Similar to PSYoungGen::resize_spaces() but
//  eden always starts at the low end of the committed virtual space
//  current implementation does not allow holes between the spaces
//  _young_generation_boundary has to be reset because it changes.
//  so additional verification
//
// Reshapes eden/from/to toward the requested sizes without moving live data:
// from-space (which may contain survivors) stays fixed; eden and to-space are
// recomputed around it.  Requires eden and to-space to be empty on entry;
// otherwise the method returns without changing anything.

void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
                                 size_t requested_survivor_size) {
  assert(UseAdaptiveSizePolicy, "sanity check");
  assert(requested_eden_size > 0 && requested_survivor_size > 0,
         "just checking");

  space_invariants();

  // We require eden and to space to be empty
  if ((!eden_space()->is_empty()) || (!to_space()->is_empty())) {
    return;
  }

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("PSYoungGen::resize_spaces(requested_eden_size: "
                  SIZE_FORMAT
                  ", requested_survivor_size: " SIZE_FORMAT ")",
                  requested_eden_size, requested_survivor_size);
    gclog_or_tty->print_cr(" eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  eden_space()->bottom(),
                  eden_space()->end(),
                  pointer_delta(eden_space()->end(),
                                eden_space()->bottom(),
                                sizeof(char)));
    gclog_or_tty->print_cr(" from: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  from_space()->bottom(),
                  from_space()->end(),
                  pointer_delta(from_space()->end(),
                                from_space()->bottom(),
                                sizeof(char)));
    gclog_or_tty->print_cr(" to: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  to_space()->bottom(),
                  to_space()->end(),
                  pointer_delta( to_space()->end(),
                                 to_space()->bottom(),
                                 sizeof(char)));
  }

  // There's nothing to do if the new sizes are the same as the current
  if (requested_survivor_size == to_space()->capacity_in_bytes() &&
      requested_survivor_size == from_space()->capacity_in_bytes() &&
      requested_eden_size == eden_space()->capacity_in_bytes()) {
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" capacities are the right sizes, returning");
    }
    return;
  }

  // Current boundaries of the three spaces.  Note that eden_start is taken
  // from the virtual space's low end, not from eden's current bottom: eden
  // always begins at the low end of the committed space (see header comment).
  char* eden_start = (char*)virtual_space()->low();
  char* eden_end = (char*)eden_space()->end();
  char* from_start = (char*)from_space()->bottom();
  char* from_end = (char*)from_space()->end();
  char* to_start = (char*)to_space()->bottom();
  char* to_end = (char*)to_space()->end();

  assert(eden_start < from_start, "Cannot push into from_space");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t alignment = heap->intra_heap_alignment();
  // If the request is below the generation's minimum, eden may be made
  // larger than requested to keep the generation at its minimum size.
  const bool maintain_minimum =
    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();

  bool eden_from_to_order = from_start < to_start;
  // Check whether from space is below to space
  if (eden_from_to_order) {
    // Eden, from, to

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" Eden, from, to:");
    }

    // Set eden
    // "requested_eden_size" is a goal for the size of eden
    // and may not be attainable.  "eden_size" below is
    // calculated based on the location of from-space and
    // the goal for the size of eden.  from-space is
    // fixed in place because it contains live data.
    // The calculation is done this way to avoid 32bit
    // overflow (i.e., eden_start + requested_eden_size
    // may be too large for representation in 32bits).
    size_t eden_size;
    if (maintain_minimum) {
      // Only make eden larger than the requested size if
      // the minimum size of the generation has to be maintained.
      // This could be done in general but policy at a higher
      // level is determining a requested size for eden and that
      // should be honored unless there is a fundamental reason.
      eden_size = pointer_delta(from_start,
                                eden_start,
                                sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(from_start, eden_start, sizeof(char)));
    }

    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // To may resize into from space as long as it is clear of live data.
    // From space must remain page aligned, though, so we need to do some
    // extra calculations.

    // First calculate an optimal to-space
    to_end = (char*)virtual_space()->high();
    to_start = (char*)pointer_delta(to_end,
                                    (char*)requested_survivor_size,
                                    sizeof(char));

    // Does the optimal to-space overlap from-space?
    if (to_start < (char*)from_space()->end()) {
      assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

      // Calculate the minimum offset possible for from_end
      size_t from_size =
        pointer_delta(from_space()->top(), from_start, sizeof(char));

      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
      if (from_size == 0) {
        from_size = alignment;
      } else {
        from_size = align_size_up(from_size, alignment);
      }

      from_end = from_start + from_size;
      assert(from_end > from_start, "addition overflow or from_size problem");

      guarantee(from_end <= (char*)from_space()->end(),
                "from_end moved to the right");

      // Now update to_start with the new from_end
      to_start = MAX2(from_end, to_start);
    }

    guarantee(to_start != to_end, "to space is zero sized");

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" [eden_start .. eden_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    eden_start,
                    eden_end,
                    pointer_delta(eden_end, eden_start, sizeof(char)));
      gclog_or_tty->print_cr(" [from_start .. from_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    from_start,
                    from_end,
                    pointer_delta(from_end, from_start, sizeof(char)));
      gclog_or_tty->print_cr(" [ to_start .. to_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    to_start,
                    to_end,
                    pointer_delta( to_end, to_start, sizeof(char)));
    }
  } else {
    // Eden, to, from
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" Eden, to, from:");
    }

    // To space gets priority over eden resizing. Note that we position
    // to space as if we were able to resize from space, even though from
    // space is not modified.
    // Giving eden priority was tried and gave poorer performance.
    to_end = (char*)pointer_delta(virtual_space()->high(),
                                  (char*)requested_survivor_size,
                                  sizeof(char));
    to_end = MIN2(to_end, from_start);
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));
    // if the space sizes are to be increased by several times then
    // 'to_start' will point beyond the young generation. In this case
    // 'to_start' should be adjusted.
    to_start = MAX2(to_start, eden_start + alignment);

    // Compute how big eden can be, then adjust end.
    // See comments above on calculating eden_end.
    size_t eden_size;
    if (maintain_minimum) {
      eden_size = pointer_delta(to_start, eden_start, sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(to_start, eden_start, sizeof(char)));
    }
    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // Don't let eden shrink down to 0 or less.
    eden_end = MAX2(eden_end, eden_start + alignment);
    to_start = MAX2(to_start, eden_end);

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" [eden_start .. eden_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    eden_start,
                    eden_end,
                    pointer_delta(eden_end, eden_start, sizeof(char)));
      gclog_or_tty->print_cr(" [ to_start .. to_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    to_start,
                    to_end,
                    pointer_delta( to_end, to_start, sizeof(char)));
      gclog_or_tty->print_cr(" [from_start .. from_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    from_start,
                    from_end,
                    pointer_delta(from_end, from_start, sizeof(char)));
    }
  }


  // from-space must not have moved left nor shrunk below its live data.
  guarantee((HeapWord*)from_start <= from_space()->bottom(),
            "from start moved to the right");
  guarantee((HeapWord*)from_end >= from_space()->top(),
            "from end moved into live data");
  assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
  assert(is_object_aligned((intptr_t)from_start), "checking alignment");
  assert(is_object_aligned((intptr_t)to_start), "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);

  // Let's make sure the call to initialize doesn't reset "top"!
  DEBUG_ONLY(HeapWord* old_from_top = from_space()->top();)

  // For PrintAdaptiveSizePolicy block below
  size_t old_from = from_space()->capacity_in_bytes();
  size_t old_to   = to_space()->capacity_in_bytes();

  if (ZapUnusedHeapArea) {
    // NUMA is a special case because a numa space is not mangled
    // in order to not prematurely bind its address to memory to
    // the wrong memory (i.e., don't want the GC thread to first
    // touch the memory).  The survivor spaces are not numa
    // spaces and are mangled.
    if (UseNUMA) {
      if (eden_from_to_order) {
        mangle_survivors(from_space(), fromMR, to_space(), toMR);
      } else {
        mangle_survivors(to_space(), toMR, from_space(), fromMR);
      }
    }

    // If not mangling the spaces, do some checking to verify that
    // the spaces are already mangled.
    // The spaces should be correctly mangled at this point so
    // do some checking here. Note that they are not being mangled
    // in the calls to initialize().
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into an area
    // covered by another space and a failure of the check may
    // not correctly indicate which space is not properly mangled.

    HeapWord* limit = (HeapWord*) virtual_space()->high();
    eden_space()->check_mangled_unused_area(limit);
    from_space()->check_mangled_unused_area(limit);
      to_space()->check_mangled_unused_area(limit);
  }
  // When an existing space is being initialized, it is not
  // mangled because the space has been previously mangled.
  eden_space()->initialize(edenMR,
                           SpaceDecorator::Clear,
                           SpaceDecorator::DontMangle);
    to_space()->initialize(toMR,
                           SpaceDecorator::Clear,
                           SpaceDecorator::DontMangle);
  from_space()->initialize(fromMR,
                           SpaceDecorator::DontClear,
                           SpaceDecorator::DontMangle);

  // Eden's bottom may have moved; keep the scavenger's boundary in sync.
  PSScavenge::set_young_generation_boundary(eden_space()->bottom());

  assert(from_space()->top() == old_from_top, "from top changed!");

  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
                  "collection: %d "
                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
                  heap->total_collections(),
                  old_from, old_to,
                  from_space()->capacity_in_bytes(),
                  to_space()->capacity_in_bytes());
    gclog_or_tty->cr();
  }
  space_invariants();
}
// Recompute state that depends on the generation's boundaries after the
// virtual space has been expanded or shrunk: the reserved region, the
// reference processor's span, eden's bottom, the scavenger's young-gen
// boundary, and the card table's covered region.
void ASPSYoungGen::reset_after_change() {
  assert_locked_or_safepoint(Heap_lock);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());
  PSScavenge::reference_processor()->set_span(_reserved);

  // Eden always starts at the low end of the committed space; if that end
  // moved, re-initialize eden over the new region (clearing and mangling it)
  // and update the scavenger's boundary to the new bottom.
  HeapWord* new_eden_bottom = (HeapWord*)virtual_space()->low();
  HeapWord* eden_bottom = eden_space()->bottom();
  if (new_eden_bottom != eden_bottom) {
    MemRegion eden_mr(new_eden_bottom, eden_space()->end());
    eden_space()->initialize(eden_mr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);
    PSScavenge::set_young_generation_boundary(eden_space()->bottom());
  }
  // Resize the card table to cover exactly the committed region.
  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  space_invariants();
}