/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
0N/A
1879N/A#include "precompiled.hpp"
1879N/A#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
1879N/A#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
1879N/A#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
1879N/A#include "gc_implementation/parallelScavenge/psPermGen.hpp"
1879N/A#include "gc_implementation/shared/gcUtil.hpp"
1879N/A#include "gc_implementation/shared/markSweep.inline.hpp"
1879N/A#include "oops/markOop.inline.hpp"
0N/A
// Permanent generation for the parallel scavenge collector. Behaves like
// the old generation (PSOldGen) but keeps its own allocation history
// (_last_used, _avg_size) so compute_new_size() can resize it adaptively
// between collections.
PSPermGen::PSPermGen(ReservedSpace rs, size_t alignment,
                     size_t initial_size, size_t min_size, size_t max_size,
                     const char* gen_name, int level) :
  PSOldGen(rs, alignment, initial_size, min_size, max_size, gen_name, level),
  _last_used(0)
{
  assert(object_mark_sweep() != NULL, "Sanity");

  // Perm gen tolerates a different dead-space ratio than the old gen;
  // controlled by its own flag.
  object_mark_sweep()->set_allowed_dead_ratio(PermMarkSweepDeadRatio);
  // Padded average of bytes allocated in perm gen between collections;
  // sampled and consumed by compute_new_size() when sizing free space.
  _avg_size = new AdaptivePaddedAverage(AdaptivePermSizeWeight,
                                        PermGenPadding);
}
0N/A
0N/AHeapWord* PSPermGen::allocate_permanent(size_t size) {
0N/A assert_locked_or_safepoint(Heap_lock);
2599N/A HeapWord* obj = allocate_noexpand(size);
0N/A
0N/A if (obj == NULL) {
2599N/A obj = expand_and_allocate(size);
0N/A }
0N/A
0N/A return obj;
0N/A}
0N/A
// Recompute the committed size of the perm gen after a collection.
// Target size = live data + padded average of per-GC allocation,
// aligned and clamped to [_min_gen_size, _max_gen_size]; the committed
// space is then expanded or shrunk (under ExpandHeap_lock) to match.
// 'used_before_collection' is the perm gen usage sampled before the GC.
void PSPermGen::compute_new_size(size_t used_before_collection) {
  // Update our padded average of objects allocated in perm
  // gen between collections.
  assert(used_before_collection >= _last_used,
                              "negative allocation amount since last GC?");

  const size_t alloc_since_last_gc = used_before_collection - _last_used;
  _avg_size->sample(alloc_since_last_gc);

  const size_t current_live = used_in_bytes();
  // Stash away the current amount live for the next call to this method.
  _last_used = current_live;

  // We have different alignment constraints than the rest of the heap.
  const size_t alignment = MAX2(MinPermHeapExpansion,
                                virtual_space()->alignment());

  // Compute the desired size:
  // The free space is the newly computed padded average,
  // so the desired size is what's live + the free space.
  size_t desired_size = current_live + (size_t)_avg_size->padded_average();
  desired_size = align_size_up(desired_size, alignment);

  // ...and no larger or smaller than our max and min allowed.
  desired_size = MAX2(MIN2(desired_size, _max_gen_size), _min_gen_size);
  assert(desired_size <= _max_gen_size, "just checking");

  const size_t size_before = _virtual_space->committed_size();

  if (desired_size == size_before) {
    // no change, we're done
    return;
  }

  {
    // We'll be growing or shrinking the heap: in either case,
    // we need to hold a lock.
    MutexLocker x(ExpandHeap_lock);
    if (desired_size > size_before) {
      // Growing: round the delta up so we commit at least what was asked.
      const size_t change_bytes = desired_size - size_before;
      const size_t aligned_change_bytes =
        align_size_up(change_bytes, alignment);
      expand_by(aligned_change_bytes);
    } else {
      // Shrinking: round the delta down so we never uncommit below the
      // desired size.
      const size_t change_bytes =
        size_before - desired_size;
      const size_t aligned_change_bytes = align_size_down(change_bytes, alignment);
      shrink(aligned_change_bytes);
    }
  }

  // While this code isn't controlled by AdaptiveSizePolicy, it's
  // convenient to see all resizing decisions under the same flag.
  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    gclog_or_tty->print_cr("AdaptiveSizePolicy::perm generation size: "
                           "collection: %d "
                           "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                           heap->total_collections(),
                           size_before, _virtual_space->committed_size());
  }
}
0N/A
// Prepare the perm gen for the compaction phase of a full (mark-sweep) GC.
void PSPermGen::precompact() {
  // Reset start array first. NOTE(review): the ordering comment is
  // deliberate — presumably the start array is repopulated while
  // precompact() computes new object locations, so the reset must
  // happen before delegating below.
  _start_array.reset();
  object_mark_sweep()->precompact();
}