/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPermGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"
}
// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity. For example when the heap space is tight and full measures
// are being taken to free space.
if (ScavengeBeforeFullGC) {
}
const bool clear_all_soft_refs =
}
// This method contains no policy. You should probably
// be calling invoke() instead.
if (GC_locker::check_active_before_gc()) {
return false;
}
// The scope of casr should end after code that can change
// CollectorPolicy::_should_clear_all_soft_refs.
// Increment the invocation count
// Save information needed to minimize mangling
// We need to track unique mark sweep invocations as well.
// Fill in TLABs
}
// Verify object start arrays
if (VerifyObjectStartArray &&
}
// Filled in below to track the state of the young gen after the collection.
bool eden_empty;
bool survivors_empty;
bool young_gen_empty;
{
// Let the size policy know we're starting
// When collecting the permanent generation methodOops may be moving,
// so we either have to flush all bcp data or convert it into bci.
CodeCache::gc_prologue();
Threads::gc_prologue();
// Capture heap size before collection for printing.
// Capture perm gen size before collection for sizing.
// For PrintGCDetails
// Don't add any more derived pointers during phase3
if (ZapUnusedHeapArea) {
// Do a complete mangle (top to end) because the usage for
// scratch does not maintain a top pointer.
}
if (!eden_empty) {
}
// Update heap occupancy information which is used as
// input to soft ref clearing policy at the next gc.
if (young_gen_empty) {
} else {
}
}
Threads::gc_epilogue();
CodeCache::gc_epilogue();
// Update time of last GC
// Let the size policy know we're done
if (UseAdaptiveSizePolicy) {
if (PrintAdaptiveSizePolicy) {
gclog_or_tty->stamp();
heap->total_collections());
if (Verbose) {
" perm_gen_capacity: %d ",
}
}
// Don't check if the size_policy is ready here. Let
// the size_policy check that internally.
// Calculate optimal free space amounts
"Sizes of space in young gen are out-of-bounds");
old_gen->used_in_bytes(),
old_gen->max_gen_size(),
true /* full gc*/,
heap->collector_policy());
// Don't resize the young generation at a major collection. A
// desired young generation size may have been calculated but
// resizing the young generation complicates the code because the
// resizing of the old generation may have moved the boundary
// between the young generation and the old generation. Let the
// young generation resizing happen at the minor collections.
}
if (PrintAdaptiveSizePolicy) {
heap->total_collections());
}
}
if (UsePerfData) {
old_gen->capacity_in_bytes());
}
heap->resize_all_tlabs();
// We collected the perm gen, so we'll resize it here.
if (PrintGC) {
if (PrintGCDetails) {
// Don't print a GC timestamp here. This is after the GC so
// would be confusing.
}
// Do perm gen after heap because prev_used does
// not include the perm gen (done this way in the other
// collectors).
if (PrintGCDetails) {
}
}
// Track memory usage and detect low memory
heap->update_counters();
}
}
// Re-verify object start arrays
if (VerifyObjectStartArray &&
}
if (ZapUnusedHeapArea) {
}
#ifdef TRACESPINNING
#endif
return true;
}
if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
return false;
}
// Both generations must be completely committed.
return false;
}
return false;
}
// Figure out how much to take from eden. Include the average amount promoted
// in the total; otherwise the next young gen GC will simply bail out to a
// full GC.
if (absorb_size >= eden_capacity) {
return false; // Must leave some space in eden.
}
return false; // Respect young gen minimum size.
}
if (TraceAdaptiveGCBoundary && Verbose) {
absorb_size / K,
}
// Fill the unused part of the old gen.
if (unused_words > 0) {
return false; // If the old gen cannot be filled, must give up.
}
}
// Take the live data from eden and set both top and end in the old gen to
// eden top. (Need to set end because reset_after_change() mangles the region
// from end to virtual_space->high() in debug builds).
// Update the object start array for the filler object and the data from eden.
}
// Could update the promoted average here, but it is not typically updated at
// full GCs and the value to use is unclear. Something like
//
// cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
return true;
}
_preserved_count = 0;
// We want to calculate the size in bytes first.
// Now divide by the size of a PreservedMark
_preserved_count_max /= sizeof(PreservedMark);
}
_preserved_mark_stack.clear(true);
_preserved_oop_stack.clear(true);
_objarray_stack.clear(true);
_revisit_klass_stack.clear(true);
_revisit_mdo_stack.clear(true);
}
// Recursively traverse all live objects and mark them
trace(" 1");
// General strong roots.
{
//CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
}
// Flush marking stack.
follow_stack();
// Process reference objects found during marking
{
}
// Follow system dictionary roots and unload classes
// Follow code cache roots
follow_stack(); // Flush marking stack
// Update subklass/sibling/implementor links of live klasses
// Visit memoized mdo's and clear unmarked weak refs
// Visit interned string tables and delete unmarked oops
// Clean up unreferenced symbols in symbol table.
SymbolTable::unlink();
}
trace("2");
// Now all live objects are marked, compute the new object addresses.
// It is imperative that we traverse perm_gen LAST. If dead space is
// allowed, a range of dead objects may get overwritten by a dead int
// array. If perm_gen is not traversed last a klassOop may get
// overwritten. This is fine since it is dead, but if the class has dead
// instances we have to skip them, and in order to find their size we
// need the klassOop!
//
// It is not required that we traverse spaces in the same order in
// phase2, phase3 and phase4, but the ValidateMarkSweep live oops
// tracking expects us to do so. See comment under phase4.
// Begin compacting into the old gen
// This will also compact the young gen spaces.
old_gen->precompact();
// Compact the perm gen into the perm gen
perm_gen->precompact();
}
// This should be moved to the shared markSweep code!
public:
};
// Adjust the pointers to reflect the new locations
trace("3");
// General strong roots.
// SO_AllClasses
//CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());
// Now adjust pointers in remaining weak roots. (All of which should
// have been cleared if they pointed to non-surviving objects.)
// Global (weak) JNI handles
adjust_marks();
}
EventMark m("4 compact heap");
trace("4");
// All pointers are now adjusted, move objects accordingly
// It is imperative that we traverse perm_gen first in phase4. All
// classes must be allocated earlier than their instances, and traversing
// perm_gen first makes sure that all klassOops have moved to their new
// location before any instance does a dispatch through its klass!
}
// We need a monotonically non-decreasing time in ms but
// os::javaTimeMillis() does not guarantee monotonicity.
// XXX See note in genCollectedHeap::millis_since_last_gc().
if (ret_val < 0) {
return 0;
}
return ret_val;
}
// We need a monotonically non-decreasing time in ms but
// os::javaTimeMillis() does not guarantee monotonicity.
}