/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"
// Define before use
public:
assert(false, "Do not call.");
}
}
};
protected:
public:
}
template <class T> void do_oop_work(T* p) {
"expected an oop while scanning weak refs");
// Weak refs may be visited more than once.
}
}
};
private:
public:
virtual void do_void() {
_promotion_manager->drain_stacks(true);
"stacks should be empty at this point");
}
};
if (obj->is_forwarded()) {
}
}
};
public:
{ }
private:
};
{
}
public:
{ }
{
}
};
};
{
}
}
}
manager->execute_and_wait(q);
}
{
}
manager->execute_and_wait(q);
}
// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
bool full_gc_done = false;
if (UsePerfData) {
}
if (need_full_gc) {
if (UseParallelOldGC) {
} else {
}
}
return full_gc_done;
}
// This method contains no policy. You should probably
// be calling invoke() instead.
if (GC_locker::check_active_before_gc()) {
return false;
}
// Check for potential problems.
if (!should_attempt_scavenge()) {
return false;
}
bool promotion_failure_occurred = false;
// Gather the feedback data for eden occupancy.
}
if (ZapUnusedHeapArea) {
// Save information needed to minimize mangling
}
// Fill in TLABs
}
{
// Let the size policy know we're starting
// Verify the object start arrays.
if (VerifyObjectStartArray &&
}
// Verify no unmarked old->young roots
if (VerifyRememberedSets) {
}
if (!ScavengeWithObjectsInToSpace) {
"Attempt to scavenge with live objects in to_space");
} else if (ZapUnusedHeapArea) {
}
reference_processor()->setup_policy(false);
// We track how much was promoted to the next generation for
// the AdaptiveSizePolicy.
// For PrintGCDetails
// Reset our survivor overflow.
set_survivor_overflow(false);
// creating the promotion_manager. We pass the top
// values to the card_table, to prevent it from
// straying into the promotion labs.
// Release all previously held resources
// Set the number of GC threads to be used in this collection
// Get the active number of workers here and use that value
// throughout the methods.
// We'll use the promotion manager again later.
{
for(uint i=0; i < stripe_total; i++) {
}
// We scan the thread roots in parallel
if (active_workers > 1) {
for (uint j = 0; j < active_workers; j++) {
}
}
gc_task_manager()->execute_and_wait(q);
}
// Process reference objects discovered during scavenge
{
if (reference_processor()->processing_is_mt()) {
&_gc_timer);
} else {
}
// Enqueue reference objects discovered during scavenge.
if (reference_processor()->processing_is_mt()) {
} else {
}
}
if (!JavaObjectsInPerm) {
// Unlink any dead interned Strings
// Process the remaining live ones
}
// Finally, flush the promotion_manager's labs, and deallocate its stacks.
if (promotion_failure_occurred) {
if (PrintGC) {
}
}
// Let the size policy know we're done. Note that we count promotion
// failure cleanup time as part of the collection (otherwise, we're
// implicitly saying it's mutator time).
if (!promotion_failure_occurred) {
// Swap the survivor spaces.
young_gen->swap_spaces();
// A successful scavenge should restart the GC time limit count which is
// for full GC's.
if (UseAdaptiveSizePolicy) {
// Calculate the new survivor size and tenuring threshold
if (PrintAdaptiveSizePolicy) {
gclog_or_tty->stamp();
heap->total_collections());
if (Verbose) {
" perm_gen_capacity: %d ",
}
}
if (UsePerfData) {
}
if (PrintTenuringDistribution) {
gclog_or_tty->cr();
}
if (UsePerfData) {
}
// Do call at minor collections?
// Don't check if the size_policy is ready at this
// level. Let the size_policy check that internally.
if (UseAdaptiveSizePolicy &&
// Calculate optimal free space amounts
"Sizes of space in young gen are out-of-bounds");
old_gen->used_in_bytes(),
old_gen->max_gen_size(),
false /* full gc*/,
heap->collector_policy());
}
// Resize the young generation at every collection
// even if new sizes have not been calculated. This is
// to allow resizes that may have been inhibited by the
// relative location of the "to" and "from" spaces.
// Resizing the old gen at minor collects can cause increases
// that don't feed back to the generation sizing policy until
// a major collection. Don't resize the old gen here.
if (PrintAdaptiveSizePolicy) {
heap->total_collections());
}
}
// Update the structure of the eden. With NUMA-eden CPU hotplugging or offlining can
// cause the change of the heap layout. Make sure eden is reshaped if that's the case.
// Also update() will cause adaptive NUMA chunk resizing.
heap->resize_all_tlabs();
}
{
}
// Re-verify object start arrays
if (VerifyObjectStartArray &&
}
// Verify all old -> young cards are now precise
if (VerifyRememberedSets) {
// Precise verification will give false positives. Until this is fixed,
// use imprecise verification.
// CardTableExtension::verify_all_young_refs_precise();
}
if (PrintGC) {
if (PrintGCDetails) {
// Don't print a GC timestamp here. This is after the GC so
// would be confusing.
}
}
// Track memory usage and detect low memory
heap->update_counters();
}
}
if (ZapUnusedHeapArea) {
}
if (PrintGCTaskTimeStamps) {
scavenge_exit.ticks());
}
#ifdef TRACESPINNING
#endif
return !promotion_failure_occurred;
}
// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
{
// Unforward all pointers in the young gen.
}
// Restore any saved marks.
while (!_preserved_oop_stack.is_empty()) {
}
// Clear the preserved mark and oop stack caches.
_preserved_mark_stack.clear(true);
_preserved_oop_stack.clear(true);
}
// Reset the PromotionFailureALot counters.
}
// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
// Should use per-worker private stacks here rather than
// locking a common pair of stacks.
}
}
if (UsePerfData) {
}
if (!ScavengeWithObjectsInToSpace) {
// Do not attempt to promote unless to_space is empty
if (UsePerfData) {
}
return false;
}
}
// Test to see if the scavenge will likely fail.
// A similar test is done in the policy's should_full_GC(). If this is
// changed, decide if that test should also be changed.
if (PrintGCDetails && Verbose) {
" padded_average_promoted " SIZE_FORMAT
" free in old gen " SIZE_FORMAT,
old_gen->free_in_bytes());
if (young_gen->used_in_bytes() <
}
}
if (result) {
} else {
if (UsePerfData) {
}
}
return result;
}
// Used to add tasks
"shouldn't return NULL");
return ParallelScavengeHeap::gc_task_manager();
}
// Arguments must have been parsed
if (AlwaysTenure) {
_tenuring_threshold = 0;
} else if (NeverTenure) {
} else {
// We want to smooth out our startup times for the AdaptiveSizePolicy
}
// Set boundary between young_gen and old_gen
"perm above old");
"old above young");
// Initialize ref handling object for scavenging.
(int) ParallelGCThreads, // mt processing degree
true, // mt discovery
(int) ParallelGCThreads, // mt discovery degree
true, // atomic_discovery
NULL, // header provides liveness info
false); // next field updates do not need write barrier
// Cache the cardtable
}