psCompactionManager.cpp revision 375
/*
 * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_psCompactionManager.cpp.incl"

PSOldGen* ParCompactionManager::_old_gen = NULL;
ParCompactionManager** ParCompactionManager::_manager_array = NULL;
OopTaskQueueSet* ParCompactionManager::_stack_array = NULL;
ObjectStartArray* ParCompactionManager::_start_array = NULL;
ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
RegionTaskQueueSet* ParCompactionManager::_region_array = NULL;

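// Each ParCompactionManager holds the thread-local state one GC worker (or
// the VMThread) uses during parallel compaction: a bounded, stealable
// marking stack plus an unbounded C-heap overflow stack for oops, the
// analogous pair for region indices, and a stack of klasses to revisit.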
ParCompactionManager::ParCompactionManager() :
    _action(CopyAndUpdate) {

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();

  marking_stack()->initialize();

  // We want the overflow stack to be permanent
  _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
#ifdef USE_RegionTaskQueueWithOverflow
  region_stack()->initialize();
#else
  region_stack()->initialize();

  // We want the overflow stack to be permanent
  _region_overflow_stack =
    new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
#endif

  // Note that _revisit_klass_stack is allocated out of the
  // C heap (as opposed to out of ResourceArena).
  int size =
    (SystemDictionary::number_of_classes() * 2) * 2 / ParallelGCThreads;
  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);

}

ParCompactionManager::~ParCompactionManager() {
  delete _overflow_stack;
  delete _revisit_klass_stack;
  // _manager_array and _stack_array are statics
  // shared with all instances of ParCompactionManager
  // should not be deallocated.
}

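// One-time static setup: cache the mark bitmap, create the task-queue sets
// used for work stealing, and allocate one ParCompactionManager per GC worker
// thread plus an extra one (at index parallel_gc_threads) for the VMThread.
// Only the workers' queues are registered with the queue sets, so only the
// workers' stacks can be stolen from.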
void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  assert(PSParallelCompact::gc_task_manager() != NULL,
    "Needed for initialization");

  _mark_bitmap = mbm;

  uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1);
  guarantee(_manager_array != NULL, "Could not initialize compaction manager array");

  _stack_array = new OopTaskQueueSet(parallel_gc_threads);
  guarantee(_stack_array != NULL, "Could not initialize compaction manager");
  _region_array = new RegionTaskQueueSet(parallel_gc_threads);
  guarantee(_region_array != NULL, "Could not initialize compaction manager");

  // Create and register the ParCompactionManager(s) for the worker threads.
  for(uint i=0; i<parallel_gc_threads; i++) {
    _manager_array[i] = new ParCompactionManager();
    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
    stack_array()->register_queue(i, _manager_array[i]->marking_stack());
#ifdef USE_RegionTaskQueueWithOverflow
    region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
#else
    region_array()->register_queue(i, _manager_array[i]->region_stack());
#endif
  }

  // The VMThread gets its own ParCompactionManager, which is not available
  // for work stealing.
  _manager_array[parallel_gc_threads] = new ParCompactionManager();
  guarantee(_manager_array[parallel_gc_threads] != NULL,
    "Could not create ParCompactionManager");
  assert(PSParallelCompact::gc_task_manager()->workers() != 0,
    "Not initialized?");
}

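// The predicates below interpret the manager's _action setting, which encodes
// what the current compaction pass is expected to do (copy objects, update
// references, verify, or reset).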
bool ParCompactionManager::should_update() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Update) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

bool ParCompactionManager::should_copy() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Copy) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

bool ParCompactionManager::should_verify_only() {
  assert(action() != NotValid, "Action is not set");
  return action() == ParCompactionManager::VerifyUpdate;
}

bool ParCompactionManager::should_reset_only() {
  assert(action() != NotValid, "Action is not set");
  return action() == ParCompactionManager::ResetObjects;
}

// For now save on a stack
void ParCompactionManager::save_for_scanning(oop m) {
  stack_push(m);
}

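// Push an oop onto the bounded, stealable marking stack; if that stack is
// full, the oop goes onto the private overflow stack instead, so a push
// never fails for lack of queue space.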
void ParCompactionManager::stack_push(oop obj) {

  if(!marking_stack()->push(obj)) {
    overflow_stack()->push(obj);
  }
}

oop ParCompactionManager::retrieve_for_scanning() {

  // Should not be used in the parallel case
  ShouldNotReachHere();
  return NULL;
}

// Save region on a stack
void ParCompactionManager::save_for_processing(size_t region_index) {
#ifdef ASSERT
  const ParallelCompactData& sd = PSParallelCompact::summary_data();
  ParallelCompactData::RegionData* const region_ptr = sd.region(region_index);
  assert(region_ptr->claimed(), "must be claimed");
  assert(region_ptr->_pushed++ == 0, "should only be pushed once");
#endif
  region_stack_push(region_index);
}

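// Same overflow discipline as stack_push(), but for region indices. With
// USE_RegionTaskQueueWithOverflow the queue handles its own overflow;
// otherwise a separate C-heap GrowableArray is used as the overflow area.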
void ParCompactionManager::region_stack_push(size_t region_index) {

#ifdef USE_RegionTaskQueueWithOverflow
  region_stack()->save(region_index);
#else
  if(!region_stack()->push(region_index)) {
    region_overflow_stack()->push(region_index);
  }
#endif
}

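// Pop a region index for processing. Only meaningful when the region stack
// has built-in overflow support; the plain-queue configuration should never
// reach this path (ShouldNotReachHere).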
bool ParCompactionManager::retrieve_for_processing(size_t& region_index) {
#ifdef USE_RegionTaskQueueWithOverflow
  return region_stack()->retrieve(region_index);
#else
  // Should not be used in the parallel case
  ShouldNotReachHere();
  return false;
#endif
}

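// Look up the manager belonging to a given GC worker thread. Illustrative
// use from a GC task (a sketch, not code from this file; 'which' is the
// worker id and 'mark_and_push_closure' stands in for whatever OopClosure
// the caller uses):
//   ParCompactionManager* cm =
//     ParCompactionManager::gc_thread_compaction_manager(which);
//   cm->drain_marking_stacks(&mark_and_push_closure);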
ParCompactionManager*
ParCompactionManager::gc_thread_compaction_manager(int index) {
  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[index];
}

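// Clear every manager's revisit-klass stack (workers and the VMThread's
// manager alike); presumably done before a new marking pass begins.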
void ParCompactionManager::reset() {
  for(uint i=0; i<ParallelGCThreads+1; i++) {
    manager_array(i)->revisit_klass_stack()->clear();
  }
}

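// Process marking work until both the local marking stack and the overflow
// stack are empty. The overflow stack is drained first so that other threads
// can keep stealing from the shared queue; following an object's contents may
// push new work, hence the outer do/while re-checks both stacks before giving
// up. (The OopClosure argument is not referenced in this version.)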
void ParCompactionManager::drain_marking_stacks(OopClosure* blk) {
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

  do {

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while(!overflow_stack()->is_empty()) {
      oop obj = overflow_stack()->pop();
      obj->follow_contents(this);
    }

    oop obj;
    // obj is a reference!!!
    while (marking_stack()->pop_local(obj)) {
      // It would be nice to assert about the type of objects we might
      // pop, but they can come from anywhere, unfortunately.
      obj->follow_contents(this);
    }
  } while((marking_stack()->size() != 0) || (overflow_stack()->length() != 0));

  assert(marking_stack()->size() == 0, "Sanity");
  assert(overflow_stack()->length() == 0, "Sanity");
}

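// Fill and update every region currently sitting in this manager's region
// overflow area, leaving the stealable queue available to other threads.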
void ParCompactionManager::drain_region_overflow_stack() {
  size_t region_index = (size_t) -1;
  while(region_stack()->retrieve_from_overflow(region_index)) {
    PSParallelCompact::fill_and_update_region(this, region_index);
  }
}

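// Region analogue of drain_marking_stacks(): keep filling and updating
// regions until this manager's region queue (and, in the plain-queue build,
// the separate overflow stack) is empty. Overflow entries are taken first so
// the stealable queue stays available to other threads.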
void ParCompactionManager::drain_region_stacks() {
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

#if 1 // def DO_PARALLEL - the serial code hasn't been updated
  do {

#ifdef USE_RegionTaskQueueWithOverflow
    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    size_t region_index = (size_t) -1;
    while(region_stack()->retrieve_from_overflow(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }

    while (region_stack()->retrieve_from_stealable_queue(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }
  } while (!region_stack()->is_empty());
#else
    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while(!region_overflow_stack()->is_empty()) {
      size_t region_index = region_overflow_stack()->pop();
      PSParallelCompact::fill_and_update_region(this, region_index);
    }

    // region_index is passed by reference and filled in by pop_local().
    size_t region_index = (size_t) -1;
    while (region_stack()->pop_local(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }
  } while((region_stack()->size() != 0) ||
          (region_overflow_stack()->length() != 0));
#endif

#ifdef USE_RegionTaskQueueWithOverflow
  assert(region_stack()->is_empty(), "Sanity");
#else
  assert(region_stack()->size() == 0, "Sanity");
  assert(region_overflow_stack()->length() == 0, "Sanity");
#endif
#else
  // Serial path, currently disabled by the "#if 1" above.
  oop obj;
  while ((obj = retrieve_for_scanning()) != NULL) {
    obj->follow_contents(this);
  }
#endif
}

#ifdef ASSERT
bool ParCompactionManager::stacks_have_been_allocated() {
  return (revisit_klass_stack()->data_addr() != NULL);
}
#endif