/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/pcTasks.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
//
// ThreadRootsMarkingTask
//
// NOTE(review): only a fragment of ThreadRootsMarkingTask::do_it()
// survives here.  The method signature, the closure declarations, and
// the statements these two guards protected are all missing, so this
// file cannot compile as-is.  The surviving guards show the task holds
// either a `_java_thread` or a `_vm_thread` member and acts on
// whichever is non-NULL — presumably marking that thread's roots;
// confirm against the original pcTasks.cpp in version control before
// attempting any repair.
if (_java_thread != NULL)
// (statement lost — body of the JavaThread branch is missing)
if (_vm_thread != NULL)
// (statement lost — body of the VMThread branch is missing)
// Do the real work
}
// NOTE(review): fragment of MarkFromRootsTask::do_it().  The enclosing
// method signature and the per-case marking calls have been lost; only
// the dispatch skeleton over the strong-root kinds remains.  Each case
// presumably invoked the corresponding subsystem's oops_do() with a
// mark-and-push closure — confirm against the original pcTasks.cpp.
switch (_root_type) {
case universe:
break;
case jni_handles:
break;
case threads:
{
// (scoped block body lost — presumably iterated all threads)
}
break;
case object_synchronizer:
break;
case flat_profiler:
break;
case management:
break;
case jvmti:
break;
case system_dictionary:
break;
case code_cache:
// Pre-existing disabled call, kept verbatim:
//CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
break;
default:
fatal("Unknown root type");
}
// Do the real work
}
//
// RefProcTaskProxy
//
// NOTE(review): the RefProcTaskProxy definition has been reduced to an
// empty brace pair — its contents are lost.
{
}
//
// RefProcTaskExecutor
//
// NOTE(review): fragment of RefProcTaskExecutor::execute(ProcessTask&).
// The signature and the loop bodies are missing; the skeleton shows a
// loop over `parallel_gc_threads` (presumably enqueueing one proxy task
// per worker) and, when `task.marks_oops_alive()` and more than one GC
// thread is configured, a second loop over `active_gc_threads`
// (presumably adding steal tasks).  Confirm against the original file.
{
for(uint i=0; i<parallel_gc_threads; i++) {
// (loop body lost)
}
if (task.marks_oops_alive()) {
if (parallel_gc_threads>1) {
for (uint j=0; j<active_gc_threads; j++) {
// (loop body lost)
}
}
}
}
// NOTE(review): this second brace pair appears to be the remnant of
// RefProcTaskExecutor::execute(EnqueueTask&) — same loss of signature
// and loop body as above.
{
for(uint i=0; i<parallel_gc_threads; i++) {
// (loop body lost)
}
}
//
// StealMarkingTask
//
// NOTE(review): two fragments survive here.  The first line is the tail
// of the StealMarkingTask constructor's initializer list (it initializes
// the `_terminator` member from parameter `t`); the constructor header
// itself is missing.
_terminator(t) {}
// NOTE(review): the lines below are the remnant of
// StealMarkingTask::do_it() — a steal loop whose inner statements are
// lost, terminated by the standard work-stealing handshake
// `terminator()->offer_termination()`.
do {
}
}
} while (!terminator()->offer_termination());
}
//
// StealRegionCompactionTask
//
// NOTE(review): heavily truncated.  The first line is the tail of the
// StealRegionCompactionTask constructor's initializer list
// (initializing `_terminator` from parameter `t`); everything after it
// appears to be remnants of at least two do_it() methods whose
// signatures, assertions, and format-string heads were lost — only
// orphaned string-literal continuation lines (for err_msg/gclog output)
// and control-flow skeletons remain.  Recover from version control; do
// not attempt to compile or patch these fragments directly.
_terminator(t) {}
// If not all threads are active, get a draining stack
// from the list. Else, just use this threads draining stack.
if (use_all_workers) {
// (assertion head lost; only the err_msg literal's first line remains)
err_msg("all_workers_active has been incorrectly set: "
} else {
}
if (TraceDynamicGCThreads) {
// (gclog print head lost; orphaned format-string pieces follow)
"region_stack_index %d region_stack = 0x%x "
" empty (%d) use all workers %d",
}
// Has to drain stacks first because there may be regions on
// preloaded onto the stack and this thread may never have
// done a draining task. Are the draining tasks needed?
// If we're the termination task, try 10 rounds of stealing before
// setting the termination flag
while(true) {
// (steal-attempt branch lost; only the else/termination arm remains)
} else {
if (terminator()->offer_termination()) {
break;
}
// Go around again.
}
}
return;
}
}
// NOTE(review): the guards below look like the start of a different
// task's do_it() (upstream, UpdateDensePrefixTask / the drain tasks
// follow StealRegionCompactionTask) — same truncation as above.
if (use_all_workers) {
err_msg("all_workers_active has been incorrectly set: "
} else {
}
if (TraceDynamicGCThreads) {
"use all workers %d",
}
// Process any regions already in the compaction managers stacks.
if (!use_all_workers) {
// Always give up the region stack.
// (assertion head lost; only the message literal's tail remains)
"region_stack and region_stack_index are inconsistent");
if (TraceDynamicGCThreads) {
}
}
}