/*
 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
0N/A
1879N/A#include "precompiled.hpp"
1879N/A#include "classfile/systemDictionary.hpp"
1879N/A#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
1879N/A#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
1879N/A#include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
1879N/A#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
1879N/A#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
1879N/A#include "gc_implementation/parallelScavenge/psOldGen.hpp"
1879N/A#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
1879N/A#include "oops/objArrayKlass.inline.hpp"
1879N/A#include "oops/oop.hpp"
1879N/A#include "oops/oop.inline.hpp"
1879N/A#include "oops/oop.pcgc.inline.hpp"
1879N/A#include "utilities/stack.inline.hpp"
0N/A
// Static state shared by all compaction managers; populated in initialize().
PSOldGen* ParCompactionManager::_old_gen = NULL;
ParCompactionManager** ParCompactionManager::_manager_array = NULL;

// Array of region task queues: one per GC worker plus one for the VMThread
// (see initialize(), which sizes it parallel_gc_threads + 1).
RegionTaskQueue** ParCompactionManager::_region_list = NULL;

OopTaskQueueSet* ParCompactionManager::_stack_array = NULL;
ParCompactionManager::ObjArrayTaskQueueSet*
  ParCompactionManager::_objarray_queues = NULL;
ObjectStartArray* ParCompactionManager::_start_array = NULL;
ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
RegionTaskQueueSet* ParCompactionManager::_region_array = NULL;

// List of recycled region-stack indices. Entries occupy positions
// (_recycled_bottom, _recycled_top]; both set to -1 means the list is empty.
uint* ParCompactionManager::_recycled_stack_index = NULL;
int ParCompactionManager::_recycled_top = -1;
int ParCompactionManager::_recycled_bottom = -1;
2941N/A
// Construct a compaction manager: default action is CopyAndUpdate, and no
// region stack is attached yet (_region_stack_index is set to a sentinel,
// max_uintx, until one is assigned).
ParCompactionManager::ParCompactionManager() :
    _action(CopyAndUpdate),
    _region_stack(NULL),
    _region_stack_index((uint)max_uintx) {

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Cache the old generation and its object start array in static fields
  // (same values for every manager).
  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();

  marking_stack()->initialize();
  _objarray_stack.initialize();
}
2941N/A
2941N/AParCompactionManager::~ParCompactionManager() {
2941N/A delete _recycled_stack_index;
0N/A}
0N/A
// One-time setup of all static state: the per-worker region lists, the
// task-queue sets used for work stealing, and one compaction manager per
// GC worker thread plus a non-stealable one for the VMThread.
void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  assert(PSParallelCompact::gc_task_manager() != NULL,
    "Needed for initialization");

  _mark_bitmap = mbm;

  uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  // +1 slot for the VMThread's manager (registered at the end below).
  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC);
  guarantee(_manager_array != NULL, "Could not allocate manager_array");

  _region_list = NEW_C_HEAP_ARRAY(RegionTaskQueue*,
                         parallel_gc_threads+1, mtGC);
  guarantee(_region_list != NULL, "Could not initialize promotion manager");

  // NOTE(review): sized to the worker count only, while _region_list has
  // one extra slot — presumably the VMThread's region stack index is never
  // pushed through the recycle list; verify against the push callers.
  _recycled_stack_index = NEW_C_HEAP_ARRAY(uint, parallel_gc_threads, mtGC);

  // parallel_gc_threads + 1 to be consistent with the number of
  // compaction managers.
  for(uint i=0; i<parallel_gc_threads + 1; i++) {
    _region_list[i] = new RegionTaskQueue();
    region_list(i)->initialize();
  }

  _stack_array = new OopTaskQueueSet(parallel_gc_threads);
  guarantee(_stack_array != NULL, "Could not allocate stack_array");
  _objarray_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
  guarantee(_objarray_queues != NULL, "Could not allocate objarray_queues");
  _region_array = new RegionTaskQueueSet(parallel_gc_threads);
  guarantee(_region_array != NULL, "Could not allocate region_array");

  // Create and register the ParCompactionManager(s) for the worker threads.
  for(uint i=0; i<parallel_gc_threads; i++) {
    _manager_array[i] = new ParCompactionManager();
    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
    stack_array()->register_queue(i, _manager_array[i]->marking_stack());
    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
    region_array()->register_queue(i, region_list(i));
  }

  // The VMThread gets its own ParCompactionManager, which is not available
  // for work stealing.
  _manager_array[parallel_gc_threads] = new ParCompactionManager();
  guarantee(_manager_array[parallel_gc_threads] != NULL,
    "Could not create ParCompactionManager");
  assert(PSParallelCompact::gc_task_manager()->workers() != 0,
    "Not initialized?");
}
0N/A
// Pop the next recycled region-stack index, or -1 if none is available.
// Entries occupy positions (_recycled_bottom, _recycled_top]; a popper
// claims the entry at bottom+1 by CAS-advancing _recycled_bottom.
int ParCompactionManager::pop_recycled_stack_index() {
  assert(_recycled_bottom <= _recycled_top, "list is empty");
  // Get the next available index
  if (_recycled_bottom < _recycled_top) {
    uint cur, next, last;
    do {
      // Retry until this thread's CAS wins (cmpxchg returns the value it
      // observed; equality with 'cur' means our update was applied).
      cur = _recycled_bottom;
      next = cur + 1;
      last = Atomic::cmpxchg(next, &_recycled_bottom, cur);
    } while (cur != last);
    // NOTE(review): the emptiness test above is not re-checked inside the
    // CAS loop, so concurrent poppers could in principle advance bottom
    // past top — presumably callers bound concurrency so this cannot
    // happen; verify against the users of the recycle list.
    return _recycled_stack_index[next];
  } else {
    return -1;
  }
}
2941N/A
// Record region-stack index 'v' for later reuse. Atomic::add returns the
// incremented top, so the first push after reset (top == -1) writes slot 0.
void ParCompactionManager::push_recycled_stack_index(uint v) {
  // Get the next available index
  int cur = Atomic::add(1, &_recycled_top);
  _recycled_stack_index[cur] = v;
  assert(_recycled_bottom <= _recycled_top, "list top and bottom are wrong");
}
2941N/A
0N/Abool ParCompactionManager::should_update() {
0N/A assert(action() != NotValid, "Action is not set");
0N/A return (action() == ParCompactionManager::Update) ||
0N/A (action() == ParCompactionManager::CopyAndUpdate) ||
0N/A (action() == ParCompactionManager::UpdateAndCopy);
0N/A}
0N/A
0N/Abool ParCompactionManager::should_copy() {
0N/A assert(action() != NotValid, "Action is not set");
0N/A return (action() == ParCompactionManager::Copy) ||
0N/A (action() == ParCompactionManager::CopyAndUpdate) ||
0N/A (action() == ParCompactionManager::UpdateAndCopy);
0N/A}
0N/A
2941N/Avoid ParCompactionManager::region_list_push(uint list_index,
2941N/A size_t region_index) {
2941N/A region_list(list_index)->push(region_index);
2941N/A}
2941N/A
// Debug-only check that the region task queue at 'list_index' has been
// fully drained.
void ParCompactionManager::verify_region_list_empty(uint list_index) {
  assert(region_list(list_index)->is_empty(), "Not empty");
}
2941N/A
0N/AParCompactionManager*
0N/AParCompactionManager::gc_thread_compaction_manager(int index) {
0N/A assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
0N/A assert(_manager_array != NULL, "Sanity");
0N/A return _manager_array[index];
0N/A}
0N/A
0N/Avoid ParCompactionManager::reset() {
1756N/A for(uint i = 0; i < ParallelGCThreads + 1; i++) {
1756N/A assert(manager_array(i)->revisit_klass_stack()->is_empty(), "sanity");
1756N/A assert(manager_array(i)->revisit_mdo_stack()->is_empty(), "sanity");
0N/A }
0N/A}
0N/A
// Process this manager's marking stacks until both are empty. Object
// arrays are queued as (array, start-index) chunks on _objarray_stack and
// only one chunk is processed per outer iteration, bounding how much new
// marking work a single large array can enqueue at once.
void ParCompactionManager::follow_marking_stacks() {
  do {
    // Drain the overflow stack first, to allow stealing from the marking stack.
    oop obj;
    while (marking_stack()->pop_overflow(obj)) {
      obj->follow_contents(this);
    }
    while (marking_stack()->pop_local(obj)) {
      obj->follow_contents(this);
    }

    // Process ObjArrays one at a time to avoid marking stack bloat.
    ObjArrayTask task;
    if (_objarray_stack.pop_overflow(task)) {
      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
      k->oop_follow_contents(this, task.obj(), task.index());
    } else if (_objarray_stack.pop_local(task)) {
      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
      k->oop_follow_contents(this, task.obj(), task.index());
    }
  } while (!marking_stacks_empty());

  assert(marking_stacks_empty(), "Sanity");
}
0N/A
375N/Avoid ParCompactionManager::drain_region_stacks() {
0N/A do {
1558N/A // Drain overflow stack first so other threads can steal.
1558N/A size_t region_index;
1558N/A while (region_stack()->pop_overflow(region_index)) {
375N/A PSParallelCompact::fill_and_update_region(this, region_index);
0N/A }
0N/A
1558N/A while (region_stack()->pop_local(region_index)) {
375N/A PSParallelCompact::fill_and_update_region(this, region_index);
0N/A }
375N/A } while (!region_stack()->is_empty());
0N/A}