/*
 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_HPP

#include "memory/allocation.hpp"
#include "utilities/stack.hpp"
#include "utilities/taskqueue.hpp"

// Move to some global location
#define HAS_BEEN_MOVED 0x1501d01d
// End move to some global location


class MutableSpace;
class PSOldGen;
class ParCompactionManager;
class ObjectStartArray;
class ParallelCompactData;
class ParMarkBitMap;

class ParCompactionManager : public CHeapObj<mtGC> {
  friend class ParallelTaskTerminator;
  friend class ParMarkBitMap;
  friend class PSParallelCompact;
  friend class StealRegionCompactionTask;
  friend class UpdateAndFillClosure;
  friend class RefProcTaskExecutor;
  friend class IdleGCTask;

 public:

// ------------------------  Don't put back if not needed
  // Actions that the compaction manager should take.
  enum Action {
    Update,
    Copy,
    UpdateAndCopy,
    CopyAndUpdate,
    NotValid
  };
// ------------------------  End don't put back if not needed

 private:
  // 32-bit:  4K * 8 = 32KiB; 64-bit:  8K * 16 = 128KiB
  #define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
  typedef OverflowTaskQueue<ObjArrayTask, mtGC, QUEUE_SIZE> ObjArrayTaskQueue;
  typedef GenericTaskQueueSet<ObjArrayTaskQueue, mtGC>      ObjArrayTaskQueueSet;
  #undef QUEUE_SIZE
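  // (Each ObjArrayTask holds an oop plus an index, which works out to roughly
  // 8 bytes per entry on 32-bit and 16 bytes per entry on 64-bit; presumably
  // these are the per-entry factors used in the size estimate above.)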

  static ParCompactionManager** _manager_array;
  static OopTaskQueueSet*       _stack_array;
  static ObjArrayTaskQueueSet*  _objarray_queues;
  static ObjectStartArray*      _start_array;
  static RegionTaskQueueSet*    _region_array;
  static PSOldGen*              _old_gen;

private:
  OverflowTaskQueue<oop, mtGC>  _marking_stack;
  ObjArrayTaskQueue             _objarray_stack;

  // Is there a way to reuse the _marking_stack for saving
  // empty regions?  For now just create a different
  // type of TaskQueue.
  RegionTaskQueue* _region_stack;

  static RegionTaskQueue** _region_list;
  // Index in _region_list for current _region_stack.
  uint _region_stack_index;

  // Indexes of recycled region stacks/overflow stacks
  // Stacks of regions to be compacted are embedded in the tasks doing
  // the compaction.  A thread that executes the task extracts the
  // region stack and drains it.  These threads keep these region
  // stacks for use during compaction task stealing.  If a thread
  // gets a second draining task, it pushes its current region stack
  // index into the array _recycled_stack_index and gets a new
  // region stack from the task.  A thread that is executing a
  // compaction stealing task without ever having executed a
  // draining task will get a region stack from _recycled_stack_index.
  //
  // Array of indexes into the array of region stacks.
  static uint*  _recycled_stack_index;
  // The index into _recycled_stack_index of the last region stack index
  // pushed.  If -1, there are no entries in _recycled_stack_index.
  static int    _recycled_top;
  // The index into _recycled_stack_index of the last region stack index
  // popped.  If -1, no entry has been popped.
  static int    _recycled_bottom;
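  //
  // A rough sketch of the intended hand-off, based on the description above
  // (illustrative only; the actual logic lives in the draining and stealing
  // compaction tasks):
  //
  //   // Draining task: adopt the region stack chosen for this task.
  //   cm->set_region_stack(ParCompactionManager::region_list(which));
  //   cm->set_region_stack_index(which);
  //
  //   // Second draining task: recycle the current index before switching.
  //   ParCompactionManager::push_recycled_stack_index(cm->region_stack_index());
  //
  //   // Stealing task with no prior draining task: take a recycled index.
  //   int which_stack_index = ParCompactionManager::pop_recycled_stack_index();
  //   cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));
  //   cm->set_region_stack_index(which_stack_index);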
0N/A
3863N/A Stack<Klass*, mtGC> _revisit_klass_stack;
3863N/A Stack<DataLayout*, mtGC> _revisit_mdo_stack;
1756N/A
0N/A static ParMarkBitMap* _mark_bitmap;
0N/A
0N/A Action _action;
0N/A
0N/A static PSOldGen* old_gen() { return _old_gen; }
0N/A static ObjectStartArray* start_array() { return _start_array; }
375N/A static OopTaskQueueSet* stack_array() { return _stack_array; }
0N/A
0N/A static void initialize(ParMarkBitMap* mbm);
0N/A
0N/A protected:
0N/A // Array of tasks. Needed by the ParallelTaskTerminator.
375N/A static RegionTaskQueueSet* region_array() { return _region_array; }
3863N/A OverflowTaskQueue<oop, mtGC>* marking_stack() { return &_marking_stack; }
0N/A
0N/A // Pushes onto the marking stack. If the marking stack is full,
0N/A // pushes onto the overflow stack.
0N/A void stack_push(oop obj);
0N/A // Do not implement an equivalent stack_pop. Deal with the
0N/A // marking stack and overflow stack directly.

 public:
  Action action() { return _action; }
  void set_action(Action v) { _action = v; }

  RegionTaskQueue* region_stack()           { return _region_stack; }
  void set_region_stack(RegionTaskQueue* v) { _region_stack = v; }

  inline static ParCompactionManager* manager_array(int index);

  inline static RegionTaskQueue* region_list(int index) {
    return _region_list[index];
  }

  uint region_stack_index()           { return _region_stack_index; }
  void set_region_stack_index(uint v) { _region_stack_index = v; }

  // Pop and push unique reusable stack index
  static int pop_recycled_stack_index();
  static void push_recycled_stack_index(uint v);
  static void reset_recycled_stack_index() {
    _recycled_bottom = _recycled_top = -1;
  }

  ParCompactionManager();
  ~ParCompactionManager();

  // Pushes onto the region stack at the given index.  If the region
  // stack is full, pushes onto the region overflow stack.
  static void region_list_push(uint stack_index, size_t region_index);
  static void verify_region_list_empty(uint stack_index);
  ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }

  // Take actions in preparation for a compaction.
  static void reset();

  // void drain_stacks();

  bool should_update();
  bool should_copy();

  Stack<Klass*, mtGC>* revisit_klass_stack()    { return &_revisit_klass_stack; }
  Stack<DataLayout*, mtGC>* revisit_mdo_stack() { return &_revisit_mdo_stack; }

  // Save for later processing.  Must not fail.
  inline void push(oop obj) { _marking_stack.push(obj); }
  inline void push_objarray(oop objarray, size_t index);
  inline void push_region(size_t index);
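
  // For example (illustrative only), marking code typically pushes newly
  // marked objects and array chunks here and then processes them locally:
  //
  //   cm->push(obj);                       // whole object
  //   cm->push_objarray(big_array, 0);     // object array, scanned in chunks
  //   cm->follow_marking_stacks();         // drain until both stacks are empty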

  // Access function for compaction managers
  static ParCompactionManager* gc_thread_compaction_manager(int index);

  static bool steal(int queue_num, int* seed, oop& t) {
    return stack_array()->steal(queue_num, seed, t);
  }

  static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t) {
    return _objarray_queues->steal(queue_num, seed, t);
  }

  static bool steal(int queue_num, int* seed, size_t& region) {
    return region_array()->steal(queue_num, seed, region);
  }
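
  // The steal() overloads are normally driven from a stealing task together
  // with a ParallelTaskTerminator, roughly as follows (illustrative only):
  //
  //   int seed = 17;
  //   oop obj;
  //   do {
  //     while (ParCompactionManager::steal(queue_num, &seed, obj)) {
  //       obj->follow_contents(cm);
  //       cm->follow_marking_stacks();
  //     }
  //   } while (!terminator->offer_termination());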

  // Process tasks remaining on any marking stack
  void follow_marking_stacks();
  inline bool marking_stacks_empty() const;

  // Process tasks remaining on any region stack
  void drain_region_stacks();

};

inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
  assert(_manager_array != NULL, "access of NULL manager_array");
  assert(index >= 0 && index <= (int)ParallelGCThreads,
    "out of range manager_array access");
  return _manager_array[index];
}
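
// Note: the inclusive upper bound above reflects that _manager_array is
// expected to hold ParallelGCThreads + 1 managers, with the last slot
// conventionally used by the VM thread.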

bool ParCompactionManager::marking_stacks_empty() const {
  return _marking_stack.is_empty() && _objarray_stack.is_empty();
}

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_HPP