/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP

#include "memory/genOopClosures.hpp"

/////////////////////////////////////////////////////////////////
// Closures used by ConcurrentMarkSweepGeneration's collector
/////////////////////////////////////////////////////////////////
class ConcurrentMarkSweepGeneration;
class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
class MarkFromRootsClosure;
class Par_MarkFromRootsClosure;

// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_DEFN                                        \
  void do_oop(oop obj);                                         \
  template <class T> inline void do_oop_work(T* p) {            \
    T heap_oop = oopDesc::load_heap_oop(p);                     \
    if (!oopDesc::is_null(heap_oop)) {                          \
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);    \
      do_oop(obj);                                              \
    }                                                           \
  }

class MarkRefsIntoClosure: public OopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _bitMap;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _verification_bm;
  CMSBitMap*      _cms_bm;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

// KlassRememberingOopClosure is used when marking of the permanent generation
// is being done. It adds fields to support revisiting of klasses
// for class unloading. _should_remember_klasses should be set to
// indicate if klasses should be remembered. Currently that is whenever
// CMS class unloading is turned on. The _revisit_stack is used
// to save the klasses for later processing.
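//
// An illustrative sketch (assumed here for documentation only; the actual
// call sites live in the oop iteration paths of the CMS collector) of how a
// scanning step would use this interface:
//
//   if (closure->should_remember_klasses()) {
//     closure->remember_klass(obj->klass());   // saved on the revisit stack
//   }
//
// so that klasses are recorded only when CMS class unloading is enabled and
// can be revisited later for unloading decisions.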
class KlassRememberingOopClosure : public OopClosure {
 protected:
  CMSCollector* _collector;
  CMSMarkStack* _revisit_stack;
  bool const    _should_remember_klasses;
 public:
  void check_remember_klasses() const PRODUCT_RETURN;
  virtual const bool should_remember_klasses() const {
    check_remember_klasses();
    return _should_remember_klasses;
  }
  virtual void remember_klass(Klass* k);

  KlassRememberingOopClosure(CMSCollector* collector,
                             ReferenceProcessor* rp,
                             CMSMarkStack* revisit_stack);
};

// Similar to KlassRememberingOopClosure for use when multiple
// GC threads will execute the closure.

class Par_KlassRememberingOopClosure : public KlassRememberingOopClosure {
 public:
  Par_KlassRememberingOopClosure(CMSCollector* collector,
                                 ReferenceProcessor* rp,
                                 CMSMarkStack* revisit_stack):
    KlassRememberingOopClosure(collector, rp, revisit_stack) {}
  virtual void remember_klass(Klass* k);
};

// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public KlassRememberingOopClosure {
 private:
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  CMSBitMap*    _mod_union_table;
  CMSMarkStack* _mark_stack;
  bool          _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceProcessor* rp,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
                     CMSMarkStack* mark_stack,
                     CMSMarkStack* revisit_stack,
                     bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  // In support of class unloading
  virtual const bool should_remember_mdo() const {
    return false;
    // return _should_remember_klasses;
  }
  virtual void remember_mdo(DataLayout* v);
};

// In the parallel case, the revisit stack, the bit map and the
// reference processor are currently all shared. Access to
// these shared mutable structures must use appropriate
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
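//
// A rough, illustrative sketch of what this implies (the names and calls
// below are assumptions for documentation purposes; the real loops live in
// the CMS collector and its parallel worker tasks):
//
//   // Marking races are resolved with an atomic (CAS-based) bit map update;
//   // only the winning thread pushes the object, so each grey object is
//   // enqueued once.
//   if (bit_map->par_mark(addr)) {
//     work_queue->push(obj);        // overflow handling elided
//   }
//
//   // Worker threads drain their own queue and steal from other queues
//   // when theirs runs dry:
//   while (work_queue->pop_local(obj) ||
//          task_queues->steal(worker_id, &seed, obj)) {
//     obj->oop_iterate(&closure);   // scan, possibly enqueueing more work
//   }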
class Par_PushAndMarkClosure: public Par_KlassRememberingOopClosure {
 private:
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  OopTaskQueue* _work_queue;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushAndMarkClosure(CMSCollector* collector,
                         MemRegion span,
                         ReferenceProcessor* rp,
                         CMSBitMap* bit_map,
                         OopTaskQueue* work_queue,
                         CMSMarkStack* revisit_stack);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  // In support of class unloading
  virtual const bool should_remember_mdo() const {
    return false;
    // return _should_remember_klasses;
  }
  virtual void remember_mdo(DataLayout* v);
};

// The non-parallel version (the parallel version appears further below).
class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
 private:
  MemRegion          _span;
  CMSBitMap*         _bit_map;
  CMSMarkStack*      _mark_stack;
  PushAndMarkClosure _pushAndMarkClosure;
  CMSCollector*      _collector;
  Mutex*             _freelistLock;
  bool               _yield;
  // Whether closure is being used for concurrent precleaning
  bool               _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceProcessor* rp,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
                             CMSMarkStack* mark_stack,
                             CMSMarkStack* revisit_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  void set_freelistLock(Mutex* m) {
    _freelistLock = m;
  }
  virtual const bool should_remember_klasses() const {
    return _pushAndMarkClosure.should_remember_klasses();
  }
  virtual void remember_klass(Klass* k) {
    _pushAndMarkClosure.remember_klass(k);
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
  bool take_from_overflow_list();
};
// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
 private:
  MemRegion              _span;
  CMSBitMap*             _bit_map;
  OopTaskQueue*          _work_queue;
  const uint             _low_water_mark;
  Par_PushAndMarkClosure _par_pushAndMarkClosure;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
                                 MemRegion span,
                                 ReferenceProcessor* rp,
                                 CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue,
                                 CMSMarkStack* revisit_stack);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  bool do_header() { return true; }
  // When ScanMarkedObjectsAgainClosure is used,
  // it passes [Par_]MarkRefsIntoAndScanClosure to oop_oop_iterate(),
  // and this delegation is used.
  virtual const bool should_remember_klasses() const {
    return _par_pushAndMarkClosure.should_remember_klasses();
  }
  // See comment on should_remember_klasses() above.
  virtual void remember_klass(Klass* k) {
    _par_pushAndMarkClosure.remember_klass(k);
  }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  void trim_queue(uint size);
};

// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public KlassRememberingOopClosure {
 private:
  MemRegion     _span;
  CMSBitMap*    _bitMap;
  CMSMarkStack* _markStack;
  HeapWord*     const _finger;
  MarkFromRootsClosure* const _parent;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
                    CMSMarkStack* markStack,
                    CMSMarkStack* revisitStack,
                    HeapWord* finger,
                    MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
  // In support of class unloading
  virtual const bool should_remember_mdo() const {
    return false;
    // return _should_remember_klasses;
  }
  virtual void remember_mdo(DataLayout* v);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};

// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public Par_KlassRememberingOopClosure {
 private:
  MemRegion     _whole_span;
  MemRegion     _span;          // local chunk
  CMSBitMap*    _bit_map;
  OopTaskQueue* _work_queue;
  CMSMarkStack* _overflow_stack;
  HeapWord*     const _finger;
  HeapWord**    const _global_finger_addr;
  Par_MarkFromRootsClosure* const _parent;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushOrMarkClosure(CMSCollector* cms_collector,
                        MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack* mark_stack,
                        CMSMarkStack* revisit_stack,
                        HeapWord* finger,
                        HeapWord** global_finger_addr,
                        Par_MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
  // In support of class unloading
  virtual const bool should_remember_mdo() const {
    return false;
    // return _should_remember_klasses;
  }
  virtual void remember_mdo(DataLayout* v);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};

// For objects in CMS generation, this closure marks
// given objects (transitively) as being reachable/live.
// This is currently used during the (weak) reference object
// processing phase of the CMS final checkpoint step, as
// well as during the concurrent precleaning of the discovered
// reference lists.
class CMSKeepAliveClosure: public KlassRememberingOopClosure {
 private:
  const MemRegion _span;
  CMSMarkStack*   _mark_stack;
  CMSBitMap*      _bit_map;
  bool            _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      CMSMarkStack* revisit_stack, bool cpc);
  bool concurrent_precleaning() const { return _concurrent_precleaning; }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
};

class CMSInnerParMarkAndPushClosure: public Par_KlassRememberingOopClosure {
 private:
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                MemRegion span, CMSBitMap* bit_map,
                                CMSMarkStack* revisit_stack,
                                OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
};

// A parallel (MT) version of the above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public Par_KlassRememberingOopClosure {
 private:
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
  CMSInnerParMarkAndPushClosure _mark_and_push;
  const uint    _low_water_mark;
  void trim_queue(uint max);
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
                         OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP