// g1RemSet.hpp revision 626
342N/A/*
342N/A * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
342N/A * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
342N/A *
342N/A * This code is free software; you can redistribute it and/or modify it
342N/A * under the terms of the GNU General Public License version 2 only, as
342N/A * published by the Free Software Foundation.
342N/A *
342N/A * This code is distributed in the hope that it will be useful, but WITHOUT
342N/A * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
342N/A * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
342N/A * version 2 for more details (a copy is included in the LICENSE file that
342N/A * accompanied this code).
342N/A *
342N/A * You should have received a copy of the GNU General Public License version
342N/A * 2 along with this work; if not, write to the Free Software Foundation,
342N/A * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
342N/A *
342N/A * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
342N/A * CA 95054 USA or visit www.sun.com if you need additional information or
342N/A * have any questions.
342N/A *
342N/A */
342N/A
// A G1RemSet provides ways of iterating over pointers into a selected
// collection set.

// Forward declarations of collaborating types (defined elsewhere in HotSpot).
class G1CollectedHeap;
class CardTableModRefBarrierSet;
class HRInto_G1RemSet;
class ConcurrentG1Refine;
342N/A
// Abstract base class for G1 remembered-set implementations.  Holds the
// heap reference and simple concurrent-refinement statistics, and defines
// the interface used during a collection pause (scanning, scrubbing,
// refinement).  Concrete subclasses: StupidG1RemSet, HRInto_G1RemSet.
class G1RemSet: public CHeapObj {
protected:
  G1CollectedHeap* _g1;

  // Statistics: number of concurrent refinement traversals performed and
  // number of cards refined concurrently.
  unsigned _conc_refine_traversals;
  unsigned _conc_refine_cards;

  // Number of worker threads used for parallel remembered-set work.
  size_t n_workers();

public:
  G1RemSet(G1CollectedHeap* g1) :
    _g1(g1), _conc_refine_traversals(0), _conc_refine_cards(0)
  {}

  // Invoke "blk->do_oop" on all pointers into the CS in object in regions
  // outside the CS (having invoked "blk->set_region" to set the "from"
  // region correctly beforehand.) The "worker_i" param is for the
  // parallel case where the number of the worker thread calling this
  // function can be helpful in partitioning the work to be done. It
  // should be the same as the "i" passed to the calling thread's
  // work(i) function. In the sequential case this param will be ignored.
  virtual void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                           int worker_i) = 0;

  // Prepare for and cleanup after an oops_into_collection_set_do
  // call. Must call each of these once before and after (in sequential
  // code) any threads call oops into collection set do. (This offers an
  // opportunity to sequential setup and teardown of structures needed by a
  // parallel iteration over the CS's RS.)
  virtual void prepare_for_oops_into_collection_set_do() = 0;
  virtual void cleanup_after_oops_into_collection_set_do() = 0;

  // If "this" is of the given subtype, return "this", else "NULL".
  // (Poor man's dynamic cast, avoiding RTTI.)
  virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; }

  // Record, if necessary, the fact that *p (where "p" is in region "from")
  // has changed to its new value.
  virtual void write_ref(HeapRegion* from, oop* p) = 0;
  // As write_ref, but safe to call from multiple threads; "tid" identifies
  // the calling worker thread.
  virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0;

  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
  // or card, respectively, such that a region or card with a corresponding
  // 0 bit contains no part of any live object. Eliminates any remembered
  // set entries that correspond to dead heap ranges.
  virtual void scrub(BitMap* region_bm, BitMap* card_bm) = 0;
  // Like the above, but assumes is called in parallel: "worker_num" is the
  // parallel thread id of the current thread, and "claim_val" is the
  // value that should be used to claim heap regions.
  virtual void scrub_par(BitMap* region_bm, BitMap* card_bm,
                         int worker_num, int claim_val) = 0;

  // Do any "refinement" activity that might be appropriate to the given
  // G1RemSet. If "refinement" has iterative "passes", do one pass.
  // Default implementation does nothing.
  virtual void concurrentRefinementPass(ConcurrentG1Refine* cg1r) {}

  // Refine the card corresponding to "card_ptr".  "worker_i" identifies
  // the worker thread performing the refinement.
  // Default implementation does nothing.
  virtual void concurrentRefineOneCard(jbyte* card_ptr, int worker_i) {}

  // Accessor for the concurrent-refinement card counter.
  unsigned conc_refine_cards() { return _conc_refine_cards; }

  // Print any relevant summary info.
  virtual void print_summary_info() {}

  // Prepare remembered set for verification.
  virtual void prepare_for_verify() {};
};
342N/A
342N/A
// The simplest possible G1RemSet: iterates over all objects in non-CS
// regions, searching for pointers into the CS.  Maintains no per-region
// remembered sets, so the write-barrier and scrub hooks are all no-ops.
class StupidG1RemSet: public G1RemSet {
public:
  StupidG1RemSet(G1CollectedHeap* g1) : G1RemSet(g1) {}

  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                   int worker_i);

  // No per-pause setup or teardown is required for the exhaustive scan.
  void prepare_for_oops_into_collection_set_do() {}
  void cleanup_after_oops_into_collection_set_do() {}

  // Nothing is necessary in the version below.
  void write_ref(HeapRegion* from, oop* p) {}
  void par_write_ref(HeapRegion* from, oop* p, int tid) {}

  // No remembered-set entries exist, so there is nothing to scrub.
  void scrub(BitMap* region_bm, BitMap* card_bm) {}
  void scrub_par(BitMap* region_bm, BitMap* card_bm,
                 int worker_num, int claim_val) {}

};
342N/A
// A G1RemSet in which each heap region has a rem set that records the
// external heap references into it. Uses a mod ref bs to track updates,
// so that they can be used to update the individual region remsets.

class HRInto_G1RemSet: public G1RemSet {
protected:
  // Identifiers for the sub-phases/tasks of a pause, used with _seq_task
  // to coordinate the synchronization points between phases.
  enum SomePrivateConstants {
    UpdateRStoMergeSync  = 0,
    MergeRStoDoDirtySync = 1,
    DoDirtySync          = 2,
    LastSync             = 3,

    SeqTask              = 0,
    NumSeqTasks          = 1
  };

  CardTableModRefBS*             _ct_bs;       // card table barrier set
  SubTasksDone*                  _seq_task;    // claims sequential sub-tasks
  G1CollectorPolicy*             _g1p;         // collector policy

  ConcurrentG1Refine*            _cg1r;        // concurrent refinement engine

  // Per-worker card-scan counts, and their sum over all workers.
  size_t*                        _cards_scanned;
  size_t                        _total_cards_scanned;

  // _par_traversal_in_progress is "true" iff a parallel traversal is in
  // progress. If so, then cards added to remembered sets should also have
  // their references into the collection summarized in "_new_refs".
  bool                          _par_traversal_in_progress;
  void                          set_par_traversal(bool b);
  // Per-worker arrays of references discovered during parallel traversal.
  GrowableArray<oop*>**         _new_refs;
  // Apply "cl" to every reference recorded in "_new_refs".
  void new_refs_iterate(OopClosure* cl);

public:
  // This is called to reset dual hash tables after the gc pause
  // is finished and the initial hash table is no longer being
  // scanned.
  void cleanupHRRS();

  HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
  ~HRInto_G1RemSet();

  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                   int worker_i);

  void prepare_for_oops_into_collection_set_do();
  void cleanup_after_oops_into_collection_set_do();
  // Scan the remembered sets of collection-set regions with "oc".
  void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
  // Scan the references recorded in _new_refs with "oc".
  void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i);
  // Process pending card-table updates into region remembered sets.
  void updateRS(int worker_i);
  // Starting region for worker "i", used to partition RS scanning.
  HeapRegion* calculateStartRegion(int i);

  HRInto_G1RemSet* as_HRInto_G1RemSet() { return this; }

  CardTableModRefBS* ct_bs() { return _ct_bs; }
  size_t cardsScanned() { return _total_cards_scanned; }

  // Record, if necessary, the fact that *p (where "p" is in region "from",
  // which is required to be non-NULL) has changed to a new non-NULL value.
  inline void write_ref(HeapRegion* from, oop* p);
  // The "_nv" version is the same; it exists just so that it is not virtual.
  inline void write_ref_nv(HeapRegion* from, oop* p);

  // Returns true iff "obj" was forwarded to itself during evacuation
  // failure handling.
  inline bool self_forwarded(oop obj);
  inline void par_write_ref(HeapRegion* from, oop* p, int tid);

  void scrub(BitMap* region_bm, BitMap* card_bm);
  void scrub_par(BitMap* region_bm, BitMap* card_bm,
                 int worker_num, int claim_val);

  virtual void concurrentRefinementPass(ConcurrentG1Refine* t);
  virtual void concurrentRefineOneCard(jbyte* card_ptr, int worker_i);

  virtual void print_summary_info();
  virtual void prepare_for_verify();
};
342N/A
// Compile-time switch for remembered-set debug logging (0 = disabled).
#define G1_REM_SET_LOGGING 0

// Closure that counts the non-clean memory regions it is applied to,
// remembering the start address of the first one seen.  Used for
// verification/debugging of card-table state.
class CountNonCleanMemRegionClosure: public MemRegionClosure {
  G1CollectedHeap* _g1;
  int _n;                   // number of regions visited
  HeapWord* _start_first;   // start of the first region visited, or NULL
public:
  CountNonCleanMemRegionClosure(G1CollectedHeap* g1) :
    _g1(g1), _n(0), _start_first(NULL)
  {}
  void do_MemRegion(MemRegion mr);
  int n() { return _n; };
  HeapWord* start_first() { return _start_first; }
};
626N/A
// Oop closure that records each reference it visits in the remembered
// set, via HRInto_G1RemSet::par_write_ref, attributing it to the region
// set by set_from() and to worker "_worker_i".
class UpdateRSOopClosure: public OopClosure {
  HeapRegion* _from;        // region containing the fields being visited
  HRInto_G1RemSet* _rs;     // remembered set to update (never NULL)
  int _worker_i;            // id of the worker using this closure
public:
  UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
    _from(NULL), _rs(rs), _worker_i(worker_i) {
    guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
  }

  // Must be called before the closure is applied, to identify the region
  // the visited fields belong to.
  void set_from(HeapRegion* from) {
    assert(from != NULL, "from region must be non-NULL");
    _from = from;
  }

  virtual void do_oop(narrowOop* p);
  virtual void do_oop(oop* p);

  // Override: this closure is idempotent.
  // bool idempotent() { return true; }
  bool apply_to_weak_ref_discovered_field() { return true; }
};
626N/A