/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
friend class OtherRegionsTable;
friend class HeapRegionRemSetIterator;
// prev pointer for the allocated 'all' list
// next pointer in collision list
// Global free list of PRTs
protected:
// We need access in order to union things into the base table.
void recount_occupied() {
}
_occupied(0),
{}
if (par) {
}
} else {
_occupied++;
}
}
}
// Must make this robust in case "from" is not in "_hr", because of
// concurrency.
from,
}
// If the test below fails, then this table was reused concurrently
// with this operation. This is OK, since the old table was coarsened,
// and adding a bit to the new table is never incorrect.
// If the table used to belong to a continues humongous region and is
// now reused for the corresponding start humongous region, we need to
// make sure that we detect this. Thus, we call is_in_reserved_raw()
// instead of just is_in_reserved() here.
"Must be in range.");
}
}
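// Illustrative sketch (not part of the original file): the code above maps a
// "from" pointer to a bit in this table's per-region card bitmap. A minimal,
// hedged version of that card-index calculation, assuming a hypothetical
// 512-byte card size; kSketchCardSize and sketch_card_index are illustrative
// names only, not HotSpot identifiers.
static const size_t kSketchCardSize = 512;  // assumption: 2^9-byte cards
static size_t sketch_card_index(const void* region_bottom, const void* from) {
  // Byte offset of "from" within its region, scaled down to a card index.
  size_t byte_offset = (size_t)((const char*)from - (const char*)region_bottom);
  return byte_offset / kSketchCardSize;
}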
public:
// Overkill, but if we ever need it...
// guarantee(_occupied == _bm.count_one_bits(), "Check");
return _occupied;
}
if (clear_links_to_all_list) {
}
_occupied = 0;
}
}
}
}
}
}
// (Destructively) union the bitmap of the current table into the given
// bitmap (which is assumed to be of the same size).
}
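// Illustrative sketch (not part of the original file): the union described in
// the comment above can be done word by word when both bitmaps have the same
// length. A minimal, hedged version over raw words; SketchWord and both
// parameters are hypothetical names.
typedef unsigned long SketchWord;
static void sketch_union_bitmap_into(const SketchWord* src, SketchWord* dst,
                                     size_t size_in_words) {
  for (size_t i = 0; i < size_in_words; i++) {
    dst[i] |= src[i];  // set in dst every bit that is set in src
  }
}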
// Mem size in bytes.
}
// Requires "from" to be in "hr()".
}
// Bulk-free the PRTs from prt to last, assuming that they are
// linked together using their _next field.
while (true) {
return;
}
}
}
}
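// Illustrative sketch (not part of the original file): the bulk-free loop above
// pushes an already linked chain [first, last] onto a global free list in one
// compare-and-swap, retrying on contention. A minimal, hedged C++11 version;
// SketchNode, g_sketch_free_list and sketch_bulk_free are hypothetical names.
#include <atomic>  // for the sketch only
struct SketchNode { SketchNode* _next; };
static std::atomic<SketchNode*> g_sketch_free_list(NULL);
static void sketch_bulk_free(SketchNode* first, SketchNode* last) {
  SketchNode* head = g_sketch_free_list.load();
  while (true) {
    last->_next = head;  // splice the whole chain in front of the current head
    if (g_sketch_free_list.compare_exchange_weak(head, first)) {
      return;  // the entire [first, last] segment is now on the free list
    }
    // CAS failed: "head" now holds the reloaded list head; retry.
  }
}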
// Returns an initialized PerRegionTable instance.
return fl;
} else {
fl = _free_list;
}
}
return new PerRegionTable(hr);
}
// Accessor and Modification routines for the pointer for the
// singly linked collision list that links the PRTs within the
// OtherRegionsTable::_fine_grain_regions hash table.
//
// It might be useful to also make the collision list doubly linked;
// OTOH, there might not be many collisions.
return _collision_list_next;
}
}
return &_collision_list_next;
}
res += sizeof(PerRegionTable);
}
return res;
}
};
false /* in-resource-area */),
_n_fine_entries(0), _n_coarse_entries(0),
{
if (_max_fine_entries == 0) {
&& _fine_eviction_stride == 0, "All init at same time.");
}
if (_fine_grain_regions == NULL) {
vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries,
"Failed to allocate _fine_grain_entries.");
}
for (size_t i = 0; i < _max_fine_entries; i++) {
_fine_grain_regions[i] = NULL;
}
}
// We always insert at the head of the list for convenience;
// the order of entries in this list does not matter.
if (_first_all_fine_prts != NULL) {
} else {
// this is the first element we insert. Adjust the "last" pointer
}
// the new element is always the first element without a predecessor
"just checking");
"just checking");
"just checking");
}
// removing the last element in the list?
if (_last_all_fine_prts == prt) {
}
} else {
// list is empty now?
if (_first_all_fine_prts == NULL) {
}
}
}
"just checking");
"just checking");
"just checking");
}
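// Illustrative sketch (not part of the original file): the code above keeps the
// fine-grain tables on a doubly linked "all" list with explicit head and tail
// pointers (_first_all_fine_prts/_last_all_fine_prts). A minimal, hedged
// single-threaded version; SketchPrt and the g_sketch_* globals are hypothetical.
struct SketchPrt { SketchPrt* _prev_all; SketchPrt* _next_all; };
static SketchPrt* g_sketch_first_all = NULL;
static SketchPrt* g_sketch_last_all  = NULL;
static void sketch_link_to_all(SketchPrt* prt) {
  // Insert at the head; the new element never has a predecessor.
  prt->_prev_all = NULL;
  prt->_next_all = g_sketch_first_all;
  if (g_sketch_first_all != NULL) {
    g_sketch_first_all->_prev_all = prt;
  } else {
    g_sketch_last_all = prt;  // the list was empty, so prt is also the last element
  }
  g_sketch_first_all = prt;
}
static void sketch_unlink_from_all(SketchPrt* prt) {
  if (prt->_prev_all != NULL) prt->_prev_all->_next_all = prt->_next_all;
  else                        g_sketch_first_all = prt->_next_all;  // removed the head
  if (prt->_next_all != NULL) prt->_next_all->_prev_all = prt->_prev_all;
  else                        g_sketch_last_all = prt->_prev_all;   // removed the tail
  prt->_prev_all = NULL;
  prt->_next_all = NULL;
}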
for (int i = 0; i < n_par_rs; i++) {
for (size_t j = 0; j < max_regions; j++) {
}
}
}
for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
}
}
}
#ifndef PRODUCT
for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
for (size_t j = 0; j < _from_card_cache_max_regions; j++) {
i, j, _from_card_cache[i][j]);
}
}
}
#endif
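// Illustrative sketch (not part of the original file): the loops above initialize
// and print _from_card_cache, a per-worker cache indexed by [worker][region] that
// remembers the last card added so duplicate additions can be skipped cheaply. A
// minimal, hedged version using a -1 sentinel; SketchFromCardCache and its members
// are hypothetical names.
struct SketchFromCardCache {
  int**  _cache;        // _cache[worker][region] = last card added, or -1
  int    _n_workers;
  size_t _max_regions;
  void clear() {
    for (int i = 0; i < _n_workers; i++) {
      for (size_t j = 0; j < _max_regions; j++) {
        _cache[i][j] = -1;  // sentinel: no card cached for this (worker, region)
      }
    }
  }
  // Returns true if "card" is already the cached card for (worker, region);
  // otherwise records it and returns false so the caller adds it to the rem set.
  bool contains_or_replace(int worker, size_t region, int card) {
    if (_cache[worker][region] == card) return true;
    _cache[worker][region] = card;
    return false;
  }
};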
from,
}
}
}
return;
} else {
}
// Note that this may be a continued H region.
// If the region is already coarsened, return.
}
return;
}
// Otherwise find a per-region table to add it to.
// Confirm that it's really not there...
"Must be in range.");
if (G1HRRSUseSparseTable &&
if (G1RecordHRRSOops) {
}
}
}
return;
} else {
"overflow(f: %d, t: %d)",
}
}
if (_n_fine_entries == _max_fine_entries) {
prt = delete_region_table();
// There is no need to clear the links to the 'all' list here:
// prt will be reused immediately, i.e. remain in the 'all' list.
} else {
}
if (G1HRRSUseSparseTable) {
// Transfer from sparse to fine-grain.
for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
if (c != SparsePRTEntry::NullEntry) {
}
}
// Now we can delete the sparse entry.
}
}
}
// Note that we can't assert "prt->hr() == from_hr", because of the
// possibility of concurrent reuse. But see head comment of
// OtherRegionsTable for why this is OK.
if (G1RecordHRRSOops) {
}
}
}
}
// Loop postcondition is the method postcondition.
return prt;
}
for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
// Make sure we get a non-NULL sample.
ii++;
}
max_ind = i;
}
}
i = i + _fine_eviction_stride;
if (i >= _n_fine_entries) i = i - _n_fine_entries;
}
if (_fine_eviction_start >= _n_fine_entries) {
}
// Set the corresponding coarse bit.
}
}
// Unsplice.
return max;
}
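// Illustrative sketch (not part of the original file): delete_region_table above
// samples _fine_eviction_sample_size entries, stepping through the fine-grain
// table by _fine_eviction_stride and wrapping around, then evicts the sampled
// table with the highest occupancy. A minimal, hedged version of that sampling
// policy over a plain occupancy array; all names are hypothetical.
static size_t sketch_pick_eviction_victim(const size_t* occupancy,  // per-entry occupancy
                                          size_t n_entries,         // assumed > 0
                                          size_t start,
                                          size_t stride,            // assumed < n_entries
                                          size_t samples) {
  size_t i = start % n_entries;
  size_t max_ind = i;
  size_t max_occ = occupancy[i];
  for (size_t k = 1; k < samples; k++) {
    i += stride;
    if (i >= n_entries) i -= n_entries;  // wrap around the table
    if (occupancy[i] > max_occ) {
      max_occ = occupancy[i];
      max_ind = i;
    }
  }
  return max_ind;  // index of the sampled entry with the largest occupancy
}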
// At present, this must be called stop-world single-threaded.
// First, eliminate garbage regions from the coarse map.
if (G1RSScrubVerbose) {
}
if (G1RSScrubVerbose) {
}
if (G1RSScrubVerbose) {
}
// Now do the fine-grained maps.
for (size_t i = 0; i < _max_fine_entries; i++) {
// If the entire region is dead, eliminate.
if (G1RSScrubVerbose) {
}
if (G1RSScrubVerbose) {
}
} else {
// Do fine-grain elimination.
if (G1RSScrubVerbose) {
}
if (G1RSScrubVerbose) {
}
// Did that empty the table completely?
} else {
}
}
}
}
// Since we may have deleted a from_card_cache entry from the RS, clear
// the FCC.
clear_fcc();
}
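// Illustrative sketch (not part of the original file): the scrub loop above walks
// every fine-grain bucket and unsplices entries whose source region is no longer
// live, then clears the from-card cache. A minimal, hedged version of deleting
// matching nodes from an array of singly linked buckets with a pointer-to-pointer
// walk; SketchEntry, sketch_scrub and the is_dead callback are hypothetical.
struct SketchEntry { SketchEntry* _collision_list_next; int _region_index; };
static void sketch_scrub(SketchEntry** buckets, size_t n_buckets,
                         bool (*is_dead)(int region_index)) {
  for (size_t i = 0; i < n_buckets; i++) {
    SketchEntry** prev = &buckets[i];
    SketchEntry*  cur  = *prev;
    while (cur != NULL) {
      SketchEntry* next = cur->_collision_list_next;
      if (is_dead(cur->_region_index)) {
        *prev = next;  // unsplice; a real implementation would recycle cur here
      } else {
        prev = &cur->_collision_list_next;  // keep cur; advance the back-pointer
      }
      cur = next;
    }
  }
}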
// Cast away const in this case.
sum += occ_sparse();
sum += occ_coarse();
return sum;
}
num++;
}
return sum;
}
}
return _sparse_table.occupied();
}
// Cast away const in this case.
}
return sum;
}
return _from_card_cache_mem_size;
}
return PerRegionTable::fl_mem_size();
}
for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
}
}
// if there are no entries, skip this step
if (_first_all_fine_prts != NULL) {
} else {
}
_coarse_map.clear();
_n_fine_entries = 0;
_n_coarse_entries = 0;
clear_fcc();
}
} else {
}
// Check to see if any of the fcc entries come from here.
if (fcc_ent != -1) {
// Clear the from card cache.
}
}
}
}
HeapRegion* hr) {
}
return true;
} else {
return false;
}
}
// Cast away const in this case.
return contains_reference_locked(from);
}
// Is this region in the coarse map?
hr);
} else {
"Must be in range.");
}
}
void
}
// Determines how many threads can add records to an rset in parallel.
// This can be done either by mutator threads together with the
// concurrent refinement threads, or by GC threads.
return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
}
HeapRegion* hr)
}
// Set up the sparse and fine-grain table sizes.
// table_size = base * (log(region_size / 1M) + 1)
}
if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
}
}
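// Illustrative sketch (not part of the original file): the comment above sizes the
// remembered-set tables as table_size = base * (log(region_size / 1M) + 1). A
// minimal, hedged version of that formula, assuming the log is the base-2 log of
// the region size expressed in megabytes; sketch_table_size is a hypothetical name.
static size_t sketch_table_size(size_t base, size_t region_size_bytes) {
  const size_t M = 1024 * 1024;
  size_t mb = region_size_bytes / M;      // assumes region_size_bytes >= 1M
  size_t log_mb = 0;
  while (mb > 1) { mb >>= 1; log_mb++; }  // floor(log2(region size in MB))
  return base * (log_mb + 1);
}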
if (_iter_state != Unclaimed) return false;
}
}
return _iter_state == Complete;
}
iter->initialize(this);
}
#ifndef PRODUCT
}
}
"We should have yielded all the represented cards.");
}
#endif
SparsePRT::cleanup_all();
}
}
_iter_claimed = 0;
// It's good to check this to make sure that the two methods are in sync.
}
}
//-------------------- Iteration --------------------
_sparse_iter() { }
// Set these values so that we increment to the first region.
_coarse_cur_region_index = -1;
_cur_region_cur_card = 0;
_fine_array_index = -1;
_n_yielded_coarse = 0;
_n_yielded_fine = 0;
_n_yielded_sparse = 0;
}
// Go to the next card.
// Was that the last card in the current region?
// Yes: find the next region. This may leave _coarse_cur_region_index
// set to the last index, in which case there are no more coarse
// regions.
} else {
return false;
}
}
// If we didn't return false above, then we can yield a card.
return true;
}
// Otherwise, find the next bucket list in the array.
if (_fine_cur_prt != NULL) return;
else _fine_array_index++;
}
}
if (fine_has_next()) {
}
while (!fine_has_next()) {
_cur_region_cur_card = 0;
}
if (_fine_cur_prt == NULL) {
if (_fine_cur_prt == NULL) return false;
}
"inv.");
}
return true;
}
return
_fine_cur_prt != NULL &&
}
switch (_is) {
case Sparse:
return true;
}
// Otherwise, deliberate fall-through
case Fine:
if (fine_has_next(card_index)) {
return true;
}
// Otherwise, deliberate fall-through
case Coarse:
if (coarse_has_next(card_index)) {
return true;
}
// Otherwise...
break;
}
"Should have yielded all the cards in the rem set "
"(in the non-par case).");
return false;
}
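// Illustrative sketch (not part of the original file): has_next above consults the
// sparse, fine and coarse representations in that order, deliberately falling
// through to the next tier once the current one is exhausted. A minimal, hedged
// version of that control flow; the struct, enum and stubbed per-tier iterators
// below are hypothetical, with real iterators substituted for the stubs.
struct SketchRemSetIter {
  enum Tier { Sparse, Fine, Coarse };
  Tier _tier;
  // Stubs standing in for the real per-tier iterators.
  bool sparse_has_next(size_t& card_index) { (void)card_index; return false; }
  bool fine_has_next(size_t& card_index)   { (void)card_index; return false; }
  bool coarse_has_next(size_t& card_index) { (void)card_index; return false; }
  bool has_next(size_t& card_index) {
    switch (_tier) {
    case Sparse:
      if (sparse_has_next(card_index)) return true;
      _tier = Fine;    // sparse tier exhausted: deliberate fall-through
    case Fine:
      if (fine_has_next(card_index)) return true;
      _tier = Coarse;  // fine tier exhausted: deliberate fall-through
    case Coarse:
      if (coarse_has_next(card_index)) return true;
      break;           // nothing left in any tier
    }
    return false;
  }
};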
if (_recorded_oops == NULL) {
assert(_n_recorded == 0
&& _recorded_cards == NULL
&& _recorded_regions == NULL,
"Inv");
}
if (_n_recorded == MaxRecorded) {
} else {
_recorded_oops[_n_recorded] = f;
_n_recorded++;
}
}
if (!G1RecordHRRSEvents) return;
if (_recorded_events == NULL) {
assert(_n_recorded_events == 0
&& _recorded_event_index == NULL,
"Inv");
}
if (_n_recorded_events == MaxRecordedEvents) {
} else {
}
}
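// Illustrative sketch (not part of the original file): the recording code above
// lazily allocates its fixed-size arrays on first use and silently drops further
// entries once the bound is reached. A minimal, hedged version of that pattern;
// the names and the bound below are hypothetical, not the values used here.
static const int kSketchMaxRecorded = 1024;  // assumption: arbitrary bound
static int* g_sketch_recorded   = NULL;
static int  g_sketch_n_recorded = 0;
static void sketch_record(int value) {
  if (g_sketch_recorded == NULL) {
    // First use: allocate once; the counter is still zero at this point.
    g_sketch_recorded = new int[kSketchMaxRecorded];
  }
  if (g_sketch_n_recorded < kSketchMaxRecorded) {
    g_sketch_recorded[g_sketch_n_recorded++] = value;
  }
  // else: at capacity; further values are dropped, as in the code above.
}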
switch (evnt) {
case Event_EvacStart:
break;
case Event_EvacEnd:
break;
case Event_RSUpdateEnd:
break;
}
}
int cur_evnt = 0;
int cur_evnt_ind = 0;
if (_n_recorded_events > 0) {
}
for (int i = 0; i < _n_recorded; i++) {
cur_evnt++;
if (cur_evnt < MaxRecordedEvents) {
}
}
_recorded_oops[i]);
}
}
}
}
void
}
#ifndef PRODUCT
// Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in the same
// hash bucket.
// Make three references from region 0x101...
// Now cause a coarsening.
// Now, does iteration yield these three?
sum++;
}
}
#endif