/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif
// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).
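// Illustrative sketch only (not this class's implementation): the essence of
// the card-marking post-barrier that lets a collector find modified ref
// fields without walking the whole heap. The helper name, its parameters and
// the 512-byte card size are assumptions for illustration.
static void sketch_post_barrier(jbyte*      byte_map,    // one byte per card
                                const void* heap_low,    // lowest covered address
                                const void* field_addr)  // ref field just updated
{
  const int sketch_card_shift = 9;  // log2 of an assumed 512-byte card size
  // Map the updated field to its card and mark that card dirty, so a later
  // card scan can enumerate the ref fields modified since the previous scan.
  uintptr_t card = ((uintptr_t)field_addr - (uintptr_t)heap_low) >> sketch_card_shift;
  byte_map[card] = 0;  // 0 stands for "dirty" in this sketch
}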
{
// Add one for a guard card, used to detect errors.
}
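// Illustrative sketch of the sizing rule hinted at above: one table byte per
// card, plus one trailing guard card past the covered range so that a stray
// store beyond the table can be detected. The helper and the 512-byte card
// size are assumptions, not the method defined in this file.
static size_t sketch_cards_required(size_t covered_bytes) {
  const size_t sketch_card_size = 512;  // assumed card size in bytes
  size_t cards = (covered_bytes + sketch_card_size - 1) / sketch_card_size;
  return cards + 1;                     // + 1 for the guard card
}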
{
"unitialized, check declaration order");
}
int max_covered_regions):
{
vm_exit_during_initialization("couldn't alloc card table covered region set.");
int i;
for (i = 0; i < max_covered_regions; i++) {
_covered[i].set_word_size(0);
_committed[i].set_word_size(0);
}
_cur_covered_regions = 0;
if (!heap_rs.is_reserved()) {
vm_exit_during_initialization("Could not reserve enough space for the "
"card marking array");
}
// The assembler store_check code will do an unsigned shift of the oop,
// then add it to byte_map_base, i.e.
//
// _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
!ExecMem, "card table last card");
*guard_card = last_card;
if (_lowest_non_clean == NULL
vm_exit_during_initialization("couldn't allocate an LNC array.");
for (i = 0; i < max_covered_regions; i++) {
_lowest_non_clean[i] = NULL;
_lowest_non_clean_chunk_size[i] = 0;
_last_LNC_resizing_collection[i] = -1;
}
if (TraceCardTableModRefBS) {
" &_byte_map[0]: " INTPTR_FORMAT
" &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
&_byte_map[0],
" byte_map_base: " INTPTR_FORMAT,
}
}
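// Illustrative sketch of the biased-base arithmetic described in the comment
// above (_byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)):
// with the base pre-biased, a store check needs only a shift and an add. The
// helpers below are hypothetical stand-ins and the 512-byte card size is an
// assumption.
static jbyte* sketch_byte_for(jbyte* byte_map_base, const void* p) {
  const int sketch_card_shift = 9;  // assumed log2(card size)
  // byte_map_base is chosen so that the heap's lowest address maps to the
  // first card byte; any covered address then maps with one shift and add.
  return byte_map_base + ((uintptr_t)p >> sketch_card_shift);
}
static void* sketch_addr_for(jbyte* byte_map_base, const jbyte* card) {
  const int sketch_card_shift = 9;
  // Inverse mapping: the first heap address covered by this card byte.
  return (void*)((uintptr_t)(card - byte_map_base) << sketch_card_shift);
}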
int i;
for (i = 0; i < _cur_covered_regions; i++) {
}
// If we didn't find it, create a new one.
"too many covered regions");
// Move the ones above up, to maintain sorted order.
for (int j = _cur_covered_regions; j > i; j--) {
}
int res = i;
return res;
}
for (int i = 0; i < _cur_covered_regions; i++) {
return i;
}
}
assert(0, "address outside of heap?");
return -1;
}
for (int j = 0; j < ind; j++) {
}
return max_end;
}
for (int r = 0; r < _cur_covered_regions; r += 1) {
if (r != self) {
}
}
// Never include the guard page.
return result;
}
// We don't change the start of a region, only the end.
"attempt to cover area not in reserved area");
// collided is true if the expansion would push into another committed region
debug_only(bool collided = false;)
// Commit new or uncommit old pages, if necessary.
// Extend the end of this _committed region
// to cover the end of any lower _committed regions.
// This forms overlapping regions, but never interior regions.
}
// Align the end up to a page size (starts are already aligned).
"align up, but less");
// Check the other regions (excludes "ind") to ensure that
// the new_end_aligned does not intrude onto the committed
// space of another region.
int ri = 0;
// The prior check included in the assert
// (new_end_aligned >= _committed[ri].start())
// is redundant with the "contains" test.
// Any region containing the new end
// should start at or beyond the region found (ind)
// for the new end (committed regions are not expected to
// be proper subsets of other committed regions).
"New end of committed region is inconsistent");
// new_end_aligned can be equal to the start of its
// committed region (i.e., of "ind") if a second
// region following "ind" also starts at the same location
// as "ind".
"New end of committed region is before start");
debug_only(collided = true;)
// Should only collide with 1 region
break;
}
}
}
#ifdef ASSERT
"New end of committed region is in a second committed region");
}
#endif
// The guard page is always committed and should not be committed over.
// "guarded" is used for assertion checking below and recalls the fact
// that the would-be end of the new committed region would have
// penetrated the guard page.
DEBUG_ONLY(bool guarded = false;)
DEBUG_ONLY(guarded = true;)
}
// Must commit new pages.
!ExecMem, "card table expansion");
// Use new_end_aligned (as opposed to new_end_for_commit) because
// the cur_committed region may include the guard region.
// Must uncommit pages.
cur_committed.end()));
if (!uncommit_region.is_empty()) {
// It is not safe to uncommit cards if the boundary between
// the generations is moving. A shrink can uncommit cards
// owned by generation A but being used by generation B.
if (!UseAdaptiveGCBoundary) {
uncommit_region.byte_size())) {
assert(false, "Card table contraction failed");
// The call failed so don't change the end of the
// committed region. This is better than taking the
// VM down.
}
} else {
}
}
}
// In any case, we can reset the end of the current committed entry.
#ifdef ASSERT
// Check that the last card in the new region is committed according
// to the tables.
bool covered = false;
covered = true;
break;
}
}
#endif
// The default of 0 is not necessarily clean cards.
} else {
}
"The guard card will be overwritten");
// The commented-out line below would clean only the newly expanded region,
// not the aligned-up expanded region.
// jbyte* const end = byte_after(new_region.last());
"Expect to be beyond new region unless impacting another region");
// do nothing if we resized downward.
#ifdef ASSERT
// The end of the new committed region should not
// be in any existing region unless it matches
// the start of the next region.
"Overlapping committed regions");
}
}
#endif
}
}
// In any case, the covered size changes.
if (TraceCardTableModRefBS) {
" _covered[%d].start(): " INTPTR_FORMAT
" _covered[%d].last(): " INTPTR_FORMAT,
" _committed[%d].start(): " INTPTR_FORMAT
" _committed[%d].last(): " INTPTR_FORMAT,
" byte_for(start): " INTPTR_FORMAT
" byte_for(last): " INTPTR_FORMAT,
" addr_for(start): " INTPTR_FORMAT
" addr_for(last): " INTPTR_FORMAT,
}
// Touch the last card of the covered region to show that it
// is committed (or SEGV).
}
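// Illustrative sketch of the "grow the committed part of the byte map" steps
// described above: extend the requested end to cover the ends of any lower
// committed regions, then round up to the commit page size before committing.
// The helper and its parameter names are hypothetical; page_size is assumed
// to be a power of two.
static jbyte* sketch_new_committed_end(jbyte* requested_end,
                                       jbyte* largest_lower_committed_end,
                                       size_t page_size) {
  // Cover the end of any lower committed region; this can create overlapping
  // regions, but never a region strictly inside another.
  jbyte* end = requested_end > largest_lower_committed_end
                 ? requested_end : largest_lower_committed_end;
  // Starts are already page aligned, so only the end needs aligning up.
  uintptr_t aligned = ((uintptr_t)end + page_size - 1) & ~(uintptr_t)(page_size - 1);
  return (jbyte*)aligned;
}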
// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
}
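// Illustrative contrast between an imprecise and a precise card mark, since
// the scanning code must cope with both. The helpers and the 512-byte card
// size are assumptions for illustration only.
static void sketch_mark_imprecise(jbyte* byte_map_base, const void* obj_start) {
  const int sketch_card_shift = 9;
  // Imprecise: dirty the card of the object header; the scanner must then
  // walk the whole object to find the ref field that actually changed.
  byte_map_base[(uintptr_t)obj_start >> sketch_card_shift] = 0;
}
static void sketch_mark_precise(jbyte* byte_map_base, const void* field_addr) {
  const int sketch_card_shift = 9;
  // Precise: dirty the card containing the updated field itself.
  byte_map_base[(uintptr_t)field_addr >> sketch_card_shift] = 0;
}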
/*
Claimed and deferred bits are used together in G1 during the evacuation
pause. These bits can have the following state transitions:
1. The claimed bit can be put over any other card state, except that
   the "dirty -> dirty and claimed" transition is checked for in
   G1 code and is not used.
2. The deferred bit can be set only if the previous state of the card
   was either clean or claimed. mark_card_deferred() is wait-free.
   We do not care whether the operation succeeds, because if it does
   not, the only result is a duplicate entry in the update buffer due
   to the "cache miss". So it is not worth spinning.
*/
while (val == clean_card_val() ||
if (val == clean_card_val()) {
} else {
}
return true;
}
}
return false;
}
// It's already processed
return false;
}
// The deferred bit can be installed either on a clean card or on a claimed card.
if (val == clean_card_val()) {
} else {
if (val & claimed_card_val()) {
}
}
}
return true;
}
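// Illustrative sketch of the wait-free policy described in the comment above:
// try once to install a bit with a compare-and-swap and do not retry on
// failure, since losing the race only costs a duplicate entry in the update
// buffer. The helper is hypothetical and uses a GCC atomic builtin rather
// than the VM's own atomics.
static bool sketch_try_mark_deferred(volatile jbyte* card, jbyte deferred_bit) {
  jbyte old_val = *card;
  jbyte new_val = (jbyte)(old_val | deferred_bit);
  // Single attempt, no spinning: succeeds only if the card is unchanged.
  return __sync_bool_compare_and_swap(card, old_val, new_val);
}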
CardTableRS* ct) {
// Caller (process_strong_roots()) claims that all GC threads
// execute this call. With UseDynamicNumberOfGCThreads now all
// active GC threads execute this call. The number of active GC
// threads needs to be passed to par_non_clean_card_iterate_work()
// to get proper partitioning and termination.
//
// This is an example of where n_par_threads() is used instead
// of workers()->active_workers(). n_par_threads can be set to 0 to
// turn off parallelism. For example when this code is called as
// part of verification and SharedHeap::process_strong_roots() is being
// used, then n_par_threads() may have been set to 0. active_workers
// is not overloaded to also serve as a switch that disables
// parallelism; it keeps its meaning as the number of active
// GC workers. If parallelism has not been shut off by
// setting n_par_threads to 0, then n_par_threads should be
// equal to active_workers. When a different mechanism for shutting
// off parallelism is used, then active_workers can be used in
// place of n_par_threads.
// This is an example of a path where n_par_threads is
// set to 0 to turn off parallelism.
// [7] CardTableModRefBS::non_clean_card_iterate()
// [8] CardTableRS::younger_refs_in_space_iterate()
// [9] Generation::younger_refs_in_space_iterate()
// [10] OneContigSpaceCardGeneration::younger_refs_iterate()
// [11] CompactingPermGenGen::younger_refs_iterate()
// [12] CardTableRS::younger_refs_iterate()
// [13] SharedHeap::process_strong_roots()
// [14] G1CollectedHeap::verify()
// [15] Universe::verify()
// [16] G1CollectedHeap::do_collection_pause_at_safepoint()
//
if (is_par) {
#ifndef SERIALGC
#else // SERIALGC
fatal("Parallel gc not supported here.");
#endif // SERIALGC
} else {
// We do not call the non_clean_card_iterate_serial() version below because
// we want to clear the cards (which non_clean_card_iterate_serial() does not
// do for us): clear_cl here does the work of finding contiguous dirty ranges
// of cards to process and clear.
cl->gen_boundary());
}
}
}
// The iterator itself is not MT-aware, but
// MT-aware callers and closures can use this to
// accomplish dirty card iteration in parallel. The
// iterator itself does not clear the dirty cards, or
// change their values in any manner.
MemRegionClosure* cl) {
for (int i = 0; i < _cur_covered_regions; i++) {
if (*cur_entry != clean_card) {
// Should the next card be included in this range of dirty cards?
next_entry--;
}
// The memory region may not be on a card boundary. So that
// objects beyond the end of the region are not processed, make
// cur_cards precise with regard to the end of the memory region.
}
}
}
}
}
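// Illustrative sketch of how an MT-aware caller of the serial iterator above
// can still scan dirty cards in parallel: carve the covered range into chunks
// and let each worker claim chunks through a shared atomic counter. The
// helper is hypothetical and uses a GCC atomic builtin for the claim.
static void sketch_parallel_card_scan(volatile size_t* next_chunk,  // shared claim counter
                                      size_t num_chunks,
                                      void (*scan_chunk)(size_t)) {
  for (;;) {
    // Atomically claim the next unscanned chunk of cards.
    size_t chunk = __sync_fetch_and_add(next_chunk, (size_t)1);
    if (chunk >= num_chunks) {
      return;  // every chunk has been claimed
    }
    scan_chunk(chunk);  // process (and, if the caller wishes, clear) the chunk
  }
}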
assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
*cur = dirty_card;
cur++;
}
}
assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
for (int i = 0; i < _cur_covered_regions; i++) {
}
}
// Be conservative: only clean cards entirely contained within the
// region.
} else {
}
}
for (int i = 0; i < _cur_covered_regions; i++) {
}
}
}
// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
MemRegionClosure* cl) {
for (int i = 0; i < _cur_covered_regions; i++) {
cur_entry = next_entry) {
if (*cur_entry == dirty_card) {
// Accumulate maximal dirty card range, starting at cur_entry
for (dirty_cards = 1;
dirty_cards++, next_entry++);
}
}
}
}
}
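// Illustrative sketch of the "accumulate maximal dirty card range" pattern
// used by the iteration above. The helper is hypothetical and treats limit as
// inclusive, as the surrounding code does.
static size_t sketch_dirty_run(const jbyte* cur, const jbyte* limit, jbyte dirty_val) {
  size_t run = 1;  // *cur is already known to be dirty
  // Extend the run while the following cards, up to and including limit,
  // are also dirty.
  while (cur + run <= limit && cur[run] == dirty_val) {
    run++;
  }
  return run;
}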
bool reset,
int reset_val) {
for (int i = 0; i < _cur_covered_regions; i++) {
cur_entry = next_entry) {
if (*cur_entry == dirty_card) {
// Accumulate maximal dirty card range, starting at cur_entry
for (dirty_cards = 1;
dirty_cards++, next_entry++);
if (reset) {
for (size_t i = 0; i < dirty_cards; i++) {
}
}
return cur_cards;
}
}
}
}
}
}
// For product build verification
"card table guard has been modified");
}
verify_guard();
}
#ifndef PRODUCT
bool failures = false;
if (failed) {
if (!failures) {
failures = true;
}
(int) curr_val);
}
}
}
}
}
#endif
st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
}
return
}
return
cv != clean_card &&
}