/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
FilterKind fk) :
OopClosure* oc) :
private:
bool _failures;
int _n_failures;
public:
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
{
}
}
#ifdef PRODUCT
#else // PRODUCT
#endif // PRODUCT
}
template <class T>
void do_oop_work(T* p) {
"Precondition");
bool failed = false;
if (!_failures) {
}
p, (void*) _containing_obj,
(void*) obj);
} else {
p, (void*) _containing_obj,
}
gclog_or_tty->flush();
_failures = true;
failed = true;
_n_failures++;
}
if (!_g1h->full_collection()) {
!to->isHumongous()) {
|| !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
(_containing_obj->is_objArray() ?
if (is_bad) {
if (!_failures) {
}
"in region "HR_FORMAT,
p, (void*) _containing_obj,
"in region "HR_FORMAT,
(void*) obj,
gclog_or_tty->flush();
_failures = true;
if (!failed) _n_failures++;
}
}
}
}
}
};
template<class ClosureType>
HeapRegion* hr,
// Keep filtering the remembered set.
// Bottom lies entirely below top, so we can call the
// non-memRegion version of oop_iterate below.
}
}
return cur;
}
OopClosure* cl) {
int oop_size;
switch (_fk) {
default: ShouldNotReachHere();
}
// Start filtering what we add to the remembered set. If the object is
// not considered dead, either because it is marked (in the mark bitmap)
// or it was allocated after marking finished, then we add it. Otherwise
// we can safely ignore the object.
} else {
}
// We replicate the loop below for several kinds of possible filters.
switch (_fk) {
case NoFilterKind:
break;
case IntoCSFilterKind: {
break;
}
case OutOfRegionFilterKind: {
break;
}
default:
}
// Last object. Need to do dead-obj filtering here too.
}
}
}
// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
// region_size in bytes
if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
// We base the automatic calculation on the min heap size. This
// can be problematic if the spread between min and max is quite
// wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
// the max size, the region size might be way too large for the
// min size. Either way, some users might have to set the region
// size manually for some -Xms / -Xmx combos.
(uintx) MIN_REGION_SIZE);
}
// Recalculate the region size to make sure it's a power of
// 2. This means that region_size is the largest power of 2 that's
// <= what we've calculated so far.
// Now make sure that we don't go over or under our limits.
if (region_size < MIN_REGION_SIZE) {
} else if (region_size > MAX_REGION_SIZE) {
}
// And recalculate the log.
// Now, set up the globals.
// The cast to int is safe, given that we've bounded region_size by
// MIN_REGION_SIZE and MAX_REGION_SIZE.
}
// After a compaction the mark bitmap is invalid, so we must
// treat all objects as being inside the unmarked area.
}
"we should have already filtered out humongous regions");
"we should have already filtered out humongous regions");
"we should have already filtered out humongous regions");
_in_collection_set = false;
if (!par) {
// If this is parallel, this will be done later.
}
}
}
// GC efficiency is the ratio of how much space would be
// reclaimed over how long we predict it would take to reclaim it.
// Retrieve a prediction of the elapsed time for this region for
// a mixed gc because the region will only be evacuated during a
// mixed gc.
double region_elapsed_time_ms =
}
"Should be normal before the humongous object allocation");
_humongous_start_region = this;
}
"Should be normal before the humongous object allocation");
}
if (startsHumongous()) {
// at least one "continues humongous" region after it
}
} else {
// continues humongous
}
}
if (current != claimValue) {
return true;
}
}
return false;
}
// Must add one below to bias toward the high amount. Otherwise, if
// "high" were at the desired value, and "low" were one less, we
// would not converge on "high". This is not symmetric, because
// we set "high" to a block start, which might be the right one,
// which we don't do for "low".
} else {
}
}
return low;
}
}
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#endif // _MSC_VER
_in_collection_set(false),
#ifdef ASSERT
#endif // ASSERT
{
// Note that initialize() will set the start of the unmarked area of the
// region.
}
// We're not using an iterator given that it will wrap around when
// it reaches the last region and this is not what we want here.
if (!hr->isHumongous()) {
return hr;
}
index += 1;
}
return NULL;
}
}
while (p < e) {
}
assert(p == e, "bad memregion: doesn't end on obj boundary");
}
}
}
bool during_conc_mark) {
// We always recreate the prev marking info and we'll explicitly
// mark all objects we find to be self-forwarded on the prev
// bitmap. So all objects need to be below PTAMS.
_prev_marked_bytes = 0;
if (during_initial_mark) {
// During initial-mark, we'll also explicitly mark all objects
// we find to be self-forwarded on the next bitmap. So all
// objects need to be below NTAMS.
_next_marked_bytes = 0;
} else if (during_conc_mark) {
// During concurrent mark, all objects in the CSet (including
// the ones we find to be self-forwarded) are implicitly live.
// So all objects need to be above NTAMS.
_next_marked_bytes = 0;
}
}
bool during_conc_mark,
marked_bytes, used()));
}
ObjectClosure* cl) {
// We used to use "block_start_careful" here. But we're actually happy
// to update the BOT while we do this...
// Otherwise, find the obj that extends onto mr.start().
"postcondition of block_start");
// Ran into an unparseable point.
return cur;
}
// The check above must occur before the operation below, since an
// abort might invalidate the "size" operation.
}
return NULL;
}
bool filter_young,
// Currently, we should only have to clean the card if filter_young
// is true and vice versa.
if (filter_young) {
} else {
}
// If we're within a stop-world GC, then we might look at a card in a
// GC alloc region that extends onto a GC LAB, which may not be
// parseable. Stop such at the "saved_mark" of the region.
if (g1h->is_gc_active()) {
} else {
}
// Otherwise, find the obj that extends onto mr.start().
// The intersection of the incoming mr (for the card) and the
// allocated part of the region is non-empty. This implies that
// we have actually allocated into this region. The code in
// G1CollectedHeap.cpp that allocates a new region sets the
// is_young tag on the region before allocating. Thus we
// safely know if this region is young.
if (is_young() && filter_young) {
return NULL;
}
// We can only clean the card here, after we make the decision that
// the card is not young. And we only clean the card if we have been
// asked to (i.e., card_ptr != NULL).
// We must complete this write before we do any of the reads below.
OrderAccess::storeload();
}
// Cache the boundaries of the memory region in some const locals
// We used to use "block_start_careful" here. But we're actually happy
// to update the BOT while we do this...
// Ran into an unparseable point.
return cur;
}
// Otherwise...
}
// If we finish the above loop...We have a parseable object that
// begins on or before the start of the memory region, and ends
// inside or spans the entire region.
"Loop postcondition");
}
// Ran into an unparseable point.
return cur;
};
// Otherwise:
// This object either does not span the MemRegion
// boundary, or if it does it's not an array.
// Apply closure to whole object.
} else {
// This obj is an array that spans the boundary.
// Stop at the boundary.
}
}
}
return NULL;
}
if (isHumongous()) {
if (startsHumongous())
else
} else {
}
if (in_collection_set())
else
if (is_young())
else
if (is_empty())
else
}
bool dummy = false;
}
// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.
bool* failures) const {
*failures = false;
while (p < top()) {
object_num += 1;
SIZE_FORMAT" words) in a %shumongous region",
*failures = true;
return;
}
// If it returns false, verify_for_object() will output the
// appropriate message.
*failures = true;
return;
}
*failures = true;
return;
*failures = true;
return;
} else {
*failures = true;
}
if (G1MaxVerifyFailures >= 0 &&
return;
}
}
} else {
*failures = true;
return;
}
}
prev_p = p;
p += obj_size;
}
if (p != top()) {
*failures = true;
return;
}
// Do some extra BOT consistency checking for addresses in the
// range [top, end). BOT look-ups in this range should yield
// top. No point in doing that if top == end (there's nothing there).
if (p < the_end) {
// Look up top
if (b_start_1 != p) {
*failures = true;
return;
}
// Look up top + 1
if (b_start_2 != p) {
*failures = true;
return;
}
}
// Look up an address between top and end
if (b_start_3 != p) {
*failures = true;
return;
}
}
// Look up end - 1
if (b_start_4 != p) {
*failures = true;
return;
}
}
*failures = true;
return;
}
}
// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.
// false ==> we'll do the clearing if there's clearing to be done.
}
}
}
}
print_short();
}
return _offsets.initialize_threshold();
}
}
return top();
else
return ContiguousSpace::saved_mark_word();
}
if (_gc_time_stamp < curr_gc_time_stamp) {
// The order of these is important, as another thread might be
// about to start scanning this region. If it does so after
// set_saved_mark and before _gc_time_stamp = ..., then the latter
// will be false, and it will pick up top() as the high water mark
// of region. If it does so after _gc_time_stamp = ..., then it
// will pick up the right saved_mark_word() as the high water mark
// of the region. Either way, the behaviour will be correct.
// No need to do another barrier to flush the writes above. If
// this is called in parallel with other threads trying to
// allocate into the region, the caller should call this while
// holding a lock and when the lock is released the writes will be
// flushed.
}
}
{
}