/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"

ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  _end(NULL), _hard_end(NULL),
  _retained(false), _retained_filler(),
  _allocated(0), _wasted(0)
{
  assert(min_size() > AlignmentReserve, "Inconsistency!");
  // arrayOopDesc::header_size depends on command line initialization.
  FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
}

size_t ParGCAllocBuffer::FillerHeaderSize;

// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object.  We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
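// As set in the constructor above, the reserve is needed only when
// oopDesc::header_size() exceeds MinObjAlignment; otherwise even the
// smallest leftover shard is presumed able to hold a minimal filler
// object, and AlignmentReserve is zero.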
size_t ParGCAllocBuffer::AlignmentReserve;

void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // If the buffer had been retained, shorten the previous filler object.
    assert(_retained_filler.end() <= _top, "INVARIANT");
    CollectedHeap::fill_with_object(_retained_filler);
    // Wasted space book-keeping, otherwise (normally) done in invalidate()
    _wasted += _retained_filler.word_size();
    _retained = false;
  }
  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
  if (_top < _hard_end) {
    CollectedHeap::fill_with_object(_top, _hard_end);
    if (!retain) {
      invalidate();
    } else {
      // Is there wasted space we'd like to retain for the next GC?
      if (pointer_delta(_end, _top) > FillerHeaderSize) {
        _retained = true;
        _retained_filler = MemRegion(_top, FillerHeaderSize);
        _top = _top + FillerHeaderSize;
      } else {
        invalidate();
      }
    }
  }
}

void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
  assert(ResizePLAB, "Wasted work");
  stats->add_allocated(_allocated);
  stats->add_wasted(_wasted);
  stats->add_unused(pointer_delta(_end, _top));
}

// Compute desired plab size and latch result for later
// use.  This should be called once at the end of a parallel
// scavenge; it clears the sensor accumulators.
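// The heuristic below estimates the fraction of allocated PLAB space that
// went unused, derives a target number of refills from that fraction, and
// sizes the next PLAB so the surviving allocation is spread over that many
// refills per worker.  Illustrative numbers only: with _unused/_allocated
// = 0.02, TargetSurvivorRatio = 50 and TargetPLABWastePct = 10,
// target_refills = (0.02 * 50) / 10 = 0.1, which truncates to 0 and is
// clamped to 1, so the raw plab_sz is _used / no_of_gc_workers before
// smoothing, clipping, and alignment.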
void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
  assert(ResizePLAB, "Not set");
  if (_allocated == 0) {
    assert(_unused == 0,
           err_msg("Inconsistency in PLAB stats: "
                   "_allocated: "SIZE_FORMAT", "
                   "_wasted: "SIZE_FORMAT", "
                   "_unused: "SIZE_FORMAT", "
                   "_used : "SIZE_FORMAT,
                   _allocated, _wasted, _unused, _used));

    _allocated = 1;
  }
  double wasted_frac = (double)_unused/(double)_allocated;
  size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
                                   TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
  _used = _allocated - _wasted - _unused;
  size_t plab_sz = _used/(target_refills*no_of_gc_workers);
  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = "SIZE_FORMAT" ", plab_sz);
  // Take historical weighted average
  _filter.sample(plab_sz);
  // Clip from above and below, and align to object boundary
  plab_sz = MAX2(min_size(), (size_t)_filter.average());
  plab_sz = MIN2(max_size(), plab_sz);
  plab_sz = align_object_size(plab_sz);
  // Latch the result
  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = "SIZE_FORMAT") ", plab_sz);
  _desired_plab_sz = plab_sz;
  // Now clear the accumulators for next round:
  // note this needs to be fixed in the case where we
  // are retaining across scavenges. FIX ME !!! XXX
  _allocated = 0;
  _wasted    = 0;
  _unused    = 0;
}

#ifndef PRODUCT
void ParGCAllocBuffer::print() {
  gclog_or_tty->print("parGCAllocBuffer: _bottom: %p _top: %p _end: %p _hard_end: %p "
                      "_retained: %c _retained_filler: [%p,%p)\n",
                      _bottom, _top, _end, _hard_end,
                      "FT"[_retained], _retained_filler.start(), _retained_filler.end());
}
#endif // !PRODUCT

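// Size, in words and in bytes, of the chunks into which a retained buffer
// is carved: the smaller of the card table's parallel chunk alignment and
// the generation grain.  (As the comments below explain, fillers are placed
// so that no object spans a parallel card chunk boundary that another GC
// thread may be walking.)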
const size_t ParGCAllocBufferWithBOT::ChunkSizeInWords =
  MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),
       ((size_t)Generation::GenGrain)/HeapWordSize);
const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes =
  MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,
       (size_t)Generation::GenGrain);

ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
                                                 BlockOffsetSharedArray* bsa) :
  ParGCAllocBuffer(word_sz),
  _bsa(bsa),
  _bt(bsa, MemRegion(_bottom, _hard_end)),
  _true_end(_hard_end)
{}

// The buffer comes with its own BOT, with a shared (obviously) underlying
// BlockOffsetSharedArray.  We manipulate this BOT in the normal way
// as we would for any contiguous space.  However, on occasion we
// need to do some buffer surgery at the extremities before we
// start using the body of the buffer for allocations.  Such surgery
// (as explained elsewhere) is to prevent allocation on a card that
// is in the process of being walked concurrently by another GC thread.
// When such surgery happens at a point far removed from (to the right
// of) the current allocation point, top, we use the "contig"
// parameter below to directly manipulate the shared array without
// modifying the _next_threshold state in the BOT.
void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
                                                     bool contig) {
  CollectedHeap::fill_with_object(mr);
  if (contig) {
    _bt.alloc_block(mr.start(), mr.end());
  } else {
    _bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
  }
}

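// Slow-path allocation for a retained buffer whose usable portion has been
// capped below _true_end: advance _hard_end by (at most) one parallel chunk,
// lay down a fresh retained filler at the old _hard_end, and retry the
// allocation.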
HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
  HeapWord* res = NULL;
  if (_true_end > _hard_end) {
    assert((HeapWord*)align_size_down(intptr_t(_hard_end),
                                      ChunkSizeInBytes) == _hard_end,
           "or else _true_end should be equal to _hard_end");
    assert(_retained, "or else _true_end should be equal to _hard_end");
    assert(_retained_filler.end() <= _top, "INVARIANT");
    CollectedHeap::fill_with_object(_retained_filler);
    if (_top < _hard_end) {
      fill_region_with_block(MemRegion(_top, _hard_end), true);
    }
    HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
    _retained_filler = MemRegion(_hard_end, FillerHeaderSize);
    _bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
    _top      = _retained_filler.end();
    _hard_end = next_hard_end;
    _end      = _hard_end - AlignmentReserve;
    res       = ParGCAllocBuffer::allocate(word_sz);
    if (res != NULL) {
      _bt.alloc_block(res, word_sz);
    }
  }
  return res;
}

void
ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
  ParGCAllocBuffer::undo_allocation(obj, word_sz);
  // This may back us up beyond the previous threshold, so reset.
  _bt.set_region(MemRegion(_top, _hard_end));
  _bt.initialize_threshold();
}

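// Retire the buffer, optionally retaining it for the next scavenge.
// Roughly: record any old retained filler as a block in the BOT, restore
// _hard_end from _true_end, let ParGCAllocBuffer::retire() fill the unused
// tail, and then, if retaining, trim partial cards at both ends of the
// remaining space and re-establish the BOT (and chunk-boundary fillers)
// over the region that is kept.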
void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // We're about to make the retained_filler into a block.
    _bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
                                      _retained_filler.end());
  }
  // Reset _hard_end to _true_end (and update _end)
  if (retain && _hard_end != NULL) {
    assert(_hard_end <= _true_end, "Invariant.");
    _hard_end = _true_end;
    _end      = MAX2(_top, _hard_end - AlignmentReserve);
    assert(_end <= _hard_end, "Invariant.");
  }
  _true_end = _hard_end;
  HeapWord* pre_top = _top;

  ParGCAllocBuffer::retire(end_of_gc, retain);
  // Now any old _retained_filler is cut back to size, the free part is
  // filled with a filler object, and top is past the header of that
  // object.

  if (retain && _top < _end) {
    assert(end_of_gc && retain, "Or else retain should be false.");
    // If the lab does not start on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support.  So we fill
    // the first card with a garbage object.
    size_t first_card_index = _bsa->index_for(pre_top);
    HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
    if (first_card_start < pre_top) {
      HeapWord* second_card_start =
        _bsa->inc_by_region_size(first_card_start);

      // Ensure enough room to fill with the smallest block
      second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);

      // If the end is already in the first card, don't go beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_hard_end < second_card_start ||
          pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
        second_card_start = _hard_end;
      }
      if (pre_top < second_card_start) {
        MemRegion first_card_suffix(pre_top, second_card_start);
        fill_region_with_block(first_card_suffix, true);
      }
      pre_top = second_card_start;
      _top = pre_top;
      _end = MAX2(_top, _hard_end - AlignmentReserve);
    }

    // If the lab does not end on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support.  So we fill
    // the last card with a garbage object.
    size_t last_card_index = _bsa->index_for(_hard_end);
    HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
    if (last_card_start < _hard_end) {

      // Ensure enough room to fill with the smallest block
      last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);

      // If the top is already in the last card, don't go back beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_top > last_card_start ||
          pointer_delta(last_card_start, _top) < AlignmentReserve) {
        last_card_start = _top;
      }
      if (last_card_start < _hard_end) {
        MemRegion last_card_prefix(last_card_start, _hard_end);
        fill_region_with_block(last_card_prefix, false);
      }
      _hard_end = last_card_start;
      _end      = MAX2(_top, _hard_end - AlignmentReserve);
      _true_end = _hard_end;
      assert(_end <= _hard_end, "Invariant.");
    }

    // At this point:
    //   1) we had a filler object from the original top to hard_end.
    //   2) We've filled in any partial cards at the front and back.
    if (pre_top < _hard_end) {
      // Now we can reset the _bt to do allocation in the given area.
      MemRegion new_filler(pre_top, _hard_end);
      fill_region_with_block(new_filler, false);
      _top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
      // If there's no space left, don't retain.
      if (_top >= _end) {
        _retained = false;
        invalidate();
        return;
      }
      _retained_filler = MemRegion(pre_top, _top);
      _bt.set_region(MemRegion(_top, _hard_end));
      _bt.initialize_threshold();
      assert(_bt.threshold() > _top, "initialize_threshold failed!");

      // There may be other reasons for queries into the middle of the
      // filler object.  When such queries are done in parallel with
      // allocation, bad things can happen, if the query involves object
      // iteration.  So we ensure that such queries do not involve object
      // iteration, by putting another filler object on the boundaries of
      // such queries.  One such is the object spanning a parallel card
      // chunk boundary.

      // "chunk_boundary" is the address of the first chunk boundary less
      // than "hard_end".
      HeapWord* chunk_boundary =
        (HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);
      assert(chunk_boundary < _hard_end, "Or else above did not work.");
      assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
             "Consequence of last card handling above.");

      if (_top <= chunk_boundary) {
        assert(_true_end == _hard_end, "Invariant.");
        while (_top <= chunk_boundary) {
          assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
                 "Consequence of last card handling above.");
          _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
          CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
          _hard_end = chunk_boundary;
          chunk_boundary -= ChunkSizeInWords;
        }
        _end = _hard_end - AlignmentReserve;
        assert(_top <= _end, "Invariant.");
        // Now reset the initial filler chunk so it doesn't overlap with
        // the one(s) inserted above.
        MemRegion new_filler(pre_top, _hard_end);
        fill_region_with_block(new_filler, false);
      }
    } else {
      _retained = false;
      invalidate();
    }
  } else {
    assert(!end_of_gc ||
           (!_retained && _true_end == _hard_end), "Checking.");
  }
  assert(_end <= _hard_end, "Invariant.");
  assert(_top < _end || _top == _hard_end, "Invariant");
}