/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "gc_implementation/shared/gcUtil.hpp"
#include "memory/defNewGeneration.hpp"
// CMS bitmaps usually cover large memory regions
_bm.clear_large();
return;
}
}
}
}
"outside underlying space?");
}
"outside underlying space?");
}
"outside underlying space?");
}
// Range size is usually just 1 bit.
}
// Range size is usually just 1 bit.
}
// Range size is usually just 1 bit.
}
// Range size is usually just 1 bit.
}
// Range size must be greater than 32 bytes.
}
// Range size must be greater than 32 bytes.
}
// Range size must be greater than 32 bytes.
}
// Range size must be greater than 32 bytes.
}
// Starting at "addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region.
}
// Starting at "start_addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region
// strictly less than end_addr.
}
return mr;
}
"outside underlying space?");
}
// The same as isMarked() but without a lock check.
"outside underlying space?");
}
"outside underlying space?");
}
// Return the HeapWord address corresponding to next "1" bit
// (inclusive).
}
// Return the least HeapWord address corresponding to next "1" bit
// starting at start_addr (inclusive) but strictly less than end_addr.
return nextAddr;
}
// Return the HeapWord address corresponding to the next "0" bit
// (inclusive).
}
// Return the HeapWord address corresponding to the next "0" bit
// (inclusive).
return nextAddr;
}
}
}
}
if (CMSIncrementalMode) {
}
}
if (CMSIncrementalMode) {
}
}
if (CMSIncrementalMode) {
}
}
if (CMSIncrementalMode) {
}
}
if (CMSIncrementalMode) {
}
}
}
||
"must be object");
return should_unload_classes() &&
_collectorState == Sweeping &&
}
// We are in the midst of an "abortable preclean" and either
// scavenge is done or foreground GC wants to take over collection
return _collectorState == AbortablePreclean &&
}
}
}
return _valid_bits == _ALL_VALID;
}
if (_gc0_begin_time.is_updated()) {
}
}
// Amount promoted.
if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
}
// If the younger gen collections were skipped, then the
// number of promoted bytes will be 0 and adding it to the
// average will incorrectly lessen the average. It is, however,
// also possible that no promotion was needed.
//
// _gc0_promoted used to be calculated as
// _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
// promoted_bytes, _gc0_alpha);
// Amount directly allocated.
}
_cms_timer.stop();
// This is just an approximate value, but is good enough.
_cms_timer.reset();
_cms_timer.start();
}
_cms_timer.stop();
// Avoid division by 0.
_allow_duty_cycle_reduction = true;
_cms_timer.start();
}
return _cms_begin_time.seconds();
}
return _cms_end_time.seconds();
}
return gc0_promoted() / gc0_period();
}
return cms_allocated() / gc0_period();
}
}
// Update the duty cycle only if pacing is enabled and the stats are valid
// (after at least one young gen gc and one cms cycle have completed).
if (CMSIncrementalPacing && valid()) {
return icms_update_duty_cycle_impl();
}
return _icms_duty_cycle;
}
cmsSpace()->save_sweep_limit();
}
}
}
}
return _cmsSpace->used_region();
}
return _cmsSpace->used_region_at_save_marks();
}
if (ConcurrentMarkSweepThread::should_yield() &&
!_collector->foregroundGCIsActive() &&
_yield) {
}
}
if (ConcurrentMarkSweepThread::should_yield() &&
!_collector->foregroundGCIsActive() &&
_yield) {
}
}
// Return value of "true" indicates that the on-going preclean
// should be aborted.
if (ConcurrentMarkSweepThread::should_yield() &&
!_collector->foregroundGCIsActive() &&
_yield) {
// Sample young gen size before and after yield
return _collector->should_abort_preclean();
}
return false;
}
if (ConcurrentMarkSweepThread::should_yield() &&
!_collector->foregroundGCIsActive() &&
_yield) {
// Sample young gen size before and after yield
}
}
if (ConcurrentMarkSweepThread::should_yield() &&
!_collector->foregroundGCIsActive() &&
_yield) {
}
}
// The conditions are ordered for the remarking phase
// when _yield is false.
if (_yield &&
!_collector->foregroundGCIsActive() &&
}
}
// Align the end of mr so it's at a card boundary.
// This is superfluous except at the end of the space;
// we should do better than this XXX
}
// Align the end of mr so it's at a card boundary.
// This is superfluous except at the end of the space;
// we should do better than this XXX
}
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP