/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

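// Returns the region containing the given address. An address in the
// tail of a humongous object is mapped back to the corresponding
// humongous start region. Can return NULL, e.g. for an address in the
// perm gen.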
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
  // hr can be NULL if addr is in the perm gen.
  if (hr != NULL && hr->continuesHumongous()) {
    hr = hr->humongous_start_region();
  }
  return hr;
}

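// "Raw" variant of the above: requires that addr lies within the
// reserved heap and does not remap continues-humongous regions to
// their start region.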
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(_g1_reserved.contains((const void*) addr), "invariant");
  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
  return res;
}

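// Returns true if obj lies in a region that is currently part of the
// collection set.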
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}

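// First-level (non-humongous) allocation entry point for mutator
// threads. Tries the current mutator alloc region first and falls
// back to attempt_allocation_slow() (which may take locks and/or
// trigger a GC) only if that fails. A successful allocation is always
// in a young region, so the block is dirtied to keep the post-write
// barrier from enqueueing cards for it (see dirty_young_block()).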
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size,
                                    unsigned int* gc_count_before_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
                                                              false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size, gc_count_before_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

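// Allocates out of the survivor GC alloc region during an evacuation
// pause. The unlocked attempt is retried under the FreeList_lock via
// the _locked variant if it fails. Survivor regions are young, so a
// successful allocation is dirtied; they also do not maintain the
// block offset table, hence bot_updates is false.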
inline HeapWord*
G1CollectedHeap::survivor_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
                                                                  false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
                                                                 false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

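// Allocates out of the old GC alloc region during an evacuation
// pause, with the same retry-under-FreeList_lock protocol as the
// survivor path. Old regions do maintain the block offset table
// (bot_updates is true) and, not being young, need no dirtying.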
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
                                                             true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
                                                            true /* bot_updates */);
  }
  return result;
}

// Dirties the cards that cover the block so that the post-write
// barrier never queues anything when updating objects on this block.
// It is assumed (and in fact we assert) that the block belongs to a
// young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(containing_hr != NULL && start != NULL && word_size > 0,
         "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
}

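// Returns the i-th worker's reference-to-scan task queue.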
inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

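// Liveness queries against the previous and next marking bitmaps
// maintained by the concurrent marker.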
inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord*) obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord*) obj);
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

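// Returns whether evacuation failure injection applies to the given
// combination of GC type (young vs. mixed) and marking state, based
// on the corresponding G1EvacuationFailureALotDuring* flags.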
inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc =
      (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool gcs_are_young = g1_policy()->gcs_are_young();
    const bool during_im = g1_policy()->during_initial_mark_pause();
    const bool during_marking = mark_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(gcs_are_young,
                                          during_im,
                                          during_marking);
  }
}

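// Returns true when the evacuation of the current object should be
// forced to fail: once every G1EvacuationFailureALotCount increments
// of the (non-atomic) counter, and only while injection is in effect
// for the current GC.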
inline bool
G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for the current GC.
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

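// Clears the injection state and records the current collection
// count, so that the next G1EvacuationFailureALotInterval is measured
// from this point.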
inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif  // #ifndef PRODUCT

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP