Lines Matching defs:G1CollectedHeap (the results below include definitions as well as uses and comments mentioning the symbol)

60 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
71 // All allocation activity covered by the G1CollectedHeap interface is
123 G1CollectedHeap* _g1h;
130 _g1h = G1CollectedHeap::heap();
157 G1CollectedHeap* _g1h;
163 _g1h = G1CollectedHeap::heap();
184 YoungList::YoungList(G1CollectedHeap* g1h) :
389 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
420 HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
442 void G1CollectedHeap::stop_conc_gc_threads() {
458 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
466 bool G1CollectedHeap::is_scavengable(const void* p) {
467 G1CollectedHeap* g1h = G1CollectedHeap::heap();
478 void G1CollectedHeap::check_ct_logs_at_safepoint() {
525 G1CollectedHeap* G1CollectedHeap::_g1h;
530 G1CollectedHeap::new_region_try_secondary_free_list() {
568 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
622 uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
667 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
802 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
856 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
865 G1CollectedHeap::mem_allocate(size_t word_size,
908 warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
916 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
1016 warning("G1CollectedHeap::attempt_allocation_slow() "
1025 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
1126 warning("G1CollectedHeap::attempt_allocation_humongous() "
1135 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1157 G1CollectedHeap* _g1h;
1160 PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1179 void G1CollectedHeap::clear_rsets_post_compaction() {
1185 G1CollectedHeap* _g1h;
1189 RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
1205 G1CollectedHeap* _g1;
1207 ParRebuildRSTask(G1CollectedHeap* g1)
1249 void G1CollectedHeap::print_hrs_post_compaction() {
1254 double G1CollectedHeap::verify(bool guard, const char* msg) {
1269 void G1CollectedHeap::verify_before_gc() {
1274 void G1CollectedHeap::verify_after_gc() {
1279 bool G1CollectedHeap::do_collection(bool explicit_gc,
1382 // G1CollectedHeap::ref_processing_init() about
1449 if (G1CollectedHeap::use_parallel_gc_threads()) {
1556 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1569 G1CollectedHeap::
1659 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1733 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
1753 void G1CollectedHeap::update_committed_space(HeapWord* old_end,
1768 bool G1CollectedHeap::expand(size_t expand_bytes) {
1836 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1879 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1905 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1982 jint G1CollectedHeap::initialize() {
2228 void G1CollectedHeap::ref_processing_init() {
2308 size_t G1CollectedHeap::capacity() const {
2312 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2350 void G1CollectedHeap::check_gc_time_stamps() {
2357 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2378 size_t G1CollectedHeap::used() const {
2389 size_t G1CollectedHeap::used_unlocked() const {
2407 size_t G1CollectedHeap::recalculate_used() const {
2413 size_t G1CollectedHeap::unsafe_max_alloc() {
2432 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2442 void G1CollectedHeap::allocate_dummy_regions() {
2463 void G1CollectedHeap::increment_old_marking_cycles_started() {
2472 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
2520 void G1CollectedHeap::register_concurrent_cycle_start(jlong start_time) {
2528 void G1CollectedHeap::register_concurrent_cycle_end() {
2541 void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
2547 G1YCType G1CollectedHeap::yc_type() {
2563 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
2578 void G1CollectedHeap::collect(GCCause::Cause cause) {
2643 bool G1CollectedHeap::is_in(const void* p) const {
2674 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
2682 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
2704 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
2712 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
2729 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
2734 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2739 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
2744 const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
2828 void G1CollectedHeap::reset_heap_region_claim_values() {
2833 void G1CollectedHeap::reset_cset_heap_region_claim_values() {
2880 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
2911 bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {
2920 void G1CollectedHeap::clear_cset_start_regions() {
2933 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
2961 if (G1CollectedHeap::use_parallel_gc_threads()) {
2995 HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
2998 G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U;
3006 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
3018 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
3047 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
3052 Space* G1CollectedHeap::space_containing(const void* addr) const {
3059 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
3067 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
3073 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
3078 bool G1CollectedHeap::supports_tlab_allocation() const {
3082 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
3086 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
3103 size_t G1CollectedHeap::max_capacity() const {
3107 jlong G1CollectedHeap::millis_since_last_gc() {
3112 void G1CollectedHeap::prepare_for_verify() {
3119 bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
3134 HeapWord* G1CollectedHeap::top_at_mark_start(HeapRegion* hr, VerifyOption vo) {
3144 bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
3154 const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
3165 G1CollectedHeap* _g1h;
3168 VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
3183 G1CollectedHeap* _g1h;
3193 _g1h = G1CollectedHeap::heap();
3222 G1CollectedHeap *_g1;
3225 _g1 = G1CollectedHeap::heap();
3299 G1CollectedHeap* _g1h;
3307 _g1h(G1CollectedHeap::heap()),
3337 G1CollectedHeap* _g1h;
3345 G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
3367 void G1CollectedHeap::verify(bool silent) {
3371 void G1CollectedHeap::verify(bool silent,
3486 void G1CollectedHeap::print_on(outputStream* st) const {
3506 void G1CollectedHeap::print_extended_on(outputStream* st) const {
3520 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
3521 if (G1CollectedHeap::use_parallel_gc_threads()) {
3531 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3532 if (G1CollectedHeap::use_parallel_gc_threads()) {
3539 void G1CollectedHeap::print_tracing_info() const {
3595 void G1CollectedHeap::print_cset_rsets() {
3600 void G1CollectedHeap::print_all_rsets() {
3606 G1CollectedHeap* G1CollectedHeap::heap() {
3607 assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
3612 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3621 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
3634 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3657 G1CollectedHeap::doConcurrentMark() {
3665 size_t G1CollectedHeap::pending_card_num() {
3683 size_t G1CollectedHeap::cards_scanned() {
3688 G1CollectedHeap::setup_surviving_young_words() {
3705 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
3714 G1CollectedHeap::cleanup_surviving_young_words() {
3743 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3749 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3763 void G1CollectedHeap::reset_taskqueue_stats() {
3771 void G1CollectedHeap::log_gc_header() {
3786 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3810 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3864 int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3902 // G1CollectedHeap::ref_processing_init() to see how
4223 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
4246 void G1CollectedHeap::init_mutator_alloc_region() {
4251 void G1CollectedHeap::release_mutator_alloc_region() {
4256 void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
4292 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
4309 void G1CollectedHeap::abandon_gc_alloc_regions() {
4315 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
4321 void G1CollectedHeap::finalize_for_evac_failure() {
4330 void G1CollectedHeap::remove_self_forwarding_pointers() {
4335 if (G1CollectedHeap::use_parallel_gc_threads()) {
4362 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
4366 void G1CollectedHeap::drain_evac_failure_scan_stack() {
4377 G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state,
4421 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
4440 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
4450 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
4481 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
4598 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
4834 G1CollectedHeap* _g1h;
4844 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
4891 G1CollectedHeap* _g1h;
4905 G1ParTask(G1CollectedHeap* g1h,
4926 // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
5024 G1CollectedHeap* _g1;
5027 G1PointsIntoCSOopClosure(G1CollectedHeap* g1) :
5047 G1CollectedHeap* _g1;
5050 G1FilteredCodeBlobToOopClosure(G1CollectedHeap* g1, OopClosure* cl) :
5076 G1CollectedHeap::
5148 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
5161 G1CollectedHeap* _g1;
5163 G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5181 G1CollectedHeap* _g1;
5183 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5201 G1CollectedHeap* _g1h;
5207 G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
5262 G1CollectedHeap* _g1h;
5268 G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
5286 G1CollectedHeap* _g1h;
5292 G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
5314 G1CollectedHeap* _g1h;
5320 G1CollectedHeap* g1h,
5431 G1CollectedHeap* _g1h;
5437 G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :
5522 void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
5549 assert(!G1CollectedHeap::use_parallel_gc_threads() ||
5558 if (G1CollectedHeap::use_parallel_gc_threads()) {
5647 void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
5682 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
5697 if (G1CollectedHeap::use_parallel_gc_threads()) {
5726 if (G1CollectedHeap::use_parallel_gc_threads()) {
5814 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
5834 void G1CollectedHeap::free_region(HeapRegion* hr,
5853 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5886 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
5916 G1CollectedHeap* _g1h;
5920 G1CollectedHeap* g1h) :
5941 G1CollectedHeap* _g1h;
5944 G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
5956 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
5963 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
5976 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5983 void G1CollectedHeap::verify_dirty_young_regions() {
5988 void G1CollectedHeap::cleanUpCardTable() {
5996 if (G1CollectedHeap::use_parallel_gc_threads()) {
6024 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) {
6144 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
6157 void G1CollectedHeap::set_free_regions_coming() {
6167 void G1CollectedHeap::reset_free_regions_coming() {
6182 void G1CollectedHeap::wait_while_free_regions_coming() {
6207 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
6229 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
6268 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
6328 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6343 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6347 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6358 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6376 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
6395 void G1CollectedHeap::set_par_threads() {
6398 assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
6418 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6447 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6522 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
6531 void G1CollectedHeap::verify_region_sets() {
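
The matches above repeat one idiom many times: G1CollectedHeap keeps a static singleton field (source line 525 in the listing) that is returned by G1CollectedHeap::heap(), and the helper closures cache that pointer in a _g1h member once at construction (source lines 123/130, 157/163, and many others) instead of re-resolving it on every callback. Below is a minimal standalone sketch of that idiom; the types are simplified stand-ins, not HotSpot's real declarations.

    #include <cassert>
    #include <cstddef>

    class G1CollectedHeap {
      static G1CollectedHeap* _g1h;        // singleton slot, as in the listing
    public:
      G1CollectedHeap() { _g1h = this; }   // registered on construction
      static G1CollectedHeap* heap() {
        assert(_g1h != nullptr && "heap not yet constructed");
        return _g1h;
      }
      std::size_t capacity() const { return 0; }  // stub standing in for the real API
    };

    G1CollectedHeap* G1CollectedHeap::_g1h = nullptr;

    // Closures cache the singleton once, the way the listing's closure
    // constructors assign _g1h = G1CollectedHeap::heap().
    class ExampleClosure {
      G1CollectedHeap* _g1h;
    public:
      ExampleClosure() : _g1h(G1CollectedHeap::heap()) {}
      void do_it() { (void)_g1h->capacity(); }
    };

    int main() {
      G1CollectedHeap heap;   // makes heap() valid for the closure below
      ExampleClosure cl;
      cl.do_it();
      return 0;
    }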
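
Source line 60 in the listing (_humongous_object_threshold_in_words) is the threshold behind the humongous allocation entry points (humongous_obj_allocate_find_first, humongous_obj_allocate, attempt_allocation_humongous). In G1, a request of at least half a heap-region size is treated as humongous and bypasses the normal bump-pointer path. A sketch of that rule under an assumed 1 MB region and 8-byte HeapWord; the constants are illustrative examples, not values taken from the source.

    #include <cstddef>
    #include <iostream>

    // Assumed example geometry: 1 MB regions, 8-byte HeapWords.
    const std::size_t region_size_in_bytes = 1024 * 1024;
    const std::size_t heap_word_size       = 8;
    const std::size_t region_size_in_words = region_size_in_bytes / heap_word_size;

    // The G1 rule: humongous iff the request is at least half a region.
    const std::size_t humongous_threshold_in_words = region_size_in_words / 2;

    bool is_humongous(std::size_t word_size) {
      return word_size >= humongous_threshold_in_words;
    }

    int main() {
      std::cout << is_humongous(1000)    // well under half a region: prints 0
                << is_humongous(70000)   // over 65536 words: prints 1
                << '\n';
      return 0;
    }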
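
Matches such as those at source lines 1449, 3521, 4335, and 5996 all guard the same dispatch: when G1CollectedHeap::use_parallel_gc_threads() is true a task is handed to the worker gang, otherwise its work() body runs once on the calling thread as worker 0. A toy reduction of that dispatch, with std::thread standing in for HotSpot's worker gang (compile with -pthread):

    #include <cstdio>
    #include <thread>
    #include <vector>

    struct AbstractGangTask {
      virtual void work(unsigned worker_id) = 0;   // per-worker body
      virtual ~AbstractGangTask() {}
    };

    struct RebuildRSTaskSketch : AbstractGangTask {
      void work(unsigned worker_id) override {
        std::printf("worker %u: rebuilding its share of remembered sets\n", worker_id);
      }
    };

    void run_task(AbstractGangTask& task, bool parallel, unsigned n_workers) {
      if (parallel) {
        std::vector<std::thread> gang;
        for (unsigned i = 0; i < n_workers; ++i)
          gang.emplace_back([&task, i] { task.work(i); });
        for (auto& t : gang) t.join();
      } else {
        task.work(0);   // serial fallback: same body, single worker id 0
      }
    }

    int main() {
      RebuildRSTaskSketch task;
      run_task(task, /*parallel=*/true, 4);
      return 0;
    }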
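
The evacuation-failure entries (source lines 4330, 4377, 4421 in the listing: remove_self_forwarding_pointers, handle_evacuation_failure_par, handle_evacuation_failure_common) rest on G1's self-forwarding trick: when an object cannot be copied out of the collection set, its forwarding pointer is CAS-installed to point at the object itself, so racing GC threads agree that it stayed put. A minimal sketch of that atomic install, with a plain pointer field standing in for the mark word:

    #include <atomic>
    #include <cstdio>

    struct Obj {
      std::atomic<Obj*> forwardee{nullptr};  // stand-in for the mark word
    };

    // Returns the winning forwardee: the object itself when this thread
    // records the evacuation failure, or whatever another thread installed.
    Obj* forward_to_self_atomically(Obj* obj) {
      Obj* expected = nullptr;
      if (obj->forwardee.compare_exchange_strong(expected, obj)) {
        return obj;        // this thread installed the self-forward
      }
      return expected;     // another GC thread already forwarded it
    }

    int main() {
      Obj o;
      Obj* f = forward_to_self_atomically(&o);
      std::printf("self-forwarded: %d\n", f == &o);  // prints 1
      return 0;
    }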
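
Finally, the slow-path entries and their periodic warnings (source lines 865/908, 916/1016, 1025/1126: mem_allocate and the attempt_allocation_* variants with their "retries %d times" messages) share one retry shape: allocation attempts alternate with collection pauses until one succeeds, warning every so many iterations. A sketch of that shape; the constant and helpers are illustrative stand-ins, not HotSpot's.

    #include <cstdio>

    const int warn_every = 2;  // stand-in for QueuedAllocationWarningCount

    bool try_allocate(int attempt) { return attempt >= 3; }  // succeeds eventually
    void do_collection_pause()     { /* a GC pause would be scheduled here */ }

    int main() {
      for (int try_count = 1; ; ++try_count) {
        if (try_allocate(try_count)) {
          std::printf("allocated on attempt %d\n", try_count);
          break;
        }
        do_collection_pause();
        if (try_count % warn_every == 0) {
          std::printf("warning: mem_allocate retries %d times\n", try_count);
        }
      }
      return 0;
    }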