Searched refs: n_workers (Results 1 - 10 of 10) sorted by relevance

/openjdk7/hotspot/src/share/vm/gc_implementation/g1/
g1HotCardCache.cpp:47  int n_workers = (ParallelGCThreads > 0 ?  [local]
49 _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
g1RemSet.inline.hpp:32  inline uint G1RemSet::n_workers() {  [function in class: G1RemSet]
H A Dg1RemSet.cpp79 guarantee(n_workers() > 0, "There should be some workers");
80 _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers(), mtGC);
81 for (uint i = 0; i < n_workers(); i++) {
88 for (uint i = 0; i < n_workers(); i++) {
296 assert(worker_i < (int)n_workers(), "sanity");
348 // _seq_task->set_n_termination((int)n_workers());
351 _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers(), mtGC);
352 for (uint i = 0; i < n_workers(); ++i) {
422 for (uint i = 0; i < n_workers(); ++i) {
500 n_workers(),
[all...]
g1RemSet.hpp:43  uint n_workers();
g1CollectedHeap.cpp:1450  uint n_workers =  [local]
1455 n_workers == workers()->total_workers(),
1457 workers()->set_active_workers(n_workers);
1463 set_par_threads(n_workers);
3427 int n_workers = workers()->active_workers(); local
3428 set_par_threads(n_workers);
3436 // The implication is that n_workers is > 0.
5295 int n_workers) :
5299 _active_workers(n_workers)
5301 assert(n_workers >
5292 G1STWRefProcTaskExecutor(G1CollectedHeap* g1h, FlexibleWorkGang* workers, RefToScanQueueSet *task_queues, int n_workers) argument
5696 uint n_workers; local
6399 uint n_workers = workers()->active_workers(); local
[all...]
concurrentMark.cpp:1872  uint n_workers;  [local]
1882 n_workers = g1h->n_par_threads();
1883 assert(g1h->n_par_threads() == n_workers,
1892 n_workers = 1;
1911 g1h->set_par_threads((int)n_workers);
1945 g1h->set_par_threads((int)n_workers);
1969 g1h->set_par_threads((int)n_workers);
1987 g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2224 int n_workers) :
2226 _workers(workers), _active_workers(n_workers) { }
2221 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h, ConcurrentMark* cm, WorkGang* workers, int n_workers) argument
3033 G1AggregateCountDataTask(G1CollectedHeap* g1h, ConcurrentMark* cm, BitMap* cm_card_bm, size_t max_task_num, int n_workers) argument
3058 int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ? local
[all...]
/openjdk7/hotspot/src/share/vm/utilities/
workgroup.hpp:378  uint n_workers() { return _n_workers; }  [function in class: WorkGangBarrierSync]
389 WorkGangBarrierSync(uint n_workers, const char* name);
393 void set_n_workers(uint n_workers);
workgroup.cpp:381  WorkGangBarrierSync::WorkGangBarrierSync(uint n_workers, const char* name)  [argument]
383 _n_workers(n_workers), _n_completed(0), _should_reset(false) {
386 void WorkGangBarrierSync::set_n_workers(uint n_workers) { argument
387 _n_workers = n_workers;
402 if (n_completed() == n_workers()) {
407 // n_workers(). So, if we set n_completed() to 0, those workers
409 // n_workers() and go back to sleep). Instead, we raise the
415 while (n_completed() != n_workers()) {
/openjdk7/hotspot/src/share/vm/gc_implementation/parNew/
parNewGeneration.cpp:979  int n_workers = active_workers;  [local]
982 ref_processor()->set_active_mt_degree(n_workers);
986 ParallelTaskTerminator _term(n_workers, task_queues());
992 gch->set_par_threads(n_workers);
998 if (n_workers > 1) {
1066 plab_stats()->adjust_desired_plab_sz(n_workers);
/openjdk7/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/
concurrentMarkSweepGeneration.cpp:5122  // A value of 0 passed to n_workers will cause the number of
5127 int n_workers, FlexibleWorkGang* workers,
5132 _n_workers(n_workers),
5134 _term(n_workers, task_queues) { }
5141 int n_workers() { return _n_workers; }
5609 int n_workers = workers->active_workers();
5610 if (n_workers == 0) {
5611 assert(n_workers > 0, "Should have been set during scavenge");
5612 n_workers = ParallelGCThreads;
5613 workers->set_active_workers(n_workers);
[all...]

Completed in 94 milliseconds