Lines Matching defs:tq

155 * taskqid_t taskq_dispatch(tq, func, arg, flags):
191 * void taskq_dispatch_ent(tq, func, arg, flags, tqent)
202 * void taskq_wait(tq):
209 * void taskq_suspend(tq)
215 * int taskq_suspended(tq)
220 * void taskq_resume(tq)
224 * int taskq_member(tq, thread)
226 * Returns 1 if 'thread' belongs to taskq 'tq' and 0 otherwise. The
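The interface lines above (155-226) enumerate the public taskq entry points: dispatch, dispatch into a preallocated entry, wait for completion, suspend/resume, and membership testing. As a rough illustration only, the sketch below shows how a kernel consumer typically drives this API; the callback my_task, the queue name, and the create/dispatch parameters are assumptions for illustration, not taken from this listing.

#include <sys/types.h>
#include <sys/taskq.h>
#include <sys/disp.h>		/* minclsyspri */

static void
my_task(void *arg)
{
	/* deferred work runs here, in a taskq worker thread */
}

static void
example(void *arg)
{
	taskq_t *tq;

	/* one worker thread, preallocate 1 entry, cap at 64 entries */
	tq = taskq_create("example_tq", 1, minclsyspri, 1, 64, 0);

	if (taskq_dispatch(tq, my_task, arg, TQ_NOSLEEP) == 0) {
		/* a zero id means the non-sleeping dispatch failed; retry sleeping */
		(void) taskq_dispatch(tq, my_task, arg, TQ_SLEEP);
	}

	taskq_wait(tq);		/* block until every dispatched task finishes */
	taskq_destroy(tq);
}
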
691 #define TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag) \
698 #define TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag) \
701 (!(tq->tq_flags & TASKQ_PREPOPULATE) || \
702 (tq->tq_nalloc > tq->tq_minalloc)) && \
704 mutex_exit(&tq->tq_lock); \
708 #define TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag)
709 #define TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag)
737 #define TQ_DO_ENQUEUE(tq, tqe, func, arg, front) { \
738 ASSERT(MUTEX_HELD(&tq->tq_lock)); \
741 TQ_PREPEND(tq->tq_task, tqe); \
743 TQ_APPEND(tq->tq_task, tqe); \
747 tq->tq_tasks++; \
748 if (tq->tq_tasks - tq->tq_executed > tq->tq_maxtasks) \
749 tq->tq_maxtasks = tq->tq_tasks - tq->tq_executed; \
750 cv_signal(&tq->tq_dispatch_cv); \
751 DTRACE_PROBE2(taskq__enqueue, taskq_t *, tq, taskq_ent_t *, tqe); \
754 #define TQ_ENQUEUE(tq, tqe, func, arg) \
755 TQ_DO_ENQUEUE(tq, tqe, func, arg, 0)
757 #define TQ_ENQUEUE_FRONT(tq, tqe, func, arg) \
758 TQ_DO_ENQUEUE(tq, tqe, func, arg, 1)
773 taskq_t *tq = buf;
775 bzero(tq, sizeof (taskq_t));
777 mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
778 rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
779 cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
780 cv_init(&tq->tq_exit_cv, NULL, CV_DEFAULT, NULL);
781 cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);
782 cv_init(&tq->tq_maxalloc_cv, NULL, CV_DEFAULT, NULL);
784 tq->tq_task.tqent_next = &tq->tq_task;
785 tq->tq_task.tqent_prev = &tq->tq_task;
794 taskq_t *tq = buf;
796 ASSERT(tq->tq_nthreads == 0);
797 ASSERT(tq->tq_buckets == NULL);
798 ASSERT(tq->tq_tcreates == 0);
799 ASSERT(tq->tq_tdeaths == 0);
801 mutex_destroy(&tq->tq_lock);
802 rw_destroy(&tq->tq_threadlock);
803 cv_destroy(&tq->tq_dispatch_cv);
804 cv_destroy(&tq->tq_exit_cv);
805 cv_destroy(&tq->tq_wait_cv);
806 cv_destroy(&tq->tq_maxalloc_cv);
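Lines 773-806 are the bodies of what are presumably the taskq kmem cache constructor and destructor: the constructor zeroes the taskq_t, initializes its mutex, rwlock, and condition variables, and makes tq_task point to itself as an empty circular list; the destructor asserts the queue is quiescent before destroying those primitives. Below is a hedged sketch of how such a pair is normally wired up with kmem_cache_create(9F); the function and cache names are assumptions based on the listing, not shown in it.

#include <sys/kmem.h>
#include <sys/taskq_impl.h>	/* full taskq_t layout */

/* prototypes assumed to match the constructor/destructor bodies above */
static int taskq_constructor(void *, void *, int);
static void taskq_destructor(void *, void *);

static kmem_cache_t *taskq_cache;

static void
taskq_cache_init(void)
{
	/* cached taskq_t objects come back with locks and cvs already set up */
	taskq_cache = kmem_cache_create("taskq_cache", sizeof (taskq_t), 0,
	    taskq_constructor, taskq_destructor, NULL, NULL, NULL, 0);
}
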
848 taskq_update_nthreads(taskq_t *tq, uint_t ncpus)
850 uint_t newtarget = TASKQ_THREADS_PCT(ncpus, tq->tq_threads_ncpus_pct);
853 ASSERT(MUTEX_HELD(&tq->tq_lock));
856 ASSERT3U(tq->tq_nthreads_target, !=, 0);
859 ASSERT3U(newtarget, <=, tq->tq_nthreads_max);
860 if (newtarget != tq->tq_nthreads_target) {
861 tq->tq_flags |= TASKQ_CHANGING;
862 tq->tq_nthreads_target = newtarget;
863 cv_broadcast(&tq->tq_dispatch_cv);
864 cv_broadcast(&tq->tq_exit_cv);
870 taskq_cpupct_install(taskq_t *tq, cpupart_t *cpup)
872 ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
875 mutex_enter(&tq->tq_lock);
876 tq->tq_cpupart = cpup->cp_id;
877 taskq_update_nthreads(tq, cpup->cp_ncpus);
878 mutex_exit(&tq->tq_lock);
880 list_insert_tail(&taskq_cpupct_list, tq);
885 taskq_cpupct_remove(taskq_t *tq)
887 ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
890 list_remove(&taskq_cpupct_list, tq);
898 taskq_t *tq;
921 for (tq = list_head(&taskq_cpupct_list); tq != NULL;
922 tq = list_next(&taskq_cpupct_list, tq)) {
924 mutex_enter(&tq->tq_lock);
929 if (tq->tq_cpupart == cp->cp_id) {
930 taskq_update_nthreads(tq, ncpus);
932 mutex_exit(&tq->tq_lock);
967 * Assumes: tq->tq_lock is held.
970 taskq_ent_alloc(taskq_t *tq, int flags)
977 ASSERT(MUTEX_HELD(&tq->tq_lock));
983 again: if ((tqe = tq->tq_freelist) != NULL &&
984 ((flags & TQ_NOALLOC) || tq->tq_nalloc >= tq->tq_minalloc)) {
985 tq->tq_freelist = tqe->tqent_next;
990 if (tq->tq_nalloc >= tq->tq_maxalloc) {
1005 while (tq->tq_freelist == NULL) {
1006 tq->tq_maxalloc_wait++;
1007 wait_rv = cv_timedwait(&tq->tq_maxalloc_cv,
1008 &tq->tq_lock, wait_time);
1009 tq->tq_maxalloc_wait--;
1013 if (tq->tq_freelist)
1017 mutex_exit(&tq->tq_lock);
1021 mutex_enter(&tq->tq_lock);
1023 tq->tq_nalloc++;
1034 * Assumes: tq->tq_lock is held.
1037 taskq_ent_free(taskq_t *tq, taskq_ent_t *tqe)
1039 ASSERT(MUTEX_HELD(&tq->tq_lock));
1041 if (tq->tq_nalloc <= tq->tq_minalloc) {
1042 tqe->tqent_next = tq->tq_freelist;
1043 tq->tq_freelist = tqe;
1045 tq->tq_nalloc--;
1046 mutex_exit(&tq->tq_lock);
1048 mutex_enter(&tq->tq_lock);
1051 if (tq->tq_maxalloc_wait)
1052 cv_signal(&tq->tq_maxalloc_cv);
1060 * Assumes: tq->tq_lock is held.
1063 taskq_ent_exists(taskq_t *tq, task_func_t func, void *arg)
1067 ASSERT(MUTEX_HELD(&tq->tq_lock));
1069 for (tqe = tq->tq_task.tqent_next; tqe != &tq->tq_task;
1137 taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
1144 ASSERT(tq != NULL);
1147 if (!(tq->tq_flags & TASKQ_DYNAMIC)) {
1155 mutex_enter(&tq->tq_lock);
1157 TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flags);
1159 if ((tqe = taskq_ent_alloc(tq, flags)) == NULL) {
1160 mutex_exit(&tq->tq_lock);
1167 TQ_ENQUEUE_FRONT(tq, tqe, func, arg);
1169 TQ_ENQUEUE(tq, tqe, func, arg);
1171 mutex_exit(&tq->tq_lock);
1179 TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flags);
1181 bsize = tq->tq_nbuckets;
1188 if ((tqe = taskq_bucket_dispatch(tq->tq_buckets, func, arg))
1191 bucket = tq->tq_buckets;
1204 b = &tq->tq_buckets[h & (bsize - 1)];
1205 ASSERT(b->tqbucket_taskq == tq); /* Sanity check */
1226 b = &tq->tq_buckets[++h & (bsize - 1)];
1227 ASSERT(b->tqbucket_taskq == tq); /* Sanity check */
1268 mutex_enter(&tq->tq_lock);
1269 if (!taskq_ent_exists(tq, taskq_bucket_extend, bucket)) {
1270 if ((tqe1 = taskq_ent_alloc(tq, TQ_NOSLEEP)) != NULL) {
1271 TQ_ENQUEUE_FRONT(tq, tqe1, taskq_bucket_extend, bucket);
1282 if ((tqe = taskq_ent_alloc(tq, flags)) != NULL) {
1283 TQ_ENQUEUE(tq, tqe, func, arg);
1288 mutex_exit(&tq->tq_lock);
1294 taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
1298 ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
1308 mutex_enter(&tq->tq_lock);
1311 TQ_ENQUEUE_FRONT(tq, tqe, func, arg);
1313 TQ_ENQUEUE(tq, tqe, func, arg);
1315 mutex_exit(&tq->tq_lock);
1323 taskq_wait(taskq_t *tq)
1325 ASSERT(tq != curthread->t_taskq);
1327 mutex_enter(&tq->tq_lock);
1328 while (tq->tq_task.tqent_next != &tq->tq_task || tq->tq_active != 0)
1329 cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
1330 mutex_exit(&tq->tq_lock);
1332 if (tq->tq_flags & TASKQ_DYNAMIC) {
1333 taskq_bucket_t *b = tq->tq_buckets;
1335 for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
1352 taskq_suspend(taskq_t *tq)
1354 rw_enter(&tq->tq_threadlock, RW_WRITER);
1356 if (tq->tq_flags & TASKQ_DYNAMIC) {
1357 taskq_bucket_t *b = tq->tq_buckets;
1359 for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
1368 mutex_enter(&tq->tq_lock);
1369 ASSERT(!(tq->tq_flags & TASKQ_SUSPENDED));
1370 tq->tq_flags |= TASKQ_SUSPENDED;
1371 mutex_exit(&tq->tq_lock);
1375 * returns: 1 if tq is suspended, 0 otherwise.
1378 taskq_suspended(taskq_t *tq)
1380 return ((tq->tq_flags & TASKQ_SUSPENDED) != 0);
1387 taskq_resume(taskq_t *tq)
1389 ASSERT(RW_WRITE_HELD(&tq->tq_threadlock));
1391 if (tq->tq_flags & TASKQ_DYNAMIC) {
1392 taskq_bucket_t *b = tq->tq_buckets;
1394 for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
1400 mutex_enter(&tq->tq_lock);
1401 ASSERT(tq->tq_flags & TASKQ_SUSPENDED);
1402 tq->tq_flags &= ~TASKQ_SUSPENDED;
1403 mutex_exit(&tq->tq_lock);
1405 rw_exit(&tq->tq_threadlock);
1409 taskq_member(taskq_t *tq, kthread_t *thread)
1411 return (thread->t_taskq == tq);
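taskq_member() (lines 1409-1411) simply compares the thread's t_taskq back-pointer, which each worker sets to its owning queue at line 1514; the truncated comment at line 226 indicates the intended use is asserting taskq context. A hypothetical consumer-side check, with an assumed module-global queue:

#include <sys/taskq.h>
#include <sys/thread.h>
#include <sys/debug.h>

static taskq_t *my_tq;		/* hypothetical queue owned by this module */

static void
my_tq_only_work(void *arg)
{
	/* only my_tq's worker threads are expected to run this function */
	ASSERT(taskq_member(my_tq, curthread));
	/* ... touch state that only those workers may modify ... */
}
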
1423 taskq_thread_create(taskq_t *tq)
1426 const boolean_t first = (tq->tq_nthreads == 0);
1428 ASSERT(MUTEX_HELD(&tq->tq_lock));
1429 ASSERT(tq->tq_flags & TASKQ_CHANGING);
1430 ASSERT(tq->tq_nthreads < tq->tq_nthreads_target);
1431 ASSERT(!(tq->tq_flags & TASKQ_THREAD_CREATED));
1434 tq->tq_flags |= TASKQ_THREAD_CREATED;
1435 tq->tq_active++;
1436 mutex_exit(&tq->tq_lock);
1443 if ((tq->tq_flags & TASKQ_DUTY_CYCLE) != 0) {
1445 ASSERT3P(tq->tq_proc, !=, &p0);
1446 t = lwp_kernel_create(tq->tq_proc, taskq_thread, tq, TS_RUN,
1447 tq->tq_pri);
1449 t = thread_create(NULL, 0, taskq_thread, tq, 0, tq->tq_proc,
1450 TS_RUN, tq->tq_pri);
1454 mutex_enter(&tq->tq_lock);
1459 * We know the thread cannot go away, since tq cannot be
1463 if (tq->tq_flags & TASKQ_THREADS_CPU_PCT) {
1464 taskq_cpupct_install(tq, t->t_cpupart);
1466 mutex_enter(&tq->tq_lock);
1469 while (tq->tq_nthreads != tq->tq_nthreads_target &&
1470 tq->tq_nthreads < TASKQ_CREATE_ACTIVE_THREADS) {
1471 cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
1480 taskq_thread_wait(taskq_t *tq, kmutex_t *mx, kcondvar_t *cv,
1485 if (!(tq->tq_flags & TASKQ_CPR_SAFE)) {
1493 if (!(tq->tq_flags & TASKQ_CPR_SAFE)) {
1508 taskq_t *tq = arg;
1514 curthread->t_taskq = tq; /* mark ourselves for taskq_member() */
1516 if (curproc != &p0 && (tq->tq_flags & TASKQ_DUTY_CYCLE)) {
1517 sysdc_thread_enter(curthread, tq->tq_DC,
1518 (tq->tq_flags & TASKQ_DC_BATCH) ? SYSDC_THREAD_BATCH : 0);
1521 if (tq->tq_flags & TASKQ_CPR_SAFE) {
1522 CALLB_CPR_INIT_SAFE(curthread, tq->tq_name);
1524 CALLB_CPR_INIT(&cprinfo, &tq->tq_lock, callb_generic_cpr,
1525 tq->tq_name);
1527 mutex_enter(&tq->tq_lock);
1528 thread_id = ++tq->tq_nthreads;
1529 ASSERT(tq->tq_flags & TASKQ_THREAD_CREATED);
1530 ASSERT(tq->tq_flags & TASKQ_CHANGING);
1531 tq->tq_flags &= ~TASKQ_THREAD_CREATED;
1533 VERIFY3S(thread_id, <=, tq->tq_nthreads_max);
1535 if (tq->tq_nthreads_max == 1)
1536 tq->tq_thread = curthread;
1538 tq->tq_threadlist[thread_id - 1] = curthread;
1541 if (tq->tq_nthreads == TASKQ_CREATE_ACTIVE_THREADS)
1542 cv_broadcast(&tq->tq_wait_cv);
1545 if (tq->tq_flags & TASKQ_CHANGING) {
1547 if (thread_id > tq->tq_nthreads_target) {
1559 if (thread_id == tq->tq_nthreads ||
1560 tq->tq_nthreads_target == 0)
1564 (void) taskq_thread_wait(tq, &tq->tq_lock,
1565 &tq->tq_exit_cv, &cprinfo, -1);
1573 if (!(tq->tq_flags & TASKQ_THREAD_CREATED)) {
1575 if (tq->tq_nthreads == tq->tq_nthreads_target) {
1576 tq->tq_flags &= ~TASKQ_CHANGING;
1577 cv_broadcast(&tq->tq_wait_cv);
1580 if (tq->tq_nthreads < tq->tq_nthreads_target) {
1581 taskq_thread_create(tq);
1586 if ((tqe = tq->tq_task.tqent_next) == &tq->tq_task) {
1587 if (--tq->tq_active == 0)
1588 cv_broadcast(&tq->tq_wait_cv);
1589 (void) taskq_thread_wait(tq, &tq->tq_lock,
1590 &tq->tq_dispatch_cv, &cprinfo, -1);
1591 tq->tq_active++;
1597 mutex_exit(&tq->tq_lock);
1607 if ((!(tq->tq_flags & TASKQ_DYNAMIC)) &&
1616 rw_enter(&tq->tq_threadlock, RW_READER);
1618 DTRACE_PROBE2(taskq__exec__start, taskq_t *, tq,
1621 DTRACE_PROBE2(taskq__exec__end, taskq_t *, tq,
1624 rw_exit(&tq->tq_threadlock);
1626 mutex_enter(&tq->tq_lock);
1627 tq->tq_totaltime += end - start;
1628 tq->tq_executed++;
1631 taskq_ent_free(tq, tqe);
1634 if (tq->tq_nthreads_max == 1)
1635 tq->tq_thread = NULL;
1637 tq->tq_threadlist[thread_id - 1] = NULL;
1640 ASSERT(tq->tq_active > 0);
1641 tq->tq_active--;
1643 ASSERT(tq->tq_nthreads > 0);
1644 tq->tq_nthreads--;
1647 cv_broadcast(&tq->tq_exit_cv);
1648 if (tq->tq_nthreads == tq->tq_nthreads_target) {
1649 if (!(tq->tq_flags & TASKQ_THREAD_CREATED))
1650 tq->tq_flags &= ~TASKQ_CHANGING;
1652 cv_broadcast(&tq->tq_wait_cv);
1655 ASSERT(!(tq->tq_flags & TASKQ_CPR_SAFE));
1656 CALLB_CPR_EXIT(&cprinfo); /* drops tq->tq_lock */
1672 taskq_t *tq = bucket->tqbucket_taskq;
1678 CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, tq->tq_name);
1706 DTRACE_PROBE3(taskq__d__exec__start, taskq_t *, tq,
1709 DTRACE_PROBE3(taskq__d__exec__end, taskq_t *, tq,
1741 w = taskq_thread_wait(tq, lock, cv,
1787 mutex_enter(&tq->tq_lock);
1788 tq->tq_tdeaths++;
1789 mutex_exit(&tq->tq_lock);
1864 taskq_t *tq = kmem_cache_alloc(taskq_cache, KM_SLEEP);
1894 tq->tq_maxsize = nthreads;
1915 tq->tq_threads_ncpus_pct = pct;
1931 (void) strncpy(tq->tq_name, name, TASKQ_NAMELEN + 1);
1932 strident_canon(tq->tq_name, TASKQ_NAMELEN + 1);
1934 tq->tq_flags = flags | TASKQ_CHANGING;
1935 tq->tq_active = 0;
1936 tq->tq_instance = instance;
1937 tq->tq_nthreads_target = nthreads;
1938 tq->tq_nthreads_max = max_nthreads;
1939 tq->tq_minalloc = minalloc;
1940 tq->tq_maxalloc = maxalloc;
1941 tq->tq_nbuckets = bsize;
1942 tq->tq_proc = proc;
1943 tq->tq_pri = pri;
1944 tq->tq_DC = dc;
1945 list_link_init(&tq->tq_cpupct_link);
1948 tq->tq_threadlist = kmem_alloc(
1951 mutex_enter(&tq->tq_lock);
1954 taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));
1963 zone_hold(tq->tq_proc->p_zone);
1970 taskq_thread_create(tq);
1971 mutex_exit(&tq->tq_lock);
1978 tq->tq_buckets = bucket;
1985 bucket->tqbucket_taskq = tq;
2003 instance = tq->tq_instance =
2008 if ((tq->tq_kstat = kstat_create("unix", instance,
2009 tq->tq_name, "taskq_d", KSTAT_TYPE_NAMED,
2012 tq->tq_kstat->ks_lock = &taskq_d_kstat_lock;
2013 tq->tq_kstat->ks_data = &taskq_d_kstat;
2014 tq->tq_kstat->ks_update = taskq_d_kstat_update;
2015 tq->tq_kstat->ks_private = tq;
2016 kstat_install(tq->tq_kstat);
2019 if ((tq->tq_kstat = kstat_create("unix", instance, tq->tq_name,
2023 tq->tq_kstat->ks_lock = &taskq_kstat_lock;
2024 tq->tq_kstat->ks_data = &taskq_kstat;
2025 tq->tq_kstat->ks_update = taskq_kstat_update;
2026 tq->tq_kstat->ks_private = tq;
2027 kstat_install(tq->tq_kstat);
2031 return (tq);
2041 taskq_destroy(taskq_t *tq)
2043 taskq_bucket_t *b = tq->tq_buckets;
2046 ASSERT(! (tq->tq_flags & TASKQ_CPR_SAFE));
2051 if (tq->tq_kstat != NULL) {
2052 kstat_delete(tq->tq_kstat);
2053 tq->tq_kstat = NULL;
2059 if (tq->tq_flags & TASKQ_NOINSTANCE) {
2060 vmem_free(taskq_id_arena, (void *)(uintptr_t)(tq->tq_instance),
2062 tq->tq_instance = 0;
2068 if (tq->tq_flags & TASKQ_THREADS_CPU_PCT) {
2069 taskq_cpupct_remove(tq);
2075 taskq_wait(tq);
2077 mutex_enter(&tq->tq_lock);
2078 ASSERT((tq->tq_task.tqent_next == &tq->tq_task) &&
2079 (tq->tq_active == 0));
2082 tq->tq_nthreads_target = 0;
2084 tq->tq_flags |= TASKQ_CHANGING;
2085 cv_broadcast(&tq->tq_dispatch_cv);
2086 cv_broadcast(&tq->tq_exit_cv);
2088 while (tq->tq_nthreads != 0)
2089 cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
2091 if (tq->tq_nthreads_max != 1)
2092 kmem_free(tq->tq_threadlist, sizeof (kthread_t *) *
2093 tq->tq_nthreads_max);
2095 tq->tq_minalloc = 0;
2096 while (tq->tq_nalloc != 0)
2097 taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));
2099 mutex_exit(&tq->tq_lock);
2104 for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
2130 if (tq->tq_buckets != NULL) {
2131 ASSERT(tq->tq_flags & TASKQ_DYNAMIC);
2132 kmem_free(tq->tq_buckets,
2133 sizeof (taskq_bucket_t) * tq->tq_nbuckets);
2135 /* Cleanup fields before returning tq to the cache */
2136 tq->tq_buckets = NULL;
2137 tq->tq_tcreates = 0;
2138 tq->tq_tdeaths = 0;
2140 ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
2147 zone_rele(tq->tq_proc->p_zone);
2149 tq->tq_threads_ncpus_pct = 0;
2150 tq->tq_totaltime = 0;
2151 tq->tq_tasks = 0;
2152 tq->tq_maxtasks = 0;
2153 tq->tq_executed = 0;
2154 kmem_cache_free(taskq_cache, tq);
2171 taskq_t *tq = b->tqbucket_taskq;
2179 mutex_enter(&tq->tq_lock);
2184 if (tq->tq_tcreates++ - tq->tq_tdeaths > tq->tq_maxsize) {
2185 tq->tq_tcreates--;
2186 mutex_exit(&tq->tq_lock);
2189 mutex_exit(&tq->tq_lock);
2194 mutex_enter(&tq->tq_lock);
2196 tq->tq_tcreates--;
2197 mutex_exit(&tq->tq_lock);
2210 0, tq->tq_proc, TS_STOPPED, tq->tq_pri);
2233 tqe->tqent_thread->t_taskq = tq;
2243 taskq_t *tq = ksp->ks_private;
2248 tqsp->tq_pid.value.ui64 = tq->tq_proc->p_pid;
2249 tqsp->tq_tasks.value.ui64 = tq->tq_tasks;
2250 tqsp->tq_executed.value.ui64 = tq->tq_executed;
2251 tqsp->tq_maxtasks.value.ui64 = tq->tq_maxtasks;
2252 tqsp->tq_totaltime.value.ui64 = tq->tq_totaltime;
2253 tqsp->tq_nactive.value.ui64 = tq->tq_active;
2254 tqsp->tq_nalloc.value.ui64 = tq->tq_nalloc;
2255 tqsp->tq_pri.value.ui64 = tq->tq_pri;
2256 tqsp->tq_nthreads.value.ui64 = tq->tq_nthreads;
2264 taskq_t *tq = ksp->ks_private;
2265 taskq_bucket_t *b = tq->tq_buckets;
2271 ASSERT(tq->tq_flags & TASKQ_DYNAMIC);
2273 tqsp->tqd_btasks.value.ui64 = tq->tq_tasks;
2274 tqsp->tqd_bexecuted.value.ui64 = tq->tq_executed;
2275 tqsp->tqd_bmaxtasks.value.ui64 = tq->tq_maxtasks;
2276 tqsp->tqd_bnalloc.value.ui64 = tq->tq_nalloc;
2277 tqsp->tqd_bnactive.value.ui64 = tq->tq_active;
2278 tqsp->tqd_btotaltime.value.ui64 = tq->tq_totaltime;
2279 tqsp->tqd_pri.value.ui64 = tq->tq_pri;
2293 for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {