Lines Matching refs:tq (cross-reference hits for the symbol tq)

59 task_alloc(taskq_t *tq, int tqflags)
64 again: if ((t = tq->tq_freelist) != NULL && tq->tq_nalloc >= tq->tq_minalloc) {
65 tq->tq_freelist = t->task_next;
67 if (tq->tq_nalloc >= tq->tq_maxalloc) {
81 tq->tq_maxalloc_wait++;
82 rv = cv_timedwait(&tq->tq_maxalloc_cv,
83 &tq->tq_lock, ddi_get_lbolt() + hz);
84 tq->tq_maxalloc_wait--;
88 mutex_exit(&tq->tq_lock);
92 mutex_enter(&tq->tq_lock);
94 tq->tq_nalloc++;
100 task_free(taskq_t *tq, task_t *t)
102 if (tq->tq_nalloc <= tq->tq_minalloc) {
103 t->task_next = tq->tq_freelist;
104 tq->tq_freelist = t;
106 tq->tq_nalloc--;
107 mutex_exit(&tq->tq_lock);
109 mutex_enter(&tq->tq_lock);
112 if (tq->tq_maxalloc_wait)
113 cv_signal(&tq->tq_maxalloc_cv);
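
The hits at lines 59-113 are the task_t allocator pair. task_alloc() pops a cached entry off tq_freelist as long as at least tq_minalloc allocations are outstanding; once tq_nalloc reaches tq_maxalloc, a caller that may sleep throttles itself with a one-second cv_timedwait() on tq_maxalloc_cv (tq_maxalloc_wait counts the sleepers so the free path knows to signal), and the heap allocation itself runs with tq_lock dropped. task_free() refills the freelist until tq_minalloc entries are cached, frees the rest outside the lock, and signals one throttled allocator. Below is a minimal, self-contained sketch of that cache-plus-backpressure pattern; it uses POSIX threads and malloc/free in place of the Solaris mutex/cv/kmem primitives, and every pool_* name is invented for illustration rather than taken from this file.

    /*
     * Hypothetical stand-in for the task_alloc()/task_free() pattern above:
     * a bounded object cache with a timed-wait throttle.  pthreads and
     * malloc/free replace the Solaris primitives; all pool_* names are
     * invented for illustration.
     */
    #include <pthread.h>
    #include <stdlib.h>
    #include <time.h>

    typedef struct entry {
        struct entry *next;             /* freelist link, like task_next */
    } entry_t;

    typedef struct pool {
        pthread_mutex_t lock;           /* analog of tq_lock */
        pthread_cond_t  maxalloc_cv;    /* analog of tq_maxalloc_cv */
        entry_t        *freelist;       /* analog of tq_freelist */
        int             nalloc;         /* analog of tq_nalloc */
        int             minalloc;       /* analog of tq_minalloc */
        int             maxalloc;       /* analog of tq_maxalloc */
        int             maxalloc_wait;  /* analog of tq_maxalloc_wait */
    } pool_t;

    /* Called with p->lock held; may drop and re-take it, like task_alloc(). */
    static entry_t *
    pool_alloc(pool_t *p, int can_sleep)
    {
        entry_t *e;

    again:
        if ((e = p->freelist) != NULL && p->nalloc >= p->minalloc) {
            p->freelist = e->next;      /* fast path: reuse a cached entry */
            return (e);
        }
        if (p->nalloc >= p->maxalloc) {
            if (!can_sleep)
                return (NULL);
            /* At the cap: wait up to a second; pool_free() may wake us early. */
            struct timespec deadline;
            clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_sec += 1;
            p->maxalloc_wait++;
            int rv = pthread_cond_timedwait(&p->maxalloc_cv, &p->lock, &deadline);
            p->maxalloc_wait--;
            if (rv == 0)
                goto again;             /* signaled: retry the fast path */
        }
        pthread_mutex_unlock(&p->lock); /* allocate without holding the lock */
        e = malloc(sizeof (entry_t));
        pthread_mutex_lock(&p->lock);
        if (e != NULL)
            p->nalloc++;
        return (e);
    }

    /* Called with p->lock held, like task_free(). */
    static void
    pool_free(pool_t *p, entry_t *e)
    {
        if (p->nalloc <= p->minalloc) {
            e->next = p->freelist;      /* keep a small cache of entries */
            p->freelist = e;
        } else {
            p->nalloc--;
            pthread_mutex_unlock(&p->lock);
            free(e);                    /* heavy work done without the lock */
            pthread_mutex_lock(&p->lock);
        }
        if (p->maxalloc_wait)
            pthread_cond_signal(&p->maxalloc_cv); /* wake one throttled caller */
    }
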
117 taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t tqflags)
126 mutex_enter(&tq->tq_lock);
127 ASSERT(tq->tq_flags & TASKQ_ACTIVE);
128 if ((t = task_alloc(tq, tqflags)) == NULL) {
129 mutex_exit(&tq->tq_lock);
133 t->task_next = tq->tq_task.task_next;
134 t->task_prev = &tq->tq_task;
136 t->task_next = &tq->tq_task;
137 t->task_prev = tq->tq_task.task_prev;
143 cv_signal(&tq->tq_dispatch_cv);
144 mutex_exit(&tq->tq_lock);
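
taskq_dispatch() (lines 117-144) runs under tq_lock: it allocates a task_t, links it into the queue, and signals tq_dispatch_cv so an idle worker wakes; if task_alloc() returns NULL the dispatch fails and the lock is dropped (lines 128-129). The queue is a circular doubly linked list whose head, tq_task, is a sentinel, which is why the two linking branches at lines 133-134 and 136-137 differ only in the neighbors they splice between (front of the queue versus its tail). A small hedged sketch of that sentinel-list shape, with invented names:

    /*
     * Hypothetical sketch of the list manipulation behind lines 133-137 and
     * 220-221: a circular doubly linked list whose head is a sentinel node,
     * so an empty queue is one whose links point back at the head.  Names
     * here are invented.
     */
    typedef struct node {
        struct node *next;
        struct node *prev;
    } node_t;

    static void
    list_init(node_t *sentinel)                     /* the lines 220-221 shape */
    {
        sentinel->next = sentinel;
        sentinel->prev = sentinel;
    }

    static void
    list_insert_head(node_t *sentinel, node_t *n)   /* lines 133-134: front of queue */
    {
        n->next = sentinel->next;
        n->prev = sentinel;
        n->next->prev = n;
        n->prev->next = n;
    }

    static void
    list_insert_tail(node_t *sentinel, node_t *n)   /* lines 136-137: tail of queue */
    {
        n->next = sentinel;
        n->prev = sentinel->prev;
        n->next->prev = n;
        n->prev->next = n;
    }

    static void
    list_remove(node_t *n)          /* what the worker does before running a task */
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }
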
149 taskq_wait(taskq_t *tq)
151 mutex_enter(&tq->tq_lock);
152 while (tq->tq_task.task_next != &tq->tq_task || tq->tq_active != 0)
153 cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
154 mutex_exit(&tq->tq_lock);
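
taskq_wait() (lines 149-154) is the drain barrier: it sleeps on tq_wait_cv until the task list is empty (tq_task.task_next points back at the sentinel) and tq_active is zero, i.e. no worker is still running a task. A usage sketch follows; the header name, the TQ_SLEEP flag, and the dispatch return convention are assumptions based on the conventional illumos taskq interface, since only the function and parameter names at lines 117 and 149 appear in the listing.

    /*
     * Usage sketch only: fanning work out and using taskq_wait() as a barrier.
     * The header name, the TQ_SLEEP flag, and the ignored dispatch return are
     * assumptions based on the usual illumos taskq interface.
     */
    #include <sys/taskq.h>                  /* assumed header for this API */

    static void
    process_one(void *arg)
    {
        /* ... per-item work, runs on one of the queue's worker threads ... */
    }

    static void
    process_all(taskq_t *tq, void **items, int n)
    {
        int i;

        for (i = 0; i < n; i++)
            (void) taskq_dispatch(tq, process_one, items[i], TQ_SLEEP);

        /*
         * Blocks until the task list is empty and no worker is still running
         * a task (the line 152 condition), i.e. every item above has finished.
         */
        taskq_wait(tq);
    }
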
160 taskq_t *tq = arg;
163 mutex_enter(&tq->tq_lock);
164 while (tq->tq_flags & TASKQ_ACTIVE) {
165 if ((t = tq->tq_task.task_next) == &tq->tq_task) {
166 if (--tq->tq_active == 0)
167 cv_broadcast(&tq->tq_wait_cv);
168 cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock);
169 tq->tq_active++;
174 mutex_exit(&tq->tq_lock);
176 rw_enter(&tq->tq_threadlock, RW_READER);
178 rw_exit(&tq->tq_threadlock);
180 mutex_enter(&tq->tq_lock);
181 task_free(tq, t);
183 tq->tq_nthreads--;
184 cv_broadcast(&tq->tq_wait_cv);
185 mutex_exit(&tq->tq_lock);
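
Lines 160-185 are the worker loop, taskq_thread(). A worker scans the queue with tq_lock held; when the list is empty it decrements tq_active, broadcasts tq_wait_cv if it was the last active worker (which is what lets taskq_wait() return), and sleeps on tq_dispatch_cv. When there is work it unlinks the task, drops the lock, runs the task function under tq_threadlock as a reader (lines 176-178), then re-takes the lock and recycles the task_t through task_free(). Once TASKQ_ACTIVE is cleared the loop exits, tq_nthreads drops, and tq_wait_cv is broadcast so taskq_destroy() can observe the thread count reaching zero. A hedged pthread rendering of the same loop, with invented workq_* names:

    /*
     * Hypothetical pthread rendering of the worker loop at lines 160-185.
     * The workq_* names are invented; the job list is the same sentinel-based
     * circular list sketched earlier, and an explicit shutting_down field
     * stands in for the cleared TASKQ_ACTIVE flag.
     */
    #include <pthread.h>
    #include <stdlib.h>

    typedef struct job {
        struct job *next, *prev;        /* queue links, like task_next/task_prev */
        void      (*func)(void *);      /* like task_func */
        void       *arg;                /* like task_arg */
    } job_t;

    typedef struct workq {
        pthread_mutex_t lock;           /* analog of tq_lock */
        pthread_cond_t  dispatch_cv;    /* analog of tq_dispatch_cv */
        pthread_cond_t  wait_cv;        /* analog of tq_wait_cv */
        job_t           head;           /* sentinel, analog of tq_task */
        int             active;         /* workers not idle, analog of tq_active */
        int             nthreads;       /* live workers, analog of tq_nthreads */
        int             shutting_down;  /* inverse of TASKQ_ACTIVE */
    } workq_t;

    static void *
    worker(void *arg)
    {
        workq_t *wq = arg;
        job_t *j;

        pthread_mutex_lock(&wq->lock);
        while (!wq->shutting_down) {
            if ((j = wq->head.next) == &wq->head) {
                /* Queue empty: go idle; the last to do so wakes any waiters. */
                if (--wq->active == 0)
                    pthread_cond_broadcast(&wq->wait_cv);
                pthread_cond_wait(&wq->dispatch_cv, &wq->lock);
                wq->active++;
                continue;
            }
            /* Unlink the job and run it without holding the queue lock. */
            j->prev->next = j->next;
            j->next->prev = j->prev;
            pthread_mutex_unlock(&wq->lock);

            j->func(j->arg);

            pthread_mutex_lock(&wq->lock);
            free(j);                    /* the original recycles via task_free() */
        }
        wq->nthreads--;
        pthread_cond_broadcast(&wq->wait_cv);   /* lets the destroy path proceed */
        pthread_mutex_unlock(&wq->lock);
        return (NULL);
    }
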
194 taskq_t *tq = kmem_zalloc(sizeof (taskq_t), KM_SLEEP);
210 rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
211 mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
212 cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
213 cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);
214 cv_init(&tq->tq_maxalloc_cv, NULL, CV_DEFAULT, NULL);
215 tq->tq_flags = flags | TASKQ_ACTIVE;
216 tq->tq_active = nthreads;
217 tq->tq_nthreads = nthreads;
218 tq->tq_minalloc = minalloc;
219 tq->tq_maxalloc = maxalloc;
220 tq->tq_task.task_next = &tq->tq_task;
221 tq->tq_task.task_prev = &tq->tq_task;
222 tq->tq_threadlist = kmem_alloc(nthreads * sizeof (thread_t), KM_SLEEP);
225 mutex_enter(&tq->tq_lock);
227 task_free(tq, task_alloc(tq, KM_SLEEP));
228 mutex_exit(&tq->tq_lock);
233 tq, THR_BOUND, &tq->tq_threadlist[t]);
235 return (tq);
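
The constructor body matched from line 194 on zero-allocates the taskq_t, initializes the rwlock, mutex, and three condition variables, marks the queue TASKQ_ACTIVE, seeds tq_active and tq_nthreads with the thread count, points the tq_task sentinel at itself, optionally prepopulates the free list with task_free(task_alloc(tq, KM_SLEEP)) pairs under tq_lock (line 227), and creates the bound worker threads into tq_threadlist (line 233). A usage sketch follows; the taskq_create() parameter order, the TASKQ_PREPOPULATE flag, minclsyspri, and the header are the conventional interface and should be read as assumptions, since none of them appear in the matched lines.

    /*
     * Usage sketch only: constructing a queue with the constructor whose body
     * is matched above.  The taskq_create() parameter order (name, nthreads,
     * pri, minalloc, maxalloc, flags), the TASKQ_PREPOPULATE flag, minclsyspri,
     * and the header are assumptions, not part of the listing.
     */
    #include <sys/taskq.h>                  /* assumed header */

    static taskq_t *
    example_tq_create(void)
    {
        /*
         * 8 worker threads; keep at least 32 task_t structures cached
         * (tq_minalloc) and throttle dispatchers once 128 are outstanding
         * (tq_maxalloc).  TASKQ_PREPOPULATE asks the constructor to pre-fill
         * the free list, which is the task_free(task_alloc()) loop at line
         * 227.  The matching teardown is taskq_destroy().
         */
        return (taskq_create("example_tq", 8, minclsyspri, 32, 128,
            TASKQ_PREPOPULATE));
    }
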
239 taskq_destroy(taskq_t *tq)
242 int nthreads = tq->tq_nthreads;
244 taskq_wait(tq);
246 mutex_enter(&tq->tq_lock);
248 tq->tq_flags &= ~TASKQ_ACTIVE;
249 cv_broadcast(&tq->tq_dispatch_cv);
251 while (tq->tq_nthreads != 0)
252 cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
254 tq->tq_minalloc = 0;
255 while (tq->tq_nalloc != 0) {
256 ASSERT(tq->tq_freelist != NULL);
257 task_free(tq, task_alloc(tq, KM_SLEEP));
260 mutex_exit(&tq->tq_lock);
263 (void) thr_join(tq->tq_threadlist[t], NULL, NULL);
265 kmem_free(tq->tq_threadlist, nthreads * sizeof (thread_t));
267 rw_destroy(&tq->tq_threadlock);
268 mutex_destroy(&tq->tq_lock);
269 cv_destroy(&tq->tq_dispatch_cv);
270 cv_destroy(&tq->tq_wait_cv);
271 cv_destroy(&tq->tq_maxalloc_cv);
273 kmem_free(tq, sizeof (taskq_t));
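
taskq_destroy() (lines 239-273) first drains the queue with taskq_wait(), then clears TASKQ_ACTIVE and broadcasts tq_dispatch_cv so every idle worker wakes, sees the flag down, and exits; it waits for tq_nthreads to reach zero, releases the cached task_t structures by dropping tq_minalloc to zero and running task_free(task_alloc()) pairs until tq_nalloc is zero, joins the worker threads, and tears down the synchronization primitives. The sketch below continues the hypothetical workq_t from the worker-loop sketch above and shows only the shutdown handshake; it assumes the queue has already been drained, as the original's taskq_wait() call at line 244 guarantees.

    /*
     * Hypothetical continuation of the workq_t sketch above: the shutdown
     * handshake mirroring lines 246-273.  It assumes the queue is already
     * drained (the original calls taskq_wait() first, line 244) and that
     * 'threads' was filled in at creation time.
     */
    static void
    workq_destroy(workq_t *wq, pthread_t *threads, int nthreads_created)
    {
        int i;

        pthread_mutex_lock(&wq->lock);

        wq->shutting_down = 1;                      /* analog of clearing TASKQ_ACTIVE */
        pthread_cond_broadcast(&wq->dispatch_cv);   /* kick every idle worker */

        while (wq->nthreads != 0)                   /* workers drop this on exit */
            pthread_cond_wait(&wq->wait_cv, &wq->lock);

        pthread_mutex_unlock(&wq->lock);

        for (i = 0; i < nthreads_created; i++)      /* reap the worker threads */
            (void) pthread_join(threads[i], NULL);

        pthread_cond_destroy(&wq->dispatch_cv);
        pthread_cond_destroy(&wq->wait_cv);
        pthread_mutex_destroy(&wq->lock);
        free(wq);
    }
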
277 taskq_member(taskq_t *tq, void *t)
284 for (i = 0; i < tq->tq_nthreads; i++)
285 if (tq->tq_threadlist[i] == (thread_t)(uintptr_t)t)
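
taskq_member() (lines 277-285) answers whether a given thread is one of tq's workers by scanning tq_threadlist and comparing each entry against the (thread_t) cast of its argument. Its usual role is in assertions: a worker must not block on its own queue, because taskq_wait() can never see tq_active reach zero while the caller itself is an active worker. A usage sketch, assuming taskq_member() returns nonzero for members and that callers pass a thr_self()-derived handle (the listing shows only the comparison loop):

    /*
     * Usage sketch only: the assertion-style use of taskq_member().  It assumes
     * taskq_member() returns nonzero for a worker thread and that callers pass
     * a thr_self()-derived handle, matching the (thread_t)(uintptr_t) cast at
     * line 285; the header names are assumptions as well.
     */
    #include <assert.h>
    #include <stdint.h>                     /* uintptr_t */
    #include <thread.h>                     /* thr_self(), Solaris threads */
    #include <sys/taskq.h>                  /* assumed header */

    static void
    flush_sync(taskq_t *tq)
    {
        /*
         * Waiting on the queue from inside one of its own workers would never
         * return: the caller itself keeps tq_active nonzero.  So assert the
         * calling thread is not on tq_threadlist before blocking.
         */
        assert(!taskq_member(tq, (void *)(uintptr_t)thr_self()));
        taskq_wait(tq);
    }
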