Lines Matching defs:queue_info
55 apr_status_t ap_queue_info_create(fd_queue_info_t **queue_info,
77 *queue_info = qi;
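The two constructor matches show only the first parameter and the hand-back of the new object through *queue_info, so the sketch below fills in the rest by assumption: a struct layout reconstructed from the fields the other matches touch (idlers, idlers_mutex, wait_for_idler, recycled_pools, terminated), a single pool argument standing in for the truncated parameter list at line 55, and apr_pcalloc plus the usual APR mutex/condvar constructors as plausible setup. It is an illustration, not the file's actual code.

#include <apr_pools.h>
#include <apr_atomic.h>
#include <apr_thread_mutex.h>
#include <apr_thread_cond.h>

/* Assumed layout, reconstructed from the matched lines; the real
 * structs in fdqueue.c carry additional fields. */
struct recycled_pool {
    apr_pool_t *pool;
    struct recycled_pool *next;
};

typedef struct fd_queue_info_t {
    apr_uint32_t idlers;
    apr_thread_mutex_t *idlers_mutex;
    apr_thread_cond_t *wait_for_idler;
    struct recycled_pool *recycled_pools;
    int terminated;
} fd_queue_info_t;

/* Sketch only: the real parameter list is truncated in the match at
 * line 55, so the single pool argument here is illustrative. */
static apr_status_t queue_info_create_sketch(fd_queue_info_t **queue_info,
                                             apr_pool_t *pool)
{
    apr_status_t rv;
    fd_queue_info_t *qi = apr_pcalloc(pool, sizeof(*qi));

    rv = apr_thread_mutex_create(&qi->idlers_mutex,
                                 APR_THREAD_MUTEX_DEFAULT, pool);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    rv = apr_thread_cond_create(&qi->wait_for_idler, pool);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    *queue_info = qi;   /* the hand-back matched at line 77 */
    return APR_SUCCESS;
}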
82 apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
88 * it into the queue_info's list of recycled pools
96 /* Save queue_info->recycled_pools in local variable next because
100 struct recycled_pool *next = queue_info->recycled_pools;
102 if (apr_atomic_casptr((void*)&(queue_info->recycled_pools),
111 if (apr_atomic_inc32(&queue_info->idlers) == 0) {
112 rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
116 rv = apr_thread_cond_signal(queue_info->wait_for_idler);
118 apr_thread_mutex_unlock(queue_info->idlers_mutex);
121 rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
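The matches from ap_queue_info_set_idle() show two independent mechanisms: a lock-free push of a recycled pool onto queue_info->recycled_pools with an apr_atomic_casptr() retry loop, and an atomic increment of queue_info->idlers that takes the mutex and signals the condition variable only when the previous count was zero. A standalone sketch of that sequence follows, reusing the assumed struct layout from the create sketch above; the pool_to_recycle parameter name, the node allocation, and the dropped error handling are assumptions.

/* Sketch of the push-and-signal pattern shown above, not the file's code. */
static void queue_info_set_idle_sketch(fd_queue_info_t *queue_info,
                                       apr_pool_t *pool_to_recycle)
{
    if (pool_to_recycle) {
        /* Assumption: the list node is allocated out of the pool being
         * recycled, so it vanishes when that pool is cleared. */
        struct recycled_pool *new_recycle =
            apr_palloc(pool_to_recycle, sizeof(*new_recycle));
        new_recycle->pool = pool_to_recycle;

        /* Lock-free push: re-read the head each iteration and retry
         * until the CAS installs our node as the new head. */
        for (;;) {
            struct recycled_pool *next = queue_info->recycled_pools;
            new_recycle->next = next;
            if (apr_atomic_casptr((void *)&queue_info->recycled_pools,
                                  new_recycle, next) == next) {
                break;
            }
        }
    }

    /* apr_atomic_inc32() returns the old value: 0 means this thread just
     * took the idler count from zero to one, so wake a waiter that may be
     * blocked in wait_for_idler(). */
    if (apr_atomic_inc32(&queue_info->idlers) == 0) {
        apr_thread_mutex_lock(queue_info->idlers_mutex);
        apr_thread_cond_signal(queue_info->wait_for_idler);
        apr_thread_mutex_unlock(queue_info->idlers_mutex);
    }
}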
130 apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
138 if (queue_info->idlers == 0) {
139 rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
159 * of queue_info->idlers above. It's possible
167 while (queue_info->idlers == 0) {
168 rv = apr_thread_cond_wait(queue_info->wait_for_idler,
169 queue_info->idlers_mutex);
172 rv2 = apr_thread_mutex_unlock(queue_info->idlers_mutex);
179 rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
186 apr_atomic_dec32(&(queue_info->idlers));
198 struct recycled_pool *first_pool = queue_info->recycled_pools;
202 if (apr_atomic_casptr((void*)&(queue_info->recycled_pools), first_pool->next,
209 if (queue_info->terminated) {
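The ap_queue_info_wait_for_idler() matches show the consuming side: an unlocked check of idlers, a mutex-protected re-check and condition wait (the race the truncated comment at line 159 is discussing), an atomic decrement once an idler has been claimed, a lock-free pop from recycled_pools using the same CAS loop as the push, and finally a check of the terminated flag. The sketch below strings those steps together using the assumed types from the create sketch; the recycled_pool out-parameter, the APR_EOF return on termination, and the missing error handling are assumptions.

/* Sketch of the wait/claim sequence shown above, not the file's code. */
static apr_status_t queue_info_wait_for_idler_sketch(fd_queue_info_t *queue_info,
                                                     apr_pool_t **recycled_pool)
{
    *recycled_pool = NULL;

    /* Cheap unlocked check first; the count is re-checked under the mutex
     * because this read can race with set_idle(). */
    if (queue_info->idlers == 0) {
        apr_thread_mutex_lock(queue_info->idlers_mutex);
        while (queue_info->idlers == 0) {
            apr_thread_cond_wait(queue_info->wait_for_idler,
                                 queue_info->idlers_mutex);
        }
        apr_thread_mutex_unlock(queue_info->idlers_mutex);
    }

    /* Claim one idler. */
    apr_atomic_dec32(&queue_info->idlers);

    /* Lock-free pop, mirroring the push: retry until the CAS swings the
     * head from first_pool to first_pool->next. */
    for (;;) {
        struct recycled_pool *first_pool = queue_info->recycled_pools;
        if (first_pool == NULL) {
            break;
        }
        if (apr_atomic_casptr((void *)&queue_info->recycled_pools,
                              first_pool->next, first_pool) == first_pool) {
            *recycled_pool = first_pool->pool;
            break;
        }
    }

    /* Termination check (APR_EOF here is an assumption). */
    if (queue_info->terminated) {
        return APR_EOF;
    }
    return APR_SUCCESS;
}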
217 apr_status_t ap_queue_info_term(fd_queue_info_t *queue_info)
220 rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
224 queue_info->terminated = 1;
225 apr_thread_cond_broadcast(queue_info->wait_for_idler);
226 return apr_thread_mutex_unlock(queue_info->idlers_mutex);
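The final matches cover ap_queue_info_term(): the terminated flag is set and the condition variable is broadcast while idlers_mutex is held, matching the locking discipline of the signal path in set_idle(), and the status of the final unlock is returned. The sketch below assembles those fragments into one function; only the error check after the lock is filled in by assumption.

/* Sketch of the termination routine assembled from the matches above. */
static apr_status_t queue_info_term_sketch(fd_queue_info_t *queue_info)
{
    apr_status_t rv;

    rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    /* Mark the queue terminated, wake every thread blocked on the
     * condition variable, and hand back the unlock status. */
    queue_info->terminated = 1;
    apr_thread_cond_broadcast(queue_info->wait_for_idler);
    return apr_thread_mutex_unlock(queue_info->idlers_mutex);
}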