Lines Matching refs:queue

24 #include "xge-queue.h"
33 * user-defined portion of the queue item.
41 * __queue_consume - (Lockless) dequeue an item from the specified queue.
43 * @queue: Event queue.
47 __queue_consume(xge_queue_t *queue, int data_max_size, xge_queue_item_t *item)
52 if (xge_list_is_empty(&queue->list_head))
55 elem = (xge_queue_item_t *)queue->list_head.next;
61 if (queue->head_ptr == elem) {
62 queue->head_ptr = (char *)queue->head_ptr + real_size;
68 (u64)(ulong_t)queue->start_ptr,
69 (u64)(ulong_t)queue->head_ptr,
70 (u64)(ulong_t)queue->tail_ptr,
71 (u64)(ulong_t)queue->end_ptr,
74 } else if ((char *)queue->tail_ptr - real_size == (char *)elem) {
75 queue->tail_ptr = (char *)queue->tail_ptr - real_size;
81 (u64)(ulong_t)queue->start_ptr,
82 (u64)(ulong_t)queue->head_ptr,
83 (u64)(ulong_t)queue->tail_ptr,
84 (u64)(ulong_t)queue->end_ptr,
93 (u64)(ulong_t)queue->start_ptr,
94 (u64)(ulong_t)queue->head_ptr,
95 (u64)(ulong_t)queue->tail_ptr,
96 (u64)(ulong_t)queue->end_ptr,
100 xge_assert(queue->tail_ptr >= queue->head_ptr);
101 xge_assert(queue->tail_ptr >= queue->start_ptr &&
102 queue->tail_ptr <= queue->end_ptr);
103 xge_assert(queue->head_ptr >= queue->start_ptr &&
104 queue->head_ptr < queue->end_ptr);
109 if (xge_list_is_empty(&queue->list_head)) {
111 queue->head_ptr = queue->tail_ptr = queue->start_ptr;
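
The excerpt above shows the reclamation rule at the heart of __queue_consume: items occupy one contiguous buffer between head_ptr and tail_ptr, space is reclaimed only when the departing item is head-most (advance head_ptr) or tail-most (retract tail_ptr), interior items simply leave a hole, and both pointers snap back to start_ptr once the list drains. A minimal standalone model of that rule (illustrative names, no locking or item copy-out, not the driver's API):

    #include <assert.h>
    #include <stddef.h>

    /* Toy model of the queue arena: live items occupy [head, tail)
     * inside a fixed buffer; only head-most and tail-most items
     * actually free space when consumed. */
    struct arena { char *start, *end, *head, *tail; int items; };

    static void consume(struct arena *a, char *elem, size_t sz)
    {
        if (elem == a->head)
            a->head += sz;                /* oldest item: advance head */
        else if (a->tail - sz == elem)
            a->tail -= sz;                /* newest item: retract tail */
        /* else: interior item leaves a hole until head/tail sweep by */
        if (--a->items == 0)
            a->head = a->tail = a->start; /* drained: reset both ends  */
        assert(a->tail >= a->head);
    }

    int main(void)
    {
        static char buf[64];
        struct arena a = { buf, buf + 64, buf, buf + 48, 3 }; /* 3 x 16B */
        consume(&a, buf + 16, 16);   /* interior: hole, no motion  */
        consume(&a, buf, 16);        /* head item: head -> buf+16  */
        consume(&a, buf + 32, 16);   /* last item: full reset      */
        assert(a.head == buf && a.tail == buf);
        return 0;
    }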
118 * into the specified queue.
127 * the new queue item (see xge_queue_item_t{}). Upon return
142 xge_queue_t *queue = (xge_queue_t *)queueh;
149 xge_os_spin_lock_irq(&queue->lock, flags);
151 if (is_critical && !queue->has_critical_event) {
158 while (__queue_consume(queue,
165 if ((char *)queue->tail_ptr + real_size <= (char *)queue->end_ptr) {
166 elem = (xge_queue_item_t *) queue->tail_ptr;
167 queue->tail_ptr = (void *)((char *)queue->tail_ptr + real_size);
173 (u64)(ulong_t)queue->start_ptr,
174 (u64)(ulong_t)queue->head_ptr,
175 (u64)(ulong_t)queue->tail_ptr,
176 (u64)(ulong_t)queue->end_ptr,
179 } else if ((char *)queue->head_ptr - real_size >=
180 (char *)queue->start_ptr) {
181 elem = (xge_queue_item_t *) ((char *)queue->head_ptr - real_size);
182 queue->head_ptr = elem;
188 (u64)(ulong_t)queue->start_ptr,
189 (u64)(ulong_t)queue->head_ptr,
190 (u64)(ulong_t)queue->tail_ptr,
191 (u64)(ulong_t)queue->end_ptr,
196 if (queue->pages_current >= queue->pages_max) {
197 xge_os_spin_unlock_irq(&queue->lock, flags);
201 if (queue->has_critical_event) {
202 xge_os_spin_unlock_irq(&queue->lock, flags);
209 xge_os_spin_unlock_irq(&queue->lock, flags);
215 xge_assert(queue->tail_ptr >= queue->head_ptr);
216 xge_assert(queue->tail_ptr >= queue->start_ptr &&
217 queue->tail_ptr <= queue->end_ptr);
218 xge_assert(queue->head_ptr >= queue->start_ptr &&
219 queue->head_ptr < queue->end_ptr);
224 queue->has_critical_event = 1;
227 xge_list_insert_before(&elem->item, &queue->list_head);
228 xge_os_spin_unlock_irq(&queue->lock, flags);
231 queue->queued_func(queue->queued_data, event_type);
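
The produce path above carves space in a fixed order: above tail_ptr first, then in the gap below head_ptr, and otherwise the queue must grow (bounded by pages_max) or the event is dropped; critical events additionally pre-empt queued non-critical ones via the __queue_consume loop at line 158, and queued_func is invoked after insertion. A hedged sketch of just the carve order (names are illustrative, not the driver's API):

    #include <assert.h>
    #include <stddef.h>

    /* Toy model of the produce-side carve order: space above the tail
     * first, then the gap below the head, else grow-or-drop. */
    struct arena { char *start, *end, *head, *tail; };

    static void *carve(struct arena *a, size_t sz)
    {
        if ((size_t)(a->end - a->tail) >= sz) {   /* room above tail */
            void *p = a->tail;
            a->tail += sz;
            return p;
        }
        if ((size_t)(a->head - a->start) >= sz) { /* room below head */
            a->head -= sz;
            return a->head;
        }
        return NULL;          /* caller must grow the queue or drop */
    }

    int main(void)
    {
        static char buf[48];
        struct arena a = { buf, buf + 48, buf + 16, buf + 48 };
        assert(carve(&a, 16) == buf);    /* carved below old head */
        assert(carve(&a, 16) == NULL);   /* arena full            */
        return 0;
    }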
238 * xge_queue_create - Create protected first-in-first-out queue.
242 * time of queue creation.
243 * @pages_max: Max number of pages that can be allocated in the queue.
245 * added to the queue.
248 * Create a protected (FIFO) queue.
259 xge_queue_t *queue;
261 if ((queue = (xge_queue_t *) xge_os_malloc(pdev, sizeof(xge_queue_t))) == NULL)
264 queue->queued_func = queued;
265 queue->queued_data = queued_data;
266 queue->pdev = pdev;
267 queue->irqh = irqh;
268 queue->pages_current = pages_initial;
269 queue->start_ptr = xge_os_malloc(pdev, queue->pages_current *
271 if (queue->start_ptr == NULL) {
272 xge_os_free(pdev, queue, sizeof(xge_queue_t));
275 queue->head_ptr = queue->tail_ptr = queue->start_ptr;
276 queue->end_ptr = (char *)queue->start_ptr +
277 queue->pages_current * XGE_QUEUE_BUF_SIZE;
278 xge_os_spin_lock_init_irq(&queue->lock, irqh);
279 queue->pages_initial = pages_initial;
280 queue->pages_max = pages_max;
281 xge_list_init(&queue->list_head);
283 return queue;
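
Creation allocates the control block plus an initial arena of pages_initial pages of XGE_QUEUE_BUF_SIZE bytes each, points head_ptr and tail_ptr at start_ptr, and unwinds cleanly when the arena allocation fails. A simplified sketch of that shape (BUF_SIZE is a stand-in value; the real constant comes from xge-queue.h):

    #include <stdlib.h>

    #define BUF_SIZE 4096        /* stand-in for XGE_QUEUE_BUF_SIZE */

    struct queue {
        char *start, *end, *head, *tail;
        int pages_current, pages_max;
    };

    static struct queue *queue_create(int pages_initial, int pages_max)
    {
        struct queue *q = malloc(sizeof(*q));
        if (q == NULL)
            return NULL;
        q->pages_current = pages_initial;
        q->pages_max = pages_max;
        q->start = malloc((size_t)pages_initial * BUF_SIZE);
        if (q->start == NULL) {
            free(q);             /* unwind on partial failure */
            return NULL;
        }
        q->head = q->tail = q->start;
        q->end = q->start + (size_t)pages_initial * BUF_SIZE;
        return q;
    }

    int main(void)
    {
        struct queue *q = queue_create(1, 4);
        if (q) { free(q->start); free(q); }
        return 0;
    }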
296 xge_queue_t *queue = (xge_queue_t *)queueh;
297 xge_os_spin_lock_destroy_irq(&queue->lock, queue->irqh);
298 if (!xge_list_is_empty(&queue->list_head)) {
299 xge_debug_queue(XGE_ERR, "destroying non-empty queue 0x"
300 XGE_OS_LLXFMT, (u64)(ulong_t)queue);
302 xge_os_free(queue->pdev, queue->start_ptr, queue->pages_current *
305 xge_os_free(queue->pdev, queue, sizeof(xge_queue_t));
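
Destroy warns, but still proceeds, when events remain queued, then releases both allocations with the sizes recorded at creation (xge_os_free takes an explicit size). A toy equivalent using plain free():

    #include <stdio.h>
    #include <stdlib.h>

    struct queue { char *start; int pages_current; int items; };

    /* Warn (but proceed) if events are still queued, then release
     * the arena and the control block. */
    static void queue_destroy(struct queue *q)
    {
        if (q->items != 0)
            fprintf(stderr, "destroying non-empty queue %p\n", (void *)q);
        free(q->start);
        free(q);
    }

    int main(void)
    {
        struct queue *q = malloc(sizeof(*q));
        if (q == NULL)
            return 1;
        q->start = malloc(4096);
        if (q->start == NULL) { free(q); return 1; }
        q->pages_current = 1;
        q->items = 0;
        queue_destroy(q);
        return 0;
    }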
309 * __io_queue_grow - Dynamically increases the size of the queue.
312 * This function is called when there is no slot available in the queue
314 * Note that the queue cannot grow beyond the max size specified for the
315 * queue.
323 xge_queue_t *queue = (xge_queue_t *)queueh;
328 xge_debug_queue(XGE_TRACE, "queue 0x"XGE_OS_LLXFMT":%d is growing",
329 (u64)(ulong_t)queue, queue->pages_current);
331 newbuf = xge_os_malloc(queue->pdev,
332 (queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE);
336 xge_os_memcpy(newbuf, queue->start_ptr,
337 queue->pages_current * XGE_QUEUE_BUF_SIZE);
338 oldbuf = queue->start_ptr;
340 /* adjust queue sizes */
341 queue->start_ptr = newbuf;
342 queue->end_ptr = (char *)newbuf +
343 (queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE;
344 queue->tail_ptr = (char *)newbuf + ((char *)queue->tail_ptr -
346 queue->head_ptr = (char *)newbuf + ((char *)queue->head_ptr -
348 xge_assert(!xge_list_is_empty(&queue->list_head));
349 queue->list_head.next = (xge_list_t *) (void *)((char *)newbuf +
350 ((char *)queue->list_head.next - (char *)oldbuf));
351 queue->list_head.prev = (xge_list_t *) (void *)((char *)newbuf +
352 ((char *)queue->list_head.prev - (char *)oldbuf));
353 /* adjust queue list */
354 xge_list_for_each(item, &queue->list_head) {
356 if (elem->item.next != &queue->list_head) {
361 if (elem->item.prev != &queue->list_head) {
367 xge_os_free(queue->pdev, oldbuf,
368 queue->pages_current * XGE_QUEUE_BUF_SIZE);
369 queue->pages_current++;
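
Growing the queue copies the arena into a buffer one page larger and then rebases every embedded pointer (head_ptr, tail_ptr, the list head's links, and each item's next/prev) by the newbuf-minus-oldbuf offset. The guards at lines 356 and 361 skip links that point back at &queue->list_head, which lives outside the arena, and oldbuf is freed only after the rebase, so its old pointer values are used purely as offsets. A standalone sketch of the rebase technique on an intrusive two-node ring (simplified: no external list head):

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    struct node { struct node *next, *prev; };

    /* Rebase one pointer targeting the old buffer to the same
     * offset inside the new buffer. */
    #define REBASE(p, oldb, newb) \
        ((void *)((char *)(newb) + ((char *)(p) - (char *)(oldb))))

    int main(void)
    {
        size_t oldsz = 2 * sizeof(struct node);
        char *oldbuf = malloc(oldsz), *newbuf = malloc(2 * oldsz);
        if (!oldbuf || !newbuf)
            return 1;

        /* Two nodes linked in a ring inside oldbuf. */
        struct node *a = (struct node *)oldbuf;
        struct node *b = (struct node *)(oldbuf + sizeof(struct node));
        a->next = b; a->prev = b; b->next = a; b->prev = a;

        memcpy(newbuf, oldbuf, oldsz);   /* move the arena */

        /* Walk the copy and rebase every embedded pointer. */
        struct node *n = (struct node *)newbuf;
        for (int i = 0; i < 2; i++, n++) {
            n->next = REBASE(n->next, oldbuf, newbuf);
            n->prev = REBASE(n->prev, oldbuf, newbuf);
        }
        free(oldbuf);                    /* only after the rebase */

        struct node *na = (struct node *)newbuf;
        assert(na->next == na->prev &&
               (char *)na->next == newbuf + sizeof(struct node));
        free(newbuf);
        return 0;
    }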
375 * xge_queue_consume - Dequeue an item from the specified queue.
381 * Dequeue an item from the queue. The caller is required to provide
387 * is too small to accommodate an item from the queue.
394 xge_queue_t *queue = (xge_queue_t *)queueh;
398 xge_os_spin_lock_irq(&queue->lock, flags);
399 status = __queue_consume(queue, data_max_size, item);
400 xge_os_spin_unlock_irq(&queue->lock, flags);
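
The public consume is just the lockless core bracketed by the queue's IRQ spinlock. A sketch of that wrapper shape, with a pthread mutex standing in for xge_os_spin_lock_irq and a trivial stand-in for the core (build with -lpthread):

    #include <pthread.h>

    /* Stand-ins for the lockless core and its status codes. */
    enum status { QUEUE_OK, QUEUE_IS_EMPTY };

    struct queue {
        pthread_mutex_t lock;
        int items;
    };

    static enum status consume_locked(struct queue *q)
    {
        return q->items ? (q->items--, QUEUE_OK) : QUEUE_IS_EMPTY;
    }

    /* Public entry point: take the lock, run the lockless core,
     * release.  Mirrors the shape of xge_queue_consume(). */
    static enum status queue_consume(struct queue *q)
    {
        pthread_mutex_lock(&q->lock);
        enum status s = consume_locked(q);
        pthread_mutex_unlock(&q->lock);
        return s;
    }

    int main(void)
    {
        struct queue q = { PTHREAD_MUTEX_INITIALIZER, 1 };
        return queue_consume(&q) == QUEUE_OK ? 0 : 1;
    }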
407 * xge_queue_flush - Flush, or empty, the queue.
410 * Flush the queue, i.e. make it empty by consuming all events
421 /* flush queue by consuming all enqueued items */
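
Flushing is implemented as consume-until-empty: repeatedly dequeue into a scratch buffer and discard until the queue reports no more items. A sketch of that drain loop (status and function names are illustrative):

    /* Drain loop: flushing is just consuming until the queue reports
     * empty; each item lands in (and is discarded from) item_buf. */
    enum status { QUEUE_OK, QUEUE_IS_EMPTY };

    struct queue { int items; };

    static enum status queue_consume(struct queue *q, char *buf, int len)
    {
        (void)buf; (void)len;
        return q->items ? (q->items--, QUEUE_OK) : QUEUE_IS_EMPTY;
    }

    static void queue_flush(struct queue *q)
    {
        char item_buf[128];        /* scratch space, contents ignored */
        while (queue_consume(q, item_buf, sizeof item_buf) == QUEUE_OK)
            ;                      /* keep consuming until empty */
    }

    int main(void)
    {
        struct queue q = { 3 };
        queue_flush(&q);
        return q.items;            /* 0 on success */
    }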
433 * __queue_get_reset_critical - Check for critical events in the queue,
436 * Check for critical event(s) in the queue, and reset the
438 * Returns: 1 - if the queue contains at least one critical event.
439 * 0 - if there are no critical events in the queue.
442 xge_queue_t* queue = (xge_queue_t*)qh;
443 int c = queue->has_critical_event;
445 queue->has_critical_event = 0;
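
__queue_get_reset_critical is a read-then-clear of the has_critical_event flag; the excerpt shows no lock around it, so any needed atomicity has to come from the caller's context. A minimal sketch of the pattern:

    #include <assert.h>

    struct queue { int has_critical_event; };

    /* Read-then-clear: report whether a critical event was queued
     * and reset the flag in the same call.  Not atomic by itself. */
    static int get_reset_critical(struct queue *q)
    {
        int c = q->has_critical_event;
        q->has_critical_event = 0;
        return c;
    }

    int main(void)
    {
        struct queue q = { 1 };
        assert(get_reset_critical(&q) == 1);
        assert(get_reset_critical(&q) == 0);  /* flag now cleared */
        return 0;
    }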