2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * @(#)queue.h 8.5 (Berkeley) 8/20/94
36 * This file defines five types of data structures: singly-linked lists,
37 * lists, simple queues, tail queues, and circular queues.
39 * A singly-linked list is headed by a single forward pointer. The
40 * elements are singly linked for minimum space and pointer manipulation
41 * overhead at the expense of O(n) removal for arbitrary elements. New
42 * elements can be added to the list after an existing element or at the
43 * head of the list. Elements being removed from the head of the list
44 * should use the explicit macro for this purpose for optimum
45 * efficiency. A singly-linked list may only be traversed in the forward
46 * direction. Singly-linked lists are ideal for applications with large
47 * datasets and few or no removals or for implementing a LIFO queue.
49 * A list is headed by a single forward pointer (or an array of forward
50 * pointers for a hash table header). The elements are doubly linked
51 * so that an arbitrary element can be removed without a need to
52 * traverse the list. New elements can be added to the list before
53 * or after an existing element or at the head of the list. A list
54 * may only be traversed in the forward direction.
56 * A simple queue is headed by a pair of pointers, one the head of the
57 * list and the other to the tail of the list. The elements are singly
58 * linked to save space, so elements can only be removed from the
59 * head of the list. New elements can be added to the list after
60 * an existing element, at the head of the list, or at the end of the
61 * list. A simple queue may only be traversed in the forward direction.
63 * A tail queue is headed by a pair of pointers, one to the head of the
64 * list and the other to the tail of the list. The elements are doubly
65 * linked so that an arbitrary element can be removed without a need to
66 * traverse the list. New elements can be added to the list before or
67 * after an existing element, at the head of the list, or at the end of
68 * the list. A tail queue may be traversed in either direction.
70 * A circle queue is headed by a pair of pointers, one to the head of the
71 * list and the other to the tail of the list. The elements are doubly
72 * linked so that an arbitrary element can be removed without a need to
73 * traverse the list. New elements can be added to the list before or after
74 * an existing element, at the head of the list, or at the end of the list.
75 * A circle queue may be traversed in either direction, but has a more
76 * complex end of list detection.
78 * For details on the use of these macros, see the queue(3) manual page.
82 * Include the definition of NULL only on NetBSD because sys/null.h
83 * is not available elsewhere. This conditional makes the header
84 * portable and it can simply be dropped verbatim into any system.
85 * The caveat is that on other systems some other header
86 * must provide NULL before the macros can be used.
92 #if defined(QUEUEDEBUG)
94 # define QUEUEDEBUG_ABORT(...) panic(__VA_ARGS__)
97 # define QUEUEDEBUG_ABORT(...) err(1, __VA_ARGS__)
102 * Singly-linked List definitions.
104 #define SLIST_HEAD(name, type) \
106 struct type *slh_first; /* first element */ \
109 #define SLIST_HEAD_INITIALIZER(head) \
112 #define SLIST_ENTRY(type) \
114 struct type *sle_next; /* next element */ \
118 * Singly-linked List access methods.
/*
 * SLIST accessors. These are macros: each argument may be expanded
 * more than once, so pass side-effect-free expressions.
 */
120 #define SLIST_FIRST(head) ((head)->slh_first)
121 #define SLIST_END(head) NULL
122 #define SLIST_EMPTY(head) ((head)->slh_first == NULL)
123 #define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
/* Forward-only traversal; not safe if (var) is removed in the body. */
125 #define SLIST_FOREACH(var, head, field) \
126 for((var) = (head)->slh_first; \
127 (var) != SLIST_END(head); \
128 (var) = (var)->field.sle_next)
130 #define SLIST_FOREACH_SAFE(var, head, field, tvar) \
131 for ((var) = SLIST_FIRST((head)); \
132 (var) != SLIST_END(head) && \
133 ((tvar) = SLIST_NEXT((var), field), 1); \
137 * Singly-linked List functions.
/* Initialize an SLIST head to the empty list. */
139 #define SLIST_INIT(head) do { \
140 (head)->slh_first = SLIST_END(head); \
141 } while (/*CONSTCOND*/0)
/* Link 'elm' into the list immediately after 'slistelm'. */
143 #define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
144 (elm)->field.sle_next = (slistelm)->field.sle_next; \
145 (slistelm)->field.sle_next = (elm); \
146 } while (/*CONSTCOND*/0)
/* Push 'elm' onto the front of the list (O(1)). */
148 #define SLIST_INSERT_HEAD(head, elm, field) do { \
149 (elm)->field.sle_next = (head)->slh_first; \
150 (head)->slh_first = (elm); \
151 } while (/*CONSTCOND*/0)
/* Unlink the element following 'slistelm'; that element must exist. */
153 #define SLIST_REMOVE_AFTER(slistelm, field) do { \
154 (slistelm)->field.sle_next = \
155 SLIST_NEXT(SLIST_NEXT((slistelm), field), field); \
156 } while (/*CONSTCOND*/0)
/* Pop the first element; the list must be non-empty. */
158 #define SLIST_REMOVE_HEAD(head, field) do { \
159 (head)->slh_first = (head)->slh_first->field.sle_next; \
160 } while (/*CONSTCOND*/0)
162 #define SLIST_REMOVE(head, elm, type, field) do { \
163 if ((head)->slh_first == (elm)) { \
164 SLIST_REMOVE_HEAD((head), field); \
167 struct type *curelm = (head)->slh_first; \
168 while(curelm->field.sle_next != (elm)) \
169 curelm = curelm->field.sle_next; \
170 curelm->field.sle_next = \
171 curelm->field.sle_next->field.sle_next; \
173 } while (/*CONSTCOND*/0)
179 #define LIST_HEAD(name, type) \
181 struct type *lh_first; /* first element */ \
184 #define LIST_HEAD_INITIALIZER(head) \
187 #define LIST_ENTRY(type) \
189 struct type *le_next; /* next element */ \
190 struct type **le_prev; /* address of previous next element */ \
194 * List access methods.
/* LIST accessors; LIST_END is NULL, so LIST_EMPTY is a NULL test. */
196 #define LIST_FIRST(head) ((head)->lh_first)
197 #define LIST_END(head) NULL
198 #define LIST_EMPTY(head) ((head)->lh_first == LIST_END(head))
199 #define LIST_NEXT(elm, field) ((elm)->field.le_next)
/* Forward-only traversal; not safe if (var) is removed in the body. */
201 #define LIST_FOREACH(var, head, field) \
202 for ((var) = ((head)->lh_first); \
203 (var) != LIST_END(head); \
204 (var) = ((var)->field.le_next))
206 #define LIST_FOREACH_SAFE(var, head, field, tvar) \
207 for ((var) = LIST_FIRST((head)); \
208 (var) != LIST_END(head) && \
209 ((tvar) = LIST_NEXT((var), field), 1); \
212 #define LIST_MOVE(head1, head2) do { \
213 LIST_INIT((head2)); \
214 if (!LIST_EMPTY((head1))) { \
215 (head2)->lh_first = (head1)->lh_first; \
216 LIST_INIT((head1)); \
218 } while (/*CONSTCOND*/0)
223 #if defined(QUEUEDEBUG)
224 #define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) \
225 if ((head)->lh_first && \
226 (head)->lh_first->field.le_prev != &(head)->lh_first) \
227 QUEUEDEBUG_ABORT("LIST_INSERT_HEAD %p %s:%d", (head), \
229 #define QUEUEDEBUG_LIST_OP(elm, field) \
230 if ((elm)->field.le_next && \
231 (elm)->field.le_next->field.le_prev != \
232 &(elm)->field.le_next) \
233 QUEUEDEBUG_ABORT("LIST_* forw %p %s:%d", (elm), \
234 __FILE__, __LINE__); \
235 if (*(elm)->field.le_prev != (elm)) \
236 QUEUEDEBUG_ABORT("LIST_* back %p %s:%d", (elm), \
238 #define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) \
239 (elm)->field.le_next = (void *)1L; \
240 (elm)->field.le_prev = (void *)1L;
242 #define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
243 #define QUEUEDEBUG_LIST_OP(elm, field)
244 #define QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
/* Initialize a LIST head to the empty list. */
247 #define LIST_INIT(head) do { \
248 (head)->lh_first = LIST_END(head); \
249 } while (/*CONSTCOND*/0)
251 #define LIST_INSERT_AFTER(listelm, elm, field) do { \
252 QUEUEDEBUG_LIST_OP((listelm), field) \
253 if (((elm)->field.le_next = (listelm)->field.le_next) != \
255 (listelm)->field.le_next->field.le_prev = \
256 &(elm)->field.le_next; \
257 (listelm)->field.le_next = (elm); \
258 (elm)->field.le_prev = &(listelm)->field.le_next; \
259 } while (/*CONSTCOND*/0)
/* Link 'elm' immediately before 'listelm'; 'listelm' must be linked.
 * Works without the head because le_prev addresses the previous
 * element's next pointer (or the head's lh_first). */
261 #define LIST_INSERT_BEFORE(listelm, elm, field) do { \
262 QUEUEDEBUG_LIST_OP((listelm), field) \
263 (elm)->field.le_prev = (listelm)->field.le_prev; \
264 (elm)->field.le_next = (listelm); \
265 *(listelm)->field.le_prev = (elm); \
266 (listelm)->field.le_prev = &(elm)->field.le_next; \
267 } while (/*CONSTCOND*/0)
/* Push 'elm' onto the front of the list. */
269 #define LIST_INSERT_HEAD(head, elm, field) do { \
270 QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field) \
271 if (((elm)->field.le_next = (head)->lh_first) != LIST_END(head))\
272 (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
273 (head)->lh_first = (elm); \
274 (elm)->field.le_prev = &(head)->lh_first; \
275 } while (/*CONSTCOND*/0)
/* Unlink 'elm' in O(1); no head needed (doubly linked via le_prev). */
277 #define LIST_REMOVE(elm, field) do { \
278 QUEUEDEBUG_LIST_OP((elm), field) \
279 if ((elm)->field.le_next != NULL) \
280 (elm)->field.le_next->field.le_prev = \
281 (elm)->field.le_prev; \
282 *(elm)->field.le_prev = (elm)->field.le_next; \
283 QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
284 } while (/*CONSTCOND*/0)
/* Replace 'elm' with 'elm2' in place; 'elm' is left unlinked. */
286 #define LIST_REPLACE(elm, elm2, field) do { \
287 if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
288 (elm2)->field.le_next->field.le_prev = \
289 &(elm2)->field.le_next; \
290 (elm2)->field.le_prev = (elm)->field.le_prev; \
291 *(elm2)->field.le_prev = (elm2); \
292 QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
293 } while (/*CONSTCOND*/0)
296 * Simple queue definitions.
298 #define SIMPLEQ_HEAD(name, type) \
300 struct type *sqh_first; /* first element */ \
301 struct type **sqh_last; /* addr of last next element */ \
304 #define SIMPLEQ_HEAD_INITIALIZER(head) \
305 { NULL, &(head).sqh_first }
307 #define SIMPLEQ_ENTRY(type) \
309 struct type *sqe_next; /* next element */ \
313 * Simple queue access methods.
/* Simple queue accessors. */
315 #define SIMPLEQ_FIRST(head) ((head)->sqh_first)
316 #define SIMPLEQ_END(head) NULL
317 #define SIMPLEQ_EMPTY(head) ((head)->sqh_first == SIMPLEQ_END(head))
318 #define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
/* Forward-only traversal (elements are singly linked). */
320 #define SIMPLEQ_FOREACH(var, head, field) \
321 for ((var) = ((head)->sqh_first); \
322 (var) != SIMPLEQ_END(head); \
323 (var) = ((var)->field.sqe_next))
325 #define SIMPLEQ_FOREACH_SAFE(var, head, field, next) \
326 for ((var) = ((head)->sqh_first); \
327 (var) != SIMPLEQ_END(head) && \
328 ((next = ((var)->field.sqe_next)), 1); \
332 * Simple queue functions.
/* Initialize: empty queue; sqh_last points back at sqh_first. */
334 #define SIMPLEQ_INIT(head) do { \
335 (head)->sqh_first = NULL; \
336 (head)->sqh_last = &(head)->sqh_first; \
337 } while (/*CONSTCOND*/0)
/* Push at the front; fix up sqh_last if the queue was empty. */
339 #define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
340 if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
341 (head)->sqh_last = &(elm)->field.sqe_next; \
342 (head)->sqh_first = (elm); \
343 } while (/*CONSTCOND*/0)
/* Append at the tail in O(1) via the sqh_last indirection. */
345 #define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
346 (elm)->field.sqe_next = NULL; \
347 *(head)->sqh_last = (elm); \
348 (head)->sqh_last = &(elm)->field.sqe_next; \
349 } while (/*CONSTCOND*/0)
/* Insert after 'listelm'; update sqh_last when inserting at the end. */
351 #define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
352 if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
353 (head)->sqh_last = &(elm)->field.sqe_next; \
354 (listelm)->field.sqe_next = (elm); \
355 } while (/*CONSTCOND*/0)
/* Pop the head; queue must be non-empty. Resets sqh_last on empty. */
357 #define SIMPLEQ_REMOVE_HEAD(head, field) do { \
358 if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
359 (head)->sqh_last = &(head)->sqh_first; \
360 } while (/*CONSTCOND*/0)
362 #define SIMPLEQ_REMOVE_AFTER(head, elm, field) do { \
363 if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \
365 (head)->sqh_last = &(elm)->field.sqe_next; \
366 } while (/*CONSTCOND*/0)
368 #define SIMPLEQ_REMOVE(head, elm, type, field) do { \
369 if ((head)->sqh_first == (elm)) { \
370 SIMPLEQ_REMOVE_HEAD((head), field); \
372 struct type *curelm = (head)->sqh_first; \
373 while (curelm->field.sqe_next != (elm)) \
374 curelm = curelm->field.sqe_next; \
375 if ((curelm->field.sqe_next = \
376 curelm->field.sqe_next->field.sqe_next) == NULL) \
377 (head)->sqh_last = &(curelm)->field.sqe_next; \
379 } while (/*CONSTCOND*/0)
381 #define SIMPLEQ_CONCAT(head1, head2) do { \
382 if (!SIMPLEQ_EMPTY((head2))) { \
383 *(head1)->sqh_last = (head2)->sqh_first; \
384 (head1)->sqh_last = (head2)->sqh_last; \
385 SIMPLEQ_INIT((head2)); \
387 } while (/*CONSTCOND*/0)
389 #define SIMPLEQ_LAST(head, type, field) \
390 (SIMPLEQ_EMPTY((head)) ? \
392 ((struct type *)(void *) \
393 ((char *)((head)->sqh_last) - offsetof(struct type, field))))
396 * Tail queue definitions.
398 #define _TAILQ_HEAD(name, type, qual) \
400 qual type *tqh_first; /* first element */ \
401 qual type *qual *tqh_last; /* addr of last next element */ \
403 #define TAILQ_HEAD(name, type) _TAILQ_HEAD(name, struct type,)
405 #define TAILQ_HEAD_INITIALIZER(head) \
406 { TAILQ_END(head), &(head).tqh_first }
408 #define _TAILQ_ENTRY(type, qual) \
410 qual type *tqe_next; /* next element */ \
411 qual type *qual *tqe_prev; /* address of previous next element */\
413 #define TAILQ_ENTRY(type) _TAILQ_ENTRY(struct type,)
416 * Tail queue access methods.
/* Tail queue accessors. TAILQ_LAST/TAILQ_PREV cast through 'struct
 * headname', relying on the head and entry sharing the same
 * two-pointer layout. */
418 #define TAILQ_FIRST(head) ((head)->tqh_first)
419 #define TAILQ_END(head) (NULL)
420 #define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
421 #define TAILQ_LAST(head, headname) \
422 (*(((struct headname *)((head)->tqh_last))->tqh_last))
423 #define TAILQ_PREV(elm, headname, field) \
424 (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
425 #define TAILQ_EMPTY(head) (TAILQ_FIRST(head) == TAILQ_END(head))
/* Forward traversal. */
428 #define TAILQ_FOREACH(var, head, field) \
429 for ((var) = ((head)->tqh_first); \
430 (var) != TAILQ_END(head); \
431 (var) = ((var)->field.tqe_next))
/* Forward traversal that is safe against removal of (var): 'next'
 * caches the successor before the loop body runs. */
433 #define TAILQ_FOREACH_SAFE(var, head, field, next) \
434 for ((var) = ((head)->tqh_first); \
435 (var) != TAILQ_END(head) && \
436 ((next) = TAILQ_NEXT(var, field), 1); (var) = (next))
/* Reverse traversal (uses the headname cast described above). */
438 #define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
439 for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\
440 (var) != TAILQ_END(head); \
441 (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
/* Reverse traversal, safe against removal of (var). */
443 #define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev) \
444 for ((var) = TAILQ_LAST((head), headname); \
445 (var) != TAILQ_END(head) && \
446 ((prev) = TAILQ_PREV((var), headname, field), 1); (var) = (prev))
449 * Tail queue functions.
451 #if defined(QUEUEDEBUG)
452 #define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) \
453 if ((head)->tqh_first && \
454 (head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \
455 QUEUEDEBUG_ABORT("TAILQ_INSERT_HEAD %p %s:%d", (head), \
457 #define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) \
458 if (*(head)->tqh_last != NULL) \
459 QUEUEDEBUG_ABORT("TAILQ_INSERT_TAIL %p %s:%d", (head), \
461 #define QUEUEDEBUG_TAILQ_OP(elm, field) \
462 if ((elm)->field.tqe_next && \
463 (elm)->field.tqe_next->field.tqe_prev != \
464 &(elm)->field.tqe_next) \
465 QUEUEDEBUG_ABORT("TAILQ_* forw %p %s:%d", (elm), \
466 __FILE__, __LINE__); \
467 if (*(elm)->field.tqe_prev != (elm)) \
468 QUEUEDEBUG_ABORT("TAILQ_* back %p %s:%d", (elm), \
470 #define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field) \
471 if ((elm)->field.tqe_next == NULL && \
472 (head)->tqh_last != &(elm)->field.tqe_next) \
473 QUEUEDEBUG_ABORT("TAILQ_PREREMOVE head %p elm %p %s:%d",\
474 (head), (elm), __FILE__, __LINE__);
475 #define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field) \
476 (elm)->field.tqe_next = (void *)1L; \
477 (elm)->field.tqe_prev = (void *)1L;
479 #define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
480 #define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
481 #define QUEUEDEBUG_TAILQ_OP(elm, field)
482 #define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
483 #define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
/* Initialize: empty queue; tqh_last points back at tqh_first. */
486 #define TAILQ_INIT(head) do { \
487 (head)->tqh_first = TAILQ_END(head); \
488 (head)->tqh_last = &(head)->tqh_first; \
489 } while (/*CONSTCOND*/0)
491 #define TAILQ_INSERT_HEAD(head, elm, field) do { \
492 QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field) \
493 if (((elm)->field.tqe_next = (head)->tqh_first) != TAILQ_END(head))\
494 (head)->tqh_first->field.tqe_prev = \
495 &(elm)->field.tqe_next; \
497 (head)->tqh_last = &(elm)->field.tqe_next; \
498 (head)->tqh_first = (elm); \
499 (elm)->field.tqe_prev = &(head)->tqh_first; \
500 } while (/*CONSTCOND*/0)
/* Append 'elm' at the tail in O(1) via the tqh_last indirection. */
502 #define TAILQ_INSERT_TAIL(head, elm, field) do { \
503 QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field) \
504 (elm)->field.tqe_next = TAILQ_END(head); \
505 (elm)->field.tqe_prev = (head)->tqh_last; \
506 *(head)->tqh_last = (elm); \
507 (head)->tqh_last = &(elm)->field.tqe_next; \
508 } while (/*CONSTCOND*/0)
510 #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
511 QUEUEDEBUG_TAILQ_OP((listelm), field) \
512 if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != \
514 (elm)->field.tqe_next->field.tqe_prev = \
515 &(elm)->field.tqe_next; \
517 (head)->tqh_last = &(elm)->field.tqe_next; \
518 (listelm)->field.tqe_next = (elm); \
519 (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
520 } while (/*CONSTCOND*/0)
/* Link 'elm' immediately before 'listelm'; no head needed because
 * tqe_prev addresses the previous element's next pointer. */
522 #define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
523 QUEUEDEBUG_TAILQ_OP((listelm), field) \
524 (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
525 (elm)->field.tqe_next = (listelm); \
526 *(listelm)->field.tqe_prev = (elm); \
527 (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
528 } while (/*CONSTCOND*/0)
530 #define TAILQ_REMOVE(head, elm, field) do { \
531 QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field) \
532 QUEUEDEBUG_TAILQ_OP((elm), field) \
533 if (((elm)->field.tqe_next) != TAILQ_END(head)) \
534 (elm)->field.tqe_next->field.tqe_prev = \
535 (elm)->field.tqe_prev; \
537 (head)->tqh_last = (elm)->field.tqe_prev; \
538 *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
539 QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
540 } while (/*CONSTCOND*/0)
542 #define TAILQ_REPLACE(head, elm, elm2, field) do { \
543 if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != \
545 (elm2)->field.tqe_next->field.tqe_prev = \
546 &(elm2)->field.tqe_next; \
548 (head)->tqh_last = &(elm2)->field.tqe_next; \
549 (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
550 *(elm2)->field.tqe_prev = (elm2); \
551 QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
552 } while (/*CONSTCOND*/0)
554 #define TAILQ_CONCAT(head1, head2, field) do { \
555 if (!TAILQ_EMPTY(head2)) { \
556 *(head1)->tqh_last = (head2)->tqh_first; \
557 (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
558 (head1)->tqh_last = (head2)->tqh_last; \
559 TAILQ_INIT((head2)); \
561 } while (/*CONSTCOND*/0)
564 * Singly-linked Tail queue declarations.
566 #define STAILQ_HEAD(name, type) \
568 struct type *stqh_first; /* first element */ \
569 struct type **stqh_last; /* addr of last next element */ \
572 #define STAILQ_HEAD_INITIALIZER(head) \
573 { NULL, &(head).stqh_first }
575 #define STAILQ_ENTRY(type) \
577 struct type *stqe_next; /* next element */ \
581 * Singly-linked Tail queue access methods.
/* Singly-linked tail queue accessors. */
583 #define STAILQ_FIRST(head) ((head)->stqh_first)
584 #define STAILQ_END(head) NULL
585 #define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
586 #define STAILQ_EMPTY(head) (STAILQ_FIRST(head) == STAILQ_END(head))
589 * Singly-linked Tail queue functions.
/* Initialize: empty queue; stqh_last points back at stqh_first. */
591 #define STAILQ_INIT(head) do { \
592 (head)->stqh_first = NULL; \
593 (head)->stqh_last = &(head)->stqh_first; \
594 } while (/*CONSTCOND*/0)
/* Push at the front; fix up stqh_last if the queue was empty. */
596 #define STAILQ_INSERT_HEAD(head, elm, field) do { \
597 if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \
598 (head)->stqh_last = &(elm)->field.stqe_next; \
599 (head)->stqh_first = (elm); \
600 } while (/*CONSTCOND*/0)
/* Append at the tail in O(1) via the stqh_last indirection. */
602 #define STAILQ_INSERT_TAIL(head, elm, field) do { \
603 (elm)->field.stqe_next = NULL; \
604 *(head)->stqh_last = (elm); \
605 (head)->stqh_last = &(elm)->field.stqe_next; \
606 } while (/*CONSTCOND*/0)
/* Insert after 'listelm'; update stqh_last when inserting at the end. */
608 #define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
609 if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
610 (head)->stqh_last = &(elm)->field.stqe_next; \
611 (listelm)->field.stqe_next = (elm); \
612 } while (/*CONSTCOND*/0)
/* Pop the head; queue must be non-empty. Resets stqh_last on empty. */
614 #define STAILQ_REMOVE_HEAD(head, field) do { \
615 if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
616 (head)->stqh_last = &(head)->stqh_first; \
617 } while (/*CONSTCOND*/0)
619 #define STAILQ_REMOVE(head, elm, type, field) do { \
620 if ((head)->stqh_first == (elm)) { \
621 STAILQ_REMOVE_HEAD((head), field); \
623 struct type *curelm = (head)->stqh_first; \
624 while (curelm->field.stqe_next != (elm)) \
625 curelm = curelm->field.stqe_next; \
626 if ((curelm->field.stqe_next = \
627 curelm->field.stqe_next->field.stqe_next) == NULL) \
628 (head)->stqh_last = &(curelm)->field.stqe_next; \
630 } while (/*CONSTCOND*/0)
632 #define STAILQ_FOREACH(var, head, field) \
633 for ((var) = ((head)->stqh_first); \
635 (var) = ((var)->field.stqe_next))
637 #define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
638 for ((var) = STAILQ_FIRST((head)); \
639 (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
642 #define STAILQ_CONCAT(head1, head2) do { \
643 if (!STAILQ_EMPTY((head2))) { \
644 *(head1)->stqh_last = (head2)->stqh_first; \
645 (head1)->stqh_last = (head2)->stqh_last; \
646 STAILQ_INIT((head2)); \
648 } while (/*CONSTCOND*/0)
650 #define STAILQ_LAST(head, type, field) \
651 (STAILQ_EMPTY((head)) ? \
653 ((struct type *)(void *) \
654 ((char *)((head)->stqh_last) - offsetof(struct type, field))))
659 * Circular queue definitions. Do not use. We still keep the macros
660 * for compatibility but because of pointer aliasing issues their use
665 * __launder_type(): We use this ugly hack to work around the compiler
666 * noticing that two types may not alias each other and elide tests in code.
667 * We hit this in the CIRCLEQ macros when comparing 'struct name *' and
668 * 'struct type *' (see CIRCLEQ_HEAD()). Modern compilers (such as GCC
669 * 4.8) declare these comparisons as always false, causing the code to
670 * not run as designed.
672 * This hack is only to be used for comparisons and thus can be fully const.
673 * Do not use for assignment.
675 * If we ever choose to change the ABI of the CIRCLEQ macros, we could fix
676 * this by changing the head/tail sentinel values, but see the note above
679 static __inline const void * __launder_type(const void *);
680 static __inline const void *
681 __launder_type(const void *__x)
683 __asm __volatile("" : "+r" (__x));
687 #if defined(QUEUEDEBUG)
688 #define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) \
689 if ((head)->cqh_first != CIRCLEQ_ENDC(head) && \
690 (head)->cqh_first->field.cqe_prev != CIRCLEQ_ENDC(head)) \
691 QUEUEDEBUG_ABORT("CIRCLEQ head forw %p %s:%d", (head), \
692 __FILE__, __LINE__); \
693 if ((head)->cqh_last != CIRCLEQ_ENDC(head) && \
694 (head)->cqh_last->field.cqe_next != CIRCLEQ_ENDC(head)) \
695 QUEUEDEBUG_ABORT("CIRCLEQ head back %p %s:%d", (head), \
697 #define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) \
698 if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) { \
699 if ((head)->cqh_last != (elm)) \
700 QUEUEDEBUG_ABORT("CIRCLEQ elm last %p %s:%d", \
701 (elm), __FILE__, __LINE__); \
703 if ((elm)->field.cqe_next->field.cqe_prev != (elm)) \
704 QUEUEDEBUG_ABORT("CIRCLEQ elm forw %p %s:%d", \
705 (elm), __FILE__, __LINE__); \
707 if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) { \
708 if ((head)->cqh_first != (elm)) \
709 QUEUEDEBUG_ABORT("CIRCLEQ elm first %p %s:%d", \
710 (elm), __FILE__, __LINE__); \
712 if ((elm)->field.cqe_prev->field.cqe_next != (elm)) \
713 QUEUEDEBUG_ABORT("CIRCLEQ elm prev %p %s:%d", \
714 (elm), __FILE__, __LINE__); \
716 #define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) \
717 (elm)->field.cqe_next = (void *)1L; \
718 (elm)->field.cqe_prev = (void *)1L;
720 #define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
721 #define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
722 #define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
725 #define CIRCLEQ_HEAD(name, type) \
727 struct type *cqh_first; /* first element */ \
728 struct type *cqh_last; /* last element */ \
731 #define CIRCLEQ_HEAD_INITIALIZER(head) \
732 { CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
734 #define CIRCLEQ_ENTRY(type) \
736 struct type *cqe_next; /* next element */ \
737 struct type *cqe_prev; /* previous element */ \
741 * Circular queue functions.
/* Initialize: both ends point at the head itself (the sentinel). */
743 #define CIRCLEQ_INIT(head) do { \
744 (head)->cqh_first = CIRCLEQ_END(head); \
745 (head)->cqh_last = CIRCLEQ_END(head); \
746 } while (/*CONSTCOND*/0)
748 #define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
749 QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
750 QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
751 (elm)->field.cqe_next = (listelm)->field.cqe_next; \
752 (elm)->field.cqe_prev = (listelm); \
753 if ((listelm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
754 (head)->cqh_last = (elm); \
756 (listelm)->field.cqe_next->field.cqe_prev = (elm); \
757 (listelm)->field.cqe_next = (elm); \
758 } while (/*CONSTCOND*/0)
760 #define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
761 QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
762 QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
763 (elm)->field.cqe_next = (listelm); \
764 (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
765 if ((listelm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
766 (head)->cqh_first = (elm); \
768 (listelm)->field.cqe_prev->field.cqe_next = (elm); \
769 (listelm)->field.cqe_prev = (elm); \
770 } while (/*CONSTCOND*/0)
772 #define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
773 QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
774 (elm)->field.cqe_next = (head)->cqh_first; \
775 (elm)->field.cqe_prev = CIRCLEQ_END(head); \
776 if ((head)->cqh_last == CIRCLEQ_ENDC(head)) \
777 (head)->cqh_last = (elm); \
779 (head)->cqh_first->field.cqe_prev = (elm); \
780 (head)->cqh_first = (elm); \
781 } while (/*CONSTCOND*/0)
783 #define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
784 QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
785 (elm)->field.cqe_next = CIRCLEQ_END(head); \
786 (elm)->field.cqe_prev = (head)->cqh_last; \
787 if ((head)->cqh_first == CIRCLEQ_ENDC(head)) \
788 (head)->cqh_first = (elm); \
790 (head)->cqh_last->field.cqe_next = (elm); \
791 (head)->cqh_last = (elm); \
792 } while (/*CONSTCOND*/0)
794 #define CIRCLEQ_REMOVE(head, elm, field) do { \
795 QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
796 QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field) \
797 if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
798 (head)->cqh_last = (elm)->field.cqe_prev; \
800 (elm)->field.cqe_next->field.cqe_prev = \
801 (elm)->field.cqe_prev; \
802 if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
803 (head)->cqh_first = (elm)->field.cqe_next; \
805 (elm)->field.cqe_prev->field.cqe_next = \
806 (elm)->field.cqe_next; \
807 QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field) \
808 } while (/*CONSTCOND*/0)
/* Forward traversal; stops when the head sentinel is reached
 * (compared via CIRCLEQ_ENDC to defeat alias-analysis elision). */
810 #define CIRCLEQ_FOREACH(var, head, field) \
811 for ((var) = ((head)->cqh_first); \
812 (var) != CIRCLEQ_ENDC(head); \
813 (var) = ((var)->field.cqe_next))
/* Reverse traversal; same sentinel convention. */
815 #define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
816 for ((var) = ((head)->cqh_last); \
817 (var) != CIRCLEQ_ENDC(head); \
818 (var) = ((var)->field.cqe_prev))
821 * Circular queue access methods.
/* Circular queue accessors. The head pointer itself is the
 * end-of-list sentinel stored in cqe_next/cqe_prev. */
823 #define CIRCLEQ_FIRST(head) ((head)->cqh_first)
824 #define CIRCLEQ_LAST(head) ((head)->cqh_last)
825 /* For comparisons */
/* ENDC launders the head pointer so the compiler cannot use
 * type-based alias analysis to fold the comparison to false. */
826 #define CIRCLEQ_ENDC(head) (__launder_type(head))
827 /* For assignments */
828 #define CIRCLEQ_END(head) ((void *)(head))
829 #define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
830 #define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
831 #define CIRCLEQ_EMPTY(head) \
832 (CIRCLEQ_FIRST(head) == CIRCLEQ_ENDC(head))
/*
 * Return the successor of 'elm', wrapping from the last element back
 * to the first. Fix: the false branch previously expanded
 * '(elm->field.cqe_next)' without parenthesizing the 'elm' argument
 * (the test on the line above does parenthesize it), which
 * mis-expands for argument expressions of lower precedence than '->'.
 */
834 #define CIRCLEQ_LOOP_NEXT(head, elm, field) \
835 (((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
836 ? ((head)->cqh_first) \
837 : ((elm)->field.cqe_next))
/*
 * Return the predecessor of 'elm', wrapping from the first element
 * back to the last. Fix: parenthesize the 'elm' argument in the
 * false branch (was '(elm->field.cqe_prev)'), matching the guarded
 * test and standard macro-argument hygiene.
 */
838 #define CIRCLEQ_LOOP_PREV(head, elm, field) \
839 (((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
840 ? ((head)->cqh_last) \
841 : ((elm)->field.cqe_prev))
842 #endif /* !_KERNEL */
844 #endif /* !_SYS_QUEUE_H_ */