qemu-e2k/include/qemu/queue.h


/* $NetBSD: queue.h,v 1.52 2009/04/20 09:56:08 mschuett Exp $ */
/*
* QEMU version: Copy from netbsd, removed debug code, removed some of
* the implementations. Left in singly-linked lists, lists, simple
* queues, and tail queues.
*/
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)queue.h 8.5 (Berkeley) 8/20/94
*/
#ifndef QEMU_SYS_QUEUE_H
#define QEMU_SYS_QUEUE_H
/*
* This file defines four types of data structures: singly-linked lists,
* lists, simple queues, and tail queues.
*
* A singly-linked list is headed by a single forward pointer. The
* elements are singly linked for minimum space and pointer manipulation
* overhead at the expense of O(n) removal for arbitrary elements. New
* elements can be added to the list after an existing element or at the
* head of the list. Elements being removed from the head of the list
* should use the explicit macro for this purpose for optimum
* efficiency. A singly-linked list may only be traversed in the forward
* direction. Singly-linked lists are ideal for applications with large
* datasets and few or no removals or for implementing a LIFO queue.
*
* A list is headed by a single forward pointer (or an array of forward
* pointers for a hash table header). The elements are doubly linked
* so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before
* or after an existing element or at the head of the list. A list
* may only be traversed in the forward direction.
*
* A simple queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are singly
* linked to save space, so elements can only be removed from the
* head of the list. New elements can be added to the list after
* an existing element, at the head of the list, or at the end of the
* list. A simple queue may only be traversed in the forward direction.
*
* A tail queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or
* after an existing element, at the head of the list, or at the end of
* the list. A tail queue may be traversed in either direction.
*
* For details on the use of these macros, see the queue(3) manual page.
*/
#include "qemu/atomic.h" /* for smp_wmb() */
/*
* List definitions.
*/
#define QLIST_HEAD(name, type) \
struct name { \
struct type *lh_first; /* first element */ \
}
#define QLIST_HEAD_INITIALIZER(head) \
{ NULL }
#define QLIST_ENTRY(type) \
struct { \
struct type *le_next; /* next element */ \
struct type **le_prev; /* address of previous next element */ \
}
/*
* List functions.
*/
#define QLIST_INIT(head) do { \
(head)->lh_first = NULL; \
} while (/*CONSTCOND*/0)
#define QLIST_SWAP(dstlist, srclist, field) do { \
void *tmplist; \
tmplist = (srclist)->lh_first; \
(srclist)->lh_first = (dstlist)->lh_first; \
if ((srclist)->lh_first != NULL) { \
(srclist)->lh_first->field.le_prev = &(srclist)->lh_first; \
} \
(dstlist)->lh_first = tmplist; \
if ((dstlist)->lh_first != NULL) { \
(dstlist)->lh_first->field.le_prev = &(dstlist)->lh_first; \
} \
} while (/*CONSTCOND*/0)
#define QLIST_INSERT_AFTER(listelm, elm, field) do { \
if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
(listelm)->field.le_next->field.le_prev = \
&(elm)->field.le_next; \
(listelm)->field.le_next = (elm); \
(elm)->field.le_prev = &(listelm)->field.le_next; \
} while (/*CONSTCOND*/0)
#define QLIST_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.le_prev = (listelm)->field.le_prev; \
(elm)->field.le_next = (listelm); \
*(listelm)->field.le_prev = (elm); \
(listelm)->field.le_prev = &(elm)->field.le_next; \
} while (/*CONSTCOND*/0)
#define QLIST_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.le_next = (head)->lh_first) != NULL) \
(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
(head)->lh_first = (elm); \
(elm)->field.le_prev = &(head)->lh_first; \
} while (/*CONSTCOND*/0)
#define QLIST_REMOVE(elm, field) do { \
if ((elm)->field.le_next != NULL) \
(elm)->field.le_next->field.le_prev = \
(elm)->field.le_prev; \
*(elm)->field.le_prev = (elm)->field.le_next; \
} while (/*CONSTCOND*/0)
#define QLIST_FOREACH(var, head, field) \
for ((var) = ((head)->lh_first); \
(var); \
(var) = ((var)->field.le_next))
#define QLIST_FOREACH_SAFE(var, head, field, next_var) \
for ((var) = ((head)->lh_first); \
(var) && ((next_var) = ((var)->field.le_next), 1); \
(var) = (next_var))
/*
* List access methods.
*/
#define QLIST_EMPTY(head) ((head)->lh_first == NULL)
#define QLIST_FIRST(head) ((head)->lh_first)
#define QLIST_NEXT(elm, field) ((elm)->field.le_next)
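/*
 * Example: a minimal QLIST usage sketch.  The struct and variable names
 * (Device, DeviceList, devices) are hypothetical and only illustrate the
 * macros above:
 *
 *     typedef struct Device {
 *         int id;
 *         QLIST_ENTRY(Device) link;       // embedded doubly-linked entry
 *     } Device;
 *
 *     static QLIST_HEAD(DeviceList, Device) devices =
 *         QLIST_HEAD_INITIALIZER(devices);
 *
 *     void device_add(Device *dev)
 *     {
 *         QLIST_INSERT_HEAD(&devices, dev, link);    // O(1) insert at head
 *     }
 *
 *     void device_del(Device *dev)
 *     {
 *         QLIST_REMOVE(dev, link);        // O(1), no traversal needed
 *     }
 *
 *     Device *device_find(int id)
 *     {
 *         Device *d;
 *         QLIST_FOREACH(d, &devices, link) {
 *             if (d->id == id) {
 *                 return d;
 *             }
 *         }
 *         return NULL;
 *     }
 */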
/*
* Singly-linked List definitions.
*/
#define QSLIST_HEAD(name, type) \
struct name { \
struct type *slh_first; /* first element */ \
}
#define QSLIST_HEAD_INITIALIZER(head) \
{ NULL }
#define QSLIST_ENTRY(type) \
struct { \
struct type *sle_next; /* next element */ \
}
/*
* Singly-linked List functions.
*/
#define QSLIST_INIT(head) do { \
(head)->slh_first = NULL; \
} while (/*CONSTCOND*/0)
#define QSLIST_INSERT_AFTER(slistelm, elm, field) do { \
(elm)->field.sle_next = (slistelm)->field.sle_next; \
(slistelm)->field.sle_next = (elm); \
} while (/*CONSTCOND*/0)
#define QSLIST_INSERT_HEAD(head, elm, field) do { \
(elm)->field.sle_next = (head)->slh_first; \
(head)->slh_first = (elm); \
} while (/*CONSTCOND*/0)
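/*
 * QSLIST_INSERT_HEAD_ATOMIC inserts an element with a compare-and-swap
 * retry loop, so several threads may push onto the same list concurrently.
 * The expected old head is snapshotted into the local variable
 * save_sle_next: atomic_cmpxchg() returns the previous value rather than a
 * success flag, and comparing against a value re-read from memory would let
 * another thread change elm->field.sle_next between the cmpxchg and the
 * comparison, turning a successful insertion into a spurious failure and
 * ultimately inserting the element twice.  QSLIST_MOVE_ATOMIC atomically
 * detaches the entire list from src into dest (leaving src empty), so a
 * consumer can take everything that concurrent producers have pushed.
 */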
#define QSLIST_INSERT_HEAD_ATOMIC(head, elm, field) do { \
typeof(elm) save_sle_next; \
do { \
save_sle_next = (elm)->field.sle_next = (head)->slh_first; \
} while (atomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) != \
save_sle_next); \
} while (/*CONSTCOND*/0)
#define QSLIST_MOVE_ATOMIC(dest, src) do { \
(dest)->slh_first = atomic_xchg(&(src)->slh_first, NULL); \
} while (/*CONSTCOND*/0)
#define QSLIST_REMOVE_HEAD(head, field) do { \
(head)->slh_first = (head)->slh_first->field.sle_next; \
} while (/*CONSTCOND*/0)
#define QSLIST_REMOVE_AFTER(slistelm, field) do { \
(slistelm)->field.sle_next = \
QSLIST_NEXT(QSLIST_NEXT((slistelm), field), field); \
} while (/*CONSTCOND*/0)
#define QSLIST_FOREACH(var, head, field) \
for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)
#define QSLIST_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = QSLIST_FIRST((head)); \
(var) && ((tvar) = QSLIST_NEXT((var), field), 1); \
(var) = (tvar))
/*
* Singly-linked List access methods.
*/
#define QSLIST_EMPTY(head) ((head)->slh_first == NULL)
#define QSLIST_FIRST(head) ((head)->slh_first)
#define QSLIST_NEXT(elm, field) ((elm)->field.sle_next)
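/*
 * Example: a minimal QSLIST usage sketch in LIFO (free-list) style.  The
 * struct and variable names (Node, free_nodes) are hypothetical:
 *
 *     typedef struct Node {
 *         void *data;
 *         QSLIST_ENTRY(Node) next;
 *     } Node;
 *
 *     static QSLIST_HEAD(, Node) free_nodes =
 *         QSLIST_HEAD_INITIALIZER(free_nodes);
 *
 *     void node_push(Node *n)
 *     {
 *         QSLIST_INSERT_HEAD(&free_nodes, n, next);
 *     }
 *
 *     Node *node_pop(void)
 *     {
 *         Node *n = QSLIST_FIRST(&free_nodes);
 *         if (n) {
 *             QSLIST_REMOVE_HEAD(&free_nodes, next);
 *         }
 *         return n;
 *     }
 */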
/*
* Simple queue definitions.
*/
#define QSIMPLEQ_HEAD(name, type) \
struct name { \
struct type *sqh_first; /* first element */ \
struct type **sqh_last; /* addr of last next element */ \
}
#define QSIMPLEQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).sqh_first }
#define QSIMPLEQ_ENTRY(type) \
struct { \
struct type *sqe_next; /* next element */ \
}
/*
* Simple queue functions.
*/
#define QSIMPLEQ_INIT(head) do { \
(head)->sqh_first = NULL; \
(head)->sqh_last = &(head)->sqh_first; \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
(head)->sqh_last = &(elm)->field.sqe_next; \
(head)->sqh_first = (elm); \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.sqe_next = NULL; \
*(head)->sqh_last = (elm); \
(head)->sqh_last = &(elm)->field.sqe_next; \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL) \
(head)->sqh_last = &(elm)->field.sqe_next; \
(listelm)->field.sqe_next = (elm); \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_REMOVE_HEAD(head, field) do { \
if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL)\
(head)->sqh_last = &(head)->sqh_first; \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_SPLIT_AFTER(head, elm, field, removed) do { \
QSIMPLEQ_INIT(removed); \
if (((removed)->sqh_first = (head)->sqh_first) != NULL) { \
if (((head)->sqh_first = (elm)->field.sqe_next) == NULL) { \
(head)->sqh_last = &(head)->sqh_first; \
} \
(removed)->sqh_last = &(elm)->field.sqe_next; \
(elm)->field.sqe_next = NULL; \
} \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_REMOVE(head, elm, type, field) do { \
if ((head)->sqh_first == (elm)) { \
QSIMPLEQ_REMOVE_HEAD((head), field); \
} else { \
struct type *curelm = (head)->sqh_first; \
while (curelm->field.sqe_next != (elm)) \
curelm = curelm->field.sqe_next; \
if ((curelm->field.sqe_next = \
curelm->field.sqe_next->field.sqe_next) == NULL) \
(head)->sqh_last = &(curelm)->field.sqe_next; \
} \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_FOREACH(var, head, field) \
for ((var) = ((head)->sqh_first); \
(var); \
(var) = ((var)->field.sqe_next))
#define QSIMPLEQ_FOREACH_SAFE(var, head, field, next) \
for ((var) = ((head)->sqh_first); \
(var) && ((next = ((var)->field.sqe_next)), 1); \
(var) = (next))
#define QSIMPLEQ_CONCAT(head1, head2) do { \
if (!QSIMPLEQ_EMPTY((head2))) { \
*(head1)->sqh_last = (head2)->sqh_first; \
(head1)->sqh_last = (head2)->sqh_last; \
QSIMPLEQ_INIT((head2)); \
} \
} while (/*CONSTCOND*/0)
#define QSIMPLEQ_LAST(head, type, field) \
(QSIMPLEQ_EMPTY((head)) ? \
NULL : \
((struct type *)(void *) \
((char *)((head)->sqh_last) - offsetof(struct type, field))))
/*
* Simple queue access methods.
*/
#define QSIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL)
#define QSIMPLEQ_FIRST(head) ((head)->sqh_first)
#define QSIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
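/*
 * Example: a minimal QSIMPLEQ usage sketch as a FIFO work queue.  The
 * struct and variable names (Request, pending) are hypothetical:
 *
 *     typedef struct Request {
 *         int op;
 *         QSIMPLEQ_ENTRY(Request) entry;
 *     } Request;
 *
 *     static QSIMPLEQ_HEAD(, Request) pending =
 *         QSIMPLEQ_HEAD_INITIALIZER(pending);
 *
 *     void request_enqueue(Request *req)
 *     {
 *         QSIMPLEQ_INSERT_TAIL(&pending, req, entry);   // O(1) append
 *     }
 *
 *     Request *request_dequeue(void)
 *     {
 *         Request *req = QSIMPLEQ_FIRST(&pending);
 *         if (req) {
 *             QSIMPLEQ_REMOVE_HEAD(&pending, entry);    // pop from head only
 *         }
 *         return req;
 *     }
 */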
/*
* Tail queue definitions.
*/
#define Q_TAILQ_HEAD(name, type, qual) \
struct name { \
qual type *tqh_first; /* first element */ \
qual type *qual *tqh_last; /* addr of last next element */ \
}
#define QTAILQ_HEAD(name, type) Q_TAILQ_HEAD(name, struct type,)
#define QTAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).tqh_first }
#define Q_TAILQ_ENTRY(type, qual) \
struct { \
qual type *tqe_next; /* next element */ \
qual type *qual *tqe_prev; /* address of previous next element */\
}
#define QTAILQ_ENTRY(type) Q_TAILQ_ENTRY(struct type,)
/*
* Tail queue functions.
*/
#define QTAILQ_INIT(head) do { \
(head)->tqh_first = NULL; \
(head)->tqh_last = &(head)->tqh_first; \
} while (/*CONSTCOND*/0)
#define QTAILQ_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
(head)->tqh_first->field.tqe_prev = \
&(elm)->field.tqe_next; \
else \
(head)->tqh_last = &(elm)->field.tqe_next; \
(head)->tqh_first = (elm); \
(elm)->field.tqe_prev = &(head)->tqh_first; \
} while (/*CONSTCOND*/0)
#define QTAILQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.tqe_next = NULL; \
(elm)->field.tqe_prev = (head)->tqh_last; \
*(head)->tqh_last = (elm); \
(head)->tqh_last = &(elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)
#define QTAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
(elm)->field.tqe_next->field.tqe_prev = \
&(elm)->field.tqe_next; \
else \
(head)->tqh_last = &(elm)->field.tqe_next; \
(listelm)->field.tqe_next = (elm); \
(elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
} while (/*CONSTCOND*/0)
#define QTAILQ_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
(elm)->field.tqe_next = (listelm); \
*(listelm)->field.tqe_prev = (elm); \
(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)
#define QTAILQ_REMOVE(head, elm, field) do { \
if (((elm)->field.tqe_next) != NULL) \
(elm)->field.tqe_next->field.tqe_prev = \
(elm)->field.tqe_prev; \
else \
(head)->tqh_last = (elm)->field.tqe_prev; \
*(elm)->field.tqe_prev = (elm)->field.tqe_next; \
(elm)->field.tqe_prev = NULL; \
} while (/*CONSTCOND*/0)
#define QTAILQ_FOREACH(var, head, field) \
for ((var) = ((head)->tqh_first); \
(var); \
(var) = ((var)->field.tqe_next))
#define QTAILQ_FOREACH_SAFE(var, head, field, next_var) \
for ((var) = ((head)->tqh_first); \
(var) && ((next_var) = ((var)->field.tqe_next), 1); \
(var) = (next_var))
#define QTAILQ_FOREACH_REVERSE(var, head, headname, field) \
for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
(var); \
(var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
/*
* Tail queue access methods.
*/
#define QTAILQ_EMPTY(head) ((head)->tqh_first == NULL)
#define QTAILQ_FIRST(head) ((head)->tqh_first)
#define QTAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define QTAILQ_IN_USE(elm, field) ((elm)->field.tqe_prev != NULL)
#define QTAILQ_LAST(head, headname) \
(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define QTAILQ_PREV(elm, headname, field) \
(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
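/*
 * Example: a minimal QTAILQ usage sketch.  The struct and variable names
 * (Job, JobList, jobs) are hypothetical:
 *
 *     typedef struct Job {
 *         int id;
 *         QTAILQ_ENTRY(Job) link;
 *     } Job;
 *
 *     static QTAILQ_HEAD(JobList, Job) jobs = QTAILQ_HEAD_INITIALIZER(jobs);
 *
 *     void job_add(Job *job)
 *     {
 *         QTAILQ_INSERT_TAIL(&jobs, job, link);      // O(1) append
 *     }
 *
 *     void job_remove(Job *job)
 *     {
 *         QTAILQ_REMOVE(&jobs, job, link);           // O(1), any element
 *     }
 *
 *     void job_scan_newest_first(void)
 *     {
 *         Job *j;
 *         QTAILQ_FOREACH_REVERSE(j, &jobs, JobList, link) {
 *             // visit j, newest first
 *         }
 *     }
 */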
#define field_at_offset(base, offset, type) \
((type) (((char *) (base)) + (offset)))
typedef struct DUMMY_Q_ENTRY DUMMY_Q_ENTRY;
typedef struct DUMMY_Q DUMMY_Q;
struct DUMMY_Q_ENTRY {
QTAILQ_ENTRY(DUMMY_Q_ENTRY) next;
};
struct DUMMY_Q {
QTAILQ_HEAD(DUMMY_Q_HEAD, DUMMY_Q_ENTRY) head;
};
#define dummy_q ((DUMMY_Q *) 0)
#define dummy_qe ((DUMMY_Q_ENTRY *) 0)
/*
* Offsets of layout of a tail queue head.
*/
#define QTAILQ_FIRST_OFFSET (offsetof(typeof(dummy_q->head), tqh_first))
#define QTAILQ_LAST_OFFSET (offsetof(typeof(dummy_q->head), tqh_last))
/*
* Raw access of elements of a tail queue
*/
#define QTAILQ_RAW_FIRST(head) \
(*field_at_offset(head, QTAILQ_FIRST_OFFSET, void **))
#define QTAILQ_RAW_TQH_LAST(head) \
(*field_at_offset(head, QTAILQ_LAST_OFFSET, void ***))
/*
* Offsets of layout of a tail queue element.
*/
#define QTAILQ_NEXT_OFFSET (offsetof(typeof(dummy_qe->next), tqe_next))
#define QTAILQ_PREV_OFFSET (offsetof(typeof(dummy_qe->next), tqe_prev))
/*
* Raw access of elements of a tail entry
*/
#define QTAILQ_RAW_NEXT(elm, entry) \
(*field_at_offset(elm, entry + QTAILQ_NEXT_OFFSET, void **))
#define QTAILQ_RAW_TQE_PREV(elm, entry) \
(*field_at_offset(elm, entry + QTAILQ_PREV_OFFSET, void ***))
/*
* Tail queue traversal using pointer arithmetic.
*/
#define QTAILQ_RAW_FOREACH(elm, head, entry) \
for ((elm) = QTAILQ_RAW_FIRST(head); \
(elm); \
(elm) = QTAILQ_RAW_NEXT(elm, entry))
/*
* Tail queue insertion using pointer arithmetic.
*/
#define QTAILQ_RAW_INSERT_TAIL(head, elm, entry) do { \
QTAILQ_RAW_NEXT(elm, entry) = NULL; \
QTAILQ_RAW_TQE_PREV(elm, entry) = QTAILQ_RAW_TQH_LAST(head); \
*QTAILQ_RAW_TQH_LAST(head) = (elm); \
QTAILQ_RAW_TQH_LAST(head) = &QTAILQ_RAW_NEXT(elm, entry); \
} while (/*CONSTCOND*/0)
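/*
 * Example: a minimal sketch of raw traversal.  The element type (Obj) and
 * the helper (count_elements) are hypothetical; the raw macros only need
 * the byte offset of the QTAILQ_ENTRY inside the element, known at run
 * time:
 *
 *     typedef struct Obj {
 *         int value;
 *         QTAILQ_ENTRY(Obj) node;
 *     } Obj;
 *
 *     size_t count_elements(void *head)     // head points at a QTAILQ_HEAD
 *     {
 *         size_t entry_off = offsetof(Obj, node);
 *         size_t n = 0;
 *         void *elm;
 *         QTAILQ_RAW_FOREACH(elm, head, entry_off) {
 *             n++;
 *         }
 *         return n;
 *     }
 */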
#endif /* QEMU_SYS_QUEUE_H */