block: make BDRV_POLL_WHILE() re-entrancy safe
Nested BDRV_POLL_WHILE() calls can occur. Currently assert(!wait_->need_kick) fails in AIO_WAIT_WHILE() when this happens.

This patch converts the bool wait_->need_kick flag into an unsigned wait_->num_waiters counter. Nesting works correctly because outer AIO_WAIT_WHILE() callers evaluate the condition again after the inner caller completes (invoking the inner caller counts as aio_poll() progress).

Reported-by: "fuweiwei (C)" <fuweiwei2@huawei.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20180307124619.6218-1-stefanha@redhat.com
Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
This commit is contained in:
parent
e4ae62b802
commit
7376eda7c2
@ -50,8 +50,8 @@
|
||||
* }
|
||||
*/
|
||||
/*
 * AioWait: synchronization state for AIO_WAIT_WHILE() callers.
 *
 * Post-patch form: the former `bool need_kick` flag was replaced by a
 * counter so that nested AIO_WAIT_WHILE() calls work — each nesting level
 * increments the counter, and aio_wait_kick() only needs to know whether
 * any waiter exists.
 */
typedef struct {
    /* Number of waiting AIO_WAIT_WHILE() callers. Accessed with atomic ops. */
    unsigned num_waiters;
} AioWait;
|
||||
|
||||
/**
|
||||
@ -71,35 +71,34 @@ typedef struct {
|
||||
* wait on conditions between two IOThreads since that could lead to deadlock,
|
||||
* go via the main loop instead.
|
||||
*/
|
||||
#define AIO_WAIT_WHILE(wait, ctx, cond) ({ \
|
||||
bool waited_ = false; \
|
||||
bool busy_ = true; \
|
||||
AioWait *wait_ = (wait); \
|
||||
AioContext *ctx_ = (ctx); \
|
||||
if (in_aio_context_home_thread(ctx_)) { \
|
||||
while ((cond) || busy_) { \
|
||||
busy_ = aio_poll(ctx_, (cond)); \
|
||||
waited_ |= !!(cond) | busy_; \
|
||||
} \
|
||||
} else { \
|
||||
assert(qemu_get_current_aio_context() == \
|
||||
qemu_get_aio_context()); \
|
||||
assert(!wait_->need_kick); \
|
||||
/* Set wait_->need_kick before evaluating cond. */ \
|
||||
atomic_mb_set(&wait_->need_kick, true); \
|
||||
while (busy_) { \
|
||||
if ((cond)) { \
|
||||
waited_ = busy_ = true; \
|
||||
aio_context_release(ctx_); \
|
||||
aio_poll(qemu_get_aio_context(), true); \
|
||||
aio_context_acquire(ctx_); \
|
||||
} else { \
|
||||
busy_ = aio_poll(ctx_, false); \
|
||||
waited_ |= busy_; \
|
||||
} \
|
||||
} \
|
||||
atomic_set(&wait_->need_kick, false); \
|
||||
} \
|
||||
/*
 * AIO_WAIT_WHILE: busy-wait until @cond becomes false, driving event loop
 * progress while waiting.
 *
 * @wait: AioWait synchronization state (num_waiters counter)
 * @ctx:  AioContext whose events we service while waiting
 * @cond: re-evaluated after each poll iteration; loop exits when false
 *
 * Evaluates to true (as the ({ ... }) statement-expression result) if any
 * progress was made, false otherwise.
 *
 * Re-entrancy safe: num_waiters is a counter rather than a flag, so nested
 * calls work — outer callers re-evaluate @cond after an inner caller
 * completes, and invoking the inner caller counts as aio_poll() progress.
 *
 * NOTE(review): must only be called from the AioContext's home thread or
 * from the main loop thread (asserted below) — not between two IOThreads.
 */
#define AIO_WAIT_WHILE(wait, ctx, cond) ({                         \
    bool waited_ = false;                                          \
    bool busy_ = true;                                             \
    AioWait *wait_ = (wait);                                       \
    AioContext *ctx_ = (ctx);                                      \
    if (in_aio_context_home_thread(ctx_)) {                        \
        /* Home thread: poll ctx_ directly until cond clears and   \
         * no further progress is possible. */                     \
        while ((cond) || busy_) {                                  \
            busy_ = aio_poll(ctx_, (cond));                        \
            waited_ |= !!(cond) | busy_;                           \
        }                                                          \
    } else {                                                       \
        /* Otherwise we must be the main loop thread. */           \
        assert(qemu_get_current_aio_context() ==                   \
               qemu_get_aio_context());                            \
        /* Increment wait_->num_waiters before evaluating cond. */ \
        atomic_inc(&wait_->num_waiters);                           \
        while (busy_) {                                            \
            if ((cond)) {                                          \
                /* Drop ctx_'s lock so its home thread can make    \
                 * progress; block in the main loop until kicked. */\
                waited_ = busy_ = true;                            \
                aio_context_release(ctx_);                         \
                aio_poll(qemu_get_aio_context(), true);            \
                aio_context_acquire(ctx_);                         \
            } else {                                               \
                busy_ = aio_poll(ctx_, false);                     \
                waited_ |= busy_;                                  \
            }                                                      \
        }                                                          \
        atomic_dec(&wait_->num_waiters);                           \
    }                                                              \
    waited_; })
|
||||
|
||||
/**
|
||||
|
@ -34,7 +34,7 @@ static void dummy_bh_cb(void *opaque)
|
||||
/*
 * Wake up any AIO_WAIT_WHILE() callers blocked in the main loop.
 *
 * @wait: the AioWait state shared with the waiters
 *
 * Schedules a dummy BH in the main-loop AioContext so that a waiter
 * blocked in aio_poll(qemu_get_aio_context(), true) returns and
 * re-evaluates its condition. No-op when nobody is waiting.
 */
void aio_wait_kick(AioWait *wait)
{
    /* The barrier (or an atomic op) is in the caller. */
    if (atomic_read(&wait->num_waiters)) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
    }
}
|
||||
|
Loading…
Reference in New Issue
Block a user