7376eda7c2
Nested BDRV_POLL_WHILE() calls can occur. Currently assert(!wait_->wakeup) fails in AIO_WAIT_WHILE() when this happens. This patch converts the bool wait_->need_kick flag to an unsigned wait_->num_waiters counter. Nesting works correctly because outer AIO_WAIT_WHILE() callers evaluate the condition again after the inner caller completes (invoking the inner caller counts as aio_poll() progress). Reported-by: "fuweiwei (C)" <fuweiwei2@huawei.com> Reviewed-by: Eric Blake <eblake@redhat.com> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> Message-id: 20180307124619.6218-1-stefanha@redhat.com Cc: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
72 lines
2.1 KiB
C
/*
 * AioContext wait support
 *
 * Copyright (C) 2018 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
|
|
#include "qemu/main-loop.h"
|
|
#include "block/aio-wait.h"
|
|
|
|
/*
 * No-op bottom half.  Scheduling it is enough: the act of running a BH
 * makes the waiter's aio_poll() return so AIO_WAIT_WHILE() can
 * re-evaluate its condition.
 */
static void dummy_bh_cb(void *opaque)
{
    /* The point is to make AIO_WAIT_WHILE()'s aio_poll() return */
}

/*
 * Wake up any AIO_WAIT_WHILE() callers blocked on @wait by scheduling a
 * dummy BH in the main loop's AioContext.  Cheap no-op when nobody waits.
 */
void aio_wait_kick(AioWait *wait)
{
    /* The barrier (or an atomic op) is in the caller. */
    if (!atomic_read(&wait->num_waiters)) {
        return; /* no waiters -> nothing to kick */
    }

    aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
}

/* State shared between aio_wait_bh_oneshot() and the BH it schedules. */
typedef struct {
    AioWait wait;    /* waiter bookkeeping consumed by AIO_WAIT_WHILE() */
    bool done;       /* set by the BH once cb has returned */
    QEMUBHFunc *cb;  /* user callback to run in the target AioContext */
    void *opaque;    /* argument forwarded to cb */
} AioWaitBHData;

/* Context: BH in IOThread */
|
|
static void aio_wait_bh(void *opaque)
|
|
{
|
|
AioWaitBHData *data = opaque;
|
|
|
|
data->cb(data->opaque);
|
|
|
|
data->done = true;
|
|
aio_wait_kick(&data->wait);
|
|
}
|
|
|
|
/*
 * Run @cb(@opaque) as a BH in @ctx and block until it has completed.
 *
 * Must be called from the main loop thread (asserted below); @ctx is
 * typically an IOThread's AioContext.  @data lives on this stack frame,
 * which is safe because AIO_WAIT_WHILE() does not return until the BH
 * has set data.done.
 */
void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    AioWaitBHData data = {
        .cb = cb,
        .opaque = opaque,
    };

    /* This function may only run in the main loop's own context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    /* Schedule the BH first, then poll until it reports completion. */
    aio_bh_schedule_oneshot(ctx, aio_wait_bh, &data);
    AIO_WAIT_WHILE(&data.wait, ctx, !data.done);
}