aio-posix: remove walking_handlers, protecting AioHandler list with list_lock
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20170112180800.21085-8-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 2bbf11d753
parent abf90d3921
 aio-posix.c | 65 +++++++++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 39 insertions(+), 26 deletions(-)
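The patch replaces the bare ctx->walking_handlers counter with ctx->list_lock, a QemuLockCnt that combines a count of active list walkers with a mutex for list writers. Below is a minimal sketch of that access pattern, assuming the qemu_lockcnt_* API and RCU list macros that the diff itself uses; Node, list, walk() and remove_node() are simplified stand-ins for illustration, not QEMU's actual AioHandler code.

/*
 * Sketch of the lockcnt pattern this patch adopts, assuming the
 * qemu_lockcnt_* API from "qemu/thread.h" and the RCU-friendly list
 * macros from "qemu/rcu_queue.h".
 */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/rcu_queue.h"

typedef struct Node {
    QLIST_ENTRY(Node) next;
    bool deleted;
} Node;

static QLIST_HEAD(, Node) list = QLIST_HEAD_INITIALIZER(list);
static QemuLockCnt list_lock;   /* call qemu_lockcnt_init(&list_lock) first */

/* Reader side: take one count reference, walk with the RCU iterator, drop
 * the reference.  Walkers never block each other, and nodes are never
 * freed while the count is nonzero, only marked deleted. */
static void walk(void)
{
    Node *node;

    qemu_lockcnt_inc(&list_lock);
    QLIST_FOREACH_RCU(node, &list, next) {
        if (node->deleted) {
            continue;
        }
        /* ... process node ... */
    }
    qemu_lockcnt_dec(&list_lock);
}

/* Writer side: take the mutex.  If walkers are active, defer the free by
 * marking the node, because freeing it now could leave a walker on a
 * dangling pointer.  Otherwise unlink and free immediately. */
static void remove_node(Node *node)
{
    qemu_lockcnt_lock(&list_lock);
    if (qemu_lockcnt_count(&list_lock)) {
        node->deleted = true;            /* a later walker reclaims it */
        qemu_lockcnt_unlock(&list_lock);
    } else {
        QLIST_REMOVE(node, next);
        qemu_lockcnt_unlock(&list_lock);
        g_free(node);
    }
}

The point of the split is that walking the handler list (the hot path in aio_poll, aio_pending, and dispatch) only touches an atomic counter, while modification, which is rare, pays for the mutex.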
@@ -16,7 +16,7 @@
 #include "qemu/osdep.h"
 #include "qemu-common.h"
 #include "block/block.h"
-#include "qemu/queue.h"
+#include "qemu/rcu_queue.h"
 #include "qemu/sockets.h"
 #include "qemu/cutils.h"
 #include "trace.h"
@@ -66,7 +66,7 @@ static bool aio_epoll_try_enable(AioContext *ctx)
     AioHandler *node;
     struct epoll_event event;
 
-    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
         int r;
         if (node->deleted || !node->pfd.events) {
             continue;
@@ -212,24 +212,27 @@ void aio_set_fd_handler(AioContext *ctx,
     bool is_new = false;
     bool deleted = false;
 
+    qemu_lockcnt_lock(&ctx->list_lock);
+
     node = find_aio_handler(ctx, fd);
 
     /* Are we deleting the fd handler? */
     if (!io_read && !io_write && !io_poll) {
         if (node == NULL) {
+            qemu_lockcnt_unlock(&ctx->list_lock);
             return;
         }
 
         g_source_remove_poll(&ctx->source, &node->pfd);
 
         /* If the lock is held, just mark the node as deleted */
-        if (ctx->walking_handlers) {
+        if (qemu_lockcnt_count(&ctx->list_lock)) {
             node->deleted = 1;
             node->pfd.revents = 0;
         } else {
             /* Otherwise, delete it for real.  We can't just mark it as
-             * deleted because deleted nodes are only cleaned up after
-             * releasing the walking_handlers lock.
+             * deleted because deleted nodes are only cleaned up while
+             * no one is walking the handlers list.
              */
             QLIST_REMOVE(node, node);
             deleted = true;
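This hunk is the writer side of the new scheme: aio_set_fd_handler now takes the list_lock mutex for the whole update, and the old walking_handlers test becomes qemu_lockcnt_count(&ctx->list_lock). If any walker still holds a count reference, the node is only marked deleted; freeing it immediately could leave a concurrent walker on a dangling pointer, so real removal is deferred until no one is walking the list.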
@@ -243,7 +246,7 @@ void aio_set_fd_handler(AioContext *ctx,
             /* Alloc and insert if it's not already there */
             node = g_new0(AioHandler, 1);
             node->pfd.fd = fd;
-            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
+            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
 
             g_source_add_poll(&ctx->source, &node->pfd);
             is_new = true;
@@ -265,6 +268,7 @@ void aio_set_fd_handler(AioContext *ctx,
     }
 
     aio_epoll_update(ctx, node, is_new);
+    qemu_lockcnt_unlock(&ctx->list_lock);
     aio_notify(ctx);
 
     if (deleted) {
@@ -316,8 +320,8 @@ static void poll_set_started(AioContext *ctx, bool started)
 
     ctx->poll_started = started;
 
-    ctx->walking_handlers++;
-    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+    qemu_lockcnt_inc(&ctx->list_lock);
+    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
         IOHandler *fn;
 
         if (node->deleted) {
@@ -334,7 +338,7 @@ static void poll_set_started(AioContext *ctx, bool started)
             fn(node->opaque);
         }
     }
-    ctx->walking_handlers--;
+    qemu_lockcnt_dec(&ctx->list_lock);
 }
 
 
@@ -349,22 +353,32 @@ bool aio_prepare(AioContext *ctx)
 bool aio_pending(AioContext *ctx)
 {
     AioHandler *node;
+    bool result = false;
 
-    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+    /*
+     * We have to walk very carefully in case aio_set_fd_handler is
+     * called while we're walking.
+     */
+    qemu_lockcnt_inc(&ctx->list_lock);
+
+    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
         int revents;
 
         revents = node->pfd.revents & node->pfd.events;
         if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read &&
             aio_node_check(ctx, node->is_external)) {
-            return true;
+            result = true;
+            break;
         }
         if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write &&
             aio_node_check(ctx, node->is_external)) {
-            return true;
+            result = true;
+            break;
         }
     }
+    qemu_lockcnt_dec(&ctx->list_lock);
 
-    return false;
+    return result;
 }
 
 static bool aio_dispatch_handlers(AioContext *ctx)
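Note how the early "return true" statements in aio_pending become "result = true; break;": the function must now always fall through to qemu_lockcnt_dec() so that the count reference taken at entry is released. Returning with the count still held would leak the reference and keep the count permanently elevated, so deleted nodes could never be reclaimed.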
@@ -376,9 +390,9 @@ static bool aio_dispatch_handlers(AioContext *ctx)
      * We have to walk very carefully in case aio_set_fd_handler is
      * called while we're walking.
      */
-    ctx->walking_handlers++;
+    qemu_lockcnt_inc(&ctx->list_lock);
 
-    QLIST_FOREACH_SAFE(node, &ctx->aio_handlers, node, tmp) {
+    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
         int revents;
 
         revents = node->pfd.revents & node->pfd.events;
@@ -404,16 +418,15 @@ static bool aio_dispatch_handlers(AioContext *ctx)
         }
 
         if (node->deleted) {
-            ctx->walking_handlers--;
-            if (!ctx->walking_handlers) {
+            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                 QLIST_REMOVE(node, node);
                 g_free(node);
+                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
             }
-            ctx->walking_handlers++;
         }
     }
 
-    ctx->walking_handlers--;
+    qemu_lockcnt_dec(&ctx->list_lock);
     return progress;
 }
 
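The reclamation path above uses the lockcnt dec-if-lock idiom: a walker that encounters a deleted node tries to become the exclusive owner by giving up its count reference and taking the mutex, atomically and only if it was the last walker. A short sketch of the idiom, reusing the hypothetical list_lock/Node names from the sketch near the top:

/* Called while walking with QLIST_FOREACH_SAFE_RCU and holding one count
 * reference on list_lock.  qemu_lockcnt_dec_if_lock() succeeds only when
 * ours is the last reference: it drops the count to zero and acquires the
 * mutex.  Otherwise it leaves our reference in place and returns false,
 * and the node simply stays marked for a later walker to reclaim. */
if (node->deleted) {
    if (qemu_lockcnt_dec_if_lock(&list_lock)) {
        QLIST_REMOVE(node, next);
        g_free(node);
        /* Restore our count reference and drop the mutex in one step so
         * the enclosing walk can continue safely. */
        qemu_lockcnt_inc_and_unlock(&list_lock);
    }
}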
@@ -493,7 +506,7 @@ static bool run_poll_handlers_once(AioContext *ctx)
     bool progress = false;
     AioHandler *node;
 
-    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
         if (!node->deleted && node->io_poll &&
             node->io_poll(node->opaque)) {
             progress = true;
@@ -514,7 +527,7 @@ static bool run_poll_handlers_once(AioContext *ctx)
  * Note that ctx->notify_me must be non-zero so this function can detect
  * aio_notify().
  *
- * Note that the caller must have incremented ctx->walking_handlers.
+ * Note that the caller must have incremented ctx->list_lock.
  *
  * Returns: true if progress was made, false otherwise
  */
@@ -524,7 +537,7 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
     int64_t end_time;
 
     assert(ctx->notify_me);
-    assert(ctx->walking_handlers > 0);
+    assert(qemu_lockcnt_count(&ctx->list_lock) > 0);
     assert(ctx->poll_disable_cnt == 0);
 
     trace_run_poll_handlers_begin(ctx, max_ns);
@@ -546,7 +559,7 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
  *
  * ctx->notify_me must be non-zero so this function can detect aio_notify().
  *
- * Note that the caller must have incremented ctx->walking_handlers.
+ * Note that the caller must have incremented ctx->list_lock.
  *
  * Returns: true if progress was made, false otherwise
  */
@@ -597,7 +610,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
         atomic_add(&ctx->notify_me, 2);
     }
 
-    ctx->walking_handlers++;
+    qemu_lockcnt_inc(&ctx->list_lock);
 
     if (ctx->poll_max_ns) {
         start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
@@ -611,7 +624,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
     /* fill pollfds */
 
     if (!aio_epoll_enabled(ctx)) {
-        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+        QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
             if (!node->deleted && node->pfd.events
                 && aio_node_check(ctx, node->is_external)) {
                 add_pollfd(node);
@@ -696,7 +709,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
     }
 
     npfd = 0;
-    ctx->walking_handlers--;
+    qemu_lockcnt_dec(&ctx->list_lock);
 
     /* Run dispatch even if there were no readable fds to run timers */
    if (aio_dispatch(ctx, ret > 0)) {