aio: rename bh_lock to list_lock
This will be used for AioHandlers too. There is going to be little or no
contention, so it is better to reuse the same lock.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Message-id: 20170112180800.21085-2-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
parent 8f90b5e91d
commit cf2c02c8ea

async.c (20 lines changed)
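As background for the rename, a minimal standalone sketch of the pattern the patch keeps: a single list_lock serializing insertions into the bottom-half list, with a name now generic enough to also cover the AioHandler list later. This is not QEMU code; pthread_mutex_t stands in for QemuMutex and the structures are heavily reduced.

    /* Simplified illustration only; names are modeled on async.c. */
    #include <pthread.h>
    #include <stdlib.h>

    typedef struct BH {
        void (*cb)(void *opaque);
        void *opaque;
        struct BH *next;
    } BH;

    typedef struct Ctx {
        /* Protects list insertion/removal; per the commit message, the same
         * lock can also cover AioHandlers, since contention is expected to
         * be low. */
        pthread_mutex_t list_lock;          /* formerly "bh_lock" */
        BH *first_bh;
    } Ctx;

    static BH *bh_new(Ctx *ctx, void (*cb)(void *), void *opaque)
    {
        BH *bh = calloc(1, sizeof(*bh));
        bh->cb = cb;
        bh->opaque = opaque;

        pthread_mutex_lock(&ctx->list_lock);
        bh->next = ctx->first_bh;           /* push onto the singly linked list */
        ctx->first_bh = bh;
        pthread_mutex_unlock(&ctx->list_lock);
        return bh;
    }

In the real code, qemu_mutex_lock(&ctx->list_lock) plays the role of the pthread calls, and smp_wmb() orders the member stores before the new element is published to lock-free readers, as the comment in the diff below notes.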
@@ -53,14 +53,14 @@ void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
         .cb = cb,
         .opaque = opaque,
     };
-    qemu_mutex_lock(&ctx->bh_lock);
+    qemu_mutex_lock(&ctx->list_lock);
     bh->next = ctx->first_bh;
     bh->scheduled = 1;
     bh->deleted = 1;
     /* Make sure that the members are ready before putting bh into list */
     smp_wmb();
     ctx->first_bh = bh;
-    qemu_mutex_unlock(&ctx->bh_lock);
+    qemu_mutex_unlock(&ctx->list_lock);
     aio_notify(ctx);
 }
 
@@ -73,12 +73,12 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
         .cb = cb,
         .opaque = opaque,
     };
-    qemu_mutex_lock(&ctx->bh_lock);
+    qemu_mutex_lock(&ctx->list_lock);
     bh->next = ctx->first_bh;
     /* Make sure that the members are ready before putting bh into list */
     smp_wmb();
     ctx->first_bh = bh;
-    qemu_mutex_unlock(&ctx->bh_lock);
+    qemu_mutex_unlock(&ctx->list_lock);
     return bh;
 }
 
@@ -120,7 +120,7 @@ int aio_bh_poll(AioContext *ctx)
 
     /* remove deleted bhs */
     if (!ctx->walking_bh) {
-        qemu_mutex_lock(&ctx->bh_lock);
+        qemu_mutex_lock(&ctx->list_lock);
         bhp = &ctx->first_bh;
         while (*bhp) {
             bh = *bhp;
@@ -131,7 +131,7 @@ int aio_bh_poll(AioContext *ctx)
                 bhp = &bh->next;
             }
         }
-        qemu_mutex_unlock(&ctx->bh_lock);
+        qemu_mutex_unlock(&ctx->list_lock);
     }
 
     return ret;
@@ -270,7 +270,7 @@ aio_ctx_finalize(GSource *source)
     }
 #endif
 
-    qemu_mutex_lock(&ctx->bh_lock);
+    qemu_mutex_lock(&ctx->list_lock);
     while (ctx->first_bh) {
         QEMUBH *next = ctx->first_bh->next;
 
@@ -280,12 +280,12 @@ aio_ctx_finalize(GSource *source)
         g_free(ctx->first_bh);
         ctx->first_bh = next;
     }
-    qemu_mutex_unlock(&ctx->bh_lock);
+    qemu_mutex_unlock(&ctx->list_lock);
 
     aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL);
     event_notifier_cleanup(&ctx->notifier);
     qemu_rec_mutex_destroy(&ctx->lock);
-    qemu_mutex_destroy(&ctx->bh_lock);
+    qemu_mutex_destroy(&ctx->list_lock);
     timerlistgroup_deinit(&ctx->tlg);
 }
 
@@ -381,7 +381,7 @@ AioContext *aio_context_new(Error **errp)
     ctx->linux_aio = NULL;
 #endif
     ctx->thread_pool = NULL;
-    qemu_mutex_init(&ctx->bh_lock);
+    qemu_mutex_init(&ctx->list_lock);
     qemu_rec_mutex_init(&ctx->lock);
     timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
 
@@ -91,7 +91,7 @@ struct AioContext {
     uint32_t notify_me;
 
     /* lock to protect between bh's adders and deleter */
-    QemuMutex bh_lock;
+    QemuMutex list_lock;
 
     /* Anchor of the list of Bottom Halves belonging to the context */
     struct QEMUBH *first_bh;