async: the main AioContext is only "current" if under the BQL

If we want to wake up a coroutine from a worker thread, aio_co_wake()
currently does not work.  In that scenario, aio_co_wake() calls
aio_co_enter(), but there is no current AioContext and therefore
qemu_get_current_aio_context() returns the main thread.  aio_co_wake()
then attempts to call aio_context_acquire() instead of going through
aio_co_schedule().

The default case of qemu_get_current_aio_context() was added to cover
synchronous I/O started from the vCPU thread, but the main and vCPU
threads are quite different.  The main thread is an I/O thread itself,
only running a more complicated event loop; the vCPU thread instead
is essentially a worker thread that occasionally calls
qemu_mutex_lock_iothread().  It is only in those critical sections
that it acts as if it were the home thread of the main AioContext.

Therefore, this patch detaches qemu_get_current_aio_context() from
iothreads, which is a useless complication.  The AioContext pointer
is stored directly in the thread-local variable, including for the
main loop.  Worker threads (including vCPU threads) optionally behave
as temporary home threads if they have taken the big QEMU lock,
but if that is not the case they will always schedule coroutines
on remote threads via aio_co_schedule().

With this change, the stub qemu_mutex_iothread_locked() must be changed
from true to false.  The previous value of true was needed because the
main thread did not have an AioContext in the thread-local variable,
but now it does have one.

Reported-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20210609122234.544153-1-pbonzini@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Tested-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
[eblake: tweak commit message per Vladimir's review]
Signed-off-by: Eric Blake <eblake@redhat.com>
This commit is contained in:
Paolo Bonzini 2021-06-09 14:22:34 +02:00 committed by Eric Blake
parent 3ccf6cd0e3
commit 5f50be9b58
8 changed files with 28 additions and 27 deletions

View File

@ -691,10 +691,13 @@ void aio_co_enter(AioContext *ctx, struct Coroutine *co);
  * Return the AioContext whose event loop runs in the current thread.
  *
  * If called from an IOThread this will be the IOThread's AioContext.  If
- * called from another thread it will be the main loop AioContext.
+ * called from the main thread or with the "big QEMU lock" taken it
+ * will be the main loop AioContext.
  */
 AioContext *qemu_get_current_aio_context(void);
+void qemu_set_current_aio_context(AioContext *ctx);
/** /**
* aio_context_setup: * aio_context_setup:
* @ctx: the aio context * @ctx: the aio context

View File

@ -39,13 +39,6 @@ DECLARE_CLASS_CHECKERS(IOThreadClass, IOTHREAD,
#define IOTHREAD_POLL_MAX_NS_DEFAULT 0ULL #define IOTHREAD_POLL_MAX_NS_DEFAULT 0ULL
#endif #endif
static __thread IOThread *my_iothread;
AioContext *qemu_get_current_aio_context(void)
{
return my_iothread ? my_iothread->ctx : qemu_get_aio_context();
}
static void *iothread_run(void *opaque) static void *iothread_run(void *opaque)
{ {
IOThread *iothread = opaque; IOThread *iothread = opaque;
@ -56,7 +49,7 @@ static void *iothread_run(void *opaque)
* in this new thread uses glib. * in this new thread uses glib.
*/ */
g_main_context_push_thread_default(iothread->worker_context); g_main_context_push_thread_default(iothread->worker_context);
-    my_iothread = iothread;
+    qemu_set_current_aio_context(iothread->ctx);
iothread->thread_id = qemu_get_thread_id(); iothread->thread_id = qemu_get_thread_id();
qemu_sem_post(&iothread->init_done_sem); qemu_sem_post(&iothread->init_done_sem);

View File

@ -3,7 +3,7 @@
 bool qemu_mutex_iothread_locked(void)
 {
-    return true;
+    return false;
 }
void qemu_mutex_lock_iothread_impl(const char *file, int line) void qemu_mutex_lock_iothread_impl(const char *file, int line)

View File

@ -1,8 +0,0 @@
#include "qemu/osdep.h"
#include "block/aio.h"
#include "qemu/main-loop.h"
AioContext *qemu_get_current_aio_context(void)
{
return qemu_get_aio_context();
}

View File

@ -16,7 +16,6 @@ stub_ss.add(files('fw_cfg.c'))
stub_ss.add(files('gdbstub.c')) stub_ss.add(files('gdbstub.c'))
stub_ss.add(files('get-vm-name.c')) stub_ss.add(files('get-vm-name.c'))
stub_ss.add(when: 'CONFIG_LINUX_IO_URING', if_true: files('io_uring.c')) stub_ss.add(when: 'CONFIG_LINUX_IO_URING', if_true: files('io_uring.c'))
stub_ss.add(files('iothread.c'))
stub_ss.add(files('iothread-lock.c')) stub_ss.add(files('iothread-lock.c'))
stub_ss.add(files('isa-bus.c')) stub_ss.add(files('isa-bus.c'))
stub_ss.add(files('is-daemonized.c')) stub_ss.add(files('is-daemonized.c'))

View File

@ -30,13 +30,6 @@ struct IOThread {
bool stopping; bool stopping;
}; };
static __thread IOThread *my_iothread;
AioContext *qemu_get_current_aio_context(void)
{
return my_iothread ? my_iothread->ctx : qemu_get_aio_context();
}
static void iothread_init_gcontext(IOThread *iothread) static void iothread_init_gcontext(IOThread *iothread)
{ {
GSource *source; GSource *source;
@ -54,9 +47,9 @@ static void *iothread_run(void *opaque)
rcu_register_thread(); rcu_register_thread();
my_iothread = iothread;
qemu_mutex_lock(&iothread->init_done_lock); qemu_mutex_lock(&iothread->init_done_lock);
iothread->ctx = aio_context_new(&error_abort); iothread->ctx = aio_context_new(&error_abort);
qemu_set_current_aio_context(iothread->ctx);
/* /*
* We must connect the ctx to a GMainContext, because in older versions * We must connect the ctx to a GMainContext, because in older versions

View File

@ -649,3 +649,23 @@ void aio_context_release(AioContext *ctx)
{ {
qemu_rec_mutex_unlock(&ctx->lock); qemu_rec_mutex_unlock(&ctx->lock);
} }
static __thread AioContext *my_aiocontext;
AioContext *qemu_get_current_aio_context(void)
{
if (my_aiocontext) {
return my_aiocontext;
}
if (qemu_mutex_iothread_locked()) {
/* Possibly in a vCPU thread. */
return qemu_get_aio_context();
}
return NULL;
}
void qemu_set_current_aio_context(AioContext *ctx)
{
assert(!my_aiocontext);
my_aiocontext = ctx;
}

View File

@ -170,6 +170,7 @@ int qemu_init_main_loop(Error **errp)
if (!qemu_aio_context) { if (!qemu_aio_context) {
return -EMFILE; return -EMFILE;
} }
qemu_set_current_aio_context(qemu_aio_context);
qemu_notify_bh = qemu_bh_new(notify_event_cb, NULL); qemu_notify_bh = qemu_bh_new(notify_event_cb, NULL);
gpollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD)); gpollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
src = aio_get_g_source(qemu_aio_context); src = aio_get_g_source(qemu_aio_context);