monitor: cleanup fetching of QMP requests

Use a continue statement so that "after going to sleep" is treated the same
way as "after processing a request".  Pull the monitor_lock critical
section out of monitor_qmp_requests_pop_any_with_lock() and protect
qmp_dispatcher_co_shutdown with the monitor_lock.

The two changes are hard to separate because monitor_qmp_dispatcher_co()
previously had complicated logic to check for shutdown both before
and after going to sleep.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author: Paolo Bonzini <pbonzini@redhat.com>
Date:   2023-03-03 12:51:33 +01:00
Parent: 3e6bed619a
Commit: 0ff2553701

2 changed files with 22 additions and 27 deletions
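
After the change, every iteration of the dispatcher loop has a single shape:
check for shutdown and fetch a request under monitor_lock, then either
dispatch the request or yield and retry. A simplified sketch of that shape,
condensed from the patch below (dispatch details and the busy-flag handshake
elided):

    while (true) {
        WITH_QEMU_LOCK_GUARD(&monitor_lock) {
            if (qmp_dispatcher_co_shutdown) {
                return;    /* the guard releases monitor_lock on return */
            }
            req_obj = monitor_qmp_requests_pop_any_with_lock();
        }

        if (!req_obj) {
            /* queue empty: sleep, then retry from the top */
            qemu_coroutine_yield();
            continue;
        }

        /* ... dispatch req_obj ... */
    }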

monitor/monitor.c

@@ -56,7 +56,10 @@ IOThread *mon_iothread;
 
 /* Coroutine to dispatch the requests received from I/O thread */
 Coroutine *qmp_dispatcher_co;
-/* Set to true when the dispatcher coroutine should terminate */
+/*
+ * Set to true when the dispatcher coroutine should terminate. Protected
+ * by monitor_lock.
+ */
 bool qmp_dispatcher_co_shutdown;
 
 /*
@@ -679,7 +682,9 @@ void monitor_cleanup(void)
      * we'll just leave them in the queue without sending a response
      * and monitor_data_destroy() will free them.
      */
-    qmp_dispatcher_co_shutdown = true;
+    WITH_QEMU_LOCK_GUARD(&monitor_lock) {
+        qmp_dispatcher_co_shutdown = true;
+    }
     if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) {
         aio_co_wake(qmp_dispatcher_co);
     }
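
For readers unfamiliar with the macro: WITH_QEMU_LOCK_GUARD() (from
include/qemu/lockable.h) is a scoped guard built from a one-iteration for
loop plus __attribute__((cleanup)), so the mutex is dropped on every exit
from the attached block, including an early return. A minimal standalone
illustration of the underlying pattern, using pthreads rather than QEMU's
QemuLockable (all names here are invented for the example):

    #include <pthread.h>
    #include <stdio.h>

    static void unlock_cleanup(pthread_mutex_t **m)
    {
        if (*m) {
            pthread_mutex_unlock(*m);
        }
    }

    /* One-iteration for loop: the cleanup handler unlocks on early exits,
     * the third clause unlocks on normal fall-through. */
    #define WITH_LOCK_GUARD(m)                                      \
        for (pthread_mutex_t *guard_                                \
                 __attribute__((cleanup(unlock_cleanup)))           \
                 = (pthread_mutex_lock(m), (m));                    \
             guard_;                                                \
             pthread_mutex_unlock(guard_), guard_ = NULL)

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int shutdown_flag;

    static int fetch_one(void)
    {
        WITH_LOCK_GUARD(&lock) {
            if (shutdown_flag) {
                return -1;        /* unlocks via the cleanup handler */
            }
        }                         /* unlocks via the third for clause */
        return 0;
    }

    int main(void)
    {
        printf("fetch_one() = %d\n", fetch_one());
        return 0;
    }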

monitor/qmp.c

@@ -178,8 +178,6 @@ static QMPRequest *monitor_qmp_requests_pop_any_with_lock(void)
     Monitor *mon;
     MonitorQMP *qmp_mon;
 
-    QEMU_LOCK_GUARD(&monitor_lock);
-
     QTAILQ_FOREACH(mon, &mon_list, entry) {
         if (!monitor_is_qmp(mon)) {
             continue;
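
With the QEMU_LOCK_GUARD() gone, the function's _with_lock suffix now names a
precondition instead of a behavior: per the usual QEMU convention, the caller
must already hold monitor_lock when calling it. The dispatcher below
satisfies that contract by wrapping the call in the lock guard; a condensed
sketch of the call site:

    WITH_QEMU_LOCK_GUARD(&monitor_lock) {
        /* monitor_lock held: safe to walk mon_list and pop a request */
        req_obj = monitor_qmp_requests_pop_any_with_lock();
    }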
@@ -215,6 +213,10 @@ void coroutine_fn monitor_qmp_dispatcher_co(void *data)
     MonitorQMP *mon;
 
     while (true) {
+        /*
+         * busy must be set to true again by whoever
+         * rescheduled us to avoid double scheduling
+         */
         assert(qatomic_mb_read(&qmp_dispatcher_co_busy) == true);
 
         /*
@@ -224,36 +226,23 @@ void coroutine_fn monitor_qmp_dispatcher_co(void *data)
          */
         qatomic_mb_set(&qmp_dispatcher_co_busy, false);
 
-        /* On shutdown, don't take any more requests from the queue */
-        if (qmp_dispatcher_co_shutdown) {
-            qatomic_set(&qmp_dispatcher_co, NULL);
-            return;
+        WITH_QEMU_LOCK_GUARD(&monitor_lock) {
+            /* On shutdown, don't take any more requests from the queue */
+            if (qmp_dispatcher_co_shutdown) {
+                return;
+            }
+
+            req_obj = monitor_qmp_requests_pop_any_with_lock();
         }
 
-        while (!(req_obj = monitor_qmp_requests_pop_any_with_lock())) {
+        if (!req_obj) {
             /*
              * No more requests to process. Wait to be reentered from
              * handle_qmp_command() when it pushes more requests, or
              * from monitor_cleanup() when it requests shutdown.
              */
-            if (!qmp_dispatcher_co_shutdown) {
-                qemu_coroutine_yield();
-
-                /*
-                 * busy must be set to true again by whoever
-                 * rescheduled us to avoid double scheduling
-                 */
-                assert(qatomic_xchg(&qmp_dispatcher_co_busy, false) == true);
-            }
-
-            /*
-             * qmp_dispatcher_co_shutdown may have changed if we
-             * yielded and were reentered from monitor_cleanup()
-             */
-            if (qmp_dispatcher_co_shutdown) {
-                qatomic_set(&qmp_dispatcher_co, NULL);
-                return;
-            }
+            qemu_coroutine_yield();
+            continue;
         }
 
         trace_monitor_qmp_in_band_dequeue(req_obj,
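
Yielding without re-checking the queue is safe only because of the busy-flag
handshake: the dispatcher clears qmp_dispatcher_co_busy with qatomic_mb_set()
before looking at the queue, and every producer queues its work (or sets
qmp_dispatcher_co_shutdown) before attempting the wake-up, as
monitor_cleanup() above does:

    if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) {
        aio_co_wake(qmp_dispatcher_co);
    }

Any request pushed after busy was cleared therefore observes busy == false
and wakes the coroutine, and the continue statement re-runs both the
shutdown check and the fetch under monitor_lock.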
@@ -342,6 +331,7 @@ void coroutine_fn monitor_qmp_dispatcher_co(void *data)
         aio_co_schedule(iohandler_get_aio_context(), qmp_dispatcher_co);
         qemu_coroutine_yield();
     }
+    qatomic_set(&qmp_dispatcher_co, NULL);
 }
 
 static void handle_qmp_command(void *opaque, QObject *req, Error *err)
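
For context, clearing qmp_dispatcher_co is how the coroutine advertises
termination; the pre-image did this at each early-return site, and the patch
moves it to a single place at the end of the function. The cleanup path can
then poll the pointer until the coroutine is gone; a sketch of that waiting
side, assuming the AIO_WAIT_WHILE() pattern used by monitor_cleanup() (this
part is not in the diff above):

    /* monitor_cleanup(), sketch: wait until the dispatcher has exited */
    AIO_WAIT_WHILE(qemu_get_aio_context(),
                   (aio_poll(iohandler_get_aio_context(), false),
                    qatomic_read(&qmp_dispatcher_co)));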