/*
 * Event loop thread
 *
 * Copyright Red Hat Inc., 2013
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef IOTHREAD_H
#define IOTHREAD_H

#include "block/aio.h"
#include "qemu/thread.h"
#include "qom/object.h"
|
2014-03-03 11:30:05 +01:00
|
|
|
|
|
|
|
#define TYPE_IOTHREAD "iothread"

struct IOThread {
    Object parent_obj;

    QemuThread thread;
    AioContext *ctx;
    bool run_gcontext;          /* whether we should run gcontext */
    GMainContext *worker_context;
    GMainLoop *main_loop;
    QemuSemaphore init_done_sem; /* is thread init done? */
    bool stopping;              /* has iothread_stop() been called? */
    bool running;               /* should iothread_run() continue? */
    int thread_id;

    /* AioContext poll parameters */
    int64_t poll_max_ns;
    int64_t poll_grow;
    int64_t poll_shrink;

    /* AioContext AIO engine parameters */
    int64_t aio_max_batch;
};
typedef struct IOThread IOThread;

DECLARE_INSTANCE_CHECKER(IOThread, IOTHREAD,
                         TYPE_IOTHREAD)
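
/*
 * The poll_* and aio_max_batch fields above are exposed as QOM properties
 * on the iothread object, so they are normally set when the IOThread is
 * created. A minimal command-line sketch (the id and property values are
 * illustrative, not recommendations):
 *
 *   -object iothread,id=iothread0,poll-max-ns=32768,aio-max-batch=16
 *
 * The object can then be attached to a device that supports an iothread
 * property, e.g. -device virtio-blk-pci,...,iothread=iothread0.
 */
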
char *iothread_get_id(IOThread *iothread);
IOThread *iothread_by_id(const char *id);
AioContext *iothread_get_aio_context(IOThread *iothread);
GMainContext *iothread_get_g_main_context(IOThread *iothread);
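
/*
 * Example (a minimal sketch, not from QEMU itself): looking up an
 * IOThread by id and scheduling work in its AioContext. The id
 * "iothread0" and the callback my_bh_cb are hypothetical; my_bh_cb
 * must match QEMUBHFunc, i.e. void my_bh_cb(void *opaque).
 *
 *   IOThread *iothread = iothread_by_id("iothread0");
 *   if (iothread) {
 *       aio_bh_schedule_oneshot(iothread_get_aio_context(iothread),
 *                               my_bh_cb, opaque);
 *   }
 */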

/*
 * Helpers used to allocate iothreads for internal use. These
 * iothreads will not be seen by monitor clients when queried
 * with "query-iothreads".
 */
IOThread *iothread_create(const char *id, Error **errp);
void iothread_stop(IOThread *iothread);
void iothread_destroy(IOThread *iothread);
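
/*
 * Example lifecycle (a minimal sketch, not from QEMU itself; the id
 * "my-iothread" is illustrative and &error_abort comes from
 * qapi/error.h, which callers must include themselves):
 *
 *   IOThread *iothread = iothread_create("my-iothread", &error_abort);
 *   ... schedule work on iothread_get_aio_context(iothread) ...
 *   iothread_stop(iothread);     <- joins the worker thread
 *   iothread_destroy(iothread);  <- releases the QOM object
 */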

/*
 * Returns true if executing within an IOThread context,
 * false otherwise.
 */
bool qemu_in_iothread(void);
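
/*
 * Example (a sketch): handy for asserting that a function is only
 * called from the main loop rather than from an IOThread:
 *
 *   assert(!qemu_in_iothread());
 */
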
#endif /* IOTHREAD_H */