monitor: hmp_qemu_io: acquire aio context, fix crash

Max reported the following bug:

$ ./qemu-img create -f raw src.img 1G
$ ./qemu-img create -f raw dst.img 1G

$ (echo '
   {"execute":"qmp_capabilities"}
   {"execute":"blockdev-mirror",
    "arguments":{"job-id":"mirror",
                 "device":"source",
                 "target":"target",
                 "sync":"full",
                 "filter-node-name":"mirror-top"}}
'; sleep 3; echo '
   {"execute":"human-monitor-command",
    "arguments":{"command-line":
                 "qemu-io mirror-top \"write 0 1G\""}}') \
| x86_64-softmmu/qemu-system-x86_64 \
   -qmp stdio \
   -blockdev file,node-name=source,filename=src.img \
   -blockdev file,node-name=target,filename=dst.img \
   -object iothread,id=iothr0 \
   -device virtio-blk,drive=source,iothread=iothr0

crashes:

0  raise () at /usr/lib/libc.so.6
1  abort () at /usr/lib/libc.so.6
2  error_exit
   (err=<optimized out>,
   msg=msg@entry=0x55fbb1634790 <__func__.27> "qemu_mutex_unlock_impl")
   at ../util/qemu-thread-posix.c:37
3  qemu_mutex_unlock_impl
   (mutex=mutex@entry=0x55fbb25ab6e0,
   file=file@entry=0x55fbb1636957 "../util/async.c",
   line=line@entry=650)
   at ../util/qemu-thread-posix.c:109
4  aio_context_release (ctx=ctx@entry=0x55fbb25ab680) at ../util/async.c:650
5  bdrv_do_drained_begin
   (bs=bs@entry=0x55fbb3a87000, recursive=recursive@entry=false,
   parent=parent@entry=0x0,
   ignore_bds_parents=ignore_bds_parents@entry=false,
   poll=poll@entry=true) at ../block/io.c:441
6  bdrv_do_drained_begin
   (poll=true, ignore_bds_parents=false, parent=0x0, recursive=false,
   bs=0x55fbb3a87000) at ../block/io.c:448
7  blk_drain (blk=0x55fbb26c5a00) at ../block/block-backend.c:1718
8  blk_unref (blk=0x55fbb26c5a00) at ../block/block-backend.c:498
9  blk_unref (blk=0x55fbb26c5a00) at ../block/block-backend.c:491
10 hmp_qemu_io (mon=0x7fffaf3fc7d0, qdict=<optimized out>)
   at ../block/monitor/block-hmp-cmds.c:628

man pthread_mutex_unlock
...
    EPERM  The  mutex type is PTHREAD_MUTEX_ERRORCHECK or
    PTHREAD_MUTEX_RECURSIVE, or the mutex is a robust mutex, and the
    current thread does not own the mutex.

So the thread does not own the mutex, and we do have an iothread here.
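
For illustration, here is a minimal standalone program (not part of the
commit; plain POSIX, nothing QEMU-specific) that reproduces the EPERM
result which qemu_mutex_unlock_impl() then turns into the abort above:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    pthread_mutex_t m;
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init(&m, &attr);

    /* Unlock a mutex we never locked: the error-checking type
     * reports EPERM instead of invoking undefined behaviour. */
    int ret = pthread_mutex_unlock(&m);
    printf("pthread_mutex_unlock: %d (%s)\n", ret, strerror(ret));

    pthread_mutex_destroy(&m);
    return 0;
}

(Build with "gcc -pthread". Getting a defined error code back is what
lets QEMU detect the unowned unlock and abort via error_exit().)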

Next, note that AIO_WAIT_WHILE() documents that ctx must be acquired
exactly once by the caller. But where is it acquired in this call stack?
Nowhere, it seems.
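
Schematically, the documented pattern is (a sketch only; some_condition()
is a placeholder, not a real QEMU function):

    AioContext *ctx = bdrv_get_aio_context(bs);

    aio_context_acquire(ctx);                /* exactly once, by the caller */
    AIO_WAIT_WHILE(ctx, some_condition(bs)); /* may drop and retake ctx while polling */
    aio_context_release(ctx);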

qemuio_command() does acquire the AIO context. But we need the context
acquired around blk_unref() as well, and actually around blk_insert_bs()
too.

Let's refactor qemuio_command() so that it does not acquire the AIO
context itself but requires its callers to do so instead. This way we can
cleanly acquire the AIO context in hmp_qemu_io() around all three calls.
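
Condensed, the resulting flow in hmp_qemu_io() looks like this (see the
full diff below; "..." marks elided code):

    ctx = blk ? blk_get_aio_context(blk) : bdrv_get_aio_context(bs);
    aio_context_acquire(ctx);

    if (bs) {
        blk = local_blk = blk_new(bdrv_get_aio_context(bs), 0, BLK_PERM_ALL);
        ret = blk_insert_bs(blk, bs, &err);   /* call 1: needs ctx held */
        ...
    }
    ...
    qemuio_command(blk, command);             /* call 2: caller now holds ctx */

    fail:
    blk_unref(local_blk);                     /* call 3: may drain, needs ctx */
    if (ctx) {
        aio_context_release(ctx);
    }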

Reported-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20210423134233.51495-1-vsementsov@virtuozzo.com>
[mreitz: Fixed comment]
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 3 files changed, 40 insertions(+), 16 deletions(-)

diff --git a/block/monitor/block-hmp-cmds.c b/block/monitor/block-hmp-cmds.c
--- a/block/monitor/block-hmp-cmds.c
+++ b/block/monitor/block-hmp-cmds.c
@@ -557,8 +557,10 @@ void hmp_eject(Monitor *mon, const QDict *qdict)
 
 void hmp_qemu_io(Monitor *mon, const QDict *qdict)
 {
-    BlockBackend *blk;
+    BlockBackend *blk = NULL;
+    BlockDriverState *bs = NULL;
     BlockBackend *local_blk = NULL;
+    AioContext *ctx = NULL;
     bool qdev = qdict_get_try_bool(qdict, "qdev", false);
     const char *device = qdict_get_str(qdict, "device");
     const char *command = qdict_get_str(qdict, "command");
@@ -573,18 +575,22 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict)
     } else {
         blk = blk_by_name(device);
         if (!blk) {
-            BlockDriverState *bs = bdrv_lookup_bs(NULL, device, &err);
-            if (bs) {
-                blk = local_blk = blk_new(bdrv_get_aio_context(bs),
-                                          0, BLK_PERM_ALL);
-                ret = blk_insert_bs(blk, bs, &err);
-                if (ret < 0) {
-                    goto fail;
-                }
-            } else {
+            bs = bdrv_lookup_bs(NULL, device, &err);
+            if (!bs) {
                 goto fail;
             }
         }
     }
 
+    ctx = blk ? blk_get_aio_context(blk) : bdrv_get_aio_context(bs);
+    aio_context_acquire(ctx);
+
+    if (bs) {
+        blk = local_blk = blk_new(bdrv_get_aio_context(bs), 0, BLK_PERM_ALL);
+        ret = blk_insert_bs(blk, bs, &err);
+        if (ret < 0) {
+            goto fail;
+        }
+    }
+
     /*
@@ -616,6 +622,11 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict)
 
 fail:
     blk_unref(local_blk);
+
+    if (ctx) {
+        aio_context_release(ctx);
+    }
+
     hmp_handle_error(mon, err);
 }

diff --git a/qemu-io-cmds.c b/qemu-io-cmds.c
--- a/qemu-io-cmds.c
+++ b/qemu-io-cmds.c
@@ -2457,9 +2457,12 @@ static const cmdinfo_t help_cmd = {
     .oneline = "help for one or all commands",
 };
 
+/*
+ * Called with aio context of blk acquired. Or with qemu_get_aio_context()
+ * context acquired if blk is NULL.
+ */
 int qemuio_command(BlockBackend *blk, const char *cmd)
 {
-    AioContext *ctx;
     char *input;
     const cmdinfo_t *ct;
     char **v;
@@ -2471,10 +2474,7 @@ int qemuio_command(BlockBackend *blk, const char *cmd)
     if (c) {
         ct = find_command(v[0]);
         if (ct) {
-            ctx = blk ? blk_get_aio_context(blk) : qemu_get_aio_context();
-            aio_context_acquire(ctx);
             ret = command(blk, ct, c, v);
-            aio_context_release(ctx);
         } else {
             fprintf(stderr, "command \"%s\" not found\n", v[0]);
             ret = -EINVAL;

diff --git a/qemu-io.c b/qemu-io.c
--- a/qemu-io.c
+++ b/qemu-io.c
@@ -411,6 +411,19 @@ static void prep_fetchline(void *opaque)
     *fetchable = 1;
 }
 
+static int do_qemuio_command(const char *cmd)
+{
+    int ret;
+    AioContext *ctx =
+        qemuio_blk ? blk_get_aio_context(qemuio_blk) : qemu_get_aio_context();
+
+    aio_context_acquire(ctx);
+    ret = qemuio_command(qemuio_blk, cmd);
+    aio_context_release(ctx);
+
+    return ret;
+}
+
 static int command_loop(void)
 {
     int i, fetchable = 0, prompted = 0;
@@ -418,7 +431,7 @@ static int command_loop(void)
     char *input;
 
     for (i = 0; !quit_qemu_io && i < ncmdline; i++) {
-        ret = qemuio_command(qemuio_blk, cmdline[i]);
+        ret = do_qemuio_command(cmdline[i]);
         if (ret < 0) {
             last_error = ret;
         }
@@ -446,7 +459,7 @@ static int command_loop(void)
         if (input == NULL) {
             break;
         }
-        ret = qemuio_command(qemuio_blk, input);
+        ret = do_qemuio_command(input);
         g_free(input);
         if (ret < 0) {