block: Acquire AioContexts during bdrv_reopen_multiple()

As the BlockReopenQueue can contain nodes in multiple AioContexts, only
one of which may be locked when AIO_WAIT_WHILE() can be called, we can't
let the caller lock the right contexts. Instead, individually lock the
AioContext of a single node when iterating the queue.

Reintroduce bdrv_reopen() as a wrapper for reopening a single node that
drains the node and temporarily drops the AioContext lock for
bdrv_reopen_multiple().

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20210708114709.206487-4-kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Kevin Wolf 2021-07-08 13:47:06 +02:00
parent ab5b522879
commit 6cf42ca2f9
5 changed files with 58 additions and 14 deletions
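The following sketch is not part of the commit; it only illustrates the locking contract described in the commit message: a caller holding the AioContext lock of a single node can use the reintroduced bdrv_reopen(), which drains the subtree and temporarily drops the lock around bdrv_reopen_multiple(), while bdrv_reopen_multiple() itself must run in the main thread with no other AioContexts locked. The helper name example_set_node_read_only() is hypothetical; the read-only option merely mirrors what bdrv_reopen_set_read_only() does in the diff below.

#include "qemu/osdep.h"
#include "block/block.h"
#include "qapi/qmp/qdict.h"

/*
 * Illustrative sketch, not part of the commit: reopen one node read-only
 * via the reintroduced bdrv_reopen() wrapper.  The caller may hold the
 * node's AioContext lock (if it is not the main context); bdrv_reopen()
 * drains the node and drops the lock while bdrv_reopen_multiple() runs.
 */
static int example_set_node_read_only(BlockDriverState *bs, Error **errp)
{
    QDict *opts = qdict_new();

    /* Same option that bdrv_reopen_set_read_only() sets */
    qdict_put_bool(opts, BDRV_OPT_READ_ONLY, true);

    /* keep_old_opts=true keeps the node's other current options */
    return bdrv_reopen(bs, opts, true, errp);
}

Compare this with the in-tree conversion of reopen_f() in qemu-io-cmds.c below, where the open-coded drain/queue/reopen_multiple sequence is replaced by a single bdrv_reopen() call.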

block.c (51 lines changed)

@@ -4124,19 +4124,26 @@ void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue)
  *
  * All affected nodes must be drained between bdrv_reopen_queue() and
  * bdrv_reopen_multiple().
+ *
+ * To be called from the main thread, with all other AioContexts unlocked.
  */
 int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
 {
     int ret = -1;
     BlockReopenQueueEntry *bs_entry, *next;
+    AioContext *ctx;
     Transaction *tran = tran_new();
     g_autoptr(GHashTable) found = NULL;
     g_autoptr(GSList) refresh_list = NULL;
 
+    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
     assert(bs_queue != NULL);
 
     QTAILQ_FOREACH(bs_entry, bs_queue, entry) {
+        ctx = bdrv_get_aio_context(bs_entry->state.bs);
+        aio_context_acquire(ctx);
         ret = bdrv_flush(bs_entry->state.bs);
+        aio_context_release(ctx);
         if (ret < 0) {
             error_setg_errno(errp, -ret, "Error flushing drive");
             goto abort;
@@ -4145,7 +4152,10 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
 
     QTAILQ_FOREACH(bs_entry, bs_queue, entry) {
         assert(bs_entry->state.bs->quiesce_counter > 0);
+        ctx = bdrv_get_aio_context(bs_entry->state.bs);
+        aio_context_acquire(ctx);
         ret = bdrv_reopen_prepare(&bs_entry->state, bs_queue, tran, errp);
+        aio_context_release(ctx);
         if (ret < 0) {
             goto abort;
         }
@@ -4188,7 +4198,10 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
      * to first element.
      */
     QTAILQ_FOREACH_REVERSE(bs_entry, bs_queue, entry) {
+        ctx = bdrv_get_aio_context(bs_entry->state.bs);
+        aio_context_acquire(ctx);
         bdrv_reopen_commit(&bs_entry->state);
+        aio_context_release(ctx);
     }
 
     tran_commit(tran);
@@ -4197,7 +4210,10 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
         BlockDriverState *bs = bs_entry->state.bs;
 
         if (bs->drv->bdrv_reopen_commit_post) {
+            ctx = bdrv_get_aio_context(bs);
+            aio_context_acquire(ctx);
             bs->drv->bdrv_reopen_commit_post(&bs_entry->state);
+            aio_context_release(ctx);
         }
     }
 
@@ -4208,7 +4224,10 @@ abort:
     tran_abort(tran);
     QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
         if (bs_entry->prepared) {
+            ctx = bdrv_get_aio_context(bs_entry->state.bs);
+            aio_context_acquire(ctx);
             bdrv_reopen_abort(&bs_entry->state);
+            aio_context_release(ctx);
         }
     }
 
@@ -4218,21 +4237,37 @@ cleanup:
     return ret;
 }
 
+int bdrv_reopen(BlockDriverState *bs, QDict *opts, bool keep_old_opts,
+                Error **errp)
+{
+    AioContext *ctx = bdrv_get_aio_context(bs);
+    BlockReopenQueue *queue;
+    int ret;
+
+    bdrv_subtree_drained_begin(bs);
+    if (ctx != qemu_get_aio_context()) {
+        aio_context_release(ctx);
+    }
+
+    queue = bdrv_reopen_queue(NULL, bs, opts, keep_old_opts);
+    ret = bdrv_reopen_multiple(queue, errp);
+
+    if (ctx != qemu_get_aio_context()) {
+        aio_context_acquire(ctx);
+    }
+    bdrv_subtree_drained_end(bs);
+
+    return ret;
+}
+
 int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
                               Error **errp)
 {
-    int ret;
-    BlockReopenQueue *queue;
     QDict *opts = qdict_new();
 
     qdict_put_bool(opts, BDRV_OPT_READ_ONLY, read_only);
 
-    bdrv_subtree_drained_begin(bs);
-    queue = bdrv_reopen_queue(NULL, bs, opts, true);
-    ret = bdrv_reopen_multiple(queue, errp);
-    bdrv_subtree_drained_end(bs);
-
-    return ret;
+    return bdrv_reopen(bs, opts, true, errp);
 }
 
 /*

block/replication.c

@@ -390,7 +390,14 @@ static void reopen_backing_file(BlockDriverState *bs, bool writable,
     }
 
     if (reopen_queue) {
+        AioContext *ctx = bdrv_get_aio_context(bs);
+        if (ctx != qemu_get_aio_context()) {
+            aio_context_release(ctx);
+        }
         bdrv_reopen_multiple(reopen_queue, errp);
+        if (ctx != qemu_get_aio_context()) {
+            aio_context_acquire(ctx);
+        }
     }
 
     bdrv_subtree_drained_end(s->hidden_disk->bs);

blockdev.c

@@ -3592,8 +3592,13 @@ void qmp_x_blockdev_reopen(BlockdevOptions *options, Error **errp)
     ctx = bdrv_get_aio_context(bs);
     aio_context_acquire(ctx);
     bdrv_subtree_drained_begin(bs);
+    aio_context_release(ctx);
+
     queue = bdrv_reopen_queue(NULL, bs, qdict, false);
     bdrv_reopen_multiple(queue, errp);
 
+    ctx = bdrv_get_aio_context(bs);
+    aio_context_acquire(ctx);
     bdrv_subtree_drained_end(bs);
     aio_context_release(ctx);

include/block/block.h

@@ -388,6 +388,8 @@ BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                     bool keep_old_opts);
 void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue);
 int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp);
+int bdrv_reopen(BlockDriverState *bs, QDict *opts, bool keep_old_opts,
+                Error **errp);
 int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
                               Error **errp);
 int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,

qemu-io-cmds.c

@@ -2116,8 +2116,6 @@ static int reopen_f(BlockBackend *blk, int argc, char **argv)
     bool writethrough = !blk_enable_write_cache(blk);
     bool has_rw_option = false;
     bool has_cache_option = false;
-
-    BlockReopenQueue *brq;
     Error *local_err = NULL;
 
     while ((c = getopt(argc, argv, "c:o:rw")) != -1) {
@@ -2210,10 +2208,7 @@ static int reopen_f(BlockBackend *blk, int argc, char **argv)
         qdict_put_bool(opts, BDRV_OPT_CACHE_NO_FLUSH, flags & BDRV_O_NO_FLUSH);
     }
 
-    bdrv_subtree_drained_begin(bs);
-    brq = bdrv_reopen_queue(NULL, bs, opts, true);
-    bdrv_reopen_multiple(brq, &local_err);
-    bdrv_subtree_drained_end(bs);
+    bdrv_reopen(bs, opts, true, &local_err);
 
     if (local_err) {
         error_report_err(local_err);