block: Mark bdrv_co_block_status() and callers GRAPH_RDLOCK

This adds GRAPH_RDLOCK annotations to declare that callers of
bdrv_co_block_status() need to hold a reader lock for the graph.

For some places, we know that they will hold the lock, but we don't have
the GRAPH_RDLOCK annotations yet. In this case, add assume_graph_lock()
with a FIXME comment. These assume_graph_lock() calls will be removed
once everything is properly annotated.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20230203152202.49054-5-kwolf@redhat.com>
Reviewed-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Kevin Wolf <kwolf@redhat.com>  2023-02-03 16:21:43 +01:00
commit 7ff9579e60 (parent c2b8e31516)
14 changed files with 81 additions and 62 deletions
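For context: GRAPH_RDLOCK and the related guard macros are thin wrappers around
clang's Thread Safety Analysis (TSA), so with -Wthread-safety an unannotated
caller of an annotated function becomes a compile-time warning. A standalone
miniature of that mechanism follows; it is not QEMU code, and all
GraphSketchLock/sketch_* names are invented for illustration.

/*
 * tsa_sketch.c - build with: clang -Wthread-safety -c tsa_sketch.c
 * Not QEMU code: a miniature of the clang TSA pattern that GRAPH_RDLOCK
 * is built on. All names are made up.
 */
#include <pthread.h>

#if defined(__clang__)
#define CAPABILITY(x)      __attribute__((capability(x)))
#define ACQUIRE_SHARED(x)  __attribute__((acquire_shared_capability(x)))
#define RELEASE_SHARED(x)  __attribute__((release_shared_capability(x)))
#define REQUIRES_SHARED(x) __attribute__((requires_shared_capability(x)))
#define NO_TSA             __attribute__((no_thread_safety_analysis))
#else
#define CAPABILITY(x)
#define ACQUIRE_SHARED(x)
#define RELEASE_SHARED(x)
#define REQUIRES_SHARED(x)
#define NO_TSA
#endif

typedef struct CAPABILITY("graph-lock") GraphSketchLock {
    pthread_rwlock_t lock;
} GraphSketchLock;

static GraphSketchLock graph_lock = { .lock = PTHREAD_RWLOCK_INITIALIZER };

/* The lock primitives themselves opt out of the analysis (cf. TSA_NO_TSA). */
static void NO_TSA ACQUIRE_SHARED(graph_lock) sketch_rdlock(void)
{
    pthread_rwlock_rdlock(&graph_lock.lock);
}

static void NO_TSA RELEASE_SHARED(graph_lock) sketch_rdunlock(void)
{
    pthread_rwlock_unlock(&graph_lock.lock);
}

/* Analogue of a GRAPH_RDLOCK function: callers must hold the reader lock. */
static int REQUIRES_SHARED(graph_lock) sketch_block_status(void)
{
    return 0;
}

int sketch_user(void)
{
    int ret;

    sketch_rdlock();
    ret = sketch_block_status();    /* OK: reader lock is held here */
    sketch_rdunlock();

    /* Calling sketch_block_status() here would warn under -Wthread-safety. */
    return ret;
}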

block/backup.c

@@ -270,7 +270,10 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
                 return -ECANCELED;
             }
 
+            /* rdlock protects the subsequent call to bdrv_is_allocated() */
+            bdrv_graph_co_rdlock();
             ret = block_copy_reset_unallocated(s->bcs, offset, &count);
+            bdrv_graph_co_rdunlock();
             if (ret < 0) {
                 return ret;
             }

block/block-backend.c

@@ -1431,6 +1431,7 @@ int coroutine_fn blk_co_block_status_above(BlockBackend *blk,
                                            BlockDriverState **file)
 {
     IO_CODE();
+    GRAPH_RDLOCK_GUARD();
     return bdrv_co_block_status_above(blk_bs(blk), base, offset, bytes, pnum,
                                       map, file);
 }
@@ -1441,6 +1442,7 @@ int coroutine_fn blk_co_is_allocated_above(BlockBackend *blk,
                                            int64_t bytes, int64_t *pnum)
 {
     IO_CODE();
+    GRAPH_RDLOCK_GUARD();
     return bdrv_co_is_allocated_above(blk_bs(blk), base, include_base, offset,
                                       bytes, pnum);
 }
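GRAPH_RDLOCK_GUARD() takes the reader lock and drops it automatically when the
function returns, which is why no explicit unlock appears above. A minimal
sketch of how such a guard can be built on the GCC/clang cleanup attribute,
with invented names (QEMU's real macro additionally handles coroutine context
and carries TSA annotations):

/*
 * guard_sketch.c - build with: cc -c guard_sketch.c
 * Illustrative only: scope-exit unlocking via __attribute__((cleanup)).
 */
#include <pthread.h>

static pthread_rwlock_t graph_lock = PTHREAD_RWLOCK_INITIALIZER;

static void rd_guard_cleanup(pthread_rwlock_t **l)
{
    pthread_rwlock_unlock(*l);
}

/* Lock now, unlock when the guard variable goes out of scope. */
#define RD_GUARD() \
    __attribute__((cleanup(rd_guard_cleanup), unused)) \
    pthread_rwlock_t *rd_guard_ = \
        (pthread_rwlock_rdlock(&graph_lock), &graph_lock)

int sketch_block_status_above(int fail)
{
    RD_GUARD();       /* cf. GRAPH_RDLOCK_GUARD() at function entry */

    if (fail) {
        return -1;    /* an early return still unlocks via cleanup */
    }
    return 0;
}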

block/block-copy.c

@@ -581,9 +581,9 @@ static coroutine_fn int block_copy_task_entry(AioTask *task)
     return ret;
 }
 
-static coroutine_fn int block_copy_block_status(BlockCopyState *s,
-                                                int64_t offset,
-                                                int64_t bytes, int64_t *pnum)
+static coroutine_fn GRAPH_RDLOCK
+int block_copy_block_status(BlockCopyState *s, int64_t offset, int64_t bytes,
+                            int64_t *pnum)
 {
     int64_t num;
     BlockDriverState *base;
@@ -618,9 +618,9 @@ static coroutine_fn int block_copy_block_status(BlockCopyState *s,
  * Check if the cluster starting at offset is allocated or not.
  * return via pnum the number of contiguous clusters sharing this allocation.
  */
-static int coroutine_fn block_copy_is_cluster_allocated(BlockCopyState *s,
-                                                        int64_t offset,
-                                                        int64_t *pnum)
+static int coroutine_fn GRAPH_RDLOCK
+block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
+                                int64_t *pnum)
 {
     BlockDriverState *bs = s->source->bs;
     int64_t count, total_count = 0;
@@ -630,6 +630,7 @@ static int coroutine_fn block_copy_is_cluster_allocated(BlockCopyState *s,
     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
 
     while (true) {
+        /* protected in backup_run() */
         ret = bdrv_co_is_allocated(bs, offset, bytes, &count);
         if (ret < 0) {
             return ret;
@@ -704,7 +705,7 @@ int64_t coroutine_fn block_copy_reset_unallocated(BlockCopyState *s,
  * Returns 1 if dirty clusters found and successfully copied, 0 if no dirty
  * clusters found and -errno on failure.
  */
-static int coroutine_fn
+static int coroutine_fn GRAPH_RDLOCK
 block_copy_dirty_clusters(BlockCopyCallState *call_state)
 {
     BlockCopyState *s = call_state->s;
@@ -827,7 +828,8 @@ void block_copy_kick(BlockCopyCallState *call_state)
  * it means that some I/O operation failed in context of _this_ block_copy call,
  * not some parallel operation.
 */
-static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
+static int coroutine_fn GRAPH_RDLOCK
+block_copy_common(BlockCopyCallState *call_state)
 {
     int ret;
     BlockCopyState *s = call_state->s;
@@ -892,6 +894,7 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
 
 static void coroutine_fn block_copy_async_co_entry(void *opaque)
 {
+    GRAPH_RDLOCK_GUARD();
     block_copy_common(opaque);
 }
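The shape of the block-copy.c changes is the core idea of the commit: the
GRAPH_RDLOCK annotation is propagated up the static call chain, from
block_copy_block_status() through block_copy_dirty_clusters() and
block_copy_common(), until it reaches block_copy_async_co_entry(), the one
place that actually acquires the lock. The same chain in miniature, reusing
the macros and sketch_rdlock()/sketch_rdunlock() primitives from the first
sketch above (all chain_* names invented):

/* chain_sketch.c - clang -Wthread-safety; assumes the definitions from
 * tsa_sketch.c above (CAPABILITY macros, graph_lock, sketch_rd*()). */

static int REQUIRES_SHARED(graph_lock) chain_status(void)       /* leaf */
{
    return 0;
}

static int REQUIRES_SHARED(graph_lock) chain_dirty_clusters(void)
{
    return chain_status();          /* no warning: annotation propagated */
}

static int REQUIRES_SHARED(graph_lock) chain_common(void)
{
    return chain_dirty_clusters();
}

/* Only the entry point takes the lock, like block_copy_async_co_entry(). */
int chain_entry(void)
{
    int ret;

    sketch_rdlock();
    ret = chain_common();
    sketch_rdunlock();
    return ret;
}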

block/coroutines.h

@@ -43,7 +43,7 @@ bdrv_co_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);
 int coroutine_fn GRAPH_RDLOCK
 bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp);
 
-int coroutine_fn
+int coroutine_fn GRAPH_RDLOCK
 bdrv_co_common_block_status_above(BlockDriverState *bs,
                                   BlockDriverState *base,
                                   bool include_base,

block/io.c

@@ -2224,11 +2224,10 @@ int bdrv_flush_all(void)
  * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
  * set to the host mapping and BDS corresponding to the guest offset.
  */
-static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
-                                             bool want_zero,
-                                             int64_t offset, int64_t bytes,
-                                             int64_t *pnum, int64_t *map,
-                                             BlockDriverState **file)
+static int coroutine_fn GRAPH_RDLOCK
+bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
+                     int64_t offset, int64_t bytes,
+                     int64_t *pnum, int64_t *map, BlockDriverState **file)
 {
     int64_t total_size;
     int64_t n; /* bytes */
@@ -2240,6 +2239,7 @@ static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
     bool has_filtered_child;
 
     assert(pnum);
+    assert_bdrv_graph_readable();
     *pnum = 0;
     total_size = bdrv_getlength(bs);
     if (total_size < 0) {
@@ -2470,6 +2470,7 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
     IO_CODE();
 
     assert(!include_base || base); /* Can't include NULL base */
+    assert_bdrv_graph_readable();
 
     if (!depth) {
         depth = &dummy;
@@ -2595,6 +2596,8 @@ int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
     int64_t pnum = bytes;
     IO_CODE();
 
+    assume_graph_lock(); /* FIXME */
+
     if (!bytes) {
         return 1;
     }
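Two different safety nets show up in block/io.c: assert_bdrv_graph_readable()
is a runtime check that backs the static annotation, while assume_graph_lock()
only tells the analysis to trust the caller, hence the FIXME. A toy sketch of
the runtime side, using a global reader counter (QEMU's real check is aware of
threads and coroutines; all names here are invented):

/*
 * assert_sketch.c - build with: cc -c assert_sketch.c
 * Illustrative only: a reader counter checked at run time, analogous in
 * spirit to assert_bdrv_graph_readable().
 */
#include <assert.h>
#include <pthread.h>

static pthread_rwlock_t graph_lock = PTHREAD_RWLOCK_INITIALIZER;
static int graph_readers;   /* toy bookkeeping, not per-thread accurate */

static void sketch_rdlock(void)
{
    pthread_rwlock_rdlock(&graph_lock);
    __atomic_fetch_add(&graph_readers, 1, __ATOMIC_SEQ_CST);
}

static void sketch_rdunlock(void)
{
    __atomic_fetch_sub(&graph_readers, 1, __ATOMIC_SEQ_CST);
    pthread_rwlock_unlock(&graph_lock);
}

/* Runtime analogue of assert_bdrv_graph_readable() */
static void assert_graph_readable(void)
{
    assert(__atomic_load_n(&graph_readers, __ATOMIC_SEQ_CST) > 0);
}

static int sketch_block_status(void)
{
    assert_graph_readable();    /* catches unlocked callers in debug runs */
    return 0;
}

int sketch_user(void)
{
    int ret;

    sketch_rdlock();
    ret = sketch_block_status();
    sketch_rdunlock();
    return ret;
}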

block/mirror.c

@@ -558,9 +558,11 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
         MirrorMethod mirror_method = MIRROR_METHOD_COPY;
 
         assert(!(offset % s->granularity));
-        ret = bdrv_block_status_above(source, NULL, offset,
-                                      nb_chunks * s->granularity,
-                                      &io_bytes, NULL, NULL);
+        WITH_GRAPH_RDLOCK_GUARD() {
+            ret = bdrv_block_status_above(source, NULL, offset,
+                                          nb_chunks * s->granularity,
+                                          &io_bytes, NULL, NULL);
+        }
         if (ret < 0) {
             io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
         } else if (ret & BDRV_BLOCK_DATA) {
@@ -863,8 +865,10 @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
             return 0;
         }
 
-        ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset, bytes,
-                                      &count);
+        WITH_GRAPH_RDLOCK_GUARD() {
+            ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset,
+                                          bytes, &count);
+        }
        if (ret < 0) {
             return ret;
         }

block/qcow.c

@@ -524,19 +524,16 @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate,
     return 1;
 }
 
-static int coroutine_fn qcow_co_block_status(BlockDriverState *bs,
-                                             bool want_zero,
-                                             int64_t offset, int64_t bytes,
-                                             int64_t *pnum, int64_t *map,
-                                             BlockDriverState **file)
+static int coroutine_fn GRAPH_RDLOCK
+qcow_co_block_status(BlockDriverState *bs, bool want_zero,
+                     int64_t offset, int64_t bytes, int64_t *pnum,
+                     int64_t *map, BlockDriverState **file)
 {
     BDRVQcowState *s = bs->opaque;
     int index_in_cluster, ret;
     int64_t n;
     uint64_t cluster_offset;
 
-    assume_graph_lock(); /* FIXME */
-
     qemu_co_mutex_lock(&s->lock);
     ret = get_cluster_offset(bs, offset, 0, 0, 0, 0, &cluster_offset);
     qemu_co_mutex_unlock(&s->lock);

block/quorum.c

@@ -1217,11 +1217,10 @@ static void quorum_child_perm(BlockDriverState *bs, BdrvChild *c,
  * return BDRV_BLOCK_ZERO if *all* children agree that a certain
  * region contains zeroes, and BDRV_BLOCK_DATA otherwise.
  */
-static int coroutine_fn quorum_co_block_status(BlockDriverState *bs,
-                                               bool want_zero,
-                                               int64_t offset, int64_t count,
-                                               int64_t *pnum, int64_t *map,
-                                               BlockDriverState **file)
+static int coroutine_fn GRAPH_RDLOCK
+quorum_co_block_status(BlockDriverState *bs, bool want_zero,
+                       int64_t offset, int64_t count,
+                       int64_t *pnum, int64_t *map, BlockDriverState **file)
 {
     BDRVQuorumState *s = bs->opaque;
     int i, ret;

block/stream.c

@@ -161,21 +161,25 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
         copy = false;
 
-        ret = bdrv_is_allocated(unfiltered_bs, offset, STREAM_CHUNK, &n);
-        if (ret == 1) {
-            /* Allocated in the top, no need to copy.  */
-        } else if (ret >= 0) {
-            /* Copy if allocated in the intermediate images.  Limit to the
-             * known-unallocated area [offset, offset+n*BDRV_SECTOR_SIZE).  */
-            ret = bdrv_is_allocated_above(bdrv_cow_bs(unfiltered_bs),
-                                          s->base_overlay, true,
-                                          offset, n, &n);
-
-            /* Finish early if end of backing file has been reached */
-            if (ret == 0 && n == 0) {
-                n = len - offset;
-            }
-
-            copy = (ret > 0);
+        WITH_GRAPH_RDLOCK_GUARD() {
+            ret = bdrv_is_allocated(unfiltered_bs, offset, STREAM_CHUNK, &n);
+            if (ret == 1) {
+                /* Allocated in the top, no need to copy.  */
+            } else if (ret >= 0) {
+                /*
+                 * Copy if allocated in the intermediate images.  Limit to the
+                 * known-unallocated area [offset, offset+n*BDRV_SECTOR_SIZE).
+                 */
+                ret = bdrv_is_allocated_above(bdrv_cow_bs(unfiltered_bs),
+                                              s->base_overlay, true,
+                                              offset, n, &n);
+
+                /* Finish early if end of backing file has been reached */
+                if (ret == 0 && n == 0) {
+                    n = len - offset;
+                }
+
+                copy = (ret > 0);
+            }
         }
         trace_stream_one_iteration(s, offset, n, ret);
         if (copy) {

include/block/block-copy.h

@@ -36,9 +36,9 @@ void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm);
 void block_copy_state_free(BlockCopyState *s);
 
 void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes);
-int64_t coroutine_fn block_copy_reset_unallocated(BlockCopyState *s,
-                                                  int64_t offset,
-                                                  int64_t *count);
+int64_t coroutine_fn GRAPH_RDLOCK
+block_copy_reset_unallocated(BlockCopyState *s, int64_t offset, int64_t *count);
 
 int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
                             bool ignore_ratelimit, uint64_t timeout_ns,

include/block/block-io.h

@@ -109,24 +109,24 @@ int bdrv_block_status(BlockDriverState *bs, int64_t offset,
                       int64_t bytes, int64_t *pnum, int64_t *map,
                       BlockDriverState **file);
-int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
-                                            BlockDriverState *base,
-                                            int64_t offset, int64_t bytes,
-                                            int64_t *pnum, int64_t *map,
-                                            BlockDriverState **file);
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_block_status_above(BlockDriverState *bs, BlockDriverState *base,
+                           int64_t offset, int64_t bytes, int64_t *pnum,
+                           int64_t *map, BlockDriverState **file);
 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                             int64_t offset, int64_t bytes, int64_t *pnum,
                             int64_t *map, BlockDriverState **file);
-int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
-                                      int64_t bytes, int64_t *pnum);
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
+                     int64_t *pnum);
 int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
                       int64_t *pnum);
-int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
-                                            BlockDriverState *base,
-                                            bool include_base, int64_t offset,
-                                            int64_t bytes, int64_t *pnum);
+int coroutine_fn GRAPH_RDLOCK
+bdrv_co_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
+                           bool include_base, int64_t offset, int64_t bytes,
+                           int64_t *pnum);
 int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
                             bool include_base, int64_t offset, int64_t bytes,
                             int64_t *pnum);

include/block/block_int-common.h

@@ -606,7 +606,8 @@ struct BlockDriver {
      * *pnum value for the block-status cache on protocol nodes, prior
      * to clamping *pnum for return to its caller.
      */
-    int coroutine_fn (*bdrv_co_block_status)(BlockDriverState *bs,
+    int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_block_status)(
+        BlockDriverState *bs,
         bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
         int64_t *map, BlockDriverState **file);
 
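A requires-style annotation cannot follow an indirect call through a
BlockDriver callback, which is why the function pointer itself is annotated
with GRAPH_RDLOCK_PTR. One way TSA can express this is to guard the pointer,
so that even reading it in order to call it demands the capability; the
miniature below takes that approach with invented names (the authoritative
definition of GRAPH_RDLOCK_PTR lives in QEMU's graph-lock header).

/*
 * fnptr_sketch.c - build with: clang -Wthread-safety -c fnptr_sketch.c
 * Illustrative only: guarding a callback pointer so that calling through
 * it requires the shared capability.
 */
#define CAPABILITY(x)      __attribute__((capability(x)))
#define GUARDED_BY(x)      __attribute__((guarded_by(x)))
#define ACQUIRE_SHARED(x)  __attribute__((acquire_shared_capability(x)))
#define RELEASE_SHARED(x)  __attribute__((release_shared_capability(x)))
#define NO_TSA             __attribute__((no_thread_safety_analysis))

typedef struct CAPABILITY("graph-lock") SketchLock { int unused_; } SketchLock;
static SketchLock graph_lock;

static void NO_TSA ACQUIRE_SHARED(graph_lock) sketch_rdlock(void) {}
static void NO_TSA RELEASE_SHARED(graph_lock) sketch_rdunlock(void) {}

typedef struct SketchDriver {
    /* cf. BlockDriver.bdrv_co_block_status with GRAPH_RDLOCK_PTR */
    int GUARDED_BY(graph_lock) (*block_status)(void);
} SketchDriver;

int sketch_call(SketchDriver *drv)
{
    int ret;

    sketch_rdlock();
    ret = drv->block_status();   /* OK: guarded pointer read under the lock */
    sketch_rdunlock();
    return ret;
}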

qemu-img.c

@@ -1991,7 +1991,9 @@ static void coroutine_fn convert_co_do_copy(void *opaque)
             qemu_co_mutex_unlock(&s->lock);
             break;
         }
-        n = convert_iteration_sectors(s, s->sector_num);
+        WITH_GRAPH_RDLOCK_GUARD() {
+            n = convert_iteration_sectors(s, s->sector_num);
+        }
         if (n < 0) {
             qemu_co_mutex_unlock(&s->lock);
             s->ret = n;

tests/unit/test-block-iothread.c

@@ -312,7 +312,8 @@ static void test_sync_op_blk_truncate(BlockBackend *blk)
     g_assert_cmpint(ret, ==, -EINVAL);
 }
 
-static void test_sync_op_block_status(BdrvChild *c)
+/* Disable TSA to make bdrv_test.bdrv_co_block_status writable */
+static void TSA_NO_TSA test_sync_op_block_status(BdrvChild *c)
 {
     int ret;
     int64_t n;
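The TSA_NO_TSA hunk above is the flip side of annotating the function pointer
in BlockDriver: this test installs its own bdrv_co_block_status callback, and
writing a guarded pointer would otherwise trip the analysis. Continuing the
pointer miniature from above (invented names):

/* Continuing fnptr_sketch.c: writing the guarded pointer from test code. */
static int fake_status(void)
{
    return 0;
}

/*
 * Writing a GUARDED_BY pointer requires the capability exclusively, so
 * without the opt-out this assignment would warn. The test opts out of the
 * analysis instead of taking a lock it does not need, the same idea as
 * TSA_NO_TSA on test_sync_op_block_status().
 */
void NO_TSA sketch_patch_driver(SketchDriver *drv)
{
    drv->block_status = fake_status;
}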