backup: Make sure that source and target size match

Since the introduction of a backup filter node in commit 00e30f05d, the
backup block job crashes when the target image is smaller than the
source image because it tries to write past the end of the target node
without having BLK_PERM_RESIZE. (Previously, the BlockBackend layer
would have caught this and errored out gracefully.)
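
For illustration only, the check that used to happen at the BlockBackend
layer amounts to a simple bounds test before the write is issued. A
minimal standalone sketch of that idea (toy code, not QEMU's actual
implementation; all names here are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-in for the bounds check the BlockBackend layer used to
     * apply: a write extending past the end of the node is rejected
     * unless the writer holds a resize permission. */
    static int toy_check_write(int64_t node_len, int64_t offset,
                               int64_t bytes, int can_resize)
    {
        if (offset + bytes > node_len && !can_resize) {
            return -1;   /* fail gracefully instead of writing past EOF */
        }
        return 0;
    }

    int main(void)
    {
        /* Target one cluster shorter than the source: copying the last
         * source cluster would land past the end of the target. */
        int64_t target_len = 9 * 65536;

        if (toy_check_write(target_len, 9 * 65536, 65536, 0) < 0) {
            printf("write beyond end of target rejected\n");
        }
        return 0;
    }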

We can fix this and even do better than the old behaviour: Check that
source and target have the same image size at the start of the block job
and unshare BLK_PERM_RESIZE. (This permission was already unshared
before the same commit 00e30f05d, but the BlockBackend that was used to
make the restriction was removed without a replacement.) This will
immediately error out when starting the job instead of only when writing
to a block that doesn't exist in the target.

A target that is longer than the source would technically work because
we would never write to blocks that don't exist, but semantically it is
invalid, too, because a backup is supposed to create a copy, not just an
image that starts with a copy.
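
In outline, the new behaviour is an upfront length comparison plus
masking BLK_PERM_RESIZE out of what the filter shares on its target
child. A minimal standalone sketch of the two pieces (toy permission
values for illustration only; the real definitions live in QEMU's block
layer headers):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative permission bits (not the real values from block.h). */
    enum {
        TOY_PERM_WRITE  = 1 << 1,
        TOY_PERM_RESIZE = 1 << 3,
        TOY_PERM_ALL    = (1 << 5) - 1,
    };

    /* A backup must be an exact copy, so only equal sizes are accepted;
     * a larger target is rejected just like a smaller one. */
    static int toy_backup_check_size(int64_t source_len, int64_t target_len)
    {
        return source_len == target_len ? 0 : -1;
    }

    int main(void)
    {
        /* What the filter now shares on the target child: everything
         * except resize, because the size is validated only once. */
        uint64_t shared_on_target = TOY_PERM_ALL & ~TOY_PERM_RESIZE;

        if (toy_backup_check_size(10 << 20, 8 << 20) < 0) {
            printf("Source and target image have different sizes\n");
        }
        if (!(shared_on_target & TOY_PERM_RESIZE)) {
            printf("resize not shared while the backup filter is attached\n");
        }
        return 0;
    }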

Fixes: 00e30f05de
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1778593
Cc: qemu-stable@nongnu.org
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20200430142755.315494-4-kwolf@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>

--- a/block/backup-top.c
+++ b/block/backup-top.c

@@ -148,8 +148,10 @@ static void backup_top_child_perm(BlockDriverState *bs, BdrvChild *c,
          *
          * Share write to target (child_file), to not interfere
          * with guest writes to its disk which may be in target backing chain.
+         * Can't resize during a backup block job because we check the size
+         * only upfront.
          */
-        *nshared = BLK_PERM_ALL;
+        *nshared = BLK_PERM_ALL & ~BLK_PERM_RESIZE;
         *nperm = BLK_PERM_WRITE;
     } else {
         /* Source child */
@@ -159,7 +161,7 @@ static void backup_top_child_perm(BlockDriverState *bs, BdrvChild *c,
         if (perm & BLK_PERM_WRITE) {
             *nperm = *nperm | BLK_PERM_CONSISTENT_READ;
         }
-        *nshared &= ~BLK_PERM_WRITE;
+        *nshared &= ~(BLK_PERM_WRITE | BLK_PERM_RESIZE);
     }
 }
@@ -192,11 +194,13 @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source,
 {
     Error *local_err = NULL;
     BDRVBackupTopState *state;
-    BlockDriverState *top = bdrv_new_open_driver(&bdrv_backup_top_filter,
-                                                 filter_node_name,
-                                                 BDRV_O_RDWR, errp);
+    BlockDriverState *top;
     bool appended = false;

+    assert(source->total_sectors == target->total_sectors);
+
+    top = bdrv_new_open_driver(&bdrv_backup_top_filter, filter_node_name,
+                               BDRV_O_RDWR, errp);
     if (!top) {
         return NULL;
     }

--- a/block/backup.c
+++ b/block/backup.c

@@ -340,7 +340,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque,
                   JobTxn *txn, Error **errp)
 {
-    int64_t len;
+    int64_t len, target_len;
     BackupBlockJob *job = NULL;
     int64_t cluster_size;
     BdrvRequestFlags write_flags;
@@ -405,6 +405,18 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
         goto error;
     }

+    target_len = bdrv_getlength(target);
+    if (target_len < 0) {
+        error_setg_errno(errp, -target_len, "Unable to get length for '%s'",
+                         bdrv_get_device_or_node_name(bs));
+        goto error;
+    }
+
+    if (target_len != len) {
+        error_setg(errp, "Source and target image have different sizes");
+        goto error;
+    }
+
     cluster_size = backup_calculate_cluster_size(target, errp);
     if (cluster_size < 0) {
         goto error;