From c334e897d08eea1f5a3a95f6a2208afe6757c103 Mon Sep 17 00:00:00 2001
From: Vladimir Sementsov-Ogievskiy
Date: Mon, 29 Apr 2019 12:08:41 +0300
Subject: [PATCH] block/backup: unify different modes code path

Do full, top and incremental mode copying all in one place. This
unifies the code path and helps further improvements.

Signed-off-by: Vladimir Sementsov-Ogievskiy
Reviewed-by: Max Reitz
Message-id: 20190429090842.57910-5-vsementsov@virtuozzo.com
Signed-off-by: Max Reitz
---
 block/backup.c | 43 ++++++++++---------------------------------
 1 file changed, 10 insertions(+), 33 deletions(-)

diff --git a/block/backup.c b/block/backup.c
index 78f1b79354..5b3fc9d123 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -384,15 +384,23 @@ static bool bdrv_is_unallocated_range(BlockDriverState *bs,
     return offset >= end;
 }
 
-static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
+static int coroutine_fn backup_loop(BackupBlockJob *job)
 {
     int ret;
     bool error_is_read;
     int64_t offset;
     HBitmapIter hbi;
+    BlockDriverState *bs = blk_bs(job->common.blk);
 
     hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
     while ((offset = hbitmap_iter_next(&hbi)) != -1) {
+        if (job->sync_mode == MIRROR_SYNC_MODE_TOP &&
+            bdrv_is_unallocated_range(bs, offset, job->cluster_size))
+        {
+            hbitmap_reset(job->copy_bitmap, offset, job->cluster_size);
+            continue;
+        }
+
         do {
             if (yield_and_check(job)) {
                 return 0;
@@ -437,7 +445,6 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
 {
     BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
     BlockDriverState *bs = blk_bs(s->common.blk);
-    int64_t offset;
     int ret = 0;
 
     QLIST_INIT(&s->inflight_reqs);
@@ -462,38 +469,8 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
              * notify callback service CoW requests. */
             job_yield(job);
         }
-    } else if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
-        ret = backup_run_incremental(s);
     } else {
-        /* Both FULL and TOP SYNC_MODE's require copying.. */
-        for (offset = 0; offset < s->len;
-             offset += s->cluster_size) {
-            bool error_is_read;
-
-            if (yield_and_check(s)) {
-                break;
-            }
-
-            if (s->sync_mode == MIRROR_SYNC_MODE_TOP &&
-                bdrv_is_unallocated_range(bs, offset, s->cluster_size))
-            {
-                continue;
-            }
-
-            ret = backup_do_cow(s, offset, s->cluster_size,
-                                &error_is_read, false);
-            if (ret < 0) {
-                /* Depending on error action, fail now or retry cluster */
-                BlockErrorAction action =
-                    backup_error_action(s, error_is_read, -ret);
-                if (action == BLOCK_ERROR_ACTION_REPORT) {
-                    break;
-                } else {
-                    offset -= s->cluster_size;
-                    continue;
-                }
-            }
-        }
+        ret = backup_loop(s);
     }
 
     notifier_with_return_remove(&s->before_write);
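
For reference, here is a sketch of how backup_loop() reads once the patch is
applied, reconstructed from the hunks above rather than taken verbatim from the
tree. The body of the inner do/while retry loop (backup_do_cow() and the
error-action handling) is not touched by this patch and is only outlined in a
comment; the ret initialization exists only because that body is omitted here.

static int coroutine_fn backup_loop(BackupBlockJob *job)
{
    int ret = 0; /* initialized only because the copy body is omitted below */
    bool error_is_read;
    int64_t offset;
    HBitmapIter hbi;
    BlockDriverState *bs = blk_bs(job->common.blk);

    hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
    while ((offset = hbitmap_iter_next(&hbi)) != -1) {
        if (job->sync_mode == MIRROR_SYNC_MODE_TOP &&
            bdrv_is_unallocated_range(bs, offset, job->cluster_size))
        {
            /* TOP mode: the cluster is not allocated in the top image, so
             * there is nothing to copy; drop it from the copy bitmap. */
            hbitmap_reset(job->copy_bitmap, offset, job->cluster_size);
            continue;
        }

        do {
            if (yield_and_check(job)) {
                return 0;
            }
            /* ... copy the cluster with backup_do_cow() and apply the
             * configured error action, as in the pre-patch code; this part
             * is unchanged by the patch and omitted from the sketch ... */
        } while (ret < 0);
    }

    return ret;
}

With this shape, backup_run() only has to distinguish MIRROR_SYNC_MODE_NONE
from everything else: FULL, TOP and INCREMENTAL all go through the same
copy_bitmap-driven loop, and TOP merely clears the bits it can skip.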