backup: Switch backup_run() to byte-based

We are gradually converting to byte-based interfaces, as they are
easier to reason about than sector-based.  Change the internal
loop iteration of backups to track by bytes instead of sectors
(although we are still guaranteed that we iterate by steps that
are cluster-aligned).
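
As a rough illustration of the change (a standalone sketch, not QEMU
code: cluster_size stands in for job->cluster_size and len for
job->common.len from the diff below; everything else is made up), the
main loop of backup_run() now advances a byte offset in cluster-sized
steps instead of a sector-derived cluster index:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const int64_t cluster_size = 64 * 1024;     /* hypothetical cluster size */
        const int64_t len = 3 * cluster_size + 123; /* hypothetical image length */
        int64_t offset;

        /* Old style iterated a cluster index, roughly:
         *   for (start = 0; start < DIV_ROUND_UP(len, cluster_size); start++)
         * New style tracks a byte offset, still stepping whole clusters: */
        for (offset = 0; offset < len; offset += cluster_size) {
            printf("copy cluster at byte offset %" PRId64 "\n", offset);
        }
        return 0;
    }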

Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Eric Blake 2017-07-07 07:44:56 -05:00 committed by Kevin Wolf
parent 03f5d60bbf
commit 6f8e35e241
1 changed file with 15 additions and 17 deletions

block/backup.c

@@ -370,11 +370,10 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
     int ret = 0;
     int clusters_per_iter;
     uint32_t granularity;
-    int64_t sector;
+    int64_t offset;
     int64_t cluster;
     int64_t end;
     int64_t last_cluster = -1;
-    int64_t sectors_per_cluster = cluster_size_sectors(job);
     BdrvDirtyBitmapIter *dbi;
 
     granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
@@ -382,8 +381,8 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
     dbi = bdrv_dirty_iter_new(job->sync_bitmap, 0);
 
     /* Find the next dirty sector(s) */
-    while ((sector = bdrv_dirty_iter_next(dbi)) != -1) {
-        cluster = sector / sectors_per_cluster;
+    while ((offset = bdrv_dirty_iter_next(dbi) * BDRV_SECTOR_SIZE) >= 0) {
+        cluster = offset / job->cluster_size;
 
         /* Fake progress updates for any clusters we skipped */
         if (cluster != last_cluster + 1) {
@@ -410,7 +409,8 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
         /* If the bitmap granularity is smaller than the backup granularity,
          * we need to advance the iterator pointer to the next cluster. */
         if (granularity < job->cluster_size) {
-            bdrv_set_dirty_iter(dbi, cluster * sectors_per_cluster);
+            bdrv_set_dirty_iter(dbi,
+                                cluster * job->cluster_size / BDRV_SECTOR_SIZE);
         }
 
         last_cluster = cluster - 1;
@@ -432,17 +432,15 @@ static void coroutine_fn backup_run(void *opaque)
     BackupBlockJob *job = opaque;
     BackupCompleteData *data;
     BlockDriverState *bs = blk_bs(job->common.blk);
-    int64_t start, end;
+    int64_t offset;
     int64_t sectors_per_cluster = cluster_size_sectors(job);
     int ret = 0;
 
     QLIST_INIT(&job->inflight_reqs);
     qemu_co_rwlock_init(&job->flush_rwlock);
 
-    start = 0;
-    end = DIV_ROUND_UP(job->common.len, job->cluster_size);
-
-    job->done_bitmap = bitmap_new(end);
+    job->done_bitmap = bitmap_new(DIV_ROUND_UP(job->common.len,
+                                               job->cluster_size));
 
     job->before_write.notify = backup_before_write_notify;
     bdrv_add_before_write_notifier(bs, &job->before_write);
@@ -457,7 +455,8 @@ static void coroutine_fn backup_run(void *opaque)
         ret = backup_run_incremental(job);
     } else {
         /* Both FULL and TOP SYNC_MODE's require copying.. */
-        for (; start < end; start++) {
+        for (offset = 0; offset < job->common.len;
+             offset += job->cluster_size) {
             bool error_is_read;
             int alloced = 0;
 
@@ -480,8 +479,8 @@ static void coroutine_fn backup_run(void *opaque)
                      * needed but at some point that is always the case. */
                     alloced =
                         bdrv_is_allocated(bs,
-                                          start * sectors_per_cluster + i,
-                                          sectors_per_cluster - i, &n);
+                                          (offset >> BDRV_SECTOR_BITS) + i,
+                                          sectors_per_cluster - i, &n);
                     i += n;
                     if (alloced || n == 0) {
@@ -499,9 +498,8 @@ static void coroutine_fn backup_run(void *opaque)
             if (alloced < 0) {
                 ret = alloced;
             } else {
-                ret = backup_do_cow(job, start * job->cluster_size,
-                                    job->cluster_size, &error_is_read,
-                                    false);
+                ret = backup_do_cow(job, offset, job->cluster_size,
+                                    &error_is_read, false);
             }
             if (ret < 0) {
                 /* Depending on error action, fail now or retry cluster */
@@ -510,7 +508,7 @@ static void coroutine_fn backup_run(void *opaque)
                 if (action == BLOCK_ERROR_ACTION_REPORT) {
                     break;
                 } else {
-                    start--;
+                    offset -= job->cluster_size;
                     continue;
                 }
             }