backup: Switch BackupBlockJob to byte-based

We are gradually converting to byte-based interfaces, as they are
easier to reason about than sector-based ones.  Continue by converting
an internal structure (no semantic change) along with all references
used for tracking progress, and drop the now-redundant local variable
bytes_per_cluster.
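
To illustrate the conversion, here is a minimal standalone sketch (not
part of the patch; the helpers copy_len_sectors() and copy_len_bytes()
are invented for this example, and BDRV_SECTOR_SIZE is QEMU's fixed
512-byte sector size) comparing the old sector-based computation of the
per-cluster copy length with its byte-based replacement:

#include <stdint.h>

#define BDRV_SECTOR_SIZE 512
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Old style: length of the copy for a given cluster, in sectors. */
static int64_t copy_len_sectors(int64_t job_len, int64_t cluster_size,
                                int64_t cluster)
{
    int64_t sectors_per_cluster = cluster_size / BDRV_SECTOR_SIZE;

    /* Clamp the final cluster against the end of the device. */
    return MIN(sectors_per_cluster,
               job_len / BDRV_SECTOR_SIZE - cluster * sectors_per_cluster);
}

/* New style: the same quantity, computed directly in bytes. */
static int64_t copy_len_bytes(int64_t job_len, int64_t cluster_size,
                              int64_t cluster)
{
    return MIN(cluster_size, job_len - cluster * cluster_size);
}

For any sector-aligned job_len, copy_len_bytes() equals
copy_len_sectors() * BDRV_SECTOR_SIZE, which is why the conversion has
no semantic effect.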

Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Author: Eric Blake, 2017-07-07 07:44:53 -05:00
Committer: Kevin Wolf
parent e8a81e9cad
commit cf79cdf662
1 changed file with 15 additions and 18 deletions


@@ -39,7 +39,7 @@ typedef struct BackupBlockJob {
     BlockdevOnError on_source_error;
     BlockdevOnError on_target_error;
     CoRwlock flush_rwlock;
-    uint64_t sectors_read;
+    uint64_t bytes_read;
     unsigned long *done_bitmap;
     int64_t cluster_size;
     bool compress;
@@ -102,16 +102,15 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
     void *bounce_buffer = NULL;
     int ret = 0;
     int64_t sectors_per_cluster = cluster_size_sectors(job);
-    int64_t bytes_per_cluster = sectors_per_cluster * BDRV_SECTOR_SIZE;
-    int64_t start, end;
-    int n;
+    int64_t start, end; /* clusters */
+    int n; /* bytes */
 
     qemu_co_rwlock_rdlock(&job->flush_rwlock);
 
     start = sector_num / sectors_per_cluster;
     end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);
 
-    trace_backup_do_cow_enter(job, start * bytes_per_cluster,
+    trace_backup_do_cow_enter(job, start * job->cluster_size,
                               sector_num * BDRV_SECTOR_SIZE,
                               nb_sectors * BDRV_SECTOR_SIZE);
 
@@ -120,28 +119,27 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
 
     for (; start < end; start++) {
         if (test_bit(start, job->done_bitmap)) {
-            trace_backup_do_cow_skip(job, start * bytes_per_cluster);
+            trace_backup_do_cow_skip(job, start * job->cluster_size);
             continue; /* already copied */
         }
 
-        trace_backup_do_cow_process(job, start * bytes_per_cluster);
+        trace_backup_do_cow_process(job, start * job->cluster_size);
 
-        n = MIN(sectors_per_cluster,
-                job->common.len / BDRV_SECTOR_SIZE -
-                start * sectors_per_cluster);
+        n = MIN(job->cluster_size,
+                job->common.len - start * job->cluster_size);
 
         if (!bounce_buffer) {
             bounce_buffer = blk_blockalign(blk, job->cluster_size);
         }
         iov.iov_base = bounce_buffer;
-        iov.iov_len = n * BDRV_SECTOR_SIZE;
+        iov.iov_len = n;
         qemu_iovec_init_external(&bounce_qiov, &iov, 1);
 
         ret = blk_co_preadv(blk, start * job->cluster_size,
                             bounce_qiov.size, &bounce_qiov,
                             is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
         if (ret < 0) {
-            trace_backup_do_cow_read_fail(job, start * bytes_per_cluster, ret);
+            trace_backup_do_cow_read_fail(job, start * job->cluster_size, ret);
             if (error_is_read) {
                 *error_is_read = true;
             }
@@ -157,7 +155,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                  job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
         }
         if (ret < 0) {
-            trace_backup_do_cow_write_fail(job, start * bytes_per_cluster, ret);
+            trace_backup_do_cow_write_fail(job, start * job->cluster_size, ret);
             if (error_is_read) {
                 *error_is_read = false;
             }
@@ -169,8 +167,8 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
         /* Publish progress, guest I/O counts as progress too.  Note that the
          * offset field is an opaque progress value, it is not a disk offset.
          */
-        job->sectors_read += n;
-        job->common.offset += n * BDRV_SECTOR_SIZE;
+        job->bytes_read += n;
+        job->common.offset += n;
     }
 
 out:
@@ -363,9 +361,8 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
      */
     if (job->common.speed) {
         uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
-                                                      job->sectors_read *
-                                                      BDRV_SECTOR_SIZE);
-        job->sectors_read = 0;
+                                                      job->bytes_read);
+        job->bytes_read = 0;
         block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
     } else {
         block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);