block: move block_copy from block/backup.c to separate file

Split block_copy into a separate file, so that it can be cleanly shared
with the backup-top filter driver in further commits.

It is a clean move; the only change is dropping "static" from the
interface functions.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20190920142056.12778-8-vsementsov@virtuozzo.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
Commit beb5f5450d, parent 0e23e382b7
Author: Vladimir Sementsov-Ogievskiy, 2019-09-20 17:20:48 +03:00; committed by Max Reitz
5 changed files with 413 additions and 354 deletions
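
For orientation before the diff: a minimal sketch of how a consumer of the
now-public block-copy API (for example, the planned backup-top filter) might
drive it. Only the block-copy declarations are real; every caller-side name
is hypothetical, and @len is assumed to be cluster-aligned, as block_copy()
asserts.

/*
 * Illustrative block-copy consumer. Only the block-copy API calls are
 * real; ExampleCaller and the example_* functions are made up.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/block-copy.h"

typedef struct ExampleCaller {
    uint64_t bytes_copied;
} ExampleCaller;

static void example_progress_bytes(int64_t bytes, void *opaque)
{
    ExampleCaller *c = opaque;

    c->bytes_copied += bytes;          /* accumulate finished work */
}

static void example_progress_reset(void *opaque)
{
    /*
     * Some copy_bitmap bits were cleared (see skip_unallocated in
     * block-copy.h): recalculate the remaining-work estimate here,
     * e.g. from the bitmap's dirty bit count.
     */
}

static int coroutine_fn example_copy_all(BlockDriverState *source,
                                         BlockDriverState *target,
                                         int64_t cluster_size, int64_t len,
                                         Error **errp)
{
    ExampleCaller c = { 0 };
    BlockCopyState *bcs;
    bool error_is_read;
    int64_t offset;
    int ret = 0;

    bcs = block_copy_state_new(source, target, cluster_size, 0,
                               example_progress_bytes,
                               example_progress_reset, &c, errp);
    if (!bcs) {
        return -EINVAL;
    }

    for (offset = 0; offset < len && ret == 0; offset += cluster_size) {
        ret = block_copy(bcs, offset, cluster_size, &error_is_read,
                         false /* not called from a write notifier */);
    }

    block_copy_state_free(bcs);
    return ret;
}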

block/Makefile.objs

@@ -37,6 +37,7 @@ block-obj-y += write-threshold.o
block-obj-y += backup.o
block-obj-$(CONFIG_REPLICATION) += replication.o
block-obj-y += throttle.o copy-on-read.o
block-obj-y += block-copy.o
block-obj-y += crypto.o

block/backup.c

@@ -18,6 +18,7 @@
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "block/block_backup.h"
#include "block/block-copy.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
@@ -35,47 +36,6 @@ typedef struct CowRequest {
CoQueue wait_queue; /* coroutines blocked on this request */
} CowRequest;
typedef void (*ProgressBytesCallbackFunc)(int64_t bytes, void *opaque);
typedef void (*ProgressResetCallbackFunc)(void *opaque);
typedef struct BlockCopyState {
BlockBackend *source;
BlockBackend *target;
BdrvDirtyBitmap *copy_bitmap;
int64_t cluster_size;
bool use_copy_range;
int64_t copy_range_size;
uint64_t len;
BdrvRequestFlags write_flags;
/*
* skip_unallocated:
*
* Used by sync=top jobs, which first scan the source node for unallocated
* areas and clear them in the copy_bitmap. During this process, the bitmap
* is thus not fully initialized: It may still have bits set for areas that
* are unallocated and should actually not be copied.
*
* This is indicated by skip_unallocated.
*
* In this case, block_copy() will query the source's allocation status,
* skip unallocated regions, clear them in the copy_bitmap, and invoke
* block_copy_reset_unallocated() every time it does.
*/
bool skip_unallocated;
/* progress_bytes_callback: called when some copying progress is done. */
ProgressBytesCallbackFunc progress_bytes_callback;
/*
* progress_reset_callback: called when some bytes are reset from copy_bitmap
* (see @skip_unallocated above). The callee is assumed to recalculate how
* many bytes remain based on the dirty bit count of copy_bitmap.
*/
ProgressResetCallbackFunc progress_reset_callback;
void *progress_opaque;
} BlockCopyState;
typedef struct BackupBlockJob {
BlockJob common;
BlockDriverState *source_bs;
@@ -135,319 +95,6 @@ static void cow_request_end(CowRequest *req)
qemu_co_queue_restart_all(&req->wait_queue);
}
static void block_copy_state_free(BlockCopyState *s)
{
if (!s) {
return;
}
bdrv_release_dirty_bitmap(blk_bs(s->source), s->copy_bitmap);
blk_unref(s->source);
blk_unref(s->target);
g_free(s);
}
static BlockCopyState *block_copy_state_new(
BlockDriverState *source, BlockDriverState *target,
int64_t cluster_size, BdrvRequestFlags write_flags,
ProgressBytesCallbackFunc progress_bytes_callback,
ProgressResetCallbackFunc progress_reset_callback,
void *progress_opaque, Error **errp)
{
BlockCopyState *s;
int ret;
uint64_t no_resize = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD;
BdrvDirtyBitmap *copy_bitmap;
copy_bitmap = bdrv_create_dirty_bitmap(source, cluster_size, NULL, errp);
if (!copy_bitmap) {
return NULL;
}
bdrv_disable_dirty_bitmap(copy_bitmap);
s = g_new(BlockCopyState, 1);
*s = (BlockCopyState) {
.source = blk_new(bdrv_get_aio_context(source),
BLK_PERM_CONSISTENT_READ, no_resize),
.target = blk_new(bdrv_get_aio_context(target),
BLK_PERM_WRITE, no_resize),
.copy_bitmap = copy_bitmap,
.cluster_size = cluster_size,
.len = bdrv_dirty_bitmap_size(copy_bitmap),
.write_flags = write_flags,
.progress_bytes_callback = progress_bytes_callback,
.progress_reset_callback = progress_reset_callback,
.progress_opaque = progress_opaque,
};
s->copy_range_size = QEMU_ALIGN_DOWN(MIN(blk_get_max_transfer(s->source),
blk_get_max_transfer(s->target)),
s->cluster_size);
/*
* Set use_copy_range, consider the following:
* 1. Compression is not supported for copy_range.
* 2. copy_range does not respect max_transfer (it's a TODO), so we factor
* that in here. If max_transfer is smaller than the job->cluster_size,
* we do not use copy_range (in that case it's zero after aligning down
* above).
*/
s->use_copy_range =
!(write_flags & BDRV_REQ_WRITE_COMPRESSED) && s->copy_range_size > 0;
/*
* We simply allow aio context changes on our block backends. The
* block_copy() user (currently only backup) is responsible for keeping
* source and target in the same aio context.
*/
blk_set_disable_request_queuing(s->source, true);
blk_set_allow_aio_context_change(s->source, true);
blk_set_disable_request_queuing(s->target, true);
blk_set_allow_aio_context_change(s->target, true);
ret = blk_insert_bs(s->source, source, errp);
if (ret < 0) {
goto fail;
}
ret = blk_insert_bs(s->target, target, errp);
if (ret < 0) {
goto fail;
}
return s;
fail:
block_copy_state_free(s);
return NULL;
}
/*
* Copy range to target with a bounce buffer and return the bytes copied. If
* an error occurred, return a negative error number.
*/
static int coroutine_fn block_copy_with_bounce_buffer(BlockCopyState *s,
int64_t start,
int64_t end,
bool is_write_notifier,
bool *error_is_read,
void **bounce_buffer)
{
int ret;
int nbytes;
int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
assert(QEMU_IS_ALIGNED(start, s->cluster_size));
bdrv_reset_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
nbytes = MIN(s->cluster_size, s->len - start);
if (!*bounce_buffer) {
*bounce_buffer = blk_blockalign(s->source, s->cluster_size);
}
ret = blk_co_pread(s->source, start, nbytes, *bounce_buffer, read_flags);
if (ret < 0) {
trace_block_copy_with_bounce_buffer_read_fail(s, start, ret);
if (error_is_read) {
*error_is_read = true;
}
goto fail;
}
ret = blk_co_pwrite(s->target, start, nbytes, *bounce_buffer,
s->write_flags);
if (ret < 0) {
trace_block_copy_with_bounce_buffer_write_fail(s, start, ret);
if (error_is_read) {
*error_is_read = false;
}
goto fail;
}
return nbytes;
fail:
bdrv_set_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
return ret;
}
/*
* Copy range to target and return the bytes copied. If an error occurred,
* return a negative error number.
*/
static int coroutine_fn block_copy_with_offload(BlockCopyState *s,
int64_t start,
int64_t end,
bool is_write_notifier)
{
int ret;
int nr_clusters;
int nbytes;
int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
assert(QEMU_IS_ALIGNED(s->copy_range_size, s->cluster_size));
assert(QEMU_IS_ALIGNED(start, s->cluster_size));
nbytes = MIN(s->copy_range_size, MIN(end, s->len) - start);
nr_clusters = DIV_ROUND_UP(nbytes, s->cluster_size);
bdrv_reset_dirty_bitmap(s->copy_bitmap, start,
s->cluster_size * nr_clusters);
ret = blk_co_copy_range(s->source, start, s->target, start, nbytes,
read_flags, s->write_flags);
if (ret < 0) {
trace_block_copy_with_offload_fail(s, start, ret);
bdrv_set_dirty_bitmap(s->copy_bitmap, start,
s->cluster_size * nr_clusters);
return ret;
}
return nbytes;
}
/*
* Check whether the cluster starting at @offset is allocated or not.
* Return via @pnum the number of contiguous clusters sharing this allocation.
*/
static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
int64_t *pnum)
{
BlockDriverState *bs = blk_bs(s->source);
int64_t count, total_count = 0;
int64_t bytes = s->len - offset;
int ret;
assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
while (true) {
ret = bdrv_is_allocated(bs, offset, bytes, &count);
if (ret < 0) {
return ret;
}
total_count += count;
if (ret || count == 0) {
/*
* ret: partial segment(s) are considered allocated.
* otherwise: unallocated tail is treated as an entire segment.
*/
*pnum = DIV_ROUND_UP(total_count, s->cluster_size);
return ret;
}
/* Unallocated segment(s) with uncertain following segment(s) */
if (total_count >= s->cluster_size) {
*pnum = total_count / s->cluster_size;
return 0;
}
offset += count;
bytes -= count;
}
}
/*
* Reset bits in copy_bitmap starting at offset if they represent unallocated
* data in the image. May reset subsequent contiguous bits.
* @return 0 when the cluster at @offset was unallocated,
* 1 otherwise, and -ret on error.
*/
static int64_t block_copy_reset_unallocated(BlockCopyState *s,
int64_t offset, int64_t *count)
{
int ret;
int64_t clusters, bytes;
ret = block_copy_is_cluster_allocated(s, offset, &clusters);
if (ret < 0) {
return ret;
}
bytes = clusters * s->cluster_size;
if (!ret) {
bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
s->progress_reset_callback(s->progress_opaque);
}
*count = bytes;
return ret;
}
static int coroutine_fn block_copy(BlockCopyState *s,
int64_t start, uint64_t bytes,
bool *error_is_read,
bool is_write_notifier)
{
int ret = 0;
int64_t end = bytes + start; /* bytes */
void *bounce_buffer = NULL;
int64_t status_bytes;
/*
* block_copy() user is responsible for keeping source and target in same
* aio context
*/
assert(blk_get_aio_context(s->source) == blk_get_aio_context(s->target));
assert(QEMU_IS_ALIGNED(start, s->cluster_size));
assert(QEMU_IS_ALIGNED(end, s->cluster_size));
while (start < end) {
int64_t dirty_end;
if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) {
trace_block_copy_skip(s, start);
start += s->cluster_size;
continue; /* already copied */
}
dirty_end = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start,
(end - start));
if (dirty_end < 0) {
dirty_end = end;
}
if (s->skip_unallocated) {
ret = block_copy_reset_unallocated(s, start, &status_bytes);
if (ret == 0) {
trace_block_copy_skip_range(s, start, status_bytes);
start += status_bytes;
continue;
}
/* Clamp to known allocated region */
dirty_end = MIN(dirty_end, start + status_bytes);
}
trace_block_copy_process(s, start);
if (s->use_copy_range) {
ret = block_copy_with_offload(s, start, dirty_end,
is_write_notifier);
if (ret < 0) {
s->use_copy_range = false;
}
}
if (!s->use_copy_range) {
ret = block_copy_with_bounce_buffer(s, start, dirty_end,
is_write_notifier,
error_is_read, &bounce_buffer);
}
if (ret < 0) {
break;
}
start += ret;
s->progress_bytes_callback(ret, s->progress_opaque);
ret = 0;
}
if (bounce_buffer) {
qemu_vfree(bounce_buffer);
}
return ret;
}
static void backup_progress_bytes_callback(int64_t bytes, void *opaque)
{
BackupBlockJob *s = opaque;

block/block-copy.c (new file, 333 lines)

@@ -0,0 +1,333 @@
/*
* block_copy API
*
* Copyright (C) 2013 Proxmox Server Solutions
* Copyright (c) 2019 Virtuozzo International GmbH.
*
* Authors:
* Dietmar Maurer (dietmar@proxmox.com)
* Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "sysemu/block-backend.h"
void block_copy_state_free(BlockCopyState *s)
{
if (!s) {
return;
}
bdrv_release_dirty_bitmap(blk_bs(s->source), s->copy_bitmap);
blk_unref(s->source);
blk_unref(s->target);
g_free(s);
}
BlockCopyState *block_copy_state_new(
BlockDriverState *source, BlockDriverState *target,
int64_t cluster_size, BdrvRequestFlags write_flags,
ProgressBytesCallbackFunc progress_bytes_callback,
ProgressResetCallbackFunc progress_reset_callback,
void *progress_opaque, Error **errp)
{
BlockCopyState *s;
int ret;
uint64_t no_resize = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD;
BdrvDirtyBitmap *copy_bitmap;
copy_bitmap = bdrv_create_dirty_bitmap(source, cluster_size, NULL, errp);
if (!copy_bitmap) {
return NULL;
}
bdrv_disable_dirty_bitmap(copy_bitmap);
s = g_new(BlockCopyState, 1);
*s = (BlockCopyState) {
.source = blk_new(bdrv_get_aio_context(source),
BLK_PERM_CONSISTENT_READ, no_resize),
.target = blk_new(bdrv_get_aio_context(target),
BLK_PERM_WRITE, no_resize),
.copy_bitmap = copy_bitmap,
.cluster_size = cluster_size,
.len = bdrv_dirty_bitmap_size(copy_bitmap),
.write_flags = write_flags,
.progress_bytes_callback = progress_bytes_callback,
.progress_reset_callback = progress_reset_callback,
.progress_opaque = progress_opaque,
};
s->copy_range_size = QEMU_ALIGN_DOWN(MIN(blk_get_max_transfer(s->source),
blk_get_max_transfer(s->target)),
s->cluster_size);
/*
* Set use_copy_range, consider the following:
* 1. Compression is not supported for copy_range.
* 2. copy_range does not respect max_transfer (it's a TODO), so we factor
* that in here. If max_transfer is smaller than the job->cluster_size,
* we do not use copy_range (in that case it's zero after aligning down
* above).
*/
s->use_copy_range =
!(write_flags & BDRV_REQ_WRITE_COMPRESSED) && s->copy_range_size > 0;
/*
* We simply allow aio context changes on our block backends. The
* block_copy() user (currently only backup) is responsible for keeping
* source and target in the same aio context.
*/
blk_set_disable_request_queuing(s->source, true);
blk_set_allow_aio_context_change(s->source, true);
blk_set_disable_request_queuing(s->target, true);
blk_set_allow_aio_context_change(s->target, true);
ret = blk_insert_bs(s->source, source, errp);
if (ret < 0) {
goto fail;
}
ret = blk_insert_bs(s->target, target, errp);
if (ret < 0) {
goto fail;
}
return s;
fail:
block_copy_state_free(s);
return NULL;
}
/*
* Copy range to target with a bounce buffer and return the bytes copied. If
* an error occurred, return a negative error number.
*/
static int coroutine_fn block_copy_with_bounce_buffer(BlockCopyState *s,
int64_t start,
int64_t end,
bool is_write_notifier,
bool *error_is_read,
void **bounce_buffer)
{
int ret;
int nbytes;
int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
assert(QEMU_IS_ALIGNED(start, s->cluster_size));
bdrv_reset_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
nbytes = MIN(s->cluster_size, s->len - start);
if (!*bounce_buffer) {
*bounce_buffer = blk_blockalign(s->source, s->cluster_size);
}
ret = blk_co_pread(s->source, start, nbytes, *bounce_buffer, read_flags);
if (ret < 0) {
trace_block_copy_with_bounce_buffer_read_fail(s, start, ret);
if (error_is_read) {
*error_is_read = true;
}
goto fail;
}
ret = blk_co_pwrite(s->target, start, nbytes, *bounce_buffer,
s->write_flags);
if (ret < 0) {
trace_block_copy_with_bounce_buffer_write_fail(s, start, ret);
if (error_is_read) {
*error_is_read = false;
}
goto fail;
}
return nbytes;
fail:
bdrv_set_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
return ret;
}
/*
* Copy range to target and return the bytes copied. If an error occurred,
* return a negative error number.
*/
static int coroutine_fn block_copy_with_offload(BlockCopyState *s,
int64_t start,
int64_t end,
bool is_write_notifier)
{
int ret;
int nr_clusters;
int nbytes;
int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
assert(QEMU_IS_ALIGNED(s->copy_range_size, s->cluster_size));
assert(QEMU_IS_ALIGNED(start, s->cluster_size));
nbytes = MIN(s->copy_range_size, MIN(end, s->len) - start);
nr_clusters = DIV_ROUND_UP(nbytes, s->cluster_size);
bdrv_reset_dirty_bitmap(s->copy_bitmap, start,
s->cluster_size * nr_clusters);
ret = blk_co_copy_range(s->source, start, s->target, start, nbytes,
read_flags, s->write_flags);
if (ret < 0) {
trace_block_copy_with_offload_fail(s, start, ret);
bdrv_set_dirty_bitmap(s->copy_bitmap, start,
s->cluster_size * nr_clusters);
return ret;
}
return nbytes;
}
/*
* Check whether the cluster starting at @offset is allocated or not.
* Return via @pnum the number of contiguous clusters sharing this allocation.
*/
static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
int64_t *pnum)
{
BlockDriverState *bs = blk_bs(s->source);
int64_t count, total_count = 0;
int64_t bytes = s->len - offset;
int ret;
assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
while (true) {
ret = bdrv_is_allocated(bs, offset, bytes, &count);
if (ret < 0) {
return ret;
}
total_count += count;
if (ret || count == 0) {
/*
* ret: partial segment(s) are considered allocated.
* otherwise: unallocated tail is treated as an entire segment.
*/
*pnum = DIV_ROUND_UP(total_count, s->cluster_size);
return ret;
}
/* Unallocated segment(s) with uncertain following segment(s) */
if (total_count >= s->cluster_size) {
*pnum = total_count / s->cluster_size;
return 0;
}
offset += count;
bytes -= count;
}
}
/*
* Reset bits in copy_bitmap starting at offset if they represent unallocated
* data in the image. May reset subsequent contiguous bits.
* @return 0 when the cluster at @offset was unallocated,
* 1 otherwise, and -ret on error.
*/
int64_t block_copy_reset_unallocated(BlockCopyState *s,
int64_t offset, int64_t *count)
{
int ret;
int64_t clusters, bytes;
ret = block_copy_is_cluster_allocated(s, offset, &clusters);
if (ret < 0) {
return ret;
}
bytes = clusters * s->cluster_size;
if (!ret) {
bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
s->progress_reset_callback(s->progress_opaque);
}
*count = bytes;
return ret;
}
int coroutine_fn block_copy(BlockCopyState *s,
int64_t start, uint64_t bytes,
bool *error_is_read,
bool is_write_notifier)
{
int ret = 0;
int64_t end = bytes + start; /* bytes */
void *bounce_buffer = NULL;
int64_t status_bytes;
/*
* block_copy() user is responsible for keeping source and target in same
* aio context
*/
assert(blk_get_aio_context(s->source) == blk_get_aio_context(s->target));
assert(QEMU_IS_ALIGNED(start, s->cluster_size));
assert(QEMU_IS_ALIGNED(end, s->cluster_size));
while (start < end) {
int64_t dirty_end;
if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) {
trace_block_copy_skip(s, start);
start += s->cluster_size;
continue; /* already copied */
}
dirty_end = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start,
(end - start));
if (dirty_end < 0) {
dirty_end = end;
}
if (s->skip_unallocated) {
ret = block_copy_reset_unallocated(s, start, &status_bytes);
if (ret == 0) {
trace_block_copy_skip_range(s, start, status_bytes);
start += status_bytes;
continue;
}
/* Clamp to known allocated region */
dirty_end = MIN(dirty_end, start + status_bytes);
}
trace_block_copy_process(s, start);
if (s->use_copy_range) {
ret = block_copy_with_offload(s, start, dirty_end,
is_write_notifier);
if (ret < 0) {
s->use_copy_range = false;
}
}
if (!s->use_copy_range) {
ret = block_copy_with_bounce_buffer(s, start, dirty_end,
is_write_notifier,
error_is_read, &bounce_buffer);
}
if (ret < 0) {
break;
}
start += ret;
s->progress_bytes_callback(ret, s->progress_opaque);
ret = 0;
}
if (bounce_buffer) {
qemu_vfree(bounce_buffer);
}
return ret;
}
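
A worked illustration of block_copy_is_cluster_allocated() above, assuming a
64 KiB cluster size (the numbers are illustrative, not from the commit): if
bdrv_is_allocated() reports one allocated run of 96 KiB at @offset, the
function returns 1 with *pnum = DIV_ROUND_UP(96K, 64K) = 2 clusters, rounding
up so that a partially allocated cluster counts as allocated; if it instead
reports 96 KiB unallocated followed by an uncertain segment, it returns 0
with *pnum = 96K / 64K = 1 cluster, rounding down so that no possibly
allocated data is ever skipped.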

block/trace-events

@@ -40,6 +40,8 @@ mirror_yield_in_flight(void *s, int64_t offset, int in_flight) "s %p offset %" P
# backup.c
backup_do_cow_enter(void *job, int64_t start, int64_t offset, uint64_t bytes) "job %p start %" PRId64 " offset %" PRId64 " bytes %" PRIu64
backup_do_cow_return(void *job, int64_t offset, uint64_t bytes, int ret) "job %p offset %" PRId64 " bytes %" PRIu64 " ret %d"
# block-copy.c
block_copy_skip(void *bcs, int64_t start) "bcs %p start %"PRId64
block_copy_skip_range(void *bcs, int64_t start, uint64_t bytes) "bcs %p start %"PRId64" bytes %"PRIu64
block_copy_process(void *bcs, int64_t start) "bcs %p start %"PRId64

include/block/block-copy.h (new file)

@@ -0,0 +1,76 @@
/*
* block_copy API
*
* Copyright (C) 2013 Proxmox Server Solutions
* Copyright (c) 2019 Virtuozzo International GmbH.
*
* Authors:
* Dietmar Maurer (dietmar@proxmox.com)
* Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef BLOCK_COPY_H
#define BLOCK_COPY_H
#include "block/block.h"
typedef void (*ProgressBytesCallbackFunc)(int64_t bytes, void *opaque);
typedef void (*ProgressResetCallbackFunc)(void *opaque);
typedef struct BlockCopyState {
BlockBackend *source;
BlockBackend *target;
BdrvDirtyBitmap *copy_bitmap;
int64_t cluster_size;
bool use_copy_range;
int64_t copy_range_size;
uint64_t len;
BdrvRequestFlags write_flags;
/*
* skip_unallocated:
*
* Used by sync=top jobs, which first scan the source node for unallocated
* areas and clear them in the copy_bitmap. During this process, the bitmap
* is thus not fully initialized: It may still have bits set for areas that
* are unallocated and should actually not be copied.
*
* This is indicated by skip_unallocated.
*
* In this case, block_copy() will query the source's allocation status,
* skip unallocated regions, clear them in the copy_bitmap, and invoke
* block_copy_reset_unallocated() every time it does.
*/
bool skip_unallocated;
/* progress_bytes_callback: called when some copying progress is done. */
ProgressBytesCallbackFunc progress_bytes_callback;
/*
* progress_reset_callback: called when some bytes are reset from copy_bitmap
* (see @skip_unallocated above). The callee is assumed to recalculate how
* many bytes remain based on the dirty bit count of copy_bitmap.
*/
ProgressResetCallbackFunc progress_reset_callback;
void *progress_opaque;
} BlockCopyState;
BlockCopyState *block_copy_state_new(
BlockDriverState *source, BlockDriverState *target,
int64_t cluster_size, BdrvRequestFlags write_flags,
ProgressBytesCallbackFunc progress_bytes_callback,
ProgressResetCallbackFunc progress_reset_callback,
void *progress_opaque, Error **errp);
void block_copy_state_free(BlockCopyState *s);
int64_t block_copy_reset_unallocated(BlockCopyState *s,
int64_t offset, int64_t *count);
int coroutine_fn block_copy(BlockCopyState *s, int64_t start, uint64_t bytes,
bool *error_is_read, bool is_write_notifier);
#endif /* BLOCK_COPY_H */
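
To make the skip_unallocated contract documented above concrete, here is a
hedged sketch of the pre-scan a sync=top user is expected to run; the loop
shape mirrors the one in block/backup.c, but the helper itself is
hypothetical.

/*
 * Illustrative pre-scan for a sync=top job: clear copy_bitmap bits for
 * unallocated clusters so block_copy() will skip them. Hypothetical
 * helper; only block_copy_reset_unallocated() is part of the real API.
 */
static void example_scan_unallocated(BlockCopyState *s, int64_t len)
{
    int64_t offset = 0;
    int64_t count;

    s->skip_unallocated = true;        /* bitmap not fully initialized yet */

    while (offset < len) {
        if (block_copy_reset_unallocated(s, offset, &count) < 0) {
            return;                    /* on error, leave remaining bits set */
        }
        offset += count;
    }

    s->skip_unallocated = false;       /* bitmap is now authoritative */
}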