diff --git a/block/block-copy.c b/block/block-copy.c
index fa27450b14..82cf945693 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -51,6 +51,7 @@ typedef struct BlockCopyCallState {
     int ret;
     bool finished;
     QemuCoSleepState *sleep_state;
+    bool cancelled;
 
     /* OUT parameters */
     bool error_is_read;
@@ -594,7 +595,7 @@ block_copy_dirty_clusters(BlockCopyCallState *call_state)
     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
     assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
 
-    while (bytes && aio_task_pool_status(aio) == 0) {
+    while (bytes && aio_task_pool_status(aio) == 0 && !call_state->cancelled) {
         BlockCopyTask *task;
         int64_t status_bytes;
 
@@ -707,7 +708,7 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
     do {
         ret = block_copy_dirty_clusters(call_state);
 
-        if (ret == 0) {
+        if (ret == 0 && !call_state->cancelled) {
             ret = block_copy_wait_one(call_state->s, call_state->offset,
                                       call_state->bytes);
         }
@@ -721,7 +722,7 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
          * 2. We have waited for some intersecting block-copy request
          *    It may have failed and produced new dirty bits.
          */
-    } while (ret > 0);
+    } while (ret > 0 && !call_state->cancelled);
 
     call_state->finished = true;
 
@@ -801,12 +802,19 @@ bool block_copy_call_finished(BlockCopyCallState *call_state)
 
 bool block_copy_call_succeeded(BlockCopyCallState *call_state)
 {
-    return call_state->finished && call_state->ret == 0;
+    return call_state->finished && !call_state->cancelled &&
+        call_state->ret == 0;
 }
 
 bool block_copy_call_failed(BlockCopyCallState *call_state)
 {
-    return call_state->finished && call_state->ret < 0;
+    return call_state->finished && !call_state->cancelled &&
+        call_state->ret < 0;
+}
+
+bool block_copy_call_cancelled(BlockCopyCallState *call_state)
+{
+    return call_state->cancelled;
 }
 
 int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
@@ -818,6 +826,12 @@ int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
     return call_state->ret;
 }
 
+void block_copy_call_cancel(BlockCopyCallState *call_state)
+{
+    call_state->cancelled = true;
+    block_copy_kick(call_state);
+}
+
 BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
 {
     return s->copy_bitmap;
diff --git a/include/block/block-copy.h b/include/block/block-copy.h
index b5a53ad59e..7821850f88 100644
--- a/include/block/block-copy.h
+++ b/include/block/block-copy.h
@@ -74,11 +74,24 @@ void block_copy_call_free(BlockCopyCallState *call_state);
 bool block_copy_call_finished(BlockCopyCallState *call_state);
 bool block_copy_call_succeeded(BlockCopyCallState *call_state);
 bool block_copy_call_failed(BlockCopyCallState *call_state);
+bool block_copy_call_cancelled(BlockCopyCallState *call_state);
 int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read);
 
 void block_copy_set_speed(BlockCopyState *s, uint64_t speed);
 void block_copy_kick(BlockCopyCallState *call_state);
 
+/*
+ * Cancel running block-copy call.
+ *
+ * Cancel leaves block-copy state valid: dirty bits are correct and you may use
+ * cancel + <run block_copy with same parameters> to emulate pause/resume.
+ *
+ * Note also that cancel is asynchronous: it only marks the block-copy call as
+ * cancelled, so the call may be cancelled (block_copy_call_cancelled() reports
+ * true) but not yet finished (block_copy_call_finished() reports false).
+ */
+void block_copy_call_cancel(BlockCopyCallState *call_state);
+
 BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s);
 
 void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip);
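
A caller-side sketch of the new cancel API (not part of the patch): the helper name copy_cancel_and_wait(), the aio_poll() wait loop on the main AioContext, and the assumption that call_state came from the async entry point (e.g. block_copy_async()) are for illustration only; the patch itself does not prescribe this usage.

#include "qemu/osdep.h"
#include "block/aio.h"
#include "block/block-copy.h"

/* Cancel a running background copy and wait for its coroutine to finish. */
static void copy_cancel_and_wait(BlockCopyCallState *call_state)
{
    /* Asynchronous: this only marks the call as cancelled and kicks it. */
    block_copy_call_cancel(call_state);

    /* The call may still be completing in-flight I/O; poll until it is done. */
    while (!block_copy_call_finished(call_state)) {
        aio_poll(qemu_get_aio_context(), true);
    }

    /*
     * A cancelled call reports neither success nor failure; dirty bits stay
     * correct, so block_copy may later be restarted with the same parameters
     * to emulate pause/resume.
     */
    assert(block_copy_call_cancelled(call_state));
    assert(!block_copy_call_succeeded(call_state));
    assert(!block_copy_call_failed(call_state));
}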