Merge remote-tracking branch 'remotes/jnsnow/tags/bitmaps-pull-request' into staging

Pull request

Rebase notes:

011/36:[0003] [FC] 'block/backup: upgrade copy_bitmap to BdrvDirtyBitmap'
016/36:[----] [-C] 'iotests: Add virtio-scsi device helper'
017/36:[0002] [FC] 'iotests: add test 257 for bitmap-mode backups'
030/36:[0011] [FC] 'block/backup: teach TOP to never copy unallocated regions'
032/36:[0018] [FC] 'iotests/257: test traditional sync modes'

11: A new hbitmap call was added late in 4.1, changed to
    bdrv_dirty_bitmap_next_zero.
16: Context-only (self.has_quit is new context in 040)
17: Removed 'auto' to follow current upstream iotest conventions
30: Handled explicitly on-list with R-B from Max.
32: Fixed capitalization in the test, as mentioned on-list.

# gpg: Signature made Sat 17 Aug 2019 00:12:13 BST
# gpg:                using RSA key F9B7ABDBBCACDF95BE76CBD07DEF8106AAFC390E
# gpg: Good signature from "John Snow (John Huston) <jsnow@redhat.com>" [full]
# Primary key fingerprint: FAEB 9711 A12C F475 812F  18F2 88A9 064D 1835 61EB
#      Subkey fingerprint: F9B7 ABDB BCAC DF95 BE76  CBD0 7DEF 8106 AAFC 390E

* remotes/jnsnow/tags/bitmaps-pull-request: (36 commits)
  tests/test-hbitmap: test next_zero and _next_dirty_area after truncate
  block/backup: refactor write_flags
  block/backup: deal with zero detection
  qapi: add dirty-bitmaps to query-named-block-nodes result
  iotests/257: test traditional sync modes
  block/backup: support bitmap sync modes for non-bitmap backups
  block/backup: teach TOP to never copy unallocated regions
  block/backup: add backup_is_cluster_allocated
  block/backup: centralize copy_bitmap initialization
  block/backup: improve sync=bitmap work estimates
  iotests/257: test API failures
  block/backup: hoist bitmap check into QMP interface
  iotests/257: Refactor backup helpers
  iotests/257: add EmulatedBitmap class
  iotests/257: add Pattern class
  iotests: test bitmap moving inside 254
  qapi: implement block-dirty-bitmap-remove transaction action
  blockdev: reduce aio_context locked sections in bitmap add/remove
  block/backup: loosen restriction on readonly bitmaps
  iotests: add test 257 for bitmap-mode backups
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2019-08-19 10:55:03 +01:00
commit 1f37316238
29 changed files with 6878 additions and 440 deletions

View File

@ -5346,7 +5346,7 @@ static void coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs,
for (bm = bdrv_dirty_bitmap_next(bs, NULL); bm;
bm = bdrv_dirty_bitmap_next(bs, bm))
{
bdrv_dirty_bitmap_set_migration(bm, false);
bdrv_dirty_bitmap_skip_store(bm, false);
}
ret = refresh_total_sectors(bs, bs->total_sectors);

View File

@ -38,24 +38,26 @@ typedef struct CowRequest {
typedef struct BackupBlockJob {
BlockJob common;
BlockBackend *target;
/* bitmap for sync=incremental */
BdrvDirtyBitmap *sync_bitmap;
BdrvDirtyBitmap *copy_bitmap;
MirrorSyncMode sync_mode;
BitmapSyncMode bitmap_mode;
BlockdevOnError on_source_error;
BlockdevOnError on_target_error;
CoRwlock flush_rwlock;
uint64_t len;
uint64_t bytes_read;
int64_t cluster_size;
bool compress;
NotifierWithReturn before_write;
QLIST_HEAD(, CowRequest) inflight_reqs;
HBitmap *copy_bitmap;
bool use_copy_range;
int64_t copy_range_size;
bool serialize_target_writes;
BdrvRequestFlags write_flags;
bool initializing_bitmap;
} BackupBlockJob;
static const BlockJobDriver backup_job_driver;
@ -110,10 +112,9 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
BlockBackend *blk = job->common.blk;
int nbytes;
int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;
assert(QEMU_IS_ALIGNED(start, job->cluster_size));
hbitmap_reset(job->copy_bitmap, start, job->cluster_size);
bdrv_reset_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
nbytes = MIN(job->cluster_size, job->len - start);
if (!*bounce_buffer) {
*bounce_buffer = blk_blockalign(blk, job->cluster_size);
@ -128,14 +129,8 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
goto fail;
}
if (buffer_is_zero(*bounce_buffer, nbytes)) {
ret = blk_co_pwrite_zeroes(job->target, start,
nbytes, write_flags | BDRV_REQ_MAY_UNMAP);
} else {
ret = blk_co_pwrite(job->target, start,
nbytes, *bounce_buffer, write_flags |
(job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0));
}
ret = blk_co_pwrite(job->target, start, nbytes, *bounce_buffer,
job->write_flags);
if (ret < 0) {
trace_backup_do_cow_write_fail(job, start, ret);
if (error_is_read) {
@ -146,7 +141,7 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
return nbytes;
fail:
hbitmap_set(job->copy_bitmap, start, job->cluster_size);
bdrv_set_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
return ret;
}
@ -163,24 +158,96 @@ static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
BlockBackend *blk = job->common.blk;
int nbytes;
int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;
assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
assert(QEMU_IS_ALIGNED(start, job->cluster_size));
nbytes = MIN(job->copy_range_size, end - start);
nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
hbitmap_reset(job->copy_bitmap, start, job->cluster_size * nr_clusters);
bdrv_reset_dirty_bitmap(job->copy_bitmap, start,
job->cluster_size * nr_clusters);
ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
read_flags, write_flags);
read_flags, job->write_flags);
if (ret < 0) {
trace_backup_do_cow_copy_range_fail(job, start, ret);
hbitmap_set(job->copy_bitmap, start, job->cluster_size * nr_clusters);
bdrv_set_dirty_bitmap(job->copy_bitmap, start,
job->cluster_size * nr_clusters);
return ret;
}
return nbytes;
}
/*
* Check if the cluster starting at offset is allocated or not.
* return via pnum the number of contiguous clusters sharing this allocation.
*/
static int backup_is_cluster_allocated(BackupBlockJob *s, int64_t offset,
int64_t *pnum)
{
BlockDriverState *bs = blk_bs(s->common.blk);
int64_t count, total_count = 0;
int64_t bytes = s->len - offset;
int ret;
assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
while (true) {
ret = bdrv_is_allocated(bs, offset, bytes, &count);
if (ret < 0) {
return ret;
}
total_count += count;
if (ret || count == 0) {
/*
* ret: partial segment(s) are considered allocated.
* otherwise: unallocated tail is treated as an entire segment.
*/
*pnum = DIV_ROUND_UP(total_count, s->cluster_size);
return ret;
}
/* Unallocated segment(s) with uncertain following segment(s) */
if (total_count >= s->cluster_size) {
*pnum = total_count / s->cluster_size;
return 0;
}
offset += count;
bytes -= count;
}
}
/**
* Reset bits in copy_bitmap starting at offset if they represent unallocated
* data in the image. May reset subsequent contiguous bits.
* @return 0 when the cluster at @offset was unallocated,
* 1 otherwise, and -ret on error.
*/
static int64_t backup_bitmap_reset_unallocated(BackupBlockJob *s,
int64_t offset, int64_t *count)
{
int ret;
int64_t clusters, bytes, estimate;
ret = backup_is_cluster_allocated(s, offset, &clusters);
if (ret < 0) {
return ret;
}
bytes = clusters * s->cluster_size;
if (!ret) {
bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
estimate = bdrv_get_dirty_count(s->copy_bitmap);
job_progress_set_remaining(&s->common.job, estimate);
}
*count = bytes;
return ret;
}
static int coroutine_fn backup_do_cow(BackupBlockJob *job,
int64_t offset, uint64_t bytes,
bool *error_is_read,
@ -190,6 +257,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
int ret = 0;
int64_t start, end; /* bytes */
void *bounce_buffer = NULL;
int64_t status_bytes;
qemu_co_rwlock_rdlock(&job->flush_rwlock);
@ -204,17 +272,29 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
while (start < end) {
int64_t dirty_end;
if (!hbitmap_get(job->copy_bitmap, start)) {
if (!bdrv_dirty_bitmap_get(job->copy_bitmap, start)) {
trace_backup_do_cow_skip(job, start);
start += job->cluster_size;
continue; /* already copied */
}
dirty_end = hbitmap_next_zero(job->copy_bitmap, start, (end - start));
dirty_end = bdrv_dirty_bitmap_next_zero(job->copy_bitmap, start,
(end - start));
if (dirty_end < 0) {
dirty_end = end;
}
if (job->initializing_bitmap) {
ret = backup_bitmap_reset_unallocated(job, start, &status_bytes);
if (ret == 0) {
trace_backup_do_cow_skip_range(job, start, status_bytes);
start += status_bytes;
continue;
}
/* Clamp to known allocated region */
dirty_end = MIN(dirty_end, start + status_bytes);
}
trace_backup_do_cow_process(job, start);
if (job->use_copy_range) {
@ -273,15 +353,29 @@ static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
BdrvDirtyBitmap *bm;
BlockDriverState *bs = blk_bs(job->common.blk);
bool sync = (((ret == 0) || (job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS)) \
&& (job->bitmap_mode != BITMAP_SYNC_MODE_NEVER));
if (ret < 0) {
/* Merge the successor back into the parent, delete nothing. */
bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
assert(bm);
} else {
/* Everything is fine, delete this bitmap and install the backup. */
if (sync) {
/*
* We succeeded, or we always intended to sync the bitmap.
* Delete this bitmap and install the child.
*/
bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
assert(bm);
} else {
/*
* We failed, or we never intended to sync the bitmap anyway.
* Merge the successor back into the parent, keeping all data.
*/
bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
}
assert(bm);
if (ret < 0 && job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS) {
/* If we failed and synced, merge in the bits we didn't copy: */
bdrv_dirty_bitmap_merge_internal(bm, job->copy_bitmap,
NULL, true);
}
}
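
The hunk above is the heart of the new bitmap-mode handling on job completion: the user's bitmap is replaced by its successor (which tracked writes made while the job ran) when the backup succeeded or bitmap-mode is 'always'; otherwise the successor is merged back into the parent so nothing is lost; and a failed 'always' job additionally re-marks whatever was not copied. A rough Python model of that policy, with bitmaps treated as sets of dirty clusters (illustrative only, not QEMU API):

    def cleanup_sync_bitmap(ret, bitmap_mode, parent, successor, copy_bitmap):
        """Model of backup_cleanup_sync_bitmap(); ret is the job's return code."""
        sync = (ret == 0 or bitmap_mode == 'always') and bitmap_mode != 'never'
        if sync:
            # Succeeded, or always syncing: "abdicate" -- the successor
            # replaces the frozen parent bitmap.
            result = set(successor)
        else:
            # Failed, or 'never': "reclaim" -- merge the successor back into
            # the parent, keeping all pre-backup dirty data.
            result = parent | successor
        if ret < 0 and bitmap_mode == 'always':
            # Failed 'always' job: also merge in the clusters we did not copy.
            result |= copy_bitmap
        return result
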
@ -304,14 +398,16 @@ static void backup_abort(Job *job)
static void backup_clean(Job *job)
{
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
BlockDriverState *bs = blk_bs(s->common.blk);
if (s->copy_bitmap) {
bdrv_release_dirty_bitmap(bs, s->copy_bitmap);
s->copy_bitmap = NULL;
}
assert(s->target);
blk_unref(s->target);
s->target = NULL;
if (s->copy_bitmap) {
hbitmap_free(s->copy_bitmap);
s->copy_bitmap = NULL;
}
}
void backup_do_checkpoint(BlockJob *job, Error **errp)
@ -326,7 +422,7 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
return;
}
hbitmap_set(backup_job->copy_bitmap, 0, backup_job->len);
bdrv_set_dirty_bitmap(backup_job->copy_bitmap, 0, backup_job->len);
}
static void backup_drain(BlockJob *job)
@ -377,77 +473,57 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
return false;
}
static bool bdrv_is_unallocated_range(BlockDriverState *bs,
int64_t offset, int64_t bytes)
{
int64_t end = offset + bytes;
while (offset < end && !bdrv_is_allocated(bs, offset, bytes, &bytes)) {
if (bytes == 0) {
return true;
}
offset += bytes;
bytes = end - offset;
}
return offset >= end;
}
static int coroutine_fn backup_loop(BackupBlockJob *job)
{
int ret;
bool error_is_read;
int64_t offset;
HBitmapIter hbi;
BlockDriverState *bs = blk_bs(job->common.blk);
hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
while ((offset = hbitmap_iter_next(&hbi)) != -1) {
if (job->sync_mode == MIRROR_SYNC_MODE_TOP &&
bdrv_is_unallocated_range(bs, offset, job->cluster_size))
{
hbitmap_reset(job->copy_bitmap, offset, job->cluster_size);
continue;
}
BdrvDirtyBitmapIter *bdbi;
int ret = 0;
bdbi = bdrv_dirty_iter_new(job->copy_bitmap);
while ((offset = bdrv_dirty_iter_next(bdbi)) != -1) {
do {
if (yield_and_check(job)) {
return 0;
goto out;
}
ret = backup_do_cow(job, offset,
job->cluster_size, &error_is_read, false);
if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
BLOCK_ERROR_ACTION_REPORT)
{
return ret;
goto out;
}
} while (ret < 0);
}
return 0;
out:
bdrv_dirty_iter_free(bdbi);
return ret;
}
/* init copy_bitmap from sync_bitmap */
static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
static void backup_init_copy_bitmap(BackupBlockJob *job)
{
uint64_t offset = 0;
uint64_t bytes = job->len;
bool ret;
uint64_t estimate;
while (bdrv_dirty_bitmap_next_dirty_area(job->sync_bitmap,
&offset, &bytes))
{
hbitmap_set(job->copy_bitmap, offset, bytes);
offset += bytes;
if (offset >= job->len) {
break;
if (job->sync_mode == MIRROR_SYNC_MODE_BITMAP) {
ret = bdrv_dirty_bitmap_merge_internal(job->copy_bitmap,
job->sync_bitmap,
NULL, true);
assert(ret);
} else {
if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
/*
* We can't hog the coroutine to initialize this thoroughly.
* Set a flag and resume work when we are able to yield safely.
*/
job->initializing_bitmap = true;
}
bytes = job->len - offset;
bdrv_set_dirty_bitmap(job->copy_bitmap, 0, job->len);
}
/* TODO job_progress_set_remaining() would make more sense */
job_progress_update(&job->common.job,
job->len - hbitmap_count(job->copy_bitmap));
estimate = bdrv_get_dirty_count(job->copy_bitmap);
job_progress_set_remaining(&job->common.job, estimate);
}
static int coroutine_fn backup_run(Job *job, Error **errp)
@ -459,17 +535,31 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
QLIST_INIT(&s->inflight_reqs);
qemu_co_rwlock_init(&s->flush_rwlock);
job_progress_set_remaining(job, s->len);
if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
backup_incremental_init_copy_bitmap(s);
} else {
hbitmap_set(s->copy_bitmap, 0, s->len);
}
backup_init_copy_bitmap(s);
s->before_write.notify = backup_before_write_notify;
bdrv_add_before_write_notifier(bs, &s->before_write);
if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
int64_t offset = 0;
int64_t count;
for (offset = 0; offset < s->len; ) {
if (yield_and_check(s)) {
ret = -ECANCELED;
goto out;
}
ret = backup_bitmap_reset_unallocated(s, offset, &count);
if (ret < 0) {
goto out;
}
offset += count;
}
s->initializing_bitmap = false;
}
if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
/* All bits are set in copy_bitmap to allow any cluster to be copied.
* This does not actually require them to be copied. */
@ -482,6 +572,7 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
ret = backup_loop(s);
}
out:
notifier_with_return_remove(&s->before_write);
/* wait until pending backup_do_cow() calls have completed */
@ -545,6 +636,7 @@ static int64_t backup_calculate_cluster_size(BlockDriverState *target,
BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
BlockDriverState *target, int64_t speed,
MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
BitmapSyncMode bitmap_mode,
bool compress,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
@ -556,11 +648,15 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
BackupBlockJob *job = NULL;
int ret;
int64_t cluster_size;
HBitmap *copy_bitmap = NULL;
BdrvDirtyBitmap *copy_bitmap = NULL;
assert(bs);
assert(target);
/* QMP interface protects us from these cases */
assert(sync_mode != MIRROR_SYNC_MODE_INCREMENTAL);
assert(sync_bitmap || sync_mode != MIRROR_SYNC_MODE_BITMAP);
if (bs == target) {
error_setg(errp, "Source and target cannot be the same");
return NULL;
@ -592,10 +688,10 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
return NULL;
}
if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
if (!sync_bitmap) {
error_setg(errp, "must provide a valid bitmap name for "
"\"incremental\" sync mode");
if (sync_bitmap) {
/* If we need to write to this bitmap, check that we can: */
if (bitmap_mode != BITMAP_SYNC_MODE_NEVER &&
bdrv_dirty_bitmap_check(sync_bitmap, BDRV_BITMAP_DEFAULT, errp)) {
return NULL;
}
@ -603,12 +699,6 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
return NULL;
}
} else if (sync_bitmap) {
error_setg(errp,
"a sync_bitmap was provided to backup_run, "
"but received an incompatible sync_mode (%s)",
MirrorSyncMode_str(sync_mode));
return NULL;
}
len = bdrv_getlength(bs);
@ -623,7 +713,11 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
goto error;
}
copy_bitmap = hbitmap_alloc(len, ctz32(cluster_size));
copy_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
if (!copy_bitmap) {
goto error;
}
bdrv_disable_dirty_bitmap(copy_bitmap);
/* job->len is fixed, so we can't allow resize */
job = block_job_create(job_id, &backup_job_driver, txn, bs,
@ -649,12 +743,18 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
job->on_source_error = on_source_error;
job->on_target_error = on_target_error;
job->sync_mode = sync_mode;
job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
sync_bitmap : NULL;
job->compress = compress;
job->sync_bitmap = sync_bitmap;
job->bitmap_mode = bitmap_mode;
/*
* Set write flags:
* 1. Detect image-fleecing (and similar) schemes
* 2. Handle compression
*/
job->write_flags =
(bdrv_chain_contains(target, bs) ? BDRV_REQ_SERIALISING : 0) |
(compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
/* Detect image-fleecing (and similar) schemes */
job->serialize_target_writes = bdrv_chain_contains(target, bs);
job->cluster_size = cluster_size;
job->copy_bitmap = copy_bitmap;
copy_bitmap = NULL;
@ -675,7 +775,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
error:
if (copy_bitmap) {
assert(!job || !job->copy_bitmap);
hbitmap_free(copy_bitmap);
bdrv_release_dirty_bitmap(bs, copy_bitmap);
}
if (sync_bitmap) {
bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);

View File

@ -48,10 +48,9 @@ struct BdrvDirtyBitmap {
bool inconsistent; /* bitmap is persistent, but inconsistent.
It cannot be used at all in any way, except
a QMP user can remove it. */
bool migration; /* Bitmap is selected for migration, it should
not be stored on the next inactivation
(persistent flag doesn't matter until next
invalidation).*/
bool skip_store; /* We are either migrating or deleting this
* bitmap; it should not be stored on the next
* inactivation. */
QLIST_ENTRY(BdrvDirtyBitmap) list;
};
@ -509,14 +508,19 @@ BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs)
}
/* Called within bdrv_dirty_bitmap_lock..unlock */
bool bdrv_get_dirty_locked(BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
int64_t offset)
bool bdrv_dirty_bitmap_get_locked(BdrvDirtyBitmap *bitmap, int64_t offset)
{
if (bitmap) {
return hbitmap_get(bitmap->bitmap, offset);
} else {
return false;
}
return hbitmap_get(bitmap->bitmap, offset);
}
bool bdrv_dirty_bitmap_get(BdrvDirtyBitmap *bitmap, int64_t offset)
{
bool ret;
bdrv_dirty_bitmap_lock(bitmap);
ret = bdrv_dirty_bitmap_get_locked(bitmap, offset);
bdrv_dirty_bitmap_unlock(bitmap);
return ret;
}
/**
@ -757,16 +761,16 @@ void bdrv_dirty_bitmap_set_inconsistent(BdrvDirtyBitmap *bitmap)
}
/* Called with BQL taken. */
void bdrv_dirty_bitmap_set_migration(BdrvDirtyBitmap *bitmap, bool migration)
void bdrv_dirty_bitmap_skip_store(BdrvDirtyBitmap *bitmap, bool skip)
{
qemu_mutex_lock(bitmap->mutex);
bitmap->migration = migration;
bitmap->skip_store = skip;
qemu_mutex_unlock(bitmap->mutex);
}
bool bdrv_dirty_bitmap_get_persistence(BdrvDirtyBitmap *bitmap)
{
return bitmap->persistent && !bitmap->migration;
return bitmap->persistent && !bitmap->skip_store;
}
bool bdrv_dirty_bitmap_inconsistent(const BdrvDirtyBitmap *bitmap)
@ -778,7 +782,7 @@ bool bdrv_has_changed_persistent_bitmaps(BlockDriverState *bs)
{
BdrvDirtyBitmap *bm;
QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
if (bm->persistent && !bm->readonly && !bm->migration) {
if (bm->persistent && !bm->readonly && !bm->skip_store) {
return true;
}
}
@ -810,6 +814,12 @@ bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap,
return hbitmap_next_dirty_area(bitmap->bitmap, offset, bytes);
}
/**
* bdrv_merge_dirty_bitmap: merge src into dest.
* Ensures permissions on bitmaps are reasonable; use for public API.
*
* @backup: If provided, make a copy of dest here prior to merge.
*/
void bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
HBitmap **backup, Error **errp)
{
@ -833,13 +843,7 @@ void bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
goto out;
}
if (backup) {
*backup = dest->bitmap;
dest->bitmap = hbitmap_alloc(dest->size, hbitmap_granularity(*backup));
ret = hbitmap_merge(*backup, src->bitmap, dest->bitmap);
} else {
ret = hbitmap_merge(dest->bitmap, src->bitmap, dest->bitmap);
}
ret = bdrv_dirty_bitmap_merge_internal(dest, src, backup, false);
assert(ret);
out:
@ -848,3 +852,47 @@ out:
qemu_mutex_unlock(src->mutex);
}
}
/**
* bdrv_dirty_bitmap_merge_internal: merge src into dest.
* Does NOT check bitmap permissions; not suitable for use as public API.
*
* @backup: If provided, make a copy of dest here prior to merge.
* @lock: If true, lock and unlock bitmaps on the way in/out.
* returns true if the merge succeeded; false if unattempted.
*/
bool bdrv_dirty_bitmap_merge_internal(BdrvDirtyBitmap *dest,
const BdrvDirtyBitmap *src,
HBitmap **backup,
bool lock)
{
bool ret;
assert(!bdrv_dirty_bitmap_readonly(dest));
assert(!bdrv_dirty_bitmap_inconsistent(dest));
assert(!bdrv_dirty_bitmap_inconsistent(src));
if (lock) {
qemu_mutex_lock(dest->mutex);
if (src->mutex != dest->mutex) {
qemu_mutex_lock(src->mutex);
}
}
if (backup) {
*backup = dest->bitmap;
dest->bitmap = hbitmap_alloc(dest->size, hbitmap_granularity(*backup));
ret = hbitmap_merge(*backup, src->bitmap, dest->bitmap);
} else {
ret = hbitmap_merge(dest->bitmap, src->bitmap, dest->bitmap);
}
if (lock) {
qemu_mutex_unlock(dest->mutex);
if (src->mutex != dest->mutex) {
qemu_mutex_unlock(src->mutex);
}
}
return ret;
}

View File

@ -476,7 +476,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
int64_t next_offset = offset + nb_chunks * s->granularity;
int64_t next_chunk = next_offset / s->granularity;
if (next_offset >= s->bdev_length ||
!bdrv_get_dirty_locked(source, s->dirty_bitmap, next_offset)) {
!bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
break;
}
if (test_bit(next_chunk, s->in_flight_bitmap)) {
@ -1755,8 +1755,10 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
bool is_none_mode;
BlockDriverState *base;
if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
error_setg(errp, "Sync mode 'incremental' not supported");
if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
(mode == MIRROR_SYNC_MODE_BITMAP)) {
error_setg(errp, "Sync mode '%s' not supported",
MirrorSyncMode_str(mode));
return;
}
is_none_mode = mode == MIRROR_SYNC_MODE_NONE;

View File

@ -79,6 +79,11 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
info->backing_file = g_strdup(bs->backing_file);
}
if (!QLIST_EMPTY(&bs->dirty_bitmaps)) {
info->has_dirty_bitmaps = true;
info->dirty_bitmaps = bdrv_query_dirty_bitmaps(bs);
}
info->detect_zeroes = bs->detect_zeroes;
if (blk && blk_get_public(blk)->throttle_group_member.throttle_state) {

View File

@ -543,7 +543,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
s->backup_job = backup_job_create(
NULL, s->secondary_disk->bs, s->hidden_disk->bs,
0, MIRROR_SYNC_MODE_NONE, NULL, false,
0, MIRROR_SYNC_MODE_NONE, NULL, 0, false,
BLOCKDEV_ON_ERROR_REPORT,
BLOCKDEV_ON_ERROR_REPORT, JOB_INTERNAL,
backup_job_completed, bs, NULL, &local_err);

View File

@ -41,6 +41,7 @@ mirror_yield_in_flight(void *s, int64_t offset, int in_flight) "s %p offset %" P
backup_do_cow_enter(void *job, int64_t start, int64_t offset, uint64_t bytes) "job %p start %" PRId64 " offset %" PRId64 " bytes %" PRIu64
backup_do_cow_return(void *job, int64_t offset, uint64_t bytes, int ret) "job %p offset %" PRId64 " bytes %" PRIu64 " ret %d"
backup_do_cow_skip(void *job, int64_t start) "job %p start %"PRId64
backup_do_cow_skip_range(void *job, int64_t start, uint64_t bytes) "job %p start %"PRId64" bytes %"PRId64
backup_do_cow_process(void *job, int64_t start) "job %p start %"PRId64
backup_do_cow_read_fail(void *job, int64_t start, int ret) "job %p start %"PRId64" ret %d"
backup_do_cow_write_fail(void *job, int64_t start, int ret) "job %p start %"PRId64" ret %d"

View File

@ -2136,6 +2136,51 @@ static void block_dirty_bitmap_merge_prepare(BlkActionState *common,
errp);
}
static BdrvDirtyBitmap *do_block_dirty_bitmap_remove(
const char *node, const char *name, bool release,
BlockDriverState **bitmap_bs, Error **errp);
static void block_dirty_bitmap_remove_prepare(BlkActionState *common,
Error **errp)
{
BlockDirtyBitmap *action;
BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
common, common);
if (action_check_completion_mode(common, errp) < 0) {
return;
}
action = common->action->u.block_dirty_bitmap_remove.data;
state->bitmap = do_block_dirty_bitmap_remove(action->node, action->name,
false, &state->bs, errp);
if (state->bitmap) {
bdrv_dirty_bitmap_skip_store(state->bitmap, true);
bdrv_dirty_bitmap_set_busy(state->bitmap, true);
}
}
static void block_dirty_bitmap_remove_abort(BlkActionState *common)
{
BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
common, common);
if (state->bitmap) {
bdrv_dirty_bitmap_skip_store(state->bitmap, false);
bdrv_dirty_bitmap_set_busy(state->bitmap, false);
}
}
static void block_dirty_bitmap_remove_commit(BlkActionState *common)
{
BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
common, common);
bdrv_dirty_bitmap_set_busy(state->bitmap, false);
bdrv_release_dirty_bitmap(state->bs, state->bitmap);
}
static void abort_prepare(BlkActionState *common, Error **errp)
{
error_setg(errp, "Transaction aborted using Abort action");
@ -2213,6 +2258,12 @@ static const BlkActionOps actions[] = {
.commit = block_dirty_bitmap_free_backup,
.abort = block_dirty_bitmap_restore,
},
[TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_REMOVE] = {
.instance_size = sizeof(BlockDirtyBitmapState),
.prepare = block_dirty_bitmap_remove_prepare,
.commit = block_dirty_bitmap_remove_commit,
.abort = block_dirty_bitmap_remove_abort,
},
/* Where are transactions for MIRROR, COMMIT and STREAM?
* Although these blockjobs use transaction callbacks like the backup job,
* these jobs do not necessarily adhere to transaction semantics.
@ -2813,7 +2864,6 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name,
{
BlockDriverState *bs;
BdrvDirtyBitmap *bitmap;
AioContext *aio_context = NULL;
if (!name || name[0] == '\0') {
error_setg(errp, "Bitmap name cannot be empty");
@ -2849,16 +2899,20 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name,
}
if (persistent) {
aio_context = bdrv_get_aio_context(bs);
AioContext *aio_context = bdrv_get_aio_context(bs);
bool ok;
aio_context_acquire(aio_context);
if (!bdrv_can_store_new_dirty_bitmap(bs, name, granularity, errp)) {
goto out;
ok = bdrv_can_store_new_dirty_bitmap(bs, name, granularity, errp);
aio_context_release(aio_context);
if (!ok) {
return;
}
}
bitmap = bdrv_create_dirty_bitmap(bs, granularity, name, errp);
if (bitmap == NULL) {
goto out;
return;
}
if (disabled) {
@ -2866,45 +2920,54 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name,
}
bdrv_dirty_bitmap_set_persistence(bitmap, persistent);
out:
if (aio_context) {
aio_context_release(aio_context);
}
static BdrvDirtyBitmap *do_block_dirty_bitmap_remove(
const char *node, const char *name, bool release,
BlockDriverState **bitmap_bs, Error **errp)
{
BlockDriverState *bs;
BdrvDirtyBitmap *bitmap;
bitmap = block_dirty_bitmap_lookup(node, name, &bs, errp);
if (!bitmap || !bs) {
return NULL;
}
if (bdrv_dirty_bitmap_check(bitmap, BDRV_BITMAP_BUSY | BDRV_BITMAP_RO,
errp)) {
return NULL;
}
if (bdrv_dirty_bitmap_get_persistence(bitmap)) {
AioContext *aio_context = bdrv_get_aio_context(bs);
Error *local_err = NULL;
aio_context_acquire(aio_context);
bdrv_remove_persistent_dirty_bitmap(bs, name, &local_err);
aio_context_release(aio_context);
if (local_err != NULL) {
error_propagate(errp, local_err);
return NULL;
}
}
if (release) {
bdrv_release_dirty_bitmap(bs, bitmap);
}
if (bitmap_bs) {
*bitmap_bs = bs;
}
return release ? NULL : bitmap;
}
void qmp_block_dirty_bitmap_remove(const char *node, const char *name,
Error **errp)
{
BlockDriverState *bs;
BdrvDirtyBitmap *bitmap;
Error *local_err = NULL;
AioContext *aio_context = NULL;
bitmap = block_dirty_bitmap_lookup(node, name, &bs, errp);
if (!bitmap || !bs) {
return;
}
if (bdrv_dirty_bitmap_check(bitmap, BDRV_BITMAP_BUSY | BDRV_BITMAP_RO,
errp)) {
return;
}
if (bdrv_dirty_bitmap_get_persistence(bitmap)) {
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
bdrv_remove_persistent_dirty_bitmap(bs, name, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
goto out;
}
}
bdrv_release_dirty_bitmap(bs, bitmap);
out:
if (aio_context) {
aio_context_release(aio_context);
}
do_block_dirty_bitmap_remove(node, name, true, NULL, errp);
}
/**
@ -3427,20 +3490,16 @@ out:
aio_context_release(aio_context);
}
static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn,
Error **errp)
/* Common QMP interface for drive-backup and blockdev-backup */
static BlockJob *do_backup_common(BackupCommon *backup,
BlockDriverState *bs,
BlockDriverState *target_bs,
AioContext *aio_context,
JobTxn *txn, Error **errp)
{
BlockDriverState *bs;
BlockDriverState *target_bs;
BlockDriverState *source = NULL;
BlockJob *job = NULL;
BdrvDirtyBitmap *bmap = NULL;
AioContext *aio_context;
QDict *options = NULL;
Error *local_err = NULL;
int flags, job_flags = JOB_DEFAULT;
int64_t size;
bool set_backing_hd = false;
int job_flags = JOB_DEFAULT;
int ret;
if (!backup->has_speed) {
@ -3452,9 +3511,6 @@ static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn,
if (!backup->has_on_target_error) {
backup->on_target_error = BLOCKDEV_ON_ERROR_REPORT;
}
if (!backup->has_mode) {
backup->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
}
if (!backup->has_job_id) {
backup->job_id = NULL;
}
@ -3468,6 +3524,107 @@ static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn,
backup->compress = false;
}
ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
if (ret < 0) {
return NULL;
}
if ((backup->sync == MIRROR_SYNC_MODE_BITMAP) ||
(backup->sync == MIRROR_SYNC_MODE_INCREMENTAL)) {
/* done before desugaring 'incremental' to print the right message */
if (!backup->has_bitmap) {
error_setg(errp, "must provide a valid bitmap name for "
"'%s' sync mode", MirrorSyncMode_str(backup->sync));
return NULL;
}
}
if (backup->sync == MIRROR_SYNC_MODE_INCREMENTAL) {
if (backup->has_bitmap_mode &&
backup->bitmap_mode != BITMAP_SYNC_MODE_ON_SUCCESS) {
error_setg(errp, "Bitmap sync mode must be '%s' "
"when using sync mode '%s'",
BitmapSyncMode_str(BITMAP_SYNC_MODE_ON_SUCCESS),
MirrorSyncMode_str(backup->sync));
return NULL;
}
backup->has_bitmap_mode = true;
backup->sync = MIRROR_SYNC_MODE_BITMAP;
backup->bitmap_mode = BITMAP_SYNC_MODE_ON_SUCCESS;
}
if (backup->has_bitmap) {
bmap = bdrv_find_dirty_bitmap(bs, backup->bitmap);
if (!bmap) {
error_setg(errp, "Bitmap '%s' could not be found", backup->bitmap);
return NULL;
}
if (!backup->has_bitmap_mode) {
error_setg(errp, "Bitmap sync mode must be given "
"when providing a bitmap");
return NULL;
}
if (bdrv_dirty_bitmap_check(bmap, BDRV_BITMAP_ALLOW_RO, errp)) {
return NULL;
}
/* This does not produce a useful bitmap artifact: */
if (backup->sync == MIRROR_SYNC_MODE_NONE) {
error_setg(errp, "sync mode '%s' does not produce meaningful bitmap"
" outputs", MirrorSyncMode_str(backup->sync));
return NULL;
}
/* If the bitmap isn't used for input or output, this is useless: */
if (backup->bitmap_mode == BITMAP_SYNC_MODE_NEVER &&
backup->sync != MIRROR_SYNC_MODE_BITMAP) {
error_setg(errp, "Bitmap sync mode '%s' has no meaningful effect"
" when combined with sync mode '%s'",
BitmapSyncMode_str(backup->bitmap_mode),
MirrorSyncMode_str(backup->sync));
return NULL;
}
}
if (!backup->has_bitmap && backup->has_bitmap_mode) {
error_setg(errp, "Cannot specify bitmap sync mode without a bitmap");
return NULL;
}
if (!backup->auto_finalize) {
job_flags |= JOB_MANUAL_FINALIZE;
}
if (!backup->auto_dismiss) {
job_flags |= JOB_MANUAL_DISMISS;
}
job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
backup->sync, bmap, backup->bitmap_mode,
backup->compress,
backup->on_source_error,
backup->on_target_error,
job_flags, NULL, NULL, txn, errp);
return job;
}
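
As the hunk above shows, sync mode 'incremental' is now sugar for sync mode 'bitmap' with bitmap-mode 'on-success', and a bitmap may also accompany 'full' or 'top' provided a bitmap-mode is given. In iotests-style Python (a hypothetical snippet; device name, bitmap name and target path are illustrative), the two calls below request the same job:

    # Classic incremental backup: implies bitmap-mode 'on-success'.
    vm.qmp('drive-backup', device='drive0', target='/tmp/inc.qcow2',
           format='qcow2', sync='incremental', bitmap='bitmap0')

    # Equivalent request using the new, explicit spelling.
    vm.qmp('drive-backup', device='drive0', target='/tmp/inc.qcow2',
           format='qcow2', sync='bitmap', bitmap='bitmap0',
           bitmap_mode='on-success')
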
static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn,
Error **errp)
{
BlockDriverState *bs;
BlockDriverState *target_bs;
BlockDriverState *source = NULL;
BlockJob *job = NULL;
AioContext *aio_context;
QDict *options;
Error *local_err = NULL;
int flags;
int64_t size;
bool set_backing_hd = false;
if (!backup->has_mode) {
backup->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
}
bs = bdrv_lookup_bs(backup->device, backup->device, errp);
if (!bs) {
return NULL;
@ -3531,10 +3688,10 @@ static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn,
goto out;
}
options = qdict_new();
qdict_put_str(options, "discard", "unmap");
qdict_put_str(options, "detect-zeroes", "unmap");
if (backup->format) {
if (!options) {
options = qdict_new();
}
qdict_put_str(options, "driver", backup->format);
}
@ -3543,12 +3700,6 @@ static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn,
goto out;
}
ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
if (ret < 0) {
bdrv_unref(target_bs);
goto out;
}
if (set_backing_hd) {
bdrv_set_backing_hd(target_bs, source, &local_err);
if (local_err) {
@ -3556,31 +3707,8 @@ static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn,
}
}
if (backup->has_bitmap) {
bmap = bdrv_find_dirty_bitmap(bs, backup->bitmap);
if (!bmap) {
error_setg(errp, "Bitmap '%s' could not be found", backup->bitmap);
goto unref;
}
if (bdrv_dirty_bitmap_check(bmap, BDRV_BITMAP_DEFAULT, errp)) {
goto unref;
}
}
if (!backup->auto_finalize) {
job_flags |= JOB_MANUAL_FINALIZE;
}
if (!backup->auto_dismiss) {
job_flags |= JOB_MANUAL_DISMISS;
}
job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
backup->sync, bmap, backup->compress,
backup->on_source_error, backup->on_target_error,
job_flags, NULL, NULL, txn, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
goto unref;
}
job = do_backup_common(qapi_DriveBackup_base(backup),
bs, target_bs, aio_context, txn, errp);
unref:
bdrv_unref(target_bs);
@ -3614,78 +3742,25 @@ BlockJob *do_blockdev_backup(BlockdevBackup *backup, JobTxn *txn,
{
BlockDriverState *bs;
BlockDriverState *target_bs;
Error *local_err = NULL;
BdrvDirtyBitmap *bmap = NULL;
AioContext *aio_context;
BlockJob *job = NULL;
int job_flags = JOB_DEFAULT;
int ret;
if (!backup->has_speed) {
backup->speed = 0;
}
if (!backup->has_on_source_error) {
backup->on_source_error = BLOCKDEV_ON_ERROR_REPORT;
}
if (!backup->has_on_target_error) {
backup->on_target_error = BLOCKDEV_ON_ERROR_REPORT;
}
if (!backup->has_job_id) {
backup->job_id = NULL;
}
if (!backup->has_auto_finalize) {
backup->auto_finalize = true;
}
if (!backup->has_auto_dismiss) {
backup->auto_dismiss = true;
}
if (!backup->has_compress) {
backup->compress = false;
}
BlockJob *job;
bs = bdrv_lookup_bs(backup->device, backup->device, errp);
if (!bs) {
return NULL;
}
target_bs = bdrv_lookup_bs(backup->target, backup->target, errp);
if (!target_bs) {
return NULL;
}
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
target_bs = bdrv_lookup_bs(backup->target, backup->target, errp);
if (!target_bs) {
goto out;
}
job = do_backup_common(qapi_BlockdevBackup_base(backup),
bs, target_bs, aio_context, txn, errp);
ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
if (ret < 0) {
goto out;
}
if (backup->has_bitmap) {
bmap = bdrv_find_dirty_bitmap(bs, backup->bitmap);
if (!bmap) {
error_setg(errp, "Bitmap '%s' could not be found", backup->bitmap);
goto out;
}
if (bdrv_dirty_bitmap_check(bmap, BDRV_BITMAP_DEFAULT, errp)) {
goto out;
}
}
if (!backup->auto_finalize) {
job_flags |= JOB_MANUAL_FINALIZE;
}
if (!backup->auto_dismiss) {
job_flags |= JOB_MANUAL_DISMISS;
}
job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
backup->sync, bmap, backup->compress,
backup->on_source_error, backup->on_target_error,
job_flags, NULL, NULL, txn, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
}
out:
aio_context_release(aio_context);
return job;
}

View File

@ -1147,7 +1147,8 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
* @target: Block device to write to.
* @speed: The maximum speed, in bytes per second, or 0 for unlimited.
* @sync_mode: What parts of the disk image should be copied to the destination.
* @sync_bitmap: The dirty bitmap if sync_mode is MIRROR_SYNC_MODE_INCREMENTAL.
* @sync_bitmap: The dirty bitmap if sync_mode is 'bitmap' or 'incremental'
* @bitmap_mode: The bitmap synchronization policy to use.
* @on_source_error: The action to take upon error reading from the source.
* @on_target_error: The action to take upon error writing to the target.
* @creation_flags: Flags that control the behavior of the Job lifetime.
@ -1163,6 +1164,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
BlockDriverState *target, int64_t speed,
MirrorSyncMode sync_mode,
BdrvDirtyBitmap *sync_bitmap,
BitmapSyncMode bitmap_mode,
bool compress,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
@ -1251,6 +1253,9 @@ void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes);
void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out);
void bdrv_restore_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *backup);
bool bdrv_dirty_bitmap_merge_internal(BdrvDirtyBitmap *dest,
const BdrvDirtyBitmap *src,
HBitmap **backup, bool lock);
void bdrv_inc_in_flight(BlockDriverState *bs);
void bdrv_dec_in_flight(BlockDriverState *bs);

View File

@ -83,13 +83,13 @@ void bdrv_dirty_bitmap_set_inconsistent(BdrvDirtyBitmap *bitmap);
void bdrv_dirty_bitmap_set_busy(BdrvDirtyBitmap *bitmap, bool busy);
void bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
HBitmap **backup, Error **errp);
void bdrv_dirty_bitmap_set_migration(BdrvDirtyBitmap *bitmap, bool migration);
void bdrv_dirty_bitmap_skip_store(BdrvDirtyBitmap *bitmap, bool skip);
bool bdrv_dirty_bitmap_get(BdrvDirtyBitmap *bitmap, int64_t offset);
/* Functions that require manual locking. */
void bdrv_dirty_bitmap_lock(BdrvDirtyBitmap *bitmap);
void bdrv_dirty_bitmap_unlock(BdrvDirtyBitmap *bitmap);
bool bdrv_get_dirty_locked(BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
int64_t offset);
bool bdrv_dirty_bitmap_get_locked(BdrvDirtyBitmap *bitmap, int64_t offset);
void bdrv_set_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap,
int64_t offset, int64_t bytes);
void bdrv_reset_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap,

View File

@ -326,7 +326,7 @@ static int init_dirty_bitmap_migration(void)
/* unset migration flags here, to not roll back it */
QSIMPLEQ_FOREACH(dbms, &dirty_bitmap_mig_state.dbms_list, entry) {
bdrv_dirty_bitmap_set_migration(dbms->bitmap, true);
bdrv_dirty_bitmap_skip_store(dbms->bitmap, true);
}
if (QSIMPLEQ_EMPTY(&dirty_bitmap_mig_state.dbms_list)) {

View File

@ -521,7 +521,6 @@ static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
int is_async)
{
BlkMigBlock *blk;
BlockDriverState *bs = blk_bs(bmds->blk);
int64_t total_sectors = bmds->total_sectors;
int64_t sector;
int nr_sectors;
@ -536,8 +535,8 @@ static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
blk_mig_unlock();
}
bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
if (bdrv_get_dirty_locked(bs, bmds->dirty_bitmap,
sector * BDRV_SECTOR_SIZE)) {
if (bdrv_dirty_bitmap_get_locked(bmds->dirty_bitmap,
sector * BDRV_SECTOR_SIZE)) {
if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
nr_sectors = total_sectors - sector;
} else {

View File

@ -2004,7 +2004,7 @@ static unsigned int bitmap_to_extents(BdrvDirtyBitmap *bitmap, uint64_t offset,
bdrv_dirty_bitmap_lock(bitmap);
it = bdrv_dirty_iter_new(bitmap);
dirty = bdrv_get_dirty_locked(NULL, bitmap, offset);
dirty = bdrv_dirty_bitmap_get_locked(bitmap, offset);
assert(begin < overall_end && nb_extents);
while (begin < overall_end && i < nb_extents) {

View File

@ -360,6 +360,9 @@
# @write_threshold: configured write threshold for the device.
# 0 if disabled. (Since 2.3)
#
# @dirty-bitmaps: dirty bitmaps information (only present if node
# has one or more dirty bitmaps) (Since 4.2)
#
# Since: 0.14.0
#
##
@ -378,7 +381,7 @@
'*bps_wr_max_length': 'int', '*iops_max_length': 'int',
'*iops_rd_max_length': 'int', '*iops_wr_max_length': 'int',
'*iops_size': 'int', '*group': 'str', 'cache': 'BlockdevCacheInfo',
'write_threshold': 'int' } }
'write_threshold': 'int', '*dirty-bitmaps': ['BlockDirtyInfo'] } }
##
# @BlockDeviceIoStatus:
@ -656,6 +659,7 @@
#
# @dirty-bitmaps: dirty bitmaps information (only present if the
# driver has one or more dirty bitmaps) (Since 2.0)
# Deprecated in 4.2; see BlockDeviceInfo instead.
#
# @io-status: @BlockDeviceIoStatus. Only present if the device
# supports it and the VM is configured to stop on errors
@ -1127,12 +1131,35 @@
#
# @none: only copy data written from now on
#
# @incremental: only copy data described by the dirty bitmap. Since: 2.4
# @incremental: only copy data described by the dirty bitmap. (since: 2.4)
#
# @bitmap: only copy data described by the dirty bitmap. (since: 4.2)
# Behavior on completion is determined by the BitmapSyncMode.
#
# Since: 1.3
##
{ 'enum': 'MirrorSyncMode',
'data': ['top', 'full', 'none', 'incremental'] }
'data': ['top', 'full', 'none', 'incremental', 'bitmap'] }
##
# @BitmapSyncMode:
#
# An enumeration of possible behaviors for the synchronization of a bitmap
# when used for data copy operations.
#
# @on-success: The bitmap is only synced when the operation is successful.
# This is the behavior always used for 'INCREMENTAL' backups.
#
# @never: The bitmap is never synchronized with the operation, and is
# treated solely as a read-only manifest of blocks to copy.
#
# @always: The bitmap is always synchronized with the operation,
# regardless of whether or not the operation was successful.
#
# Since: 4.2
##
{ 'enum': 'BitmapSyncMode',
'data': ['on-success', 'never', 'always'] }
##
# @MirrorCopyMode:
@ -1315,13 +1342,73 @@
'data': { 'node': 'str', 'overlay': 'str' } }
##
# @DriveBackup:
# @BackupCommon:
#
# @job-id: identifier for the newly-created block job. If
# omitted, the device name will be used. (Since 2.7)
#
# @device: the device name or node-name of a root node which should be copied.
#
# @sync: what parts of the disk image should be copied to the destination
# (all the disk, only the sectors allocated in the topmost image, from a
# dirty bitmap, or only new I/O).
#
# @speed: the maximum speed, in bytes per second. The default is 0,
# for unlimited.
#
# @bitmap: The name of a dirty bitmap to use.
# Must be present if sync is "bitmap" or "incremental".
# Can be present if sync is "full" or "top".
# Must not be present otherwise.
# (Since 2.4 (drive-backup), 3.1 (blockdev-backup))
#
# @bitmap-mode: Specifies the type of data the bitmap should contain after
# the operation concludes.
# Must be present if a bitmap was provided,
# Must NOT be present otherwise. (Since 4.2)
#
# @compress: true to compress data, if the target format supports it.
# (default: false) (since 2.8)
#
# @on-source-error: the action to take on an error on the source,
# default 'report'. 'stop' and 'enospc' can only be used
# if the block device supports io-status (see BlockInfo).
#
# @on-target-error: the action to take on an error on the target,
# default 'report' (no limitations, since this applies to
# a different block device than @device).
#
# @auto-finalize: When false, this job will wait in a PENDING state after it has
# finished its work, waiting for @block-job-finalize before
# making any block graph changes.
# When true, this job will automatically
# perform its abort or commit actions.
# Defaults to true. (Since 2.12)
#
# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
# has completely ceased all work, and awaits @block-job-dismiss.
# When true, this job will automatically disappear from the query
# list without user intervention.
# Defaults to true. (Since 2.12)
#
# Note: @on-source-error and @on-target-error only affect background
# I/O. If an error occurs during a guest write request, the device's
# rerror/werror actions will be used.
#
# Since: 4.2
##
{ 'struct': 'BackupCommon',
'data': { '*job-id': 'str', 'device': 'str',
'sync': 'MirrorSyncMode', '*speed': 'int',
'*bitmap': 'str', '*bitmap-mode': 'BitmapSyncMode',
'*compress': 'bool',
'*on-source-error': 'BlockdevOnError',
'*on-target-error': 'BlockdevOnError',
'*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
##
# @DriveBackup:
#
# @target: the target of the new image. If the file exists, or if it
# is a device, the existing file/device will be used as the new
# destination. If it does not exist, a new file will be created.
@ -1329,116 +1416,27 @@
# @format: the format of the new destination, default is to
# probe if @mode is 'existing', else the format of the source
#
# @sync: what parts of the disk image should be copied to the destination
# (all the disk, only the sectors allocated in the topmost image, from a
# dirty bitmap, or only new I/O).
#
# @mode: whether and how QEMU should create a new image, default is
# 'absolute-paths'.
#
# @speed: the maximum speed, in bytes per second
#
# @bitmap: the name of dirty bitmap if sync is "incremental".
# Must be present if sync is "incremental", must NOT be present
# otherwise. (Since 2.4)
#
# @compress: true to compress data, if the target format supports it.
# (default: false) (since 2.8)
#
# @on-source-error: the action to take on an error on the source,
# default 'report'. 'stop' and 'enospc' can only be used
# if the block device supports io-status (see BlockInfo).
#
# @on-target-error: the action to take on an error on the target,
# default 'report' (no limitations, since this applies to
# a different block device than @device).
#
# @auto-finalize: When false, this job will wait in a PENDING state after it has
# finished its work, waiting for @block-job-finalize before
# making any block graph changes.
# When true, this job will automatically
# perform its abort or commit actions.
# Defaults to true. (Since 2.12)
#
# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
# has completely ceased all work, and awaits @block-job-dismiss.
# When true, this job will automatically disappear from the query
# list without user intervention.
# Defaults to true. (Since 2.12)
#
# Note: @on-source-error and @on-target-error only affect background
# I/O. If an error occurs during a guest write request, the device's
# rerror/werror actions will be used.
#
# Since: 1.6
##
{ 'struct': 'DriveBackup',
'data': { '*job-id': 'str', 'device': 'str', 'target': 'str',
'*format': 'str', 'sync': 'MirrorSyncMode',
'*mode': 'NewImageMode', '*speed': 'int',
'*bitmap': 'str', '*compress': 'bool',
'*on-source-error': 'BlockdevOnError',
'*on-target-error': 'BlockdevOnError',
'*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
'base': 'BackupCommon',
'data': { 'target': 'str',
'*format': 'str',
'*mode': 'NewImageMode' } }
##
# @BlockdevBackup:
#
# @job-id: identifier for the newly-created block job. If
# omitted, the device name will be used. (Since 2.7)
#
# @device: the device name or node-name of a root node which should be copied.
#
# @target: the device name or node-name of the backup target node.
#
# @sync: what parts of the disk image should be copied to the destination
# (all the disk, only the sectors allocated in the topmost image, or
# only new I/O).
#
# @speed: the maximum speed, in bytes per second. The default is 0,
# for unlimited.
#
# @bitmap: the name of dirty bitmap if sync is "incremental".
# Must be present if sync is "incremental", must NOT be present
# otherwise. (Since 3.1)
#
# @compress: true to compress data, if the target format supports it.
# (default: false) (since 2.8)
#
# @on-source-error: the action to take on an error on the source,
# default 'report'. 'stop' and 'enospc' can only be used
# if the block device supports io-status (see BlockInfo).
#
# @on-target-error: the action to take on an error on the target,
# default 'report' (no limitations, since this applies to
# a different block device than @device).
#
# @auto-finalize: When false, this job will wait in a PENDING state after it has
# finished its work, waiting for @block-job-finalize before
# making any block graph changes.
# When true, this job will automatically
# perform its abort or commit actions.
# Defaults to true. (Since 2.12)
#
# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
# has completely ceased all work, and awaits @block-job-dismiss.
# When true, this job will automatically disappear from the query
# list without user intervention.
# Defaults to true. (Since 2.12)
#
# Note: @on-source-error and @on-target-error only affect background
# I/O. If an error occurs during a guest write request, the device's
# rerror/werror actions will be used.
#
# Since: 2.3
##
{ 'struct': 'BlockdevBackup',
'data': { '*job-id': 'str', 'device': 'str', 'target': 'str',
'sync': 'MirrorSyncMode', '*speed': 'int',
'*bitmap': 'str', '*compress': 'bool',
'*on-source-error': 'BlockdevOnError',
'*on-target-error': 'BlockdevOnError',
'*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
'base': 'BackupCommon',
'data': { 'target': 'str' } }
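
Since BlockdevBackup now shares the same BackupCommon base, node-based backups accept the bitmap fields as well. A sketch of a differential-style job that reads from a bitmap but leaves it untouched (node and bitmap names are illustrative; 'never' is only accepted together with sync mode 'bitmap', per the checks in do_backup_common above):

    vm.qmp('blockdev-backup', job_id='diff0', device='src-node',
           target='tgt-node', sync='bitmap', bitmap='bitmap0',
           bitmap_mode='never')
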
##
# @blockdev-snapshot-sync:

View File

@ -45,6 +45,7 @@
#
# - @abort: since 1.6
# - @block-dirty-bitmap-add: since 2.5
# - @block-dirty-bitmap-remove: since 4.2
# - @block-dirty-bitmap-clear: since 2.5
# - @block-dirty-bitmap-enable: since 4.0
# - @block-dirty-bitmap-disable: since 4.0
@ -61,6 +62,7 @@
'data': {
'abort': 'Abort',
'block-dirty-bitmap-add': 'BlockDirtyBitmapAdd',
'block-dirty-bitmap-remove': 'BlockDirtyBitmap',
'block-dirty-bitmap-clear': 'BlockDirtyBitmap',
'block-dirty-bitmap-enable': 'BlockDirtyBitmap',
'block-dirty-bitmap-disable': 'BlockDirtyBitmap',
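
Making block-dirty-bitmap-remove transactionable is what allows a bitmap to be "moved" atomically: add a new bitmap on the destination node, merge the old one into it, and remove the original, all in one transaction. This is exactly what the updated test 254 below exercises; condensed to an iotests-style sketch (node and bitmap names are illustrative):

    vm.qmp('transaction', actions=[
        {'type': 'block-dirty-bitmap-add',
         'data': {'node': 'snap', 'name': 'bitmap2', 'persistent': True}},
        {'type': 'block-dirty-bitmap-merge',
         'data': {'node': 'snap', 'target': 'bitmap2',
                  'bitmaps': [{'node': 'base', 'name': 'bitmap2'}]}},
        {'type': 'block-dirty-bitmap-remove',
         'data': {'node': 'base', 'name': 'bitmap2'}},
    ])
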

View File

@ -154,6 +154,18 @@ The ``status'' field of the ``BlockDirtyInfo'' structure, returned by
the query-block command is deprecated. Two new boolean fields,
``recording'' and ``busy'' effectively replace it.
@subsection query-block result field dirty-bitmaps (Since 4.2)
The ``dirty-bitmaps`` field of the ``BlockInfo`` structure, returned by
the query-block command is itself now deprecated. The ``dirty-bitmaps``
field of the ``BlockDeviceInfo`` struct should be used instead, which is the
type of the ``inserted`` field in query-block replies, as well as the
type of array items in query-named-block-nodes.
Since the ``dirty-bitmaps`` field is optionally present in both the old and
new locations, clients must use introspection to learn where to anticipate
the field if/when it does appear in command output.
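
In practice a 4.2-aware client should prefer the per-device location. A minimal Python sketch of reading both locations from query-block output (assumes a single drive as in the tests above; the top-level field remains available but deprecated):

    result = vm.qmp('query-block')['return'][0]

    # New location: BlockDeviceInfo, i.e. the 'inserted' member.
    new_style = result['inserted'].get('dirty-bitmaps', [])

    # Deprecated location: BlockInfo, top level of each query-block entry.
    old_style = result.get('dirty-bitmaps', [])
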
@subsection query-cpus (since 2.12.0)
The ``query-cpus'' command is replaced by the ``query-cpus-fast'' command.

View File

@ -85,11 +85,7 @@ class TestSingleDrive(ImageCommitTestCase):
qemu_io('-f', 'raw', '-c', 'write -P 0xab 0 524288', backing_img)
qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0xef 524288 524288', mid_img)
self.vm = iotests.VM().add_drive(test_img, "node-name=top,backing.node-name=mid,backing.backing.node-name=base", interface="none")
if iotests.qemu_default_machine == 's390-ccw-virtio':
self.vm.add_device("virtio-scsi-ccw")
else:
self.vm.add_device("virtio-scsi-pci")
self.vm.add_device(iotests.get_virtio_scsi_device())
self.vm.add_device("scsi-hd,id=scsi0,drive=drive0")
self.vm.launch()
self.has_quit = False

View File

@ -367,10 +367,8 @@ class ThrottleTestGroupNames(iotests.QMPTestCase):
class ThrottleTestRemovableMedia(iotests.QMPTestCase):
def setUp(self):
self.vm = iotests.VM()
if iotests.qemu_default_machine == 's390-ccw-virtio':
self.vm.add_device("virtio-scsi-ccw,id=virtio-scsi")
else:
self.vm.add_device("virtio-scsi-pci,id=virtio-scsi")
self.vm.add_device("{},id=virtio-scsi".format(
iotests.get_virtio_scsi_device()))
self.vm.launch()
def tearDown(self):

View File

@ -35,11 +35,8 @@ class TestBlockdevDel(iotests.QMPTestCase):
def setUp(self):
iotests.qemu_img('create', '-f', iotests.imgfmt, base_img, '1M')
self.vm = iotests.VM()
if iotests.qemu_default_machine == 's390-ccw-virtio':
self.vm.add_device("virtio-scsi-ccw,id=virtio-scsi")
else:
self.vm.add_device("virtio-scsi-pci,id=virtio-scsi")
self.vm.add_device("{},id=virtio-scsi".format(
iotests.get_virtio_scsi_device()))
self.vm.launch()
def tearDown(self):

View File

@ -23,10 +23,7 @@ import os
import iotests
from iotests import log
if iotests.qemu_default_machine == 's390-ccw-virtio':
virtio_scsi_device = 'virtio-scsi-ccw'
else:
virtio_scsi_device = 'virtio-scsi-pci'
virtio_scsi_device = iotests.get_virtio_scsi_device()
vm = iotests.VM()
vm.launch()
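
The get_virtio_scsi_device() helper itself lives in iotests.py and is not part of the excerpt shown here; judging by the conditionals it replaces in tests 040, 093, 139 and 238, it presumably amounts to something like this sketch:

    def get_virtio_scsi_device():
        # Presumed shape of the new iotests.py helper (not shown in this diff):
        # s390 guests use the CCW variant of virtio-scsi, everything else PCI.
        if qemu_default_machine == 's390-ccw-virtio':
            return 'virtio-scsi-ccw'
        return 'virtio-scsi-pci'
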

View File

@ -1,6 +1,6 @@
#!/usr/bin/env python
#
# Test external snapshot with bitmap copying.
# Test external snapshot with bitmap copying and moving.
#
# Copyright (c) 2019 Virtuozzo International GmbH. All rights reserved.
#
@ -32,6 +32,10 @@ vm = iotests.VM().add_drive(disk, opts='node-name=base')
vm.launch()
vm.qmp_log('block-dirty-bitmap-add', node='drive0', name='bitmap0')
vm.qmp_log('block-dirty-bitmap-add', node='drive0', name='bitmap1',
persistent=True)
vm.qmp_log('block-dirty-bitmap-add', node='drive0', name='bitmap2',
persistent=True)
vm.hmp_qemu_io('drive0', 'write 0 512K')
@ -39,16 +43,38 @@ vm.qmp_log('transaction', indent=2, actions=[
{'type': 'blockdev-snapshot-sync',
'data': {'device': 'drive0', 'snapshot-file': top,
'snapshot-node-name': 'snap'}},
# copy non-persistent bitmap0
{'type': 'block-dirty-bitmap-add',
'data': {'node': 'snap', 'name': 'bitmap0'}},
{'type': 'block-dirty-bitmap-merge',
'data': {'node': 'snap', 'target': 'bitmap0',
'bitmaps': [{'node': 'base', 'name': 'bitmap0'}]}}
'bitmaps': [{'node': 'base', 'name': 'bitmap0'}]}},
# copy persistent bitmap1, original will be saved to base image
{'type': 'block-dirty-bitmap-add',
'data': {'node': 'snap', 'name': 'bitmap1', 'persistent': True}},
{'type': 'block-dirty-bitmap-merge',
'data': {'node': 'snap', 'target': 'bitmap1',
'bitmaps': [{'node': 'base', 'name': 'bitmap1'}]}},
# move persistent bitmap2, original will be removed and not saved
# to base image
{'type': 'block-dirty-bitmap-add',
'data': {'node': 'snap', 'name': 'bitmap2', 'persistent': True}},
{'type': 'block-dirty-bitmap-merge',
'data': {'node': 'snap', 'target': 'bitmap2',
'bitmaps': [{'node': 'base', 'name': 'bitmap2'}]}},
{'type': 'block-dirty-bitmap-remove',
'data': {'node': 'base', 'name': 'bitmap2'}}
], filters=[iotests.filter_qmp_testfiles])
result = vm.qmp('query-block')['return'][0]
log("query-block: device = {}, node-name = {}, dirty-bitmaps:".format(
result['device'], result['inserted']['node-name']))
log(result['dirty-bitmaps'], indent=2)
log("\nbitmaps in backing image:")
log(result['inserted']['image']['backing-image']['format-specific'] \
['data']['bitmaps'], indent=2)
vm.shutdown()

View File

@ -1,5 +1,9 @@
{"execute": "block-dirty-bitmap-add", "arguments": {"name": "bitmap0", "node": "drive0"}}
{"return": {}}
{"execute": "block-dirty-bitmap-add", "arguments": {"name": "bitmap1", "node": "drive0", "persistent": true}}
{"return": {}}
{"execute": "block-dirty-bitmap-add", "arguments": {"name": "bitmap2", "node": "drive0", "persistent": true}}
{"return": {}}
{
"execute": "transaction",
"arguments": {
@ -31,6 +35,55 @@
"target": "bitmap0"
},
"type": "block-dirty-bitmap-merge"
},
{
"data": {
"name": "bitmap1",
"node": "snap",
"persistent": true
},
"type": "block-dirty-bitmap-add"
},
{
"data": {
"bitmaps": [
{
"name": "bitmap1",
"node": "base"
}
],
"node": "snap",
"target": "bitmap1"
},
"type": "block-dirty-bitmap-merge"
},
{
"data": {
"name": "bitmap2",
"node": "snap",
"persistent": true
},
"type": "block-dirty-bitmap-add"
},
{
"data": {
"bitmaps": [
{
"name": "bitmap2",
"node": "base"
}
],
"node": "snap",
"target": "bitmap2"
},
"type": "block-dirty-bitmap-merge"
},
{
"data": {
"name": "bitmap2",
"node": "base"
},
"type": "block-dirty-bitmap-remove"
}
]
}
@@ -40,6 +93,24 @@
}
query-block: device = drive0, node-name = snap, dirty-bitmaps:
[
{
"busy": false,
"count": 524288,
"granularity": 65536,
"name": "bitmap2",
"persistent": true,
"recording": true,
"status": "active"
},
{
"busy": false,
"count": 524288,
"granularity": 65536,
"name": "bitmap1",
"persistent": true,
"recording": true,
"status": "active"
},
{
"busy": false,
"count": 524288,
@@ -50,3 +121,14 @@ query-block: device = drive0, node-name = snap, dirty-bitmaps:
"status": "active"
}
]
bitmaps in backing image:
[
{
"flags": [
"auto"
],
"granularity": 65536,
"name": "bitmap1"
}
]


@@ -113,7 +113,7 @@
{
"return": {}
}
{"data": {"device": "j2", "len": 67108864, "offset": 67108864, "speed": 0, "type": "backup"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"device": "j3", "len": 67108864, "offset": 67108864, "speed": 0, "type": "backup"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"device": "j2", "len": 0, "offset": 0, "speed": 0, "type": "backup"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
{"data": {"device": "j3", "len": 0, "offset": 0, "speed": 0, "type": "backup"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
--- Done ---

tests/qemu-iotests/257 Executable file

@@ -0,0 +1,560 @@
#!/usr/bin/env python
#
# Test bitmap-sync backups (incremental, differential, and partials)
#
# Copyright (c) 2019 John Snow for Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# owner=jsnow@redhat.com
import math
import os
import iotests
from iotests import log, qemu_img
SIZE = 64 * 1024 * 1024
GRANULARITY = 64 * 1024
class Pattern:
def __init__(self, byte, offset, size=GRANULARITY):
self.byte = byte
self.offset = offset
self.size = size
def bits(self, granularity):
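# Illustrative note, not in the original test: with the default 64K
# granularity, Pattern('0x67', 0x3fe0000, 2*GRANULARITY) covers bits
# {1022, 1023}, i.e. the last two clusters of the 64M image.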
lower = self.offset // granularity
upper = (self.offset + self.size - 1) // granularity
return set(range(lower, upper + 1))
class PatternGroup:
"""Grouping of Pattern objects. Initialize with an iterable of Patterns."""
def __init__(self, patterns):
self.patterns = patterns
def bits(self, granularity):
"""Calculate the unique bits dirtied by this pattern grouping"""
res = set()
for pattern in self.patterns:
res |= pattern.bits(granularity)
return res
GROUPS = [
PatternGroup([
# Batch 0: 4 clusters
Pattern('0x49', 0x0000000),
Pattern('0x6c', 0x0100000), # 1M
Pattern('0x6f', 0x2000000), # 32M
Pattern('0x76', 0x3ff0000)]), # 64M - 64K
PatternGroup([
# Batch 1: 6 clusters (3 new)
Pattern('0x65', 0x0000000), # Full overwrite
Pattern('0x77', 0x00f8000), # Partial-left (1M-32K)
Pattern('0x72', 0x2008000), # Partial-right (32M+32K)
Pattern('0x69', 0x3fe0000)]), # Adjacent-left (64M - 128K)
PatternGroup([
# Batch 2: 7 clusters (3 new)
Pattern('0x74', 0x0010000), # Adjacent-right
Pattern('0x69', 0x00e8000), # Partial-left (1M-96K)
Pattern('0x6e', 0x2018000), # Partial-right (32M+96K)
Pattern('0x67', 0x3fe0000,
2*GRANULARITY)]), # Overwrite [(64M-128K)-64M)
PatternGroup([
# Batch 3: 8 clusters (5 new)
# Carefully chosen such that nothing re-dirties the one cluster
# that copies out successfully before failure in Group #1.
Pattern('0xaa', 0x0010000,
3*GRANULARITY), # Overwrite and 2x Adjacent-right
Pattern('0xbb', 0x00d8000), # Partial-left (1M-160K)
Pattern('0xcc', 0x2028000), # Partial-right (32M+160K)
Pattern('0xdd', 0x3fc0000)]), # New; leaving a gap to the right
]
class EmulatedBitmap:
def __init__(self, granularity=GRANULARITY):
self._bits = set()
self.granularity = granularity
def dirty_bits(self, bits):
self._bits |= set(bits)
def dirty_group(self, n):
self.dirty_bits(GROUPS[n].bits(self.granularity))
def clear(self):
self._bits = set()
def clear_bits(self, bits):
self._bits -= set(bits)
def clear_bit(self, bit):
self.clear_bits({bit})
def clear_group(self, n):
self.clear_bits(GROUPS[n].bits(self.granularity))
@property
def first_bit(self):
return sorted(self.bits)[0]
@property
def bits(self):
return self._bits
@property
def count(self):
return len(self.bits)
def compare(self, qmp_bitmap):
"""
Print a nice human-readable message checking that a bitmap as reported
by the QMP interface has as many bits set as we expect it to.
"""
name = qmp_bitmap.get('name', '(anonymous)')
log("= Checking Bitmap {:s} =".format(name))
want = self.count
have = qmp_bitmap['count'] // qmp_bitmap['granularity']
log("expecting {:d} dirty sectors; have {:d}. {:s}".format(
want, have, "OK!" if want == have else "ERROR!"))
log('')
class Drive:
"""Represents, vaguely, a drive attached to a VM.
Includes format, graph, and device information."""
def __init__(self, path, vm=None):
self.path = path
self.vm = vm
self.fmt = None
self.size = None
self.node = None
self.device = None
@property
def name(self):
return self.node or self.device
def img_create(self, fmt, size):
self.fmt = fmt
self.size = size
iotests.qemu_img_create('-f', self.fmt, self.path, str(self.size))
def create_target(self, name, fmt, size):
basename = os.path.basename(self.path)
file_node_name = "file_{}".format(basename)
vm = self.vm
log(vm.command('blockdev-create', job_id='bdc-file-job',
options={
'driver': 'file',
'filename': self.path,
'size': 0,
}))
vm.run_job('bdc-file-job')
log(vm.command('blockdev-add', driver='file',
node_name=file_node_name, filename=self.path))
log(vm.command('blockdev-create', job_id='bdc-fmt-job',
options={
'driver': fmt,
'file': file_node_name,
'size': size,
}))
vm.run_job('bdc-fmt-job')
log(vm.command('blockdev-add', driver=fmt,
node_name=name,
file=file_node_name))
self.fmt = fmt
self.size = size
self.node = name
def query_bitmaps(vm):
res = vm.qmp("query-block")
return {"bitmaps": {device['device'] or device['qdev']:
device.get('dirty-bitmaps', []) for
device in res['return']}}
def get_bitmap(bitmaps, drivename, name, recording=None):
"""
Get a specific bitmap from the object returned by query_bitmaps.
:param recording: If specified, filter results by the specified value.
"""
for bitmap in bitmaps['bitmaps'][drivename]:
if bitmap.get('name', '') == name:
if recording is None:
return bitmap
elif bitmap.get('recording') == recording:
return bitmap
return None
def blockdev_backup(vm, device, target, sync, **kwargs):
# Strip any arguments explicitly nulled by the caller:
kwargs = {key: val for key, val in kwargs.items() if val is not None}
result = vm.qmp_log('blockdev-backup',
device=device,
target=target,
sync=sync,
**kwargs)
return result
def blockdev_backup_mktarget(drive, target_id, filepath, sync, **kwargs):
target_drive = Drive(filepath, vm=drive.vm)
target_drive.create_target(target_id, drive.fmt, drive.size)
blockdev_backup(drive.vm, drive.name, target_id, sync, **kwargs)
def reference_backup(drive, n, filepath):
log("--- Reference Backup #{:d} ---\n".format(n))
target_id = "ref_target_{:d}".format(n)
job_id = "ref_backup_{:d}".format(n)
blockdev_backup_mktarget(drive, target_id, filepath, "full",
job_id=job_id)
drive.vm.run_job(job_id, auto_dismiss=True)
log('')
def backup(drive, n, filepath, sync, **kwargs):
log("--- Test Backup #{:d} ---\n".format(n))
target_id = "backup_target_{:d}".format(n)
job_id = "backup_{:d}".format(n)
kwargs.setdefault('auto-finalize', False)
blockdev_backup_mktarget(drive, target_id, filepath, sync,
job_id=job_id, **kwargs)
return job_id
def perform_writes(drive, n):
log("--- Write #{:d} ---\n".format(n))
for pattern in GROUPS[n].patterns:
cmd = "write -P{:s} 0x{:07x} 0x{:x}".format(
pattern.byte,
pattern.offset,
pattern.size)
log(cmd)
log(drive.vm.hmp_qemu_io(drive.name, cmd))
bitmaps = query_bitmaps(drive.vm)
log(bitmaps, indent=2)
log('')
return bitmaps
def compare_images(image, reference, baseimg=None, expected_match=True):
"""
Print a nice human-readable message comparing these images.
"""
expected_ret = 0 if expected_match else 1
if baseimg:
assert qemu_img("rebase", "-u", "-b", baseimg, image) == 0
ret = qemu_img("compare", image, reference)
log('qemu_img compare "{:s}" "{:s}" ==> {:s}, {:s}'.format(
image, reference,
"Identical" if ret == 0 else "Mismatch",
"OK!" if ret == expected_ret else "ERROR!"),
filters=[iotests.filter_testfiles])
def test_bitmap_sync(bsync_mode, msync_mode='bitmap', failure=None):
"""
Test bitmap backup routines.
:param bsync_mode: The bitmap sync mode; can be any of:
- on-success: This is the "incremental" style mode. Bitmaps are
synchronized to what was copied out only on success.
(Partial images must be discarded.)
- never: This is the "differential" style mode.
Bitmaps are never synchronized.
- always: This is a "best effort" style mode.
Bitmaps are always synchronized, regardless of failure.
(Partial images must be kept.)
:param msync_mode: The mirror sync mode to use for the first backup.
Can be any one of:
- bitmap: Backups based on bitmap manifest.
- full: Full backups.
- top: Full backups of the top layer only.
:param failure: The (optional) failure mode; can be any of:
- None: No failure. Test the normative path. Default.
- simulated: Cancel the job right before it completes.
This also tests writes "during" the job.
- intermediate: This tests a job that fails mid-process and produces
an incomplete backup. Testing limitations prevent
testing competing writes.
"""
with iotests.FilePaths(['img', 'bsync1', 'bsync2',
'fbackup0', 'fbackup1', 'fbackup2']) as \
(img_path, bsync1, bsync2,
fbackup0, fbackup1, fbackup2), \
iotests.VM() as vm:
mode = "Mode {:s}; Bitmap Sync {:s}".format(msync_mode, bsync_mode)
preposition = "with" if failure else "without"
cond = "{:s} {:s}".format(preposition,
"{:s} failure".format(failure) if failure
else "failure")
log("\n=== {:s} {:s} ===\n".format(mode, cond))
log('--- Preparing image & VM ---\n')
drive0 = Drive(img_path, vm=vm)
drive0.img_create(iotests.imgfmt, SIZE)
vm.add_device("{},id=scsi0".format(iotests.get_virtio_scsi_device()))
vm.launch()
file_config = {
'driver': 'file',
'filename': drive0.path
}
if failure == 'intermediate':
file_config = {
'driver': 'blkdebug',
'image': file_config,
'set-state': [{
'event': 'flush_to_disk',
'state': 1,
'new_state': 2
}, {
'event': 'read_aio',
'state': 2,
'new_state': 3
}],
'inject-error': [{
'event': 'read_aio',
'errno': 5,
'state': 3,
'immediately': False,
'once': True
}]
}
vm.qmp_log('blockdev-add',
filters=[iotests.filter_qmp_testfiles],
node_name="drive0",
driver=drive0.fmt,
file=file_config)
drive0.node = 'drive0'
drive0.device = 'device0'
# Use share-rw to allow writes directly to the node;
# The anonymous block-backend for this configuration prevents us
# from using HMP's qemu-io commands to address the device.
vm.qmp_log("device_add", id=drive0.device,
drive=drive0.name, driver="scsi-hd",
share_rw=True)
log('')
# 0 - Writes and Reference Backup
perform_writes(drive0, 0)
reference_backup(drive0, 0, fbackup0)
log('--- Add Bitmap ---\n')
vm.qmp_log("block-dirty-bitmap-add", node=drive0.name,
name="bitmap0", granularity=GRANULARITY)
log('')
ebitmap = EmulatedBitmap()
# 1 - Writes and Reference Backup
bitmaps = perform_writes(drive0, 1)
ebitmap.dirty_group(1)
bitmap = get_bitmap(bitmaps, drive0.device, 'bitmap0')
ebitmap.compare(bitmap)
reference_backup(drive0, 1, fbackup1)
# 1 - Test Backup (w/ Optional induced failure)
if failure == 'intermediate':
# Activate blkdebug induced failure for second-to-next read
log(vm.hmp_qemu_io(drive0.name, 'flush'))
log('')
job = backup(drive0, 1, bsync1, msync_mode,
bitmap="bitmap0", bitmap_mode=bsync_mode)
def _callback():
"""Issue writes while the job is open to test bitmap divergence."""
# Note: when `failure` is 'intermediate', this isn't called.
log('')
bitmaps = perform_writes(drive0, 2)
# Named bitmap (static, should be unchanged)
ebitmap.compare(get_bitmap(bitmaps, drive0.device, 'bitmap0'))
# Anonymous bitmap (dynamic, shows new writes)
anonymous = EmulatedBitmap()
anonymous.dirty_group(2)
anonymous.compare(get_bitmap(bitmaps, drive0.device, '',
recording=True))
# Simulate the order in which this will happen:
# group 1 gets cleared first, then group two gets written.
if ((bsync_mode == 'on-success' and not failure) or
(bsync_mode == 'always')):
ebitmap.clear()
ebitmap.dirty_group(2)
vm.run_job(job, auto_dismiss=True, auto_finalize=False,
pre_finalize=_callback,
cancel=(failure == 'simulated'))
bitmaps = query_bitmaps(vm)
log(bitmaps, indent=2)
log('')
if bsync_mode == 'always' and failure == 'intermediate':
# TOP treats anything allocated as dirty, expect to see:
if msync_mode == 'top':
ebitmap.dirty_group(0)
# We manage to copy one cluster (one bit) before the error.
ebitmap.clear_bit(ebitmap.first_bit)
# Full returns all bits set except what was copied/skipped
if msync_mode == 'full':
fail_bit = ebitmap.first_bit
ebitmap.clear()
ebitmap.dirty_bits(range(fail_bit, SIZE // GRANULARITY))
ebitmap.compare(get_bitmap(bitmaps, drive0.device, 'bitmap0'))
# 2 - Writes and Reference Backup
bitmaps = perform_writes(drive0, 3)
ebitmap.dirty_group(3)
ebitmap.compare(get_bitmap(bitmaps, drive0.device, 'bitmap0'))
reference_backup(drive0, 2, fbackup2)
# 2 - Bitmap Backup (In failure modes, this is a recovery.)
job = backup(drive0, 2, bsync2, "bitmap",
bitmap="bitmap0", bitmap_mode=bsync_mode)
vm.run_job(job, auto_dismiss=True, auto_finalize=False)
bitmaps = query_bitmaps(vm)
log(bitmaps, indent=2)
log('')
if bsync_mode != 'never':
ebitmap.clear()
ebitmap.compare(get_bitmap(bitmaps, drive0.device, 'bitmap0'))
log('--- Cleanup ---\n')
vm.qmp_log("block-dirty-bitmap-remove",
node=drive0.name, name="bitmap0")
log(query_bitmaps(vm), indent=2)
vm.shutdown()
log('')
log('--- Verification ---\n')
# 'simulated' failures will actually all pass here because we canceled
# while "pending". This is actually undefined behavior,
# don't rely on this to be true!
compare_images(bsync1, fbackup1, baseimg=fbackup0,
expected_match=failure != 'intermediate')
if not failure or bsync_mode == 'always':
# Always keep the last backup on success or when using 'always'
base = bsync1
else:
base = fbackup0
compare_images(bsync2, fbackup2, baseimg=base)
compare_images(img_path, fbackup2)
log('')
def test_backup_api():
"""
Test malformed and prohibited invocations of the backup API.
"""
with iotests.FilePaths(['img', 'bsync1']) as \
(img_path, backup_path), \
iotests.VM() as vm:
log("\n=== API failure tests ===\n")
log('--- Preparing image & VM ---\n')
drive0 = Drive(img_path, vm=vm)
drive0.img_create(iotests.imgfmt, SIZE)
vm.add_device("{},id=scsi0".format(iotests.get_virtio_scsi_device()))
vm.launch()
file_config = {
'driver': 'file',
'filename': drive0.path
}
vm.qmp_log('blockdev-add',
filters=[iotests.filter_qmp_testfiles],
node_name="drive0",
driver=drive0.fmt,
file=file_config)
drive0.node = 'drive0'
drive0.device = 'device0'
vm.qmp_log("device_add", id=drive0.device,
drive=drive0.name, driver="scsi-hd")
log('')
target0 = Drive(backup_path, vm=vm)
target0.create_target("backup_target", drive0.fmt, drive0.size)
log('')
vm.qmp_log("block-dirty-bitmap-add", node=drive0.name,
name="bitmap0", granularity=GRANULARITY)
log('')
log('-- Testing invalid QMP commands --\n')
error_cases = {
'incremental': {
None: ['on-success', 'always', 'never', None],
'bitmap404': ['on-success', 'always', 'never', None],
'bitmap0': ['always', 'never']
},
'bitmap': {
None: ['on-success', 'always', 'never', None],
'bitmap404': ['on-success', 'always', 'never', None],
'bitmap0': [None],
},
'full': {
None: ['on-success', 'always', 'never'],
'bitmap404': ['on-success', 'always', 'never', None],
'bitmap0': ['never', None],
},
'top': {
None: ['on-success', 'always', 'never'],
'bitmap404': ['on-success', 'always', 'never', None],
'bitmap0': ['never', None],
},
'none': {
None: ['on-success', 'always', 'never'],
'bitmap404': ['on-success', 'always', 'never', None],
'bitmap0': ['on-success', 'always', 'never', None],
}
}
# Dicts, as always, are not stably-ordered prior to 3.7, so use tuples:
for sync_mode in ('incremental', 'bitmap', 'full', 'top', 'none'):
log("-- Sync mode {:s} tests --\n".format(sync_mode))
for bitmap in (None, 'bitmap404', 'bitmap0'):
for policy in error_cases[sync_mode][bitmap]:
blockdev_backup(drive0.vm, drive0.name, "backup_target",
sync_mode, job_id='api_job',
bitmap=bitmap, bitmap_mode=policy)
log('')
def main():
for bsync_mode in ("never", "on-success", "always"):
for failure in ("simulated", "intermediate", None):
test_bitmap_sync(bsync_mode, "bitmap", failure)
for sync_mode in ('full', 'top'):
for bsync_mode in ('on-success', 'always'):
for failure in ('simulated', 'intermediate', None):
test_bitmap_sync(bsync_mode, sync_mode, failure)
test_backup_api()
if __name__ == '__main__':
iotests.script_main(main, supported_fmts=['qcow2'])
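
As a reading aid (not part of the patch), the bitmap-mode plumbing exercised above boils down to a blockdev-backup call plus run_job; a minimal sketch using the same iotests helpers, with node and job names assumed:

def sketch_bitmap_backup(vm, bsync_mode='on-success'):
    # Assumed: 'drive0' (source node), 'backup_target_1' (pre-created target
    # node) and 'bitmap0' already exist on this VM, as they do in test 257.
    # The VM wrapper turns underscores in keyword names into dashes on the
    # wire (bitmap_mode -> bitmap-mode, auto_finalize -> auto-finalize).
    vm.qmp_log('blockdev-backup',
               job_id='backup_1',
               device='drive0',
               target='backup_target_1',
               sync='bitmap',              # or 'full'/'top' plus a bitmap
               bitmap='bitmap0',
               bitmap_mode=bsync_mode,     # 'on-success' | 'never' | 'always'
               auto_finalize=False)
    vm.run_job('backup_1', auto_dismiss=True, auto_finalize=False)

Per the docstring above: 'on-success' synchronizes bitmap0 only if the job succeeds, 'never' leaves it untouched, and 'always' synchronizes it regardless of failure (so partial target images must be kept).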

tests/qemu-iotests/257.out Normal file

File diff suppressed because it is too large


@@ -271,5 +271,6 @@
254 rw backing quick
255 rw quick
256 rw quick
257 rw
258 rw quick
262 rw quick migration


@@ -61,7 +61,6 @@ cachemode = os.environ.get('CACHEMODE')
qemu_default_machine = os.environ.get('QEMU_DEFAULT_MACHINE')
socket_scm_helper = os.environ.get('SOCKET_SCM_HELPER', 'socket_scm_helper')
debug = False
luks_default_secret_object = 'secret,id=keysec0,data=' + \
os.environ.get('IMGKEYSECRET', '')
@@ -165,6 +164,10 @@ def qemu_io_silent(*args):
(-exitcode, ' '.join(args)))
return exitcode
def get_virtio_scsi_device():
if qemu_default_machine == 's390-ccw-virtio':
return 'virtio-scsi-ccw'
return 'virtio-scsi-pci'
class QemuIoInteractive:
def __init__(self, *args):
@@ -359,31 +362,45 @@ class Timeout:
def timeout(self, signum, frame):
raise Exception(self.errmsg)
def file_pattern(name):
return "{0}-{1}".format(os.getpid(), name)
class FilePath(object):
'''An auto-generated filename that cleans itself up.
class FilePaths(object):
"""
FilePaths is an auto-generated filename that cleans itself up.
Use this context manager to generate filenames and ensure that the file
gets deleted::
with TestFilePath('test.img') as img_path:
with FilePaths(['test.img']) as (img_path,):
qemu_img('create', img_path, '1G')
# img_path is automatically deleted
'''
def __init__(self, name):
filename = '{0}-{1}'.format(os.getpid(), name)
self.path = os.path.join(test_dir, filename)
"""
def __init__(self, names):
self.paths = []
for name in names:
self.paths.append(os.path.join(test_dir, file_pattern(name)))
def __enter__(self):
return self.path
return self.paths
def __exit__(self, exc_type, exc_val, exc_tb):
try:
os.remove(self.path)
for path in self.paths:
os.remove(path)
except OSError:
pass
return False
class FilePath(FilePaths):
"""
FilePath is a specialization of FilePaths that takes a single filename.
"""
def __init__(self, name):
super(FilePath, self).__init__([name])
def __enter__(self):
return self.paths[0]
def file_path_remover():
for path in reversed(file_path_remover.paths):
@@ -408,7 +425,7 @@ def file_path(*names):
paths = []
for name in names:
filename = '{0}-{1}'.format(os.getpid(), name)
filename = file_pattern(name)
path = os.path.join(test_dir, filename)
file_path_remover.paths.append(path)
paths.append(path)
@@ -542,7 +559,23 @@ class VM(qtest.QEMUQtestMachine):
# Returns None on success, and an error string on failure
def run_job(self, job, auto_finalize=True, auto_dismiss=False,
pre_finalize=None, use_log=True, wait=60.0):
pre_finalize=None, cancel=False, use_log=True, wait=60.0):
"""
run_job moves a job from creation through to dismissal.
:param job: String. ID of recently-launched job
:param auto_finalize: Bool. True if the job was launched with
auto_finalize. Defaults to True.
:param auto_dismiss: Bool. True if the job was launched with
auto_dismiss=True. Defaults to False.
:param pre_finalize: Callback. A callable that takes no arguments to be
invoked prior to issuing job-finalize, if any.
:param cancel: Bool. When true, cancels the job after the pre_finalize
callback.
:param use_log: Bool. When false, does not log QMP messages.
:param wait: Float. Timeout value specifying how long to wait for any
event, in seconds. Defaults to 60.0.
"""
match_device = {'data': {'device': job}}
match_id = {'data': {'id': job}}
events = [
@@ -571,7 +604,11 @@ class VM(qtest.QEMUQtestMachine):
elif status == 'pending' and not auto_finalize:
if pre_finalize:
pre_finalize()
if use_log:
if cancel and use_log:
self.qmp_log('job-cancel', id=job)
elif cancel:
self.qmp('job-cancel', id=job)
elif use_log:
self.qmp_log('job-finalize', id=job)
else:
self.qmp('job-finalize', id=job)
@@ -858,11 +895,22 @@ def skip_if_unsupported(required_formats=[], read_only=False):
return func_wrapper
return skip_test_decorator
def main(supported_fmts=[], supported_oses=['linux'], supported_cache_modes=[],
unsupported_fmts=[]):
'''Run tests'''
def execute_unittest(output, verbosity, debug):
runner = unittest.TextTestRunner(stream=output, descriptions=True,
verbosity=verbosity)
try:
# unittest.main() will use sys.exit(); so expect a SystemExit
# exception
unittest.main(testRunner=runner)
finally:
if not debug:
sys.stderr.write(re.sub(r'Ran (\d+) tests? in [\d.]+s',
r'Ran \1 tests', output.getvalue()))
global debug
def execute_test(test_function=None,
supported_fmts=[], supported_oses=['linux'],
supported_cache_modes=[], unsupported_fmts=[]):
"""Run either unittest or script-style tests."""
# We are using TEST_DIR and QEMU_DEFAULT_MACHINE as proxies to
# indicate that we're not being run via "check". There may be
@@ -894,13 +942,15 @@ def main(supported_fmts=[], supported_oses=['linux'], supported_cache_modes=[],
logging.basicConfig(level=(logging.DEBUG if debug else logging.WARN))
class MyTestRunner(unittest.TextTestRunner):
def __init__(self, stream=output, descriptions=True, verbosity=verbosity):
unittest.TextTestRunner.__init__(self, stream, descriptions, verbosity)
if not test_function:
execute_unittest(output, verbosity, debug)
else:
test_function()
# unittest.main() will use sys.exit() so expect a SystemExit exception
try:
unittest.main(testRunner=MyTestRunner)
finally:
if not debug:
sys.stderr.write(re.sub(r'Ran (\d+) tests? in [\d.]+s', r'Ran \1 tests', output.getvalue()))
def script_main(test_function, *args, **kwargs):
"""Run script-style tests outside of the unittest framework"""
execute_test(test_function, *args, **kwargs)
def main(*args, **kwargs):
"""Run tests using the unittest framework"""
execute_test(None, *args, **kwargs)
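
A minimal, hypothetical script-style test showing how the reworked helpers above fit together (FilePaths for cleanup, script_main as the entry point); the image names are illustrative only and this sketch is not part of the series:

#!/usr/bin/env python
# Hypothetical usage sketch; not part of this series.
import iotests
from iotests import log, qemu_img

def main():
    # FilePaths yields a list of auto-deleted paths; FilePath yields one.
    with iotests.FilePaths(['a.img', 'b.img']) as (img_a, img_b):
        qemu_img('create', '-f', iotests.imgfmt, img_a, '1M')
        qemu_img('create', '-f', iotests.imgfmt, img_b, '1M')
        # Both images are empty, so compare reports them identical (exit 0).
        log(qemu_img('compare', img_a, img_b))

if __name__ == '__main__':
    iotests.script_main(main, supported_fmts=['qcow2'])

Unittest-style tests keep calling main(), which is now a thin wrapper around execute_test(); run_job()'s new cancel flag issues job-cancel instead of job-finalize once the job reaches 'pending' (after pre_finalize, if one was given).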


@@ -1004,6 +1004,15 @@ static void test_hbitmap_next_zero_4(TestHBitmapData *data, const void *unused)
test_hbitmap_next_zero_do(data, 4);
}
static void test_hbitmap_next_zero_after_truncate(TestHBitmapData *data,
const void *unused)
{
hbitmap_test_init(data, L1, 0);
hbitmap_test_truncate_impl(data, L1 * 2);
hbitmap_set(data->hb, 0, L1);
test_hbitmap_next_zero_check(data, 0);
}
static void test_hbitmap_next_dirty_area_check(TestHBitmapData *data,
uint64_t offset,
uint64_t count)
@@ -1104,6 +1113,15 @@ static void test_hbitmap_next_dirty_area_4(TestHBitmapData *data,
test_hbitmap_next_dirty_area_do(data, 4);
}
static void test_hbitmap_next_dirty_area_after_truncate(TestHBitmapData *data,
const void *unused)
{
hbitmap_test_init(data, L1, 0);
hbitmap_test_truncate_impl(data, L1 * 2);
hbitmap_set(data->hb, L1 + 1, 1);
test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX);
}
int main(int argc, char **argv)
{
g_test_init(&argc, &argv, NULL);
@@ -1169,6 +1187,8 @@ int main(int argc, char **argv)
test_hbitmap_next_zero_0);
hbitmap_test_add("/hbitmap/next_zero/next_zero_4",
test_hbitmap_next_zero_4);
hbitmap_test_add("/hbitmap/next_zero/next_zero_after_truncate",
test_hbitmap_next_zero_after_truncate);
hbitmap_test_add("/hbitmap/next_dirty_area/next_dirty_area_0",
test_hbitmap_next_dirty_area_0);
@@ -1176,6 +1196,8 @@ int main(int argc, char **argv)
test_hbitmap_next_dirty_area_1);
hbitmap_test_add("/hbitmap/next_dirty_area/next_dirty_area_4",
test_hbitmap_next_dirty_area_4);
hbitmap_test_add("/hbitmap/next_dirty_area/next_dirty_area_after_truncate",
test_hbitmap_next_dirty_area_after_truncate);
g_test_run();


@@ -781,12 +781,33 @@ void hbitmap_truncate(HBitmap *hb, uint64_t size)
bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b)
{
return (a->size == b->size) && (a->granularity == b->granularity);
return (a->orig_size == b->orig_size);
}
/**
* Given HBitmaps A and B, let A := A (BITOR) B.
* Bitmap B will not be modified.
* hbitmap_sparse_merge: performs dst = dst | src
* works with differing granularities.
* best used when src is sparsely populated.
*/
static void hbitmap_sparse_merge(HBitmap *dst, const HBitmap *src)
{
uint64_t offset = 0;
uint64_t count = src->orig_size;
while (hbitmap_next_dirty_area(src, &offset, &count)) {
hbitmap_set(dst, offset, count);
offset += count;
if (offset >= src->orig_size) {
break;
}
count = src->orig_size - offset;
}
}
/**
* Given HBitmaps A and B, let R := A (BITOR) B.
* Bitmaps A and B will not be modified,
* except when bitmap R is an alias of A or B.
*
* @return true if the merge was successful,
* false if it was not attempted.
@@ -801,7 +822,26 @@ bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result)
}
assert(hbitmap_can_merge(b, result));
if (hbitmap_count(b) == 0) {
if ((!hbitmap_count(a) && result == b) ||
(!hbitmap_count(b) && result == a)) {
return true;
}
if (!hbitmap_count(a) && !hbitmap_count(b)) {
hbitmap_reset_all(result);
return true;
}
if (a->granularity != b->granularity) {
if ((a != result) && (b != result)) {
hbitmap_reset_all(result);
}
if (a != result) {
hbitmap_sparse_merge(result, a);
}
if (b != result) {
hbitmap_sparse_merge(result, b);
}
return true;
}
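
For illustration only (a Python sketch, not part of the patch), the same granularity-agnostic merge idea: model each bitmap as a set of dirty bit indices plus a granularity, walk the dirty regions of the source, and mark them in the destination; only the original byte size has to match.

def sparse_merge(dst_bits, dst_granularity, src_bits, src_granularity, size):
    # dst_bits/src_bits are sets of dirty bit indices; size is orig_size.
    # (hbitmap_sparse_merge walks whole dirty areas at once via
    # hbitmap_next_dirty_area; going bit by bit is enough to show the idea.)
    for bit in sorted(src_bits):
        start = bit * src_granularity
        end = min(start + src_granularity, size)
        first = start // dst_granularity
        last = (end - 1) // dst_granularity
        dst_bits.update(range(first, last + 1))
    return dst_bits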
@@ -809,6 +849,7 @@ bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result)
* It may be possible to improve running times for sparsely populated maps
* by using hbitmap_iter_next, but this is suboptimal for dense maps.
*/
assert(a->size == b->size);
for (i = HBITMAP_LEVELS - 1; i >= 0; i--) {
for (j = 0; j < a->sizes[i]; j++) {
result->levels[i][j] = a->levels[i][j] | b->levels[i][j];