diff --git a/migration/block.c b/migration/block.c
index c90288ed29..737b6499f9 100644
--- a/migration/block.c
+++ b/migration/block.c
@@ -27,8 +27,8 @@
 #include "migration/vmstate.h"
 #include "sysemu/block-backend.h"
 
-#define BLOCK_SIZE (1 << 20)
-#define BDRV_SECTORS_PER_DIRTY_CHUNK (BLOCK_SIZE >> BDRV_SECTOR_BITS)
+#define BLK_MIG_BLOCK_SIZE (1 << 20)
+#define BDRV_SECTORS_PER_DIRTY_CHUNK (BLK_MIG_BLOCK_SIZE >> BDRV_SECTOR_BITS)
 
 #define BLK_MIG_FLAG_DEVICE_BLOCK 0x01
 #define BLK_MIG_FLAG_EOS 0x02
@@ -133,7 +133,7 @@ static void blk_send(QEMUFile *f, BlkMigBlock * blk)
     uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;
 
     if (block_mig_state.zero_blocks &&
-        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
+        buffer_is_zero(blk->buf, BLK_MIG_BLOCK_SIZE)) {
         flags |= BLK_MIG_FLAG_ZERO_BLOCK;
     }
 
@@ -154,7 +154,7 @@ static void blk_send(QEMUFile *f, BlkMigBlock * blk)
         return;
     }
 
-    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
+    qemu_put_buffer(f, blk->buf, BLK_MIG_BLOCK_SIZE);
 }
 
 int blk_mig_active(void)
@@ -309,7 +309,7 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
     }
 
     blk = g_new(BlkMigBlock, 1);
-    blk->buf = g_malloc(BLOCK_SIZE);
+    blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
     blk->bmds = bmds;
     blk->sector = cur_sector;
     blk->nr_sectors = nr_sectors;
@@ -350,7 +350,8 @@ static int set_dirty_tracking(void)
 
     QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
         bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
-                                                      BLOCK_SIZE, NULL, NULL);
+                                                      BLK_MIG_BLOCK_SIZE,
+                                                      NULL, NULL);
         if (!bmds->dirty_bitmap) {
             ret = -errno;
             goto fail;
@@ -548,7 +549,7 @@ static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
             bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
 
             blk = g_new(BlkMigBlock, 1);
-            blk->buf = g_malloc(BLOCK_SIZE);
+            blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
             blk->bmds = bmds;
             blk->sector = sector;
             blk->nr_sectors = nr_sectors;
@@ -770,7 +771,7 @@ static int block_save_iterate(QEMUFile *f, void *opaque)
 
     /* control the rate of transfer */
     blk_mig_lock();
-    while (block_mig_state.read_done * BLOCK_SIZE <
+    while (block_mig_state.read_done * BLK_MIG_BLOCK_SIZE <
            qemu_file_get_rate_limit(f) &&
            block_mig_state.submitted < MAX_PARALLEL_IO &&
            (block_mig_state.submitted + block_mig_state.read_done) <
@@ -874,13 +875,13 @@ static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
     qemu_mutex_unlock_iothread();
 
     blk_mig_lock();
-    pending += block_mig_state.submitted * BLOCK_SIZE +
-               block_mig_state.read_done * BLOCK_SIZE;
+    pending += block_mig_state.submitted * BLK_MIG_BLOCK_SIZE +
+               block_mig_state.read_done * BLK_MIG_BLOCK_SIZE;
     blk_mig_unlock();
 
     /* Report at least one block pending during bulk phase */
     if (pending <= max_size && !block_mig_state.bulk_completed) {
-        pending = max_size + BLOCK_SIZE;
+        pending = max_size + BLK_MIG_BLOCK_SIZE;
     }
 
     DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
@@ -901,7 +902,7 @@ static int block_load(QEMUFile *f, void *opaque, int version_id)
     int nr_sectors;
     int ret;
     BlockDriverInfo bdi;
-    int cluster_size = BLOCK_SIZE;
+    int cluster_size = BLK_MIG_BLOCK_SIZE;
 
     do {
         addr = qemu_get_be64(f);
@@ -939,11 +940,11 @@ static int block_load(QEMUFile *f, void *opaque, int version_id)
 
                 ret = bdrv_get_info(blk_bs(blk), &bdi);
                 if (ret == 0 && bdi.cluster_size > 0 &&
-                    bdi.cluster_size <= BLOCK_SIZE &&
-                    BLOCK_SIZE % bdi.cluster_size == 0) {
+                    bdi.cluster_size <= BLK_MIG_BLOCK_SIZE &&
+                    BLK_MIG_BLOCK_SIZE % bdi.cluster_size == 0) {
                     cluster_size = bdi.cluster_size;
                 } else {
-                    cluster_size = BLOCK_SIZE;
+                    cluster_size = BLK_MIG_BLOCK_SIZE;
                 }
             }
@@ -962,14 +963,14 @@ static int block_load(QEMUFile *f, void *opaque, int version_id)
                 int64_t cur_addr;
                 uint8_t *cur_buf;
 
-                buf = g_malloc(BLOCK_SIZE);
-                qemu_get_buffer(f, buf, BLOCK_SIZE);
-                for (i = 0; i < BLOCK_SIZE / cluster_size; i++) {
+                buf = g_malloc(BLK_MIG_BLOCK_SIZE);
+                qemu_get_buffer(f, buf, BLK_MIG_BLOCK_SIZE);
+                for (i = 0; i < BLK_MIG_BLOCK_SIZE / cluster_size; i++) {
                     cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size;
                     cur_buf = buf + i * cluster_size;
 
                     if ((!block_mig_state.zero_blocks ||
-                         cluster_size < BLOCK_SIZE) &&
+                         cluster_size < BLK_MIG_BLOCK_SIZE) &&
                         buffer_is_zero(cur_buf, cluster_size)) {
                         ret = blk_pwrite_zeroes(blk, cur_addr, cluster_size,