savevm: split save_live_setup from save_live_state

This patch splits stage 1 into its own function for both save_live
users, ram and block.  It is just a copy of the existing function,
with the parts that belong to the other stages removed.  Optimizations
will come later.

Signed-off-by: Juan Quintela <quintela@redhat.com>
Juan Quintela 2012-06-28 15:11:57 +02:00
parent 6bd6878133
commit d1315aac6e
4 changed files with 99 additions and 34 deletions
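
For a sense of what the split means for a save_live user, here is a minimal before/after sketch of the handler shape (foo_save_setup, foo_save_live and savevm_foo_handlers are illustrative names, not code from this patch):

/* Before: a single callback multiplexed on "stage". */
static int foo_save_live(QEMUFile *f, int stage, void *opaque)
{
    if (stage == 1) {
        /* one-time setup used to live here */
    }
    /* stage 2 (iterate) and stage 3 (complete) work continues here */
    return 0;
}

/* After: stage 1 gets its own entry point, registered alongside the
 * stage-based callback, which keeps the iterate/complete work. */
static int foo_save_setup(QEMUFile *f, void *opaque)
{
    /* one-time setup: initialise state, start dirty tracking,
     * write the section's header data */
    return 0;
}

SaveVMHandlers savevm_foo_handlers = {
    .save_live_setup = foo_save_setup,
    .save_live_state = foo_save_live,
};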

arch_init.c

@@ -303,44 +303,86 @@ static void ram_migration_cancel(void *opaque)
 
 #define MAX_WAIT 50 /* ms, half buffered_file limit */
 
-static int ram_save_live(QEMUFile *f, int stage, void *opaque)
+static int ram_save_setup(QEMUFile *f, void *opaque)
 {
     ram_addr_t addr;
-    uint64_t bytes_transferred_last;
+    RAMBlock *block;
     double bwidth = 0;
     int ret;
     int i;
 
-    memory_global_sync_dirty_bitmap(get_system_memory());
-
-    if (stage == 1) {
-        RAMBlock *block;
-        bytes_transferred = 0;
-        last_block = NULL;
-        last_offset = 0;
-        sort_ram_list();
+    bytes_transferred = 0;
+    last_block = NULL;
+    last_offset = 0;
+    sort_ram_list();
 
-        /* Make sure all dirty bits are set */
-        QLIST_FOREACH(block, &ram_list.blocks, next) {
-            for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
-                if (!memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE,
-                                             DIRTY_MEMORY_MIGRATION)) {
-                    memory_region_set_dirty(block->mr, addr, TARGET_PAGE_SIZE);
-                }
-            }
-        }
+    /* Make sure all dirty bits are set */
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
+        for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
+            if (!memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE,
+                                         DIRTY_MEMORY_MIGRATION)) {
+                memory_region_set_dirty(block->mr, addr, TARGET_PAGE_SIZE);
+            }
+        }
+    }
 
-        memory_global_dirty_log_start();
-
-        qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
-
-        QLIST_FOREACH(block, &ram_list.blocks, next) {
-            qemu_put_byte(f, strlen(block->idstr));
-            qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
-            qemu_put_be64(f, block->length);
-        }
-    }
+    memory_global_dirty_log_start();
+
+    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
+
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
+        qemu_put_byte(f, strlen(block->idstr));
+        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
+        qemu_put_be64(f, block->length);
+    }
 
+    bwidth = qemu_get_clock_ns(rt_clock);
+
+    i = 0;
+    while ((ret = qemu_file_rate_limit(f)) == 0) {
+        int bytes_sent;
+
+        bytes_sent = ram_save_block(f);
+        bytes_transferred += bytes_sent;
+        if (bytes_sent == 0) { /* no more blocks */
+            break;
+        }
+        /* we want to check in the 1st loop, just in case it was the 1st time
+           and we had to sync the dirty bitmap.
+           qemu_get_clock_ns() is a bit expensive, so we only check each some
+           iterations
+        */
+        if ((i & 63) == 0) {
+            uint64_t t1 = (qemu_get_clock_ns(rt_clock) - bwidth) / 1000000;
+            if (t1 > MAX_WAIT) {
+                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
+                        t1, i);
+                break;
+            }
+        }
+        i++;
+    }
+
+    if (ret < 0) {
+        return ret;
+    }
+
+    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
+
+    return 0;
+}
+
+static int ram_save_live(QEMUFile *f, int stage, void *opaque)
+{
+    uint64_t bytes_transferred_last;
+    double bwidth = 0;
+    int ret;
+    int i;
+
+    memory_global_sync_dirty_bitmap(get_system_memory());
+
     bytes_transferred_last = bytes_transferred;
     bwidth = qemu_get_clock_ns(rt_clock);
@@ -535,6 +577,7 @@ done:
 }
 
 SaveVMHandlers savevm_ram_handlers = {
+    .save_live_setup = ram_save_setup,
     .save_live_state = ram_save_live,
     .load_state = ram_load,
     .cancel = ram_migration_cancel,

block-migration.c

@@ -541,6 +541,33 @@ static void block_migration_cancel(void *opaque)
     blk_mig_cleanup();
 }
 
+static int block_save_setup(QEMUFile *f, void *opaque)
+{
+    int ret;
+
+    DPRINTF("Enter save live setup submitted %d transferred %d\n",
+            block_mig_state.submitted, block_mig_state.transferred);
+
+    init_blk_migration(f);
+
+    /* start track dirty blocks */
+    set_dirty_tracking(1);
+
+    flush_blks(f);
+
+    ret = qemu_file_get_error(f);
+    if (ret) {
+        blk_mig_cleanup();
+        return ret;
+    }
+
+    blk_mig_reset_dirty_cursor();
+
+    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
+
+    return 0;
+}
+
 static int block_save_live(QEMUFile *f, int stage, void *opaque)
 {
     int ret;
@@ -548,13 +575,6 @@ static int block_save_live(QEMUFile *f, int stage, void *opaque)
     DPRINTF("Enter save live stage %d submitted %d transferred %d\n",
             stage, block_mig_state.submitted, block_mig_state.transferred);
 
-    if (stage == 1) {
-        init_blk_migration(f);
-
-        /* start track dirty blocks */
-        set_dirty_tracking(1);
-    }
-
     flush_blks(f);
 
     ret = qemu_file_get_error(f);
@@ -710,6 +730,7 @@ static bool block_is_active(void *opaque)
 
 SaveVMHandlers savevm_block_handlers = {
     .set_params = block_set_params,
+    .save_live_setup = block_save_setup,
     .save_live_state = block_save_live,
     .load_state = block_load,
     .cancel = block_migration_cancel,

savevm.c

@@ -1573,7 +1573,7 @@ int qemu_savevm_state_begin(QEMUFile *f,
     QTAILQ_FOREACH(se, &savevm_handlers, entry) {
         int len;
 
-        if (!se->ops || !se->ops->save_live_state) {
+        if (!se->ops || !se->ops->save_live_setup) {
             continue;
         }
         if (se->ops && se->ops->is_active) {
@@ -1593,7 +1593,7 @@ int qemu_savevm_state_begin(QEMUFile *f,
         qemu_put_be32(f, se->instance_id);
         qemu_put_be32(f, se->version_id);
 
-        ret = se->ops->save_live_state(f, QEMU_VM_SECTION_START, se->opaque);
+        ret = se->ops->save_live_setup(f, se->opaque);
         if (ret < 0) {
             qemu_savevm_state_cancel(f);
             return ret;

sysemu.h

@@ -32,6 +32,7 @@ typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id);
 typedef struct SaveVMHandlers {
     void (*set_params)(const MigrationParams *params, void * opaque);
     SaveStateHandler *save_state;
+    int (*save_live_setup)(QEMUFile *f, void *opaque);
     int (*save_live_state)(QEMUFile *f, int stage, void *opaque);
     void (*cancel)(void *opaque);
     LoadStateHandler *load_state;
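
Taken together with the savevm.c change above, the calling convention after this patch looks like the following (a simplified sketch of the call sites; section headers, is_active checks and error handling omitted):

/* stage 1: qemu_savevm_state_begin() now invokes the setup hook once */
ret = se->ops->save_live_setup(f, se->opaque);

/* stages 2 and 3: the iterate/complete paths still use the stage hook */
ret = se->ops->save_live_state(f, QEMU_VM_SECTION_PART, se->opaque);
ret = se->ops->save_live_state(f, QEMU_VM_SECTION_END, se->opaque);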