migration: Stop sending whole pages through main channel

We have to flush() the QEMUFile because we now send very little data
through that channel.

Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Juan Quintela 2018-04-18 10:13:21 +02:00
parent 7a5cc33c48
commit 35374cbdff
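
To see why the explicit flush matters, here is a self-contained analogy using
stdio buffering rather than QEMU's QEMUFile API (the file name and the stand-in
stream are made up for illustration): once whole pages travel over the multifd
channels, each section only pushes a few bytes, such as the 8-byte EOS marker,
through the main channel, and those bytes can sit in the output buffer until
something forces them out.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Stand-in for the migration main channel; stdio buffering plays
     * the role of QEMUFile's internal buffer in this sketch. */
    FILE *main_channel = fopen("main-channel.bin", "wb");
    if (!main_channel) {
        return 1;
    }

    /* With whole pages now queued to the multifd channels, the main
     * channel only carries small records such as the EOS flag
     * (0x10 in ram.c at the time; format details are not mirrored here). */
    uint64_t eos = 0x10;
    fwrite(&eos, sizeof(eos), 1, main_channel);

    /* Only a handful of bytes were written, so they could linger in the
     * userspace buffer; an explicit flush pushes them out. The patch adds
     * qemu_fflush(f) at the corresponding points for QEMUFile. */
    fflush(main_channel);

    fclose(main_channel);
    return 0;
}
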

@@ -1817,15 +1817,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
 static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
                                  ram_addr_t offset)
 {
-    uint8_t *p;
-
-    p = block->host + offset;
-
-    ram_counters.transferred += save_page_header(rs, rs->f, block,
-                                                 offset | RAM_SAVE_FLAG_PAGE);
     multifd_queue_page(block, offset);
-    qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE);
-    ram_counters.transferred += TARGET_PAGE_SIZE;
     ram_counters.normal++;
 
     return 1;
@@ -3066,6 +3058,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
 
     multifd_send_sync_main();
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
+    qemu_fflush(f);
 
     return 0;
 }
@@ -3148,6 +3141,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
     multifd_send_sync_main();
 out:
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
+    qemu_fflush(f);
     ram_counters.transferred += 8;
 
     ret = qemu_file_get_error(f);
@@ -3201,6 +3195,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
 
     multifd_send_sync_main();
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
+    qemu_fflush(f);
 
     return 0;
 }