Postcopy: Use helpers to map pages during migration

In postcopy, the destination guest is running at the same time
as it's receiving pages; as we receive new pages we must put
them into the guest's address space atomically to avoid a running
CPU accessing a partially written page.

Use the helpers in postcopy-ram.c to map these pages.
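
On the destination these helpers ultimately rely on the kernel's userfaultfd
UFFDIO_COPY ioctl, which copies and maps a whole host page in a single step.
The sketch below is illustrative only; the function and variable names
(place_page_sketch, userfault_fd, staging_page) are assumptions for this
example, not the exact code in postcopy-ram.c.

/* Illustrative sketch, not QEMU's exact helper: atomically place one host
 * page that has been fully assembled in a temporary 'staging' page. */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

static int place_page_sketch(int userfault_fd, void *host_addr,
                             void *staging_page, size_t host_page_size)
{
    struct uffdio_copy copy_struct;

    copy_struct.dst = (uintptr_t)host_addr;     /* page the guest will see */
    copy_struct.src = (uintptr_t)staging_page;  /* temporary, fully written copy */
    copy_struct.len = host_page_size;
    copy_struct.mode = 0;                       /* also wakes any vCPU waiting on it */

    /* The kernel copies and maps the page atomically, so a running vCPU can
     * never observe a partially written page. */
    if (ioctl(userfault_fd, UFFDIO_COPY, &copy_struct)) {
        return -errno;
    }
    return 0;
}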

qemu_get_buffer_in_place is used to avoid a copy out of qemu_file
in the case that postcopy is going to do a copy anyway.
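
Broadly, qemu_get_buffer_in_place() either retargets the caller's pointer at
QEMUFile's internal buffer (no copy) or falls back to copying into whatever
the pointer already references, which is why ram_load_postcopy() below points
place_source at the temporary host page first. A minimal sketch of that
pattern, compiled in QEMU context, with 'staging_page' as an assumed name:

/* Sketch of the zero-copy read pattern used in ram_load_postcopy() below. */
static uint8_t *read_one_target_page_sketch(QEMUFile *f, uint8_t *staging_page)
{
    uint8_t *src = staging_page;                 /* fallback destination */

    qemu_get_buffer_in_place(f, &src, TARGET_PAGE_SIZE);
    /* 'src' now points either into QEMUFile's internal buffer (no extra
     * copy) or still at staging_page (the data was copied there); either
     * way it can be handed straight to the placement helper. */
    return src;
}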

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Author: Dr. David Alan Gilbert, 2015-11-05 18:11:11 +00:00 (committed by Juan Quintela)
commit a71808772a, parent 696ed9a9b3
2 changed files with 130 additions and 1 deletion


@@ -1932,6 +1932,14 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
/* Must be called from within a rcu critical section.
 * Returns a pointer from within the RCU-protected ram_list.
 */
/*
 * Read a RAMBlock ID from the stream f, find the host address of the
 * start of that block and add on 'offset'
 *
 * f: Stream to read from
 * offset: Offset within the block
 * flags: Page flags (mostly to see if it's a continuation of previous block)
 */
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
@@ -2077,11 +2085,126 @@ int ram_postcopy_incoming_init(MigrationIncomingState *mis)
    return postcopy_ram_incoming_init(mis, ram_pages);
}

/*
 * Called in postcopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 */
static int ram_load_postcopy(QEMUFile *f)
{
    int flags = 0, ret = 0;
    bool place_needed = false;
    bool matching_page_sizes = qemu_host_page_size == TARGET_PAGE_SIZE;
    MigrationIncomingState *mis = migration_incoming_get_current();
    /* Temporary page that is later 'placed' */
    void *postcopy_host_page = postcopy_get_tmp_page(mis);

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr;
        void *host = NULL;
        void *page_buffer = NULL;
        void *place_source = NULL;
        uint8_t ch;
        bool all_zero = false;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        trace_ram_load_postcopy_loop((uint64_t)addr, flags);
        place_needed = false;
        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            page_buffer = host;
            /*
             * Postcopy requires that we place whole host pages atomically.
             * To make it atomic, the data is read into a temporary page
             * that's moved into place later.
             * The migration protocol uses, possibly smaller, target-pages
             * however the source ensures it always sends all the components
             * of a host page in order.
             */
            page_buffer = postcopy_host_page +
                          ((uintptr_t)host & ~qemu_host_page_mask);
            /* If all TP are zero then we can optimise the place */
            if (!((uintptr_t)host & ~qemu_host_page_mask)) {
                all_zero = true;
            }

            /*
             * If it's the last part of a host page then we place the host
             * page
             */
            place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
                            ~qemu_host_page_mask) == 0;
            place_source = postcopy_host_page;
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_COMPRESS:
            ch = qemu_get_byte(f);
            memset(page_buffer, ch, TARGET_PAGE_SIZE);
            if (ch) {
                all_zero = false;
            }
            break;

        case RAM_SAVE_FLAG_PAGE:
            all_zero = false;
            if (!place_needed || !matching_page_sizes) {
                qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
            } else {
                /* Avoids the qemu_file copy during postcopy, which is
                 * going to do a copy later; can only do it when we
                 * do this read in one go (matching page sizes)
                 */
                qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
                                         TARGET_PAGE_SIZE);
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            break;
        default:
            error_report("Unknown combination of migration flags: %#x"
                         " (postcopy mode)", flags);
            ret = -EINVAL;
        }

        if (place_needed) {
            /* This gets called at the last target page in the host page */
            if (all_zero) {
                ret = postcopy_place_page_zero(mis,
                                               host + TARGET_PAGE_SIZE -
                                               qemu_host_page_size);
            } else {
                ret = postcopy_place_page(mis, host + TARGET_PAGE_SIZE -
                                               qemu_host_page_size,
                                          place_source);
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    return ret;
}

static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int flags = 0, ret = 0;
    static uint64_t seq_iter;
    int len = 0;
    /*
     * If system is running in postcopy mode, page inserts to host memory must
     * be atomic
     */
    bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;

    seq_iter++;
@@ -2095,7 +2218,12 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
     * critical section.
     */
    rcu_read_lock();

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
    if (postcopy_running) {
        ret = ram_load_postcopy(f);
    }

    while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host = NULL;
        uint8_t ch;


@@ -1257,6 +1257,7 @@ get_queued_page_not_dirty(const char *block_name, uint64_t tmp_offset, uint64_t
migration_bitmap_sync_start(void) ""
migration_bitmap_sync_end(uint64_t dirty_pages) "dirty_pages %" PRIu64""
migration_throttle(void) ""
ram_load_postcopy_loop(uint64_t addr, int flags) "@%" PRIx64 " %x"
ram_postcopy_send_discard_bitmap(void) ""
ram_save_queue_pages(const char *rbname, size_t start, size_t len) "%s: start: %zx len: %zx"