migration: Sync requested pages after postcopy recovery

We synchronize the requested pages right after a postcopy recovery happens.
This helps to synchronize the prioritized pages on the source side so that the
faulted threads can be served faster.

Reported-by: Xiaohui Li <xiaohli@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20201021212721.440373-5-peterx@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
commit 0c26781c09
parent 8f8bfffcf1
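Before the patch itself, a minimal standalone sketch of the idea: the destination keeps every outstanding page request in a GTree keyed by the faulting host address, and after a recovery it walks the whole tree and re-sends each request to the source. This is plain GLib, not QEMU code; the names pending_reqs and resend_page_req are hypothetical, and a printf stands in for the real return-path message.

/*
 * Sketch only: plain GLib, hypothetical names, printf instead of the real
 * migrate_send_rp_message_req_pages() call.
 * Build: gcc sketch.c $(pkg-config --cflags --libs glib-2.0)
 */
#include <glib.h>
#include <stdio.h>

/* Order keys (host addresses) by numeric value */
static gint addr_cmp(gconstpointer a, gconstpointer b)
{
    guintptr ua = (guintptr) a, ub = (guintptr) b;
    return (ua > ub) - (ua < ub);
}

/* GTraverseFunc: return FALSE so g_tree_foreach() keeps visiting every entry */
static gboolean resend_page_req(gpointer key, gpointer value, gpointer data)
{
    (void) value;
    (void) data;
    printf("re-requesting page at host addr %p\n", key);
    return FALSE;
}

int main(void)
{
    GTree *pending_reqs = g_tree_new(addr_cmp);

    /* Two faulted addresses that were still unanswered when the link dropped */
    g_tree_insert(pending_reqs, (gpointer) (guintptr) 0x7f0000001000, NULL);
    g_tree_insert(pending_reqs, (gpointer) (guintptr) 0x7f0000003000, NULL);

    /* After recovery: walk the tree and re-send every pending request */
    g_tree_foreach(pending_reqs, resend_page_req, NULL);

    g_tree_destroy(pending_reqs);
    return 0;
}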
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -2010,6 +2010,49 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
     return LOADVM_QUIT;
 }
 
+/* We must be with page_request_mutex held */
+static gboolean postcopy_sync_page_req(gpointer key, gpointer value,
+                                       gpointer data)
+{
+    MigrationIncomingState *mis = data;
+    void *host_addr = (void *) key;
+    ram_addr_t rb_offset;
+    RAMBlock *rb;
+    int ret;
+
+    rb = qemu_ram_block_from_host(host_addr, true, &rb_offset);
+    if (!rb) {
+        /*
+         * This should _never_ happen. However be nice for a migrating VM to
+         * not crash/assert. Post an error (note: intended to not use *_once
+         * because we do want to see all the illegal addresses; and this can
+         * never be triggered by the guest so we're safe) and move on next.
+         */
+        error_report("%s: illegal host addr %p", __func__, host_addr);
+        /* Try the next entry */
+        return FALSE;
+    }
+
+    ret = migrate_send_rp_message_req_pages(mis, rb, rb_offset);
+    if (ret) {
+        /* Please refer to above comment. */
+        error_report("%s: send rp message failed for addr %p",
+                     __func__, host_addr);
+        return FALSE;
+    }
+
+    trace_postcopy_page_req_sync(host_addr);
+
+    return FALSE;
+}
+
+static void migrate_send_rp_req_pages_pending(MigrationIncomingState *mis)
+{
+    WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
+        g_tree_foreach(mis->page_requested, postcopy_sync_page_req, mis);
+    }
+}
+
 static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis)
 {
     if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
@@ -2032,6 +2075,20 @@ static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis)
     /* Tell source that "we are ready" */
     migrate_send_rp_resume_ack(mis, MIGRATION_RESUME_ACK_VALUE);
 
+    /*
+     * After a postcopy recovery, the source should have lost the postcopy
+     * queue, or potentially the requested pages could have been lost during
+     * the network down phase. Let's re-sync with the source VM by re-sending
+     * all the pending pages that we eagerly need, so these threads won't get
+     * blocked too long due to the recovery.
+     *
+     * Without this procedure, the faulted destination VM threads (waiting for
+     * page requests right before the postcopy is interrupted) can keep hanging
+     * until the pages are sent by the source during the background copying of
+     * pages, or another thread faulted on the same address accidentally.
+     */
+    migrate_send_rp_req_pages_pending(mis);
+
     return 0;
 }
 
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -49,6 +49,7 @@ vmstate_save(const char *idstr, const char *vmsd_name) "%s, %s"
 vmstate_load(const char *idstr, const char *vmsd_name) "%s, %s"
 postcopy_pause_incoming(void) ""
 postcopy_pause_incoming_continued(void) ""
+postcopy_page_req_sync(void *host_addr) "sync page req %p"
 
 # vmstate.c
 vmstate_load_field_error(const char *field, int ret) "field \"%s\" load failed, ret = %d"
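For readers not familiar with QEMU's trace-events files: each line declares a trace point, and the build (tracetool) generates a matching trace_<name>() helper whose behaviour depends on the configured trace backend. The trace_postcopy_page_req_sync() call in postcopy_sync_page_req() above maps to the new declaration roughly like this hand-written stand-in (illustration only, not the generated code):

/*
 * Illustration only: the real trace_postcopy_page_req_sync() is generated by
 * tracetool from the declaration above; this stand-in just mirrors the
 * declared format string using printf.
 */
#include <stdio.h>

static void trace_postcopy_page_req_sync(void *host_addr)
{
    /* declaration: postcopy_page_req_sync(void *host_addr) "sync page req %p" */
    printf("postcopy_page_req_sync sync page req %p\n", host_addr);
}

int main(void)
{
    int dummy;
    trace_postcopy_page_req_sync(&dummy);
    return 0;
}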