/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/blocker.h"
#include "exec.h"
#include "fd.h"
#include "file.h"
#include "socket.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpu-throttle.h"
#include "rdma.h"
#include "ram.h"
#include "ram-compress.h"
#include "migration/global_state.h"
#include "migration/misc.h"
#include "migration.h"
#include "migration-stats.h"
#include "savevm.h"
#include "qemu-file.h"
#include "channel.h"
#include "migration/vmstate.h"
#include "block/block.h"
#include "qapi/error.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-migration.h"
#include "qapi/qapi-visit-sockets.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qnull.h"
#include "qemu/rcu.h"
#include "block.h"
#include "postcopy-ram.h"
#include "qemu/thread.h"
#include "trace.h"
#include "exec/target_page.h"
#include "io/channel-buffer.h"
#include "io/channel-tls.h"
#include "migration/colo.h"
#include "hw/boards.h"
#include "monitor/monitor.h"
#include "net/announce.h"
#include "qemu/queue.h"
#include "multifd.h"
#include "threadinfo.h"
#include "qemu/yank.h"
#include "sysemu/cpus.h"
#include "yank_functions.h"
#include "sysemu/qtest.h"
#include "options.h"
#include "sysemu/dirtylimit.h"

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

/* Messages sent on the return path from destination to source */
enum mig_rp_message_type {
    MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
    MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
    MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32) */

    MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
    MIG_RP_MSG_REQ_PAGES,    /* data (start: be64, len: be32) */
    MIG_RP_MSG_RECV_BITMAP,  /* send recved_bitmap back to source */
    MIG_RP_MSG_RESUME_ACK,   /* tell source that we are ready to resume */
    MIG_RP_MSG_SWITCHOVER_ACK, /* Tell source it's OK to do switchover */

    MIG_RP_MSG_MAX
};

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */

static MigrationState *current_migration;
static MigrationIncomingState *current_incoming;

static GSList *migration_blockers;

static bool migration_object_check(MigrationState *ms, Error **errp);
static int migration_maybe_pause(MigrationState *s,
                                 int *current_active_state,
                                 int new_state);
static void migrate_fd_cancel(MigrationState *s);
static int close_return_path_on_source(MigrationState *s);

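/*
 * Multifd and postcopy-preempt both open more than one connection to
 * the destination, so only URI schemes that can carry several channels
 * qualify for them.
 */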
static bool migration_needs_multiple_sockets(void)
{
    return migrate_multifd() || migrate_postcopy_preempt();
}

static bool uri_supports_multi_channels(const char *uri)
{
    return strstart(uri, "tcp:", NULL) || strstart(uri, "unix:", NULL) ||
           strstart(uri, "vsock:", NULL);
}

static bool
migration_channels_and_uri_compatible(const char *uri, Error **errp)
{
    if (migration_needs_multiple_sockets() &&
        !uri_supports_multi_channels(uri)) {
        error_setg(errp, "Migration requires multi-channel URIs (e.g. tcp)");
        return false;
    }

    return true;
}

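/* GTree comparator for host page addresses stored as pointer keys */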
static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp)
{
    uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp;

    return (a > b) - (a < b);
}

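/*
 * Create the global MigrationState object and pre-initialize the
 * incoming state (mutexes, semaphores, the page request tree), then
 * run the block, RAM and dirty-bitmap migration setup hooks.
 */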
void migration_object_init(void)
{
    /* This can only be called once. */
    assert(!current_migration);
    current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION));

    /*
     * Init the migrate incoming object as well no matter whether
     * we'll use it or not.
     */
    assert(!current_incoming);
    current_incoming = g_new0(MigrationIncomingState, 1);
    current_incoming->state = MIGRATION_STATUS_NONE;
    current_incoming->postcopy_remote_fds =
        g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD));
    qemu_mutex_init(&current_incoming->rp_mutex);
    qemu_mutex_init(&current_incoming->postcopy_prio_thread_mutex);
    qemu_event_init(&current_incoming->main_thread_load_event, false);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_dst, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fault, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fast_load, 0);
    qemu_sem_init(&current_incoming->postcopy_qemufile_dst_done, 0);

    qemu_mutex_init(&current_incoming->page_request_mutex);
    qemu_cond_init(&current_incoming->page_request_cond);
    current_incoming->page_requested = g_tree_new(page_request_addr_cmp);

    migration_object_check(current_migration, &error_fatal);

    blk_mig_init();
    ram_mig_init();
    dirty_bitmap_mig_init();
}

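/*
 * Cancel the current migration, optionally recording @error as the
 * failure reason first.  Also stops any vcpu dirty-limit throttling
 * the migration may have set up.
 */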
void migration_cancel(const Error *error)
{
    if (error) {
        migrate_set_error(current_migration, error);
    }
    if (migrate_dirty_limit()) {
        qmp_cancel_vcpu_dirty_limit(false, -1, NULL);
    }
    migrate_fd_cancel(current_migration);
}

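/* Tear down migration state on QEMU exit */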
void migration_shutdown(void)
{
    /*
     * When the QEMU main thread exits, the COLO thread
     * may be waiting on a semaphore.  So, we should wake up the
     * COLO thread before migration shutdown.
     */
    colo_shutdown();
    /*
     * Cancel the current migration - that will (eventually)
     * stop the migration using this structure
     */
    migration_cancel(NULL);
    object_unref(OBJECT(current_migration));

    /*
     * Cancel outgoing migration of dirty bitmaps. It should
     * at least unref used block nodes.
     */
    dirty_bitmap_mig_cancel_outgoing();

    /*
     * Cancel incoming migration of dirty bitmaps. Dirty bitmaps
     * are non-critical data, and their loss is never considered
     * something serious.
     */
    dirty_bitmap_mig_cancel_incoming();
}

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    /* This can only be called after the object is created. */
    assert(current_migration);
    return current_migration;
}

MigrationIncomingState *migration_incoming_get_current(void)
{
    assert(current_incoming);
    return current_incoming;
}

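/*
 * Free the recorded listen addresses and invoke the transport-specific
 * cleanup hook, if the transport registered one.
 */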
void migration_incoming_transport_cleanup(MigrationIncomingState *mis)
{
    if (mis->socket_address_list) {
        qapi_free_SocketAddressList(mis->socket_address_list);
        mis->socket_address_list = NULL;
    }

    if (mis->transport_cleanup) {
        mis->transport_cleanup(mis->transport_data);
        mis->transport_data = mis->transport_cleanup = NULL;
    }
}

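/*
 * Release everything the incoming side allocated: worker threads, both
 * directions of the migration stream, the postcopy fd array and the
 * page request tree, and drop the yank instance.
 */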
void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    multifd_load_cleanup();
    compress_threads_load_cleanup();

    if (mis->to_src_file) {
        /* Tell source that we are done */
        migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
        qemu_fclose(mis->to_src_file);
        mis->to_src_file = NULL;
    }

    if (mis->from_src_file) {
        migration_ioc_unregister_yank_from_file(mis->from_src_file);
        qemu_fclose(mis->from_src_file);
        mis->from_src_file = NULL;
    }
    if (mis->postcopy_remote_fds) {
        g_array_free(mis->postcopy_remote_fds, TRUE);
        mis->postcopy_remote_fds = NULL;
    }

    migration_incoming_transport_cleanup(mis);
    qemu_event_reset(&mis->main_thread_load_event);

    if (mis->page_requested) {
        g_tree_destroy(mis->page_requested);
        mis->page_requested = NULL;
    }

    if (mis->postcopy_qemufile_dst) {
        migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst);
        qemu_fclose(mis->postcopy_qemufile_dst);
        mis->postcopy_qemufile_dst = NULL;
    }

    yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_events()) {
        qapi_event_send_migration(new_state);
    }
}

/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
static int migrate_send_rp_message(MigrationIncomingState *mis,
                                   enum mig_rp_message_type message_type,
                                   uint16_t len, void *data)
{
    int ret = 0;

    trace_migrate_send_rp_message((int)message_type, len);
    QEMU_LOCK_GUARD(&mis->rp_mutex);

    /*
     * It's possible that the file handle got lost due to network
     * failures.
     */
    if (!mis->to_src_file) {
        ret = -EIO;
        return ret;
    }

    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);

    /* It's possible that the qemu file got an error during sending */
    ret = qemu_file_get_error(mis->to_src_file);

    return ret;
}

/* Request one page from the source VM at the given start address.
 *   rb: the RAMBlock to request the page in
 *   Start: Address offset within the RB
 *   Len: Length in bytes required - must be a multiple of pagesize
 */
int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
                                      RAMBlock *rb, ram_addr_t start)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
    size_t msglen = 12; /* start + len */
    size_t len = qemu_ram_pagesize(rb);
    enum mig_rp_message_type msg_type;
    const char *rbname;
    int rbname_len;

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    /*
     * We maintain the last ramblock that we requested a page for.  Note
     * that we don't need locking because this function will only be
     * called within the postcopy ram fault thread.
     */
    if (rb != mis->last_rb) {
        mis->last_rb = rb;

        rbname = qemu_ram_get_idstr(rb);
        rbname_len = strlen(rbname);

        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        msg_type = MIG_RP_MSG_REQ_PAGES_ID;
    } else {
        msg_type = MIG_RP_MSG_REQ_PAGES;
    }

    return migrate_send_rp_message(mis, msg_type, msglen, bufc);
}

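/*
 * Wrapper around migrate_send_rp_message_req_pages() that first checks
 * the receive bitmap and the page_requested tree, so a page that has
 * already arrived or is already queued is not requested twice.
 */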
int migrate_send_rp_req_pages(MigrationIncomingState *mis,
                              RAMBlock *rb, ram_addr_t start, uint64_t haddr)
{
    void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
    bool received = false;

    WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
        received = ramblock_recv_bitmap_test_byte_offset(rb, start);
        if (!received && !g_tree_lookup(mis->page_requested, aligned)) {
            /*
             * The page has not been received, and it's not yet in the page
             * request list.  Queue it.  Set the value of element to 1, so that
             * things like g_tree_lookup() will return TRUE (1) when found.
             */
            g_tree_insert(mis->page_requested, aligned, (gpointer)1);
            qatomic_inc(&mis->page_requested_count);
            trace_postcopy_page_req_add(aligned, mis->page_requested_count);
        }
    }

    /*
     * If the page is there, skip sending the message.  We don't even need the
     * lock because as long as the page arrived, it'll be there forever.
     */
    if (received) {
        return 0;
    }

    return migrate_send_rp_message_req_pages(mis, rb, start);
}

static bool migration_colo_enabled;
bool migration_incoming_colo_enabled(void)
{
    return migration_colo_enabled;
}

void migration_incoming_disable_colo(void)
{
    ram_block_discard_disable(false);
    migration_colo_enabled = false;
}

int migration_incoming_enable_colo(void)
{
#ifndef CONFIG_REPLICATION
    error_report("ENABLE_COLO command come in migration stream, but COLO "
                 "module is not built in");
    return -ENOTSUP;
#endif

    if (!migrate_colo()) {
        error_report("ENABLE_COLO command come in migration stream, but c-colo "
                     "capability is not set");
        return -EINVAL;
    }

    if (ram_block_discard_disable(true)) {
        error_report("COLO: cannot disable RAM discard");
        return -EBUSY;
    }
    migration_colo_enabled = true;
    return 0;
}

void migrate_add_address(SocketAddress *address)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    QAPI_LIST_PREPEND(mis->socket_address_list,
                      QAPI_CLONE(SocketAddress, address));
}

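/*
 * Parse the incoming URI and dispatch to the matching transport
 * (socket, rdma, exec, fd or file).  Rejects URIs that cannot carry
 * the multiple channels required by the current capabilities.
 */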
static void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p = NULL;
    MigrationIncomingState *mis = migration_incoming_get_current();

    /* URI is not suitable for migration? */
    if (!migration_channels_and_uri_compatible(uri, errp)) {
        return;
    }

    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_SETUP);

    if (strstart(uri, "tcp:", &p) ||
        strstart(uri, "unix:", NULL) ||
        strstart(uri, "vsock:", NULL)) {
        socket_start_incoming_migration(p ? p : uri, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        if (migrate_compress()) {
            error_setg(errp, "RDMA and compression can't be used together");
            return;
        }
        if (migrate_xbzrle()) {
            error_setg(errp, "RDMA and XBZRLE can't be used together");
            return;
        }
        if (migrate_multifd()) {
            error_setg(errp, "RDMA and multifd can't be used together");
            return;
        }
        rdma_start_incoming_migration(p, errp);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
    } else if (strstart(uri, "file:", &p)) {
        file_start_incoming_migration(p, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

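/*
 * Bottom half that runs in the main loop once the incoming stream has
 * been fully loaded: re-activates block devices if needed, announces
 * the guest on the network and moves the VM to its final run state.
 */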
static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    /* If capability late_block_activate is set:
     * Only fire up the block code now if we're going to restart the
     * VM, else 'cont' will do it.
     * This causes file locking to happen; so we don't want it to happen
     * unless we really are starting the VM.
     */
    if (!migrate_late_block_activate() ||
         (autostart && (!global_state_received() ||
            global_state_get_runstate() == RUN_STATE_RUNNING))) {
        /* Make sure all file formats throw away their mutable metadata.
         * If we get an error here, just don't restart the VM yet. */
        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
            local_err = NULL;
            autostart = false;
        }
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self(&mis->announce_timer, migrate_announce_params());

    multifd_load_shutdown();

    dirty_bitmap_mig_before_vm_start();

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else if (migration_incoming_colo_enabled()) {
        migration_incoming_disable_colo();
        vm_start();
    } else {
        runstate_set(global_state_get_runstate());
    }
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    qemu_bh_delete(mis->bh);
    migration_incoming_state_destroy();
}

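/*
 * Main coroutine for the incoming side: loads the whole migration
 * stream, then either hands off to the postcopy/COLO paths or
 * schedules process_incoming_migration_bh() to finish up.
 */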
static void coroutine_fn
process_incoming_migration_co(void *opaque)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;

    assert(mis->from_src_file);

    if (compress_threads_load_setup(mis->from_src_file)) {
        error_report("Failed to setup decompress threads");
        goto fail;
    }

    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_ACTIVE);

    mis->loadvm_co = qemu_coroutine_self();
    ret = qemu_loadvm_state(mis->from_src_file);
    mis->loadvm_co = NULL;

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    if (ret < 0) {
        error_report("load of migration failed: %s", strerror(-ret));
        goto fail;
    }

    if (colo_incoming_co() < 0) {
        goto fail;
    }

    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
    return;
fail:
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    qemu_fclose(mis->from_src_file);

    multifd_load_cleanup();
    compress_threads_load_cleanup();

    exit(EXIT_FAILURE);
}

/**
 * migration_incoming_setup: Setup incoming migration
 * @f: file for main migration channel
 * @errp: where to put errors
 *
 * Returns: %true on success, %false on error.
 */
static bool migration_incoming_setup(QEMUFile *f, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!mis->from_src_file) {
        mis->from_src_file = f;
    }
    qemu_file_set_blocking(f, false);
    return true;
}

void migration_incoming_process(void)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL);
    qemu_coroutine_enter(co);
}

/* Returns true if recovered from a paused migration, otherwise false */
static bool postcopy_try_recover(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
        /* Resumed from a paused postcopy migration */

        /* This should be set already in migration_incoming_setup() */
        assert(mis->from_src_file);
        /* Postcopy has a standalone thread to do vm load */
        qemu_file_set_blocking(mis->from_src_file, true);

        /* Re-configure the return path */
        mis->to_src_file = qemu_file_get_return_path(mis->from_src_file);

        migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
                          MIGRATION_STATUS_POSTCOPY_RECOVER);

        /*
         * Here, we only wake up the main loading thread (while the
         * other threads will still be waiting), so that we can receive
         * commands from the source now, and answer them if needed. The
         * other threads will be woken up afterwards, once we are sure
         * that the source is ready to reply to page requests.
         */
        qemu_sem_post(&mis->postcopy_pause_sem_dst);
        return true;
    }

    return false;
}

void migration_fd_process_incoming(QEMUFile *f, Error **errp)
{
    if (!migration_incoming_setup(f, errp)) {
        return;
    }
    if (postcopy_try_recover()) {
        return;
    }
    migration_incoming_process();
}

/*
 * Returns true when we want to start a new incoming migration process,
 * false otherwise.
 */
static bool migration_should_start_incoming(bool main_channel)
{
    /* Multifd doesn't start unless all channels are established */
    if (migrate_multifd()) {
        return migration_has_all_channels();
    }

    /* Preempt channel only starts when the main channel is created */
    if (migrate_postcopy_preempt()) {
        return main_channel;
    }

    /*
     * For all other types of migration, we should only reach here when
     * it's the main channel that's being created, and we should always
     * proceed with this channel.
     */
    assert(main_channel);
    return true;
}

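/*
 * Accept one incoming channel.  With multifd or postcopy-preempt the
 * destination may see channels arrive in any order, so the QEMU_VM
 * magic is peeked (when the transport supports it) to tell the main
 * channel apart from the secondary ones.
 */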
void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    Error *local_err = NULL;
    QEMUFile *f;
    bool default_channel = true;
    uint32_t channel_magic = 0;
    int ret = 0;

    if (migrate_multifd() && !migrate_postcopy_ram() &&
        qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) {
        /*
         * With multiple channels, it is possible that we receive channels
         * out of order on destination side, causing incorrect mapping of
         * source channels on destination side. Check channel MAGIC to
         * decide type of channel. Please note this is best effort, postcopy
         * preempt channel does not send any magic number so avoid it for
         * postcopy live migration. Also tls live migration already does
         * tls handshake while initializing main channel so with tls this
         * issue is not possible.
         */
        ret = migration_channel_read_peek(ioc, (void *)&channel_magic,
                                          sizeof(channel_magic), &local_err);

        if (ret != 0) {
            error_propagate(errp, local_err);
            return;
        }

        default_channel = (channel_magic == cpu_to_be32(QEMU_VM_FILE_MAGIC));
    } else {
        default_channel = !mis->from_src_file;
    }

    if (multifd_load_setup(errp) != 0) {
        error_setg(errp, "Failed to setup multifd channels");
        return;
    }

    if (default_channel) {
        f = qemu_file_new_input(ioc);

        if (!migration_incoming_setup(f, errp)) {
            return;
        }
    } else {
        /* Multiple connections */
        assert(migration_needs_multiple_sockets());
        if (migrate_multifd()) {
            multifd_recv_new_channel(ioc, &local_err);
        } else {
            assert(migrate_postcopy_preempt());
            f = qemu_file_new_input(ioc);
            postcopy_preempt_new_channel(mis, f);
        }
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if (migration_should_start_incoming(default_channel)) {
        /* If it's a recovery, we're done */
        if (postcopy_try_recover()) {
            return;
        }
        migration_incoming_process();
    }
}

/**
 * @migration_has_all_channels: We have received all channels that we need
 *
 * Returns true when we have got connections to all the channels that
 * we need for migration.
 */
bool migration_has_all_channels(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!mis->from_src_file) {
        return false;
    }

    if (migrate_multifd()) {
        return multifd_recv_all_channels_created();
    }

    if (migrate_postcopy_preempt()) {
        return mis->postcopy_qemufile_dst != NULL;
    }

    return true;
}

int migrate_send_rp_switchover_ack(MigrationIncomingState *mis)
{
    return migrate_send_rp_message(mis, MIG_RP_MSG_SWITCHOVER_ACK, 0, NULL);
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

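/*
 * Send a RECV_BITMAP message for @block_name followed by the received
 * bitmap itself; only valid while in POSTCOPY_RECOVER state.
 */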
void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
                                 char *block_name)
{
    char buf[512];
    int len;
    int64_t res;

    /*
     * First, we send the header part. It contains only the len of
     * idstr, and the idstr itself.
     */
    len = strlen(block_name);
    buf[0] = len;
    memcpy(buf + 1, block_name, len);

    if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: MSG_RP_RECV_BITMAP only used for recovery",
                     __func__);
        return;
    }

    migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf);

    /*
     * Next, we dump the received bitmap to the stream.
     *
     * TODO: currently we are safe since we are the only one that is
     * using the to_src_file handle (fault thread is still paused),
     * and it's OK even without taking the mutex.  However the best way
     * is to take the lock before sending the message header, and
     * release the lock after sending the bitmap.
     */
    qemu_mutex_lock(&mis->rp_mutex);
    res = ramblock_recv_bitmap_send(mis->to_src_file, block_name);
    qemu_mutex_unlock(&mis->rp_mutex);

    trace_migrate_send_rp_recv_bitmap(block_name, res);
}

void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf);
}

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_COLO:
        return true;

    default:
        return false;
    }
}

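/*
 * Like migration_is_setup_or_active(), but also counts CANCELLING (and
 * not COLO) as a running state.
 */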
bool migration_is_running(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_CANCELLING:
        return true;

    default:
        return false;
    }
}

static bool migrate_show_downtime(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_COMPLETED) || migration_in_postcopy();
}

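/*
 * Fill in the timing fields of MigrationInfo: setup time, total time
 * (measured so far while the migration is still running) and either
 * the real or the expected downtime.
 */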
static void populate_time_info(MigrationInfo *info, MigrationState *s)
{
    info->has_status = true;
    info->has_setup_time = true;
    info->setup_time = s->setup_time;

    if (s->state == MIGRATION_STATUS_COMPLETED) {
        info->has_total_time = true;
        info->total_time = s->total_time;
    } else {
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                           s->start_time;
    }

    if (migrate_show_downtime(s)) {
        info->has_downtime = true;
        info->downtime = s->downtime;
    } else {
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
    }
}

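/*
 * Fill in the RAM counters of MigrationInfo from the global mig_stats
 * atomics, plus the optional xbzrle, compression, cpu-throttle and
 * dirty-limit sections when those features are in use.
 */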
static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    size_t page_size = qemu_target_page_size();

    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = stat64_get(&mig_stats.transferred);
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = stat64_get(&mig_stats.zero_pages);
    /* legacy value.  It is not used anymore */
    info->ram->skipped = 0;
    info->ram->normal = stat64_get(&mig_stats.normal_pages);
    info->ram->normal_bytes = info->ram->normal * page_size;
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count =
        stat64_get(&mig_stats.dirty_sync_count);
    info->ram->dirty_sync_missed_zero_copy =
        stat64_get(&mig_stats.dirty_sync_missed_zero_copy);
    info->ram->postcopy_requests =
        stat64_get(&mig_stats.postcopy_requests);
    info->ram->page_size = page_size;
    info->ram->multifd_bytes = stat64_get(&mig_stats.multifd_bytes);
    info->ram->pages_per_second = s->pages_per_second;
    info->ram->precopy_bytes = stat64_get(&mig_stats.precopy_bytes);
    info->ram->downtime_bytes = stat64_get(&mig_stats.downtime_bytes);
    info->ram->postcopy_bytes = stat64_get(&mig_stats.postcopy_bytes);

    if (migrate_xbzrle()) {
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_counters.bytes;
        info->xbzrle_cache->pages = xbzrle_counters.pages;
        info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss;
        info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate;
        info->xbzrle_cache->encoding_rate = xbzrle_counters.encoding_rate;
        info->xbzrle_cache->overflow = xbzrle_counters.overflow;
    }

    populate_compress(info);

    if (cpu_throttle_active()) {
        info->has_cpu_throttle_percentage = true;
        info->cpu_throttle_percentage = cpu_throttle_get_percentage();
    }

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate =
            stat64_get(&mig_stats.dirty_pages_rate);
    }

    if (migrate_dirty_limit() && dirtylimit_in_service()) {
        info->has_dirty_limit_throttle_time_per_round = true;
        info->dirty_limit_throttle_time_per_round =
                            dirtylimit_throttle_time_per_round();

        info->has_dirty_limit_ring_full_time = true;
        info->dirty_limit_ring_full_time = dirtylimit_ring_full_time();
    }
}

static void populate_disk_info(MigrationInfo *info)
{
    if (blk_mig_active()) {
        info->disk = g_malloc0(sizeof(*info->disk));
        info->disk->transferred = blk_mig_bytes_transferred();
        info->disk->remaining = blk_mig_bytes_remaining();
        info->disk->total = blk_mig_bytes_total();
    }
}

static void fill_source_migration_info(MigrationInfo *info)
{
    MigrationState *s = migrate_get_current();
    int state = qatomic_read(&s->state);
    GSList *cur_blocker = migration_blockers;

    info->blocked_reasons = NULL;

    /*
     * There are two types of reasons a migration might be blocked;
     * a) devices marked in VMState as non-migratable, and
     * b) Explicit migration blockers
     * We need to add both of them here.
     */
    qemu_savevm_non_migratable_list(&info->blocked_reasons);

    while (cur_blocker) {
        QAPI_LIST_PREPEND(info->blocked_reasons,
                          g_strdup(error_get_pretty(cur_blocker->data)));
        cur_blocker = g_slist_next(cur_blocker);
    }
    info->has_blocked_reasons = info->blocked_reasons != NULL;

    switch (state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        /* do not overwrite destination migration status */
        return;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        /* TODO add some postcopy stats */
        populate_time_info(info, s);
        populate_ram_info(info, s);
        populate_disk_info(info);
        migration_populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        populate_time_info(info, s);
        populate_ram_info(info, s);
        migration_populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_WAIT_UNPLUG:
        info->has_status = true;
        break;
    }
    info->status = state;

    QEMU_LOCK_GUARD(&s->error_mutex);
    if (s->error) {
        info->error_desc = g_strdup(error_get_pretty(s->error));
    }
}

static void fill_destination_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->socket_address_list) {
        info->has_socket_address = true;
        info->socket_address =
            QAPI_CLONE(SocketAddressList, mis->socket_address_list);
    }

    switch (mis->state) {
    case MIGRATION_STATUS_NONE:
        return;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_FAILED:
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_COMPLETED:
        info->has_status = true;
        fill_destination_postcopy_migration_info(info);
        break;
    }
    info->status = mis->state;
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));

    fill_destination_migration_info(info);
    fill_source_migration_info(info);

    return info;
}

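/*
 * Example QMP exchange for the query above (a sketch; the exact fields
 * returned depend on the migration state):
 *
 *   -> { "execute": "query-migrate" }
 *   <- { "return": { "status": "active",
 *                    "ram": { "transferred": 123456, "remaining": 123456,
 *                             "total": 1073741824, ... } } }
 */
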
void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                   " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                   " started");
        return;
    }
    /*
     * we don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    qatomic_set(&s->start_postcopy, true);
}

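/*
 * Typical postcopy flow from a monitor (a sketch; the destination URI is
 * illustrative):
 *
 *   { "execute": "migrate-set-capabilities", "arguments": { "capabilities":
 *       [ { "capability": "postcopy-ram", "state": true } ] } }
 *   { "execute": "migrate", "arguments": { "uri": "tcp:dst:4444" } }
 *   { "execute": "migrate-start-postcopy" }
 */
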
/* shared migration helpers */

void migrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < MIGRATION_STATUS__MAX);
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        trace_migrate_set_state(MigrationStatus_str(new_state));
        migrate_generate_event(new_state);
    }
}

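/*
 * Usage sketch: the compare-and-swap above makes racing transitions
 * benign.  A caller that wants SETUP -> ACTIVE simply loses (and does
 * nothing) if e.g. a concurrent cancel already moved the state elsewhere:
 *
 *     migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
 *                       MIGRATION_STATUS_ACTIVE);
 */
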
static void migrate_fd_cleanup(MigrationState *s)
{
    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    g_free(s->hostname);
    s->hostname = NULL;
    json_writer_free(s->vmdesc);
    s->vmdesc = NULL;

    qemu_savevm_state_cleanup();

    if (s->to_dst_file) {
        QEMUFile *tmp;

        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        qemu_mutex_lock_iothread();

        multifd_save_cleanup();
        qemu_mutex_lock(&s->qemu_file_lock);
        tmp = s->to_dst_file;
        s->to_dst_file = NULL;
        qemu_mutex_unlock(&s->qemu_file_lock);
        /*
         * Close the file handle without the lock to make sure the
         * critical section won't block for long.
         */
        migration_ioc_unregister_yank_from_file(tmp);
        qemu_fclose(tmp);
    }

    /*
     * We already cleaned up to_dst_file, so errors from the return
     * path might be due to that, ignore them.
     */
    close_return_path_on_source(s);

    assert(!migration_is_active(s));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    if (s->error) {
        /* It is used on info migrate.  We can't free it */
        error_report_err(error_copy(s->error));
    }
    notifier_list_notify(&migration_state_notifiers, s);
    block_cleanup_parameters();
    yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}

static void migrate_fd_cleanup_schedule(MigrationState *s)
{
    /*
     * Ref the state for bh, because it may be called when
     * there're already no other refs
     */
    object_ref(OBJECT(s));
    qemu_bh_schedule(s->cleanup_bh);
}

static void migrate_fd_cleanup_bh(void *opaque)
{
    MigrationState *s = opaque;
    migrate_fd_cleanup(s);
    object_unref(OBJECT(s));
}

void migrate_set_error(MigrationState *s, const Error *error)
{
    QEMU_LOCK_GUARD(&s->error_mutex);
    if (!s->error) {
        s->error = error_copy(error);
    }
}

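/*
 * Note: migrate_set_error() keeps only the first error that was set;
 * later calls while s->error is still non-NULL are silently ignored, so
 * "info migrate" reports the root cause rather than any follow-on errors.
 */
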
bool migrate_has_error(MigrationState *s)
{
    /* The lock is not helpful here, but still follow the rule */
    QEMU_LOCK_GUARD(&s->error_mutex);
    return qatomic_read(&s->error);
}

static void migrate_error_free(MigrationState *s)
{
    QEMU_LOCK_GUARD(&s->error_mutex);
    if (s->error) {
        error_free(s->error);
        s->error = NULL;
    }
}

static void migrate_fd_error(MigrationState *s, const Error *error)
{
    trace_migrate_fd_error(error_get_pretty(error));
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    migrate_set_error(s, error);
}

static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;

    trace_migrate_fd_cancel();

    WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
        if (s->rp_state.from_dst_file) {
            /* shutdown the rp socket, so causing the rp thread to shutdown */
            qemu_file_shutdown(s->rp_state.from_dst_file);
        }
    }

    do {
        old_state = s->state;
        if (!migration_is_running(old_state)) {
            break;
        }
        /* If the migration is paused, kick it out of the pause */
        if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) {
            qemu_sem_post(&s->pause_sem);
        }
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING) {
        WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
            if (s->to_dst_file) {
                qemu_file_shutdown(s->to_dst_file);
            }
        }
    }
    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
        Error *local_err = NULL;

        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
    }
}

void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

bool migration_in_postcopy(void)
{
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        return true;
    default:
        return false;
    }
}

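/*
 * Note that "in postcopy" above includes the paused and recovering
 * states: once the switchover has happened the destination holds the only
 * up-to-date copy of some pages, so a paused postcopy is still a postcopy
 * for every caller that keys off this helper.
 */
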
bool migration_in_postcopy_after_devices(MigrationState *s)
{
    return migration_in_postcopy() && s->postcopy_after_devices;
}

bool migration_in_incoming_postcopy(void)
{
    PostcopyState ps = postcopy_state_get();

    return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END;
}

bool migration_incoming_postcopy_advised(void)
{
    PostcopyState ps = postcopy_state_get();

    return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
}

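/*
 * Both range checks above rely on the PostcopyState enum being ordered to
 * follow the incoming postcopy lifecycle (ADVISE before DISCARD before
 * the listening/running states before END), so a simple comparison is
 * enough to ask "has the state machine passed point X but not finished?".
 */
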
bool migration_in_bg_snapshot(void)
{
    MigrationState *s = migrate_get_current();

    return migrate_background_snapshot() &&
            migration_is_setup_or_active(s->state);
}

bool migration_is_idle(void)
{
    MigrationState *s = current_migration;

    if (!s) {
        return true;
    }

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_COMPLETED:
    case MIGRATION_STATUS_FAILED:
        return true;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_COLO:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
        return false;
    case MIGRATION_STATUS__MAX:
        g_assert_not_reached();
    }

    return false;
}

bool migration_is_active(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_ACTIVE ||
            s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

int migrate_init(MigrationState *s, Error **errp)
{
    int ret;

    ret = qemu_savevm_state_prepare(errp);
    if (ret) {
        return ret;
    }

    /*
     * Reinitialise all migration state, except
     * parameters/capabilities that the user set, and
     * locks.
     */
    s->cleanup_bh = 0;
    s->vm_start_bh = 0;
    s->to_dst_file = NULL;
    s->state = MIGRATION_STATUS_NONE;
    s->rp_state.from_dst_file = NULL;
    s->rp_state.error = false;
    s->mbps = 0.0;
    s->pages_per_second = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->setup_time = 0;
    s->start_postcopy = false;
    s->postcopy_after_devices = false;
    s->migration_thread_running = false;
    error_free(s->error);
    s->error = NULL;
    s->hostname = NULL;
    s->vmdesc = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    s->total_time = 0;
    s->vm_old_state = -1;
    s->iteration_initial_bytes = 0;
    s->threshold_size = 0;
    s->switchover_acked = false;
    s->rdma_migration = false;
    /*
     * set mig_stats memory to zero for a new migration
     */
    memset(&mig_stats, 0, sizeof(mig_stats));
    migration_reset_vfio_bytes_transferred();

    return 0;
}

int migrate_add_blocker_internal(Error **reasonp, Error **errp)
{
    /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */
    if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) {
        error_propagate_prepend(errp, *reasonp,
                                "disallowing migration blocker "
                                "(migration/snapshot in progress) for: ");
        *reasonp = NULL;
        return -EBUSY;
    }

    migration_blockers = g_slist_prepend(migration_blockers, *reasonp);
    return 0;
}

int migrate_add_blocker(Error **reasonp, Error **errp)
{
    if (only_migratable) {
        error_propagate_prepend(errp, *reasonp,
                                "disallowing migration blocker "
                                "(--only-migratable) for: ");
        *reasonp = NULL;
        return -EACCES;
    }

    return migrate_add_blocker_internal(reasonp, errp);
}

void migrate_del_blocker(Error **reasonp)
{
    if (*reasonp) {
        migration_blockers = g_slist_remove(migration_blockers, *reasonp);
        error_free(*reasonp);
        *reasonp = NULL;
    }
}

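/*
 * Typical device-side usage of the blocker API (a sketch; the error text
 * is illustrative):
 *
 *     Error *blocker = NULL;
 *
 *     error_setg(&blocker, "Device 'foo' does not support migration");
 *     if (migrate_add_blocker(&blocker, errp) < 0) {
 *         // blocker was consumed: freed and set to NULL on failure
 *         return;
 *     }
 *     ...
 *     migrate_del_blocker(&blocker);   // also frees and NULLs it
 */
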
void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        return;
    }
    if (!runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "'-incoming' was not specified on the command line");
        return;
    }

    if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
        return;
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        yank_unregister_instance(MIGRATION_YANK_INSTANCE);
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

void qmp_migrate_recover(const char *uri, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    /*
     * Don't even bother to use ERRP_GUARD() as it _must_ always be set by
     * callers (no one should ignore a recover failure); if there is, it's a
     * programming error.
     */
    assert(errp);

    if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
        error_setg(errp, "Migrate recover can only be run "
                   "when postcopy is paused.");
        return;
    }

    /* If there's an existing transport, release it */
    migration_incoming_transport_cleanup(mis);

    /*
     * Note that this call will never start a real migration; it will
     * only re-setup the migration stream and poke existing migration
     * to continue using that newly established channel.
     */
    qemu_start_incoming_migration(uri, errp);
}

void qmp_migrate_pause(Error **errp)
{
    MigrationState *ms = migrate_get_current();
    MigrationIncomingState *mis = migration_incoming_get_current();
    int ret = 0;

    if (ms->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        /* Source side, during postcopy */
        qemu_mutex_lock(&ms->qemu_file_lock);
        if (ms->to_dst_file) {
            ret = qemu_file_shutdown(ms->to_dst_file);
        }
        qemu_mutex_unlock(&ms->qemu_file_lock);
        if (ret) {
            error_setg(errp, "Failed to pause source migration");
        }
        return;
    }

    if (mis->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        ret = qemu_file_shutdown(mis->from_src_file);
        if (ret) {
            error_setg(errp, "Failed to pause destination migration");
        }
        return;
    }

    error_setg(errp, "migrate-pause is currently only supported "
               "during postcopy-active state");
}

bool migration_is_blocked(Error **errp)
{
    if (qemu_savevm_state_blocked(errp)) {
        return true;
    }

    if (migration_blockers) {
        error_propagate(errp, error_copy(migration_blockers->data));
        return true;
    }

    return false;
}

/* Returns true if continue to migrate, or false if error detected */
static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
                            bool resume, Error **errp)
{
    Error *local_err = NULL;

    if (resume) {
        if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
            error_setg(errp, "Cannot resume if there is no "
                       "paused migration");
            return false;
        }

        /*
         * Postcopy recovery won't work well with release-ram
         * capability since release-ram will drop the page buffer as
         * long as the page is put into the send buffer. So if there
         * is a network failure happened, any page buffers that have
         * not yet reached the destination VM but have already been
         * sent from the source VM will be lost forever. Let's refuse
         * the client from resuming such a postcopy migration.
         * Luckily release-ram was designed to only be used when src
         * and destination VMs are on the same host, so it should be
         * fine.
         */
        if (migrate_release_ram()) {
            error_setg(errp, "Postcopy recovery cannot work "
                       "when release-ram capability is set");
            return false;
        }

        /* This is a resume, skip init status */
        return true;
    }

    if (migration_is_running(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return false;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return false;
    }

    if (runstate_check(RUN_STATE_POSTMIGRATE)) {
        error_setg(errp, "Can't migrate the vm that was paused due to "
                   "previous migration");
        return false;
    }

    if (migration_is_blocked(errp)) {
        return false;
    }

    if (blk || blk_inc) {
        if (migrate_colo()) {
            error_setg(errp, "No disk migration is required in COLO mode");
            return false;
        }
        if (migrate_block() || migrate_block_incremental()) {
            error_setg(errp, "Command options are incompatible with "
                       "current migration capabilities");
            return false;
        }
        if (!migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, true, &local_err)) {
            error_propagate(errp, local_err);
            return false;
        }
        s->must_remove_block_options = true;
    }

    if (blk_inc) {
        migrate_set_block_incremental(true);
    }

    if (migrate_init(s, errp)) {
        return false;
    }

    return true;
}

void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 bool has_resume, bool resume, Error **errp)
{
    bool resume_requested;
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    const char *p = NULL;

    /* URI is not suitable for migration? */
    if (!migration_channels_and_uri_compatible(uri, errp)) {
        return;
    }

    resume_requested = has_resume && resume;
    if (!migrate_prepare(s, has_blk && blk, has_inc && inc,
                         resume_requested, errp)) {
        /* Error detected, put into errp */
        return;
    }

    if (!resume_requested) {
        if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
            return;
        }
    }

    if (strstart(uri, "tcp:", &p) ||
        strstart(uri, "unix:", NULL) ||
        strstart(uri, "vsock:", NULL)) {
        socket_start_outgoing_migration(s, p ? p : uri, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "file:", &p)) {
        file_start_outgoing_migration(s, p, &local_err);
    } else {
        error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        block_cleanup_parameters();
    }

    if (local_err) {
        if (!resume_requested) {
            yank_unregister_instance(MIGRATION_YANK_INSTANCE);
        }
        migrate_fd_error(s, local_err);
        error_propagate(errp, local_err);
        return;
    }
}

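/*
 * Example URIs accepted above (a sketch; host names and paths are
 * illustrative):
 *
 *   tcp:dst.example.com:4444     socket migration over TCP
 *   unix:/tmp/migrate.sock       socket migration over a UNIX socket
 *   rdma:dst.example.com:4444    RDMA (only when CONFIG_RDMA is set)
 *   exec:cat > /tmp/vm.migrate   pipe the stream through a command
 *   fd:migfd                     use a monitor-passed file descriptor
 *   file:/tmp/vm.migrate         write the stream to a file
 */
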
void qmp_migrate_cancel(Error **errp)
{
    migration_cancel(NULL);
}

void qmp_migrate_continue(MigrationStatus state, Error **errp)
{
    MigrationState *s = migrate_get_current();
    if (s->state != state) {
        error_setg(errp, "Migration not in expected state: %s",
                   MigrationStatus_str(s->state));
        return;
    }
    qemu_sem_post(&s->pause_sem);
}

/* migration thread support */
/*
 * Something bad happened to the RP stream, mark an error
 * The caller shall print or trace something to indicate why
 */
static void mark_source_rp_bad(MigrationState *s)
{
    s->rp_state.error = true;
}

void migration_rp_wait(MigrationState *s)
{
    qemu_sem_wait(&s->rp_state.rp_sem);
}

void migration_rp_kick(MigrationState *s)
{
    qemu_sem_post(&s->rp_state.rp_sem);
}

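/*
 * migration_rp_wait()/migration_rp_kick() pair the migration thread with
 * the return-path thread: the migration thread blocks on rp_sem until the
 * return-path thread has seen the event it is waiting for (e.g. a
 * RESUME_ACK during postcopy recovery, see below) and kicks it awake.
 */
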
static struct rp_cmd_args {
    ssize_t len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
    [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
    [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_RECV_BITMAP]    = { .len = -1, .name = "RECV_BITMAP" },
    [MIG_RP_MSG_RESUME_ACK]     = { .len =  4, .name = "RESUME_ACK" },
    [MIG_RP_MSG_SWITCHOVER_ACK] = { .len =  0, .name = "SWITCHOVER_ACK" },
    [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
};

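/*
 * On the wire each return-path message is framed as:
 *
 *     be16 type | be16 length | payload (length bytes)
 *
 * rp_cmd_args[] gives the expected payload length per message type
 * (-1 meaning variable), which source_return_path_thread() below
 * validates before it reads the payload.
 */
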
/*
 * Process a request for pages received on the return path,
 * We're allowed to send more than requested (e.g. to round to our page size)
 * and we don't need to send pages that have already been sent.
 */
static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
                                        ram_addr_t start, size_t len)
{
    long our_host_ps = qemu_real_host_page_size();

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes, just sanity check
     * we're being asked for whole host pages.
     */
    if (!QEMU_IS_ALIGNED(start, our_host_ps) ||
        !QEMU_IS_ALIGNED(len, our_host_ps)) {
        error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
                     " len: %zd", __func__, start, len);
        mark_source_rp_bad(ms);
        return;
    }

    if (ram_save_queue_pages(rbname, start, len)) {
        mark_source_rp_bad(ms);
    }
}

static int migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);

    if (!block) {
        error_report("%s: invalid block name '%s'", __func__, block_name);
        return -EINVAL;
    }

    /* Fetch the received bitmap and refresh the dirty bitmap */
    return ram_dirty_bitmap_reload(s, block);
}

static int migrate_handle_rp_resume_ack(MigrationState *s, uint32_t value)
{
    trace_source_return_path_thread_resume_ack(value);

    if (value != MIGRATION_RESUME_ACK_VALUE) {
        error_report("%s: illegal resume_ack value %"PRIu32,
                     __func__, value);
        return -1;
    }

    /* Now both sides are active. */
    migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    /* Notify send thread that time to continue send pages */
    migration_rp_kick(s);

    return 0;
}

/*
 * Release ms->rp_state.from_dst_file (and postcopy_qemufile_src if
 * existed) in a safe way.
 */
static void migration_release_dst_files(MigrationState *ms)
{
    QEMUFile *file;

    WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
        /*
         * Reset the from_dst_file pointer first before releasing it, as we
         * can't block within lock section
         */
        file = ms->rp_state.from_dst_file;
        ms->rp_state.from_dst_file = NULL;
    }

    /*
     * Do the same to postcopy fast path socket too if there is.  No
     * locking needed because this qemufile should only be managed by
     * return path thread.
     */
    if (ms->postcopy_qemufile_src) {
        migration_ioc_unregister_yank_from_file(ms->postcopy_qemufile_src);
        qemu_file_shutdown(ms->postcopy_qemufile_src);
        qemu_fclose(ms->postcopy_qemufile_src);
        ms->postcopy_qemufile_src = NULL;
    }

    qemu_fclose(file);
}

/*
 * Handles messages sent on the return path towards the source VM
 *
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    uint8_t buf[512];
    uint32_t tmp32, sibling_error;
    ram_addr_t start = 0; /* =0 to silence warning */
    size_t len = 0, expected_len;
    int res;

    trace_source_return_path_thread_entry();
    rcu_register_thread();

    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (qemu_file_get_error(rp)) {
            mark_source_rp_bad(ms);
            goto out;
        }

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_report("RP: Received invalid message 0x%04x length 0x%04x",
                         header_type, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        if ((rp_cmd_args[header_type].len != -1 &&
             header_len != rp_cmd_args[header_type].len) ||
            header_len > sizeof(buf)) {
            error_report("RP: Received '%s' message (0x%04x) with"
                         " incorrect length %d expecting %zu",
                         rp_cmd_args[header_type].name, header_type, header_len,
                         (size_t)rp_cmd_args[header_type].len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_report("RP: Failed reading data for message 0x%04x"
                         " read %d expected %d",
                         header_type, res, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            sibling_error = ldl_be_p(buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_report("RP: Sibling indicated error %d", sibling_error);
                mark_source_rp_bad(ms);
            }
            /*
             * We'll let the main thread deal with closing the RP
             * we could do a shutdown(2) on it, but we're the only user
             * anyway, so there's nothing gained.
             */
            goto out;

        case MIG_RP_MSG_PONG:
            tmp32 = ldl_be_p(buf);
            trace_source_return_path_thread_pong(tmp32);
            qemu_sem_post(&ms->rp_state.rp_pong_acks);
            break;

        case MIG_RP_MSG_REQ_PAGES:
            start = ldq_be_p(buf);
            len = ldl_be_p(buf + 8);
            migrate_handle_rp_req_pages(ms, NULL, start, len);
            break;

        case MIG_RP_MSG_REQ_PAGES_ID:
            expected_len = 12 + 1; /* header + termination */

            if (header_len >= expected_len) {
                start = ldq_be_p(buf);
                len = ldl_be_p(buf + 8);
                /* Now we expect an idstr */
                tmp32 = buf[12]; /* Length of the following idstr */
                buf[13 + tmp32] = '\0';
                expected_len += tmp32;
            }
            if (header_len != expected_len) {
                error_report("RP: Req_Page_id with length %d expecting %zd",
                             header_len, expected_len);
                mark_source_rp_bad(ms);
                goto out;
            }
            migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
            break;

        case MIG_RP_MSG_RECV_BITMAP:
            if (header_len < 1) {
                error_report("%s: missing block name", __func__);
                mark_source_rp_bad(ms);
                goto out;
            }
            /* Format: len (1B) + idstr (<255B). This ends the idstr. */
            buf[buf[0] + 1] = '\0';
            if (migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1))) {
                mark_source_rp_bad(ms);
                goto out;
            }
            break;

        case MIG_RP_MSG_RESUME_ACK:
            tmp32 = ldl_be_p(buf);
            if (migrate_handle_rp_resume_ack(ms, tmp32)) {
                mark_source_rp_bad(ms);
                goto out;
            }
            break;

        case MIG_RP_MSG_SWITCHOVER_ACK:
            ms->switchover_acked = true;
            trace_source_return_path_thread_switchover_acked();
            break;

        default:
            break;
        }
    }

out:
    if (qemu_file_get_error(rp)) {
        trace_source_return_path_thread_bad_end();
        mark_source_rp_bad(ms);
    }

    trace_source_return_path_thread_end();
    rcu_unregister_thread();
    return NULL;
}

static int open_return_path_on_source(MigrationState *ms)
{
    ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
    if (!ms->rp_state.from_dst_file) {
        return -1;
    }

    trace_open_return_path_on_source();

    qemu_thread_create(&ms->rp_state.rp_thread, "return path",
                       source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
    ms->rp_state.rp_thread_created = true;

    trace_open_return_path_on_source_continue();

    return 0;
}

2023-08-04 11:30:53 +02:00
|
|
|
static int close_return_path_on_source(MigrationState *ms)
|
2015-11-05 19:10:49 +01:00
|
|
|
{
|
2023-09-18 19:28:20 +02:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!ms->rp_state.rp_thread_created) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_migration_return_path_end_before();
|
|
|
|
|
2015-11-05 19:10:49 +01:00
|
|
|
/*
|
2023-09-18 19:28:17 +02:00
|
|
|
* If this is a normal exit then the destination will send a SHUT
|
|
|
|
* and the rp_thread will exit, however if there's an error we
|
|
|
|
* need to cause it to exit. shutdown(2), if we have it, will
|
|
|
|
* cause it to unblock if it's stuck waiting for the destination.
|
2015-11-05 19:10:49 +01:00
|
|
|
*/
|
2023-09-18 19:28:17 +02:00
|
|
|
WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
|
|
|
|
if (ms->to_dst_file && ms->rp_state.from_dst_file &&
|
|
|
|
qemu_file_get_error(ms->to_dst_file)) {
|
|
|
|
qemu_file_shutdown(ms->rp_state.from_dst_file);
|
|
|
|
}
|
2015-11-05 19:10:49 +01:00
|
|
|
}
|
2023-09-18 19:28:17 +02:00
|
|
|
|
2015-11-05 19:10:49 +01:00
|
|
|
trace_await_return_path_close_on_source_joining();
|
|
|
|
qemu_thread_join(&ms->rp_state.rp_thread);
|
2021-07-22 19:58:37 +02:00
|
|
|
ms->rp_state.rp_thread_created = false;
|
2015-11-05 19:10:49 +01:00
|
|
|
trace_await_return_path_close_on_source_close();
|
2023-09-18 19:28:20 +02:00
|
|
|
|
|
|
|
ret = ms->rp_state.error;
|
2023-09-18 19:28:21 +02:00
|
|
|
ms->rp_state.error = false;
|
2023-09-18 19:28:22 +02:00
|
|
|
|
|
|
|
migration_release_dst_files(ms);
|
|
|
|
|
2023-09-18 19:28:20 +02:00
|
|
|
trace_migration_return_path_end_after(ret);
|
|
|
|
return ret;
|
2015-11-05 19:10:49 +01:00
|
|
|
}
|
|
|
|
|
2023-02-08 21:28:13 +01:00
|
|
|
static inline void
|
|
|
|
migration_wait_main_channel(MigrationState *ms)
|
|
|
|
{
|
|
|
|
/* Wait until one PONG message received */
|
|
|
|
qemu_sem_wait(&ms->rp_state.rp_pong_acks);
|
|
|
|
}

/*
 * Switch from normal iteration to postcopy
 * Returns non-0 on error
 */
static int postcopy_start(MigrationState *ms, Error **errp)
{
    int ret;
    QIOChannelBuffer *bioc;
    QEMUFile *fb;
    int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    uint64_t bandwidth = migrate_max_postcopy_bandwidth();
    bool restart_block = false;
    int cur_state = MIGRATION_STATUS_ACTIVE;

    if (migrate_postcopy_preempt()) {
        /*
         * The preempt channel is created asynchronously by the main
         * thread so that a slow connection cannot hold the BQL; wait
         * here until it is ready, and fail the migration if the channel
         * could not be established.
         */
        migration_wait_main_channel(ms);
        if (postcopy_preempt_establish_channel(ms)) {
            migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED);
            return -1;
        }
    }

    if (!migrate_pause_before_switchover()) {
        migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_POSTCOPY_ACTIVE);
    }

    trace_postcopy_start();
    qemu_mutex_lock_iothread();
    trace_postcopy_start_set_run();

    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
    global_state_store();
    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
    if (ret < 0) {
        goto fail;
    }

    ret = migration_maybe_pause(ms, &cur_state,
                                MIGRATION_STATUS_POSTCOPY_ACTIVE);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_inactivate_all();
    if (ret < 0) {
        goto fail;
    }
    restart_block = true;

    /*
     * Cause any non-postcopiable, but iterative devices to
     * send out their final data.
     */
    qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false);

    /*
     * in Finish migrate and with the io-lock held everything should
     * be quiet, but we've potentially still got dirty pages and we
     * need to tell the destination to throw any pages it's already received
     * that are dirty
     */
    if (migrate_postcopy_ram()) {
        ram_postcopy_send_discard_bitmap(ms);
    }

    /*
     * send rest of state - note things that are doing postcopy
     * will notice we're in POSTCOPY_ACTIVE and not actually
     * wrap their state up here
     */
    migration_rate_set(bandwidth);
    if (migrate_postcopy_ram()) {
        /* Ping just for debugging, helps line traces up */
        qemu_savevm_send_ping(ms->to_dst_file, 2);
    }

    /*
     * While loading the device state we may trigger page transfer
     * requests and the fd must be free to process those, and thus
     * the destination must read the whole device state off the fd before
     * it starts processing it. Unfortunately the ad-hoc migration format
     * doesn't allow the destination to know the size to read without fully
     * parsing it through each devices load-state code (especially the open
     * coded devices that use get/put).
     * So we wrap the device state up in a package with a length at the start;
     * to do this we use a qemu_buf to hold the whole of the device state.
     */
    bioc = qio_channel_buffer_new(4096);
    qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
    fb = qemu_file_new_output(QIO_CHANNEL(bioc));
    object_unref(OBJECT(bioc));

    /*
     * Make sure the receiver can get incoming pages before we send the rest
     * of the state
     */
    qemu_savevm_send_postcopy_listen(fb);

    qemu_savevm_state_complete_precopy(fb, false, false);
    if (migrate_postcopy_ram()) {
        qemu_savevm_send_ping(fb, 3);
    }

    qemu_savevm_send_postcopy_run(fb);

    /* <><> end of stuff going into the package */

    /* Last point of recovery; as soon as we send the package the destination
     * can open devices and potentially start running.
     * Lets just check again we've not got any errors.
     */
    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_setg(errp, "postcopy_start: Migration stream errored (pre package)");
        goto fail_closefb;
    }

    restart_block = false;

    /* Now send that blob */
    if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
        goto fail_closefb;
    }
    qemu_fclose(fb);

    /* Send a notify to give a chance for anything that needs to happen
     * at the transition to postcopy and after the device state; in particular
     * spice needs to trigger a transition now
     */
    ms->postcopy_after_devices = true;
    notifier_list_notify(&migration_state_notifiers, ms);

    ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;

    qemu_mutex_unlock_iothread();

    if (migrate_postcopy_ram()) {
        /*
         * Although this ping is just for debug, it could potentially be
         * used for getting a better measurement of downtime at the source.
         */
        qemu_savevm_send_ping(ms->to_dst_file, 4);
    }

    if (migrate_release_ram()) {
        ram_postcopy_migrated_memory_release(ms);
    }

    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_setg(errp, "postcopy_start: Migration stream errored");
        migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                          MIGRATION_STATUS_FAILED);
    }

    trace_postcopy_preempt_enabled(migrate_postcopy_preempt());

    return ret;

fail_closefb:
    qemu_fclose(fb);
fail:
    migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    if (restart_block) {
        /* A failure happened early enough that we know the destination hasn't
         * accessed block devices, so we're safe to recover.
         */
        Error *local_err = NULL;

        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
    qemu_mutex_unlock_iothread();
    return -1;
}

/**
 * migration_maybe_pause: Pause the migration if required to by
 * migrate_pause_before_switchover.  Called with the iothread locked.
 * Returns: 0 on success
 */
static int migration_maybe_pause(MigrationState *s,
                                 int *current_active_state,
                                 int new_state)
{
    if (!migrate_pause_before_switchover()) {
        return 0;
    }

    /* Since leaving this state is not atomic with posting the semaphore
     * it's possible that someone could have issued multiple migrate_continue
     * and the semaphore is incorrectly positive at this point;
     * the docs say it's undefined to reinit a semaphore that's already
     * init'd, so use timedwait to eat up any existing posts.
     */
    while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) {
        /* This block intentionally left blank */
    }

    /*
     * If the migration is cancelled when it is in the completion phase,
     * the migration state is set to MIGRATION_STATUS_CANCELLING.
     * So we don't need to wait a semaphore, otherwise we would always
     * wait for the 'pause_sem' semaphore.
     */
    if (s->state != MIGRATION_STATUS_CANCELLING) {
        qemu_mutex_unlock_iothread();
        migrate_set_state(&s->state, *current_active_state,
                          MIGRATION_STATUS_PRE_SWITCHOVER);
        qemu_sem_wait(&s->pause_sem);
        migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
                          new_state);
        *current_active_state = new_state;
        qemu_mutex_lock_iothread();
    }

    return s->state == new_state ? 0 : -EINVAL;
}
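
/*
 * Final precopy step: stop the VM (waking it first if it was suspended),
 * optionally pause for migrate-continue, inactivate block devices unless
 * COLO is in use, and flush the remaining device state at full speed.
 * Returns 0 on success, <0 on error; called with the iothread unlocked.
 */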
static int migration_completion_precopy(MigrationState *s,
                                        int *current_active_state)
{
    int ret;

    qemu_mutex_lock_iothread();
    s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);

    s->vm_old_state = runstate_get();
    global_state_store();

    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
    trace_migration_completion_vm_stop(ret);
    if (ret < 0) {
        goto out_unlock;
    }

    ret = migration_maybe_pause(s, current_active_state,
                                MIGRATION_STATUS_DEVICE);
    if (ret < 0) {
        goto out_unlock;
    }

    /*
     * Inactivate disks except in COLO, and track that we have done so in order
     * to remember to reactivate them if migration fails or is cancelled.
     */
    s->block_inactive = !migrate_colo();
    migration_rate_set(RATE_LIMIT_DISABLED);
    ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
                                             s->block_inactive);
out_unlock:
    qemu_mutex_unlock_iothread();
    return ret;
}

static void migration_completion_postcopy(MigrationState *s)
{
    trace_migration_completion_postcopy_end();

    qemu_mutex_lock_iothread();
    qemu_savevm_state_complete_postcopy(s->to_dst_file);
    qemu_mutex_unlock_iothread();

    /*
     * Shutdown the postcopy fast path thread.  This is only needed when dest
     * QEMU binary is old (7.1/7.2).  QEMU 8.0+ doesn't need this.
     */
    if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
        postcopy_preempt_shutdown_file(s);
    }

    trace_migration_completion_postcopy_end_after_complete();
}
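
/*
 * Roll back a failed completion: if the disks were inactivated while we
 * were still in ACTIVE/DEVICE state, reactivate them so the VM can run
 * locally again, then move the state machine to FAILED.
 */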
static void migration_completion_failed(MigrationState *s,
                                        int current_active_state)
{
    if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE ||
                              s->state == MIGRATION_STATUS_DEVICE)) {
        /*
         * If not doing postcopy, vm_start() will be called: let's
         * regain control on images.
         */
        Error *local_err = NULL;

        qemu_mutex_lock_iothread();
        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
        qemu_mutex_unlock_iothread();
    }

    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_FAILED);
}

/**
 * migration_completion: Used by migration_thread when there's not much left.
 *   The caller 'breaks' the loop when this returns.
 *
 * @s: Current migration state
 */
static void migration_completion(MigrationState *s)
{
    int ret = 0;
    int current_active_state = s->state;

    if (s->state == MIGRATION_STATUS_ACTIVE) {
        ret = migration_completion_precopy(s, &current_active_state);
    } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        migration_completion_postcopy(s);
    } else {
        /* CANCELLING and other states must never be switched to COMPLETED */
        ret = -1;
    }

    if (ret < 0) {
        goto fail;
    }

    if (close_return_path_on_source(s)) {
        goto fail;
    }

    if (qemu_file_get_error(s->to_dst_file)) {
        trace_migration_completion_file_err();
        goto fail;
    }

    if (migrate_colo() && s->state == MIGRATION_STATUS_ACTIVE) {
        /* COLO does not support postcopy */
        migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_COLO);
    } else {
        migrate_set_state(&s->state, current_active_state,
                          MIGRATION_STATUS_COMPLETED);
    }

    return;

fail:
    migration_completion_failed(s, current_active_state);
}

/**
 * bg_migration_completion: Used by bg_migration_thread when after all the
 *   RAM has been saved.  The caller 'breaks' the loop when this returns.
 *
 * @s: Current migration state
 */
static void bg_migration_completion(MigrationState *s)
{
    int current_active_state = s->state;

    if (s->state == MIGRATION_STATUS_ACTIVE) {
        /*
         * By this moment we have RAM content saved into the migration stream.
         * The next step is to flush the non-RAM content (device state)
         * right after the ram content.  The device state has been stored into
         * the temporary buffer before RAM saving started.
         */
        qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage);
        qemu_fflush(s->to_dst_file);
    } else if (s->state == MIGRATION_STATUS_CANCELLING) {
        goto fail;
    }

    if (qemu_file_get_error(s->to_dst_file)) {
        trace_migration_completion_file_err();
        goto fail;
    }

    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_COMPLETED);
    return;

fail:
    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_FAILED);
}

typedef enum MigThrError {
    /* No error detected */
    MIG_THR_ERR_NONE = 0,
    /* Detected error, but resumed successfully */
    MIG_THR_ERR_RECOVERED = 1,
    /* Detected fatal error, need to exit */
    MIG_THR_ERR_FATAL = 2,
} MigThrError;
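
/*
 * Final step of a postcopy recovery: tell the destination to resume and
 * wait on the return path until its RESUME-ACK moves us out of
 * POSTCOPY_RECOVER.  Returns 0 once we are POSTCOPY_ACTIVE again.
 */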
static int postcopy_resume_handshake(MigrationState *s)
{
    qemu_savevm_send_postcopy_resume(s->to_dst_file);

    while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
        migration_rp_wait(s);
    }

    if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        return 0;
    }

    return -1;
}

/* Return zero if success, or <0 for error */
static int postcopy_do_resume(MigrationState *s)
{
    int ret;

    /*
     * Call all the resume_prepare() hooks, so that modules can be
     * ready for the migration resume.
     */
    ret = qemu_savevm_state_resume_prepare(s);
    if (ret) {
        error_report("%s: resume_prepare() failure detected: %d",
                     __func__, ret);
        return ret;
    }

    /*
     * If preempt is enabled, re-establish the preempt channel.  Note that
     * we do it after resume prepare to make sure the main channel will be
     * created before the preempt channel.  E.g. with weak network, the
     * dest QEMU may get messed up with the preempt and main channels on
     * the order of connection setup.  This guarantees the correct order.
     */
    ret = postcopy_preempt_establish_channel(s);
    if (ret) {
        error_report("%s: postcopy_preempt_establish_channel(): %d",
                     __func__, ret);
        return ret;
    }

    /*
     * Last handshake with destination on the resume (destination will
     * switch to postcopy-active afterwards)
     */
    ret = postcopy_resume_handshake(s);
    if (ret) {
        error_report("%s: handshake failed: %d", __func__, ret);
        return ret;
    }

    return 0;
}

/*
 * We don't return until we are in a safe state to continue current
 * postcopy migration.  Returns MIG_THR_ERR_RECOVERED if recovered, or
 * MIG_THR_ERR_FATAL if an unrecoverable failure happened.
 */
static MigThrError postcopy_pause(MigrationState *s)
{
    assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);

    while (true) {
        QEMUFile *file;

        /*
         * Current channel is possibly broken.  Release it.  Note that this is
         * guaranteed even without lock because to_dst_file should only be
         * modified by the migration thread.  That also guarantees that the
         * unregister of yank is safe too without the lock.  It should be safe
         * even to be within the qemu_file_lock, but we didn't do that to avoid
         * taking more mutex (yank_lock) within qemu_file_lock.  TL;DR: we make
         * the qemu_file_lock critical section as small as possible.
         */
        assert(s->to_dst_file);
        migration_ioc_unregister_yank_from_file(s->to_dst_file);
        qemu_mutex_lock(&s->qemu_file_lock);
        file = s->to_dst_file;
        s->to_dst_file = NULL;
        qemu_mutex_unlock(&s->qemu_file_lock);

        qemu_file_shutdown(file);
        qemu_fclose(file);

        /*
         * We're already pausing, so ignore any errors on the return
         * path and just wait for the thread to finish.  It will be
         * re-created when we resume.
         */
        close_return_path_on_source(s);

        migrate_set_state(&s->state, s->state,
                          MIGRATION_STATUS_POSTCOPY_PAUSED);

        error_report("Detected IO failure for postcopy. "
                     "Migration paused.");

        /*
         * We wait until things fixed up.  Then someone will setup the
         * status back for us.
         */
        while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
            qemu_sem_wait(&s->postcopy_pause_sem);
        }

        if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
            /* Woken up by a recover procedure. Give it a shot */

            /* Do the resume logic */
            if (postcopy_do_resume(s) == 0) {
                /* Let's continue! */
                trace_postcopy_pause_continued();
                return MIG_THR_ERR_RECOVERED;
            } else {
                /*
                 * Something wrong happened during the recovery, let's
                 * pause again.  Pause is always better than throwing
                 * data away.
                 */
                continue;
            }
        } else {
            /* This is not right...  Time to quit. */
            return MIG_THR_ERR_FATAL;
        }
    }
}
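
/*
 * Check both migration streams for errors and decide how the migration
 * thread should react: keep going, enter the postcopy pause/recover
 * logic, or give up entirely.
 */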
static MigThrError migration_detect_error(MigrationState *s)
{
    int ret;
    int state = s->state;
    Error *local_error = NULL;

    if (state == MIGRATION_STATUS_CANCELLING ||
        state == MIGRATION_STATUS_CANCELLED) {
        /* End the migration, but don't set the state to failed */
        return MIG_THR_ERR_FATAL;
    }

    /*
     * Try to detect any file errors.  Note that postcopy_qemufile_src will
     * be NULL when postcopy preempt is not enabled.
     */
    ret = qemu_file_get_error_obj_any(s->to_dst_file,
                                      s->postcopy_qemufile_src,
                                      &local_error);
    if (!ret) {
        /* Everything is fine */
        assert(!local_error);
        return MIG_THR_ERR_NONE;
    }

    if (local_error) {
        migrate_set_error(s, local_error);
        error_free(local_error);
    }

    if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret) {
        /*
         * For postcopy, we allow the network to be down for a
         * while.  After that, it can be continued by a
         * recovery phase.
         */
        return postcopy_pause(s);
    } else {
        /*
         * For precopy (or postcopy with error outside IO), we fail
         * with no time.
         */
        migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED);
        trace_migration_thread_file_err();

        /* Time to stop the migration, now. */
        return MIG_THR_ERR_FATAL;
    }
}
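
/*
 * Fill in the final statistics (total time, downtime, MBps) once the
 * migration has reached a terminal state.
 */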
static void migration_calculate_complete(MigrationState *s)
{
    uint64_t bytes = migration_transferred_bytes(s->to_dst_file);
    int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t transfer_time;

    s->total_time = end_time - s->start_time;
    if (!s->downtime) {
        /*
         * It's still not set, so we are precopy migration.  For
         * postcopy, downtime is calculated during postcopy_start().
         */
        s->downtime = end_time - s->downtime_start;
    }

    transfer_time = s->total_time - s->setup_time;
    if (transfer_time) {
        s->mbps = ((double) bytes * 8.0) / transfer_time / 1000;
    }
}

static void update_iteration_initial_status(MigrationState *s)
{
    /*
     * Update these three fields at the same time to avoid a mismatch
     * between them leading to a wrong speed calculation.
     */
    s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    s->iteration_initial_bytes = migration_transferred_bytes(s->to_dst_file);
    s->iteration_initial_pages = ram_get_total_transferred_pages();
}
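
/*
 * Recompute the per-iteration transfer statistics and the switchover
 * threshold.  When the user has set avail-switchover-bandwidth we trust
 * that value for the threshold, since the measured bandwidth can be far
 * too low while the migration thread is busy with work other than
 * sending (e.g. scanning zero pages); otherwise we use the estimate.
 */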
static void migration_update_counters(MigrationState *s,
                                      int64_t current_time)
{
    uint64_t transferred, transferred_pages, time_spent;
    uint64_t current_bytes; /* bytes transferred since the beginning */
    uint64_t switchover_bw;
    /* Expected bandwidth when switching over to destination QEMU */
    double expected_bw_per_ms;
    double bandwidth;

    if (current_time < s->iteration_start_time + BUFFER_DELAY) {
        return;
    }

    switchover_bw = migrate_avail_switchover_bandwidth();
    current_bytes = migration_transferred_bytes(s->to_dst_file);
    transferred = current_bytes - s->iteration_initial_bytes;
    time_spent = current_time - s->iteration_start_time;
    bandwidth = (double)transferred / time_spent;

    if (switchover_bw) {
        /*
         * If the user specified a switchover bandwidth, let's trust the
         * user so that can be more accurate than what we estimated.
         */
        expected_bw_per_ms = switchover_bw / 1000;
    } else {
        /* If the user doesn't specify bandwidth, we use the estimated */
        expected_bw_per_ms = bandwidth;
    }

    s->threshold_size = expected_bw_per_ms * migrate_downtime_limit();

    s->mbps = (((double) transferred * 8.0) /
               ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;

    transferred_pages = ram_get_total_transferred_pages() -
                        s->iteration_initial_pages;
    s->pages_per_second = (double) transferred_pages /
                          (((double) time_spent / 1000.0));

    /*
     * if we haven't sent anything, we don't want to
     * recalculate. 10000 is a small enough number for our purposes
     */
    if (stat64_get(&mig_stats.dirty_pages_rate) &&
        transferred > 10000) {
        s->expected_downtime =
            stat64_get(&mig_stats.dirty_bytes_last_sync) / expected_bw_per_ms;
    }

    migration_rate_reset(s->to_dst_file);

    update_iteration_initial_status(s);

    trace_migrate_transferred(transferred, time_spent,
                              /* Both in unit bytes/ms */
                              bandwidth, switchover_bw / 1000,
                              s->threshold_size);
}
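
/*
 * Whether switchover to the destination may happen now.  Always true
 * unless the switchover-ack capability is in use and the (still running)
 * VM has not yet been acknowledged by the destination.
 */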
static bool migration_can_switchover(MigrationState *s)
{
    if (!migrate_switchover_ack()) {
        return true;
    }

    /* No reason to wait for switchover ACK if VM is stopped */
    if (!runstate_is_running()) {
        return true;
    }

    return s->switchover_acked;
}

/* Migration thread iteration status */
typedef enum {
    MIG_ITERATE_RESUME, /* Resume current iteration */
    MIG_ITERATE_SKIP,   /* Skip current iteration */
    MIG_ITERATE_BREAK,  /* Break the loop */
} MigIterateState;

/*
 * Run a single iteration of the migration loop and work out what to do
 * next: resume iterating, skip this round, or break out entirely.
 */
static MigIterateState migration_iteration_run(MigrationState *s)
{
    uint64_t must_precopy, can_postcopy;
    Error *local_err = NULL;
    bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;
    bool can_switchover = migration_can_switchover(s);

    qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy);
    uint64_t pending_size = must_precopy + can_postcopy;

    trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy);

    if (must_precopy <= s->threshold_size) {
        qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy);
        pending_size = must_precopy + can_postcopy;
        trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy);
    }

    if ((!pending_size || pending_size < s->threshold_size) && can_switchover) {
        trace_migration_thread_low_pending(pending_size);
        migration_completion(s);
        return MIG_ITERATE_BREAK;
    }

    /* Still a significant amount to transfer */
    if (!in_postcopy && must_precopy <= s->threshold_size && can_switchover &&
        qatomic_read(&s->start_postcopy)) {
        if (postcopy_start(s, &local_err)) {
            migrate_set_error(s, local_err);
            error_report_err(local_err);
        }
        return MIG_ITERATE_SKIP;
    }

    /* Just another iteration step */
    qemu_savevm_state_iterate(s->to_dst_file, in_postcopy);
    return MIG_ITERATE_RESUME;
}
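
/*
 * Tear-down after the migration thread leaves its main loop: stop any
 * auto-converge throttling, restart or restore the VM run state
 * according to how the migration ended, and schedule the fd cleanup.
 */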
static void migration_iteration_finish(MigrationState *s)
{
    /* If we enabled cpu throttling for auto-converge, turn it off. */
    cpu_throttle_stop();

    qemu_mutex_lock_iothread();
    switch (s->state) {
    case MIGRATION_STATUS_COMPLETED:
        migration_calculate_complete(s);
        runstate_set(RUN_STATE_POSTMIGRATE);
        break;
    case MIGRATION_STATUS_COLO:
        assert(migrate_colo());
        migrate_start_colo_process(s);
        s->vm_old_state = RUN_STATE_RUNNING;
        /* Fallthrough */
    case MIGRATION_STATUS_FAILED:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_CANCELLING:
        if (s->vm_old_state == RUN_STATE_RUNNING) {
            if (!runstate_check(RUN_STATE_SHUTDOWN)) {
                vm_start();
            }
        } else {
            if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
                runstate_set(s->vm_old_state);
            }
        }
        break;

    default:
        /* Should not reach here, but if so, forgive the VM. */
        error_report("%s: Unknown ending state %d", __func__, s->state);
        break;
    }
    migrate_fd_cleanup_schedule(s);
    qemu_mutex_unlock_iothread();
}
|
|
|
|
|
2021-01-29 11:14:06 +01:00
|
|
|
static void bg_migration_iteration_finish(MigrationState *s)
|
|
|
|
{
|
2023-05-26 13:59:08 +02:00
|
|
|
/*
|
|
|
|
* Stop tracking RAM writes - un-protect memory, un-register UFFD
|
|
|
|
* memory ranges, flush kernel wait queues and wake up threads
|
|
|
|
* waiting for write fault to be resolved.
|
|
|
|
*/
|
|
|
|
ram_write_tracking_stop();
|
|
|
|
|
2021-01-29 11:14:06 +01:00
|
|
|
qemu_mutex_lock_iothread();
|
|
|
|
switch (s->state) {
|
|
|
|
case MIGRATION_STATUS_COMPLETED:
|
|
|
|
migration_calculate_complete(s);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case MIGRATION_STATUS_ACTIVE:
|
|
|
|
case MIGRATION_STATUS_FAILED:
|
|
|
|
case MIGRATION_STATUS_CANCELLED:
|
|
|
|
case MIGRATION_STATUS_CANCELLING:
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
/* Should not reach here, but if so, forgive the VM. */
|
|
|
|
error_report("%s: Unknown ending state %d", __func__, s->state);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
migrate_fd_cleanup_schedule(s);
|
|
|
|
qemu_mutex_unlock_iothread();
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return MIG_ITERATE_RESUME to continue directly to the next iteration;
|
|
|
|
* any other state otherwise.
|
|
|
|
*/
|
|
|
|
static MigIterateState bg_migration_iteration_run(MigrationState *s)
|
|
|
|
{
|
|
|
|
int res;
|
|
|
|
|
|
|
|
res = qemu_savevm_state_iterate(s->to_dst_file, false);
|
|
|
|
if (res > 0) {
|
|
|
|
bg_migration_completion(s);
|
|
|
|
return MIG_ITERATE_BREAK;
|
|
|
|
}
|
|
|
|
|
|
|
|
return MIG_ITERATE_RESUME;
|
|
|
|
}
|
|
|
|
|
2018-06-13 12:26:41 +02:00
|
|
|
void migration_make_urgent_request(void)
|
|
|
|
{
|
|
|
|
qemu_sem_post(&migrate_get_current()->rate_limit_sem);
|
|
|
|
}
|
|
|
|
|
|
|
|
void migration_consume_urgent_request(void)
|
|
|
|
{
|
|
|
|
qemu_sem_wait(&migrate_get_current()->rate_limit_sem);
|
|
|
|
}
|
|
|
|
|
2019-12-05 11:29:18 +01:00
|
|
|
/* Returns true if the rate limiting was broken by an urgent request */
|
|
|
|
bool migration_rate_limit(void)
|
|
|
|
{
|
|
|
|
int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
|
|
|
|
MigrationState *s = migrate_get_current();
|
|
|
|
|
|
|
|
bool urgent = false;
|
|
|
|
migration_update_counters(s, now);
|
2023-05-15 21:56:58 +02:00
|
|
|
if (migration_rate_exceeded(s->to_dst_file)) {
|
2020-05-20 22:42:32 +02:00
|
|
|
|
|
|
|
if (qemu_file_get_error(s->to_dst_file)) {
|
|
|
|
return false;
|
|
|
|
}
|
2019-12-05 11:29:18 +01:00
|
|
|
/*
|
|
|
|
* Wait for a delay to do rate limiting OR
|
|
|
|
* something urgent to post the semaphore.
|
|
|
|
*/
|
|
|
|
int ms = s->iteration_start_time + BUFFER_DELAY - now;
|
|
|
|
trace_migration_rate_limit_pre(ms);
|
|
|
|
if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
|
|
|
|
/*
|
|
|
|
* We were woken by one or more urgent things but
|
|
|
|
* the timedwait will have consumed one of them.
|
|
|
|
* The service routine for the urgent wake will decrement
|
|
|
|
* the semaphore itself for each item it consumes,
|
|
|
|
* so add back the one we just consumed.
|
|
|
|
*/
|
|
|
|
qemu_sem_post(&s->rate_limit_sem);
|
|
|
|
urgent = true;
|
|
|
|
}
|
|
|
|
trace_migration_rate_limit_post(urgent);
|
|
|
|
}
|
|
|
|
return urgent;
|
|
|
|
}
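/*
 * A minimal, self-contained sketch of the interruptible rate-limit
 * sleep implemented by migration_rate_limit() above, using plain
 * POSIX semaphores in place of qemu_sem_*.  All names here are
 * hypothetical and only illustrate the pattern: the worker sleeps in
 * sem_timedwait(), an urgent request from another thread posts the
 * semaphore to cut the sleep short, and the token just consumed is
 * posted back so the urgent-request consumer still sees it.
 */
#include <semaphore.h>
#include <stdbool.h>
#include <time.h>

static sem_t example_urgent_sem;

static void example_init(void)
{
    sem_init(&example_urgent_sem, 0, 0);
}

/* Called from any thread that needs the worker to run immediately. */
static void example_make_urgent_request(void)
{
    sem_post(&example_urgent_sem);
}

/* Returns true if the sleep was broken by an urgent request. */
static bool example_rate_limit_sleep(int delay_ms)
{
    struct timespec ts;

    clock_gettime(CLOCK_REALTIME, &ts);
    ts.tv_sec += delay_ms / 1000;
    ts.tv_nsec += (long)(delay_ms % 1000) * 1000000L;
    if (ts.tv_nsec >= 1000000000L) {
        ts.tv_sec++;
        ts.tv_nsec -= 1000000000L;
    }

    if (sem_timedwait(&example_urgent_sem, &ts) == 0) {
        /* Woken early: put the consumed token back, as above. */
        sem_post(&example_urgent_sem);
        return true;
    }
    return false; /* timed out: the normal rate-limit delay elapsed */
}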
|
|
|
|
|
2021-06-29 17:50:06 +02:00
|
|
|
/*
|
|
|
|
* If failover devices are present, wait until they are
|
|
|
|
* completely unplugged.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static void qemu_savevm_wait_unplug(MigrationState *s, int old_state,
|
|
|
|
int new_state)
|
|
|
|
{
|
|
|
|
if (qemu_savevm_state_guest_unplug_pending()) {
|
|
|
|
migrate_set_state(&s->state, old_state, MIGRATION_STATUS_WAIT_UNPLUG);
|
|
|
|
|
|
|
|
while (s->state == MIGRATION_STATUS_WAIT_UNPLUG &&
|
|
|
|
qemu_savevm_state_guest_unplug_pending()) {
|
|
|
|
qemu_sem_timedwait(&s->wait_unplug_sem, 250);
|
|
|
|
}
|
2021-06-29 17:50:07 +02:00
|
|
|
if (s->state != MIGRATION_STATUS_WAIT_UNPLUG) {
|
|
|
|
int timeout = 120; /* 30 seconds */
|
|
|
|
/*
|
|
|
|
* Migration has been canceled, but as we have started an
|
|
|
|
* unplug we must wait for it to complete to be able to plug
|
|
|
|
* the card back.
|
|
|
|
*/
|
|
|
|
while (timeout-- && qemu_savevm_state_guest_unplug_pending()) {
|
|
|
|
qemu_sem_timedwait(&s->wait_unplug_sem, 250);
|
|
|
|
}
|
2021-12-20 15:53:14 +01:00
|
|
|
if (qemu_savevm_state_guest_unplug_pending() &&
|
|
|
|
!qtest_enabled()) {
|
2021-07-01 15:14:58 +02:00
|
|
|
warn_report("migration: partially unplugged device on "
|
|
|
|
"failure");
|
|
|
|
}
|
2021-06-29 17:50:07 +02:00
|
|
|
}
|
2021-06-29 17:50:06 +02:00
|
|
|
|
|
|
|
migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state);
|
|
|
|
} else {
|
|
|
|
migrate_set_state(&s->state, old_state, new_state);
|
|
|
|
}
|
|
|
|
}
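/*
 * Sketch of the bounded-poll idiom used by qemu_savevm_wait_unplug()
 * above (hypothetical, self-contained): wait for a condition in
 * 250 ms slices and give up after 120 slices, i.e. a total cap of
 * 120 * 250 ms = 30 s, matching the "int timeout = 120" comment.
 */
#include <semaphore.h>
#include <stdbool.h>
#include <time.h>

static bool example_wait_unplug_bounded(sem_t *sem,
                                        bool (*still_pending)(void))
{
    int timeout = 120; /* 120 slices of 250 ms = 30 seconds */
    struct timespec ts;

    while (timeout-- && still_pending()) {
        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_nsec += 250 * 1000000L; /* one 250 ms slice */
        if (ts.tv_nsec >= 1000000000L) {
            ts.tv_sec++;
            ts.tv_nsec -= 1000000000L;
        }
        sem_timedwait(sem, &ts); /* a timeout here is expected */
    }
    return !still_pending(); /* true once the unplug completed */
}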
|
|
|
|
|
2015-11-05 19:10:49 +01:00
|
|
|
/*
|
|
|
|
* Master migration thread on the source VM.
|
|
|
|
* It drives the migration and pumps the data down the outgoing channel.
|
|
|
|
*/
|
2013-02-22 17:36:30 +01:00
|
|
|
static void *migration_thread(void *opaque)
|
2012-10-03 14:18:33 +02:00
|
|
|
{
|
2012-12-19 09:55:50 +01:00
|
|
|
MigrationState *s = opaque;
|
2023-02-03 08:35:19 +01:00
|
|
|
MigrationThread *thread = NULL;
|
2013-08-21 17:03:08 +02:00
|
|
|
int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
|
2018-05-02 12:47:19 +02:00
|
|
|
MigThrError thr_error;
|
2018-06-13 12:26:41 +02:00
|
|
|
bool urgent = false;
|
2012-10-03 20:16:24 +02:00
|
|
|
|
2023-06-07 18:13:04 +02:00
|
|
|
thread = migration_threads_add("live_migration", qemu_get_thread_id());
|
2023-02-03 08:35:19 +01:00
|
|
|
|
2015-07-09 08:55:38 +02:00
|
|
|
rcu_register_thread();
|
|
|
|
|
2019-02-27 17:49:00 +01:00
|
|
|
object_ref(OBJECT(s));
|
2019-08-02 12:18:41 +02:00
|
|
|
update_iteration_initial_status(s);
|
2018-01-03 13:20:13 +01:00
|
|
|
|
migration: hold the BQL during setup
This is intended to be a semantic revert of commit 9b09503752
("migration: run setup callbacks out of big lock"). There have been so
many changes since that commit (e.g. a new setup callback
dirty_bitmap_save_setup() that also needs to be adapted now), it's
easier to do the revert manually.
For snapshots, the bdrv_writev_vmstate() function is used during setup
(in QIOChannelBlock backing the QEMUFile), but not holding the BQL
while calling it could lead to an assertion failure. To understand
how, first note the following:
1. Generated coroutine wrappers for block layer functions spawn the
coroutine and use AIO_WAIT_WHILE()/aio_poll() to wait for it.
2. If the host OS switches threads at an inconvenient time, it can
happen that a bottom half scheduled for the main thread's AioContext
is executed as part of a vCPU thread's aio_poll().
An example leading to the assertion failure is as follows:
main thread:
1. A snapshot-save QMP command gets issued.
2. snapshot_save_job_bh() is scheduled.
vCPU thread:
3. aio_poll() for the main thread's AioContext is called (e.g. when
the guest writes to a pflash device, as part of blk_pwrite which is a
generated coroutine wrapper).
4. snapshot_save_job_bh() is executed as part of aio_poll().
5. qemu_savevm_state() is called.
6. qemu_mutex_unlock_iothread() is called. Now
qemu_get_current_aio_context() returns 0x0.
7. bdrv_writev_vmstate() is executed during the usual savevm setup
via qemu_fflush(). But this function is a generated coroutine wrapper,
so it uses AIO_WAIT_WHILE. There, the assertion
assert(qemu_get_current_aio_context() == qemu_get_aio_context());
will fail.
To fix it, ensure that the BQL is held during setup. While it would
only be needed for snapshots, adapting migration too avoids additional
logic for conditional locking/unlocking in the setup callbacks.
Writing the header could (in theory) also trigger qemu_fflush() and
thus bdrv_writev_vmstate(), so the locked section also covers the
qemu_savevm_state_header() call, even for migration for consistency.
The section around multifd_send_sync_main() needs to be unlocked to
avoid a deadlock. In particular, the multifd_save_setup() function calls
socket_send_channel_create() using multifd_new_send_channel_async() as a
callback and then waits for the callback to signal via the
channels_ready semaphore. The connection happens via
qio_task_run_in_thread(), but the callback is only executed via
qio_task_thread_result() which is scheduled for the main event loop.
Without unlocking the section, the main thread would never get to
process the task result and run the callback, meaning there would be no
signal via the channels_ready semaphore.
The comment in ram_init_bitmaps() was introduced by 4987783400
("migration: fix incorrect memory_global_dirty_log_start outside BQL")
and is removed, because it referred to the qemu_mutex_lock_iothread()
call.
Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
Reviewed-by: Fabiano Rosas <farosas@suse.de>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Message-ID: <20231013105839.415989-1-f.ebner@proxmox.com>
2023-10-13 12:58:39 +02:00
|
|
|
qemu_mutex_lock_iothread();
|
2016-01-15 04:37:42 +01:00
|
|
|
qemu_savevm_state_header(s->to_dst_file);
|
2023-10-13 12:58:39 +02:00
|
|
|
qemu_mutex_unlock_iothread();
|
2015-11-05 19:11:05 +01:00
|
|
|
|
2017-06-14 09:55:58 +02:00
|
|
|
/*
|
|
|
|
* If we opened the return path, we need to make sure dst has it
|
|
|
|
* opened as well.
|
|
|
|
*/
|
2021-07-22 19:58:38 +02:00
|
|
|
if (s->rp_state.rp_thread_created) {
|
2015-11-05 19:11:05 +01:00
|
|
|
/* Now tell the dest that it should open its end so it can reply */
|
2016-01-15 04:37:42 +01:00
|
|
|
qemu_savevm_send_open_return_path(s->to_dst_file);
|
2015-11-05 19:11:05 +01:00
|
|
|
|
|
|
|
/* And do a ping that will make stuff easier to debug */
|
2016-01-15 04:37:42 +01:00
|
|
|
qemu_savevm_send_ping(s->to_dst_file, 1);
|
2017-05-31 12:35:34 +02:00
|
|
|
}
|
2015-11-05 19:11:05 +01:00
|
|
|
|
2017-07-10 18:30:16 +02:00
|
|
|
if (migrate_postcopy()) {
|
2015-11-05 19:11:05 +01:00
|
|
|
/*
|
|
|
|
* Tell the destination that we *might* want to do postcopy later;
|
|
|
|
* if the other end can't do postcopy it should fail now, nice and
|
|
|
|
* early.
|
|
|
|
*/
|
2016-01-15 04:37:42 +01:00
|
|
|
qemu_savevm_send_postcopy_advise(s->to_dst_file);
|
2015-11-05 19:11:05 +01:00
|
|
|
}
|
|
|
|
|
2023-03-01 22:00:16 +01:00
|
|
|
if (migrate_colo()) {
|
2018-09-03 06:38:47 +02:00
|
|
|
/* Notify migration destination that we enable COLO */
|
|
|
|
qemu_savevm_send_colo_enable(s->to_dst_file);
|
|
|
|
}
|
|
|
|
|
2023-10-13 12:58:39 +02:00
|
|
|
qemu_mutex_lock_iothread();
|
2017-06-28 11:52:24 +02:00
|
|
|
qemu_savevm_state_setup(s->to_dst_file);
|
2023-10-13 12:58:39 +02:00
|
|
|
qemu_mutex_unlock_iothread();
|
2012-10-03 14:18:33 +02:00
|
|
|
|
2021-06-29 17:50:06 +02:00
|
|
|
qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
|
|
|
|
MIGRATION_STATUS_ACTIVE);
|
2019-10-29 12:49:02 +01:00
|
|
|
|
2013-08-21 17:03:08 +02:00
|
|
|
s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
|
rdma: introduce MIG_STATE_NONE and change MIG_STATE_SETUP state transition
As described in the previous patch, until now, the MIG_STATE_SETUP
state was not really a 'formal' state. It has been used as a 'zero' state
(what we're calling 'NONE' here) and QEMU has been unconditionally transitioning
into this state when the QMP migration command was called. Instead we want to
introduce MIG_STATE_NONE, which is our starting state in the state machine, and
then immediately transition into the MIG_STATE_SETUP state when the QMP migrate
command is issued.
In order to do this, we must delay the transition into MIG_STATE_ACTIVE until
later in the migration_thread(). This is done to be able to timestamp the amount of
time spent in the SETUP state for proper accounting to the user during
an RDMA migration.
Furthermore, the management software, until now, has never been aware of the
existence of the SETUP state whatsoever. This must change, because, timing of this
state implies that the state actually exists.
These two patches cannot be separated because the 'query_migrate' QMP
switch statement needs to know how to handle this new state transition.
Reviewed-by: Juan Quintela <quintela@redhat.com>
Tested-by: Michael R. Hines <mrhines@us.ibm.com>
Signed-off-by: Michael R. Hines <mrhines@us.ibm.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
2013-07-22 16:01:57 +02:00
|
|
|
|
2015-11-05 19:10:58 +01:00
|
|
|
trace_migration_thread_setup_complete();
|
|
|
|
|
2019-07-17 02:53:41 +02:00
|
|
|
while (migration_is_active(s)) {
|
2023-05-15 21:56:58 +02:00
|
|
|
if (urgent || !migration_rate_exceeded(s->to_dst_file)) {
|
2018-01-03 13:20:14 +01:00
|
|
|
MigIterateState iter_state = migration_iteration_run(s);
|
|
|
|
if (iter_state == MIG_ITERATE_SKIP) {
|
|
|
|
continue;
|
|
|
|
} else if (iter_state == MIG_ITERATE_BREAK) {
|
2015-08-13 12:51:31 +02:00
|
|
|
break;
|
2012-10-03 20:33:34 +02:00
|
|
|
}
|
|
|
|
}
|
2013-02-22 17:36:20 +01:00
|
|
|
|
2018-05-02 12:47:19 +02:00
|
|
|
/*
|
|
|
|
* Try to detect any kind of failures, and see whether we
|
|
|
|
* should stop the migration now.
|
|
|
|
*/
|
|
|
|
thr_error = migration_detect_error(s);
|
|
|
|
if (thr_error == MIG_THR_ERR_FATAL) {
|
|
|
|
/* Stop migration */
|
2013-02-22 17:36:33 +01:00
|
|
|
break;
|
2018-05-02 12:47:19 +02:00
|
|
|
} else if (thr_error == MIG_THR_ERR_RECOVERED) {
|
|
|
|
/*
|
|
|
|
* Just recovered from, e.g., a network failure; reset all
|
|
|
|
* the local variables. This is important to avoid
|
|
|
|
* breaking transferred_bytes and bandwidth calculation
|
|
|
|
*/
|
2019-08-02 12:18:41 +02:00
|
|
|
update_iteration_initial_status(s);
|
2013-02-22 17:36:33 +01:00
|
|
|
}
|
2018-01-03 13:20:13 +01:00
|
|
|
|
2019-12-05 11:29:18 +01:00
|
|
|
urgent = migration_rate_limit();
|
2013-02-22 17:36:18 +01:00
|
|
|
}
|
|
|
|
|
2015-11-05 19:11:05 +01:00
|
|
|
trace_migration_thread_after_loop();
|
2018-01-03 13:20:15 +01:00
|
|
|
migration_iteration_finish(s);
|
2019-02-27 17:49:00 +01:00
|
|
|
object_unref(OBJECT(s));
|
2015-07-09 08:55:38 +02:00
|
|
|
rcu_unregister_thread();
|
2023-06-07 18:13:04 +02:00
|
|
|
migration_threads_remove(thread);
|
2012-10-03 14:18:33 +02:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2021-01-29 11:14:06 +01:00
|
|
|
static void bg_migration_vm_start_bh(void *opaque)
|
|
|
|
{
|
|
|
|
MigrationState *s = opaque;
|
|
|
|
|
|
|
|
qemu_bh_delete(s->vm_start_bh);
|
|
|
|
s->vm_start_bh = NULL;
|
|
|
|
|
|
|
|
vm_start();
|
|
|
|
s->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - s->downtime_start;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Background snapshot thread, based on live migration code.
|
|
|
|
* This is an alternative implementation of live migration mechanism
|
|
|
|
* introduced specifically to support background snapshots.
|
|
|
|
*
|
|
|
|
* It takes advantage of userfault_fd write protection mechanism introduced
|
|
|
|
* in v5.7 kernel. Compared to the existing dirty page logging migration,
|
|
|
|
* much less stream traffic is produced, resulting in smaller snapshot
|
|
|
|
* images, simply because no duplicate pages can get into the stream.
|
|
|
|
*
|
|
|
|
* Another key point is that the generated vmstate stream reflects the
|
|
|
|
* machine state 'frozen' at the beginning of snapshot creation, whereas
|
|
|
|
* with the dirty page logging mechanism the saved snapshot effectively
|
|
|
|
* reflects the state of the VM at the end of the process.
|
|
|
|
*/
|
|
|
|
static void *bg_migration_thread(void *opaque)
|
|
|
|
{
|
|
|
|
MigrationState *s = opaque;
|
|
|
|
int64_t setup_start;
|
|
|
|
MigThrError thr_error;
|
|
|
|
QEMUFile *fb;
|
|
|
|
bool early_fail = true;
|
|
|
|
|
|
|
|
rcu_register_thread();
|
|
|
|
object_ref(OBJECT(s));
|
|
|
|
|
2023-05-15 21:56:58 +02:00
|
|
|
migration_rate_set(RATE_LIMIT_DISABLED);
|
2021-01-29 11:14:06 +01:00
|
|
|
|
|
|
|
setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
|
|
|
|
/*
|
|
|
|
* We want to save vmstate for the moment when migration has been
|
|
|
|
* initiated but also we want to save RAM content while VM is running.
|
|
|
|
* The RAM content should appear first in the vmstate. So, we first
|
|
|
|
* stash the non-RAM part of the vmstate to the temporary buffer,
|
|
|
|
* then write RAM part of the vmstate to the migration stream
|
|
|
|
* with vCPUs running and, finally, write stashed non-RAM part of
|
|
|
|
* the vmstate from the buffer to the migration stream.
|
|
|
|
*/
|
2021-04-01 11:22:23 +02:00
|
|
|
s->bioc = qio_channel_buffer_new(512 * 1024);
|
2021-01-29 11:14:06 +01:00
|
|
|
qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer");
|
2022-06-20 13:02:05 +02:00
|
|
|
fb = qemu_file_new_output(QIO_CHANNEL(s->bioc));
|
2021-01-29 11:14:06 +01:00
|
|
|
object_unref(OBJECT(s->bioc));
|
|
|
|
|
|
|
|
update_iteration_initial_status(s);
|
|
|
|
|
2021-04-01 11:22:25 +02:00
|
|
|
/*
|
|
|
|
* Prepare for tracking memory writes with UFFD-WP - populate
|
|
|
|
* RAM pages before protecting.
|
|
|
|
*/
|
|
|
|
#ifdef __linux__
|
|
|
|
ram_write_tracking_prepare();
|
|
|
|
#endif
|
|
|
|
|
2023-10-13 12:58:39 +02:00
|
|
|
qemu_mutex_lock_iothread();
|
2021-01-29 11:14:06 +01:00
|
|
|
qemu_savevm_state_header(s->to_dst_file);
|
|
|
|
qemu_savevm_state_setup(s->to_dst_file);
|
2023-10-13 12:58:39 +02:00
|
|
|
qemu_mutex_unlock_iothread();
|
2021-01-29 11:14:06 +01:00
|
|
|
|
2021-06-29 17:50:06 +02:00
|
|
|
qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
|
|
|
|
MIGRATION_STATUS_ACTIVE);
|
2021-01-29 11:14:06 +01:00
|
|
|
|
|
|
|
s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
|
|
|
|
|
|
|
|
trace_migration_thread_setup_complete();
|
|
|
|
s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
|
|
|
|
|
|
|
|
qemu_mutex_lock_iothread();
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the VM is currently in a suspended state, then, to make a valid
|
|
|
|
* runstate transition in vm_stop_force_state(), we need to wake it up.
|
|
|
|
*/
|
|
|
|
qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
|
2023-05-17 14:37:51 +02:00
|
|
|
s->vm_old_state = runstate_get();
|
2021-01-29 11:14:06 +01:00
|
|
|
|
2023-05-17 14:37:49 +02:00
|
|
|
global_state_store();
|
2021-01-29 11:14:06 +01:00
|
|
|
/* Forcibly stop VM before saving state of vCPUs and devices */
|
|
|
|
if (vm_stop_force_state(RUN_STATE_PAUSED)) {
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Put vCPUs in sync with shadow context structures, then
|
|
|
|
* save their state to channel-buffer along with devices.
|
|
|
|
*/
|
|
|
|
cpu_synchronize_all_states();
|
|
|
|
if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) {
|
|
|
|
goto fail;
|
|
|
|
}
|
2021-04-01 11:22:23 +02:00
|
|
|
/*
|
|
|
|
* Since we are going to get non-iterable state data directly
|
|
|
|
* from s->bioc->data, explicit flush is needed here.
|
|
|
|
*/
|
|
|
|
qemu_fflush(fb);
|
|
|
|
|
2021-01-29 11:14:06 +01:00
|
|
|
/* Now initialize UFFD context and start tracking RAM writes */
|
|
|
|
if (ram_write_tracking_start()) {
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
early_fail = false;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Start VM from BH handler to avoid write-fault lock here.
|
|
|
|
* UFFD-WP protection for the whole RAM is already enabled so
|
|
|
|
* calling VM state change notifiers from vm_start() would initiate
|
|
|
|
* writes to virtio VQs memory which is in write-protected region.
|
|
|
|
*/
|
|
|
|
s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s);
|
|
|
|
qemu_bh_schedule(s->vm_start_bh);
|
|
|
|
|
|
|
|
qemu_mutex_unlock_iothread();
|
|
|
|
|
|
|
|
while (migration_is_active(s)) {
|
|
|
|
MigIterateState iter_state = bg_migration_iteration_run(s);
|
|
|
|
if (iter_state == MIG_ITERATE_SKIP) {
|
|
|
|
continue;
|
|
|
|
} else if (iter_state == MIG_ITERATE_BREAK) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Try to detect any kind of failures, and see whether we
|
|
|
|
* should stop the migration now.
|
|
|
|
*/
|
|
|
|
thr_error = migration_detect_error(s);
|
|
|
|
if (thr_error == MIG_THR_ERR_FATAL) {
|
|
|
|
/* Stop migration */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
migration_update_counters(s, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_migration_thread_after_loop();
|
|
|
|
|
|
|
|
fail:
|
|
|
|
if (early_fail) {
|
|
|
|
migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
|
|
|
|
MIGRATION_STATUS_FAILED);
|
|
|
|
qemu_mutex_unlock_iothread();
|
|
|
|
}
|
|
|
|
|
|
|
|
bg_migration_iteration_finish(s);
|
|
|
|
|
|
|
|
qemu_fclose(fb);
|
|
|
|
object_unref(OBJECT(s));
|
|
|
|
rcu_unregister_thread();
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
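/*
 * A minimal sketch of the userfault_fd write-protection setup that
 * the background snapshot described above relies on (Linux >= 5.7).
 * This is an illustration of the kernel API only, not the actual
 * ram_write_tracking_start() implementation; error handling is
 * reduced to the bare minimum and the function name is hypothetical.
 */
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_uffd_wp_start(void *addr, size_t len)
{
    struct uffdio_api api = { .api = UFFD_API };
    struct uffdio_register reg = {
        .range = { .start = (unsigned long)addr, .len = len },
        .mode = UFFDIO_REGISTER_MODE_WP,
    };
    struct uffdio_writeprotect wp = {
        .range = { .start = (unsigned long)addr, .len = len },
        .mode = UFFDIO_WRITEPROTECT_MODE_WP,
    };
    int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

    if (uffd < 0 ||
        ioctl(uffd, UFFDIO_API, &api) < 0 ||
        ioctl(uffd, UFFDIO_REGISTER, &reg) < 0 ||    /* track the range */
        ioctl(uffd, UFFDIO_WRITEPROTECT, &wp) < 0) { /* arm WP faults */
        return -1;
    }
    return uffd; /* poll this fd for write-fault events */
}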
|
|
|
|
|
2017-12-15 18:16:54 +01:00
|
|
|
void migrate_fd_connect(MigrationState *s, Error *error_in)
|
2012-10-03 14:18:33 +02:00
|
|
|
{
|
2019-06-12 11:33:27 +02:00
|
|
|
Error *local_err = NULL;
|
2023-05-04 13:38:33 +02:00
|
|
|
uint64_t rate_limit;
|
2018-05-02 12:47:24 +02:00
|
|
|
bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;
|
|
|
|
|
2021-07-08 21:06:53 +02:00
|
|
|
/*
|
|
|
|
* If there's a previous error, free it and prepare for another one.
|
|
|
|
* Meanwhile, if migration completes successfully, no error will be
|
|
|
|
* dumped when calling migrate_fd_cleanup().
|
|
|
|
*/
|
|
|
|
migrate_error_free(s);
|
|
|
|
|
2023-03-02 12:00:43 +01:00
|
|
|
s->expected_downtime = migrate_downtime_limit();
|
2020-03-25 19:47:21 +01:00
|
|
|
if (resume) {
|
|
|
|
assert(s->cleanup_bh);
|
|
|
|
} else {
|
|
|
|
assert(!s->cleanup_bh);
|
|
|
|
s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
|
|
|
|
}
|
2017-12-15 18:16:54 +01:00
|
|
|
if (error_in) {
|
|
|
|
migrate_fd_error(s, error_in);
|
2021-07-08 21:06:52 +02:00
|
|
|
if (resume) {
|
|
|
|
/*
|
|
|
|
* Don't do cleanup for resume if channel is invalid, but only dump
|
|
|
|
* the error. We wait for another channel connect from the user.
|
|
|
|
* The error_report still gives the HMP user a hint on what failed.
|
|
|
|
* It's normally done in migrate_fd_cleanup(), but call it here
|
|
|
|
* explicitly.
|
|
|
|
*/
|
|
|
|
error_report_err(error_copy(s->error));
|
|
|
|
} else {
|
|
|
|
migrate_fd_cleanup(s);
|
|
|
|
}
|
2017-12-15 18:16:54 +01:00
|
|
|
return;
|
|
|
|
}
|
2012-10-03 14:18:33 +02:00
|
|
|
|
2018-05-02 12:47:24 +02:00
|
|
|
if (resume) {
|
|
|
|
/* This is a resumed migration */
|
2023-05-08 15:08:51 +02:00
|
|
|
rate_limit = migrate_max_postcopy_bandwidth();
|
2018-05-02 12:47:24 +02:00
|
|
|
} else {
|
|
|
|
/* This is a fresh new migration */
|
2023-05-08 15:08:51 +02:00
|
|
|
rate_limit = migrate_max_bandwidth();
|
2013-02-22 17:36:44 +01:00
|
|
|
|
2018-05-02 12:47:24 +02:00
|
|
|
/* Notify before starting migration thread */
|
|
|
|
notifier_list_notify(&migration_state_notifiers, s);
|
|
|
|
}
|
|
|
|
|
2023-05-15 21:56:58 +02:00
|
|
|
migration_rate_set(rate_limit);
|
2018-05-02 12:47:24 +02:00
|
|
|
qemu_file_set_blocking(s->to_dst_file, true);
|
2013-07-29 15:01:57 +02:00
|
|
|
|
2015-11-05 19:11:05 +01:00
|
|
|
/*
|
2017-06-26 12:28:55 +02:00
|
|
|
* Open the return path. For postcopy, it is used exclusively. For
|
|
|
|
* precopy, QEMU uses the return path only if the user specified the
|
|
|
|
* "return-path" capability.
|
2015-11-05 19:11:05 +01:00
|
|
|
*/
|
2023-03-01 22:25:47 +01:00
|
|
|
if (migrate_postcopy_ram() || migrate_return_path()) {
|
2023-09-18 19:28:21 +02:00
|
|
|
if (open_return_path_on_source(s)) {
|
2023-06-21 15:09:39 +02:00
|
|
|
error_setg(&local_err, "Unable to open return-path for postcopy");
|
2018-05-02 12:47:24 +02:00
|
|
|
migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
|
2023-06-21 15:09:39 +02:00
|
|
|
migrate_set_error(s, local_err);
|
|
|
|
error_report_err(local_err);
|
2015-11-05 19:11:05 +01:00
|
|
|
migrate_fd_cleanup(s);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-03-26 19:25:40 +02:00
|
|
|
/*
|
|
|
|
* This needs to be done before resuming a postcopy. Note: for newer
|
|
|
|
* QEMUs we will delay the channel creation until postcopy_start(), to
|
|
|
|
* avoid out-of-order channel creation.
|
|
|
|
*/
|
|
|
|
if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
|
|
|
|
postcopy_preempt_setup(s);
|
|
|
|
}
|
|
|
|
|
2018-05-02 12:47:24 +02:00
|
|
|
if (resume) {
|
2018-05-02 12:47:25 +02:00
|
|
|
/* Wakeup the main migration thread to do the recovery */
|
|
|
|
migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
|
|
|
|
MIGRATION_STATUS_POSTCOPY_RECOVER);
|
|
|
|
qemu_sem_post(&s->postcopy_pause_sem);
|
2018-05-02 12:47:24 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-06-12 11:33:27 +02:00
|
|
|
if (multifd_save_setup(&local_err) != 0) {
|
2023-06-21 15:09:39 +02:00
|
|
|
migrate_set_error(s, local_err);
|
2019-06-12 11:33:27 +02:00
|
|
|
error_report_err(local_err);
|
2016-01-14 16:52:55 +01:00
|
|
|
migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
|
|
|
|
MIGRATION_STATUS_FAILED);
|
|
|
|
migrate_fd_cleanup(s);
|
|
|
|
return;
|
|
|
|
}
|
2021-01-29 11:14:06 +01:00
|
|
|
|
|
|
|
if (migrate_background_snapshot()) {
|
|
|
|
qemu_thread_create(&s->thread, "bg_snapshot",
|
|
|
|
bg_migration_thread, s, QEMU_THREAD_JOINABLE);
|
|
|
|
} else {
|
|
|
|
qemu_thread_create(&s->thread, "live_migration",
|
|
|
|
migration_thread, s, QEMU_THREAD_JOINABLE);
|
|
|
|
}
|
2015-11-05 19:11:05 +01:00
|
|
|
s->migration_thread_running = true;
|
2012-10-03 14:18:33 +02:00
|
|
|
}
|
2015-11-05 19:10:52 +01:00
|
|
|
|
2017-06-27 06:10:13 +02:00
|
|
|
static void migration_class_init(ObjectClass *klass, void *data)
|
|
|
|
{
|
|
|
|
DeviceClass *dc = DEVICE_CLASS(klass);
|
|
|
|
|
|
|
|
dc->user_creatable = false;
|
2020-01-10 16:30:32 +01:00
|
|
|
device_class_set_props(dc, migration_properties);
|
2017-06-27 06:10:13 +02:00
|
|
|
}
|
|
|
|
|
2017-08-01 18:04:18 +02:00
|
|
|
static void migration_instance_finalize(Object *obj)
|
|
|
|
{
|
|
|
|
MigrationState *ms = MIGRATION_OBJ(obj);
|
|
|
|
|
2017-09-05 12:50:22 +02:00
|
|
|
qemu_mutex_destroy(&ms->error_mutex);
|
2018-05-02 12:47:38 +02:00
|
|
|
qemu_mutex_destroy(&ms->qemu_file_lock);
|
2019-10-29 12:49:02 +01:00
|
|
|
qemu_sem_destroy(&ms->wait_unplug_sem);
|
2018-06-13 12:26:41 +02:00
|
|
|
qemu_sem_destroy(&ms->rate_limit_sem);
|
2017-10-20 11:05:52 +02:00
|
|
|
qemu_sem_destroy(&ms->pause_sem);
|
2018-05-02 12:47:19 +02:00
|
|
|
qemu_sem_destroy(&ms->postcopy_pause_sem);
|
migration: synchronize dirty bitmap for resume
This patch implements the first part of core RAM resume logic for
postcopy. ram_resume_prepare() is provided for the work.
When the migration is interrupted by network failure, the dirty bitmap
on the source side will be meaningless, because even if the dirty bit is
cleared, it is still possible that the sent page was lost along the way
to the destination. Here, instead of continuing the migration with the
old dirty bitmap on the source, we ask the destination side to send back its
received bitmap, then invert it to be our initial dirty bitmap.
The source side send thread will issue the MIG_CMD_RECV_BITMAP requests,
once per ramblock, to ask for the received bitmap. On destination side,
MIG_RP_MSG_RECV_BITMAP will be issued, along with the requested bitmap.
Data will be received on the return-path thread of source, and the main
migration thread will be notified when all the ramblock bitmaps are
synchronized.
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20180502104740.12123-17-peterx@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
2018-05-02 12:47:32 +02:00
|
|
|
qemu_sem_destroy(&ms->rp_state.rp_sem);
|
2023-02-08 21:28:12 +01:00
|
|
|
qemu_sem_destroy(&ms->rp_state.rp_pong_acks);
|
migration: Create the postcopy preempt channel asynchronously
This patch allows the postcopy preempt channel to be created
asynchronously. The benefit is that when the connection is slow, we won't
take the BQL (and potentially block all things like QMP) for a long time
without releasing.
A function postcopy_preempt_wait_channel() is introduced, allowing the
migration thread to be able to wait on the channel creation. The channel
is always created by the main thread, in which we'll kick a new semaphore
to tell the migration thread that the channel has been created.
We'll need to wait for the new channel in two places: (1) when there's a
new postcopy migration that is starting, or (2) when there's a postcopy
migration to resume.
For the start of migration, we don't need to wait for this channel until
when we want to start postcopy, aka, postcopy_start(). We'll fail the
migration if we found that the channel creation failed (which should
probably not happen at all in 99% of the cases, because the main channel is
using the same network topology).
For a postcopy recovery, we'll need to wait in postcopy_pause(). In that
case, if the channel creation failed, we can't fail the migration or we'll
crash the VM; instead we stay in the PAUSED state, waiting for yet another
recovery.
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Manish Mishra <manish.mishra@nutanix.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20220707185509.27311-1-peterx@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
2022-07-07 20:55:09 +02:00
|
|
|
qemu_sem_destroy(&ms->postcopy_qemufile_src_sem);
|
2018-03-06 18:09:59 +01:00
|
|
|
error_free(ms->error);
|
2017-08-01 18:04:18 +02:00
|
|
|
}
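/*
 * Sketch of the bitmap inversion described in the "synchronize dirty
 * bitmap for resume" commit message above (hypothetical helper, not
 * part of this file): on recovery, every page the destination has NOT
 * confirmed as received must be considered dirty again and re-sent,
 * so the received bitmap is inverted to become the new initial dirty
 * bitmap.
 */
#include <stddef.h>

static void example_invert_recv_bitmap(unsigned long *dirty,
                                       const unsigned long *received,
                                       size_t nr_longs)
{
    for (size_t i = 0; i < nr_longs; i++) {
        dirty[i] = ~received[i]; /* not received => dirty */
    }
}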
|
|
|
|
|
2017-06-27 06:10:13 +02:00
|
|
|
static void migration_instance_init(Object *obj)
|
|
|
|
{
|
|
|
|
MigrationState *ms = MIGRATION_OBJ(obj);
|
|
|
|
|
|
|
|
ms->state = MIGRATION_STATUS_NONE;
|
|
|
|
ms->mbps = -1;
|
2019-01-11 07:37:30 +01:00
|
|
|
ms->pages_per_second = -1;
|
2017-10-20 11:05:52 +02:00
|
|
|
qemu_sem_init(&ms->pause_sem, 0);
|
2017-09-05 12:50:22 +02:00
|
|
|
qemu_mutex_init(&ms->error_mutex);
|
2017-07-18 05:39:06 +02:00
|
|
|
|
2023-03-02 10:35:24 +01:00
|
|
|
migrate_params_init(&ms->parameters);
|
2018-05-02 12:47:19 +02:00
|
|
|
|
|
|
|
qemu_sem_init(&ms->postcopy_pause_sem, 0);
|
2018-05-02 12:47:32 +02:00
|
|
|
qemu_sem_init(&ms->rp_state.rp_sem, 0);
|
2023-02-08 21:28:12 +01:00
|
|
|
qemu_sem_init(&ms->rp_state.rp_pong_acks, 0);
|
2018-06-13 12:26:41 +02:00
|
|
|
qemu_sem_init(&ms->rate_limit_sem, 0);
|
2019-10-29 12:49:02 +01:00
|
|
|
qemu_sem_init(&ms->wait_unplug_sem, 0);
|
2022-07-07 20:55:09 +02:00
|
|
|
qemu_sem_init(&ms->postcopy_qemufile_src_sem, 0);
|
2018-05-02 12:47:38 +02:00
|
|
|
qemu_mutex_init(&ms->qemu_file_lock);
|
2017-07-18 05:39:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return true if the check passes, false otherwise. An error will be put
|
|
|
|
* inside errp if provided.
|
|
|
|
*/
|
|
|
|
static bool migration_object_check(MigrationState *ms, Error **errp)
|
|
|
|
{
|
2017-07-18 05:39:10 +02:00
|
|
|
/* Assuming all off */
|
2023-03-01 20:28:56 +01:00
|
|
|
bool old_caps[MIGRATION_CAPABILITY__MAX] = { 0 };
|
2017-07-18 05:39:10 +02:00
|
|
|
|
2017-07-18 05:39:06 +02:00
|
|
|
if (!migrate_params_check(&ms->parameters, errp)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2023-03-01 20:28:56 +01:00
|
|
|
return migrate_caps_check(old_caps, ms->capabilities, errp);
|
2017-06-27 06:10:13 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static const TypeInfo migration_type = {
|
|
|
|
.name = TYPE_MIGRATION,
|
2017-06-28 09:15:44 +02:00
|
|
|
/*
|
2017-07-05 10:21:23 +02:00
|
|
|
* NOTE: TYPE_MIGRATION is not really a device, as the object is
|
2020-06-10 07:32:19 +02:00
|
|
|
* not created using qdev_new(), it is not attached to the qdev
|
2017-07-05 10:21:23 +02:00
|
|
|
* device tree, and it is never realized.
|
|
|
|
*
|
|
|
|
* TODO: Make this TYPE_OBJECT once QOM provides something like
|
|
|
|
* TYPE_DEVICE's "-global" properties.
|
2017-06-28 09:15:44 +02:00
|
|
|
*/
|
2017-06-27 06:10:13 +02:00
|
|
|
.parent = TYPE_DEVICE,
|
|
|
|
.class_init = migration_class_init,
|
|
|
|
.class_size = sizeof(MigrationClass),
|
|
|
|
.instance_size = sizeof(MigrationState),
|
|
|
|
.instance_init = migration_instance_init,
|
2017-08-01 18:04:18 +02:00
|
|
|
.instance_finalize = migration_instance_finalize,
|
2017-06-27 06:10:13 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
static void register_migration_types(void)
|
|
|
|
{
|
|
|
|
type_register_static(&migration_type);
|
|
|
|
}
|
|
|
|
|
|
|
|
type_init(register_migration_types);
|