/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_MIGRATION_H
#define QEMU_MIGRATION_H

#include "exec/cpu-common.h"
#include "hw/qdev-core.h"
#include "qapi/qapi-types-migration.h"
#include "qemu/thread.h"
#include "qemu/coroutine_int.h"
#include "io/channel.h"
#include "io/channel-buffer.h"
#include "net/announce.h"
#include "qom/object.h"

struct PostcopyBlocktimeContext;

#define MIGRATION_RESUME_ACK_VALUE (1)
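/*
 * The value above is what migrate_send_rp_resume_ack() (declared near the
 * bottom of this header) sends back on the return path to report that a
 * paused postcopy migration has successfully resumed.
 */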

/*
 * 1<<6=64 pages -> 256K chunk when page size is 4K.  This gives us
 * the benefit that all the chunks are 64-page aligned, so the bitmaps
 * are always aligned to a long.
 */
#define CLEAR_BITMAP_SHIFT_MIN 6
/*
 * 1<<18=256K pages -> 1G chunk when page size is 4K.  This is the
 * default value to use if none is specified.
 */
#define CLEAR_BITMAP_SHIFT_DEFAULT 18
/*
 * 1<<31=2G pages -> 8T chunk when page size is 4K.  This should be
 * big enough so that we won't easily overflow.
 */
#define CLEAR_BITMAP_SHIFT_MAX 31
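/*
 * Illustrative arithmetic for the three shift values above, assuming a 4K
 * guest page size (the expression is a sketch, not a helper defined here):
 *
 *   chunk_bytes = (uint64_t)page_size << clear_bitmap_shift;
 *
 * e.g. shift 6 -> 256K chunks, shift 18 -> 1G chunks, shift 31 -> 8T chunks.
 */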

/* State for the incoming migration */
struct MigrationIncomingState {
    QEMUFile *from_src_file;

    /* A hook to allow cleanup at the end of incoming migration */
    void *transport_data;
    void (*transport_cleanup)(void *data);

    /*
     * Cleared at the start of the main state load; set as the main thread
     * finishes loading state.
     */
    QemuEvent main_thread_load_event;

    /* For network announces */
    AnnounceTimer  announce_timer;

    size_t         largest_page_size;
    bool           have_fault_thread;
    QemuThread     fault_thread;
    QemuSemaphore  fault_thread_sem;
    /* Set this when we want the fault thread to quit */
    bool           fault_thread_quit;

    bool           have_listen_thread;
    QemuThread     listen_thread;
    QemuSemaphore  listen_thread_sem;

    /* For the kernel to send us notifications */
    int       userfault_fd;
    /* To notify the fault_thread to wake, e.g., when need to quit */
    int       userfault_event_fd;
    QEMUFile *to_src_file;
    QemuMutex rp_mutex;    /* We send replies from multiple threads */
    /* RAMBlock of last request sent to source */
    RAMBlock *last_rb;
    void     *postcopy_tmp_page;
    void     *postcopy_tmp_zero_page;
    /* PostCopyFD's for external userfaultfds & handlers of shared memory */
    GArray   *postcopy_remote_fds;

    QEMUBH *bh;

    int state;

    bool have_colo_incoming_thread;
    QemuThread colo_incoming_thread;
    /* The coroutine we should enter (back) after failover */
    Coroutine *migration_incoming_co;
    QemuSemaphore colo_incoming_sem;

    /*
     * PostcopyBlocktimeContext to keep information for postcopy
     * live migration, to calculate vCPU block time
     */
    struct PostcopyBlocktimeContext *blocktime_ctx;

    /* notify PAUSED postcopy incoming migrations to try to continue */
    bool postcopy_recover_triggered;
    QemuSemaphore postcopy_pause_sem_dst;
    QemuSemaphore postcopy_pause_sem_fault;

    /* List of listening socket addresses */
    SocketAddressList *socket_address_list;

    /* A tree of pages that we requested to the source VM */
    GTree *page_requested;
    /* For debugging purposes only, but would be nice to keep */
    int page_requested_count;
    /*
     * The mutex helps to maintain the requested pages that we sent to the
     * source, IOW, to guarantee coherence between the page_requested tree
     * and the per-ramblock receivedmap.  Note!  This does not guarantee
     * consistency of the real page copy procedures (using UFFDIO_[ZERO]COPY).
     * E.g., even if one bit in receivedmap is cleared, UFFDIO_COPY could have
     * happened for that page already.  This is intended so that the mutex
     * won't be serialized and blocked by slow operations like the UFFDIO_*
     * ioctls.  However, this should be enough to make sure the page_requested
     * tree always contains valid information.
     */
    QemuMutex page_request_mutex;
};
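/*
 * A sketch of the locking pattern page_request_mutex is intended for; the
 * actual call sites live in the migration sources, so treat this as
 * illustrative only:
 *
 *   qemu_mutex_lock(&mis->page_request_mutex);
 *   if (!g_tree_lookup(mis->page_requested, aligned_addr)) {
 *       g_tree_insert(mis->page_requested, aligned_addr, (gpointer)1);
 *       mis->page_requested_count++;
 *   }
 *   qemu_mutex_unlock(&mis->page_request_mutex);
 */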

MigrationIncomingState *migration_incoming_get_current(void);
void migration_incoming_state_destroy(void);
/*
 * Functions to work with blocktime context
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info);

#define TYPE_MIGRATION "migration"

typedef struct MigrationClass MigrationClass;
DECLARE_OBJ_CHECKERS(MigrationState, MigrationClass,
                     MIGRATION_OBJ, TYPE_MIGRATION)

struct MigrationClass {
    /*< private >*/
    DeviceClass parent_class;
};

struct MigrationState {
    /*< private >*/
    DeviceState parent_obj;

    /*< public >*/
    QemuThread thread;
    QEMUBH *vm_start_bh;
    QEMUBH *cleanup_bh;
    /* Protected by qemu_file_lock */
    QEMUFile *to_dst_file;
    QIOChannelBuffer *bioc;
    /*
     * Protects to_dst_file/from_dst_file pointers.  We need to make sure we
     * won't yield or hang during the critical section, since this lock will
     * be used in the OOB command handler.
     */
    QemuMutex qemu_file_lock;

    /*
     * Used to allow urgent requests to override rate limiting.
     */
    QemuSemaphore rate_limit_sem;

    /* pages already sent at the beginning of current iteration */
    uint64_t iteration_initial_pages;

    /* pages transferred per second */
    double pages_per_second;

    /* bytes already sent at the beginning of current iteration */
    uint64_t iteration_initial_bytes;
    /* time at the start of current iteration */
    int64_t iteration_start_time;
    /*
     * The final stage happens when the remaining data is smaller than
     * this threshold; it's calculated from the requested downtime and
     * measured bandwidth.
     */
    int64_t threshold_size;
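    /*
     * A sketch of how threshold_size is typically derived (bandwidth in
     * bytes/ms, downtime_limit in ms; illustrative, the real computation
     * lives in the migration thread):
     *
     *   s->threshold_size = bandwidth * s->parameters.downtime_limit;
     */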

    /* params from 'migrate-set-parameters' */
    MigrationParameters parameters;

    int state;

    /* State related to return path */
    struct {
        /* Protected by qemu_file_lock */
        QEMUFile     *from_dst_file;
        QemuThread    rp_thread;
        bool          error;
        /*
         * We can also check non-zero of rp_thread, but there's no "official"
         * way to do this, so this bool makes it slightly more elegant.
         * Checking from_dst_file for this is racy because from_dst_file will
         * be cleared in the rp_thread!
         */
        bool          rp_thread_created;
        QemuSemaphore rp_sem;
    } rp_state;

    double mbps;
    /* Timestamp when recent migration starts (ms) */
    int64_t start_time;
    /* Total time used by latest migration (ms) */
    int64_t total_time;
    /* Timestamp when VM is down (ms) to migrate the last stuff */
    int64_t downtime_start;
    int64_t downtime;
    int64_t expected_downtime;
    bool enabled_capabilities[MIGRATION_CAPABILITY__MAX];
    int64_t setup_time;
    /*
     * Whether the guest was running when we entered the completion stage.
     * If migration is interrupted for any reason, we need to continue
     * running the guest on the source.
     */
    bool vm_was_running;

    /* Flag set once the migration has been asked to enter postcopy */
    bool start_postcopy;
    /* Flag set after postcopy has sent the device state */
    bool postcopy_after_devices;

    /* Flag set once the migration thread is running (and needs joining) */
    bool migration_thread_running;

    /* Flag set once the migration thread called bdrv_inactivate_all */
    bool block_inactive;

    /* Migration is waiting for guest to unplug device */
    QemuSemaphore wait_unplug_sem;

    /* Migration is paused due to pause-before-switchover */
    QemuSemaphore pause_sem;

    /* The semaphore is used to notify COLO thread that failover is finished */
    QemuSemaphore colo_exit_sem;

    /* The event is used to notify COLO thread to do checkpoint */
    QemuEvent colo_checkpoint_event;
    int64_t colo_checkpoint_time;
    QEMUTimer *colo_delay_timer;

    /*
     * The first error that has occurred.  We use the mutex to be able to
     * return the first error message.
     */
    Error *error;
    /* mutex to protect errp */
    QemuMutex error_mutex;

    /* Do we have to clean up -b/-i from old migrate parameters */
    /* This feature is deprecated and will be removed */
    bool must_remove_block_options;

    /*
     * Global switch on whether we need to store the global state
     * during migration.
     */
    bool store_global_state;

    /* Whether we send QEMU_VM_CONFIGURATION during migration */
    bool send_configuration;
    /* Whether we send section footer during migration */
    bool send_section_footer;

    /* Needed by postcopy-pause state */
    QemuSemaphore postcopy_pause_sem;
    QemuSemaphore postcopy_pause_rp_sem;
    /*
     * Whether we abort the migration if decompression errors are
     * detected at the destination.  It is left at false for qemu
     * older than 3.0, since only newer qemu sends streams that
     * do not trigger spurious decompression errors.
     */
    bool decompress_error_check;

    /*
     * This decides the size of guest memory chunk that will be used
     * to track dirty bitmap clearing.  The size of memory chunk will
     * be GUEST_PAGE_SIZE << N.  Say, N=0 means we will clear dirty
     * bitmap for each page to send (1<<0=1); N=10 means we will clear
     * dirty bitmap only once for 1<<10=1K continuous guest pages
     * (which is a 4M chunk).
     */
    uint8_t clear_bitmap_shift;

    /*
     * This saves the hostname when outgoing migration starts
     */
    char *hostname;
};

void migrate_set_state(int *state, int old_state, int new_state);
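/*
 * A sketch of typical use (status values come from the QAPI MigrationStatus
 * enum; the transition is applied only if *state still equals old_state):
 *
 *   migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
 *                     MIGRATION_STATUS_ACTIVE);
 */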

void migration_fd_process_incoming(QEMUFile *f, Error **errp);
void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp);
void migration_incoming_process(void);

bool migration_has_all_channels(void);

uint64_t migrate_max_downtime(void);

void migrate_set_error(MigrationState *s, const Error *error);
void migrate_fd_error(MigrationState *s, const Error *error);

void migrate_fd_connect(MigrationState *s, Error *error_in);

bool migration_is_setup_or_active(int state);
bool migration_is_running(int state);

void migrate_init(MigrationState *s);
bool migration_is_blocked(Error **errp);
/* True if outgoing migration has entered postcopy phase */
bool migration_in_postcopy(void);
MigrationState *migrate_get_current(void);

bool migrate_postcopy(void);

bool migrate_release_ram(void);
bool migrate_postcopy_ram(void);
bool migrate_zero_blocks(void);
bool migrate_dirty_bitmaps(void);
bool migrate_ignore_shared(void);
bool migrate_validate_uuid(void);

bool migrate_auto_converge(void);
bool migrate_use_multifd(void);
bool migrate_pause_before_switchover(void);
int migrate_multifd_channels(void);
MultiFDCompression migrate_multifd_compression(void);
int migrate_multifd_zlib_level(void);
int migrate_multifd_zstd_level(void);

int migrate_use_xbzrle(void);
uint64_t migrate_xbzrle_cache_size(void);
bool migrate_colo_enabled(void);

bool migrate_use_block(void);
bool migrate_use_block_incremental(void);
int migrate_max_cpu_throttle(void);
bool migrate_use_return_path(void);

uint64_t ram_get_total_transferred_pages(void);

bool migrate_use_compression(void);
int migrate_compress_level(void);
int migrate_compress_threads(void);
int migrate_compress_wait_thread(void);
int migrate_decompress_threads(void);
bool migrate_use_events(void);
bool migrate_postcopy_blocktime(void);
bool migrate_background_snapshot(void);

/* Sending on the return path - generic and then for each message type */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value);
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value);
int migrate_send_rp_req_pages(MigrationIncomingState *mis, RAMBlock *rb,
                              ram_addr_t start, uint64_t haddr);
int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
                                      RAMBlock *rb, ram_addr_t start);
void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
                                 char *block_name);
void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value);
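/*
 * A sketch of the simplest return-path exchange: the source sends a ping
 * carrying a value and the destination echoes it back (illustrative only):
 *
 *   migrate_send_rp_pong(mis, value);
 */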

void dirty_bitmap_mig_before_vm_start(void);
void dirty_bitmap_mig_cancel_outgoing(void);
void dirty_bitmap_mig_cancel_incoming(void);
bool check_dirty_bitmap_mig_alias_map(const BitmapMigrationNodeAliasList *bbm,
                                      Error **errp);

void migrate_add_address(SocketAddress *address);

int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque);

#define qemu_ram_foreach_block \
  #warning "Use foreach_not_ignored_block in migration code"
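/*
 * Note on the define above: it deliberately expands to an invalid token
 * sequence, so any use of qemu_ram_foreach_block inside migration code
 * fails to compile, and the string points at the intended replacement.
 */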

void migration_make_urgent_request(void);
void migration_consume_urgent_request(void);
bool migration_rate_limit(void);
void migration_cancel(const Error *error);

void populate_vfio_info(MigrationInfo *info);

#endif