2015-11-05 19:10:55 +01:00
|
|
|
/*
|
|
|
|
* Postcopy migration for RAM
|
|
|
|
*
|
|
|
|
* Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Dave Gilbert <dgilbert@redhat.com>
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
|
|
|
* See the COPYING file in the top-level directory.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Postcopy is a migration technique where the execution flips from the
|
|
|
|
* source to the destination before all the data has been copied.
|
|
|
|
*/
|
|
|
|
|
2016-01-26 19:16:54 +01:00
|
|
|
#include "qemu/osdep.h"
|
2022-02-08 21:08:52 +01:00
|
|
|
#include "qemu/madvise.h"
|
2017-04-24 20:50:19 +02:00
|
|
|
#include "exec/target_page.h"
|
2017-04-24 20:07:27 +02:00
|
|
|
#include "migration.h"
|
2017-04-20 18:52:18 +02:00
|
|
|
#include "qemu-file.h"
|
2017-04-20 14:48:46 +02:00
|
|
|
#include "savevm.h"
|
2017-04-20 13:12:24 +02:00
|
|
|
#include "postcopy-ram.h"
|
2017-04-17 20:26:27 +02:00
|
|
|
#include "ram.h"
|
2018-03-12 18:20:59 +01:00
|
|
|
#include "qapi/error.h"
|
|
|
|
#include "qemu/notify.h"
|
2019-08-12 07:23:46 +02:00
|
|
|
#include "qemu/rcu.h"
|
2015-11-05 19:10:55 +01:00
|
|
|
#include "sysemu/sysemu.h"
|
|
|
|
#include "qemu/error-report.h"
|
|
|
|
#include "trace.h"
|
2019-05-18 22:54:21 +02:00
|
|
|
#include "hw/boards.h"
|
2021-04-29 13:27:06 +02:00
|
|
|
#include "exec/ramblock.h"
|
2022-07-07 20:55:02 +02:00
|
|
|
#include "socket.h"
|
|
|
|
#include "yank_functions.h"
|
2022-07-07 20:55:18 +02:00
|
|
|
#include "tls.h"
|
2023-02-01 22:10:54 +01:00
|
|
|
#include "qemu/userfaultfd.h"
|
2023-04-19 18:17:38 +02:00
|
|
|
#include "qemu/mmap-alloc.h"
|
2015-11-05 19:10:55 +01:00
|
|
|
|
/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

/*
 * Accumulator for discard ranges belonging to one RAMBlock; ranges are
 * batched up to MAX_DISCARDS_PER_COMMAND entries before being sent.
 */
struct PostcopyDiscardState {
    /* idstr of the RAMBlock these ranges refer to */
    const char *ramblock_name;
    /* Index of the next free slot in start_list/length_list */
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    /* Statistics: total ranges queued and total commands sent */
    unsigned int nsentwords;
    unsigned int nsentcmds;
};
|
/* Notifiers consulted at key postcopy phases; see postcopy_notify() */
static NotifierWithReturnList postcopy_notifier_list;

/* One-time initialisation of the postcopy notifier list. */
void postcopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&postcopy_notifier_list);
}
/* Register @nn to be called back at postcopy phase transitions. */
void postcopy_add_notifier(NotifierWithReturn *nn)
{
    notifier_with_return_list_add(&postcopy_notifier_list, nn);
}
/* Unregister a notifier previously added with postcopy_add_notifier(). */
void postcopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}
|
|
|
|
|
|
int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp)
|
|
|
|
{
|
|
|
|
struct PostcopyNotifyData pnd;
|
|
|
|
pnd.reason = reason;
|
|
|
|
pnd.errp = errp;
|
|
|
|
|
|
|
|
return notifier_with_return_list_notify(&postcopy_notifier_list,
|
|
|
|
&pnd);
|
|
|
|
}
|
|
|
|
|
/*
 * NOTE: this routine is not thread safe, we can't call it concurrently. But it
 * should be good enough for migration's purposes.
 *
 * Creates @thread running @fn with @mis as its argument, then blocks on
 * mis->thread_sync_sem until it is posted — presumably by the new thread
 * once it has finished its startup (NOTE(review): posting side not visible
 * here; confirm against the thread functions passed in).
 */
void postcopy_thread_create(MigrationIncomingState *mis,
                            QemuThread *thread, const char *name,
                            void *(*fn)(void *), int joinable)
{
    qemu_sem_init(&mis->thread_sync_sem, 0);
    qemu_thread_create(thread, name, fn, mis, joinable);
    qemu_sem_wait(&mis->thread_sync_sem);
    /* The semaphore is only used for this handshake; free it immediately */
    qemu_sem_destroy(&mis->thread_sync_sem);
}
|
2015-11-05 19:10:55 +01:00
|
|
|
/* Postcopy needs to detect accesses to pages that haven't yet been copied
|
|
|
|
* across, and efficiently map new pages in, the techniques for doing this
|
|
|
|
* are target OS specific.
|
|
|
|
*/
|
|
|
|
#if defined(__linux__)
|
|
|
|
|
2015-11-05 19:11:17 +01:00
|
|
|
#include <poll.h>
|
2015-11-05 19:10:55 +01:00
|
|
|
#include <sys/ioctl.h>
|
|
|
|
#include <sys/syscall.h>
|
|
|
|
#include <asm/types.h> /* for __u64 */
|
|
|
|
#endif
|
|
|
|
|
2016-02-23 17:09:15 +01:00
|
|
|
#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
|
|
|
|
#include <sys/eventfd.h>
|
2015-11-05 19:10:55 +01:00
|
|
|
#include <linux/userfaultfd.h>
|
|
|
|
|
/*
 * State for computing "postcopy blocktime": how long vCPUs were stalled
 * on not-yet-migrated pages during postcopy.  The instance lives until
 * QEMU exit (freed by its exit_notifier); times are in milliseconds.
 */
typedef struct PostcopyBlocktimeContext {
    /* time when page fault initiated per vCPU */
    uint32_t *page_fault_vcpu_time;
    /* page address per vCPU */
    uintptr_t *vcpu_addr;
    /* total blocktime over all vCPUs */
    uint32_t total_blocktime;
    /* blocktime per vCPU */
    uint32_t *vcpu_blocktime;
    /* point in time when last page fault was initiated */
    uint32_t last_begin;
    /* number of vCPU are suspended */
    int smp_cpus_down;
    /* timestamp when the context was created (QEMU_CLOCK_REALTIME, ms) */
    uint64_t start_time;

    /*
     * Handler for exit event, necessary for
     * releasing whole blocktime_ctx
     */
    Notifier exit_notifier;
} PostcopyBlocktimeContext;
|
|
|
|
|
|
static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
|
|
|
|
{
|
|
|
|
g_free(ctx->page_fault_vcpu_time);
|
|
|
|
g_free(ctx->vcpu_addr);
|
|
|
|
g_free(ctx->vcpu_blocktime);
|
|
|
|
g_free(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void migration_exit_cb(Notifier *n, void *data)
|
|
|
|
{
|
|
|
|
PostcopyBlocktimeContext *ctx = container_of(n, PostcopyBlocktimeContext,
|
|
|
|
exit_notifier);
|
|
|
|
destroy_blocktime_context(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct PostcopyBlocktimeContext *blocktime_context_new(void)
|
|
|
|
{
|
2019-05-18 22:54:21 +02:00
|
|
|
MachineState *ms = MACHINE(qdev_get_machine());
|
|
|
|
unsigned int smp_cpus = ms->smp.cpus;
|
migration: add postcopy blocktime ctx into MigrationIncomingState
This patch adds request to kernel space for UFFD_FEATURE_THREAD_ID, in
case this feature is provided by kernel.
PostcopyBlocktimeContext is encapsulated inside postcopy-ram.c,
due to it being a postcopy-only feature.
Also it defines PostcopyBlocktimeContext's instance live time.
Information from PostcopyBlocktimeContext instance will be provided
much after postcopy migration end, instance of PostcopyBlocktimeContext
will live till QEMU exit, but part of it (vcpu_addr,
page_fault_vcpu_time) used only during calculation, will be released
when postcopy ended or failed.
To enable postcopy blocktime calculation on destination, need to
request proper compatibility (Patch for documentation will be at the
tail of the patch set).
As an example following command enable that capability, assume QEMU was
started with
-chardev socket,id=charmonitor,path=/var/lib/migrate-vm-monitor.sock
option to control it
[root@host]#printf "{\"execute\" : \"qmp_capabilities\"}\r\n \
{\"execute\": \"migrate-set-capabilities\" , \"arguments\": {
\"capabilities\": [ { \"capability\": \"postcopy-blocktime\", \"state\":
true } ] } }" | nc -U /var/lib/migrate-vm-monitor.sock
Or just with HMP
(qemu) migrate_set_capability postcopy-blocktime on
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Alexey Perevalov <a.perevalov@samsung.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Message-Id: <1521742647-25550-3-git-send-email-a.perevalov@samsung.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
2018-03-22 19:17:23 +01:00
|
|
|
PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
|
|
|
|
ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus);
|
|
|
|
ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
|
|
|
|
ctx->vcpu_blocktime = g_new0(uint32_t, smp_cpus);
|
|
|
|
|
|
|
|
ctx->exit_notifier.notify = migration_exit_cb;
|
|
|
|
ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
|
|
|
|
qemu_add_exit_notifier(&ctx->exit_notifier);
|
|
|
|
return ctx;
|
|
|
|
}
|
2017-10-30 14:16:30 +01:00
|
|
|
|
2018-03-22 19:17:27 +01:00
|
|
|
static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
|
|
|
|
{
|
2019-05-18 22:54:21 +02:00
|
|
|
MachineState *ms = MACHINE(qdev_get_machine());
|
2020-11-13 02:13:37 +01:00
|
|
|
uint32List *list = NULL;
|
2018-03-22 19:17:27 +01:00
|
|
|
int i;
|
|
|
|
|
2019-05-18 22:54:21 +02:00
|
|
|
for (i = ms->smp.cpus - 1; i >= 0; i--) {
|
2020-11-13 02:13:37 +01:00
|
|
|
QAPI_LIST_PREPEND(list, ctx->vcpu_blocktime[i]);
|
2018-03-22 19:17:27 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return list;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This function just populates MigrationInfo from postcopy's
|
|
|
|
* blocktime context. It will not populate MigrationInfo,
|
|
|
|
* unless postcopy-blocktime capability was set.
|
|
|
|
*
|
|
|
|
* @info: pointer to MigrationInfo to populate
|
|
|
|
*/
|
|
|
|
void fill_destination_postcopy_migration_info(MigrationInfo *info)
|
|
|
|
{
|
|
|
|
MigrationIncomingState *mis = migration_incoming_get_current();
|
|
|
|
PostcopyBlocktimeContext *bc = mis->blocktime_ctx;
|
|
|
|
|
|
|
|
if (!bc) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
info->has_postcopy_blocktime = true;
|
|
|
|
info->postcopy_blocktime = bc->total_blocktime;
|
|
|
|
info->has_postcopy_vcpu_blocktime = true;
|
|
|
|
info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t get_postcopy_total_blocktime(void)
|
|
|
|
{
|
|
|
|
MigrationIncomingState *mis = migration_incoming_get_current();
|
|
|
|
PostcopyBlocktimeContext *bc = mis->blocktime_ctx;
|
|
|
|
|
|
|
|
if (!bc) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return bc->total_blocktime;
|
|
|
|
}
|
|
|
|
|
2017-09-19 18:47:58 +02:00
|
|
|
/**
|
|
|
|
* receive_ufd_features: check userfault fd features, to request only supported
|
|
|
|
* features in the future.
|
|
|
|
*
|
|
|
|
* Returns: true on success
|
|
|
|
*
|
|
|
|
* __NR_userfaultfd - should be checked before
|
|
|
|
* @features: out parameter will contain uffdio_api.features provided by kernel
|
|
|
|
* in case of success
|
|
|
|
*/
|
|
|
|
static bool receive_ufd_features(uint64_t *features)
|
2015-11-05 19:10:55 +01:00
|
|
|
{
|
2017-09-19 18:47:58 +02:00
|
|
|
struct uffdio_api api_struct = {0};
|
|
|
|
int ufd;
|
|
|
|
bool ret = true;
|
|
|
|
|
2023-02-01 22:10:54 +01:00
|
|
|
ufd = uffd_open(O_CLOEXEC);
|
2017-09-19 18:47:58 +02:00
|
|
|
if (ufd == -1) {
|
2023-02-01 22:10:54 +01:00
|
|
|
error_report("%s: uffd_open() failed: %s", __func__, strerror(errno));
|
2017-09-19 18:47:58 +02:00
|
|
|
return false;
|
|
|
|
}
|
2015-11-05 19:10:55 +01:00
|
|
|
|
2017-09-19 18:47:58 +02:00
|
|
|
/* ask features */
|
2015-11-05 19:10:55 +01:00
|
|
|
api_struct.api = UFFD_API;
|
|
|
|
api_struct.features = 0;
|
|
|
|
if (ioctl(ufd, UFFDIO_API, &api_struct)) {
|
2017-09-19 18:47:57 +02:00
|
|
|
error_report("%s: UFFDIO_API failed: %s", __func__,
|
2015-11-05 19:10:55 +01:00
|
|
|
strerror(errno));
|
2017-09-19 18:47:58 +02:00
|
|
|
ret = false;
|
|
|
|
goto release_ufd;
|
|
|
|
}
|
|
|
|
|
|
|
|
*features = api_struct.features;
|
|
|
|
|
|
|
|
release_ufd:
|
|
|
|
close(ufd);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* request_ufd_features: this function should be called only once on a newly
|
|
|
|
* opened ufd, subsequent calls will lead to error.
|
|
|
|
*
|
2020-09-17 09:50:21 +02:00
|
|
|
* Returns: true on success
|
2017-09-19 18:47:58 +02:00
|
|
|
*
|
|
|
|
* @ufd: fd obtained from userfaultfd syscall
|
|
|
|
* @features: bit mask see UFFD_API_FEATURES
|
|
|
|
*/
|
|
|
|
static bool request_ufd_features(int ufd, uint64_t features)
|
|
|
|
{
|
|
|
|
struct uffdio_api api_struct = {0};
|
|
|
|
uint64_t ioctl_mask;
|
|
|
|
|
|
|
|
api_struct.api = UFFD_API;
|
|
|
|
api_struct.features = features;
|
|
|
|
if (ioctl(ufd, UFFDIO_API, &api_struct)) {
|
|
|
|
error_report("%s failed: UFFDIO_API failed: %s", __func__,
|
|
|
|
strerror(errno));
|
2015-11-05 19:10:55 +01:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
|
|
|
|
(__u64)1 << _UFFDIO_UNREGISTER;
|
|
|
|
if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
|
|
|
|
error_report("Missing userfault features: %" PRIx64,
|
|
|
|
(uint64_t)(~api_struct.ioctls & ioctl_mask));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-09-19 18:47:58 +02:00
|
|
|
return true;
|
|
|
|
}
|
/*
 * Probe the kernel's userfaultfd features once (cached in a function-local
 * static), request the subset we want on @ufd, and verify hugepage support
 * when any RAMBlock uses a page size different from the host's.
 * As a side effect, creates mis->blocktime_ctx when the postcopy-blocktime
 * capability is enabled and the kernel supports UFFD_FEATURE_THREAD_ID.
 *
 * Returns: true if @ufd is usable for postcopy.
 */
static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
{
    uint64_t asked_features = 0;
    static uint64_t supported_features;

    /*
     * it's not possible to
     * request UFFD_API twice per one fd
     * userfault fd features is persistent
     */
    if (!supported_features) {
        if (!receive_ufd_features(&supported_features)) {
            error_report("%s failed", __func__);
            return false;
        }
    }

#ifdef UFFD_FEATURE_THREAD_ID
    if (UFFD_FEATURE_THREAD_ID & supported_features) {
        asked_features |= UFFD_FEATURE_THREAD_ID;
        if (migrate_postcopy_blocktime()) {
            /* Lazily create the blocktime context; it persists until exit */
            if (!mis->blocktime_ctx) {
                mis->blocktime_ctx = blocktime_context_new();
            }
        }
    }
#endif

    /*
     * request features, even if asked_features is 0, due to
     * kernel expects UFFD_API before UFFDIO_REGISTER, per
     * userfault file descriptor
     */
    if (!request_ufd_features(ufd, asked_features)) {
        error_report("%s failed: features %" PRIu64, __func__,
                     asked_features);
        return false;
    }

    if (qemu_real_host_page_size() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}
|
|
|
|
/* Callback from postcopy_ram_supported_by_host block iterator.
 *
 * Returns 0 when @rb can take part in postcopy, non-zero (with an error
 * reported) when it cannot: its used length must be a multiple of its page
 * size, and file-backed blocks must live on tmpfs or hugetlbfs.
 */
static int test_ramblock_postcopiable(RAMBlock *rb)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    size_t pagesize = qemu_ram_pagesize(rb);
    QemuFsType fs;

    if (length % pagesize) {
        error_report("Postcopy requires RAM blocks to be a page size multiple,"
                     " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                     "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }

    /* fd >= 0 means the block is file-backed; check the filesystem type */
    if (rb->fd >= 0) {
        fs = qemu_fd_getfs(rb->fd);
        if (fs != QEMU_FS_TYPE_TMPFS && fs != QEMU_FS_TYPE_HUGETLBFS) {
            error_report("Host backend files need to be TMPFS or HUGETLBFS only");
            return 1;
        }
    }

    return 0;
}
|
|
|
|
/*
 * Note: This has the side effect of munlock'ing all of RAM, that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 *
 * Probes whether this host can run postcopy: userfaultfd must open, devices
 * must not object, all RAMBlocks must be postcopiable, and a register/
 * unregister round-trip on a scratch anonymous page must expose the
 * WAKE/COPY/ZEROPAGE ioctls.  Returns true when postcopy is usable.
 */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    long pagesize = qemu_real_host_page_size();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;
    Error *local_err = NULL;
    RAMBlock *block;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = uffd_open(O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Give devices a chance to object */
    if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, &local_err)) {
        error_report_err(local_err);
        goto out;
    }

    /* Version and features check */
    if (!ufd_check_and_apply(ufd, mis)) {
        goto out;
    }

    /*
     * We don't support postcopy with some type of ramblocks.
     *
     * NOTE: we explicitly ignored ramblock_is_ignored() instead we checked
     * all possible ramblocks.  This is because this function can be called
     * when creating the migration object, during the phase RAM_MIGRATABLE
     * is not even properly set for all the ramblocks.
     *
     * A side effect of this is we'll also check against RAM_SHARED
     * ramblocks even if migrate_ignore_shared() is set (in which case
     * we'll never migrate RAM_SHARED at all), but normally this shouldn't
     * affect in reality, or we can revisit.
     */
    RAMBLOCK_FOREACH(block) {
        if (test_ramblock_postcopiable(block)) {
            goto out;
        }
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(QEMU_PTR_IS_ALIGNED(testarea, pagesize));

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    /* UFFDIO_REGISTER filled reg_struct.ioctls with the supported ops */
    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}
|
|
|
|
/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 *
 * Returns 0 on success, -1 if the discard failed.
 */
static int init_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * Save the used_length before running the guest. In case we have to
     * resize RAM blocks when syncing RAM block sizes from the source during
     * precopy, we'll update it manually via the ram block notifier.
     */
    rb->postcopy_length = length;

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}
/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 *
 * Re-enables transparent hugepages on the block and unregisters it from
 * userfaultfd.  Returns 0 on success, -1 if the unregister ioctl failed.
 */
static int cleanup_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    /* Use the length saved by init_range, not the current used_length */
    ram_addr_t length = rb->postcopy_length;
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages.   It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialise postcopy-ram, setting the RAM to a state where we can go into
|
|
|
|
* postcopy later; must be called prior to any precopy.
|
|
|
|
* called from arch_init's similarly named ram_postcopy_incoming_init
|
|
|
|
*/
|
2018-06-20 22:27:36 +02:00
|
|
|
int postcopy_ram_incoming_init(MigrationIncomingState *mis)
|
2015-11-05 19:11:03 +01:00
|
|
|
{
|
2019-02-15 18:45:46 +01:00
|
|
|
if (foreach_not_ignored_block(init_range, NULL)) {
|
2015-11-05 19:11:03 +01:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-01-19 09:09:20 +01:00
|
|
|
static void postcopy_temp_pages_cleanup(MigrationIncomingState *mis)
|
|
|
|
{
|
migration: Introduce postcopy channels on dest node
Postcopy handles huge pages in a special way that currently we can only have
one "channel" to transfer the page.
It's because when we install pages using UFFDIO_COPY, we need to have the whole
huge page ready, it also means we need to have a temp huge page when trying to
receive the whole content of the page.
Currently all maintainance around this tmp page is global: firstly we'll
allocate a temp huge page, then we maintain its status mostly within
ram_load_postcopy().
To enable multiple channels for postcopy, the first thing we need to do is to
prepare N temp huge pages as caching, one for each channel.
Meanwhile we need to maintain the tmp huge page status per-channel too.
To give some example, some local variables maintained in ram_load_postcopy()
are listed; they are responsible for maintaining temp huge page status:
- all_zero: this keeps whether this huge page contains all zeros
- target_pages: this counts how many target pages have been copied
- host_page: this keeps the host ptr for the page to install
Move all these fields to be together with the temp huge pages to form a new
structure called PostcopyTmpPage. Then for each (future) postcopy channel, we
need one structure to keep the state around.
For vanilla postcopy, obviously there's only one channel. It contains both
precopy and postcopy pages.
This patch teaches the dest migration node to start realize the possible number
of postcopy channels by introducing the "postcopy_channels" variable. Its
value is calculated when setup postcopy on dest node (during POSTCOPY_LISTEN
phase).
Vanilla postcopy will have channels=1, but when postcopy-preempt capability is
enabled (in the future), we will boost it to 2 because even during partial
sending of a precopy huge page we still want to preempt it and start sending
the postcopy requested page right away (so we start to keep two temp huge
pages; more if we want to enable multifd). In this patch there's a TODO marked
for that; so far the channels is always set to 1.
We need to send one "host huge page" on one channel only and we cannot split
them, because otherwise the data upon the same huge page can locate on more
than one channel so we need more complicated logic to manage. One temp host
huge page for each channel will be enough for us for now.
Postcopy will still always use the index=0 huge page even after this patch.
However it prepares for the latter patches where it can start to use multiple
channels (which needs src intervention, because only src knows which channel we
should use).
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20220301083925.33483-5-peterx@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: Fixed up long line
2022-03-01 09:39:04 +01:00
|
|
|
int i;
|
|
|
|
|
|
|
|
if (mis->postcopy_tmp_pages) {
|
|
|
|
for (i = 0; i < mis->postcopy_channels; i++) {
|
|
|
|
if (mis->postcopy_tmp_pages[i].tmp_huge_page) {
|
|
|
|
munmap(mis->postcopy_tmp_pages[i].tmp_huge_page,
|
|
|
|
mis->largest_page_size);
|
|
|
|
mis->postcopy_tmp_pages[i].tmp_huge_page = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
g_free(mis->postcopy_tmp_pages);
|
|
|
|
mis->postcopy_tmp_pages = NULL;
|
2022-01-19 09:09:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (mis->postcopy_tmp_zero_page) {
|
|
|
|
munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
|
|
|
|
mis->postcopy_tmp_zero_page = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-05 19:11:03 +01:00
|
|
|
/*
|
|
|
|
* At the end of a migration where postcopy_ram_incoming_init was called.
|
|
|
|
*/
|
|
|
|
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
|
|
|
|
{
|
2015-11-05 19:11:17 +01:00
|
|
|
trace_postcopy_ram_incoming_cleanup_entry();
|
|
|
|
|
2023-03-26 19:25:39 +02:00
|
|
|
if (mis->preempt_thread_status == PREEMPT_THREAD_CREATED) {
|
|
|
|
/* Notify the fast load thread to quit */
|
|
|
|
mis->preempt_thread_status = PREEMPT_THREAD_QUIT;
|
|
|
|
if (mis->postcopy_qemufile_dst) {
|
|
|
|
qemu_file_shutdown(mis->postcopy_qemufile_dst);
|
|
|
|
}
|
2022-07-07 20:55:02 +02:00
|
|
|
qemu_thread_join(&mis->postcopy_prio_thread);
|
2023-03-26 19:25:39 +02:00
|
|
|
mis->preempt_thread_status = PREEMPT_THREAD_NONE;
|
2022-07-07 20:55:02 +02:00
|
|
|
}
|
|
|
|
|
2015-11-05 19:11:17 +01:00
|
|
|
if (mis->have_fault_thread) {
|
2018-03-12 18:21:20 +01:00
|
|
|
Error *local_err = NULL;
|
|
|
|
|
2018-10-08 18:05:35 +02:00
|
|
|
/* Let the fault thread quit */
|
2020-09-23 12:56:46 +02:00
|
|
|
qatomic_set(&mis->fault_thread_quit, 1);
|
2018-10-08 18:05:35 +02:00
|
|
|
postcopy_fault_thread_notify(mis);
|
|
|
|
trace_postcopy_ram_incoming_cleanup_join();
|
|
|
|
qemu_thread_join(&mis->fault_thread);
|
|
|
|
|
2018-03-12 18:21:20 +01:00
|
|
|
if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) {
|
|
|
|
error_report_err(local_err);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2019-02-15 18:45:46 +01:00
|
|
|
if (foreach_not_ignored_block(cleanup_range, mis)) {
|
2015-11-05 19:11:17 +01:00
|
|
|
return -1;
|
|
|
|
}
|
2018-02-08 11:31:07 +01:00
|
|
|
|
2015-11-05 19:11:17 +01:00
|
|
|
trace_postcopy_ram_incoming_cleanup_closeuf();
|
|
|
|
close(mis->userfault_fd);
|
2018-02-08 11:31:06 +01:00
|
|
|
close(mis->userfault_event_fd);
|
2015-11-05 19:11:17 +01:00
|
|
|
mis->have_fault_thread = false;
|
2015-11-05 19:11:03 +01:00
|
|
|
}
|
|
|
|
|
2015-11-05 19:11:22 +01:00
|
|
|
if (enable_mlock) {
|
|
|
|
if (os_mlock() < 0) {
|
|
|
|
error_report("mlock: %s", strerror(errno));
|
|
|
|
/*
|
|
|
|
* It doesn't feel right to fail at this point, we have a valid
|
|
|
|
* VM state.
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-19 09:09:20 +01:00
|
|
|
postcopy_temp_pages_cleanup(mis);
|
|
|
|
|
2018-03-22 19:17:27 +01:00
|
|
|
trace_postcopy_ram_incoming_cleanup_blocktime(
|
|
|
|
get_postcopy_total_blocktime());
|
|
|
|
|
2015-11-05 19:11:17 +01:00
|
|
|
trace_postcopy_ram_incoming_cleanup_exit();
|
2015-11-05 19:11:03 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-11-05 19:11:20 +01:00
|
|
|
/*
|
|
|
|
* Disable huge pages on an area
|
|
|
|
*/
|
2019-02-15 18:45:44 +01:00
|
|
|
static int nhp_range(RAMBlock *rb, void *opaque)
|
2015-11-05 19:11:20 +01:00
|
|
|
{
|
2019-02-15 18:45:44 +01:00
|
|
|
const char *block_name = qemu_ram_get_idstr(rb);
|
|
|
|
void *host_addr = qemu_ram_get_host_addr(rb);
|
|
|
|
ram_addr_t offset = qemu_ram_get_offset(rb);
|
2021-04-29 13:27:06 +02:00
|
|
|
ram_addr_t length = rb->postcopy_length;
|
2015-11-05 19:11:20 +01:00
|
|
|
trace_postcopy_nhp_range(block_name, host_addr, offset, length);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Before we do discards we need to ensure those discards really
|
|
|
|
* do delete areas of the page, even if THP thinks a hugepage would
|
|
|
|
* be a good idea, so force hugepages off.
|
|
|
|
*/
|
2015-11-19 16:27:48 +01:00
|
|
|
qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);
|
2015-11-05 19:11:20 +01:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Userfault requires us to mark RAM as NOHUGEPAGE prior to discard
|
|
|
|
* however leaving it until after precopy means that most of the precopy
|
|
|
|
* data is still THPd
|
|
|
|
*/
|
|
|
|
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
|
|
|
|
{
|
2019-02-15 18:45:46 +01:00
|
|
|
if (foreach_not_ignored_block(nhp_range, mis)) {
|
2015-11-05 19:11:20 +01:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
postcopy_state_set(POSTCOPY_INCOMING_DISCARD);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-11-05 19:11:04 +01:00
|
|
|
/*
 * Mark the given area of RAM as requiring notification to unwritten areas
 * Used as a callback on foreach_not_ignored_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(RAMBlock *rb, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    /* Register the whole postcopy range of this block for MISSING faults */
    reg_struct.range.start = (uintptr_t)qemu_ram_get_host_addr(rb);
    reg_struct.range.len = rb->postcopy_length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    /* The kernel reports the ioctls the registered range supports */
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }
    if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
        /* Remember that UFFDIO_ZEROPAGE works for this block */
        qemu_ram_set_uf_zeroable(rb);
    }

    return 0;
}
|
2018-03-12 18:21:14 +01:00
|
|
|
int postcopy_wake_shared(struct PostCopyFD *pcfd,
|
|
|
|
uint64_t client_addr,
|
|
|
|
RAMBlock *rb)
|
|
|
|
{
|
|
|
|
size_t pagesize = qemu_ram_pagesize(rb);
|
|
|
|
struct uffdio_range range;
|
|
|
|
int ret;
|
|
|
|
trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
|
2021-10-11 19:53:44 +02:00
|
|
|
range.start = ROUND_DOWN(client_addr, pagesize);
|
2018-03-12 18:21:14 +01:00
|
|
|
range.len = pagesize;
|
|
|
|
ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
|
|
|
|
if (ret) {
|
|
|
|
error_report("%s: Failed to wake: %zx in %s (%s)",
|
|
|
|
__func__, (size_t)client_addr, qemu_ram_get_idstr(rb),
|
|
|
|
strerror(errno));
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
migration/postcopy: Handle RAMBlocks with a RamDiscardManager on the destination
Currently, when someone (i.e., the VM) accesses discarded parts inside a
RAMBlock with a RamDiscardManager managing the corresponding mapped memory
region, postcopy will request migration of the corresponding page from the
source. The source, however, will never answer, because it refuses to
migrate such pages with undefined content ("logically unplugged"): the
pages are never dirty, and get_queued_page() will consequently skip
processing these postcopy requests.
Especially reading discarded ("logically unplugged") ranges is supposed to
work in some setups (for example with current virtio-mem), although it
barely ever happens: still, not placing a page would currently stall the
VM, as it cannot make forward progress.
Let's check the state via the RamDiscardManager (the state e.g.,
of virtio-mem is migrated during precopy) and avoid sending a request
that will never get answered. Place a fresh zero page instead to keep
the VM working. This is the same behavior that would happen
automatically without userfaultfd being active, when accessing virtual
memory regions without populated pages -- "populate on demand".
For now, there are valid cases (as documented in the virtio-mem spec) where
a VM might read discarded memory; in the future, we will disallow that.
Then, we might want to handle that case differently, e.g., warning the
user that the VM seems to be mis-behaving.
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
2021-10-11 19:53:43 +02:00
|
|
|
static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
|
|
|
|
ram_addr_t start, uint64_t haddr)
|
|
|
|
{
|
|
|
|
void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Discarded pages (via RamDiscardManager) are never migrated. On unlikely
|
|
|
|
* access, place a zeropage, which will also set the relevant bits in the
|
|
|
|
* recv_bitmap accordingly, so we won't try placing a zeropage twice.
|
|
|
|
*
|
|
|
|
* Checking a single bit is sufficient to handle pagesize > TPS as either
|
|
|
|
* all relevant bits are set or not.
|
|
|
|
*/
|
|
|
|
assert(QEMU_IS_ALIGNED(start, qemu_ram_pagesize(rb)));
|
|
|
|
if (ramblock_page_is_discarded(rb, start)) {
|
|
|
|
bool received = ramblock_recv_bitmap_test_byte_offset(rb, start);
|
|
|
|
|
|
|
|
return received ? 0 : postcopy_place_page_zero(mis, aligned, rb);
|
|
|
|
}
|
|
|
|
|
|
|
|
return migrate_send_rp_req_pages(mis, rb, start, haddr);
|
|
|
|
}
|
|
|
|
|
2018-03-12 18:21:12 +01:00
|
|
|
/*
|
|
|
|
* Callback from shared fault handlers to ask for a page,
|
|
|
|
* the page must be specified by a RAMBlock and an offset in that rb
|
|
|
|
* Note: Only for use by shared fault handlers (in fault thread)
|
|
|
|
*/
|
|
|
|
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
|
|
|
|
uint64_t client_addr, uint64_t rb_offset)
|
|
|
|
{
|
2021-10-11 19:53:44 +02:00
|
|
|
uint64_t aligned_rbo = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
|
2018-03-12 18:21:12 +01:00
|
|
|
MigrationIncomingState *mis = migration_incoming_get_current();
|
|
|
|
|
|
|
|
trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
|
|
|
|
rb_offset);
|
2018-03-12 18:21:17 +01:00
|
|
|
if (ramblock_recv_bitmap_test_byte_offset(rb, aligned_rbo)) {
|
|
|
|
trace_postcopy_request_shared_page_present(pcfd->idstr,
|
|
|
|
qemu_ram_get_idstr(rb), rb_offset);
|
|
|
|
return postcopy_wake_shared(pcfd, client_addr, rb);
|
|
|
|
}
|
migration/postcopy: Handle RAMBlocks with a RamDiscardManager on the destination
Currently, when someone (i.e., the VM) accesses discarded parts inside a
RAMBlock with a RamDiscardManager managing the corresponding mapped memory
region, postcopy will request migration of the corresponding page from the
source. The source, however, will never answer, because it refuses to
migrate such pages with undefined content ("logically unplugged"): the
pages are never dirty, and get_queued_page() will consequently skip
processing these postcopy requests.
Especially reading discarded ("logically unplugged") ranges is supposed to
work in some setups (for example with current virtio-mem), although it
barely ever happens: still, not placing a page would currently stall the
VM, as it cannot make forward progress.
Let's check the state via the RamDiscardManager (the state e.g.,
of virtio-mem is migrated during precopy) and avoid sending a request
that will never get answered. Place a fresh zero page instead to keep
the VM working. This is the same behavior that would happen
automatically without userfaultfd being active, when accessing virtual
memory regions without populated pages -- "populate on demand".
For now, there are valid cases (as documented in the virtio-mem spec) where
a VM might read discarded memory; in the future, we will disallow that.
Then, we might want to handle that case differently, e.g., warning the
user that the VM seems to be mis-behaving.
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
2021-10-11 19:53:43 +02:00
|
|
|
postcopy_request_page(mis, rb, aligned_rbo, client_addr);
|
2018-03-12 18:21:12 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-03-22 19:17:24 +01:00
|
|
|
static int get_mem_fault_cpu_index(uint32_t pid)
|
|
|
|
{
|
|
|
|
CPUState *cpu_iter;
|
|
|
|
|
|
|
|
CPU_FOREACH(cpu_iter) {
|
|
|
|
if (cpu_iter->thread_id == pid) {
|
|
|
|
trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
|
|
|
|
return cpu_iter->cpu_index;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
trace_get_mem_fault_cpu_index(-1, pid);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Milliseconds elapsed since dc->start_time, truncated to 32 bits.
 * Clamped to a minimum of 1 because callers treat a stored 0 as "unset"
 * (see the read_vcpu_time == 0 check in mark_postcopy_blocktime_end).
 */
static uint32_t get_low_time_offset(PostcopyBlocktimeContext *dc)
{
    int64_t elapsed = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - dc->start_time;

    if (elapsed < 1) {
        elapsed = 1;
    }
    return elapsed & UINT32_MAX;
}
|
|
|
|
/*
 * This function is being called when pagefault occurs. It
 * tracks down vCPU blocking time.
 *
 * @addr: faulted host virtual address
 * @ptid: faulted process thread id
 * @rb: ramblock appropriate to addr
 */
static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
                                          RAMBlock *rb)
{
    int cpu, already_received;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    uint32_t low_time_offset;

    /* No blocktime context (tracking disabled) or no thread id: nothing to do */
    if (!dc || ptid == 0) {
        return;
    }
    /* Faults from threads that are not vCPUs are not accounted */
    cpu = get_mem_fault_cpu_index(ptid);
    if (cpu < 0) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    if (dc->vcpu_addr[cpu] == 0) {
        /* First outstanding fault for this vCPU: one more vCPU is blocked */
        qatomic_inc(&dc->smp_cpus_down);
    }

    /* Record the fault address and the start-of-block timestamp */
    qatomic_xchg(&dc->last_begin, low_time_offset);
    qatomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
    qatomic_xchg(&dc->vcpu_addr[cpu], addr);

    /*
     * Check the receive bitmap here, not at the beginning of the function:
     * the bitmap may be set (by qemu_ufd_copy_ioctl) concurrently, so a
     * late check narrows the window in which we would account a fault for
     * a page that has in fact already arrived.
     */
    already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
    if (already_received) {
        /* Page raced in already: undo the bookkeeping done above */
        qatomic_xchg(&dc->vcpu_addr[cpu], 0);
        qatomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
        qatomic_dec(&dc->smp_cpus_down);
    }
    trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
                                        cpu, already_received);
}
|
|
|
|
/*
 * This function just provide calculated blocktime per cpu and trace it.
 * Total blocktime is calculated in mark_postcopy_blocktime_end.
 *
 *
 * Assume we have 3 CPU
 *
 *      S1        E1           S1               E1
 * -----***********------------xxx***************------------------------> CPU1
 *
 *             S2                E2
 * ------------****************xxx---------------------------------------> CPU2
 *
 *                         S3            E3
 * ------------------------****xxx********-------------------------------> CPU3
 *
 * We have sequence S1,S2,E1,S3,S1,E2,E3,E1
 * S2,E1 - doesn't match condition due to sequence S1,S2,E1 doesn't include CPU3
 * S3,S1,E2 - sequence includes all CPUs, in this case overlap will be S1,E2 -
 *            it's a part of total blocktime.
 * S1 - here is last_begin
 * Legend of the picture is following:
 *              * - means blocktime per vCPU
 *              x - means overlapped blocktime (total blocktime)
 *
 * @addr: host virtual address
 */
static void mark_postcopy_blocktime_end(uintptr_t addr)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    int i, affected_cpu = 0;
    bool vcpu_total_blocktime = false;
    uint32_t read_vcpu_time, low_time_offset;

    /* Blocktime tracking not enabled */
    if (!dc) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    /*
     * Look up every vCPU blocked on this address and clear its entry.
     * A linear scan looks straightforward but is not optimal; a better
     * algorithm would keep a tree or hash keyed by address with a list
     * of vCPUs as the value.
     */
    for (i = 0; i < smp_cpus; i++) {
        uint32_t vcpu_blocktime = 0;

        /* qatomic_fetch_add(.., 0) is used as an atomic read */
        read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
        if (qatomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
            read_vcpu_time == 0) {
            continue;
        }
        qatomic_xchg(&dc->vcpu_addr[i], 0);
        vcpu_blocktime = low_time_offset - read_vcpu_time;
        affected_cpu += 1;
        /* we need to know is that mark_postcopy_end was due to
         * faulted page, another possible case it's prefetched
         * page and in that case we shouldn't be here */
        if (!vcpu_total_blocktime &&
            qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
            vcpu_total_blocktime = true;
        }
        /* continue cycle, due to one page could affect several vCPUs */
        dc->vcpu_blocktime[i] += vcpu_blocktime;
    }

    qatomic_sub(&dc->smp_cpus_down, affected_cpu);
    if (vcpu_total_blocktime) {
        /* All vCPUs were blocked: the overlap counts as total blocktime */
        dc->total_blocktime += low_time_offset - qatomic_fetch_add(
                &dc->last_begin, 0);
    }
    trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
                                      affected_cpu);
}
|
2022-03-01 09:39:11 +01:00
|
|
|
/*
 * Block the fault thread until migration is resumed.
 * Waits on postcopy_pause_sem_fault, which is posted when the broken
 * migration channel has been rebuilt (see the callers' comments).
 */
static void postcopy_pause_fault_thread(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fault_thread();
    qemu_sem_wait(&mis->postcopy_pause_sem_fault);
    trace_postcopy_pause_fault_thread_continued();
}
|
2015-11-05 19:11:04 +01:00
|
|
|
/*
 * Handle faults detected by the USERFAULT markings
 *
 * Thread body: polls the main userfault fd, an eventfd used to request
 * quit/wakeup, and any shared (external process) userfault fds.  For each
 * page fault it requests the page from the source (pausing and retrying
 * across a broken channel).  Runs until told to quit or a fatal read/poll
 * error occurs.
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    size_t index;
    RAMBlock *rb = NULL;

    trace_postcopy_ram_fault_thread_entry();
    rcu_register_thread();
    mis->last_rb = NULL; /* last RAMBlock we sent part of */
    /* Tell the creating thread we are up and running */
    qemu_sem_post(&mis->thread_sync_sem);

    struct pollfd *pfd;
    /* Slots: [0] userfault fd, [1] eventfd, [2..] shared remote fds */
    size_t pfd_len = 2 + mis->postcopy_remote_fds->len;

    pfd = g_new0(struct pollfd, pfd_len);

    pfd[0].fd = mis->userfault_fd;
    pfd[0].events = POLLIN;
    pfd[1].fd = mis->userfault_event_fd;
    pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
    trace_postcopy_ram_fault_thread_fds_core(pfd[0].fd, pfd[1].fd);
    for (index = 0; index < mis->postcopy_remote_fds->len; index++) {
        struct PostCopyFD *pcfd = &g_array_index(mis->postcopy_remote_fds,
                                                 struct PostCopyFD, index);
        pfd[2 + index].fd = pcfd->fd;
        pfd[2 + index].events = POLLIN;
        trace_postcopy_ram_fault_thread_fds_extra(2 + index, pcfd->idstr,
                                                  pcfd->fd);
    }

    while (true) {
        ram_addr_t rb_offset;
        int poll_result;

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA,
         * however we can be told to quit via userfault_quit_fd which is
         * an eventfd
         */

        poll_result = poll(pfd, pfd_len, -1 /* Wait forever */);
        if (poll_result == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (!mis->to_src_file) {
            /*
             * Possibly someone tells us that the return path is
             * broken already using the event. We should hold until
             * the channel is rebuilt.
             */
            postcopy_pause_fault_thread(mis);
        }

        /* Eventfd activity: either a quit request or just a wakeup */
        if (pfd[1].revents) {
            uint64_t tmp64 = 0;

            /* Consume the signal */
            if (read(mis->userfault_event_fd, &tmp64, 8) != 8) {
                /* Nothing obviously nicer than posting this error. */
                error_report("%s: read() failed", __func__);
            }

            if (qatomic_read(&mis->fault_thread_quit)) {
                trace_postcopy_ram_fault_thread_quit();
                break;
            }
        }

        /* Main userfault fd: a guest page fault to service */
        if (pfd[0].revents) {
            poll_result--;
            ret = read(mis->userfault_fd, &msg, sizeof(msg));
            if (ret != sizeof(msg)) {
                if (errno == EAGAIN) {
                    /*
                     * if a wake up happens on the other thread just after
                     * the poll, there is nothing to read.
                     */
                    continue;
                }
                if (ret < 0) {
                    error_report("%s: Failed to read full userfault "
                                 "message: %s",
                                 __func__, strerror(errno));
                    break;
                } else {
                    error_report("%s: Read %d bytes from userfaultfd "
                                 "expected %zd",
                                 __func__, ret, sizeof(msg));
                    break; /* Lost alignment, don't know what we'd read next */
                }
            }
            if (msg.event != UFFD_EVENT_PAGEFAULT) {
                error_report("%s: Read unexpected event %ud from userfaultfd",
                             __func__, msg.event);
                continue; /* It's not a page fault, shouldn't happen */
            }

            rb = qemu_ram_block_from_host(
                     (void *)(uintptr_t)msg.arg.pagefault.address,
                     true, &rb_offset);
            if (!rb) {
                error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                             PRIx64, (uint64_t)msg.arg.pagefault.address);
                break;
            }

            /* Align the request down to a whole host page of this block */
            rb_offset = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
            trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                qemu_ram_get_idstr(rb),
                                                rb_offset,
                                                msg.arg.pagefault.feat.ptid);
            mark_postcopy_blocktime_begin(
                    (uintptr_t)(msg.arg.pagefault.address),
                    msg.arg.pagefault.feat.ptid, rb);

retry:
            /*
             * Send the request to the source - we want to request one
             * of our host page sizes (which is >= TPS)
             */
            ret = postcopy_request_page(mis, rb, rb_offset,
                                        msg.arg.pagefault.address);
            if (ret) {
                /* May be network failure, try to wait for recovery */
                postcopy_pause_fault_thread(mis);
                goto retry;
            }
        }

        /* Now handle any requests from external processes on shared memory */
        /* TODO: May need to handle devices deregistering during postcopy */
        for (index = 2; index < pfd_len && poll_result; index++) {
            if (pfd[index].revents) {
                struct PostCopyFD *pcfd =
                    &g_array_index(mis->postcopy_remote_fds,
                                   struct PostCopyFD, index - 2);

                poll_result--;
                if (pfd[index].revents & POLLERR) {
                    error_report("%s: POLLERR on poll %zd fd=%d",
                                 __func__, index, pcfd->fd);
                    /* Stop polling this broken fd but keep the thread alive */
                    pfd[index].events = 0;
                    continue;
                }

                ret = read(pcfd->fd, &msg, sizeof(msg));
                if (ret != sizeof(msg)) {
                    if (errno == EAGAIN) {
                        /*
                         * if a wake up happens on the other thread just after
                         * the poll, there is nothing to read.
                         */
                        continue;
                    }
                    if (ret < 0) {
                        error_report("%s: Failed to read full userfault "
                                     "message: %s (shared) revents=%d",
                                     __func__, strerror(errno),
                                     pfd[index].revents);
                        /*TODO: Could just disable this sharer */
                        break;
                    } else {
                        error_report("%s: Read %d bytes from userfaultfd "
                                     "expected %zd (shared)",
                                     __func__, ret, sizeof(msg));
                        /*TODO: Could just disable this sharer */
                        break; /*Lost alignment,don't know what we'd read next*/
                    }
                }
                if (msg.event != UFFD_EVENT_PAGEFAULT) {
                    error_report("%s: Read unexpected event %ud "
                                 "from userfaultfd (shared)",
                                 __func__, msg.event);
                    continue; /* It's not a page fault, shouldn't happen */
                }
                /* Call the device handler registered with us */
                ret = pcfd->handler(pcfd, &msg);
                if (ret) {
                    error_report("%s: Failed to resolve shared fault on %zd/%s",
                                 __func__, index, pcfd->idstr);
                    /* TODO: Fail? Disable this sharer? */
                }
            }
        }
    }
    rcu_unregister_thread();
    trace_postcopy_ram_fault_thread_exit();
    g_free(pfd);
    return NULL;
}
|
2022-01-19 09:09:20 +01:00
|
|
|
static int postcopy_temp_pages_setup(MigrationIncomingState *mis)
|
|
|
|
{
|
migration: Introduce postcopy channels on dest node
Postcopy handles huge pages in a special way that currently we can only have
one "channel" to transfer the page.
It's because when we install pages using UFFDIO_COPY, we need to have the whole
huge page ready, it also means we need to have a temp huge page when trying to
receive the whole content of the page.
Currently all maintenance around this tmp page is global: firstly we'll
allocate a temp huge page, then we maintain its status mostly within
ram_load_postcopy().
To enable multiple channels for postcopy, the first thing we need to do is to
prepare N temp huge pages as caching, one for each channel.
Meanwhile we need to maintain the tmp huge page status per-channel too.
To give some example, some local variables maintained in ram_load_postcopy()
are listed; they are responsible for maintaining temp huge page status:
- all_zero: this keeps whether this huge page contains all zeros
- target_pages: this counts how many target pages have been copied
- host_page: this keeps the host ptr for the page to install
Move all these fields to be together with the temp huge pages to form a new
structure called PostcopyTmpPage. Then for each (future) postcopy channel, we
need one structure to keep the state around.
For vanilla postcopy, obviously there's only one channel. It contains both
precopy and postcopy pages.
This patch teaches the dest migration node to start realize the possible number
of postcopy channels by introducing the "postcopy_channels" variable. Its
value is calculated when setup postcopy on dest node (during POSTCOPY_LISTEN
phase).
Vanilla postcopy will have channels=1, but when postcopy-preempt capability is
enabled (in the future), we will boost it to 2 because even during partial
sending of a precopy huge page we still want to preempt it and start sending
the postcopy requested page right away (so we start to keep two temp huge
pages; more if we want to enable multifd). In this patch there's a TODO marked
for that; so far the channels is always set to 1.
We need to send one "host huge page" on one channel only and we cannot split
them, because otherwise the data upon the same huge page can locate on more
than one channel so we need more complicated logic to manage. One temp host
huge page for each channel will be enough for us for now.
Postcopy will still always use the index=0 huge page even after this patch.
However it prepares for the latter patches where it can start to use multiple
channels (which needs src intervention, because only src knows which channel we
should use).
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20220301083925.33483-5-peterx@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: Fixed up long line
2022-03-01 09:39:04 +01:00
|
|
|
PostcopyTmpPage *tmp_page;
|
|
|
|
int err, i, channels;
|
|
|
|
void *temp_page;
|
|
|
|
|
2022-07-07 20:55:02 +02:00
|
|
|
if (migrate_postcopy_preempt()) {
|
|
|
|
/* If preemption enabled, need extra channel for urgent requests */
|
|
|
|
mis->postcopy_channels = RAM_CHANNEL_MAX;
|
|
|
|
} else {
|
|
|
|
/* Both precopy/postcopy on the same channel */
|
|
|
|
mis->postcopy_channels = 1;
|
|
|
|
}
|
migration: Introduce postcopy channels on dest node
Postcopy handles huge pages in a special way that currently we can only have
one "channel" to transfer the page.
It's because when we install pages using UFFDIO_COPY, we need to have the whole
huge page ready, it also means we need to have a temp huge page when trying to
receive the whole content of the page.
Currently all maintainance around this tmp page is global: firstly we'll
allocate a temp huge page, then we maintain its status mostly within
ram_load_postcopy().
To enable multiple channels for postcopy, the first thing we need to do is to
prepare N temp huge pages as caching, one for each channel.
Meanwhile we need to maintain the tmp huge page status per-channel too.
To give some example, some local variables maintained in ram_load_postcopy()
are listed; they are responsible for maintaining temp huge page status:
- all_zero: this keeps whether this huge page contains all zeros
- target_pages: this counts how many target pages have been copied
- host_page: this keeps the host ptr for the page to install
Move all these fields to be together with the temp huge pages to form a new
structure called PostcopyTmpPage. Then for each (future) postcopy channel, we
need one structure to keep the state around.
For vanilla postcopy, obviously there's only one channel. It contains both
precopy and postcopy pages.
This patch teaches the dest migration node to start realize the possible number
of postcopy channels by introducing the "postcopy_channels" variable. Its
value is calculated when setup postcopy on dest node (during POSTCOPY_LISTEN
phase).
Vanilla postcopy will have channels=1, but when postcopy-preempt capability is
enabled (in the future), we will boost it to 2 because even during partial
sending of a precopy huge page we still want to preempt it and start sending
the postcopy requested page right away (so we start to keep two temp huge
pages; more if we want to enable multifd). In this patch there's a TODO marked
for that; so far the channels is always set to 1.
We need to send one "host huge page" on one channel only and we cannot split
them, because otherwise the data upon the same huge page can locate on more
than one channel so we need more complicated logic to manage. One temp host
huge page for each channel will be enough for us for now.
Postcopy will still always use the index=0 huge page even after this patch.
However it prepares for the latter patches where it can start to use multiple
channels (which needs src intervention, because only src knows which channel we
should use).
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20220301083925.33483-5-peterx@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: Fixed up long line
2022-03-01 09:39:04 +01:00
|
|
|
|
|
|
|
channels = mis->postcopy_channels;
|
|
|
|
mis->postcopy_tmp_pages = g_malloc0_n(sizeof(PostcopyTmpPage), channels);
|
|
|
|
|
|
|
|
for (i = 0; i < channels; i++) {
|
|
|
|
tmp_page = &mis->postcopy_tmp_pages[i];
|
|
|
|
temp_page = mmap(NULL, mis->largest_page_size, PROT_READ | PROT_WRITE,
|
|
|
|
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
|
|
|
|
if (temp_page == MAP_FAILED) {
|
|
|
|
err = errno;
|
|
|
|
error_report("%s: Failed to map postcopy_tmp_pages[%d]: %s",
|
|
|
|
__func__, i, strerror(err));
|
|
|
|
/* Clean up will be done later */
|
|
|
|
return -err;
|
|
|
|
}
|
|
|
|
tmp_page->tmp_huge_page = temp_page;
|
|
|
|
/* Initialize default states for each tmp page */
|
|
|
|
postcopy_temp_page_reset(tmp_page);
|
2022-01-19 09:09:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages
|
|
|
|
*/
|
|
|
|
mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
|
|
|
|
PROT_READ | PROT_WRITE,
|
|
|
|
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
|
|
|
|
if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
|
|
|
|
err = errno;
|
|
|
|
mis->postcopy_tmp_zero_page = NULL;
|
|
|
|
error_report("%s: Failed to map large zero page %s",
|
|
|
|
__func__, strerror(err));
|
|
|
|
return -err;
|
|
|
|
}
|
|
|
|
|
|
|
|
memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-10-10 03:13:15 +02:00
|
|
|
/*
 * Prepare the incoming side for userfault-driven postcopy: open and
 * validate the userfaultfd, start the fault thread(s), arm every
 * not-ignored RAMBlock for fault notification and allocate the temp
 * pages.  Returns 0 on success, -1 on failure; partially-initialized
 * state is torn down later by the incoming cleanup path, not here.
 */
int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = uffd_open(O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_check_and_apply(mis->userfault_fd, mis)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_event_fd == -1) {
        error_report("%s: Opening userfault_event_fd: %s", __func__,
                     strerror(errno));
        /* The eventfd failed, so close the already-open userfault fd */
        close(mis->userfault_fd);
        return -1;
    }

    postcopy_thread_create(mis, &mis->fault_thread, "fault-default",
                           postcopy_ram_fault_thread, QEMU_THREAD_JOINABLE);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (foreach_not_ignored_block(ram_block_enable_notify, mis)) {
        error_report("ram_block_enable_notify failed");
        return -1;
    }

    /* Allocate one temp huge page per postcopy channel */
    if (postcopy_temp_pages_setup(mis)) {
        /* Error dumped in the sub-function */
        return -1;
    }

    if (migrate_postcopy_preempt()) {
        /*
         * This thread needs to be created after the temp pages because
         * it'll fetch RAM_CHANNEL_POSTCOPY PostcopyTmpPage immediately.
         */
        postcopy_thread_create(mis, &mis->postcopy_prio_thread, "fault-fast",
                               postcopy_preempt_thread, QEMU_THREAD_JOINABLE);
        mis->preempt_thread_status = PREEMPT_THREAD_CREATED;
    }

    trace_postcopy_ram_enable_notify();

    return 0;
}
|
|
|
|
|
2020-10-21 23:27:16 +02:00
|
|
|
/*
 * Atomically install one page of @pagesize bytes at @host_addr via the
 * userfaultfd.  When @from_addr is non-NULL the data is copied in with
 * UFFDIO_COPY; otherwise the range is zero-filled with UFFDIO_ZEROPAGE.
 * On success, also marks the range received in @rb's receive bitmap and
 * drops any matching entry from the outstanding page-request tree.
 * Returns the raw ioctl() result: 0 on success, -1 with errno set on
 * failure.
 */
static int qemu_ufd_copy_ioctl(MigrationIncomingState *mis, void *host_addr,
                               void *from_addr, uint64_t pagesize, RAMBlock *rb)
{
    int userfault_fd = mis->userfault_fd;
    int ret;

    if (from_addr) {
        struct uffdio_copy copy_struct;
        copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
        copy_struct.src = (uint64_t)(uintptr_t)from_addr;
        copy_struct.len = pagesize;
        copy_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
    } else {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
        zero_struct.range.len = pagesize;
        zero_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
    }
    if (!ret) {
        /*
         * Bitmap and page-request bookkeeping are updated together under
         * page_request_mutex so readers see a consistent view.
         */
        qemu_mutex_lock(&mis->page_request_mutex);
        ramblock_recv_bitmap_set_range(rb, host_addr,
                                       pagesize / qemu_target_page_size());
        /*
         * If this page resolves a page fault for a previous recorded faulted
         * address, take a special note to maintain the requested page list.
         */
        if (g_tree_lookup(mis->page_requested, host_addr)) {
            g_tree_remove(mis->page_requested, host_addr);
            mis->page_requested_count--;
            trace_postcopy_page_req_del(host_addr, mis->page_requested_count);
        }
        qemu_mutex_unlock(&mis->page_request_mutex);
        mark_postcopy_blocktime_end((uintptr_t)host_addr);
    }
    return ret;
}
|
|
|
|
|
2018-03-12 18:21:15 +01:00
|
|
|
/*
 * Notify every registered shared-memory client that the page at
 * @offset within @rb has now been placed, so their blocked users can
 * be woken.  Returns 0 on success, or the first failing waker's
 * non-zero result.
 */
int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    GArray *remote_fds = mis->postcopy_remote_fds;
    int idx;

    for (idx = 0; idx < remote_fds->len; idx++) {
        struct PostCopyFD *pcfd =
            &g_array_index(remote_fds, struct PostCopyFD, idx);
        int res = pcfd->waker(pcfd, rb, offset);

        if (res) {
            return res;
        }
    }

    return 0;
}
|
|
|
|
|
2015-11-05 19:11:10 +01:00
|
|
|
/*
|
|
|
|
* Place a host page (from) at (host) atomically
|
|
|
|
* returns 0 on success
|
|
|
|
*/
|
2017-02-24 19:28:35 +01:00
|
|
|
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
|
2017-10-05 13:13:18 +02:00
|
|
|
RAMBlock *rb)
|
2015-11-05 19:11:10 +01:00
|
|
|
{
|
2017-10-05 13:13:18 +02:00
|
|
|
size_t pagesize = qemu_ram_pagesize(rb);
|
2015-11-05 19:11:10 +01:00
|
|
|
|
|
|
|
/* copy also acks to the kernel waking the stalled thread up
|
|
|
|
* TODO: We can inhibit that ack and only do it if it was requested
|
|
|
|
* which would be slightly cheaper, but we'd have to be careful
|
|
|
|
* of the order of updating our page state.
|
|
|
|
*/
|
2020-10-21 23:27:16 +02:00
|
|
|
if (qemu_ufd_copy_ioctl(mis, host, from, pagesize, rb)) {
|
2015-11-05 19:11:10 +01:00
|
|
|
int e = errno;
|
2017-02-24 19:28:35 +01:00
|
|
|
error_report("%s: %s copy host: %p from: %p (size: %zd)",
|
|
|
|
__func__, strerror(e), host, from, pagesize);
|
2015-11-05 19:11:10 +01:00
|
|
|
|
|
|
|
return -e;
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_postcopy_place_page(host);
|
2018-03-12 18:21:17 +01:00
|
|
|
return postcopy_notify_shared_wake(rb,
|
|
|
|
qemu_ram_block_host_offset(rb, host));
|
2015-11-05 19:11:10 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Place a zero page at (host) atomically
|
|
|
|
* returns 0 on success
|
|
|
|
*/
|
2017-02-24 19:28:35 +01:00
|
|
|
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
|
2017-10-05 13:13:18 +02:00
|
|
|
RAMBlock *rb)
|
2015-11-05 19:11:10 +01:00
|
|
|
{
|
2018-03-12 18:20:58 +01:00
|
|
|
size_t pagesize = qemu_ram_pagesize(rb);
|
2017-02-24 19:28:35 +01:00
|
|
|
trace_postcopy_place_page_zero(host);
|
2015-11-05 19:11:10 +01:00
|
|
|
|
2018-03-12 18:20:58 +01:00
|
|
|
/* Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE
|
|
|
|
* but it's not available for everything (e.g. hugetlbpages)
|
|
|
|
*/
|
|
|
|
if (qemu_ram_is_uf_zeroable(rb)) {
|
2020-10-21 23:27:16 +02:00
|
|
|
if (qemu_ufd_copy_ioctl(mis, host, NULL, pagesize, rb)) {
|
2017-02-24 19:28:35 +01:00
|
|
|
int e = errno;
|
|
|
|
error_report("%s: %s zero host: %p",
|
|
|
|
__func__, strerror(e), host);
|
2015-11-05 19:11:10 +01:00
|
|
|
|
2017-02-24 19:28:35 +01:00
|
|
|
return -e;
|
|
|
|
}
|
2018-03-12 18:21:17 +01:00
|
|
|
return postcopy_notify_shared_wake(rb,
|
|
|
|
qemu_ram_block_host_offset(rb,
|
|
|
|
host));
|
2017-02-24 19:28:35 +01:00
|
|
|
} else {
|
2019-10-05 15:50:21 +02:00
|
|
|
return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page, rb);
|
2015-11-05 19:11:10 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-05 19:10:55 +01:00
|
|
|
#else
|
|
|
|
/* No target OS support, stubs just fail */
|
2018-03-22 19:17:27 +01:00
|
|
|
/* Stub: no OS support, so there is no destination postcopy info to fill */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
}
|
|
|
|
|
2017-09-19 18:47:56 +02:00
|
|
|
/* Stub: postcopy can never be supported without host userfault support */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    error_report("%s: No OS support", __func__);
    return false;
}
|
|
|
|
|
2018-06-20 22:27:36 +02:00
|
|
|
/*
 * Stub: reached only if a postcopy stream arrives even though the
 * capability check failed; report and fail.
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    /*
     * Use __func__ for consistency with the sibling stubs; the emitted
     * message text is unchanged.
     */
    error_report("%s: No OS support", __func__);
    return -1;
}
|
|
|
|
|
|
|
|
/* Stub: must be unreachable when postcopy isn't supported on this host */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}
|
|
|
|
|
2015-11-05 19:11:20 +01:00
|
|
|
/* Stub: must be unreachable when postcopy isn't supported on this host */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}
|
|
|
|
|
2018-03-20 15:26:10 +01:00
|
|
|
/* Stub: must be unreachable when postcopy isn't supported on this host */
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    assert(0);
    return -1;
}
|
|
|
|
|
2019-10-10 03:13:15 +02:00
|
|
|
/* Stub: must be unreachable when postcopy isn't supported on this host */
int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}
|
2015-11-05 19:11:10 +01:00
|
|
|
|
2017-02-24 19:28:35 +01:00
|
|
|
/* Stub: must be unreachable when postcopy isn't supported on this host */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    assert(0);
    return -1;
}
|
|
|
|
|
2017-02-24 19:28:35 +01:00
|
|
|
/* Stub: must be unreachable when postcopy isn't supported on this host */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    assert(0);
    return -1;
}
|
|
|
|
|
2018-03-12 18:21:14 +01:00
|
|
|
/* Stub: must be unreachable when postcopy isn't supported on this host */
int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    assert(0);
    return -1;
}
|
2015-11-05 19:10:55 +01:00
|
|
|
#endif
|
|
|
|
|
2015-11-05 19:11:02 +01:00
|
|
|
/* ------------------------------------------------------------------------- */
|
migration: Introduce postcopy channels on dest node
Postcopy handles huge pages in a special way that currently we can only have
one "channel" to transfer the page.
It's because when we install pages using UFFDIO_COPY, we need to have the whole
huge page ready, it also means we need to have a temp huge page when trying to
receive the whole content of the page.
Currently all maintainance around this tmp page is global: firstly we'll
allocate a temp huge page, then we maintain its status mostly within
ram_load_postcopy().
To enable multiple channels for postcopy, the first thing we need to do is to
prepare N temp huge pages as caching, one for each channel.
Meanwhile we need to maintain the tmp huge page status per-channel too.
To give some example, some local variables maintained in ram_load_postcopy()
are listed; they are responsible for maintaining temp huge page status:
- all_zero: this keeps whether this huge page contains all zeros
- target_pages: this counts how many target pages have been copied
- host_page: this keeps the host ptr for the page to install
Move all these fields to be together with the temp huge pages to form a new
structure called PostcopyTmpPage. Then for each (future) postcopy channel, we
need one structure to keep the state around.
For vanilla postcopy, obviously there's only one channel. It contains both
precopy and postcopy pages.
This patch teaches the dest migration node to start realize the possible number
of postcopy channels by introducing the "postcopy_channels" variable. Its
value is calculated when setup postcopy on dest node (during POSTCOPY_LISTEN
phase).
Vanilla postcopy will have channels=1, but when postcopy-preempt capability is
enabled (in the future), we will boost it to 2 because even during partial
sending of a precopy huge page we still want to preempt it and start sending
the postcopy requested page right away (so we start to keep two temp huge
pages; more if we want to enable multifd). In this patch there's a TODO marked
for that; so far the channels is always set to 1.
We need to send one "host huge page" on one channel only and we cannot split
them, because otherwise the data upon the same huge page can locate on more
than one channel so we need more complicated logic to manage. One temp host
huge page for each channel will be enough for us for now.
Postcopy will still always use the index=0 huge page even after this patch.
However it prepares for the latter patches where it can start to use multiple
channels (which needs src intervention, because only src knows which channel we
should use).
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20220301083925.33483-5-peterx@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
dgilbert: Fixed up long line
2022-03-01 09:39:04 +01:00
|
|
|
/*
 * Return a per-channel temp page tracker to its pristine state, ready
 * to start accumulating the next (huge) page.
 */
void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page)
{
    tmp_page->host_addr = NULL;
    tmp_page->target_pages = 0;
    /*
     * Starts out true on reset; cleared the moment any non-zero small
     * page arrives within this huge page.
     */
    tmp_page->all_zero = true;
}
|
2015-11-05 19:11:02 +01:00
|
|
|
|
2018-02-08 11:31:07 +01:00
|
|
|
/*
 * Poke the fault thread's wakeup eventfd so it re-checks its quit
 * condition.  Failure is only reported; there is no recovery path.
 */
void postcopy_fault_thread_notify(MigrationIncomingState *mis)
{
    uint64_t tmp64 = 1;

    /*
     * Wakeup the fault_thread. It's an eventfd that should currently
     * be at 0, we're going to increment it to 1
     */
    if (write(mis->userfault_event_fd, &tmp64, sizeof(tmp64)) !=
        sizeof(tmp64)) {
        /* Not much we can do here, but may as well report it */
        error_report("%s: incrementing failed: %s", __func__,
                     strerror(errno));
    }
}
|
|
|
|
|
2015-11-05 19:11:02 +01:00
|
|
|
/**
|
|
|
|
* postcopy_discard_send_init: Called at the start of each RAMBlock before
|
|
|
|
* asking to discard individual ranges.
|
|
|
|
*
|
|
|
|
* @ms: The current migration state.
|
2019-07-24 03:07:21 +02:00
|
|
|
* @offset: the bitmap offset of the named RAMBlock in the migration bitmap.
|
2015-11-05 19:11:02 +01:00
|
|
|
* @name: RAMBlock that discards will operate on.
|
|
|
|
*/
|
2019-07-24 03:07:21 +02:00
|
|
|
static PostcopyDiscardState pds = {0};
|
|
|
|
void postcopy_discard_send_init(MigrationState *ms, const char *name)
|
2015-11-05 19:11:02 +01:00
|
|
|
{
|
2019-07-24 03:07:21 +02:00
|
|
|
pds.ramblock_name = name;
|
|
|
|
pds.cur_entry = 0;
|
|
|
|
pds.nsentwords = 0;
|
|
|
|
pds.nsentcmds = 0;
|
2015-11-05 19:11:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* postcopy_discard_send_range: Called by the bitmap code for each chunk to
|
|
|
|
* discard. May send a discard message, may just leave it queued to
|
|
|
|
* be sent later.
|
|
|
|
*
|
|
|
|
* @ms: Current migration state.
|
|
|
|
* @start,@length: a range of pages in the migration bitmap in the
|
|
|
|
* RAM block passed to postcopy_discard_send_init() (length=1 is one page)
|
|
|
|
*/
|
2019-07-24 03:07:21 +02:00
|
|
|
void postcopy_discard_send_range(MigrationState *ms, unsigned long start,
|
|
|
|
unsigned long length)
|
2015-11-05 19:11:02 +01:00
|
|
|
{
|
2017-03-21 09:09:14 +01:00
|
|
|
size_t tp_size = qemu_target_page_size();
|
2015-11-05 19:11:02 +01:00
|
|
|
/* Convert to byte offsets within the RAM block */
|
2019-07-24 03:07:21 +02:00
|
|
|
pds.start_list[pds.cur_entry] = start * tp_size;
|
|
|
|
pds.length_list[pds.cur_entry] = length * tp_size;
|
|
|
|
trace_postcopy_discard_send_range(pds.ramblock_name, start, length);
|
|
|
|
pds.cur_entry++;
|
|
|
|
pds.nsentwords++;
|
2015-11-05 19:11:02 +01:00
|
|
|
|
2019-07-24 03:07:21 +02:00
|
|
|
if (pds.cur_entry == MAX_DISCARDS_PER_COMMAND) {
|
2015-11-05 19:11:02 +01:00
|
|
|
/* Full set, ship it! */
|
2016-01-15 04:37:42 +01:00
|
|
|
qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
|
2019-07-24 03:07:21 +02:00
|
|
|
pds.ramblock_name,
|
|
|
|
pds.cur_entry,
|
|
|
|
pds.start_list,
|
|
|
|
pds.length_list);
|
|
|
|
pds.nsentcmds++;
|
|
|
|
pds.cur_entry = 0;
|
2015-11-05 19:11:02 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* postcopy_discard_send_finish: Called at the end of each RAMBlock by the
|
|
|
|
* bitmap code. Sends any outstanding discard messages, frees the PDS
|
|
|
|
*
|
|
|
|
* @ms: Current migration state.
|
|
|
|
*/
|
2019-07-24 03:07:21 +02:00
|
|
|
void postcopy_discard_send_finish(MigrationState *ms)
|
2015-11-05 19:11:02 +01:00
|
|
|
{
|
|
|
|
/* Anything unsent? */
|
2019-07-24 03:07:21 +02:00
|
|
|
if (pds.cur_entry) {
|
2016-01-15 04:37:42 +01:00
|
|
|
qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
|
2019-07-24 03:07:21 +02:00
|
|
|
pds.ramblock_name,
|
|
|
|
pds.cur_entry,
|
|
|
|
pds.start_list,
|
|
|
|
pds.length_list);
|
|
|
|
pds.nsentcmds++;
|
2015-11-05 19:11:02 +01:00
|
|
|
}
|
|
|
|
|
2019-07-24 03:07:21 +02:00
|
|
|
trace_postcopy_discard_send_finish(pds.ramblock_name, pds.nsentwords,
|
|
|
|
pds.nsentcmds);
|
2015-11-05 19:11:02 +01:00
|
|
|
}
|
2017-04-24 16:50:35 +02:00
|
|
|
|
|
|
|
/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since it's state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

/* Read the current incoming postcopy state */
PostcopyState postcopy_state_get(void)
{
    /* Acquire load; pairs with the atomic update in postcopy_state_set() */
    return qatomic_load_acquire(&incoming_postcopy_state);
}
|
|
|
|
|
|
|
|
/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state)
{
    /* Atomic exchange so concurrent readers never see a torn update */
    return qatomic_xchg(&incoming_postcopy_state, new_state);
}
|
2018-03-12 18:21:04 +01:00
|
|
|
|
|
|
|
/* Register a handler for external shared memory postcopy
|
|
|
|
* called on the destination.
|
|
|
|
*/
|
|
|
|
void postcopy_register_shared_ufd(struct PostCopyFD *pcfd)
|
|
|
|
{
|
|
|
|
MigrationIncomingState *mis = migration_incoming_get_current();
|
|
|
|
|
|
|
|
mis->postcopy_remote_fds = g_array_append_val(mis->postcopy_remote_fds,
|
|
|
|
*pcfd);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Unregister a handler for external shared memory postcopy
|
|
|
|
*/
|
|
|
|
void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd)
|
|
|
|
{
|
|
|
|
guint i;
|
|
|
|
MigrationIncomingState *mis = migration_incoming_get_current();
|
|
|
|
GArray *pcrfds = mis->postcopy_remote_fds;
|
|
|
|
|
2021-11-03 20:24:27 +01:00
|
|
|
if (!pcrfds) {
|
|
|
|
/* migration has already finished and freed the array */
|
|
|
|
return;
|
|
|
|
}
|
2018-03-12 18:21:04 +01:00
|
|
|
for (i = 0; i < pcrfds->len; i++) {
|
|
|
|
struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
|
|
|
|
if (cur->fd == pcfd->fd) {
|
|
|
|
mis->postcopy_remote_fds = g_array_remove_index(pcrfds, i);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2022-07-07 20:55:02 +02:00
|
|
|
|
2022-12-20 19:44:18 +01:00
|
|
|
/*
 * Install @file as the destination's (already connected) preempt channel,
 * then signal whoever is waiting on postcopy_qemufile_dst_done that the
 * channel is ready.
 */
void postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file)
{
    /*
     * The new loading channel has its own threads, so it needs to be
     * blocked too. It's by default true, just be explicit.
     */
    qemu_file_set_blocking(file, true);
    mis->postcopy_qemufile_dst = file;
    /* Must post only after the file pointer above is published */
    qemu_sem_post(&mis->postcopy_qemufile_dst_done);
    trace_postcopy_preempt_new_channel();
}
|
|
|
|
|
2022-07-07 20:55:18 +02:00
|
|
|
/*
|
|
|
|
* Setup the postcopy preempt channel with the IOC. If ERROR is specified,
|
|
|
|
* setup the error instead. This helper will free the ERROR if specified.
|
|
|
|
*/
|
migration: Create the postcopy preempt channel asynchronously
This patch allows the postcopy preempt channel to be created
asynchronously. The benefit is that when the connection is slow, we won't
take the BQL (and potentially block all things like QMP) for a long time
without releasing.
A function postcopy_preempt_wait_channel() is introduced, allowing the
migration thread to be able to wait on the channel creation. The channel
is always created by the main thread, in which we'll kick a new semaphore
to tell the migration thread that the channel has created.
We'll need to wait for the new channel in two places: (1) when there's a
new postcopy migration that is starting, or (2) when there's a postcopy
migration to resume.
For the start of migration, we don't need to wait for this channel until
when we want to start postcopy, aka, postcopy_start(). We'll fail the
migration if we found that the channel creation failed (which should
probably not happen at all in 99% of the cases, because the main channel is
using the same network topology).
For a postcopy recovery, we'll need to wait in postcopy_pause(). In that
case if the channel creation failed, we can't fail the migration or we'll
crash the VM, instead we keep in PAUSED state, waiting for yet another
recovery.
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Manish Mishra <manish.mishra@nutanix.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20220707185509.27311-1-peterx@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
2022-07-07 20:55:09 +02:00
|
|
|
static void
|
2022-07-07 20:55:18 +02:00
|
|
|
postcopy_preempt_send_channel_done(MigrationState *s,
|
|
|
|
QIOChannel *ioc, Error *local_err)
|
2022-07-07 20:55:02 +02:00
|
|
|
{
|
2022-07-07 20:55:18 +02:00
|
|
|
if (local_err) {
|
migration: Create the postcopy preempt channel asynchronously
This patch allows the postcopy preempt channel to be created
asynchronously. The benefit is that when the connection is slow, we won't
take the BQL (and potentially block all things like QMP) for a long time
without releasing.
A function postcopy_preempt_wait_channel() is introduced, allowing the
migration thread to be able to wait on the channel creation. The channel
is always created by the main thread, in which we'll kick a new semaphore
to tell the migration thread that the channel has created.
We'll need to wait for the new channel in two places: (1) when there's a
new postcopy migration that is starting, or (2) when there's a postcopy
migration to resume.
For the start of migration, we don't need to wait for this channel until
when we want to start postcopy, aka, postcopy_start(). We'll fail the
migration if we found that the channel creation failed (which should
probably not happen at all in 99% of the cases, because the main channel is
using the same network topology).
For a postcopy recovery, we'll need to wait in postcopy_pause(). In that
case if the channel creation failed, we can't fail the migration or we'll
crash the VM, instead we keep in PAUSED state, waiting for yet another
recovery.
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Manish Mishra <manish.mishra@nutanix.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20220707185509.27311-1-peterx@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
2022-07-07 20:55:09 +02:00
|
|
|
migrate_set_error(s, local_err);
|
|
|
|
error_free(local_err);
|
|
|
|
} else {
|
|
|
|
migration_ioc_register_yank(ioc);
|
|
|
|
s->postcopy_qemufile_src = qemu_file_new_output(ioc);
|
|
|
|
trace_postcopy_preempt_new_channel();
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Kick the waiter in all cases. The waiter should check upon
|
|
|
|
* postcopy_qemufile_src to know whether it failed or not.
|
|
|
|
*/
|
|
|
|
qemu_sem_post(&s->postcopy_qemufile_src_sem);
|
2022-07-07 20:55:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
postcopy_preempt_tls_handshake(QIOTask *task, gpointer opaque)
|
|
|
|
{
|
|
|
|
g_autoptr(QIOChannel) ioc = QIO_CHANNEL(qio_task_get_source(task));
|
|
|
|
MigrationState *s = opaque;
|
|
|
|
Error *local_err = NULL;
|
|
|
|
|
|
|
|
qio_task_propagate_error(task, &local_err);
|
|
|
|
postcopy_preempt_send_channel_done(s, ioc, local_err);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * QIOTask completion for the raw preempt-channel connect.  On success,
 * either kick off a TLS handshake (when the channel requires upgrading,
 * finishing asynchronously via postcopy_preempt_tls_handshake()) or
 * complete the channel setup directly; on any error, report it through
 * postcopy_preempt_send_channel_done().
 */
static void
postcopy_preempt_send_channel_new(QIOTask *task, gpointer opaque)
{
    g_autoptr(QIOChannel) ioc = QIO_CHANNEL(qio_task_get_source(task));
    MigrationState *s = opaque;
    QIOChannelTLS *tioc;
    Error *local_err = NULL;

    if (qio_task_propagate_error(task, &local_err)) {
        goto out;
    }

    if (migrate_channel_requires_tls_upgrade(ioc)) {
        tioc = migration_tls_client_create(s, ioc, s->hostname, &local_err);
        if (!tioc) {
            goto out;
        }
        trace_postcopy_preempt_tls_handshake();
        qio_channel_set_name(QIO_CHANNEL(tioc), "migration-tls-preempt");
        qio_channel_tls_handshake(tioc, postcopy_preempt_tls_handshake,
                                  s, NULL, NULL);
        /* Setup the channel until TLS handshake finished */
        return;
    }

out:
    /* This handles both good and error cases */
    postcopy_preempt_send_channel_done(s, ioc, local_err);
}
|
2022-07-07 20:55:02 +02:00
|
|
|
|
2023-02-08 21:28:13 +01:00
|
|
|
/*
|
|
|
|
* This function will kick off an async task to establish the preempt
|
|
|
|
* channel, and wait until the connection setup completed. Returns 0 if
|
|
|
|
* channel established, -1 for error.
|
|
|
|
*/
|
|
|
|
int postcopy_preempt_establish_channel(MigrationState *s)
|
migration: Create the postcopy preempt channel asynchronously
This patch allows the postcopy preempt channel to be created
asynchronously. The benefit is that when the connection is slow, we won't
take the BQL (and potentially block all things like QMP) for a long time
without releasing.
A function postcopy_preempt_wait_channel() is introduced, allowing the
migration thread to be able to wait on the channel creation. The channel
is always created by the main thread, in which we'll kick a new semaphore
to tell the migration thread that the channel has created.
We'll need to wait for the new channel in two places: (1) when there's a
new postcopy migration that is starting, or (2) when there's a postcopy
migration to resume.
For the start of migration, we don't need to wait for this channel until
when we want to start postcopy, aka, postcopy_start(). We'll fail the
migration if we found that the channel creation failed (which should
probably not happen at all in 99% of the cases, because the main channel is
using the same network topology).
For a postcopy recovery, we'll need to wait in postcopy_pause(). In that
case if the channel creation failed, we can't fail the migration or we'll
crash the VM, instead we keep in PAUSED state, waiting for yet another
recovery.
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Manish Mishra <manish.mishra@nutanix.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20220707185509.27311-1-peterx@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
2022-07-07 20:55:09 +02:00
|
|
|
{
|
|
|
|
/* If preempt not enabled, no need to wait */
|
|
|
|
if (!migrate_postcopy_preempt()) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-03-26 19:25:40 +02:00
|
|
|
/*
|
|
|
|
* Kick off async task to establish preempt channel. Only do so with
|
|
|
|
* 8.0+ machines, because 7.1/7.2 require the channel to be created in
|
|
|
|
* setup phase of migration (even if racy in an unreliable network).
|
|
|
|
*/
|
|
|
|
if (!s->preempt_pre_7_2) {
|
|
|
|
postcopy_preempt_setup(s);
|
|
|
|
}
|
2023-02-08 21:28:13 +01:00
|
|
|
|
migration: Create the postcopy preempt channel asynchronously
This patch allows the postcopy preempt channel to be created
asynchronously. The benefit is that when the connection is slow, we won't
take the BQL (and potentially block all things like QMP) for a long time
without releasing.
A function postcopy_preempt_wait_channel() is introduced, allowing the
migration thread to be able to wait on the channel creation. The channel
is always created by the main thread, in which we'll kick a new semaphore
to tell the migration thread that the channel has created.
We'll need to wait for the new channel in two places: (1) when there's a
new postcopy migration that is starting, or (2) when there's a postcopy
migration to resume.
For the start of migration, we don't need to wait for this channel until
when we want to start postcopy, aka, postcopy_start(). We'll fail the
migration if we found that the channel creation failed (which should
probably not happen at all in 99% of the cases, because the main channel is
using the same network topology).
For a postcopy recovery, we'll need to wait in postcopy_pause(). In that
case if the channel creation failed, we can't fail the migration or we'll
crash the VM, instead we keep in PAUSED state, waiting for yet another
recovery.
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Manish Mishra <manish.mishra@nutanix.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20220707185509.27311-1-peterx@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
2022-07-07 20:55:09 +02:00
|
|
|
/*
|
|
|
|
* We need the postcopy preempt channel to be established before
|
|
|
|
* starting doing anything.
|
|
|
|
*/
|
|
|
|
qemu_sem_wait(&s->postcopy_qemufile_src_sem);
|
|
|
|
|
|
|
|
return s->postcopy_qemufile_src ? 0 : -1;
|
|
|
|
}
|
|
|
|
|
2023-02-08 21:28:11 +01:00
|
|
|
/*
 * Start (asynchronously) establishing the postcopy preempt channel.
 *
 * socket_send_channel_create() returns immediately; the result is
 * delivered later via the postcopy_preempt_send_channel_new callback.
 * Callers that need the channel ready synchronize on
 * s->postcopy_qemufile_src_sem (see postcopy_preempt_establish_channel()).
 */
void postcopy_preempt_setup(MigrationState *s)
{
    /* Kick an async task to connect */
    socket_send_channel_create(postcopy_preempt_send_channel_new, s);
}
|
|
|
|
|
2022-07-07 20:55:06 +02:00
|
|
|
/*
 * Park the preempt (fast-load) thread until the migration is recovered.
 *
 * Called from postcopy_preempt_thread() with postcopy_prio_thread_mutex
 * held.  The mutex is dropped while we block on the semaphore so other
 * threads can coordinate with the paused thread, then re-taken before
 * returning to the load loop.
 */
static void postcopy_pause_ram_fast_load(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fast_load();
    /* Release the mutex while parked; see function comment above */
    qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex);
    /* Sleep here until someone kicks us to resume after recovery */
    qemu_sem_wait(&mis->postcopy_pause_sem_fast_load);
    qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
    trace_postcopy_pause_fast_load_continued();
}
|
|
|
|
|
2023-03-26 19:25:39 +02:00
|
|
|
/* Whether the preempt thread's load loop should keep running */
static bool preempt_thread_should_run(MigrationIncomingState *mis)
{
    if (mis->preempt_thread_status == PREEMPT_THREAD_QUIT) {
        return false;
    }
    return true;
}
|
|
|
|
|
2022-07-07 20:55:02 +02:00
|
|
|
/*
 * Incoming-side thread that loads pages arriving on the postcopy preempt
 * channel (RAM_CHANNEL_POSTCOPY), separate from the main migration stream.
 *
 * @opaque: the MigrationIncomingState pointer.
 *
 * Waits for the (asynchronously created) preempt channel to be ready,
 * then loops in ram_load_postcopy() until the stream ends or the thread
 * is asked to quit; on channel errors it parks in
 * postcopy_pause_ram_fast_load() awaiting a recovery.  Always returns NULL.
 */
void *postcopy_preempt_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    int ret;

    trace_postcopy_preempt_thread_entry();

    rcu_register_thread();

    /* Tell the creator we're up and running */
    qemu_sem_post(&mis->thread_sync_sem);

    /*
     * The preempt channel is established in asynchronous way.  Wait
     * for its completion.
     */
    qemu_sem_wait(&mis->postcopy_qemufile_dst_done);

    /* Sending RAM_SAVE_FLAG_EOS to terminate this thread */
    qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
    while (preempt_thread_should_run(mis)) {
        ret = ram_load_postcopy(mis->postcopy_qemufile_dst,
                                RAM_CHANNEL_POSTCOPY);
        /*
         * If error happened, go into recovery routine -- but only when
         * we are still supposed to be running; a quit request that races
         * with the error takes the clean-exit path instead.
         */
        if (ret && preempt_thread_should_run(mis)) {
            /* Drops and re-takes postcopy_prio_thread_mutex internally */
            postcopy_pause_ram_fast_load(mis);
        } else {
            /* We're done (clean EOS, or told to quit) */
            break;
        }
    }
    qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex);

    rcu_unregister_thread();

    trace_postcopy_preempt_thread_exit();

    return NULL;
}
|