migration/next for 20170504

-----BEGIN PGP SIGNATURE-----

iQIcBAABCAAGBQJZCvXtAAoJEPSH7xhYctcjXM4P/igRf7yFkgp1cipE2u3xnGkF
OBzuucG/WOAlyoWfOSOsyeb2sFC8KGTnBm4DRsJ6dlTaXxIXGe/BLfJnmsRThnJ+
NvQfNraI523gHsLpZm47XIkwpr96OQbmqHtFy/jRl1qSUTJPYe0o2HVnoS1zA5hY
0meTcivVeBes7TptCN0k638OAaC6yaXAn937JzTIic5oY1banR5o5aG61tsFaFhF
OZHMU4CotJMDF2iZv5y0Q4Ui+0mpQfP6hJ/GxjwFKnLffXmcb3YlrjpQMzsfbBGl
NyxTjD6DYMYSLvV6yVH9iYmXN0So2/VD3l6kkru8IeEtKHi8s4FQgDSElZbRtMy3
dMlqKFD436nmYw2wD1w3uUqidINMEJJ/LvC5fqGSfcME3N04DoiLJ3pc9QFVcW6z
WnpgybdtOjJzmYtMiN65tHwZ/lYaBotAOP2GLOhE5YJlBY5+Vz4swy8krpppv8iP
vGAwhakERW4Me4zajVAiNvO5TTxaIDAQEm+u5llWGijc/PgjTARWU2zoO6WUX9/y
KaAbXETXBDfJ0cHvvtEmplAB4wdE+2VprznptutR9ewSdgkJRx295j7OlIQ4Gxyg
ngFigR7+z5hKCbZrJ1ZGF+CM+hn6JgNWiMjJXvbbTZehV2LUe//RwvXab1VDLYOl
uZriR9ZP9T/w3IjTYaPZ
=FAsE
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'quintela/tags/migration/20170504' into staging

migration/next for 20170504

# gpg: Signature made Thu 04 May 2017 10:35:41 AM BST
# gpg:                using RSA key 0xF487EF185872D723
# gpg: Good signature from "Juan Quintela <quintela@redhat.com>"
# gpg:                 aka "Juan Quintela <quintela@trasno.org>"
# Primary key fingerprint: 1899 FF8E DEBF 58CC EE03 4B82 F487 EF18 5872 D723

* quintela/tags/migration/20170504:
  migration: Extra tracing
  migration: Move postcopy-ram.h to migration/
  monitor: Move hmp_info_snapshots from savevm.c to hmp.c
  monitor: Move hmp_delvm from savevm.c to hmp.c
  monitor: Move hmp_savevm from savevm.c to hmp.c
  monitor: Move hmp_loadvm from monitor.c to hmp.c
  monitor: Remove monitor parameter from save_vmstate
  migration: to_dst_file at that point is NULL
  migration: setup bi-directional I/O channel for exec: protocol
  ram: Split dirty bitmap by RAMBlock

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 4aee86c60a
hmp.c | 174
@@ -19,6 +19,7 @@
#include "net/eth.h"
#include "sysemu/char.h"
#include "sysemu/block-backend.h"
#include "sysemu/sysemu.h"
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "qemu/timer.h"
@@ -1268,6 +1269,179 @@ void hmp_snapshot_delete_blkdev_internal(Monitor *mon, const QDict *qdict)
    hmp_handle_error(mon, &err);
}

void hmp_loadvm(Monitor *mon, const QDict *qdict)
{
    int saved_vm_running = runstate_is_running();
    const char *name = qdict_get_str(qdict, "name");

    vm_stop(RUN_STATE_RESTORE_VM);

    if (load_vmstate(name) == 0 && saved_vm_running) {
        vm_start();
    }
}

void hmp_savevm(Monitor *mon, const QDict *qdict)
{
    save_vmstate(qdict_get_try_str(qdict, "name"));
}

void hmp_delvm(Monitor *mon, const QDict *qdict)
{
    BlockDriverState *bs;
    Error *err;
    const char *name = qdict_get_str(qdict, "name");

    if (bdrv_all_delete_snapshot(name, &bs, &err) < 0) {
        error_reportf_err(err,
                          "Error while deleting snapshot on device '%s': ",
                          bdrv_get_device_name(bs));
    }
}

void hmp_info_snapshots(Monitor *mon, const QDict *qdict)
{
    BlockDriverState *bs, *bs1;
    BdrvNextIterator it1;
    QEMUSnapshotInfo *sn_tab, *sn;
    bool no_snapshot = true;
    int nb_sns, i;
    int total;
    int *global_snapshots;
    AioContext *aio_context;

    typedef struct SnapshotEntry {
        QEMUSnapshotInfo sn;
        QTAILQ_ENTRY(SnapshotEntry) next;
    } SnapshotEntry;

    typedef struct ImageEntry {
        const char *imagename;
        QTAILQ_ENTRY(ImageEntry) next;
        QTAILQ_HEAD(, SnapshotEntry) snapshots;
    } ImageEntry;

    QTAILQ_HEAD(, ImageEntry) image_list =
        QTAILQ_HEAD_INITIALIZER(image_list);

    ImageEntry *image_entry, *next_ie;
    SnapshotEntry *snapshot_entry;

    bs = bdrv_all_find_vmstate_bs();
    if (!bs) {
        monitor_printf(mon, "No available block device supports snapshots\n");
        return;
    }
    aio_context = bdrv_get_aio_context(bs);

    aio_context_acquire(aio_context);
    nb_sns = bdrv_snapshot_list(bs, &sn_tab);
    aio_context_release(aio_context);

    if (nb_sns < 0) {
        monitor_printf(mon, "bdrv_snapshot_list: error %d\n", nb_sns);
        return;
    }

    for (bs1 = bdrv_first(&it1); bs1; bs1 = bdrv_next(&it1)) {
        int bs1_nb_sns = 0;
        ImageEntry *ie;
        SnapshotEntry *se;
        AioContext *ctx = bdrv_get_aio_context(bs1);

        aio_context_acquire(ctx);
        if (bdrv_can_snapshot(bs1)) {
            sn = NULL;
            bs1_nb_sns = bdrv_snapshot_list(bs1, &sn);
            if (bs1_nb_sns > 0) {
                no_snapshot = false;
                ie = g_new0(ImageEntry, 1);
                ie->imagename = bdrv_get_device_name(bs1);
                QTAILQ_INIT(&ie->snapshots);
                QTAILQ_INSERT_TAIL(&image_list, ie, next);
                for (i = 0; i < bs1_nb_sns; i++) {
                    se = g_new0(SnapshotEntry, 1);
                    se->sn = sn[i];
                    QTAILQ_INSERT_TAIL(&ie->snapshots, se, next);
                }
            }
            g_free(sn);
        }
        aio_context_release(ctx);
    }

    if (no_snapshot) {
        monitor_printf(mon, "There is no snapshot available.\n");
        return;
    }

    global_snapshots = g_new0(int, nb_sns);
    total = 0;
    for (i = 0; i < nb_sns; i++) {
        SnapshotEntry *next_sn;
        if (bdrv_all_find_snapshot(sn_tab[i].name, &bs1) == 0) {
            global_snapshots[total] = i;
            total++;
            QTAILQ_FOREACH(image_entry, &image_list, next) {
                QTAILQ_FOREACH_SAFE(snapshot_entry, &image_entry->snapshots,
                                    next, next_sn) {
                    if (!strcmp(sn_tab[i].name, snapshot_entry->sn.name)) {
                        QTAILQ_REMOVE(&image_entry->snapshots, snapshot_entry,
                                      next);
                        g_free(snapshot_entry);
                    }
                }
            }
        }
    }

    monitor_printf(mon, "List of snapshots present on all disks:\n");

    if (total > 0) {
        bdrv_snapshot_dump((fprintf_function)monitor_printf, mon, NULL);
        monitor_printf(mon, "\n");
        for (i = 0; i < total; i++) {
            sn = &sn_tab[global_snapshots[i]];
            /* The ID is not guaranteed to be the same on all images, so
             * overwrite it.
             */
            pstrcpy(sn->id_str, sizeof(sn->id_str), "--");
            bdrv_snapshot_dump((fprintf_function)monitor_printf, mon, sn);
            monitor_printf(mon, "\n");
        }
    } else {
        monitor_printf(mon, "None\n");
    }

    QTAILQ_FOREACH(image_entry, &image_list, next) {
        if (QTAILQ_EMPTY(&image_entry->snapshots)) {
            continue;
        }
        monitor_printf(mon,
                       "\nList of partial (non-loadable) snapshots on '%s':\n",
                       image_entry->imagename);
        bdrv_snapshot_dump((fprintf_function)monitor_printf, mon, NULL);
        monitor_printf(mon, "\n");
        QTAILQ_FOREACH(snapshot_entry, &image_entry->snapshots, next) {
            bdrv_snapshot_dump((fprintf_function)monitor_printf, mon,
                               &snapshot_entry->sn);
            monitor_printf(mon, "\n");
        }
    }

    QTAILQ_FOREACH_SAFE(image_entry, &image_list, next, next_ie) {
        SnapshotEntry *next_sn;
        QTAILQ_FOREACH_SAFE(snapshot_entry, &image_entry->snapshots, next,
                            next_sn) {
            g_free(snapshot_entry);
        }
        g_free(image_entry);
    }
    g_free(sn_tab);
    g_free(global_snapshots);

}

void hmp_migrate_cancel(Monitor *mon, const QDict *qdict)
{
    qmp_migrate_cancel(NULL);
hmp.h | 4

@@ -63,6 +63,10 @@ void hmp_snapshot_blkdev_internal(Monitor *mon, const QDict *qdict);
void hmp_snapshot_delete_blkdev_internal(Monitor *mon, const QDict *qdict);
void hmp_drive_mirror(Monitor *mon, const QDict *qdict);
void hmp_drive_backup(Monitor *mon, const QDict *qdict);
void hmp_loadvm(Monitor *mon, const QDict *qdict);
void hmp_savevm(Monitor *mon, const QDict *qdict);
void hmp_delvm(Monitor *mon, const QDict *qdict);
void hmp_info_snapshots(Monitor *mon, const QDict *qdict);
void hmp_migrate_cancel(Monitor *mon, const QDict *qdict);
void hmp_migrate_incoming(Monitor *mon, const QDict *qdict);
void hmp_migrate_set_downtime(Monitor *mon, const QDict *qdict);
@@ -39,6 +39,14 @@ struct RAMBlock {
    QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
    int fd;
    size_t page_size;
    /* dirty bitmap used during migration */
    unsigned long *bmap;
    /* bitmap of pages that haven't been sent even once
     * only maintained and used in postcopy at the moment
     * where it's used to send the dirtymap at the start
     * of the postcopy phase
     */
    unsigned long *unsentmap;
};

static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
@@ -360,16 +368,15 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,

static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                               RAMBlock *rb,
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length,
                                               uint64_t *real_dirty_pages)
{
    ram_addr_t addr;
    start = rb->offset + start;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;
    unsigned long *dest = rb->bmap;

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
@@ -266,7 +266,8 @@ uint64_t xbzrle_mig_pages_cache_miss(void);
double xbzrle_mig_cache_miss_rate(void);

void ram_handle_compressed(void *host, uint8_t ch, uint64_t size);
void ram_debug_dump_bitmap(unsigned long *todump, bool expected);
void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
                           unsigned long pages);
/* For outgoing discard bitmap */
int ram_postcopy_send_discard_bitmap(MigrationState *ms);
/* For incoming postcopy discard */
@@ -75,11 +75,8 @@ void qemu_remove_exit_notifier(Notifier *notify);
void qemu_add_machine_init_done_notifier(Notifier *notify);
void qemu_remove_machine_init_done_notifier(Notifier *notify);

void hmp_savevm(Monitor *mon, const QDict *qdict);
int save_vmstate(Monitor *mon, const char *name);
int save_vmstate(const char *name);
int load_vmstate(const char *name);
void hmp_delvm(Monitor *mon, const QDict *qdict);
void hmp_info_snapshots(Monitor *mon, const QDict *qdict);

void qemu_announce_self(void);
@@ -32,7 +32,7 @@ void exec_start_outgoing_migration(MigrationState *s, const char *command, Error

    trace_migration_exec_outgoing(command);
    ioc = QIO_CHANNEL(qio_channel_command_new_spawn(argv,
                                                    O_WRONLY,
                                                    O_RDWR,
                                                    errp));
    if (!ioc) {
        return;
@@ -59,7 +59,7 @@ void exec_start_incoming_migration(const char *command, Error **errp)

    trace_migration_exec_incoming(command);
    ioc = QIO_CHANNEL(qio_channel_command_new_spawn(argv,
                                                    O_RDONLY,
                                                    O_RDWR,
                                                    errp));
    if (!ioc) {
        return;
@@ -26,7 +26,7 @@
#include "qemu/sockets.h"
#include "qemu/rcu.h"
#include "migration/block.h"
#include "migration/postcopy-ram.h"
#include "postcopy-ram.h"
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "trace.h"
@@ -20,7 +20,7 @@

#include "qemu-common.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "postcopy-ram.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
@@ -33,7 +33,6 @@

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint64_t offset; /* Bitmap entry for the 1st bit of this RAMBlock */
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
@@ -717,14 +716,12 @@ void *postcopy_get_tmp_page(MigrationIncomingState *mis)
 * returns: a new PDS.
 */
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
                                                 unsigned long offset,
                                                 const char *name)
{
    PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));

    if (res) {
        res->ramblock_name = name;
        res->offset = offset;
    }

    return res;
@@ -745,7 +742,7 @@ void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
{
    size_t tp_size = qemu_target_page_size();
    /* Convert to byte offsets within the RAM block */
    pds->start_list[pds->cur_entry] = (start - pds->offset) * tp_size;
    pds->start_list[pds->cur_entry] = start * tp_size;
    pds->length_list[pds->cur_entry] = length * tp_size;
    trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
    pds->cur_entry++;
@@ -43,12 +43,9 @@ int postcopy_ram_prepare_discard(MigrationIncomingState *mis);

/*
 * Called at the start of each RAMBlock by the bitmap code.
 * 'offset' is the bitmap offset of the named RAMBlock in the migration
 * bitmap.
 * Returns a new PDS
 */
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
                                                 unsigned long offset,
                                                 const char *name);

/*
migration/ram.c | 271
@@ -36,7 +36,7 @@
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "postcopy-ram.h"
#include "exec/address-spaces.h"
#include "migration/page_cache.h"
#include "qemu/error-report.h"
@@ -138,19 +138,6 @@ out:
    return ret;
}

struct RAMBitmap {
    struct rcu_head rcu;
    /* Main migration bitmap */
    unsigned long *bmap;
    /* bitmap of pages that haven't been sent even once
     * only maintained and used in postcopy at the moment
     * where it's used to send the dirtymap at the start
     * of the postcopy phase
     */
    unsigned long *unsentmap;
};
typedef struct RAMBitmap RAMBitmap;

/*
 * An outstanding page request, on the source, having been received
 * and queued
@@ -220,8 +207,6 @@ struct RAMState {
    uint64_t postcopy_requests;
    /* protects modification of the bitmap */
    QemuMutex bitmap_mutex;
    /* Ram Bitmap protected by RCU */
    RAMBitmap *ram_bitmap;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
@@ -614,22 +599,17 @@ static inline
unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                          unsigned long start)
{
    unsigned long base = rb->offset >> TARGET_PAGE_BITS;
    unsigned long nr = base + start;
    uint64_t rb_size = rb->used_length;
    unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
    unsigned long *bitmap;

    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;
    unsigned long next;

    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
    if (rs->ram_bulk_stage && nr > base) {
        next = nr + 1;
    if (rs->ram_bulk_stage && start > 0) {
        next = start + 1;
    } else {
        next = find_next_bit(bitmap, size, nr);
        next = find_next_bit(bitmap, size, start);
    }

    return next - base;
    return next;
}

static inline bool migration_bitmap_clear_dirty(RAMState *rs,
@@ -637,10 +617,8 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                unsigned long page)
{
    bool ret;
    unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
    unsigned long nr = (rb->offset >> TARGET_PAGE_BITS) + page;

    ret = test_and_clear_bit(nr, bitmap);
    ret = test_and_clear_bit(page, rb->bmap);

    if (ret) {
        rs->migration_dirty_pages--;
@@ -651,10 +629,8 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
                                        ram_addr_t start, ram_addr_t length)
{
    unsigned long *bitmap;
    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
    rs->migration_dirty_pages +=
        cpu_physical_memory_sync_dirty_bitmap(bitmap, rb, start, length,
        cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
                                              &rs->num_dirty_pages_period);
}

@@ -812,6 +788,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;

    p = block->host + offset;
    trace_ram_save_page(block->idstr, (uint64_t)offset, p);

    /* In doubt sent page as normal */
    bytes_xmit = 0;
@@ -1153,17 +1130,13 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
     * search already sent it.
     */
    if (block) {
        unsigned long *bitmap;
        unsigned long page;

        bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
        page = (block->offset + offset) >> TARGET_PAGE_BITS;
        dirty = test_bit(page, bitmap);
        page = offset >> TARGET_PAGE_BITS;
        dirty = test_bit(page, block->bmap);
        if (!dirty) {
            trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
                                            page,
                                            test_bit(page,
                                                atomic_rcu_read(&rs->ram_bitmap)->unsentmap));
                                            page, test_bit(page, block->unsentmap));
        } else {
            trace_get_queued_page(block->idstr, (uint64_t)offset, page);
        }
@@ -1301,16 +1274,13 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,

    /* Check the pages is dirty and if it is send it */
    if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
        unsigned long *unsentmap;
        /*
         * If xbzrle is on, stop using the data compression after first
         * round of migration even if compression is enabled. In theory,
         * xbzrle can do better than compression.
         */
        unsigned long page =
            (pss->block->offset >> TARGET_PAGE_BITS) + pss->page;
        if (migrate_use_compression()
            && (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
        if (migrate_use_compression() &&
            (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
            res = ram_save_compressed_page(rs, pss, last_stage);
        } else {
            res = ram_save_page(rs, pss, last_stage);
@@ -1319,9 +1289,8 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
        if (res < 0) {
            return res;
        }
        unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
        if (unsentmap) {
            clear_bit(page, unsentmap);
        if (pss->block->unsentmap) {
            clear_bit(pss->page, pss->block->unsentmap);
        }
    }

@@ -1451,25 +1420,20 @@ void free_xbzrle_decoded_buf(void)
    xbzrle_decoded_buf = NULL;
}

static void migration_bitmap_free(RAMBitmap *bmap)
{
    g_free(bmap->bmap);
    g_free(bmap->unsentmap);
    g_free(bmap);
}

static void ram_migration_cleanup(void *opaque)
{
    RAMState *rs = opaque;
    RAMBlock *block;

    /* caller have hold iothread lock or is in a bh, so there is
     * no writing race against this migration_bitmap
     */
    RAMBitmap *bitmap = rs->ram_bitmap;
    atomic_rcu_set(&rs->ram_bitmap, NULL);
    if (bitmap) {
        memory_global_dirty_log_stop();
        call_rcu(bitmap, migration_bitmap_free, rcu);
    memory_global_dirty_log_stop();

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        g_free(block->bmap);
        block->bmap = NULL;
        g_free(block->unsentmap);
        block->unsentmap = NULL;
    }

    XBZRLE_cache_lock();
@@ -1501,27 +1465,22 @@ static void ram_state_reset(RAMState *rs)
 * of; it won't bother printing lines that are all this value.
 * If 'todump' is null the migration bitmap is dumped.
 */
void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
                           unsigned long pages)
{
    unsigned long ram_pages = last_ram_page();
    RAMState *rs = &ram_state;
    int64_t cur;
    int64_t linelen = 128;
    char linebuf[129];

    if (!todump) {
        todump = atomic_rcu_read(&rs->ram_bitmap)->bmap;
    }

    for (cur = 0; cur < ram_pages; cur += linelen) {
    for (cur = 0; cur < pages; cur += linelen) {
        int64_t curb;
        bool found = false;
        /*
         * Last line; catch the case where the line length
         * is longer than remaining ram
         */
        if (cur + linelen > ram_pages) {
            linelen = ram_pages - cur;
        if (cur + linelen > pages) {
            linelen = pages - cur;
        }
        for (curb = 0; curb < linelen; curb++) {
            bool thisbit = test_bit(cur + curb, todump);
@@ -1539,14 +1498,12 @@ void ram_debug_dump_bitmap(unsigned long *todump, bool expected)

void ram_postcopy_migrated_memory_release(MigrationState *ms)
{
    RAMState *rs = &ram_state;
    struct RAMBlock *block;
    unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        unsigned long first = block->offset >> TARGET_PAGE_BITS;
        unsigned long range = first + (block->used_length >> TARGET_PAGE_BITS);
        unsigned long run_start = find_next_zero_bit(bitmap, range, first);
        unsigned long *bitmap = block->bmap;
        unsigned long range = block->used_length >> TARGET_PAGE_BITS;
        unsigned long run_start = find_next_zero_bit(bitmap, range, 0);

        while (run_start < range) {
            unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
@@ -1573,16 +1530,13 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms)
 */
static int postcopy_send_discard_bm_ram(MigrationState *ms,
                                        PostcopyDiscardState *pds,
                                        unsigned long start,
                                        unsigned long length)
                                        RAMBlock *block)
{
    RAMState *rs = &ram_state;
    unsigned long end = start + length; /* one after the end */
    unsigned long end = block->used_length >> TARGET_PAGE_BITS;
    unsigned long current;
    unsigned long *unsentmap;
    unsigned long *unsentmap = block->unsentmap;

    unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
    for (current = start; current < end; ) {
    for (current = 0; current < end; ) {
        unsigned long one = find_next_bit(unsentmap, end, current);

        if (one <= end) {
@@ -1625,18 +1579,15 @@ static int postcopy_each_ram_send_discard(MigrationState *ms)
    int ret;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        unsigned long first = block->offset >> TARGET_PAGE_BITS;
        PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
                                                               first,
                                                               block->idstr);
        PostcopyDiscardState *pds =
            postcopy_discard_send_init(ms, block->idstr);

        /*
         * Postcopy sends chunks of bitmap over the wire, but it
         * just needs indexes at this point, avoids it having
         * target page specific code.
         */
        ret = postcopy_send_discard_bm_ram(ms, pds, first,
                                           block->used_length >> TARGET_PAGE_BITS);
        ret = postcopy_send_discard_bm_ram(ms, pds, block);
        postcopy_discard_send_finish(ms, pds);
        if (ret) {
            return ret;
@@ -1667,12 +1618,10 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
                                          PostcopyDiscardState *pds)
{
    RAMState *rs = &ram_state;
    unsigned long *bitmap;
    unsigned long *unsentmap;
    unsigned long *bitmap = block->bmap;
    unsigned long *unsentmap = block->unsentmap;
    unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
    unsigned long first = block->offset >> TARGET_PAGE_BITS;
    unsigned long len = block->used_length >> TARGET_PAGE_BITS;
    unsigned long last = first + (len - 1);
    unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
    unsigned long run_start;

    if (block->page_size == TARGET_PAGE_SIZE) {
@@ -1680,18 +1629,15 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
        return;
    }

    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
    unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;

    if (unsent_pass) {
        /* Find a sent page */
        run_start = find_next_zero_bit(unsentmap, last + 1, first);
        run_start = find_next_zero_bit(unsentmap, pages, 0);
    } else {
        /* Find a dirty page */
        run_start = find_next_bit(bitmap, last + 1, first);
        run_start = find_next_bit(bitmap, pages, 0);
    }

    while (run_start <= last) {
    while (run_start < pages) {
        bool do_fixup = false;
        unsigned long fixup_start_addr;
        unsigned long host_offset;
@@ -1711,9 +1657,9 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
            /* Find the end of this run */
            unsigned long run_end;
            if (unsent_pass) {
                run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
                run_end = find_next_bit(unsentmap, pages, run_start + 1);
            } else {
                run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
                run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
            }
            /*
             * If the end isn't at the start of a host page, then the
@@ -1770,11 +1716,10 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,

        if (unsent_pass) {
            /* Find the next sent page for the next iteration */
            run_start = find_next_zero_bit(unsentmap, last + 1,
                                           run_start);
            run_start = find_next_zero_bit(unsentmap, pages, run_start);
        } else {
            /* Find the next dirty page for the next iteration */
            run_start = find_next_bit(bitmap, last + 1, run_start);
            run_start = find_next_bit(bitmap, pages, run_start);
        }
    }
}
@@ -1791,34 +1736,22 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
 * Returns zero on success
 *
 * @ms: current migration state
 * @block: block we want to work with
 */
static int postcopy_chunk_hostpages(MigrationState *ms)
static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
{
    RAMState *rs = &ram_state;
    struct RAMBlock *block;
    PostcopyDiscardState *pds =
        postcopy_discard_send_init(ms, block->idstr);

    /* Easiest way to make sure we don't resume in the middle of a host-page */
    rs->last_seen_block = NULL;
    rs->last_sent_block = NULL;
    rs->last_page = 0;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        unsigned long first = block->offset >> TARGET_PAGE_BITS;

        PostcopyDiscardState *pds =
            postcopy_discard_send_init(ms, first, block->idstr);

        /* First pass: Discard all partially sent host pages */
        postcopy_chunk_hostpages_pass(ms, true, block, pds);
        /*
         * Second pass: Ensure that all partially dirty host pages are made
         * fully dirty.
         */
        postcopy_chunk_hostpages_pass(ms, false, block, pds);

        postcopy_discard_send_finish(ms, pds);
    } /* ram_list loop */
    /* First pass: Discard all partially sent host pages */
    postcopy_chunk_hostpages_pass(ms, true, block, pds);
    /*
     * Second pass: Ensure that all partially dirty host pages are made
     * fully dirty.
     */
    postcopy_chunk_hostpages_pass(ms, false, block, pds);

    postcopy_discard_send_finish(ms, pds);
    return 0;
}

@@ -1840,43 +1773,49 @@ static int postcopy_chunk_hostpages(MigrationState *ms)
int ram_postcopy_send_discard_bitmap(MigrationState *ms)
{
    RAMState *rs = &ram_state;
    RAMBlock *block;
    int ret;
    unsigned long *bitmap, *unsentmap;

    rcu_read_lock();

    /* This should be our last sync, the src is now paused */
    migration_bitmap_sync(rs);

    unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
    if (!unsentmap) {
        /* We don't have a safe way to resize the sentmap, so
         * if the bitmap was resized it will be NULL at this
         * point.
    /* Easiest way to make sure we don't resume in the middle of a host-page */
    rs->last_seen_block = NULL;
    rs->last_sent_block = NULL;
    rs->last_page = 0;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
        unsigned long *bitmap = block->bmap;
        unsigned long *unsentmap = block->unsentmap;

        if (!unsentmap) {
            /* We don't have a safe way to resize the sentmap, so
             * if the bitmap was resized it will be NULL at this
             * point.
             */
            error_report("migration ram resized during precopy phase");
            rcu_read_unlock();
            return -EINVAL;
        }
        /* Deal with TPS != HPS and huge pages */
        ret = postcopy_chunk_hostpages(ms, block);
        if (ret) {
            rcu_read_unlock();
            return ret;
        }

        /*
         * Update the unsentmap to be unsentmap = unsentmap | dirty
         */
        error_report("migration ram resized during precopy phase");
        rcu_read_unlock();
        return -EINVAL;
    }

    /* Deal with TPS != HPS and huge pages */
    ret = postcopy_chunk_hostpages(ms);
    if (ret) {
        rcu_read_unlock();
        return ret;
    }

    /*
     * Update the unsentmap to be unsentmap = unsentmap | dirty
     */
    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
    bitmap_or(unsentmap, unsentmap, bitmap, last_ram_page());

    trace_ram_postcopy_send_discard_bitmap();
        bitmap_or(unsentmap, unsentmap, bitmap, pages);
#ifdef DEBUG_POSTCOPY
    ram_debug_dump_bitmap(unsentmap, true);
        ram_debug_dump_bitmap(unsentmap, true, pages);
#endif
    }
    trace_ram_postcopy_send_discard_bitmap();

    ret = postcopy_each_ram_send_discard(ms);
    rcu_read_unlock();
@@ -1918,8 +1857,6 @@ err:

static int ram_state_init(RAMState *rs)
{
    unsigned long ram_bitmap_pages;

    memset(rs, 0, sizeof(*rs));
    qemu_mutex_init(&rs->bitmap_mutex);
    qemu_mutex_init(&rs->src_page_req_mutex);
@@ -1961,16 +1898,19 @@ static int ram_state_init(RAMState *rs)
    rcu_read_lock();
    ram_state_reset(rs);

    rs->ram_bitmap = g_new0(RAMBitmap, 1);
    /* Skip setting bitmap if there is no RAM */
    if (ram_bytes_total()) {
        ram_bitmap_pages = last_ram_page();
        rs->ram_bitmap->bmap = bitmap_new(ram_bitmap_pages);
        bitmap_set(rs->ram_bitmap->bmap, 0, ram_bitmap_pages);
        RAMBlock *block;

        if (migrate_postcopy_ram()) {
            rs->ram_bitmap->unsentmap = bitmap_new(ram_bitmap_pages);
            bitmap_set(rs->ram_bitmap->unsentmap, 0, ram_bitmap_pages);
        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
            unsigned long pages = block->max_length >> TARGET_PAGE_BITS;

            block->bmap = bitmap_new(pages);
            bitmap_set(block->bmap, 0, pages);
            if (migrate_postcopy_ram()) {
                block->unsentmap = bitmap_new(pages);
                bitmap_set(block->unsentmap, 0, pages);
            }
        }
    }

@@ -2611,6 +2551,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
            ret = -EINVAL;
            break;
        }
        trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
    }

    switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
@@ -33,15 +33,12 @@
#include "hw/qdev.h"
#include "hw/xen/xen.h"
#include "net/net.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "audio/audio.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "postcopy-ram.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "qemu/sockets.h"
#include "qemu/queue.h"
#include "sysemu/cpus.h"
#include "exec/memory.h"
@@ -50,7 +47,6 @@
#include "qemu/bitops.h"
#include "qemu/iov.h"
#include "block/snapshot.h"
#include "block/qapi.h"
#include "qemu/cutils.h"
#include "io/channel-buffer.h"
#include "io/channel-file.h"
@@ -2078,7 +2074,7 @@ int qemu_loadvm_state(QEMUFile *f)
    return ret;
}

int save_vmstate(Monitor *mon, const char *name)
int save_vmstate(const char *name)
{
    BlockDriverState *bs, *bs1;
    QEMUSnapshotInfo sn1, *sn = &sn1, old_sn1, *old_sn = &old_sn1;
@@ -2092,8 +2088,8 @@ int save_vmstate(Monitor *mon, const char *name)
    AioContext *aio_context;

    if (!bdrv_all_can_snapshot(&bs)) {
        monitor_printf(mon, "Device '%s' is writable but does not "
                       "support snapshots.\n", bdrv_get_device_name(bs));
        error_report("Device '%s' is writable but does not support snapshots",
                     bdrv_get_device_name(bs));
        return ret;
    }

@@ -2110,7 +2106,7 @@ int save_vmstate(Monitor *mon, const char *name)

    bs = bdrv_all_find_vmstate_bs();
    if (bs == NULL) {
        monitor_printf(mon, "No block device can accept snapshots\n");
        error_report("No block device can accept snapshots");
        return ret;
    }
    aio_context = bdrv_get_aio_context(bs);
@@ -2119,7 +2115,7 @@ int save_vmstate(Monitor *mon, const char *name)

    ret = global_state_store();
    if (ret) {
        monitor_printf(mon, "Error saving global state\n");
        error_report("Error saving global state");
        return ret;
    }
    vm_stop(RUN_STATE_SAVE_VM);
@@ -2151,7 +2147,7 @@ int save_vmstate(Monitor *mon, const char *name)
    /* save the VM state */
    f = qemu_fopen_bdrv(bs, 1);
    if (!f) {
        monitor_printf(mon, "Could not open VM state file\n");
        error_report("Could not open VM state file");
        goto the_end;
    }
    ret = qemu_savevm_state(f, &local_err);
@@ -2164,8 +2160,8 @@ int save_vmstate(Monitor *mon, const char *name)

    ret = bdrv_all_create_snapshot(sn, bs, vm_state_size, &bs);
    if (ret < 0) {
        monitor_printf(mon, "Error while creating snapshot on '%s'\n",
                       bdrv_get_device_name(bs));
        error_report("Error while creating snapshot on '%s'",
                     bdrv_get_device_name(bs));
        goto the_end;
    }

@@ -2179,11 +2175,6 @@ int save_vmstate(Monitor *mon, const char *name)
    return ret;
}

void hmp_savevm(Monitor *mon, const QDict *qdict)
{
    save_vmstate(mon, qdict_get_try_str(qdict, "name"));
}

void qmp_xen_save_devices_state(const char *filename, Error **errp)
{
    QEMUFile *f;
@@ -2253,7 +2244,7 @@ int load_vmstate(const char *name)
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!bdrv_all_can_snapshot(&bs)) {
        error_report("Device '%s' is writable but does not support snapshots.",
        error_report("Device '%s' is writable but does not support snapshots",
                     bdrv_get_device_name(bs));
        return -ENOTSUP;
    }
@@ -2317,162 +2308,6 @@ int load_vmstate(const char *name)
    return 0;
}

void hmp_delvm(Monitor *mon, const QDict *qdict)
{
    BlockDriverState *bs;
    Error *err;
    const char *name = qdict_get_str(qdict, "name");

    if (bdrv_all_delete_snapshot(name, &bs, &err) < 0) {
        error_reportf_err(err,
                          "Error while deleting snapshot on device '%s': ",
                          bdrv_get_device_name(bs));
    }
}

void hmp_info_snapshots(Monitor *mon, const QDict *qdict)
{
    BlockDriverState *bs, *bs1;
    BdrvNextIterator it1;
    QEMUSnapshotInfo *sn_tab, *sn;
    bool no_snapshot = true;
    int nb_sns, i;
    int total;
    int *global_snapshots;
    AioContext *aio_context;

    typedef struct SnapshotEntry {
        QEMUSnapshotInfo sn;
        QTAILQ_ENTRY(SnapshotEntry) next;
    } SnapshotEntry;

    typedef struct ImageEntry {
        const char *imagename;
        QTAILQ_ENTRY(ImageEntry) next;
        QTAILQ_HEAD(, SnapshotEntry) snapshots;
    } ImageEntry;

    QTAILQ_HEAD(, ImageEntry) image_list =
        QTAILQ_HEAD_INITIALIZER(image_list);

    ImageEntry *image_entry, *next_ie;
    SnapshotEntry *snapshot_entry;

    bs = bdrv_all_find_vmstate_bs();
    if (!bs) {
        monitor_printf(mon, "No available block device supports snapshots\n");
        return;
    }
    aio_context = bdrv_get_aio_context(bs);

    aio_context_acquire(aio_context);
    nb_sns = bdrv_snapshot_list(bs, &sn_tab);
    aio_context_release(aio_context);

    if (nb_sns < 0) {
        monitor_printf(mon, "bdrv_snapshot_list: error %d\n", nb_sns);
        return;
    }

    for (bs1 = bdrv_first(&it1); bs1; bs1 = bdrv_next(&it1)) {
        int bs1_nb_sns = 0;
        ImageEntry *ie;
        SnapshotEntry *se;
        AioContext *ctx = bdrv_get_aio_context(bs1);

        aio_context_acquire(ctx);
        if (bdrv_can_snapshot(bs1)) {
            sn = NULL;
            bs1_nb_sns = bdrv_snapshot_list(bs1, &sn);
            if (bs1_nb_sns > 0) {
                no_snapshot = false;
                ie = g_new0(ImageEntry, 1);
                ie->imagename = bdrv_get_device_name(bs1);
                QTAILQ_INIT(&ie->snapshots);
                QTAILQ_INSERT_TAIL(&image_list, ie, next);
                for (i = 0; i < bs1_nb_sns; i++) {
                    se = g_new0(SnapshotEntry, 1);
                    se->sn = sn[i];
                    QTAILQ_INSERT_TAIL(&ie->snapshots, se, next);
                }
            }
            g_free(sn);
        }
        aio_context_release(ctx);
    }

    if (no_snapshot) {
        monitor_printf(mon, "There is no snapshot available.\n");
        return;
    }

    global_snapshots = g_new0(int, nb_sns);
    total = 0;
    for (i = 0; i < nb_sns; i++) {
        SnapshotEntry *next_sn;
        if (bdrv_all_find_snapshot(sn_tab[i].name, &bs1) == 0) {
            global_snapshots[total] = i;
            total++;
            QTAILQ_FOREACH(image_entry, &image_list, next) {
                QTAILQ_FOREACH_SAFE(snapshot_entry, &image_entry->snapshots,
                                    next, next_sn) {
                    if (!strcmp(sn_tab[i].name, snapshot_entry->sn.name)) {
                        QTAILQ_REMOVE(&image_entry->snapshots, snapshot_entry,
                                      next);
                        g_free(snapshot_entry);
                    }
                }
            }
        }
    }

    monitor_printf(mon, "List of snapshots present on all disks:\n");

    if (total > 0) {
        bdrv_snapshot_dump((fprintf_function)monitor_printf, mon, NULL);
        monitor_printf(mon, "\n");
        for (i = 0; i < total; i++) {
            sn = &sn_tab[global_snapshots[i]];
            /* The ID is not guaranteed to be the same on all images, so
             * overwrite it.
             */
            pstrcpy(sn->id_str, sizeof(sn->id_str), "--");
            bdrv_snapshot_dump((fprintf_function)monitor_printf, mon, sn);
            monitor_printf(mon, "\n");
        }
    } else {
        monitor_printf(mon, "None\n");
    }

    QTAILQ_FOREACH(image_entry, &image_list, next) {
        if (QTAILQ_EMPTY(&image_entry->snapshots)) {
            continue;
        }
        monitor_printf(mon,
                       "\nList of partial (non-loadable) snapshots on '%s':\n",
                       image_entry->imagename);
        bdrv_snapshot_dump((fprintf_function)monitor_printf, mon, NULL);
        monitor_printf(mon, "\n");
        QTAILQ_FOREACH(snapshot_entry, &image_entry->snapshots, next) {
            bdrv_snapshot_dump((fprintf_function)monitor_printf, mon,
                               &snapshot_entry->sn);
            monitor_printf(mon, "\n");
        }
    }

    QTAILQ_FOREACH_SAFE(image_entry, &image_list, next, next_ie) {
        SnapshotEntry *next_sn;
        QTAILQ_FOREACH_SAFE(snapshot_entry, &image_entry->snapshots, next,
                            next_sn) {
            g_free(snapshot_entry);
        }
        g_free(image_entry);
    }
    g_free(sn_tab);
    g_free(global_snapshots);

}

void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev)
{
    qemu_ram_set_idstr(mr->ram_block,
@@ -79,7 +79,6 @@ static void socket_outgoing_migration(QIOTask *task,

    if (qio_task_propagate_error(task, &err)) {
        trace_migration_socket_outgoing_error(error_get_pretty(err));
        data->s->to_dst_file = NULL;
        migrate_fd_error(data->s, err);
        error_free(err);
    } else {
@@ -116,7 +116,6 @@ static void migration_tls_outgoing_handshake(QIOTask *task,

    if (qio_task_propagate_error(task, &err)) {
        trace_migration_tls_outgoing_handshake_error(error_get_pretty(err));
        s->to_dst_file = NULL;
        migrate_fd_error(s, err);
        error_free(err);
    } else {
@@ -69,8 +69,10 @@ migration_bitmap_sync_start(void) ""
migration_bitmap_sync_end(uint64_t dirty_pages) "dirty_pages %" PRIu64
migration_throttle(void) ""
ram_discard_range(const char *rbname, uint64_t start, size_t len) "%s: start: %" PRIx64 " %zx"
ram_load_loop(const char *rbname, uint64_t addr, int flags, void *host) "%s: addr: %" PRIx64 " flags: %x host: %p"
ram_load_postcopy_loop(uint64_t addr, int flags) "@%" PRIx64 " %x"
ram_postcopy_send_discard_bitmap(void) ""
ram_save_page(const char *rbname, uint64_t offset, void *host) "%s: offset: %" PRIx64 " host: %p"
ram_save_queue_pages(const char *rbname, size_t start, size_t len) "%s: start: %zx len: %zx"

# migration/migration.c
monitor.c | 13
@@ -37,7 +37,6 @@
#include "net/slirp.h"
#include "sysemu/char.h"
#include "ui/qemu-spice.h"
#include "sysemu/sysemu.h"
#include "sysemu/numa.h"
#include "monitor/monitor.h"
#include "qemu/config-file.h"
@@ -1954,18 +1953,6 @@ void qmp_closefd(const char *fdname, Error **errp)
    error_setg(errp, QERR_FD_NOT_FOUND, fdname);
}

static void hmp_loadvm(Monitor *mon, const QDict *qdict)
{
    int saved_vm_running = runstate_is_running();
    const char *name = qdict_get_str(qdict, "name");

    vm_stop(RUN_STATE_RESTORE_VM);

    if (load_vmstate(name) == 0 && saved_vm_running) {
        vm_start();
    }
}

int monitor_get_fd(Monitor *mon, const char *fdname, Error **errp)
{
    mon_fd_t *monfd;
@@ -64,7 +64,7 @@ void replay_vmstate_init(void)
{
    if (replay_snapshot) {
        if (replay_mode == REPLAY_MODE_RECORD) {
            if (save_vmstate(cur_mon, replay_snapshot) != 0) {
            if (save_vmstate(replay_snapshot) != 0) {
                error_report("Could not create snapshot for icount record");
                exit(1);
            }