Migration/virtio/hmp pull 2020-06-01

A mixed pull with:
   - RDMA migration fix (CID 1428762)
   - HMP qom-get addition and qom-set cleanup
   - a virtiofsd fix
   - COLO fixes
 
 Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEERfXHG0oMt/uXep+pBRYzHrxb/ecFAl7VStsACgkQBRYzHrxb
 /eccxw/8CdvL/6qaswsFxgbNVZWftv60IcGzI6i76yIJXKTSIAfv31nB482KSwXv
 d8F0pUYWETNyKk8JeSv3WahCtqDVPKQXtIiQzhVRjqRG2XBgo/Cx1MJCdC1SYtjI
 r04txBcGfgzmIMTgQd3IpdD/0B37/uJw2h+gZiWisgCBOZhHxDRRtvQGDsL29BtQ
 3Sjlxh1+l8uI5CwUczo4mPhoBB9liCHleaA2yZ+q4qX3qWHMLb16KKl5wV0V0f13
 ajuiA20PksuNfIJsYY1b26fNmFtT+iaXFja99L9t3oN7FLFtlvw7JASibxau/keJ
 dFZSQC9BVrNPg1muK82jqqA7NM9Sh7REhovpKh/isqoM40TGAUvWm5NpZY0w6jSQ
 pAvE/jkHJApCfcpqh1lJHFk9IoWZsKvwYlBush6NC2Hlh4QHIN7j/lnH6AerWcAR
 hXaPAX2vfdUd+Lbfaer7vIHcO7wB9TjjrukfxnHxrexsjVK0r4kwoT1bfFCMAMpD
 XL/LWzsg3S/84NNuFxeAt9Et2x67RYqe4JK5DRBvn4EdfNA2yvs4Y0D+uZNsbZmM
 x2vVDBss/NyamsK1wG0RG48lvJEP+tmXDOup8ZzfabCe7FELYce127PajGvl2TSE
 I4DgGrcLlW7iy1+yGALqHpPJ24VH0gXc/mhmN2KEK2LSzazIRzw=
 =rTJr
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20200601a' into staging

Migration/virtio/hmp pull 2020-06-01

A mixed pull with:
  - RDMA migration fix (CID 1428762)
  - HMP qom-get addition and qom-set cleanup
  - a virtiofsd fix
  - COLO fixes

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

# gpg: Signature made Mon 01 Jun 2020 19:37:15 BST
# gpg:                using RSA key 45F5C71B4A0CB7FB977A9FA90516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>" [full]
# Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A  9FA9 0516 331E BC5B FDE7

* remotes/dgilbert/tags/pull-migration-20200601a:
  migration/migration.c: Fix hang in ram_save_host_page
  migration/colo.c: Move colo_notify_compares_event to the right place
  migration/colo.c: Relaunch failover even if there was an error
  migration/colo.c: Flush ram cache only after receiving device state
  migration/colo.c: Use cpu_synchronize_all_states()
  migration/colo.c: Use event instead of semaphore
  migration/vmstate: Remove unnecessary MemoryRegion forward declaration
  virtiofsd: remove symlink fallbacks
  hmp: Simplify qom-set
  hmp: Implement qom-get HMP command
  migration/rdma: cleanup rdma context before g_free to avoid memleaks
  migration/rdma: fix potential nullptr access in rdma_start_incoming_migration

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Peter Maydell 2020-06-01 21:34:47 +01:00
commit 853a60b870
12 changed files with 86 additions and 207 deletions

View File

@@ -1790,9 +1790,23 @@ SRST
Print QOM properties of object at location *path*
ERST
{
.name = "qom-get",
.args_type = "path:s,property:s",
.params = "path property",
.help = "print QOM property",
.cmd = hmp_qom_get,
.flags = "p",
},
SRST
``qom-get`` *path* *property*
Print QOM property *property* of object at location *path*
ERST
{
.name = "qom-set",
.args_type = "path:s,property:s,value:s",
.args_type = "path:s,property:s,value:S",
.params = "path property value",
.help = "set QOM property",
.cmd = hmp_qom_set,

View File

@@ -1199,7 +1199,6 @@ static inline int vmstate_register(VMStateIf *obj, int instance_id,
void vmstate_unregister(VMStateIf *obj, const VMStateDescription *vmsd,
void *opaque);
struct MemoryRegion;
void vmstate_register_ram(struct MemoryRegion *memory, DeviceState *dev);
void vmstate_unregister_ram(struct MemoryRegion *memory, DeviceState *dev);
void vmstate_register_ram_global(struct MemoryRegion *memory);

View File

@@ -96,6 +96,7 @@ void hmp_info_memdev(Monitor *mon, const QDict *qdict);
void hmp_info_numa(Monitor *mon, const QDict *qdict);
void hmp_info_memory_devices(Monitor *mon, const QDict *qdict);
void hmp_qom_list(Monitor *mon, const QDict *qdict);
void hmp_qom_get(Monitor *mon, const QDict *qdict);
void hmp_qom_set(Monitor *mon, const QDict *qdict);
void hmp_info_qom_tree(Monitor *mon, const QDict *dict);
void object_add_completion(ReadLineState *rs, int nb_args, const char *str);

View File

@@ -436,11 +436,6 @@ static int colo_do_checkpoint_transaction(MigrationState *s,
goto out;
}
colo_notify_compares_event(NULL, COLO_EVENT_CHECKPOINT, &local_err);
if (local_err) {
goto out;
}
/* Disable block migration */
migrate_set_block_enabled(false, &local_err);
if (local_err) {
@@ -502,6 +497,12 @@ static int colo_do_checkpoint_transaction(MigrationState *s,
goto out;
}
qemu_event_reset(&s->colo_checkpoint_event);
colo_notify_compares_event(NULL, COLO_EVENT_CHECKPOINT, &local_err);
if (local_err) {
goto out;
}
colo_receive_check_message(s->rp_state.from_dst_file,
COLO_MESSAGE_VMSTATE_LOADED, &local_err);
if (local_err) {
@@ -589,7 +590,7 @@ static void colo_process_checkpoint(MigrationState *s)
goto out;
}
qemu_sem_wait(&s->colo_checkpoint_sem);
qemu_event_wait(&s->colo_checkpoint_event);
if (s->state != MIGRATION_STATUS_COLO) {
goto out;
@@ -637,7 +638,7 @@ out:
colo_compare_unregister_notifier(&packets_compare_notifier);
timer_del(s->colo_delay_timer);
timer_free(s->colo_delay_timer);
qemu_sem_destroy(&s->colo_checkpoint_sem);
qemu_event_destroy(&s->colo_checkpoint_event);
/*
* Must be called after failover BH is completed,
@@ -654,7 +655,7 @@ void colo_checkpoint_notify(void *opaque)
MigrationState *s = opaque;
int64_t next_notify_time;
qemu_sem_post(&s->colo_checkpoint_sem);
qemu_event_set(&s->colo_checkpoint_event);
s->colo_checkpoint_time = qemu_clock_get_ms(QEMU_CLOCK_HOST);
next_notify_time = s->colo_checkpoint_time +
s->parameters.x_checkpoint_delay;
@@ -664,7 +665,7 @@ void colo_checkpoint_notify(void *opaque)
void migrate_start_colo_process(MigrationState *s)
{
qemu_mutex_unlock_iothread();
qemu_sem_init(&s->colo_checkpoint_sem, 0);
qemu_event_init(&s->colo_checkpoint_event, false);
s->colo_delay_timer = timer_new_ms(QEMU_CLOCK_HOST,
colo_checkpoint_notify, s);
@@ -704,7 +705,7 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
}
qemu_mutex_lock_iothread();
cpu_synchronize_all_pre_loadvm();
cpu_synchronize_all_states();
ret = qemu_loadvm_state_main(mis->from_src_file, mis);
qemu_mutex_unlock_iothread();
@@ -747,9 +748,11 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
qemu_mutex_lock_iothread();
vmstate_loading = true;
colo_flush_ram_cache();
ret = qemu_load_device_state(fb);
if (ret < 0) {
error_setg(errp, "COLO: load device state failed");
vmstate_loading = false;
qemu_mutex_unlock_iothread();
return;
}
@@ -758,6 +761,7 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
replication_get_error_all(&local_err);
if (local_err) {
error_propagate(errp, local_err);
vmstate_loading = false;
qemu_mutex_unlock_iothread();
return;
}
@@ -766,6 +770,7 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
replication_do_checkpoint_all(&local_err);
if (local_err) {
error_propagate(errp, local_err);
vmstate_loading = false;
qemu_mutex_unlock_iothread();
return;
}
@@ -777,6 +782,7 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
if (local_err) {
error_propagate(errp, local_err);
vmstate_loading = false;
qemu_mutex_unlock_iothread();
return;
}
@@ -787,9 +793,6 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
qemu_mutex_unlock_iothread();
if (failover_get_state() == FAILOVER_STATUS_RELAUNCH) {
failover_set_state(FAILOVER_STATUS_RELAUNCH,
FAILOVER_STATUS_NONE);
failover_request_active(NULL);
return;
}
@@ -888,6 +891,14 @@ void *colo_process_incoming_thread(void *opaque)
error_report_err(local_err);
break;
}
if (failover_get_state() == FAILOVER_STATUS_RELAUNCH) {
failover_set_state(FAILOVER_STATUS_RELAUNCH,
FAILOVER_STATUS_NONE);
failover_request_active(NULL);
break;
}
if (failover_get_state() != FAILOVER_STATUS_NONE) {
error_report("failover request");
break;
@@ -895,8 +906,6 @@ void *colo_process_incoming_thread(void *opaque)
}
out:
vmstate_loading = false;
/*
* There are only two reasons we can get here, some error happened
* or the user triggered failover.

View File

@@ -3361,6 +3361,10 @@ bool migration_rate_limit(void)
bool urgent = false;
migration_update_counters(s, now);
if (qemu_file_rate_limit(s->to_dst_file)) {
if (qemu_file_get_error(s->to_dst_file)) {
return false;
}
/*
* Wait for a delay to do rate limiting OR
* something urgent to post the semaphore.

View File

@@ -215,8 +215,8 @@ struct MigrationState
/* The semaphore is used to notify COLO thread that failover is finished */
QemuSemaphore colo_exit_sem;
/* The semaphore is used to notify COLO thread to do checkpoint */
QemuSemaphore colo_checkpoint_sem;
/* The event is used to notify COLO thread to do checkpoint */
QemuEvent colo_checkpoint_event;
int64_t colo_checkpoint_time;
QEMUTimer *colo_delay_timer;

View File

@@ -3360,7 +3360,7 @@ static bool postcopy_is_running(void)
* Flush content of RAM cache into SVM's memory.
* Only flush the pages that be dirtied by PVM or SVM or both.
*/
static void colo_flush_ram_cache(void)
void colo_flush_ram_cache(void)
{
RAMBlock *block = NULL;
void *dst_host;
@@ -3632,9 +3632,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
}
trace_ram_load_complete(ret, seq_iter);
if (!ret && migration_incoming_in_colo_state()) {
colo_flush_ram_cache();
}
return ret;
}

View File

@@ -65,6 +65,7 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb);
/* ram cache */
int colo_init_ram_cache(void);
void colo_flush_ram_cache(void);
void colo_release_ram_cache(void);
void colo_incoming_start_dirty_log(void);

View File

@@ -4056,7 +4056,9 @@ void rdma_start_incoming_migration(const char *host_port, Error **errp)
return;
err:
error_propagate(errp, local_err);
if (rdma) {
g_free(rdma->host);
}
g_free(rdma);
g_free(rdma_return_path);
}
@@ -4092,20 +4094,20 @@ void rdma_start_outgoing_migration(void *opaque,
rdma_return_path = qemu_rdma_data_init(host_port, errp);
if (rdma_return_path == NULL) {
goto err;
goto return_path_err;
}
ret = qemu_rdma_source_init(rdma_return_path,
s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL], errp);
if (ret) {
goto err;
goto return_path_err;
}
ret = qemu_rdma_connect(rdma_return_path, errp);
if (ret) {
goto err;
goto return_path_err;
}
rdma->return_path = rdma_return_path;
@@ -4118,6 +4120,8 @@ void rdma_start_outgoing_migration(void *opaque,
s->to_dst_file = qemu_fopen_rdma(rdma, "wb");
migrate_fd_connect(s, NULL);
return;
return_path_err:
qemu_rdma_cleanup(rdma);
err:
g_free(rdma);
g_free(rdma_return_path);

View File

@@ -12,6 +12,8 @@
#include "qapi/error.h"
#include "qapi/qapi-commands-qom.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qjson.h"
#include "qapi/qmp/qstring.h"
#include "qom/object.h"
void hmp_qom_list(Monitor *mon, const QDict *qdict)
@@ -46,19 +48,29 @@ void hmp_qom_set(Monitor *mon, const QDict *qdict)
const char *property = qdict_get_str(qdict, "property");
const char *value = qdict_get_str(qdict, "value");
Error *err = NULL;
bool ambiguous = false;
Object *obj;
QObject *obj;
obj = object_resolve_path(path, &ambiguous);
if (obj == NULL) {
error_set(&err, ERROR_CLASS_DEVICE_NOT_FOUND,
"Device '%s' not found", path);
} else {
if (ambiguous) {
monitor_printf(mon, "Warning: Path '%s' is ambiguous\n", path);
obj = qobject_from_json(value, &err);
if (err == NULL) {
qmp_qom_set(path, property, obj, &err);
}
object_property_parse(obj, value, property, &err);
hmp_handle_error(mon, err);
}
void hmp_qom_get(Monitor *mon, const QDict *qdict)
{
const char *path = qdict_get_str(qdict, "path");
const char *property = qdict_get_str(qdict, "property");
Error *err = NULL;
QObject *obj = qmp_qom_get(path, property, &err);
if (err == NULL) {
QString *str = qobject_to_json_pretty(obj);
monitor_printf(mon, "%s\n", qstring_get_str(str));
qobject_unref(str);
}
hmp_handle_error(mon, err);
}

View File

@@ -61,6 +61,7 @@ static const char *hmp_cmds[] = {
"p $pc + 8",
"qom-list /",
"qom-set /machine initrd test",
"qom-get /machine initrd",
"screendump /dev/null",
"sendkey x",
"singlestep on",

View File

@@ -140,7 +140,6 @@ enum {
struct lo_data {
pthread_mutex_t mutex;
int debug;
int norace;
int writeback;
int flock;
int posix_lock;
@@ -176,7 +175,6 @@ static const struct fuse_opt lo_opts[] = {
{ "cache=none", offsetof(struct lo_data, cache), CACHE_NONE },
{ "cache=auto", offsetof(struct lo_data, cache), CACHE_AUTO },
{ "cache=always", offsetof(struct lo_data, cache), CACHE_ALWAYS },
{ "norace", offsetof(struct lo_data, norace), 1 },
{ "readdirplus", offsetof(struct lo_data, readdirplus_set), 1 },
{ "no_readdirplus", offsetof(struct lo_data, readdirplus_clear), 1 },
FUSE_OPT_END
@@ -592,136 +590,6 @@ static void lo_getattr(fuse_req_t req, fuse_ino_t ino,
fuse_reply_attr(req, &buf, lo->timeout);
}
/*
* Increments parent->nlookup and caller must release refcount using
* lo_inode_put(&parent).
*/
static int lo_parent_and_name(struct lo_data *lo, struct lo_inode *inode,
char path[PATH_MAX], struct lo_inode **parent)
{
char procname[64];
char *last;
struct stat stat;
struct lo_inode *p;
int retries = 2;
int res;
retry:
sprintf(procname, "%i", inode->fd);
res = readlinkat(lo->proc_self_fd, procname, path, PATH_MAX);
if (res < 0) {
fuse_log(FUSE_LOG_WARNING, "%s: readlink failed: %m\n", __func__);
goto fail_noretry;
}
if (res >= PATH_MAX) {
fuse_log(FUSE_LOG_WARNING, "%s: readlink overflowed\n", __func__);
goto fail_noretry;
}
path[res] = '\0';
last = strrchr(path, '/');
if (last == NULL) {
/* Shouldn't happen */
fuse_log(
FUSE_LOG_WARNING,
"%s: INTERNAL ERROR: bad path read from proc\n", __func__);
goto fail_noretry;
}
if (last == path) {
p = &lo->root;
pthread_mutex_lock(&lo->mutex);
p->nlookup++;
g_atomic_int_inc(&p->refcount);
pthread_mutex_unlock(&lo->mutex);
} else {
*last = '\0';
res = fstatat(AT_FDCWD, last == path ? "/" : path, &stat, 0);
if (res == -1) {
if (!retries) {
fuse_log(FUSE_LOG_WARNING,
"%s: failed to stat parent: %m\n", __func__);
}
goto fail;
}
p = lo_find(lo, &stat);
if (p == NULL) {
if (!retries) {
fuse_log(FUSE_LOG_WARNING,
"%s: failed to find parent\n", __func__);
}
goto fail;
}
}
last++;
res = fstatat(p->fd, last, &stat, AT_SYMLINK_NOFOLLOW);
if (res == -1) {
if (!retries) {
fuse_log(FUSE_LOG_WARNING,
"%s: failed to stat last\n", __func__);
}
goto fail_unref;
}
if (stat.st_dev != inode->key.dev || stat.st_ino != inode->key.ino) {
if (!retries) {
fuse_log(FUSE_LOG_WARNING,
"%s: failed to match last\n", __func__);
}
goto fail_unref;
}
*parent = p;
memmove(path, last, strlen(last) + 1);
return 0;
fail_unref:
unref_inode_lolocked(lo, p, 1);
lo_inode_put(lo, &p);
fail:
if (retries) {
retries--;
goto retry;
}
fail_noretry:
errno = EIO;
return -1;
}
static int utimensat_empty(struct lo_data *lo, struct lo_inode *inode,
const struct timespec *tv)
{
int res;
struct lo_inode *parent;
char path[PATH_MAX];
if (S_ISLNK(inode->filetype)) {
res = utimensat(inode->fd, "", tv, AT_EMPTY_PATH);
if (res == -1 && errno == EINVAL) {
/* Sorry, no race free way to set times on symlink. */
if (lo->norace) {
errno = EPERM;
} else {
goto fallback;
}
}
return res;
}
sprintf(path, "%i", inode->fd);
return utimensat(lo->proc_self_fd, path, tv, 0);
fallback:
res = lo_parent_and_name(lo, inode, path, &parent);
if (res != -1) {
res = utimensat(parent->fd, path, tv, AT_SYMLINK_NOFOLLOW);
unref_inode_lolocked(lo, parent, 1);
lo_inode_put(lo, &parent);
}
return res;
}
static int lo_fi_fd(fuse_req_t req, struct fuse_file_info *fi)
{
struct lo_data *lo = lo_data(req);
@@ -828,7 +696,8 @@ static void lo_setattr(fuse_req_t req, fuse_ino_t ino, struct stat *attr,
if (fi) {
res = futimens(fd, tv);
} else {
res = utimensat_empty(lo, inode, tv);
sprintf(procname, "%i", inode->fd);
res = utimensat(lo->proc_self_fd, procname, tv, 0);
}
if (res == -1) {
goto out_err;
@@ -1129,41 +998,6 @@ static void lo_symlink(fuse_req_t req, const char *link, fuse_ino_t parent,
lo_mknod_symlink(req, parent, name, S_IFLNK, 0, link);
}
static int linkat_empty_nofollow(struct lo_data *lo, struct lo_inode *inode,
int dfd, const char *name)
{
int res;
struct lo_inode *parent;
char path[PATH_MAX];
if (S_ISLNK(inode->filetype)) {
res = linkat(inode->fd, "", dfd, name, AT_EMPTY_PATH);
if (res == -1 && (errno == ENOENT || errno == EINVAL)) {
/* Sorry, no race free way to hard-link a symlink. */
if (lo->norace) {
errno = EPERM;
} else {
goto fallback;
}
}
return res;
}
sprintf(path, "%i", inode->fd);
return linkat(lo->proc_self_fd, path, dfd, name, AT_SYMLINK_FOLLOW);
fallback:
res = lo_parent_and_name(lo, inode, path, &parent);
if (res != -1) {
res = linkat(parent->fd, path, dfd, name, 0);
unref_inode_lolocked(lo, parent, 1);
lo_inode_put(lo, &parent);
}
return res;
}
static void lo_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t parent,
const char *name)
{
@@ -1172,6 +1006,7 @@ static void lo_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t parent,
struct lo_inode *parent_inode;
struct lo_inode *inode;
struct fuse_entry_param e;
char procname[64];
int saverr;
if (!is_safe_path_component(name)) {
@@ -1190,7 +1025,9 @@ static void lo_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t parent,
e.attr_timeout = lo->timeout;
e.entry_timeout = lo->timeout;
res = linkat_empty_nofollow(lo, inode, parent_inode->fd, name);
sprintf(procname, "%i", inode->fd);
res = linkat(lo->proc_self_fd, procname, parent_inode->fd, name,
AT_SYMLINK_FOLLOW);
if (res == -1) {
goto out_err;
}