diff --git a/migration/multifd.c b/migration/multifd.c
index 5e85c3ea9b..cbc0dfe39b 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -677,7 +677,7 @@ static void *multifd_send_thread(void *opaque)
 
         if (p->pending_job) {
             uint64_t packet_num = p->packet_num;
-            uint32_t flags = p->flags;
+            uint32_t flags;
             p->normal_num = 0;
 
             if (use_zero_copy_send) {
@@ -699,6 +699,7 @@ static void *multifd_send_thread(void *opaque)
                 }
             }
             multifd_send_fill_packet(p);
+            flags = p->flags;
             p->flags = 0;
             p->num_packets++;
             p->total_normal_pages += p->normal_num;
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index f54f44d899..41c0713650 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -1197,11 +1197,6 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
     }
 
     if (migrate_postcopy_preempt()) {
-        /*
-         * The preempt channel is established in asynchronous way. Wait
-         * for its completion.
-         */
-        qemu_sem_wait(&mis->postcopy_qemufile_dst_done);
         /*
          * This thread needs to be created after the temp pages because
          * it'll fetch RAM_CHANNEL_POSTCOPY PostcopyTmpPage immediately.
@@ -1668,6 +1663,12 @@ void *postcopy_preempt_thread(void *opaque)
 
     qemu_sem_post(&mis->thread_sync_sem);
 
+    /*
+     * The preempt channel is established in asynchronous way. Wait
+     * for its completion.
+     */
+    qemu_sem_wait(&mis->postcopy_qemufile_dst_done);
+
     /* Sending RAM_SAVE_FLAG_EOS to terminate this thread */
     qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
     while (1) {
diff --git a/migration/rdma.c b/migration/rdma.c
index 288eadc2d2..df646be35e 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -3373,7 +3373,8 @@ static int qemu_rdma_accept(RDMAContext *rdma)
      * initialize the RDMAContext for return path for postcopy after first
      * connection request reached.
      */
-    if (migrate_postcopy() && !rdma->is_return_path) {
+    if ((migrate_postcopy() || migrate_use_return_path())
+        && !rdma->is_return_path) {
         rdma_return_path = qemu_rdma_data_init(rdma->host_port, NULL);
         if (rdma_return_path == NULL) {
             rdma_ack_cm_event(cm_event);
@@ -3455,7 +3456,8 @@ static int qemu_rdma_accept(RDMAContext *rdma)
     }
 
     /* Accept the second connection request for return path */
-    if (migrate_postcopy() && !rdma->is_return_path) {
+    if ((migrate_postcopy() || migrate_use_return_path())
+        && !rdma->is_return_path) {
         qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration,
                             NULL,
                             (void *)(intptr_t)rdma->return_path);
@@ -4109,7 +4111,7 @@ static void rdma_accept_incoming_migration(void *opaque)
 void rdma_start_incoming_migration(const char *host_port, Error **errp)
 {
     int ret;
-    RDMAContext *rdma, *rdma_return_path = NULL;
+    RDMAContext *rdma;
     Error *local_err = NULL;
 
     trace_rdma_start_incoming_migration();
@@ -4155,7 +4157,6 @@ err:
         g_free(rdma->host_port);
     }
     g_free(rdma);
-    g_free(rdma_return_path);
 }
 
 void rdma_start_outgoing_migration(void *opaque,
@@ -4192,7 +4193,7 @@ void rdma_start_outgoing_migration(void *opaque,
     }
 
     /* RDMA postcopy need a separate queue pair for return path */
-    if (migrate_postcopy()) {
+    if (migrate_postcopy() || migrate_use_return_path()) {
         rdma_return_path = qemu_rdma_data_init(host_port, errp);
 
         if (rdma_return_path == NULL) {
diff --git a/migration/target.c b/migration/target.c
index 907ebf0a0a..00ca007f97 100644
--- a/migration/target.c
+++ b/migration/target.c
@@ -8,6 +8,7 @@
 #include "qemu/osdep.h"
 #include "qapi/qapi-types-migration.h"
 #include "migration.h"
+#include CONFIG_DEVICES
 
 #ifdef CONFIG_VFIO
 #include "hw/vfio/vfio-common.h"
@@ -17,7 +18,6 @@ void populate_vfio_info(MigrationInfo *info)
 {
 #ifdef CONFIG_VFIO
     if (vfio_mig_active()) {
-        info->has_vfio = true;
         info->vfio = g_malloc0(sizeof(*info->vfio));
         info->vfio->transferred = vfio_mig_bytes_transferred();
     }
diff --git a/migration/xbzrle.c b/migration/xbzrle.c
index 05366e86c0..c6f8b20917 100644
--- a/migration/xbzrle.c
+++ b/migration/xbzrle.c
@@ -12,6 +12,7 @@
  */
 #include "qemu/osdep.h"
 #include "qemu/cutils.h"
+#include "qemu/host-utils.h"
 #include "xbzrle.h"
 
 /*
@@ -196,10 +197,6 @@ int xbzrle_encode_buffer_avx512(uint8_t *old_buf, uint8_t *new_buf, int slen,
     __m512i r = _mm512_set1_epi32(0);
 
     while (count512s) {
-        if (d + 2 > dlen) {
-            return -1;
-        }
-
         int bytes_to_check = 64;
         uint64_t mask = 0xffffffffffffffff;
         if (count512s == 1) {
@@ -215,6 +212,9 @@ int xbzrle_encode_buffer_avx512(uint8_t *old_buf, uint8_t *new_buf, int slen,
         bool is_same = (comp & 0x1);
 
         while (bytes_to_check) {
+            if (d + 2 > dlen) {
+                return -1;
+            }
             if (is_same) {
                 if (nzrun_len) {
                     d += uleb128_encode_small(dst + d, nzrun_len);
@@ -233,7 +233,7 @@ int xbzrle_encode_buffer_avx512(uint8_t *old_buf, uint8_t *new_buf, int slen,
                     break;
                 }
                 never_same = false;
-                num = __builtin_ctzll(~comp);
+                num = ctz64(~comp);
                 num = (num < bytes_to_check) ? num : bytes_to_check;
                 zrun_len += num;
                 bytes_to_check -= num;
@@ -262,7 +262,7 @@ int xbzrle_encode_buffer_avx512(uint8_t *old_buf, uint8_t *new_buf, int slen,
                 nzrun_len += 64;
                 break;
             }
-            num = __builtin_ctzll(comp);
+            num = ctz64(comp);
             num = (num < bytes_to_check) ? num : bytes_to_check;
             nzrun_len += num;
             bytes_to_check -= num;