diff --git a/migration/block.c b/migration/block.c
index 310e2b36dc..656f38f341 100644
--- a/migration/block.c
+++ b/migration/block.c
@@ -36,6 +36,8 @@
 
 #define MAX_IS_ALLOCATED_SEARCH 65536
 
+#define MAX_INFLIGHT_IO 512
+
 //#define DEBUG_BLK_MIGRATION
 
 #ifdef DEBUG_BLK_MIGRATION
@@ -665,7 +667,10 @@ static int block_save_iterate(QEMUFile *f, void *opaque)
     blk_mig_lock();
     while ((block_mig_state.submitted +
             block_mig_state.read_done) * BLOCK_SIZE <
-           qemu_file_get_rate_limit(f)) {
+           qemu_file_get_rate_limit(f) &&
+           (block_mig_state.submitted +
+            block_mig_state.read_done) <
+           MAX_INFLIGHT_IO) {
         blk_mig_unlock();
         if (block_mig_state.bulk_completed == 0) {
             /* first finish the bulk phase */
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index 22d6b18e63..3946aa98aa 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -241,10 +241,7 @@ static int cleanup_range(const char *block_name, void *host_addr,
      * We turned off hugepage for the precopy stage with postcopy enabled
      * we can turn it back on now.
      */
-    if (qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE)) {
-        error_report("%s HUGEPAGE: %s", __func__, strerror(errno));
-        return -1;
-    }
+    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);
 
     /*
      * We can also turn off userfault now since we should have all the
@@ -345,10 +342,7 @@ static int nhp_range(const char *block_name, void *host_addr,
      * do delete areas of the page, even if THP thinks a hugepage would
      * be a good idea, so force hugepages off.
      */
-    if (qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE)) {
-        error_report("%s: NOHUGEPAGE: %s", __func__, strerror(errno));
-        return -1;
-    }
+    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);
 
     return 0;
 }