multifd: Only flush once each full round of memory
We need to add a new flag that means "flush at that point".  Notice that
we still flush at the end of setup and at the end of complete stages.

Signed-off-by: Juan Quintela <quintela@redhat.com>
Acked-by: Peter Xu <peterx@redhat.com>
---
Add missing qemu_fflush(); now it passes all tests always.
In the previous version, the check that changes the default value to
false got lost in some rebase.  Get it back.
commit 294e5a4034
parent b05292c237
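In rough terms, the sender-side change below boils down to the following sketch, distilled from the diff that follows.  The helper name flush_after_full_round is hypothetical (the patch open-codes this in find_dirty_block()); everything else is taken from the hunks below.

    /* Hypothetical helper: condensed illustration of the added sender-side logic. */
    static int flush_after_full_round(RAMState *rs)
    {
        /*
         * With multifd-flush-after-each-section=false we only sync the multifd
         * channels once per full pass over guest memory, not after every section.
         */
        if (!migrate_multifd_flush_after_each_section()) {
            QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
            int ret = multifd_send_sync_main(f);   /* drain all multifd channels */
            if (ret < 0) {
                return ret;
            }
            /* Tell the destination to run its matching multifd_recv_sync_main(). */
            qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
            qemu_fflush(f);
        }
        return 0;
    }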
@@ -412,8 +412,7 @@ struct MigrationState {
      * only need to do this flush after we have go through all the
      * dirty pages.  For historical reasons, we do that after each
      * section.  This is suboptimal (we flush too many times).
-     * Default value is false.  Setting this property has no effect
-     * until the patch that removes this comment.  (since 8.1)
+     * Default value is false.  (since 8.1)
      */
     bool multifd_flush_after_each_section;
     /*
@@ -89,7 +89,7 @@ Property migration_properties[] = {
     DEFINE_PROP_BOOL("decompress-error-check", MigrationState,
                       decompress_error_check, true),
     DEFINE_PROP_BOOL("multifd-flush-after-each-section", MigrationState,
-                      multifd_flush_after_each_section, true),
+                      multifd_flush_after_each_section, false),
     DEFINE_PROP_UINT8("x-clear-bitmap-shift", MigrationState,
                       clear_bitmap_shift, CLEAR_BITMAP_SHIFT_DEFAULT),
     DEFINE_PROP_BOOL("x-preempt-pre-7-2", MigrationState,
@@ -341,11 +341,7 @@ bool migrate_multifd_flush_after_each_section(void)
 {
     MigrationState *s = migrate_get_current();
 
-    /*
-     * Until the patch that remove this comment, we always return that
-     * the property is enabled.
-     */
-    return true || s->multifd_flush_after_each_section;
+    return s->multifd_flush_after_each_section;
 }
 
 bool migrate_postcopy(void)
@@ -86,6 +86,7 @@
 #define RAM_SAVE_FLAG_XBZRLE   0x40
 /* 0x80 is reserved in qemu-file.h for RAM_SAVE_FLAG_HOOK */
 #define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
+#define RAM_SAVE_FLAG_MULTIFD_FLUSH    0x200
 /* We can't use any flag that is bigger than 0x200 */
 
 int (*xbzrle_encode_buffer_func)(uint8_t *, uint8_t *, int,
@@ -1581,6 +1582,7 @@ retry:
  * associated with the search process.
  *
  * Returns:
+ *         <0: An error happened
  *         PAGE_ALL_CLEAN: no dirty page found, give up
  *         PAGE_TRY_AGAIN: no dirty page found, retry for next block
  *         PAGE_DIRTY_FOUND: dirty page found
@@ -1608,6 +1610,15 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
         pss->page = 0;
         pss->block = QLIST_NEXT_RCU(pss->block, next);
         if (!pss->block) {
+            if (!migrate_multifd_flush_after_each_section()) {
+                QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
+                int ret = multifd_send_sync_main(f);
+                if (ret < 0) {
+                    return ret;
+                }
+                qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
+                qemu_fflush(f);
+            }
             /*
              * If memory migration starts over, we will meet a dirtied page
              * which may still exists in compression threads's ring, so we
@@ -2600,6 +2611,9 @@ static int ram_find_and_save_block(RAMState *rs)
                 break;
             } else if (res == PAGE_TRY_AGAIN) {
                 continue;
+            } else if (res < 0) {
+                pages = res;
+                break;
             }
         }
     }
@@ -3286,6 +3300,10 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
         return ret;
     }
 
+    if (!migrate_multifd_flush_after_each_section()) {
+        qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
+    }
+
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
     qemu_fflush(f);
 
@@ -3471,6 +3489,9 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
         return ret;
     }
 
+    if (!migrate_multifd_flush_after_each_section()) {
+        qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
+    }
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
     qemu_fflush(f);
 
@@ -4152,7 +4173,9 @@ int ram_load_postcopy(QEMUFile *f, int channel)
             }
             decompress_data_with_multi_threads(f, page_buffer, len);
            break;
-
+        case RAM_SAVE_FLAG_MULTIFD_FLUSH:
+            multifd_recv_sync_main();
+            break;
         case RAM_SAVE_FLAG_EOS:
             /* normal exit */
             if (migrate_multifd_flush_after_each_section()) {
@@ -4426,6 +4449,9 @@ static int ram_load_precopy(QEMUFile *f)
                 break;
             }
             break;
+        case RAM_SAVE_FLAG_MULTIFD_FLUSH:
+            multifd_recv_sync_main();
+            break;
         case RAM_SAVE_FLAG_EOS:
             /* normal exit */
             if (migrate_multifd_flush_after_each_section()) {