migration/postcopy: enable compress during postcopy
Postcopy requires placing a whole host page at once, while the migration
thread migrates memory in target page size. This means postcopy has to
collect all target pages of one host page before placing them via
userfaultfd.

To enable compression during postcopy, two problems have to be solved:

    1. Target pages arrive in random order
    2. Target pages of one host page must arrive without being
       interleaved with target pages from other host pages

The first one is handled by the previous cleanup patch.

This patch handles the second one by:

    1. Flushing the compress threads for each host page
    2. Waiting for the decompress threads before placing a host page

Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
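To make the host-page/target-page relationship concrete: with 4 KiB target pages backed by a 2 MiB huge page, the destination must gather 512 target pages before it can place anything. Below is a minimal staging sketch of that idea, not QEMU's actual code; the names (host_page_buffer, collect_target_page, received) are hypothetical.

/*
 * Minimal sketch (not QEMU's code): gather target-page-sized chunks of
 * one host page into a staging buffer; only a fully collected host page
 * may be placed.  All names here are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define TARGET_PAGE_SIZE 4096               /* e.g. 4 KiB target pages */
#define HOST_PAGE_SIZE   (2 * 1024 * 1024)  /* e.g. a 2 MiB huge page  */

static char host_page_buffer[HOST_PAGE_SIZE];
static bool received[HOST_PAGE_SIZE / TARGET_PAGE_SIZE];

/*
 * Copy one incoming target page into its slot of the staging buffer;
 * return true once every slot of the host page has arrived.
 */
static bool collect_target_page(size_t offset_in_host_page, const void *data)
{
    memcpy(host_page_buffer + offset_in_host_page, data, TARGET_PAGE_SIZE);
    received[offset_in_host_page / TARGET_PAGE_SIZE] = true;

    for (size_t i = 0; i < HOST_PAGE_SIZE / TARGET_PAGE_SIZE; i++) {
        if (!received[i]) {
            return false;
        }
    }
    return true;    /* whole host page collected: safe to place it */
}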
commit 644acf99b8
parent 91ba442f5c
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1005,17 +1005,6 @@ static bool migrate_caps_check(bool *cap_list,
 #endif
 
     if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
-        if (cap_list[MIGRATION_CAPABILITY_COMPRESS]) {
-            /* The decompression threads asynchronously write into RAM
-             * rather than use the atomic copies needed to avoid
-             * userfaulting.  It should be possible to fix the decompression
-             * threads for compatibility in future.
-             */
-            error_setg(errp, "Postcopy is not currently compatible "
-                             "with compression");
-            return false;
-        }
-
         /* This check is reasonably expensive, so only when it's being
          * set the first time, also it's only the destination that needs
          * special support.
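The comment removed above explains the old restriction: the decompress threads wrote straight into guest RAM instead of going through the atomic copy that userfaultfd requires. For reference, postcopy places a completed page with the UFFDIO_COPY ioctl, roughly as in this hedged sketch (error handling trimmed; uffd is assumed to be a userfaultfd descriptor already registered for the target range):

/*
 * Hedged sketch of the atomic placement postcopy relies on: UFFDIO_COPY
 * copies a fully populated buffer into the faulting range in one step,
 * so the guest never observes a partially written page.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int place_host_page(int uffd, void *guest_addr, void *src,
                           uint64_t host_page_size)
{
    struct uffdio_copy copy = {
        .dst = (uintptr_t)guest_addr,
        .src = (uintptr_t)src,
        .len = host_page_size,
        .mode = 0,
    };

    /*
     * The kernel installs the page atomically and wakes any vCPU blocked
     * on the fault; a thread writing into guest RAM directly (as the
     * decompress threads used to) would bypass this mechanism.
     */
    return ioctl(uffd, UFFDIO_COPY, &copy);
}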
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -3469,6 +3469,14 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
 
             rs->target_page_count += pages;
 
+            /*
+             * During postcopy, it is necessary to make sure one whole host
+             * page is sent in one chunk.
+             */
+            if (migrate_postcopy_ram()) {
+                flush_compressed_data(rs);
+            }
+
             /*
              * we want to check in the 1st loop, just in case it was the 1st
              * time and we had to sync the dirty bitmap.
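The flush above is the sender-side half of the fix: the compress threads buffer their output, so without a flush a target page could be emitted after pages belonging to the next host page. A toy, self-contained illustration of the ordering invariant (every helper here is a hypothetical stand-in for QEMU internals):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stubs standing in for QEMU internals. */
static bool migrate_postcopy_ram(void) { return true; }
static void queue_target_page(int hp, int tp)
{
    printf("queue host page %d, target page %d to compress threads\n", hp, tp);
}
static void flush_compressed_data(void)
{
    printf("flush: all queued pages written to the stream\n");
}

int main(void)
{
    /*
     * One flush per host page keeps each host page's target pages
     * contiguous on the wire, which ram_load_postcopy() depends on.
     */
    for (int hp = 0; hp < 2; hp++) {
        for (int tp = 0; tp < 4; tp++) {
            queue_target_page(hp, tp);
        }
        if (migrate_postcopy_ram()) {
            flush_compressed_data();
        }
    }
    return 0;
}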
@@ -4061,6 +4069,7 @@ static int ram_load_postcopy(QEMUFile *f)
         void *place_source = NULL;
         RAMBlock *block = NULL;
         uint8_t ch;
+        int len;
 
         addr = qemu_get_be64(f);
 
@@ -4078,7 +4087,8 @@ static int ram_load_postcopy(QEMUFile *f)
 
         trace_ram_load_postcopy_loop((uint64_t)addr, flags);
         place_needed = false;
-        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
+        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
+                     RAM_SAVE_FLAG_COMPRESS_PAGE)) {
             block = ram_block_from_stream(f, flags);
 
             host = host_from_ram_block_offset(block, addr);
@@ -4161,6 +4171,17 @@ static int ram_load_postcopy(QEMUFile *f)
                                          TARGET_PAGE_SIZE);
             }
             break;
+        case RAM_SAVE_FLAG_COMPRESS_PAGE:
+            all_zero = false;
+            len = qemu_get_be32(f);
+            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
+                error_report("Invalid compressed data length: %d", len);
+                ret = -EINVAL;
+                break;
+            }
+            decompress_data_with_multi_threads(f, page_buffer, len);
+            break;
+
         case RAM_SAVE_FLAG_EOS:
             /* normal exit */
             multifd_recv_sync_main();
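The length check above leans on a zlib guarantee: compress2() never produces more than compressBound(srcLen) bytes for an input of srcLen bytes, so any larger value read from the stream must be corrupt. A small standalone zlib example of that bound (not QEMU code):

/*
 * Minimal zlib sketch showing the compressBound() guarantee that the
 * receiver's sanity check relies on.
 */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

enum { PAGE_SIZE = 4096 };

int main(void)
{
    unsigned char page[PAGE_SIZE];
    unsigned char out[PAGE_SIZE + 1024];    /* >= compressBound(PAGE_SIZE) */
    uLongf out_len = sizeof(out);

    memset(page, 0x5a, sizeof(page));
    if (compress2(out, &out_len, page, PAGE_SIZE, Z_BEST_SPEED) != Z_OK) {
        return 1;
    }
    /* out_len can never exceed the bound, so a stream claiming more is bad */
    printf("compressed %d -> %lu bytes (bound %lu)\n",
           PAGE_SIZE, out_len, compressBound(PAGE_SIZE));
    return 0;
}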
@@ -4172,6 +4193,11 @@ static int ram_load_postcopy(QEMUFile *f)
             break;
         }
 
+        /* Got the whole host page, wait for decompress before placing. */
+        if (place_needed) {
+            ret |= wait_for_decompress_done();
+        }
+
         /* Detect for any possible file errors */
         if (!ret && qemu_file_get_error(f)) {
             ret = qemu_file_get_error(f);
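This is the receiver-side half of the fix: the decompress threads write into the per-host-page buffer asynchronously, so the load thread must wait for them to finish before the page is placed. QEMU's wait_for_decompress_done() is more involved; a bare-bones sketch of the same idea with a pthread condition variable:

/*
 * Hedged sketch only: decompress workers decrement `pending`; the load
 * thread blocks until it reaches zero before placing the host page.
 */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static int pending;     /* decompress jobs still writing the host page */

static void decompress_job_finished(void)
{
    pthread_mutex_lock(&lock);
    if (--pending == 0) {
        pthread_cond_signal(&done);
    }
    pthread_mutex_unlock(&lock);
}

static void wait_for_decompress_done_sketch(void)
{
    pthread_mutex_lock(&lock);
    while (pending > 0) {
        pthread_cond_wait(&done, &lock);
    }
    pthread_mutex_unlock(&lock);
    /* Staging buffer is now fully written; UFFDIO_COPY may run. */
}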