migration: move some code to ram_save_host_page

Move some code from ram_save_target_page() to ram_save_host_page()
to make it more readable for later patches that dramatically clean
up ram_save_target_page().

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
Message-Id: <20180330075128.26919-7-xiaoguangrong@tencent.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
commit 1faa5665c0 (parent 059ff0fb29)
1 file changed, 19 insertions(+), 24 deletions(-)

@@ -1483,38 +1483,23 @@ err:
  * Returns the number of pages written
  *
  * @rs: current RAM state
- * @ms: current migration state
  * @pss: data about the page we want to send
  * @last_stage: if we are at the completion stage
  */
 static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
                                 bool last_stage)
 {
-    int res = 0;
-
-    /* Check the pages is dirty and if it is send it */
-    if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
-        /*
-         * If xbzrle is on, stop using the data compression after first
-         * round of migration even if compression is enabled. In theory,
-         * xbzrle can do better than compression.
-         */
-        if (migrate_use_compression() &&
-            (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
-            res = ram_save_compressed_page(rs, pss, last_stage);
-        } else {
-            res = ram_save_page(rs, pss, last_stage);
-        }
-
-        if (res < 0) {
-            return res;
-        }
-        if (pss->block->unsentmap) {
-            clear_bit(pss->page, pss->block->unsentmap);
-        }
+    /*
+     * If xbzrle is on, stop using the data compression after first
+     * round of migration even if compression is enabled. In theory,
+     * xbzrle can do better than compression.
+     */
+    if (migrate_use_compression() &&
+        (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
+        return ram_save_compressed_page(rs, pss, last_stage);
     }
-    return res;
+
+    return ram_save_page(rs, pss, last_stage);
 }
 
 /**
@@ -1543,12 +1528,22 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
         qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
 
     do {
+        /* Check the pages is dirty and if it is send it */
+        if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
+            pss->page++;
+            continue;
+        }
+
         tmppages = ram_save_target_page(rs, pss, last_stage);
         if (tmppages < 0) {
             return tmppages;
         }
 
         pages += tmppages;
+        if (pss->block->unsentmap) {
+            clear_bit(pss->page, pss->block->unsentmap);
+        }
+
         pss->page++;
     } while ((pss->page & (pagesize_bits - 1)) &&
              offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
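
To see the resulting division of labor outside the diff context, here is a small,
self-contained C sketch modeling the control flow after this patch. It is not QEMU
code: the bitmaps, page counts, and function names below are illustrative stand-ins,
with the real machinery (RAMState, PageSearchStatus, the compression path selection)
elided.

/*
 * Toy model (NOT QEMU code) of the control flow this patch establishes:
 * the host-page loop owns the dirty check and the unsent-bitmap
 * bookkeeping, while the per-page helper only chooses how to send.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGES_PER_HOST_PAGE 4

static bool dirty[PAGES_PER_HOST_PAGE]  = { true, false, true, true };
static bool unsent[PAGES_PER_HOST_PAGE] = { true, true,  true, true };

/* Stand-in for ram_save_target_page(): only picks a send path now. */
static int save_target_page(int page, bool compress)
{
    printf("page %d sent via %s path\n", page,
           compress ? "compressed" : "normal");
    return 1; /* number of pages written */
}

/*
 * Stand-in for ram_save_host_page(): walks the target pages of one host
 * page, doing the dirty check and bookkeeping the patch moved here.
 */
static int save_host_page(bool compress)
{
    int pages = 0;

    for (int page = 0; page < PAGES_PER_HOST_PAGE; page++) {
        /* Dirty check hoisted out of the helper, as in the patch... */
        if (!dirty[page]) {
            continue;
        }

        int tmppages = save_target_page(page, compress);
        if (tmppages < 0) {
            return tmppages;
        }
        pages += tmppages;

        /* ...and so is the "sent" bookkeeping. */
        unsent[page] = false;
    }
    return pages;
}

int main(void)
{
    printf("sent %d pages\n", save_host_page(true));
    return 0;
}

The point of the move is visible in the sketch: once the caller's loop owns the
dirty test and the unsentmap update, ram_save_target_page() reduces to choosing
a send path, which is what the follow-up cleanup patches mentioned in the commit
message build on.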