Reduce the PVM stop time during Checkpoint

When flushing memory from ram cache to ram during every checkpoint
on the secondary VM, we can copy contiguous chunks of memory instead of
4096 bytes at a time to reduce the VM stop time during checkpoint.

Signed-off-by: Lei Rao <lei.rao@intel.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Lukas Straub <lukasstraub2@web.de>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Tested-by: Lukas Straub <lukasstraub2@web.de>
Signed-off-by: Juan Quintela <quintela@redhat.com>
This commit is contained in:
Rao, Lei 2021-11-09 11:04:55 +08:00 committed by Juan Quintela
parent adc903a6c0
commit a6a83cef9c

View File

@ -836,6 +836,41 @@ migration_clear_memory_region_dirty_bitmap_range(RAMBlock *rb,
}
}
/*
 * colo_bitmap_find_dirty: find contiguous dirty pages from start
 *
 * Returns the page offset within memory region of the start of the
 * contiguous dirty page run.
 *
 * @rs: current RAM state (unused here; kept for signature consistency
 *      with migration_bitmap_find_dirty)
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 * @num: out-parameter, set to the number of contiguous dirty pages
 *       found (0 if none, or if @rb is an ignored block)
 */
static inline
unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                     unsigned long start, unsigned long *num)
{
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;
    unsigned long first, next;

    *num = 0;

    /* Ignored blocks carry no dirty pages; report "past the end". */
    if (ramblock_is_ignored(rb)) {
        return size;
    }

    first = find_next_bit(bitmap, size, start);
    if (first >= size) {
        /* No dirty page at or after @start. */
        return first;
    }
    /* Extend the run until the first clean page after @first. */
    next = find_next_zero_bit(bitmap, size, first + 1);
    assert(next >= first);
    *num = next - first;
    return first;
}
static inline bool migration_bitmap_clear_dirty(RAMState *rs,
RAMBlock *rb,
unsigned long page)
@ -3886,19 +3921,26 @@ void colo_flush_ram_cache(void)
block = QLIST_FIRST_RCU(&ram_list.blocks);
while (block) {
offset = migration_bitmap_find_dirty(ram_state, block, offset);
unsigned long num = 0;
offset = colo_bitmap_find_dirty(ram_state, block, offset, &num);
if (!offset_in_ramblock(block,
((ram_addr_t)offset) << TARGET_PAGE_BITS)) {
offset = 0;
num = 0;
block = QLIST_NEXT_RCU(block, next);
} else {
migration_bitmap_clear_dirty(ram_state, block, offset);
unsigned long i = 0;
for (i = 0; i < num; i++) {
migration_bitmap_clear_dirty(ram_state, block, offset + i);
}
dst_host = block->host
+ (((ram_addr_t)offset) << TARGET_PAGE_BITS);
src_host = block->colo_cache
+ (((ram_addr_t)offset) << TARGET_PAGE_BITS);
memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
memcpy(dst_host, src_host, TARGET_PAGE_SIZE * num);
offset += num;
}
}
}