migration: synchronize memory bitmap 64bits at a time

We use the old code if the bitmaps are not aligned

Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Orit Wasserman <owasserm@redhat.com>
This commit is contained in:
Juan Quintela 2013-11-06 11:33:05 +01:00
parent 791fa2a245
commit aa8dc04477

View File

@@ -50,6 +50,7 @@
#include "exec/cpu-all.h"
#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"
#include "qemu/host-utils.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
@@ -376,7 +377,25 @@ static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t addr;
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
/* start address is aligned at the start of a word? */
if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
int k;
int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];
for (k = page; k < page + nr; k++) {
if (src[k]) {
unsigned long new_dirty;
new_dirty = ~migration_bitmap[k];
migration_bitmap[k] |= src[k];
new_dirty &= src[k];
migration_dirty_pages += ctpopl(new_dirty);
src[k] = 0;
}
}
} else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(start + addr,
                                              TARGET_PAGE_SIZE,
@@ -387,6 +406,7 @@ static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
                migration_bitmap_set_dirty(start + addr);
            }
        }
    }
}