kvm: avoid cpu_get_physical_page_desc()

This reaches into the innards of the memory core, which are being
changed.  Switch to a memory API version.

Signed-off-by: Avi Kivity <avi@redhat.com>
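
For illustration only, here is a minimal, self-contained sketch of the section-relative dirty-bitmap walk that the reworked kvm_get_dirty_pages_log_range() performs. FakeSection, fake_set_dirty() and walk_dirty_bitmap() are hypothetical stand-ins for MemoryRegionSection, memory_region_set_dirty() and the real QEMU function; they are not QEMU code.

/* Hypothetical stand-ins; the real code lives in QEMU's kvm code. */
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_SIZE 4096
#define HOST_LONG_BITS   (8 * sizeof(unsigned long))

/* Simplified stand-in for the MemoryRegionSection fields the walk needs. */
typedef struct {
    const char *mr_name;                 /* stands in for section->mr */
    unsigned long offset_within_region;
    unsigned long size;
} FakeSection;

/* Stand-in for memory_region_set_dirty(section->mr, addr). */
static void fake_set_dirty(const FakeSection *s, unsigned long addr)
{
    printf("dirty: %s + 0x%lx\n", s->mr_name, addr);
}

/* Walk kvm's dirty bitmap and mark each set page at a section-relative
 * offset, mirroring the shape of the new kvm_get_dirty_pages_log_range(). */
static void walk_dirty_bitmap(const FakeSection *section,
                              const unsigned long *bitmap)
{
    unsigned long len = (section->size / TARGET_PAGE_SIZE +
                         HOST_LONG_BITS - 1) / HOST_LONG_BITS;

    for (unsigned long i = 0; i < len; i++) {
        unsigned long c = bitmap[i];
        while (c != 0) {
            int j = __builtin_ffsl(c) - 1;   /* QEMU uses ffsl() here */
            c &= ~(1ul << j);
            unsigned long page = i * HOST_LONG_BITS + j;
            unsigned long addr = section->offset_within_region +
                                 page * TARGET_PAGE_SIZE;
            fake_set_dirty(section, addr);
        }
    }
}

int main(void)
{
    FakeSection section = { "ram", 0, 16 * TARGET_PAGE_SIZE };
    unsigned long bitmap[1] = { 0x5 };   /* pages 0 and 2 dirty */
    walk_dirty_bitmap(&section, bitmap);
    return 0;
}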
Avi Kivity 2011-12-19 13:18:13 +02:00
parent dcd97e33af
commit ffcde12f6c
1 changed file with 10 additions and 17 deletions


@@ -340,16 +340,12 @@ static int kvm_set_migration_log(int enable)
 }

 /* get kvm's dirty pages bitmap and update qemu's */
-static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
-                                         unsigned long *bitmap,
-                                         unsigned long offset,
-                                         unsigned long mem_size)
+static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
+                                         unsigned long *bitmap)
 {
     unsigned int i, j;
     unsigned long page_number, addr, addr1, c;
-    ram_addr_t ram_addr;
-    unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) /
-        HOST_LONG_BITS;
+    unsigned int len = ((section->size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) / HOST_LONG_BITS;

     /*
      * bitmap-traveling is faster than memory-traveling (for addr...)
@@ -363,9 +359,8 @@ static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
                 c &= ~(1ul << j);
                 page_number = i * HOST_LONG_BITS + j;
                 addr1 = page_number * TARGET_PAGE_SIZE;
-                addr = offset + addr1;
-                ram_addr = cpu_get_physical_page_desc(addr);
-                cpu_physical_memory_set_dirty(ram_addr);
+                addr = section->offset_within_region + addr1;
+                memory_region_set_dirty(section->mr, addr);
             } while (c != 0);
         }
     }
@@ -382,14 +377,15 @@ static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
  * @start_add: start of logged region.
  * @end_addr: end of logged region.
  */
-static int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
-                                          target_phys_addr_t end_addr)
+static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
 {
     KVMState *s = kvm_state;
     unsigned long size, allocated_size = 0;
     KVMDirtyLog d;
     KVMSlot *mem;
     int ret = 0;
+    target_phys_addr_t start_addr = section->offset_within_address_space;
+    target_phys_addr_t end_addr = start_addr + section->size;

     d.dirty_bitmap = NULL;
     while (start_addr < end_addr) {
@@ -428,8 +424,7 @@ static int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
             break;
         }

-        kvm_get_dirty_pages_log_range(mem->start_addr, d.dirty_bitmap,
-                                      mem->start_addr, mem->memory_size);
+        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
         start_addr = mem->start_addr + mem->memory_size;
     }
     g_free(d.dirty_bitmap);
@@ -686,11 +681,9 @@ static void kvm_region_del(MemoryListener *listener,
 static void kvm_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
 {
-    target_phys_addr_t start = section->offset_within_address_space;
-    target_phys_addr_t end = start + section->size;
     int r;

-    r = kvm_physical_sync_dirty_bitmap(start, end);
+    r = kvm_physical_sync_dirty_bitmap(section);
     if (r < 0) {
         abort();
     }