kvm: clear dirty bitmaps from all overlapping memslots
Currently a MemoryRegionSection has a 1:1 mapping to a KVMSlot. However, the next patch will allow splitting a MemoryRegionSection into several KVMSlots, so make sure that kvm_physical_log_clear() is able to handle such a 1:N mapping. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Igor Mammedov <imammedo@redhat.com> Reviewed-by: Peter Xu <peterx@redhat.com> Message-Id: <20190924144751.24149-3-imammedo@redhat.com> Acked-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
This commit is contained in:
parent
4222147dfb
commit
84516e5b8d
@ -589,8 +589,8 @@ static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
|
||||
* satisfy the KVM interface requirement. Firstly, do the start
|
||||
* page alignment on 64 host pages
|
||||
*/
|
||||
bmap_start = (start - mem->start_addr) & KVM_CLEAR_LOG_MASK;
|
||||
start_delta = start - mem->start_addr - bmap_start;
|
||||
bmap_start = start & KVM_CLEAR_LOG_MASK;
|
||||
start_delta = start - bmap_start;
|
||||
bmap_start /= psize;
|
||||
|
||||
/*
|
||||
@ -694,8 +694,8 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
KVMState *s = kvm_state;
|
||||
uint64_t start, size;
|
||||
KVMSlot *mem = NULL;
|
||||
uint64_t start, size, offset, count;
|
||||
KVMSlot *mem;
|
||||
int ret, i;
|
||||
|
||||
if (!s->manual_dirty_log_protect) {
|
||||
@ -713,22 +713,30 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
|
||||
|
||||
kvm_slots_lock(kml);
|
||||
|
||||
/* Find any possible slot that covers the section */
|
||||
for (i = 0; i < s->nr_slots; i++) {
|
||||
mem = &kml->slots[i];
|
||||
if (mem->start_addr <= start &&
|
||||
start + size <= mem->start_addr + mem->memory_size) {
|
||||
/* Discard slots that are empty or do not overlap the section */
|
||||
if (!mem->memory_size ||
|
||||
mem->start_addr > start + size - 1 ||
|
||||
start > mem->start_addr + mem->memory_size - 1) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (start >= mem->start_addr) {
|
||||
/* The slot starts before section or is aligned to it. */
|
||||
offset = start - mem->start_addr;
|
||||
count = MIN(mem->memory_size - offset, size);
|
||||
} else {
|
||||
/* The slot starts after section. */
|
||||
offset = 0;
|
||||
count = MIN(mem->memory_size, size - (mem->start_addr - start));
|
||||
}
|
||||
ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
|
||||
if (ret < 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* We should always find one memslot until this point, otherwise
|
||||
* there could be something wrong from the upper layer
|
||||
*/
|
||||
assert(mem && i != s->nr_slots);
|
||||
ret = kvm_log_clear_one_slot(mem, kml->as_id, start, size);
|
||||
|
||||
kvm_slots_unlock(kml);
|
||||
|
||||
return ret;
|
||||
|
Loading…
Reference in New Issue
Block a user