cpu_physical_memory_sync_dirty_bitmap: Another alignment fix
This code has an optimised, word aligned version, and a boring unaligned version. My commit f70d345
fixed one alignment issue, but there's another. The optimised version operates on 'longs' dealing with (typically) 64 pages at a time, replacing the whole long by a 0 and counting the bits. If the RAMBlock is less than 64 bits in length, that long can contain bits representing two different RAMBlocks, but the code will update the bmap belonging to the 1st RAMBlock only while having updated the total dirty page count for both. This probably didn't matter prior to 6b6712ef
which split the dirty bitmap by RAMBlock, but now that they're separate RAMBlocks we end up with a count that doesn't match the state in the bitmaps. Symptom: migration showing a few dirty pages left to be sent constantly. Seen on aarch64 and x86 with x86+OVMF. Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com> Reported-by: Wei Huang <wei@redhat.com> Fixes: 6b6712efcc
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
f4bdc13e49
commit
aa777e297c
@@ -391,9 +391,10 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
     uint64_t num_dirty = 0;
     unsigned long *dest = rb->bmap;

-    /* start address is aligned at the start of a word? */
+    /* start address and length is aligned at the start of a word? */
     if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
-        (start + rb->offset)) {
+        (start + rb->offset) &&
+        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
         int k;
         int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
         unsigned long * const *src;
Loading…
Reference in New Issue
Block a user