migration: Simplify alignment and alignment checks

Let's use QEMU_ALIGN_DOWN() and friends to make the code a bit easier to
read.

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
This commit is contained in:
David Hildenbrand 2021-10-11 19:53:44 +02:00 committed by Juan Quintela
parent 9470c5e082
commit 7648297d40
3 changed files with 8 additions and 9 deletions

View File

@@ -391,7 +391,7 @@ int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
 int migrate_send_rp_req_pages(MigrationIncomingState *mis,
                               RAMBlock *rb, ram_addr_t start, uint64_t haddr)
 {
-    void *aligned = (void *)(uintptr_t)(haddr & (-qemu_ram_pagesize(rb)));
+    void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
     bool received = false;
 
     WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
@@ -2637,8 +2637,8 @@ static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
      * Since we currently insist on matching page sizes, just sanity check
      * we're being asked for whole host pages.
      */
-    if (start & (our_host_ps - 1) ||
-        (len & (our_host_ps - 1))) {
+    if (!QEMU_IS_ALIGNED(start, our_host_ps) ||
+        !QEMU_IS_ALIGNED(len, our_host_ps)) {
         error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
                      " len: %zd", __func__, start, len);
         mark_source_rp_bad(ms);

View File

@@ -402,7 +402,7 @@ bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
                      strerror(errno));
         goto out;
     }
-    g_assert(((size_t)testarea & (pagesize - 1)) == 0);
+    g_assert(QEMU_PTR_IS_ALIGNED(testarea, pagesize));
 
     reg_struct.range.start = (uintptr_t)testarea;
     reg_struct.range.len = pagesize;
@@ -660,7 +660,7 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd,
     struct uffdio_range range;
     int ret;
     trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
-    range.start = client_addr & ~(pagesize - 1);
+    range.start = ROUND_DOWN(client_addr, pagesize);
     range.len = pagesize;
     ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
     if (ret) {
@@ -702,8 +702,7 @@ static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
 int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                  uint64_t client_addr, uint64_t rb_offset)
 {
-    size_t pagesize = qemu_ram_pagesize(rb);
-    uint64_t aligned_rbo = rb_offset & ~(pagesize - 1);
+    uint64_t aligned_rbo = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
     MigrationIncomingState *mis = migration_incoming_get_current();
 
     trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
@@ -993,7 +992,7 @@ static void *postcopy_ram_fault_thread(void *opaque)
                 break;
             }
-            rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
+            rb_offset = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
             trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                     qemu_ram_get_idstr(rb),
                                                     rb_offset,

View File

@@ -811,7 +811,7 @@ static void migration_clear_memory_region_dirty_bitmap(RAMBlock *rb,
     assert(shift >= 6);
     size = 1ULL << (TARGET_PAGE_BITS + shift);
-    start = (((ram_addr_t)page) << TARGET_PAGE_BITS) & (-size);
+    start = QEMU_ALIGN_DOWN((ram_addr_t)page << TARGET_PAGE_BITS, size);
     trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
     memory_region_clear_dirty_bitmap(rb->mr, start, size);
 }