dump: fix kdump to work over non-aligned blocks

Rewrite get_next_page() to work over non-aligned blocks. When it
encounters non-aligned addresses, it will try to fill a page provided by
the caller.
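
As an illustration only (not part of this commit), here is a minimal, self-contained
sketch of that page-assembly idea: bytes from a block that covers a target page only
partially are copied into a caller-provided, zero-filled page buffer at the matching
offset. Block, assemble_page and PAGE_SIZE are names made up for the sketch, not QEMU APIs.

    /* Sketch only: Block, assemble_page and PAGE_SIZE are illustrative, not QEMU APIs. */
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    typedef struct {
        uint64_t target_start;      /* guest-physical start, may be unaligned */
        uint64_t target_end;        /* guest-physical end, may be unaligned */
        const uint8_t *host_addr;   /* host mapping of the block */
    } Block;

    /* Copy the part of 'b' that intersects the page starting at 'page_addr'
     * into the caller-provided, zero-filled 'page' buffer. */
    static void assemble_page(uint8_t *page, uint64_t page_addr, const Block *b)
    {
        uint64_t lo = b->target_start > page_addr ? b->target_start : page_addr;
        uint64_t hi = b->target_end < page_addr + PAGE_SIZE ?
                      b->target_end : page_addr + PAGE_SIZE;

        if (lo < hi) {
            memcpy(page + (lo - page_addr),
                   b->host_addr + (lo - b->target_start), hi - lo);
        }
    }

    int main(void)
    {
        /* A block shaped like the tpm-crb-cmd region: it starts 0x80 bytes
         * into an otherwise empty 4 KiB page. */
        static uint8_t backing[0x1000 - 0x80];
        memset(backing, 0xab, sizeof(backing));
        Block b = { 0xfed40080, 0xfed41000, backing };

        uint8_t page[PAGE_SIZE] = { 0 };    /* caller-provided page buffer */
        assemble_page(page, 0xfed40000, &b);

        /* Bytes before the block stay zero, bytes inside come from the block. */
        printf("page[0x7f]=%02x page[0x80]=%02x\n", page[0x7f], page[0x80]);
        return 0;
    }

This should print page[0x7f]=00 page[0x80]=ab: the gap before the unaligned block stays
zero while the block's bytes land at their offset within the page.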

This solves a kdump crash seen with the "tpm-crb-cmd" RAM memory region:

qemu-kvm: ../dump/dump.c:1162: _Bool get_next_page(GuestPhysBlock **,
uint64_t *, uint8_t **, DumpState *): Assertion `(block->target_start &
~target_page_mask) == 0' failed.

The assertion fires because that block is not aligned to the target page size:

guest_phys_block_add_section: target_start=00000000fed40080 target_end=00000000fed41000: added (count: 4)
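
For clarity (again, not part of the commit message), the arithmetic behind the failed
assertion, assuming a 4 KiB target page size:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t target_start = 0xfed40080ULL;              /* tpm-crb-cmd block start */
        uint64_t target_page_mask = ~((uint64_t)4096 - 1);  /* 0x...fffff000 */

        /* Prints 0x80: the start is not page-aligned, so the old
         * assert((block->target_start & ~target_page_mask) == 0) cannot hold. */
        printf("0x%" PRIx64 "\n", target_start & ~target_page_mask);
        return 0;
    }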

Fixes:
https://bugzilla.redhat.com/show_bug.cgi?id=2120480

Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Marc-André Lureau 2022-09-05 16:06:21 +04:00
parent 08df343874
commit 94d788408d
1 changed file with 56 additions and 23 deletions


@@ -1116,50 +1116,81 @@ static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn)
 }
 /*
- * exam every page and return the page frame number and the address of the page.
- * bufptr can be NULL. note: the blocks here is supposed to reflect guest-phys
- * blocks, so block->target_start and block->target_end should be interal
- * multiples of the target page size.
+ * Return the page frame number and the page content in *bufptr. bufptr can be
+ * NULL. If not NULL, *bufptr must contains a target page size of pre-allocated
+ * memory. This is not necessarily the memory returned.
  */
 static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
                           uint8_t **bufptr, DumpState *s)
 {
     GuestPhysBlock *block = *blockptr;
-    hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1);
-    uint8_t *buf;
+    uint32_t page_size = s->dump_info.page_size;
+    uint8_t *buf = NULL, *hbuf;
+    hwaddr addr;
     /* block == NULL means the start of the iteration */
     if (!block) {
         block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
         *blockptr = block;
         addr = block->target_start;
         *pfnptr = dump_paddr_to_pfn(s, addr);
     } else {
-        addr = dump_pfn_to_paddr(s, *pfnptr + 1);
+        *pfnptr += 1;
+        addr = dump_pfn_to_paddr(s, *pfnptr);
     }
     assert(block != NULL);
-    if ((addr >= block->target_start) &&
-        (addr + s->dump_info.page_size <= block->target_end)) {
-        buf = block->host_addr + (addr - block->target_start);
-    } else {
-        /* the next page is in the next block */
-        block = QTAILQ_NEXT(block, next);
-        *blockptr = block;
-        if (!block) {
-            return false;
+    while (1) {
+        if (addr >= block->target_start && addr < block->target_end) {
+            size_t n = MIN(block->target_end - addr, page_size - addr % page_size);
+            hbuf = block->host_addr + (addr - block->target_start);
+            if (!buf) {
+                if (n == page_size) {
+                    /* this is a whole target page, go for it */
+                    assert(addr % page_size == 0);
+                    buf = hbuf;
+                    break;
+                } else if (bufptr) {
+                    assert(*bufptr);
+                    buf = *bufptr;
+                    memset(buf, 0, page_size);
+                } else {
+                    return true;
+                }
+            }
+            memcpy(buf + addr % page_size, hbuf, n);
+            addr += n;
+            if (addr % page_size == 0) {
+                /* we filled up the page */
+                break;
+            }
+        } else {
+            /* the next page is in the next block */
+            *blockptr = block = QTAILQ_NEXT(block, next);
+            if (!block) {
+                break;
+            }
+            addr = block->target_start;
+            /* are we still in the same page? */
+            if (dump_paddr_to_pfn(s, addr) != *pfnptr) {
+                if (buf) {
+                    /* no, but we already filled something earlier, return it */
+                    break;
+                } else {
+                    /* else continue from there */
+                    *pfnptr = dump_paddr_to_pfn(s, addr);
+                }
+            }
         }
-        addr = block->target_start;
-        buf = block->host_addr;
     }
-    assert((block->target_start & ~target_page_mask) == 0);
-    assert((block->target_end & ~target_page_mask) == 0);
-    *pfnptr = dump_paddr_to_pfn(s, addr);
     if (bufptr) {
         *bufptr = buf;
     }
-    return true;
+    return buf != NULL;
 }
 static void write_dump_bitmap(DumpState *s, Error **errp)
@@ -1297,6 +1328,7 @@ static void write_dump_pages(DumpState *s, Error **errp)
     uint8_t *buf;
     GuestPhysBlock *block_iter = NULL;
     uint64_t pfn_iter;
+    g_autofree uint8_t *page = NULL;
     /* get offset of page_desc and page_data in dump file */
     offset_desc = s->offset_page;
@@ -1332,12 +1364,13 @@ static void write_dump_pages(DumpState *s, Error **errp)
     }
     offset_data += s->dump_info.page_size;
+    page = g_malloc(s->dump_info.page_size);
     /*
      * dump memory to vmcore page by page. zero page will all be resided in the
      * first page of page section
      */
-    while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
+    for (buf = page; get_next_page(&block_iter, &pfn_iter, &buf, s); buf = page) {
         /* check zero page */
         if (buffer_is_zero(buf, s->dump_info.page_size)) {
             ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),