spapr/xive: rework the mapping of the KVM memory regions

Today, the interrupt device is fully initialized at reset when the CAS
negotiation process has completed. Depending on the KVM capabilities,
the SpaprXive memory regions (ESB, TIMA) are initialized with a host
MMIO backend or a QEMU emulated backend. This results in a complex
initialization sequence, partially done at realize and partially at reset,
and in some memory region leaks.

To simplify this sequence and to remove the late initialization of the
emulated device, which only needs to be done once, we introduce new
memory regions specific to KVM. These regions are mapped as overlaps
on top of the emulated device to make use of the host MMIOs (a minimal
sketch of this pattern is given just before the diff). Also provide
proper cleanup of these regions when the XIVE KVM device is destroyed,
to fix the leaks.

Signed-off-by: Cédric Le Goater <clg@kaod.org>
Message-Id: <20190614165920.12670-2-clg@kaod.org>
Reviewed-by: Greg Kurz <groug@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Cédric Le Goater 2019-06-14 18:59:19 +02:00 committed by David Gibson
parent a2166410ad
commit 981b1c6266
5 changed files with 24 additions and 39 deletions
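The rework described above reduces to one small pattern: back a RAM-device
MemoryRegion with the pointer returned by the KVM mmap, add it on top of the
emulated region with a higher priority, and tear both down again before
unmapping when the KVM device goes away. A minimal sketch of that pattern,
assuming an already mmap'ed host pointer; the map_kvm_window/unmap_kvm_window
helpers and the "kvm-window" name are illustrative, not part of the patch:

#include "qemu/osdep.h"
#include "exec/memory.h"

/*
 * Overlay a KVM-provided MMIO window (host pointer "mmap_ptr") on top of an
 * emulated MemoryRegion. Priority 1 makes the overlay win over the emulated
 * region (default priority 0) wherever the two overlap.
 */
static void map_kvm_window(MemoryRegion *emulated, MemoryRegion *kvm_mr,
                           Object *owner, uint64_t len, void *mmap_ptr)
{
    memory_region_init_ram_device_ptr(kvm_mr, owner, "kvm-window",
                                      len, mmap_ptr);
    memory_region_add_subregion_overlap(emulated, 0, kvm_mr, 1);
}

/* Reverse of the above; call before munmap()'ing the host pointer. */
static void unmap_kvm_window(MemoryRegion *emulated, MemoryRegion *kvm_mr)
{
    memory_region_del_subregion(emulated, kvm_mr);
    object_unparent(OBJECT(kvm_mr));
}

In the patch itself, this is done once for the source ESB window and once for
the TIMA in kvmppc_xive_connect(), with the matching teardown added to
kvmppc_xive_disconnect().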

hw/intc/spapr_xive.c

@@ -194,13 +194,6 @@ void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
     }
 }
 
-void spapr_xive_map_mmio(SpaprXive *xive)
-{
-    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
-    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
-    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
-}
-
 void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
 {
     memory_region_set_enabled(&xive->source.esb_mmio, enable);
@@ -305,6 +298,7 @@ static void spapr_xive_realize(DeviceState *dev, Error **errp)
         error_propagate(errp, local_err);
         return;
     }
+    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);
 
     /*
      * Initialize the END ESB source
@@ -318,6 +312,7 @@ static void spapr_xive_realize(DeviceState *dev, Error **errp)
         error_propagate(errp, local_err);
         return;
     }
+    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);
 
     /* Set the mapping address of the END ESB pages after the source ESBs */
     xive->end_base = xive->vc_base + (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
@@ -333,31 +328,18 @@ static void spapr_xive_realize(DeviceState *dev, Error **errp)
     qemu_register_reset(spapr_xive_reset, dev);
 
-    /* Define all XIVE MMIO regions on SysBus */
-    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);
-    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);
-    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);
-}
-
-void spapr_xive_init(SpaprXive *xive, Error **errp)
-{
-    XiveSource *xsrc = &xive->source;
-
-    /*
-     * The emulated XIVE device can only be initialized once. If the
-     * ESB memory region has been already mapped, it means we have been
-     * through there.
-     */
-    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
-        return;
-    }
-
     /* TIMA initialization */
     memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, xive,
                           "xive.tima", 4ull << TM_SHIFT);
+    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);
 
-    /* Map all regions */
-    spapr_xive_map_mmio(xive);
+    /*
+     * Map all regions. These will be enabled or disabled at reset and
+     * can also be overridden by KVM memory regions if active
+     */
+    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
+    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
+    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
 }
 
 static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,

hw/intc/spapr_xive_kvm.c

@@ -728,8 +728,10 @@ void kvmppc_xive_connect(SpaprXive *xive, Error **errp)
         return;
     }
 
-    memory_region_init_ram_device_ptr(&xsrc->esb_mmio, OBJECT(xsrc),
+    memory_region_init_ram_device_ptr(&xsrc->esb_mmio_kvm, OBJECT(xsrc),
                                       "xive.esb", esb_len, xsrc->esb_mmap);
+    memory_region_add_subregion_overlap(&xsrc->esb_mmio, 0,
+                                        &xsrc->esb_mmio_kvm, 1);
 
     /*
      * 2. END ESB pages (No KVM support yet)
@@ -744,8 +746,10 @@ void kvmppc_xive_connect(SpaprXive *xive, Error **errp)
         error_propagate(errp, local_err);
         return;
     }
-    memory_region_init_ram_device_ptr(&xive->tm_mmio, OBJECT(xive),
+    memory_region_init_ram_device_ptr(&xive->tm_mmio_kvm, OBJECT(xive),
                                       "xive.tima", tima_len, xive->tm_mmap);
+    memory_region_add_subregion_overlap(&xive->tm_mmio, 0,
+                                        &xive->tm_mmio_kvm, 1);
 
     xive->change = qemu_add_vm_change_state_handler(
                                      kvmppc_xive_change_state_handler, xive);
@@ -771,9 +775,6 @@ void kvmppc_xive_connect(SpaprXive *xive, Error **errp)
     kvm_kernel_irqchip = true;
     kvm_msi_via_irqfd_allowed = true;
     kvm_gsi_direct_mapping = true;
-
-    /* Map all regions */
-    spapr_xive_map_mmio(xive);
 }
 
 void kvmppc_xive_disconnect(SpaprXive *xive, Error **errp)
@@ -795,13 +796,15 @@ void kvmppc_xive_disconnect(SpaprXive *xive, Error **errp)
     xsrc = &xive->source;
     esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
 
-    sysbus_mmio_unmap(SYS_BUS_DEVICE(xive), 0);
+    memory_region_del_subregion(&xsrc->esb_mmio, &xsrc->esb_mmio_kvm);
+    object_unparent(OBJECT(&xsrc->esb_mmio_kvm));
     munmap(xsrc->esb_mmap, esb_len);
     xsrc->esb_mmap = NULL;
 
-    sysbus_mmio_unmap(SYS_BUS_DEVICE(xive), 1);
-    sysbus_mmio_unmap(SYS_BUS_DEVICE(xive), 2);
+    memory_region_del_subregion(&xive->tm_mmio, &xive->tm_mmio_kvm);
+    object_unparent(OBJECT(&xive->tm_mmio_kvm));
     munmap(xive->tm_mmap, 4ull << TM_SHIFT);
     xive->tm_mmap = NULL;
 
     /*
      * When the KVM device fd is closed, the KVM device is destroyed

hw/ppc/spapr_irq.c

@@ -413,7 +413,6 @@ static const char *spapr_irq_get_nodename_xive(SpaprMachineState *spapr)
 
 static void spapr_irq_init_emu_xive(SpaprMachineState *spapr, Error **errp)
 {
-    spapr_xive_init(spapr->xive, errp);
 }
 
 static void spapr_irq_init_kvm_xive(SpaprMachineState *spapr, Error **errp)

include/hw/ppc/spapr_xive.h

@@ -42,6 +42,7 @@ typedef struct SpaprXive {
     /* KVM support */
     int fd;
     void *tm_mmap;
+    MemoryRegion tm_mmio_kvm;
     VMChangeStateEntry *change;
 } SpaprXive;
@@ -66,7 +67,6 @@ void spapr_xive_map_mmio(SpaprXive *xive);
 int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
                              uint32_t *out_server, uint8_t *out_prio);
-void spapr_xive_init(SpaprXive *xive, Error **errp);
 
 /*
  * KVM XIVE device helpers

include/hw/ppc/xive.h

@@ -197,6 +197,7 @@ typedef struct XiveSource {
     /* KVM support */
     void *esb_mmap;
+    MemoryRegion esb_mmio_kvm;
 
     XiveNotifier *xive;
 } XiveSource;