spapr/xive: Allocate vCPU IPIs from the vCPU contexts
When QEMU switches to the XIVE interrupt mode, it creates all the guest interrupts at the level of the KVM device. These interrupts are backed by real HW interrupts from the IPI interrupt pool of the XIVE controller. Currently, this is done from the QEMU main thread, which results in allocating all interrupts from the chip on which QEMU is running. IPIs are not distributed across the system and the load is not well balanced across the interrupt controllers. Change the vCPU IPI allocation to run from the vCPU context. The associated XIVE IPI interrupt will be allocated on the chip on which the vCPU is running and improves distribution of the IPIs in the system. When the vCPUs are pinned, this will make the IPI local to the chip of the vCPU. It will reduce rerouting between interrupt controllers and give better performance. Device interrupts are still treated the same. To improve placement, we would need some information on the chip owning the virtual source or the HW source in case of a passthrough device but this requires changes in PAPR. Signed-off-by: Cédric Le Goater <clg@kaod.org> Message-Id: <20200820134547.2355743-5-clg@kaod.org> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
This commit is contained in:
parent
acbdb9956f
commit
eab0a2d06e
@ -146,13 +146,43 @@ int kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp)
|
||||
return s.ret;
|
||||
}
|
||||
|
||||
static int kvmppc_xive_reset_ipi(SpaprXive *xive, CPUState *cs, Error **errp)
|
||||
/*
|
||||
* Allocate the vCPU IPIs from the vCPU context. This will allocate
|
||||
* the XIVE IPI interrupt on the chip on which the vCPU is running.
|
||||
* This gives a better distribution of IPIs when the guest has a lot
|
||||
* of vCPUs. When the vCPUs are pinned, this will make the IPI local
|
||||
* to the chip of the vCPU. It will reduce rerouting between interrupt
|
||||
* controllers and gives better performance.
|
||||
*/
|
||||
typedef struct {
|
||||
SpaprXive *xive;
|
||||
Error *err;
|
||||
int rc;
|
||||
} XiveInitIPI;
|
||||
|
||||
static void kvmppc_xive_reset_ipi_on_cpu(CPUState *cs, run_on_cpu_data arg)
|
||||
{
|
||||
unsigned long ipi = kvm_arch_vcpu_id(cs);
|
||||
XiveInitIPI *s = arg.host_ptr;
|
||||
uint64_t state = 0;
|
||||
|
||||
return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE, ipi,
|
||||
&state, true, errp);
|
||||
s->rc = kvm_device_access(s->xive->fd, KVM_DEV_XIVE_GRP_SOURCE, ipi,
|
||||
&state, true, &s->err);
|
||||
}
|
||||
|
||||
/*
 * (Re)initialize the IPI of a vCPU, running the KVM device access from
 * the vCPU's own context so the interrupt is allocated on the chip the
 * vCPU runs on.
 *
 * @xive: the XIVE device backing the guest interrupts
 * @cs:   the vCPU whose IPI is being reset
 * @errp: error out-parameter, set from the helper's reported error
 *
 * Returns the helper's kvm_device_access() return code (0 on success).
 */
static int kvmppc_xive_reset_ipi(SpaprXive *xive, CPUState *cs, Error **errp)
{
    XiveInitIPI s = {
        .xive = xive,
        .err = NULL,
        .rc = 0,
    };

    /*
     * run_on_cpu() completes the callback before returning here: s is
     * stack-allocated, so s.err and s.rc are only read afterwards.
     */
    run_on_cpu(cs, kvmppc_xive_reset_ipi_on_cpu, RUN_ON_CPU_HOST_PTR(&s));
    if (s.err) {
        error_propagate(errp, s.err);
    }
    return s.rc;
}
int kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp)
|
||||
|
Loading…
Reference in New Issue
Block a user