/*
 * sPAPR CPU core device, acts as container of CPU thread devices.
 *
 * Copyright (C) 2016 Bharata B Rao <bharata@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/cpu/core.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "target/ppc/cpu.h"
#include "hw/ppc/spapr.h"
#include "hw/boards.h"
#include "qapi/error.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "target/ppc/kvm_ppc.h"
#include "hw/ppc/ppc.h"
#include "target/ppc/mmu-hash64.h"
#include "sysemu/numa.h"
#include "sysemu/hw_accel.h"
#include "qemu/error-report.h"

static void spapr_cpu_reset(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    sPAPRCPUState *spapr_cpu = spapr_cpu_state(cpu);
    target_ulong lpcr;

    cpu_reset(cs);

    /* All CPUs start halted. CPU0 is unhalted from the machine level
     * reset code and the rest are explicitly started up by the guest
     * using an RTAS call */
    cs->halted = 1;
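
    /*
     * Note: cs->halted must be set before ppc_set_compat() below. On CPU
     * hotplug under KVM, ppc_set_compat() kicks the newly created vCPU
     * thread, and with cs->halted set kvm_cpu_exec() returns from
     * kvm_arch_process_async_events() instead of racing into KVM_RUN
     * while the main thread is still setting up KVM registers, which
     * could otherwise deadlock QEMU.
     */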

    /* Set compatibility mode to match the boot CPU, which was either set
     * by the machine reset code or by CAS. This should never fail.
     */
    ppc_set_compat(cpu, POWERPC_CPU(first_cpu)->compat_pvr, &error_abort);

    env->spr[SPR_HIOR] = 0;

    lpcr = env->spr[SPR_LPCR];

    /* Set emulated LPCR to not send interrupts to hypervisor. Note that
     * under KVM, the actual HW LPCR will be set differently by KVM itself,
     * the settings below ensure proper operations with TCG in absence of
     * a real hypervisor.
     *
     * Clearing VPM0 will also cause us to use RMOR in mmu-hash64.c for
     * real mode accesses, which thankfully defaults to 0 and isn't
     * accessible in guest mode.
     *
     * Disable Power-saving mode Exit Cause exceptions for the CPU, so
     * we don't get spurious wakeups before an RTAS start-cpu call.
     */
    lpcr &= ~(LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV | pcc->lpcr_pm);
    lpcr |= LPCR_LPES0 | LPCR_LPES1;

    /* Set RMLS to the max (ie, 16G) */
    lpcr &= ~LPCR_RMLS;
    lpcr |= 1ull << LPCR_RMLS_SHIFT;

    ppc_store_lpcr(cpu, lpcr);

    /* Set a full AMOR so guest can use the AMR as it sees fit */
    env->spr[SPR_AMOR] = 0xffffffffffffffffull;

    spapr_cpu->vpa_addr = 0;
    spapr_cpu->slb_shadow_addr = 0;
    spapr_cpu->slb_shadow_size = 0;
    spapr_cpu->dtl_addr = 0;
    spapr_cpu->dtl_size = 0;

    spapr_caps_cpu_apply(SPAPR_MACHINE(qdev_get_machine()), cpu);

    kvm_check_mmu(cpu, &error_fatal);
}
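
/*
 * Set the entry state of a (secondary) vCPU that is being started: entry
 * point in NIP, first argument in r3, mark it online for KVM, unhalt it
 * and re-enable the Power-saving mode Exit Cause exceptions that were
 * masked at reset.
 */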
void spapr_cpu_set_entry_state(PowerPCCPU *cpu, target_ulong nip,
                               target_ulong r3)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    CPUPPCState *env = &cpu->env;

    env->nip = nip;
    env->gpr[3] = r3;
    kvmppc_set_reg_ppc_online(cpu, 1);
    CPU(cpu)->halted = 0;
    /* Enable Power-saving mode Exit Cause exceptions */
    ppc_store_lpcr(cpu, env->spr[SPR_LPCR] | pcc->lpcr_pm);
}

/*
 * Return the sPAPR CPU core type for @cpu_type which essentially is the
 * CPU model specified with the -cpu cmdline option.
 */
const char *spapr_get_cpu_core_type(const char *cpu_type)
{
    int len = strlen(cpu_type) - strlen(POWERPC_CPU_TYPE_SUFFIX);
    char *core_type = g_strdup_printf(SPAPR_CPU_CORE_TYPE_NAME("%.*s"),
                                      len, cpu_type);
    ObjectClass *oc = object_class_by_name(core_type);

    g_free(core_type);
    if (!oc) {
        return NULL;
    }

    return object_class_get_name(oc);
}
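
/*
 * Migration of the guest-registered per-vCPU areas: the Virtual Processor
 * Area (VPA), SLB shadow buffer and Dispatch Trace Log (DTL). These are
 * registered by the guest at runtime, so they are only migrated once set,
 * hence the "needed" helpers below. The SLB shadow and DTL subsections
 * hang off the VPA subsection since both depend on a registered VPA.
 */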
static bool slb_shadow_needed(void *opaque)
{
    sPAPRCPUState *spapr_cpu = opaque;

    return spapr_cpu->slb_shadow_addr != 0;
}

static const VMStateDescription vmstate_spapr_cpu_slb_shadow = {
    .name = "spapr_cpu/vpa/slb_shadow",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = slb_shadow_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(slb_shadow_addr, sPAPRCPUState),
        VMSTATE_UINT64(slb_shadow_size, sPAPRCPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool dtl_needed(void *opaque)
{
    sPAPRCPUState *spapr_cpu = opaque;

    return spapr_cpu->dtl_addr != 0;
}

static const VMStateDescription vmstate_spapr_cpu_dtl = {
    .name = "spapr_cpu/vpa/dtl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = dtl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(dtl_addr, sPAPRCPUState),
        VMSTATE_UINT64(dtl_size, sPAPRCPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool vpa_needed(void *opaque)
{
    sPAPRCPUState *spapr_cpu = opaque;

    return spapr_cpu->vpa_addr != 0;
}

static const VMStateDescription vmstate_spapr_cpu_vpa = {
    .name = "spapr_cpu/vpa",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vpa_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vpa_addr, sPAPRCPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_spapr_cpu_slb_shadow,
        &vmstate_spapr_cpu_dtl,
        NULL
    }
};
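
/*
 * Per-vCPU machine data stream, with the VPA areas as an optional
 * subsection. It is only registered for machine types that don't set the
 * "pre-3.0-migration" compat property, so migration to older QEMUs keeps
 * working.
 */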
static const VMStateDescription vmstate_spapr_cpu_state = {
    .name = "spapr_cpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_spapr_cpu_vpa,
        NULL
    }
};

static void spapr_unrealize_vcpu(PowerPCCPU *cpu, sPAPRCPUCore *sc)
{
    if (!sc->pre_3_0_migration) {
        vmstate_unregister(NULL, &vmstate_spapr_cpu_state, cpu->machine_data);
    }
    qemu_unregister_reset(spapr_cpu_reset, cpu);
    if (spapr_cpu_state(cpu)->icp) {
        object_unparent(OBJECT(spapr_cpu_state(cpu)->icp));
    }
    if (spapr_cpu_state(cpu)->tctx) {
        object_unparent(OBJECT(spapr_cpu_state(cpu)->tctx));
    }
    cpu_remove_sync(CPU(cpu));
    object_unparent(OBJECT(cpu));
}

static void spapr_cpu_core_unrealize(DeviceState *dev, Error **errp)
{
    sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
    CPUCore *cc = CPU_CORE(dev);
    int i;

    for (i = 0; i < cc->nr_threads; i++) {
        spapr_unrealize_vcpu(sc->threads[i], sc);
    }
    g_free(sc->threads);
}

static void spapr_realize_vcpu(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               sPAPRCPUCore *sc, Error **errp)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;

    object_property_set_bool(OBJECT(cpu), true, "realized", &local_err);
    if (local_err) {
        goto error;
    }

    /* Set time-base frequency to 512 MHz */
    cpu_ppc_tb_init(env, SPAPR_TIMEBASE_FREQ);

    cpu_ppc_set_vhyp(cpu, PPC_VIRTUAL_HYPERVISOR(spapr));
    kvmppc_set_papr(cpu);

    qemu_register_reset(spapr_cpu_reset, cpu);
    spapr_cpu_reset(cpu);

    spapr->irq->cpu_intc_create(spapr, cpu, &local_err);
    if (local_err) {
        goto error_unregister;
    }

    if (!sc->pre_3_0_migration) {
        vmstate_register(NULL, cs->cpu_index, &vmstate_spapr_cpu_state,
                         cpu->machine_data);
    }

    return;
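
    /*
     * Rollback for hotplug: a failed realize must not leave a registered
     * reset handler or a live vCPU thread behind, referencing a CPU
     * object that is about to be destroyed.
     */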
error_unregister:
    qemu_unregister_reset(spapr_cpu_reset, cpu);
    cpu_remove_sync(CPU(cpu));
error:
    error_propagate(errp, local_err);
}

static PowerPCCPU *spapr_create_vcpu(sPAPRCPUCore *sc, int i, Error **errp)
{
    sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(sc);
    CPUCore *cc = CPU_CORE(sc);
    Object *obj;
    char *id;
    CPUState *cs;
    PowerPCCPU *cpu;
    Error *local_err = NULL;

    obj = object_new(scc->cpu_type);

    cs = CPU(obj);
    cpu = POWERPC_CPU(obj);
    cs->cpu_index = cc->core_id + i;
    spapr_set_vcpu_id(cpu, cs->cpu_index, &local_err);
    if (local_err) {
        goto err;
    }

    cpu->node_id = sc->node_id;

    id = g_strdup_printf("thread[%d]", i);
    object_property_add_child(OBJECT(sc), id, obj, &local_err);
    g_free(id);
    if (local_err) {
        goto err;
    }

    cpu->machine_data = g_new0(sPAPRCPUState, 1);

    object_unref(obj);
    return cpu;

err:
    object_unref(obj);
    error_propagate(errp, local_err);
    return NULL;
}

static void spapr_delete_vcpu(PowerPCCPU *cpu, sPAPRCPUCore *sc)
{
    sPAPRCPUState *spapr_cpu = spapr_cpu_state(cpu);

    cpu->machine_data = NULL;
    g_free(spapr_cpu);
    object_unparent(OBJECT(cpu));
}

static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
{
    /* We don't use SPAPR_MACHINE() in order to exit gracefully if the user
     * tries to add a sPAPR CPU core to a non-pseries machine.
     */
    sPAPRMachineState *spapr =
        (sPAPRMachineState *) object_dynamic_cast(qdev_get_machine(),
                                                  TYPE_SPAPR_MACHINE);
    sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
    CPUCore *cc = CPU_CORE(OBJECT(dev));
    Error *local_err = NULL;
    int i, j;

    if (!spapr) {
        error_setg(errp, TYPE_SPAPR_CPU_CORE " needs a pseries machine");
        return;
    }

    sc->threads = g_new(PowerPCCPU *, cc->nr_threads);
    for (i = 0; i < cc->nr_threads; i++) {
        sc->threads[i] = spapr_create_vcpu(sc, i, &local_err);
        if (local_err) {
            goto err;
        }
    }

    for (j = 0; j < cc->nr_threads; j++) {
        spapr_realize_vcpu(sc->threads[j], spapr, sc, &local_err);
        if (local_err) {
            goto err_unrealize;
        }
    }
    return;
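
    /*
     * Rollback: unrealize the vCPUs that were already realized before
     * deleting every created vCPU, so no QEMU thread is left running
     * against a destroyed CPU object.
     */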
err_unrealize:
    while (--j >= 0) {
        spapr_unrealize_vcpu(sc->threads[j], sc);
    }
err:
    while (--i >= 0) {
        spapr_delete_vcpu(sc->threads[i], sc);
    }
    g_free(sc->threads);
    error_propagate(errp, local_err);
}

static Property spapr_cpu_core_properties[] = {
    DEFINE_PROP_INT32("node-id", sPAPRCPUCore, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pre-3.0-migration", sPAPRCPUCore, pre_3_0_migration,
                     false),
    DEFINE_PROP_END_OF_LIST()
};

static void spapr_cpu_core_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_CLASS(oc);

    dc->realize = spapr_cpu_core_realize;
    dc->unrealize = spapr_cpu_core_unrealize;
    dc->props = spapr_cpu_core_properties;
    scc->cpu_type = data;
}

#define DEFINE_SPAPR_CPU_CORE_TYPE(cpu_model) \
    { \
        .parent = TYPE_SPAPR_CPU_CORE, \
        .class_data = (void *) POWERPC_CPU_TYPE_NAME(cpu_model), \
        .class_init = spapr_cpu_core_class_init, \
        .name = SPAPR_CPU_CORE_TYPE_NAME(cpu_model), \
    }

static const TypeInfo spapr_cpu_core_type_infos[] = {
    {
        .name = TYPE_SPAPR_CPU_CORE,
        .parent = TYPE_CPU_CORE,
        .abstract = true,
        .instance_size = sizeof(sPAPRCPUCore),
        .class_size = sizeof(sPAPRCPUCoreClass),
    },
    DEFINE_SPAPR_CPU_CORE_TYPE("970_v2.2"),
    DEFINE_SPAPR_CPU_CORE_TYPE("970mp_v1.0"),
    DEFINE_SPAPR_CPU_CORE_TYPE("970mp_v1.1"),
    DEFINE_SPAPR_CPU_CORE_TYPE("power5+_v2.1"),
    DEFINE_SPAPR_CPU_CORE_TYPE("power7_v2.3"),
    DEFINE_SPAPR_CPU_CORE_TYPE("power7+_v2.1"),
    DEFINE_SPAPR_CPU_CORE_TYPE("power8_v2.0"),
    DEFINE_SPAPR_CPU_CORE_TYPE("power8e_v2.1"),
    DEFINE_SPAPR_CPU_CORE_TYPE("power8nvl_v1.0"),
    DEFINE_SPAPR_CPU_CORE_TYPE("power9_v1.0"),
    DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.0"),
#ifdef CONFIG_KVM
    DEFINE_SPAPR_CPU_CORE_TYPE("host"),
#endif
};

DEFINE_TYPES(spapr_cpu_core_type_infos)