2011-04-01 06:15:20 +02:00
|
|
|
/*
|
|
|
|
* QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
|
|
|
|
*
|
|
|
|
* Copyright (c) 2004-2007 Fabrice Bellard
|
|
|
|
* Copyright (c) 2007 Jocelyn Mayer
|
|
|
|
* Copyright (c) 2010 David Gibson, IBM Corporation.
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
* of this software and associated documentation files (the "Software"), to deal
|
|
|
|
* in the Software without restriction, including without limitation the rights
|
|
|
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
|
|
* copies of the Software, and to permit persons to whom the Software is
|
|
|
|
* furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
|
|
* THE SOFTWARE.
|
|
|
|
*
|
|
|
|
*/
|
2016-01-26 19:16:58 +01:00
|
|
|
#include "qemu/osdep.h"
|
include/qemu/osdep.h: Don't include qapi/error.h
Commit 57cb38b included qapi/error.h into qemu/osdep.h to get the
Error typedef. Since then, we've moved to include qemu/osdep.h
everywhere. Its file comment explains: "To avoid getting into
possible circular include dependencies, this file should not include
any other QEMU headers, with the exceptions of config-host.h,
compiler.h, os-posix.h and os-win32.h, all of which are doing a
similar job to this file and are under similar constraints."
qapi/error.h doesn't do a similar job, and it doesn't adhere to
similar constraints: it includes qapi-types.h. That's in excess of
100KiB of crap most .c files don't actually need.
Add the typedef to qemu/typedefs.h, and include that instead of
qapi/error.h. Include qapi/error.h in .c files that need it and don't
get it now. Include qapi-types.h in qom/object.h for uint16List.
Update scripts/clean-includes accordingly. Update it further to match
reality: replace config.h by config-target.h, add sysemu/os-posix.h,
sysemu/os-win32.h. Update the list of includes in the qemu/osdep.h
comment quoted above similarly.
This reduces the number of objects depending on qapi/error.h from "all
of them" to less than a third. Unfortunately, the number depending on
qapi-types.h shrinks only a little. More work is needed for that one.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
[Fix compilation without the spice devel packages. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2016-03-14 09:01:28 +01:00
|
|
|
#include "qapi/error.h"
|
2017-08-18 07:50:22 +02:00
|
|
|
#include "qapi/visitor.h"
|
2012-12-17 18:20:04 +01:00
|
|
|
#include "sysemu/sysemu.h"
|
2015-02-08 19:51:16 +01:00
|
|
|
#include "sysemu/numa.h"
|
2013-02-04 15:40:22 +01:00
|
|
|
#include "hw/hw.h"
|
2015-12-15 13:16:16 +01:00
|
|
|
#include "qemu/log.h"
|
2014-03-17 03:40:27 +01:00
|
|
|
#include "hw/fw-path-provider.h"
|
2011-04-01 06:15:20 +02:00
|
|
|
#include "elf.h"
|
2012-10-24 08:43:34 +02:00
|
|
|
#include "net/net.h"
|
2015-09-01 03:25:35 +02:00
|
|
|
#include "sysemu/device_tree.h"
|
2014-10-07 13:59:13 +02:00
|
|
|
#include "sysemu/block-backend.h"
|
2012-12-17 18:20:04 +01:00
|
|
|
#include "sysemu/cpus.h"
|
2017-01-10 11:59:55 +01:00
|
|
|
#include "sysemu/hw_accel.h"
|
2011-09-29 23:39:10 +02:00
|
|
|
#include "kvm_ppc.h"
|
2017-04-24 19:02:44 +02:00
|
|
|
#include "migration/misc.h"
|
2017-04-24 18:53:30 +02:00
|
|
|
#include "migration/global_state.h"
|
2017-04-24 13:42:55 +02:00
|
|
|
#include "migration/register.h"
|
2013-07-18 21:33:01 +02:00
|
|
|
#include "mmu-hash64.h"
|
2017-03-20 00:46:46 +01:00
|
|
|
#include "mmu-book3s-v3.h"
|
spapr: Implement processor compatibility in ibm, client-architecture-support
Modern Linux kernels support last POWERPC CPUs so when a kernel boots,
in most cases it can find a matching cpu_spec in the kernel's cpu_specs
list. However if the kernel is quite old, it may be missing a definition
of the actual CPU. To provide an ability for old kernels to work on modern
hardware, a Processor Compatibility Mode has been introduced
by the PowerISA specification.
>From the hardware prospective, it is supported by the Processor
Compatibility Register (PCR) which is defined in PowerISA. The register
enables one of the compatibility modes (2.05/2.06/2.07).
Since PCR is a hypervisor privileged register and cannot be
directly accessed from the guest, the mode selection is done via
ibm,client-architecture-support (CAS) RTAS call using which the guest
specifies what "raw" and "architected" CPU versions it supports.
QEMU works out the best match, changes a "cpu-version" property of
every CPU and notifies the guest about the change by setting these
properties in the buffer passed as a response on a custom H_CAS hypercall.
This implements ibm,client-architecture-support parameters parsing
(now only for PVRs) and cooks the device tree diff with new values for
"cpu-version", "ibm,ppc-interrupt-server#s" and
"ibm,ppc-interrupt-server#s" properties.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Alexander Graf <agraf@suse.de>
2014-05-23 04:26:57 +02:00
|
|
|
#include "qom/cpu.h"
|
2011-04-01 06:15:20 +02:00
|
|
|
|
|
|
|
#include "hw/boards.h"
|
2013-02-05 17:06:20 +01:00
|
|
|
#include "hw/ppc/ppc.h"
|
2011-04-01 06:15:20 +02:00
|
|
|
#include "hw/loader.h"
|
|
|
|
|
2016-07-25 16:24:41 +02:00
|
|
|
#include "hw/ppc/fdt.h"
|
2013-02-05 17:06:20 +01:00
|
|
|
#include "hw/ppc/spapr.h"
|
|
|
|
#include "hw/ppc/spapr_vio.h"
|
|
|
|
#include "hw/pci-host/spapr.h"
|
|
|
|
#include "hw/ppc/xics.h"
|
2012-12-12 13:24:50 +01:00
|
|
|
#include "hw/pci/msi.h"
|
2011-04-01 06:15:20 +02:00
|
|
|
|
2013-02-04 15:40:22 +01:00
|
|
|
#include "hw/pci/pci.h"
|
2014-03-17 03:40:27 +01:00
|
|
|
#include "hw/scsi/scsi.h"
|
|
|
|
#include "hw/virtio/virtio-scsi.h"
|
2017-06-05 17:55:18 +02:00
|
|
|
#include "hw/virtio/vhost-scsi-common.h"
|
2011-08-09 17:57:37 +02:00
|
|
|
|
2012-12-17 18:19:49 +01:00
|
|
|
#include "exec/address-spaces.h"
|
2012-08-16 04:03:56 +02:00
|
|
|
#include "hw/usb.h"
|
2012-12-17 18:20:00 +01:00
|
|
|
#include "qemu/config-file.h"
|
2013-12-23 16:40:40 +01:00
|
|
|
#include "qemu/error-report.h"
|
2014-05-23 04:26:54 +02:00
|
|
|
#include "trace.h"
|
2014-08-20 14:16:36 +02:00
|
|
|
#include "hw/nmi.h"
|
2017-02-27 15:29:32 +01:00
|
|
|
#include "hw/intc/intc.h"
|
2011-10-03 12:56:38 +02:00
|
|
|
|
2014-10-14 18:40:06 +02:00
|
|
|
#include "hw/compat.h"
|
2016-03-20 18:16:19 +01:00
|
|
|
#include "qemu/cutils.h"
|
2016-06-10 02:59:03 +02:00
|
|
|
#include "hw/ppc/spapr_cpu_core.h"
|
2016-06-10 02:59:08 +02:00
|
|
|
#include "qmp-commands.h"
|
2014-10-14 18:40:06 +02:00
|
|
|
|
2011-04-01 06:15:20 +02:00
|
|
|
#include <libfdt.h>
|
|
|
|
|
2012-01-11 20:46:28 +01:00
|
|
|
/* SLOF memory layout:
|
|
|
|
*
|
|
|
|
* SLOF raw image loaded at 0, copies its romfs right below the flat
|
|
|
|
* device-tree, then position SLOF itself 31M below that
|
|
|
|
*
|
|
|
|
* So we set FW_OVERHEAD to 40MB which should account for all of that
|
|
|
|
* and more
|
|
|
|
*
|
|
|
|
* We load our kernel at 4M, leaving space for SLOF initial image
|
|
|
|
*/
|
2015-08-06 05:37:24 +02:00
|
|
|
#define FDT_MAX_SIZE 0x100000
|
2011-04-01 06:15:23 +02:00
|
|
|
#define RTAS_MAX_SIZE 0x10000
|
spapr: Locate RTAS and device-tree based on real RMA
We currently calculate the final RTAS and FDT location based on
the early estimate of the RMA size, cropped to 256M on KVM since
we only know the real RMA size at reset time which happens much
later in the boot process.
This means the FDT and RTAS end up right below 256M while they
could be much higher, using precious RMA space and limiting
what the OS bootloader can put there which has proved to be
a problem with some OSes (such as when using very large initrd's)
Fortunately, we do the actual copy of the device-tree into guest
memory much later, during reset, late enough to be able to do it
using the final RMA value, we just need to move the calculation
to the right place.
However, RTAS is still loaded too early, so we change the code to
load the tiny blob into qemu memory early on, and then copy it into
guest memory at reset time. It's small enough that the memory usage
doesn't matter.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[aik: fixed errors from checkpatch.pl, defined RTAS_MAX_ADDR]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[agraf: fix compilation on 32bit hosts]
Signed-off-by: Alexander Graf <agraf@suse.de>
2014-07-21 05:02:04 +02:00
|
|
|
#define RTAS_MAX_ADDR 0x80000000 /* RTAS must stay below that */
|
Add SLOF-based partition firmware for pSeries machine, allowing more boot options
Currently, the emulated pSeries machine requires the use of the
-kernel parameter in order to explicitly load a guest kernel. This
means booting from the virtual disk, cdrom or network is not possible.
This patch addresses this limitation by inserting a within-partition
firmware image (derived from the "SLOF" free Open Firmware project).
If -kernel is not specified, qemu will now load the SLOF image, which
has access to the qemu boot device list through the device tree, and
can boot from any of the usual virtual devices.
In order to support the new firmware, an extension to the emulated
machine/hypervisor is necessary. Unlike Linux, which expects
multi-CPU entry to be handled kexec() style, the SLOF firmware expects
only one CPU to be active at entry, and to use a hypervisor RTAS
method to enable the other CPUs one by one.
This patch also implements this 'start-cpu' method, so that SLOF can
start the secondary CPUs and marshal them into the kexec() holding
pattern ready for entry into the guest OS. Linux should, and in the
future might directly use the start-cpu method to enable initially
disabled CPUs, but for now it does require kexec() entry.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-01 06:15:34 +02:00
|
|
|
#define FW_MAX_SIZE 0x400000
|
|
|
|
#define FW_FILE_NAME "slof.bin"
|
2012-01-11 20:46:28 +01:00
|
|
|
#define FW_OVERHEAD 0x2800000
|
|
|
|
#define KERNEL_LOAD_ADDR FW_MAX_SIZE
|
Add SLOF-based partition firmware for pSeries machine, allowing more boot options
Currently, the emulated pSeries machine requires the use of the
-kernel parameter in order to explicitly load a guest kernel. This
means booting from the virtual disk, cdrom or network is not possible.
This patch addresses this limitation by inserting a within-partition
firmware image (derived from the "SLOF" free Open Firmware project).
If -kernel is not specified, qemu will now load the SLOF image, which
has access to the qemu boot device list through the device tree, and
can boot from any of the usual virtual devices.
In order to support the new firmware, an extension to the emulated
machine/hypervisor is necessary. Unlike Linux, which expects
multi-CPU entry to be handled kexec() style, the SLOF firmware expects
only one CPU to be active at entry, and to use a hypervisor RTAS
method to enable the other CPUs one by one.
This patch also implements this 'start-cpu' method, so that SLOF can
start the secondary CPUs and marshal them into the kexec() holding
pattern ready for entry into the guest OS. Linux should, and in the
future might directly use the start-cpu method to enable initially
disabled CPUs, but for now it does require kexec() entry.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-01 06:15:34 +02:00
|
|
|
|
2012-01-11 20:46:28 +01:00
|
|
|
#define MIN_RMA_SLOF 128UL
|
2011-04-01 06:15:20 +02:00
|
|
|
|
2011-08-03 23:02:17 +02:00
|
|
|
#define PHANDLE_XICP 0x00001111
|
|
|
|
|
2017-04-05 08:37:44 +02:00
|
|
|
/*
 * Instantiate an interrupt source controller (ICS) of QOM type @type_ics
 * with @nr_irqs interrupt slots, attach it to the machine as the "ics"
 * child, link it back to the XICS fabric, and realize it.
 *
 * Returns the realized ICS on success, or NULL with *errp set if sizing
 * or realization fails.
 */
static ICSState *spapr_ics_create(sPAPRMachineState *spapr,
                                  const char *type_ics,
                                  int nr_irqs, Error **errp)
{
    Object *obj = object_new(type_ics);
    Error *err = NULL;

    /* Parent the ICS under the machine and give it a back-pointer to the
     * XICS fabric (the machine itself); neither operation can fail here.
     */
    object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
    object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
                                   &error_abort);

    object_property_set_int(obj, nr_irqs, "nr-irqs", &err);
    if (!err) {
        object_property_set_bool(obj, true, "realized", &err);
    }
    if (err) {
        error_propagate(errp, err);
        return NULL;
    }

    return ICS_SIMPLE(obj);
}
|
|
|
|
|
2017-06-14 15:29:19 +02:00
|
|
|
static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque)
{
    /*
     * Dummy ICP entries stand in for the unused ICPState objects that
     * pre-2.10 QEMUs allocated; newer QEMUs do not have them at all.
     * Either way there is nothing to transfer, so this section is never
     * put on the wire.
     */
    return false;
}
|
|
|
|
|
|
|
|
/*
 * Placeholder "icp/server" migration section matching the wire layout of
 * the old per-server ICP state (xirr, pending_priority, mfrr).  Its
 * .needed hook always returns false, so it is never sent; registering it
 * keeps migration-stream section ids compatible with pre-2.10 machine
 * types while discarding any such data on the incoming side.
 */
static const VMStateDescription pre_2_10_vmstate_dummy_icp = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pre_2_10_vmstate_dummy_icp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(4), /* uint32_t xirr */
        VMSTATE_UNUSED(1), /* uint8_t pending_priority */
        VMSTATE_UNUSED(1), /* uint8_t mfrr */
        VMSTATE_END_OF_LIST()
    },
};
|
|
|
|
|
|
|
|
static void pre_2_10_vmstate_register_dummy_icp(int i)
|
|
|
|
{
|
|
|
|
vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp,
|
|
|
|
(void *)(uintptr_t) i);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pre_2_10_vmstate_unregister_dummy_icp(int i)
|
|
|
|
{
|
|
|
|
vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp,
|
|
|
|
(void *)(uintptr_t) i);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int xics_max_server_number(void)
|
|
|
|
{
|
|
|
|
return DIV_ROUND_UP(max_cpus * kvmppc_smt_threads(), smp_threads);
|
|
|
|
}
|
|
|
|
|
2017-04-05 08:37:44 +02:00
|
|
|
/*
 * Set up the XICS interrupt fabric for the machine: prefer the in-kernel
 * (KVM) implementation when allowed, fall back to the emulated one, and
 * register pre-2.10 dummy ICP migration sections when the machine class
 * requires them.  On failure, *errp is set and spapr->ics stays NULL.
 */
static void xics_system_init(MachineState *machine, int nr_irqs, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);

    if (kvm_enabled()) {
        /* Try the in-kernel irqchip first when the user hasn't forbidden
         * it; on success switch the ICP/ICS types to their KVM variants.
         */
        if (machine_kernel_irqchip_allowed(machine) &&
            !xics_kvm_init(spapr, errp)) {
            spapr->icp_type = TYPE_KVM_ICP;
            spapr->ics = spapr_ics_create(spapr, TYPE_ICS_KVM, nr_irqs, errp);
        }
        /* kernel_irqchip=on is a hard requirement: bail out rather than
         * silently falling back to the emulated XICS.
         */
        if (machine_kernel_irqchip_required(machine) && !spapr->ics) {
            error_prepend(errp, "kernel_irqchip requested but unavailable: ");
            return;
        }
    }

    /* No KVM ICS (TCG, KVM init failed, or irqchip disallowed): use the
     * fully emulated XICS.
     */
    if (!spapr->ics) {
        xics_spapr_init(spapr);
        spapr->icp_type = TYPE_ICP;
        spapr->ics = spapr_ics_create(spapr, TYPE_ICS_SIMPLE, nr_irqs, errp);
        if (!spapr->ics) {
            return;
        }
    }

    if (smc->pre_2_10_has_unused_icps) {
        int i;

        for (i = 0; i < xics_max_server_number(); i++) {
            /* Dummy entries get deregistered when real ICPState objects
             * are registered during CPU core hotplug.
             */
            pre_2_10_vmstate_register_dummy_icp(i);
        }
    }
}
|
|
|
|
|
2014-05-23 04:26:51 +02:00
|
|
|
/*
 * Patch the device-tree CPU node at @offset with the SMT-related
 * properties for @cpu: the interrupt server lists covering its
 * @smt_threads threads, and (for a CPU running in a compatibility mode)
 * the "cpu-version" override.
 *
 * Returns 0 on success or a negative libfdt error code.
 */
static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
                                  int smt_threads)
{
    uint32_t servers_prop[smt_threads];
    uint32_t gservers_prop[smt_threads * 2];
    int index = spapr_vcpu_id(cpu);
    int t;
    int ret;

    /* Advertise the PVR of the compatibility mode, if one is active */
    if (cpu->compat_pvr) {
        ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr);
        if (ret < 0) {
            return ret;
        }
    }

    /* Build interrupt servers and gservers properties */
    for (t = 0; t < smt_threads; t++) {
        servers_prop[t] = cpu_to_be32(index + t);
        /* Hack, direct the group queues back to cpu 0 */
        gservers_prop[t * 2] = cpu_to_be32(index + t);
        gservers_prop[t * 2 + 1] = 0;
    }

    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
                      servers_prop, sizeof(servers_prop));
    if (ret < 0) {
        return ret;
    }

    return fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s",
                       gservers_prop, sizeof(gservers_prop));
}
|
|
|
|
|
2017-05-30 18:24:01 +02:00
|
|
|
/*
 * Write the "ibm,associativity" property for @cpu into the device-tree
 * node at @offset, placing the CPU in its NUMA node (cpu->node_id) with
 * the vCPU id as the final, most-specific domain.
 *
 * Returns 0 on success or a negative libfdt error code.
 */
static int spapr_fixup_cpu_numa_dt(void *fdt, int offset, PowerPCCPU *cpu)
{
    int index = spapr_vcpu_id(cpu);
    /* 5 associativity domains: three unused levels, then node, then vCPU */
    uint32_t associativity[] = {
        cpu_to_be32(0x5),
        cpu_to_be32(0x0),
        cpu_to_be32(0x0),
        cpu_to_be32(0x0),
        cpu_to_be32(cpu->node_id),
        cpu_to_be32(index),
    };

    /* Advertise NUMA via ibm,associativity */
    return fdt_setprop(fdt, offset, "ibm,associativity", associativity,
                       sizeof(associativity));
}
|
|
|
|
|
2017-03-20 00:46:47 +01:00
|
|
|
/* Populate the "ibm,pa-features" property */
/*
 * Select the pa-features byte string matching the CPU's MMU version
 * (ISA 2.06 / 2.07 / 3.00), tweak the feature bits that depend on host
 * capabilities (CI large pages, HTM) and on guest compatibility
 * (hiding radix from legacy guests), then write the property into the
 * device-tree node at @offset.  Unknown MMU versions get no property.
 */
static void spapr_populate_pa_features(CPUPPCState *env, void *fdt, int offset,
                                       bool legacy_guest)
{
    uint8_t pa_features_206[] = { 6, 0,
        0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 };
    uint8_t pa_features_207[] = { 24, 0,
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0,
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00 };
    uint8_t pa_features_300[] = { 66, 0,
        /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
        /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, SSO, 5: LE|CFAR|EB|LSQ */
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */
        /* 6: DS207 */
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
        /* 16: Vector */
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
        /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
        /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
        /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */
        0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
        /* 36: SPR SO, 38: Copy/Paste, 40: Radix MMU */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 36 - 41 */
        /* 42: PM, 44: PC RA, 46: SC vec'd */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
        /* 48: SIMD, 50: QP BFP, 52: String */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
        /* 54: DecFP, 56: DecI, 58: SHA */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
        /* 60: NM atomic, 62: RNG */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
    };
    uint8_t *pa_features;
    size_t pa_size;

    /* Pick the table matching the guest-visible MMU model */
    switch (POWERPC_MMU_VER(env->mmu_model)) {
    case POWERPC_MMU_VER_2_06:
        pa_features = pa_features_206;
        pa_size = sizeof(pa_features_206);
        break;
    case POWERPC_MMU_VER_2_07:
        pa_features = pa_features_207;
        pa_size = sizeof(pa_features_207);
        break;
    case POWERPC_MMU_VER_3_00:
        pa_features = pa_features_300;
        pa_size = sizeof(pa_features_300);
        break;
    default:
        /* Unknown MMU model: don't advertise any pa-features */
        return;
    }

    if (env->ci_large_pages) {
        /*
         * Note: we keep CI large pages off by default because a 64K capable
         * guest provisioned with large pages might otherwise try to map a qemu
         * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages
         * even if that qemu runs on a 4k host.
         * We add this bit back here if we are confident this is not an issue
         */
        pa_features[3] |= 0x20;
    }
    /* The size guard keeps the 2.06 table (too short) from being patched */
    if (kvmppc_has_cap_htm() && pa_size > 24) {
        pa_features[24] |= 0x80; /* Transactional memory support */
    }
    if (legacy_guest && pa_size > 40) {
        /* Workaround for broken kernels that attempt (guest) radix
         * mode when they can't handle it, if they see the radix bit set
         * in pa-features. So hide it from them. */
        pa_features[40 + 2] &= ~0x80; /* Radix MMU */
    }

    _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
}
|
|
|
|
|
2015-07-02 08:23:04 +02:00
|
|
|
static int spapr_fixup_cpu_dt(void *fdt, sPAPRMachineState *spapr)
|
2011-12-12 19:24:30 +01:00
|
|
|
{
|
2014-05-23 04:26:55 +02:00
|
|
|
int ret = 0, offset, cpus_offset;
|
|
|
|
CPUState *cs;
|
2011-12-12 19:24:30 +01:00
|
|
|
char cpu_model[32];
|
|
|
|
int smt = kvmppc_smt_threads();
|
2012-09-12 18:57:12 +02:00
|
|
|
uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
|
2011-12-12 19:24:30 +01:00
|
|
|
|
2014-05-23 04:26:55 +02:00
|
|
|
CPU_FOREACH(cs) {
|
|
|
|
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
2017-03-20 00:46:49 +01:00
|
|
|
CPUPPCState *env = &cpu->env;
|
2014-05-23 04:26:55 +02:00
|
|
|
DeviceClass *dc = DEVICE_GET_CLASS(cs);
|
2017-08-09 07:38:56 +02:00
|
|
|
int index = spapr_vcpu_id(cpu);
|
2016-10-28 13:35:48 +02:00
|
|
|
int compat_smt = MIN(smp_threads, ppc_compat_max_threads(cpu));
|
2011-12-12 19:24:30 +01:00
|
|
|
|
2014-02-01 15:45:52 +01:00
|
|
|
if ((index % smt) != 0) {
|
2011-12-12 19:24:30 +01:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2014-05-23 04:26:55 +02:00
|
|
|
snprintf(cpu_model, 32, "%s@%x", dc->fw_name, index);
|
2011-12-12 19:24:30 +01:00
|
|
|
|
2014-05-23 04:26:55 +02:00
|
|
|
cpus_offset = fdt_path_offset(fdt, "/cpus");
|
|
|
|
if (cpus_offset < 0) {
|
|
|
|
cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"),
|
|
|
|
"cpus");
|
|
|
|
if (cpus_offset < 0) {
|
|
|
|
return cpus_offset;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
offset = fdt_subnode_offset(fdt, cpus_offset, cpu_model);
|
2011-12-12 19:24:30 +01:00
|
|
|
if (offset < 0) {
|
2014-05-23 04:26:55 +02:00
|
|
|
offset = fdt_add_subnode(fdt, cpus_offset, cpu_model);
|
|
|
|
if (offset < 0) {
|
|
|
|
return offset;
|
|
|
|
}
|
2011-12-12 19:24:30 +01:00
|
|
|
}
|
|
|
|
|
2012-09-12 18:57:12 +02:00
|
|
|
ret = fdt_setprop(fdt, offset, "ibm,pft-size",
|
|
|
|
pft_size_prop, sizeof(pft_size_prop));
|
2011-12-12 19:24:30 +01:00
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
2014-05-23 04:26:51 +02:00
|
|
|
|
2017-05-30 18:24:01 +02:00
|
|
|
if (nb_numa_nodes > 1) {
|
|
|
|
ret = spapr_fixup_cpu_numa_dt(fdt, offset, cpu);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
2015-07-02 08:23:17 +02:00
|
|
|
}
|
|
|
|
|
2016-10-28 13:35:48 +02:00
|
|
|
ret = spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt);
|
2014-05-23 04:26:51 +02:00
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
2017-03-20 00:46:49 +01:00
|
|
|
|
|
|
|
spapr_populate_pa_features(env, fdt, offset,
|
|
|
|
spapr->cas_legacy_guest_workaround);
|
2011-12-12 19:24:30 +01:00
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-09-06 20:43:05 +02:00
|
|
|
static hwaddr spapr_node0_size(MachineState *machine)
|
2014-07-03 05:10:06 +02:00
|
|
|
{
|
|
|
|
if (nb_numa_nodes) {
|
|
|
|
int i;
|
|
|
|
for (i = 0; i < nb_numa_nodes; ++i) {
|
|
|
|
if (numa_info[i].node_mem) {
|
2015-07-02 08:23:05 +02:00
|
|
|
return MIN(pow2floor(numa_info[i].node_mem),
|
|
|
|
machine->ram_size);
|
2014-07-03 05:10:06 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2015-07-02 08:23:05 +02:00
|
|
|
return machine->ram_size;
|
2014-07-03 05:10:06 +02:00
|
|
|
}
|
|
|
|
|
2014-05-27 07:36:29 +02:00
|
|
|
static void add_str(GString *s, const gchar *s1)
|
|
|
|
{
|
|
|
|
g_string_append_len(s, s1, strlen(s1) + 1);
|
|
|
|
}
|
2012-09-12 18:57:12 +02:00
|
|
|
|
2015-07-13 02:34:00 +02:00
|
|
|
static int spapr_populate_memory_node(void *fdt, int nodeid, hwaddr start,
|
2014-07-03 05:10:02 +02:00
|
|
|
hwaddr size)
|
|
|
|
{
|
|
|
|
uint32_t associativity[] = {
|
|
|
|
cpu_to_be32(0x4), /* length */
|
|
|
|
cpu_to_be32(0x0), cpu_to_be32(0x0),
|
2014-07-03 05:10:07 +02:00
|
|
|
cpu_to_be32(0x0), cpu_to_be32(nodeid)
|
2014-07-03 05:10:02 +02:00
|
|
|
};
|
|
|
|
char mem_name[32];
|
|
|
|
uint64_t mem_reg_property[2];
|
|
|
|
int off;
|
|
|
|
|
|
|
|
mem_reg_property[0] = cpu_to_be64(start);
|
|
|
|
mem_reg_property[1] = cpu_to_be64(size);
|
|
|
|
|
|
|
|
sprintf(mem_name, "memory@" TARGET_FMT_lx, start);
|
|
|
|
off = fdt_add_subnode(fdt, 0, mem_name);
|
|
|
|
_FDT(off);
|
|
|
|
_FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
|
|
|
|
_FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
|
|
|
|
sizeof(mem_reg_property))));
|
|
|
|
_FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
|
|
|
|
sizeof(associativity))));
|
2015-07-13 02:34:00 +02:00
|
|
|
return off;
|
2014-07-03 05:10:02 +02:00
|
|
|
}
|
|
|
|
|
2015-07-02 08:23:04 +02:00
|
|
|
static int spapr_populate_memory(sPAPRMachineState *spapr, void *fdt)
|
2012-09-12 18:57:12 +02:00
|
|
|
{
|
2015-07-02 08:23:05 +02:00
|
|
|
MachineState *machine = MACHINE(spapr);
|
2014-07-03 05:10:04 +02:00
|
|
|
hwaddr mem_start, node_size;
|
|
|
|
int i, nb_nodes = nb_numa_nodes;
|
|
|
|
NodeInfo *nodes = numa_info;
|
|
|
|
NodeInfo ramnode;
|
|
|
|
|
|
|
|
/* No NUMA nodes, assume there is just one node with whole RAM */
|
|
|
|
if (!nb_numa_nodes) {
|
|
|
|
nb_nodes = 1;
|
2015-07-02 08:23:05 +02:00
|
|
|
ramnode.node_mem = machine->ram_size;
|
2014-07-03 05:10:04 +02:00
|
|
|
nodes = &ramnode;
|
2013-11-25 04:14:51 +01:00
|
|
|
}
|
2012-09-12 18:57:12 +02:00
|
|
|
|
2014-07-03 05:10:04 +02:00
|
|
|
for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
|
|
|
|
if (!nodes[i].node_mem) {
|
|
|
|
continue;
|
|
|
|
}
|
2015-07-02 08:23:05 +02:00
|
|
|
if (mem_start >= machine->ram_size) {
|
2013-11-25 04:14:51 +01:00
|
|
|
node_size = 0;
|
|
|
|
} else {
|
2014-07-03 05:10:04 +02:00
|
|
|
node_size = nodes[i].node_mem;
|
2015-07-02 08:23:05 +02:00
|
|
|
if (node_size > machine->ram_size - mem_start) {
|
|
|
|
node_size = machine->ram_size - mem_start;
|
2013-11-25 04:14:51 +01:00
|
|
|
}
|
|
|
|
}
|
2014-07-03 05:10:04 +02:00
|
|
|
if (!mem_start) {
|
|
|
|
/* ppc_spapr_init() checks for rma_size <= node0_size already */
|
2015-08-03 07:35:41 +02:00
|
|
|
spapr_populate_memory_node(fdt, i, 0, spapr->rma_size);
|
2014-07-03 05:10:04 +02:00
|
|
|
mem_start += spapr->rma_size;
|
|
|
|
node_size -= spapr->rma_size;
|
|
|
|
}
|
2014-07-03 05:10:05 +02:00
|
|
|
for ( ; node_size; ) {
|
|
|
|
hwaddr sizetmp = pow2floor(node_size);
|
|
|
|
|
|
|
|
/* mem_start != 0 here */
|
|
|
|
if (ctzl(mem_start) < ctzl(sizetmp)) {
|
|
|
|
sizetmp = 1ULL << ctzl(mem_start);
|
|
|
|
}
|
|
|
|
|
|
|
|
spapr_populate_memory_node(fdt, i, mem_start, sizetmp);
|
|
|
|
node_size -= sizetmp;
|
|
|
|
mem_start += sizetmp;
|
|
|
|
}
|
2012-09-12 18:57:12 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-07-02 08:23:17 +02:00
|
|
|
static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
|
|
|
|
sPAPRMachineState *spapr)
|
|
|
|
{
|
|
|
|
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
|
|
|
CPUPPCState *env = &cpu->env;
|
|
|
|
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
|
2017-08-09 07:38:56 +02:00
|
|
|
int index = spapr_vcpu_id(cpu);
|
2015-07-02 08:23:17 +02:00
|
|
|
uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
|
|
|
|
0xffffffff, 0xffffffff};
|
2016-06-10 02:59:02 +02:00
|
|
|
uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq()
|
|
|
|
: SPAPR_TIMEBASE_FREQ;
|
2015-07-02 08:23:17 +02:00
|
|
|
uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
|
|
|
|
uint32_t page_sizes_prop[64];
|
|
|
|
size_t page_sizes_prop_size;
|
2015-09-08 03:21:31 +02:00
|
|
|
uint32_t vcpus_per_socket = smp_threads * smp_cores;
|
2015-07-02 08:23:17 +02:00
|
|
|
uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
|
2016-10-28 13:35:48 +02:00
|
|
|
int compat_smt = MIN(smp_threads, ppc_compat_max_threads(cpu));
|
2016-06-10 02:59:04 +02:00
|
|
|
sPAPRDRConnector *drc;
|
|
|
|
int drc_index;
|
2017-03-20 00:46:43 +01:00
|
|
|
uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ];
|
|
|
|
int i;
|
2016-06-10 02:59:04 +02:00
|
|
|
|
2017-06-04 12:26:03 +02:00
|
|
|
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index);
|
2016-06-10 02:59:04 +02:00
|
|
|
if (drc) {
|
2017-06-02 05:49:20 +02:00
|
|
|
drc_index = spapr_drc_index(drc);
|
2016-06-10 02:59:04 +02:00
|
|
|
_FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
|
|
|
|
}
|
2015-07-02 08:23:17 +02:00
|
|
|
|
|
|
|
_FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
|
|
|
|
_FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));
|
|
|
|
|
|
|
|
_FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
|
|
|
|
_FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
|
|
|
|
env->dcache_line_size)));
|
|
|
|
_FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
|
|
|
|
env->dcache_line_size)));
|
|
|
|
_FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size",
|
|
|
|
env->icache_line_size)));
|
|
|
|
_FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size",
|
|
|
|
env->icache_line_size)));
|
|
|
|
|
|
|
|
if (pcc->l1_dcache_size) {
|
|
|
|
_FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
|
|
|
|
pcc->l1_dcache_size)));
|
|
|
|
} else {
|
2017-07-12 15:57:41 +02:00
|
|
|
warn_report("Unknown L1 dcache size for cpu");
|
2015-07-02 08:23:17 +02:00
|
|
|
}
|
|
|
|
if (pcc->l1_icache_size) {
|
|
|
|
_FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
|
|
|
|
pcc->l1_icache_size)));
|
|
|
|
} else {
|
2017-07-12 15:57:41 +02:00
|
|
|
warn_report("Unknown L1 icache size for cpu");
|
2015-07-02 08:23:17 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
_FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
|
|
|
|
_FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq)));
|
2015-10-01 15:30:07 +02:00
|
|
|
_FDT((fdt_setprop_cell(fdt, offset, "slb-size", env->slb_nr)));
|
2015-07-02 08:23:17 +02:00
|
|
|
_FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", env->slb_nr)));
|
|
|
|
_FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
|
|
|
|
_FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));
|
|
|
|
|
|
|
|
if (env->spr_cb[SPR_PURR].oea_read) {
|
|
|
|
_FDT((fdt_setprop(fdt, offset, "ibm,purr", NULL, 0)));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (env->mmu_model & POWERPC_MMU_1TSEG) {
|
|
|
|
_FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes",
|
|
|
|
segs, sizeof(segs))));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Advertise VMX/VSX (vector extensions) if available
|
|
|
|
* 0 / no property == no vector extensions
|
|
|
|
* 1 == VMX / Altivec available
|
|
|
|
* 2 == VSX available */
|
|
|
|
if (env->insns_flags & PPC_ALTIVEC) {
|
|
|
|
uint32_t vmx = (env->insns_flags2 & PPC2_VSX) ? 2 : 1;
|
|
|
|
|
|
|
|
_FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", vmx)));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Advertise DFP (Decimal Floating Point) if available
|
|
|
|
* 0 / no property == no DFP
|
|
|
|
* 1 == DFP available */
|
|
|
|
if (env->insns_flags2 & PPC2_DFP) {
|
|
|
|
_FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
|
|
|
|
}
|
|
|
|
|
2016-08-02 19:38:01 +02:00
|
|
|
page_sizes_prop_size = ppc_create_page_sizes_prop(env, page_sizes_prop,
|
2015-07-02 08:23:17 +02:00
|
|
|
sizeof(page_sizes_prop));
|
|
|
|
if (page_sizes_prop_size) {
|
|
|
|
_FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
|
|
|
|
page_sizes_prop, page_sizes_prop_size)));
|
|
|
|
}
|
|
|
|
|
2017-03-20 00:46:49 +01:00
|
|
|
spapr_populate_pa_features(env, fdt, offset, false);
|
2015-10-22 09:30:59 +02:00
|
|
|
|
2015-07-02 08:23:17 +02:00
|
|
|
_FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id",
|
2015-09-08 03:21:31 +02:00
|
|
|
cs->cpu_index / vcpus_per_socket)));
|
2015-07-02 08:23:17 +02:00
|
|
|
|
|
|
|
_FDT((fdt_setprop(fdt, offset, "ibm,pft-size",
|
|
|
|
pft_size_prop, sizeof(pft_size_prop))));
|
|
|
|
|
2017-05-30 18:24:01 +02:00
|
|
|
if (nb_numa_nodes > 1) {
|
|
|
|
_FDT(spapr_fixup_cpu_numa_dt(fdt, offset, cpu));
|
|
|
|
}
|
2015-07-02 08:23:17 +02:00
|
|
|
|
2016-10-28 13:35:48 +02:00
|
|
|
_FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt));
|
2017-03-20 00:46:43 +01:00
|
|
|
|
|
|
|
if (pcc->radix_page_info) {
|
|
|
|
for (i = 0; i < pcc->radix_page_info->count; i++) {
|
|
|
|
radix_AP_encodings[i] =
|
|
|
|
cpu_to_be32(pcc->radix_page_info->entries[i]);
|
|
|
|
}
|
|
|
|
_FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings",
|
|
|
|
radix_AP_encodings,
|
|
|
|
pcc->radix_page_info->count *
|
|
|
|
sizeof(radix_AP_encodings[0]))));
|
|
|
|
}
|
2015-07-02 08:23:17 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void spapr_populate_cpus_dt_node(void *fdt, sPAPRMachineState *spapr)
|
|
|
|
{
|
|
|
|
CPUState *cs;
|
|
|
|
int cpus_offset;
|
|
|
|
char *nodename;
|
|
|
|
int smt = kvmppc_smt_threads();
|
|
|
|
|
|
|
|
cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
|
|
|
|
_FDT(cpus_offset);
|
|
|
|
_FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1)));
|
|
|
|
_FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0)));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We walk the CPUs in reverse order to ensure that CPU DT nodes
|
|
|
|
* created by fdt_add_subnode() end up in the right order in FDT
|
|
|
|
* for the guest kernel the enumerate the CPUs correctly.
|
|
|
|
*/
|
|
|
|
CPU_FOREACH_REVERSE(cs) {
|
|
|
|
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
2017-08-09 07:38:56 +02:00
|
|
|
int index = spapr_vcpu_id(cpu);
|
2015-07-02 08:23:17 +02:00
|
|
|
DeviceClass *dc = DEVICE_GET_CLASS(cs);
|
|
|
|
int offset;
|
|
|
|
|
|
|
|
if ((index % smt) != 0) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
|
|
|
|
offset = fdt_add_subnode(fdt, cpus_offset, nodename);
|
|
|
|
g_free(nodename);
|
|
|
|
_FDT(offset);
|
|
|
|
spapr_populate_cpu_dt(cs, fdt, offset, spapr);
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2015-07-13 02:34:00 +02:00
|
|
|
/*
|
|
|
|
* Adds ibm,dynamic-reconfiguration-memory node.
|
|
|
|
* Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
|
|
|
|
* of this device tree node.
|
|
|
|
*/
|
|
|
|
static int spapr_populate_drconf_memory(sPAPRMachineState *spapr, void *fdt)
|
|
|
|
{
|
|
|
|
MachineState *machine = MACHINE(spapr);
|
|
|
|
int ret, i, offset;
|
|
|
|
uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
|
|
|
|
uint32_t prop_lmb_size[] = {0, cpu_to_be32(lmb_size)};
|
2016-06-10 07:14:48 +02:00
|
|
|
uint32_t hotplug_lmb_start = spapr->hotplug_memory.base / lmb_size;
|
|
|
|
uint32_t nr_lmbs = (spapr->hotplug_memory.base +
|
|
|
|
memory_region_size(&spapr->hotplug_memory.mr)) /
|
|
|
|
lmb_size;
|
2015-07-13 02:34:00 +02:00
|
|
|
uint32_t *int_buf, *cur_index, buf_len;
|
2015-08-03 07:35:40 +02:00
|
|
|
int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;
|
2015-07-13 02:34:00 +02:00
|
|
|
|
2016-01-19 05:39:21 +01:00
|
|
|
/*
|
2016-06-10 07:14:48 +02:00
|
|
|
* Don't create the node if there is no hotpluggable memory
|
2016-01-19 05:39:21 +01:00
|
|
|
*/
|
2016-06-10 07:14:48 +02:00
|
|
|
if (machine->ram_size == machine->maxram_size) {
|
2016-01-19 05:39:21 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-09-15 21:34:20 +02:00
|
|
|
/*
|
|
|
|
* Allocate enough buffer size to fit in ibm,dynamic-memory
|
|
|
|
* or ibm,associativity-lookup-arrays
|
|
|
|
*/
|
|
|
|
buf_len = MAX(nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1, nr_nodes * 4 + 2)
|
|
|
|
* sizeof(uint32_t);
|
2015-07-13 02:34:00 +02:00
|
|
|
cur_index = int_buf = g_malloc0(buf_len);
|
|
|
|
|
|
|
|
offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");
|
|
|
|
|
|
|
|
ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
|
|
|
|
sizeof(prop_lmb_size));
|
|
|
|
if (ret < 0) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
|
|
|
|
if (ret < 0) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
|
|
|
|
if (ret < 0) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ibm,dynamic-memory */
|
|
|
|
int_buf[0] = cpu_to_be32(nr_lmbs);
|
|
|
|
cur_index++;
|
|
|
|
for (i = 0; i < nr_lmbs; i++) {
|
2016-06-10 07:14:48 +02:00
|
|
|
uint64_t addr = i * lmb_size;
|
2015-07-13 02:34:00 +02:00
|
|
|
uint32_t *dynamic_memory = cur_index;
|
|
|
|
|
2016-06-10 07:14:48 +02:00
|
|
|
if (i >= hotplug_lmb_start) {
|
|
|
|
sPAPRDRConnector *drc;
|
|
|
|
|
2017-06-04 12:26:03 +02:00
|
|
|
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i);
|
2016-06-10 07:14:48 +02:00
|
|
|
g_assert(drc);
|
|
|
|
|
|
|
|
dynamic_memory[0] = cpu_to_be32(addr >> 32);
|
|
|
|
dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
|
2017-06-02 05:49:20 +02:00
|
|
|
dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc));
|
2016-06-10 07:14:48 +02:00
|
|
|
dynamic_memory[3] = cpu_to_be32(0); /* reserved */
|
|
|
|
dynamic_memory[4] = cpu_to_be32(numa_get_node(addr, NULL));
|
|
|
|
if (memory_region_present(get_system_memory(), addr)) {
|
|
|
|
dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
|
|
|
|
} else {
|
|
|
|
dynamic_memory[5] = cpu_to_be32(0);
|
|
|
|
}
|
2015-07-13 02:34:00 +02:00
|
|
|
} else {
|
2016-06-10 07:14:48 +02:00
|
|
|
/*
|
|
|
|
* LMB information for RMA, boot time RAM and gap b/n RAM and
|
|
|
|
* hotplug memory region -- all these are marked as reserved
|
|
|
|
* and as having no valid DRC.
|
|
|
|
*/
|
|
|
|
dynamic_memory[0] = cpu_to_be32(addr >> 32);
|
|
|
|
dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
|
|
|
|
dynamic_memory[2] = cpu_to_be32(0);
|
|
|
|
dynamic_memory[3] = cpu_to_be32(0); /* reserved */
|
|
|
|
dynamic_memory[4] = cpu_to_be32(-1);
|
|
|
|
dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED |
|
|
|
|
SPAPR_LMB_FLAGS_DRC_INVALID);
|
2015-07-13 02:34:00 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
|
|
|
|
}
|
|
|
|
ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
|
|
|
|
if (ret < 0) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ibm,associativity-lookup-arrays */
|
|
|
|
cur_index = int_buf;
|
2015-08-03 07:35:40 +02:00
|
|
|
int_buf[0] = cpu_to_be32(nr_nodes);
|
2015-07-13 02:34:00 +02:00
|
|
|
int_buf[1] = cpu_to_be32(4); /* Number of entries per associativity list */
|
|
|
|
cur_index += 2;
|
2015-08-03 07:35:40 +02:00
|
|
|
for (i = 0; i < nr_nodes; i++) {
|
2015-07-13 02:34:00 +02:00
|
|
|
uint32_t associativity[] = {
|
|
|
|
cpu_to_be32(0x0),
|
|
|
|
cpu_to_be32(0x0),
|
|
|
|
cpu_to_be32(0x0),
|
|
|
|
cpu_to_be32(i)
|
|
|
|
};
|
|
|
|
memcpy(cur_index, associativity, sizeof(associativity));
|
|
|
|
cur_index += 4;
|
|
|
|
}
|
|
|
|
ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf,
|
|
|
|
(cur_index - int_buf) * sizeof(uint32_t));
|
|
|
|
out:
|
|
|
|
g_free(int_buf);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
spapr: add option vector handling in CAS-generated resets
In some cases, ibm,client-architecture-support calls can fail. This
could happen in the current code for situations where the modified
device tree segment exceeds the buffer size provided by the guest
via the call parameters. In these cases, QEMU will reset, allowing
an opportunity to regenerate the device tree from scratch via
boot-time handling. There are potentially other scenarios as well,
not currently reachable in the current code, but possible in theory,
such as cases where device-tree properties or nodes need to be removed.
We currently don't handle either of these properly for option vector
capabilities however. Instead of carrying the negotiated capability
beyond the reset and creating the boot-time device tree accordingly,
we start from scratch, generating the same boot-time device tree as we
did prior to the CAS-generated and the same device tree updates as we
did before. This could (in theory) cause us to get stuck in a reset
loop. This hasn't been observed, but depending on the extensiveness
of CAS-induced device tree updates in the future, could eventually
become an issue.
Address this by pulling capability-related device tree
updates resulting from CAS calls into a common routine,
spapr_dt_cas_updates(), and adding an sPAPROptionVector*
parameter that allows us to test for newly-negotiated capabilities.
We invoke it as follows:
1) When ibm,client-architecture-support gets called, we
call spapr_dt_cas_updates() with the set of capabilities
added since the previous call to ibm,client-architecture-support.
For the initial boot, or a system reset generated by something
other than the CAS call itself, this set will consist of *all*
options supported both the platform and the guest. For calls
to ibm,client-architecture-support immediately after a CAS-induced
reset, we call spapr_dt_cas_updates() with only the set
of capabilities added since the previous call, since the other
capabilities will have already been addressed by the boot-time
device-tree this time around. In the unlikely event that
capabilities are *removed* since the previous CAS, we will
generate a CAS-induced reset. In the unlikely event that we
cannot fit the device-tree updates into the buffer provided
by the guest, well generate a CAS-induced reset.
2) When a CAS update results in the need to reset the machine and
include the updates in the boot-time device tree, we call the
spapr_dt_cas_updates() using the full set of negotiated
capabilities as part of the reset path. At initial boot, or after
a reset generated by something other than the CAS call itself,
this set will be empty, resulting in what should be the same
boot-time device-tree as we generated prior to this patch. For
CAS-induced reset, this routine will be called with the full set of
capabilities negotiated by the platform/guest in the previous
CAS call, which should result in CAS updates from previous call
being accounted for in the initial boot-time device tree.
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
[dwg: Changed an int -> bool conversion to be more explicit]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2016-10-25 06:47:29 +02:00
|
|
|
static int spapr_dt_cas_updates(sPAPRMachineState *spapr, void *fdt,
|
|
|
|
sPAPROptionVector *ov5_updates)
|
|
|
|
{
|
|
|
|
sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
|
2016-10-25 06:47:30 +02:00
|
|
|
int ret = 0, offset;
|
spapr: add option vector handling in CAS-generated resets
In some cases, ibm,client-architecture-support calls can fail. This
could happen in the current code for situations where the modified
device tree segment exceeds the buffer size provided by the guest
via the call parameters. In these cases, QEMU will reset, allowing
an opportunity to regenerate the device tree from scratch via
boot-time handling. There are potentially other scenarios as well,
not currently reachable in the current code, but possible in theory,
such as cases where device-tree properties or nodes need to be removed.
We currently don't handle either of these properly for option vector
capabilities however. Instead of carrying the negotiated capability
beyond the reset and creating the boot-time device tree accordingly,
we start from scratch, generating the same boot-time device tree as we
did prior to the CAS-generated and the same device tree updates as we
did before. This could (in theory) cause us to get stuck in a reset
loop. This hasn't been observed, but depending on the extensiveness
of CAS-induced device tree updates in the future, could eventually
become an issue.
Address this by pulling capability-related device tree
updates resulting from CAS calls into a common routine,
spapr_dt_cas_updates(), and adding an sPAPROptionVector*
parameter that allows us to test for newly-negotiated capabilities.
We invoke it as follows:
1) When ibm,client-architecture-support gets called, we
call spapr_dt_cas_updates() with the set of capabilities
added since the previous call to ibm,client-architecture-support.
For the initial boot, or a system reset generated by something
other than the CAS call itself, this set will consist of *all*
options supported both the platform and the guest. For calls
to ibm,client-architecture-support immediately after a CAS-induced
reset, we call spapr_dt_cas_updates() with only the set
of capabilities added since the previous call, since the other
capabilities will have already been addressed by the boot-time
device-tree this time around. In the unlikely event that
capabilities are *removed* since the previous CAS, we will
generate a CAS-induced reset. In the unlikely event that we
cannot fit the device-tree updates into the buffer provided
by the guest, well generate a CAS-induced reset.
2) When a CAS update results in the need to reset the machine and
include the updates in the boot-time device tree, we call the
spapr_dt_cas_updates() using the full set of negotiated
capabilities as part of the reset path. At initial boot, or after
a reset generated by something other than the CAS call itself,
this set will be empty, resulting in what should be the same
boot-time device-tree as we generated prior to this patch. For
CAS-induced reset, this routine will be called with the full set of
capabilities negotiated by the platform/guest in the previous
CAS call, which should result in CAS updates from previous call
being accounted for in the initial boot-time device tree.
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
[dwg: Changed an int -> bool conversion to be more explicit]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2016-10-25 06:47:29 +02:00
|
|
|
|
|
|
|
/* Generate ibm,dynamic-reconfiguration-memory node if required */
|
|
|
|
if (spapr_ovec_test(ov5_updates, OV5_DRCONF_MEMORY)) {
|
|
|
|
g_assert(smc->dr_lmb_enabled);
|
|
|
|
ret = spapr_populate_drconf_memory(spapr, fdt);
|
2016-10-25 06:47:30 +02:00
|
|
|
if (ret) {
|
|
|
|
goto out;
|
|
|
|
}
|
spapr: add option vector handling in CAS-generated resets
In some cases, ibm,client-architecture-support calls can fail. This
could happen in the current code for situations where the modified
device tree segment exceeds the buffer size provided by the guest
via the call parameters. In these cases, QEMU will reset, allowing
an opportunity to regenerate the device tree from scratch via
boot-time handling. There are potentially other scenarios as well,
not currently reachable in the current code, but possible in theory,
such as cases where device-tree properties or nodes need to be removed.
We currently don't handle either of these properly for option vector
capabilities however. Instead of carrying the negotiated capability
beyond the reset and creating the boot-time device tree accordingly,
we start from scratch, generating the same boot-time device tree as we
did prior to the CAS-generated and the same device tree updates as we
did before. This could (in theory) cause us to get stuck in a reset
loop. This hasn't been observed, but depending on the extensiveness
of CAS-induced device tree updates in the future, could eventually
become an issue.
Address this by pulling capability-related device tree
updates resulting from CAS calls into a common routine,
spapr_dt_cas_updates(), and adding an sPAPROptionVector*
parameter that allows us to test for newly-negotiated capabilities.
We invoke it as follows:
1) When ibm,client-architecture-support gets called, we
call spapr_dt_cas_updates() with the set of capabilities
added since the previous call to ibm,client-architecture-support.
For the initial boot, or a system reset generated by something
other than the CAS call itself, this set will consist of *all*
options supported both the platform and the guest. For calls
to ibm,client-architecture-support immediately after a CAS-induced
reset, we call spapr_dt_cas_updates() with only the set
of capabilities added since the previous call, since the other
capabilities will have already been addressed by the boot-time
device-tree this time around. In the unlikely event that
capabilities are *removed* since the previous CAS, we will
generate a CAS-induced reset. In the unlikely event that we
cannot fit the device-tree updates into the buffer provided
by the guest, well generate a CAS-induced reset.
2) When a CAS update results in the need to reset the machine and
include the updates in the boot-time device tree, we call the
spapr_dt_cas_updates() using the full set of negotiated
capabilities as part of the reset path. At initial boot, or after
a reset generated by something other than the CAS call itself,
this set will be empty, resulting in what should be the same
boot-time device-tree as we generated prior to this patch. For
CAS-induced reset, this routine will be called with the full set of
capabilities negotiated by the platform/guest in the previous
CAS call, which should result in CAS updates from previous call
being accounted for in the initial boot-time device tree.
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
[dwg: Changed an int -> bool conversion to be more explicit]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2016-10-25 06:47:29 +02:00
|
|
|
}
|
|
|
|
|
2016-10-25 06:47:30 +02:00
|
|
|
offset = fdt_path_offset(fdt, "/chosen");
|
|
|
|
if (offset < 0) {
|
|
|
|
offset = fdt_add_subnode(fdt, 0, "chosen");
|
|
|
|
if (offset < 0) {
|
|
|
|
return offset;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
ret = spapr_ovec_populate_dt(fdt, offset, spapr->ov5_cas,
|
|
|
|
"ibm,architecture-vec-5");
|
|
|
|
|
|
|
|
out:
|
spapr: add option vector handling in CAS-generated resets
In some cases, ibm,client-architecture-support calls can fail. This
could happen in the current code for situations where the modified
device tree segment exceeds the buffer size provided by the guest
via the call parameters. In these cases, QEMU will reset, allowing
an opportunity to regenerate the device tree from scratch via
boot-time handling. There are potentially other scenarios as well,
not currently reachable in the current code, but possible in theory,
such as cases where device-tree properties or nodes need to be removed.
We currently don't handle either of these properly for option vector
capabilities however. Instead of carrying the negotiated capability
beyond the reset and creating the boot-time device tree accordingly,
we start from scratch, generating the same boot-time device tree as we
did prior to the CAS-generated and the same device tree updates as we
did before. This could (in theory) cause us to get stuck in a reset
loop. This hasn't been observed, but depending on the extensiveness
of CAS-induced device tree updates in the future, could eventually
become an issue.
Address this by pulling capability-related device tree
updates resulting from CAS calls into a common routine,
spapr_dt_cas_updates(), and adding an sPAPROptionVector*
parameter that allows us to test for newly-negotiated capabilities.
We invoke it as follows:
1) When ibm,client-architecture-support gets called, we
call spapr_dt_cas_updates() with the set of capabilities
added since the previous call to ibm,client-architecture-support.
For the initial boot, or a system reset generated by something
other than the CAS call itself, this set will consist of *all*
options supported both the platform and the guest. For calls
to ibm,client-architecture-support immediately after a CAS-induced
reset, we call spapr_dt_cas_updates() with only the set
of capabilities added since the previous call, since the other
capabilities will have already been addressed by the boot-time
device-tree this time around. In the unlikely event that
capabilities are *removed* since the previous CAS, we will
generate a CAS-induced reset. In the unlikely event that we
cannot fit the device-tree updates into the buffer provided
by the guest, well generate a CAS-induced reset.
2) When a CAS update results in the need to reset the machine and
include the updates in the boot-time device tree, we call the
spapr_dt_cas_updates() using the full set of negotiated
capabilities as part of the reset path. At initial boot, or after
a reset generated by something other than the CAS call itself,
this set will be empty, resulting in what should be the same
boot-time device-tree as we generated prior to this patch. For
CAS-induced reset, this routine will be called with the full set of
capabilities negotiated by the platform/guest in the previous
CAS call, which should result in CAS updates from previous call
being accounted for in the initial boot-time device tree.
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
[dwg: Changed an int -> bool conversion to be more explicit]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2016-10-25 06:47:29 +02:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
hw/ppc: CAS reset on early device hotplug
This patch is a follow up on the discussions made in patch
"hw/ppc: disable hotplug before CAS is completed" that can be
found at [1].
At this moment, we do not support CPU/memory hotplug in early
boot stages, before CAS. When a hotplug occurs, the event is logged
in an internal RTAS event log queue and an IRQ pulse is fired. In
regular conditions, the guest handles the interrupt by executing
check_exception, fetching the generated hotplug event and enabling
the device for use.
In early boot, this IRQ isn't caught (SLOF does not handle hotplug
events), leaving the event in the rtas event log queue. If the guest
executes check_exception due to another hotplug event, the re-assertion
of the IRQ ends up de-queuing the first hotplug event as well. In short,
a device hotplugged before CAS is considered coldplugged by SLOF.
This leads to device misbehavior and, in some cases, guest kernel
Ooops when trying to unplug the device.
A proper fix would be to turn every device hotplugged before CAS
as a colplugged device. This is not trivial to do with the current
code base though - the FDT is written in the guest memory at
ppc_spapr_reset and can't be retrieved without adding extra state
(fdt_size for example) that will need to managed and migrated. Adding
the hotplugged DT in the middle of CAS negotiation via the updated DT
tree works with CPU devs, but panics the guest kernel at boot. Additional
analysis would be necessary for LMBs and PCI devices. There are
questions to be made in QEMU/SLOF/kernel level about how we can make
this change in a sustainable way.
With Linux guests, a fix would be the kernel executing check_exception
at boot time, de-queueing the events that happened in early boot and
processing them. However, even if/when the newer kernels start
fetching these events at boot time, we need to take care of older
kernels that won't be doing that.
This patch works around the situation by issuing a CAS reset if a hotplugged
device is detected during CAS:
- the DRC conditions that warrant a CAS reset is the same as those that
triggers a DRC migration - the DRC must have a device attached and
the DRC state is not equal to its ready_state. With that in mind, this
patch makes use of 'spapr_drc_needed' to determine if a CAS reset
is needed.
- In the middle of CAS negotiations, the function
'spapr_hotplugged_dev_before_cas' goes through all the DRCs to see
if there are any DRC that requires a reset, using spapr_drc_needed. If
that happens, returns '1' in 'spapr_h_cas_compose_response' which will set
spapr->cas_reboot to true, causing the machine to reboot.
No changes are made for coldplug devices.
[1] http://lists.nongnu.org/archive/html/qemu-devel/2017-08/msg02855.html
Signed-off-by: Daniel Henrique Barboza <danielhb@linux.vnet.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2017-08-30 20:21:41 +02:00
|
|
|
static bool spapr_hotplugged_dev_before_cas(void)
|
|
|
|
{
|
|
|
|
Object *drc_container, *obj;
|
|
|
|
ObjectProperty *prop;
|
|
|
|
ObjectPropertyIterator iter;
|
|
|
|
|
|
|
|
drc_container = container_get(object_get_root(), "/dr-connector");
|
|
|
|
object_property_iter_init(&iter, drc_container);
|
|
|
|
while ((prop = object_property_iter_next(&iter))) {
|
|
|
|
if (!strstart(prop->type, "link<", NULL)) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
obj = object_property_get_link(drc_container, prop->name, NULL);
|
|
|
|
if (spapr_drc_needed(obj)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-07-13 02:34:00 +02:00
|
|
|
int spapr_h_cas_compose_response(sPAPRMachineState *spapr,
|
|
|
|
target_ulong addr, target_ulong size,
|
spapr: add option vector handling in CAS-generated resets
In some cases, ibm,client-architecture-support calls can fail. This
could happen in the current code for situations where the modified
device tree segment exceeds the buffer size provided by the guest
via the call parameters. In these cases, QEMU will reset, allowing
an opportunity to regenerate the device tree from scratch via
boot-time handling. There are potentially other scenarios as well,
not currently reachable in the current code, but possible in theory,
such as cases where device-tree properties or nodes need to be removed.
We currently don't handle either of these properly for option vector
capabilities however. Instead of carrying the negotiated capability
beyond the reset and creating the boot-time device tree accordingly,
we start from scratch, generating the same boot-time device tree as we
did prior to the CAS-generated and the same device tree updates as we
did before. This could (in theory) cause us to get stuck in a reset
loop. This hasn't been observed, but depending on the extensiveness
of CAS-induced device tree updates in the future, could eventually
become an issue.
Address this by pulling capability-related device tree
updates resulting from CAS calls into a common routine,
spapr_dt_cas_updates(), and adding an sPAPROptionVector*
parameter that allows us to test for newly-negotiated capabilities.
We invoke it as follows:
1) When ibm,client-architecture-support gets called, we
call spapr_dt_cas_updates() with the set of capabilities
added since the previous call to ibm,client-architecture-support.
For the initial boot, or a system reset generated by something
other than the CAS call itself, this set will consist of *all*
options supported both the platform and the guest. For calls
to ibm,client-architecture-support immediately after a CAS-induced
reset, we call spapr_dt_cas_updates() with only the set
of capabilities added since the previous call, since the other
capabilities will have already been addressed by the boot-time
device-tree this time around. In the unlikely event that
capabilities are *removed* since the previous CAS, we will
generate a CAS-induced reset. In the unlikely event that we
cannot fit the device-tree updates into the buffer provided
by the guest, well generate a CAS-induced reset.
2) When a CAS update results in the need to reset the machine and
include the updates in the boot-time device tree, we call the
spapr_dt_cas_updates() using the full set of negotiated
capabilities as part of the reset path. At initial boot, or after
a reset generated by something other than the CAS call itself,
this set will be empty, resulting in what should be the same
boot-time device-tree as we generated prior to this patch. For
CAS-induced reset, this routine will be called with the full set of
capabilities negotiated by the platform/guest in the previous
CAS call, which should result in CAS updates from previous call
being accounted for in the initial boot-time device tree.
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
[dwg: Changed an int -> bool conversion to be more explicit]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2016-10-25 06:47:29 +02:00
|
|
|
sPAPROptionVector *ov5_updates)
|
2015-07-13 02:34:00 +02:00
|
|
|
{
|
|
|
|
void *fdt, *fdt_skel;
|
|
|
|
sPAPRDeviceTreeUpdateHeader hdr = { .version_id = 1 };
|
|
|
|
|
hw/ppc: CAS reset on early device hotplug
This patch is a follow up on the discussions made in patch
"hw/ppc: disable hotplug before CAS is completed" that can be
found at [1].
At this moment, we do not support CPU/memory hotplug in early
boot stages, before CAS. When a hotplug occurs, the event is logged
in an internal RTAS event log queue and an IRQ pulse is fired. In
regular conditions, the guest handles the interrupt by executing
check_exception, fetching the generated hotplug event and enabling
the device for use.
In early boot, this IRQ isn't caught (SLOF does not handle hotplug
events), leaving the event in the rtas event log queue. If the guest
executes check_exception due to another hotplug event, the re-assertion
of the IRQ ends up de-queuing the first hotplug event as well. In short,
a device hotplugged before CAS is considered coldplugged by SLOF.
This leads to device misbehavior and, in some cases, guest kernel
Ooops when trying to unplug the device.
A proper fix would be to turn every device hotplugged before CAS
as a colplugged device. This is not trivial to do with the current
code base though - the FDT is written in the guest memory at
ppc_spapr_reset and can't be retrieved without adding extra state
(fdt_size for example) that will need to managed and migrated. Adding
the hotplugged DT in the middle of CAS negotiation via the updated DT
tree works with CPU devs, but panics the guest kernel at boot. Additional
analysis would be necessary for LMBs and PCI devices. There are
questions to be made in QEMU/SLOF/kernel level about how we can make
this change in a sustainable way.
With Linux guests, a fix would be the kernel executing check_exception
at boot time, de-queueing the events that happened in early boot and
processing them. However, even if/when the newer kernels start
fetching these events at boot time, we need to take care of older
kernels that won't be doing that.
This patch works around the situation by issuing a CAS reset if a hotplugged
device is detected during CAS:
- the DRC conditions that warrant a CAS reset is the same as those that
triggers a DRC migration - the DRC must have a device attached and
the DRC state is not equal to its ready_state. With that in mind, this
patch makes use of 'spapr_drc_needed' to determine if a CAS reset
is needed.
- In the middle of CAS negotiations, the function
'spapr_hotplugged_dev_before_cas' goes through all the DRCs to see
if there are any DRC that requires a reset, using spapr_drc_needed. If
that happens, returns '1' in 'spapr_h_cas_compose_response' which will set
spapr->cas_reboot to true, causing the machine to reboot.
No changes are made for coldplug devices.
[1] http://lists.nongnu.org/archive/html/qemu-devel/2017-08/msg02855.html
Signed-off-by: Daniel Henrique Barboza <danielhb@linux.vnet.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2017-08-30 20:21:41 +02:00
|
|
|
if (spapr_hotplugged_dev_before_cas()) {
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2015-07-13 02:34:00 +02:00
|
|
|
size -= sizeof(hdr);
|
|
|
|
|
hw/ppc: CAS reset on early device hotplug
This patch is a follow up on the discussions made in patch
"hw/ppc: disable hotplug before CAS is completed" that can be
found at [1].
At this moment, we do not support CPU/memory hotplug in early
boot stages, before CAS. When a hotplug occurs, the event is logged
in an internal RTAS event log queue and an IRQ pulse is fired. In
regular conditions, the guest handles the interrupt by executing
check_exception, fetching the generated hotplug event and enabling
the device for use.
In early boot, this IRQ isn't caught (SLOF does not handle hotplug
events), leaving the event in the rtas event log queue. If the guest
executes check_exception due to another hotplug event, the re-assertion
of the IRQ ends up de-queuing the first hotplug event as well. In short,
a device hotplugged before CAS is considered coldplugged by SLOF.
This leads to device misbehavior and, in some cases, guest kernel
Ooops when trying to unplug the device.
A proper fix would be to turn every device hotplugged before CAS
as a colplugged device. This is not trivial to do with the current
code base though - the FDT is written in the guest memory at
ppc_spapr_reset and can't be retrieved without adding extra state
(fdt_size for example) that will need to managed and migrated. Adding
the hotplugged DT in the middle of CAS negotiation via the updated DT
tree works with CPU devs, but panics the guest kernel at boot. Additional
analysis would be necessary for LMBs and PCI devices. There are
questions to be made in QEMU/SLOF/kernel level about how we can make
this change in a sustainable way.
With Linux guests, a fix would be the kernel executing check_exception
at boot time, de-queueing the events that happened in early boot and
processing them. However, even if/when the newer kernels start
fetching these events at boot time, we need to take care of older
kernels that won't be doing that.
This patch works around the situation by issuing a CAS reset if a hotplugged
device is detected during CAS:
- the DRC conditions that warrant a CAS reset is the same as those that
triggers a DRC migration - the DRC must have a device attached and
the DRC state is not equal to its ready_state. With that in mind, this
patch makes use of 'spapr_drc_needed' to determine if a CAS reset
is needed.
- In the middle of CAS negotiations, the function
'spapr_hotplugged_dev_before_cas' goes through all the DRCs to see
if there are any DRC that requires a reset, using spapr_drc_needed. If
that happens, returns '1' in 'spapr_h_cas_compose_response' which will set
spapr->cas_reboot to true, causing the machine to reboot.
No changes are made for coldplug devices.
[1] http://lists.nongnu.org/archive/html/qemu-devel/2017-08/msg02855.html
Signed-off-by: Daniel Henrique Barboza <danielhb@linux.vnet.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2017-08-30 20:21:41 +02:00
|
|
|
/* Create skeleton */
|
2015-07-13 02:34:00 +02:00
|
|
|
fdt_skel = g_malloc0(size);
|
|
|
|
_FDT((fdt_create(fdt_skel, size)));
|
|
|
|
_FDT((fdt_begin_node(fdt_skel, "")));
|
|
|
|
_FDT((fdt_end_node(fdt_skel)));
|
|
|
|
_FDT((fdt_finish(fdt_skel)));
|
|
|
|
fdt = g_malloc0(size);
|
|
|
|
_FDT((fdt_open_into(fdt_skel, fdt, size)));
|
|
|
|
g_free(fdt_skel);
|
|
|
|
|
|
|
|
/* Fixup cpu nodes */
|
2016-10-28 15:01:05 +02:00
|
|
|
_FDT((spapr_fixup_cpu_dt(fdt, spapr)));
|
2015-07-13 02:34:00 +02:00
|
|
|
|
spapr: add option vector handling in CAS-generated resets
In some cases, ibm,client-architecture-support calls can fail. This
could happen in the current code for situations where the modified
device tree segment exceeds the buffer size provided by the guest
via the call parameters. In these cases, QEMU will reset, allowing
an opportunity to regenerate the device tree from scratch via
boot-time handling. There are potentially other scenarios as well,
not currently reachable in the current code, but possible in theory,
such as cases where device-tree properties or nodes need to be removed.
We currently don't handle either of these properly for option vector
capabilities however. Instead of carrying the negotiated capability
beyond the reset and creating the boot-time device tree accordingly,
we start from scratch, generating the same boot-time device tree as we
did prior to the CAS-generated and the same device tree updates as we
did before. This could (in theory) cause us to get stuck in a reset
loop. This hasn't been observed, but depending on the extensiveness
of CAS-induced device tree updates in the future, could eventually
become an issue.
Address this by pulling capability-related device tree
updates resulting from CAS calls into a common routine,
spapr_dt_cas_updates(), and adding an sPAPROptionVector*
parameter that allows us to test for newly-negotiated capabilities.
We invoke it as follows:
1) When ibm,client-architecture-support gets called, we
call spapr_dt_cas_updates() with the set of capabilities
added since the previous call to ibm,client-architecture-support.
For the initial boot, or a system reset generated by something
other than the CAS call itself, this set will consist of *all*
options supported both the platform and the guest. For calls
to ibm,client-architecture-support immediately after a CAS-induced
reset, we call spapr_dt_cas_updates() with only the set
of capabilities added since the previous call, since the other
capabilities will have already been addressed by the boot-time
device-tree this time around. In the unlikely event that
capabilities are *removed* since the previous CAS, we will
generate a CAS-induced reset. In the unlikely event that we
cannot fit the device-tree updates into the buffer provided
by the guest, well generate a CAS-induced reset.
2) When a CAS update results in the need to reset the machine and
include the updates in the boot-time device tree, we call the
spapr_dt_cas_updates() using the full set of negotiated
capabilities as part of the reset path. At initial boot, or after
a reset generated by something other than the CAS call itself,
this set will be empty, resulting in what should be the same
boot-time device-tree as we generated prior to this patch. For
CAS-induced reset, this routine will be called with the full set of
capabilities negotiated by the platform/guest in the previous
CAS call, which should result in CAS updates from previous call
being accounted for in the initial boot-time device tree.
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
[dwg: Changed an int -> bool conversion to be more explicit]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2016-10-25 06:47:29 +02:00
|
|
|
if (spapr_dt_cas_updates(spapr, fdt, ov5_updates)) {
|
|
|
|
return -1;
|
2015-07-13 02:34:00 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Pack resulting tree */
|
|
|
|
_FDT((fdt_pack(fdt)));
|
|
|
|
|
|
|
|
if (fdt_totalsize(fdt) + sizeof(hdr) > size) {
|
|
|
|
trace_spapr_cas_failed(size);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
cpu_physical_memory_write(addr, &hdr, sizeof(hdr));
|
|
|
|
cpu_physical_memory_write(addr + sizeof(hdr), fdt, fdt_totalsize(fdt));
|
|
|
|
trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr));
|
|
|
|
g_free(fdt);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-10-20 06:55:36 +02:00
|
|
|
/*
 * Populate the /rtas node of the boot-time device tree: the lists of
 * hypercalls the guest may use (ibm,hypertas-functions and the
 * QEMU-private qemu,hypertas-functions), NUMA associativity reference
 * points, RTAS event parameters, and the ibm,lrdr-capacity property
 * describing maximum hotpluggable memory/CPU capacity.
 */
static void spapr_dt_rtas(sPAPRMachineState *spapr, void *fdt)
{
    int rtas;
    GString *hypertas = g_string_sized_new(256);
    GString *qemu_hypertas = g_string_sized_new(256);
    uint32_t refpoints[] = { cpu_to_be32(0x4), cpu_to_be32(0x4) };
    /* Top of the hotpluggable memory region == max addressable RAM */
    uint64_t max_hotplug_addr = spapr->hotplug_memory.base +
        memory_region_size(&spapr->hotplug_memory.mr);
    /* ibm,lrdr-capacity: max memory (2 cells), block size (2 cells),
     * max number of cores */
    uint32_t lrdr_capacity[] = {
        cpu_to_be32(max_hotplug_addr >> 32),
        cpu_to_be32(max_hotplug_addr & 0xffffffff),
        0, cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE),
        cpu_to_be32(max_cpus / smp_threads),
    };

    _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));

    /* hypertas */
    add_str(hypertas, "hcall-pft");
    add_str(hypertas, "hcall-term");
    add_str(hypertas, "hcall-dabr");
    add_str(hypertas, "hcall-interrupt");
    add_str(hypertas, "hcall-tce");
    add_str(hypertas, "hcall-vio");
    add_str(hypertas, "hcall-splpar");
    add_str(hypertas, "hcall-bulk");
    add_str(hypertas, "hcall-set-mode");
    add_str(hypertas, "hcall-sprg0");
    add_str(hypertas, "hcall-copy");
    add_str(hypertas, "hcall-debug");
    add_str(qemu_hypertas, "hcall-memop1");

    /* Multi-TCE is always available under TCG; under KVM only when the
     * kernel capability is present */
    if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
        add_str(hypertas, "hcall-multi-tce");
    }

    /* Advertise HPT resizing only when the machine allows it */
    if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
        add_str(hypertas, "hcall-hpt-resize");
    }

    _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions",
                     hypertas->str, hypertas->len));
    g_string_free(hypertas, TRUE);
    _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions",
                     qemu_hypertas->str, qemu_hypertas->len));
    g_string_free(qemu_hypertas, TRUE);

    _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points",
                     refpoints, sizeof(refpoints)));

    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max",
                          RTAS_ERROR_LOG_MAX));
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate",
                          RTAS_EVENT_SCAN_RATE));

    /* Advertise MSI-X change support only when MSI is fully functional */
    if (msi_nonbroken) {
        _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));
    }

    /*
     * According to PAPR, rtas ibm,os-term does not guarantee a return
     * back to the guest cpu.
     *
     * While an additional ibm,extended-os-term property indicates
     * that rtas call return will always occur. Set this property.
     */
    _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0));

    _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity",
                     lrdr_capacity, sizeof(lrdr_capacity)));

    /* Add the numeric tokens for each supported RTAS call */
    spapr_dt_rtas_tokens(fdt, rtas);
}
|
|
|
|
|
2017-03-23 04:46:00 +01:00
|
|
|
/* Prepare ibm,arch-vec-5-platform-support, which indicates the MMU features
|
|
|
|
* that the guest may request and thus the valid values for bytes 24..26 of
|
|
|
|
* option vector 5: */
|
|
|
|
static void spapr_dt_ov5_platform_support(void *fdt, int chosen)
|
|
|
|
{
|
2017-05-02 08:37:18 +02:00
|
|
|
PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
|
|
|
|
|
2017-07-05 19:13:14 +02:00
|
|
|
char val[2 * 4] = {
|
2017-09-08 16:33:42 +02:00
|
|
|
23, 0x00, /* Xive mode, filled in below. */
|
2017-03-23 04:46:00 +01:00
|
|
|
24, 0x00, /* Hash/Radix, filled in below. */
|
|
|
|
25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
|
|
|
|
26, 0x40, /* Radix options: GTSE == yes. */
|
|
|
|
};
|
|
|
|
|
|
|
|
if (kvm_enabled()) {
|
|
|
|
if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
|
2017-07-05 19:13:14 +02:00
|
|
|
val[3] = 0x80; /* OV5_MMU_BOTH */
|
2017-03-23 04:46:00 +01:00
|
|
|
} else if (kvmppc_has_cap_mmu_radix()) {
|
2017-07-05 19:13:14 +02:00
|
|
|
val[3] = 0x40; /* OV5_MMU_RADIX_300 */
|
2017-03-23 04:46:00 +01:00
|
|
|
} else {
|
2017-07-05 19:13:14 +02:00
|
|
|
val[3] = 0x00; /* Hash */
|
2017-03-23 04:46:00 +01:00
|
|
|
}
|
|
|
|
} else {
|
2017-05-02 08:37:18 +02:00
|
|
|
if (first_ppc_cpu->env.mmu_model & POWERPC_MMU_V3) {
|
|
|
|
/* V3 MMU supports both hash and radix (with dynamic switching) */
|
2017-07-05 19:13:14 +02:00
|
|
|
val[3] = 0xC0;
|
2017-05-02 08:37:18 +02:00
|
|
|
} else {
|
|
|
|
/* Otherwise we can only do hash */
|
2017-07-05 19:13:14 +02:00
|
|
|
val[3] = 0x00;
|
2017-05-02 08:37:18 +02:00
|
|
|
}
|
2017-03-23 04:46:00 +01:00
|
|
|
}
|
|
|
|
_FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
|
|
|
|
val, sizeof(val)));
|
|
|
|
}
|
|
|
|
|
2016-10-24 03:05:57 +02:00
|
|
|
/*
 * Populate the /chosen node: kernel command line, initrd location,
 * optional -kernel payload markers, boot device/list, graphics and
 * stdout hints, and the OV5 platform-support property.
 */
static void spapr_dt_chosen(sPAPRMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int chosen;
    const char *boot_device = machine->boot_order;
    char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
    size_t cb = 0;
    char *bootlist = get_boot_devices_list(&cb, true);

    _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen"));

    _FDT(fdt_setprop_string(fdt, chosen, "bootargs", machine->kernel_cmdline));
    _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start",
                          spapr->initrd_base));
    _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end",
                          spapr->initrd_base + spapr->initrd_size));

    /* Tell the boot firmware where a -kernel payload was loaded */
    if (spapr->kernel_size) {
        uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR),
                              cpu_to_be64(spapr->kernel_size) };

        _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel",
                         &kprop, sizeof(kprop)));
        if (spapr->kernel_le) {
            _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0));
        }
    }
    /* boot_menu / graphic_* are the globals set by -boot and -g */
    if (boot_menu) {
        _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", boot_menu)));
    }
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width));
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height));
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth));

    if (cb && bootlist) {
        int i;

        /* The list is newline-separated; the firmware expects spaces */
        for (i = 0; i < cb; i++) {
            if (bootlist[i] == '\n') {
                bootlist[i] = ' ';
            }
        }
        _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist));
    }

    if (boot_device && strlen(boot_device)) {
        _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device));
    }

    /* Point the console at the VIO device only when there's no display */
    if (!spapr->has_graphics && stdout_path) {
        _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path));
    }

    spapr_dt_ov5_platform_support(fdt, chosen);

    g_free(stdout_path);
    g_free(bootlist);
}
|
|
|
|
|
2016-10-20 06:59:36 +02:00
|
|
|
/*
 * Add the /hypervisor node advertising the KVM hypercall interface.
 * The /hypervisor node isn't in PAPR - this is a hack to allow PR
 * KVM to work under pHyp with some guest co-operation.
 */
static void spapr_dt_hypervisor(sPAPRMachineState *spapr, void *fdt)
{
    int node;
    uint8_t hcall_insns[16];

    _FDT(node = fdt_add_subnode(fdt, 0, "hypervisor"));
    /* indicate KVM hypercall interface */
    _FDT(fdt_setprop_string(fdt, node, "compatible", "linux,kvm"));

    /*
     * Older KVM versions with older guest kernels were broken with the
     * magic page, so only expose the hypercall instructions when the
     * fixup capability is present; otherwise don't allow the guest to
     * map it.
     */
    if (!kvmppc_has_cap_fixup_hcalls()) {
        return;
    }

    if (!kvmppc_get_hypercall(first_cpu->env_ptr, hcall_insns,
                              sizeof(hcall_insns))) {
        _FDT(fdt_setprop(fdt, node, "hcall-instructions",
                         hcall_insns, sizeof(hcall_insns)));
    }
}
|
|
|
|
|
2016-10-25 02:51:33 +02:00
|
|
|
static void *spapr_build_fdt(sPAPRMachineState *spapr,
|
|
|
|
hwaddr rtas_addr,
|
|
|
|
hwaddr rtas_size)
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 07:12:10 +02:00
|
|
|
{
|
2017-09-06 20:43:05 +02:00
|
|
|
MachineState *machine = MACHINE(spapr);
|
2016-08-05 08:25:33 +02:00
|
|
|
MachineClass *mc = MACHINE_GET_CLASS(machine);
|
2015-09-01 03:22:35 +02:00
|
|
|
sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
|
2016-10-24 03:05:57 +02:00
|
|
|
int ret;
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 07:12:10 +02:00
|
|
|
void *fdt;
|
2011-10-30 18:16:46 +01:00
|
|
|
sPAPRPHBState *phb;
|
2016-10-20 07:05:00 +02:00
|
|
|
char *buf;
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 07:12:10 +02:00
|
|
|
|
2016-10-20 07:05:00 +02:00
|
|
|
fdt = g_malloc0(FDT_MAX_SIZE);
|
|
|
|
_FDT((fdt_create_empty_tree(fdt, FDT_MAX_SIZE)));
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 07:12:10 +02:00
|
|
|
|
2016-10-20 07:05:00 +02:00
|
|
|
/* Root node */
|
|
|
|
_FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp"));
|
|
|
|
_FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)"));
|
|
|
|
_FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries"));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Add info to guest to indentify which host is it being run on
|
|
|
|
* and what is the uuid of the guest
|
|
|
|
*/
|
|
|
|
if (kvmppc_get_host_model(&buf)) {
|
|
|
|
_FDT(fdt_setprop_string(fdt, 0, "host-model", buf));
|
|
|
|
g_free(buf);
|
|
|
|
}
|
|
|
|
if (kvmppc_get_host_serial(&buf)) {
|
|
|
|
_FDT(fdt_setprop_string(fdt, 0, "host-serial", buf));
|
|
|
|
g_free(buf);
|
|
|
|
}
|
|
|
|
|
|
|
|
buf = qemu_uuid_unparse_strdup(&qemu_uuid);
|
|
|
|
|
|
|
|
_FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf));
|
|
|
|
if (qemu_uuid_set) {
|
|
|
|
_FDT(fdt_setprop_string(fdt, 0, "system-id", buf));
|
|
|
|
}
|
|
|
|
g_free(buf);
|
|
|
|
|
|
|
|
if (qemu_get_vm_name()) {
|
|
|
|
_FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name",
|
|
|
|
qemu_get_vm_name()));
|
|
|
|
}
|
|
|
|
|
|
|
|
_FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
|
|
|
|
_FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));
|
2011-04-01 06:15:21 +02:00
|
|
|
|
2017-07-28 05:38:50 +02:00
|
|
|
/* /interrupt controller */
|
|
|
|
spapr_dt_xics(xics_max_server_number(), fdt, PHANDLE_XICP);
|
|
|
|
|
2015-08-03 07:35:41 +02:00
|
|
|
ret = spapr_populate_memory(spapr, fdt);
|
|
|
|
if (ret < 0) {
|
2016-08-02 19:38:00 +02:00
|
|
|
error_report("couldn't setup memory nodes in fdt");
|
2015-08-03 07:35:41 +02:00
|
|
|
exit(1);
|
2012-09-12 18:57:12 +02:00
|
|
|
}
|
|
|
|
|
2016-10-20 07:01:17 +02:00
|
|
|
/* /vdevice */
|
|
|
|
spapr_dt_vdevice(spapr->vio_bus, fdt);
|
2011-04-01 06:15:21 +02:00
|
|
|
|
ppc/spapr: Implement H_RANDOM hypercall in QEMU
The PAPR interface defines a hypercall to pass high-quality
hardware generated random numbers to guests. Recent kernels can
already provide this hypercall to the guest if the right hardware
random number generator is available. But in case the user wants
to use another source like EGD, or QEMU is running with an older
kernel, we should also have this call in QEMU, so that guests that
do not support virtio-rng yet can get good random numbers, too.
This patch now adds a new pseudo-device to QEMU that either
directly provides this hypercall to the guest or is able to
enable the in-kernel hypercall if available. The in-kernel
hypercall can be enabled with the use-kvm property, e.g.:
qemu-system-ppc64 -device spapr-rng,use-kvm=true
For handling the hypercall in QEMU instead, a "RngBackend" is
required since the hypercall should provide "good" random data
instead of pseudo-random (like from a "simple" library function
like rand() or g_random_int()). Since there are multiple RngBackends
available, the user must select an appropriate back-end via the
"rng" property of the device, e.g.:
qemu-system-ppc64 -object rng-random,filename=/dev/hwrng,id=gid0 \
-device spapr-rng,rng=gid0 ...
See http://wiki.qemu-project.org/Features-Done/VirtIORNG for
other example of specifying RngBackends.
Signed-off-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2015-09-17 10:49:41 +02:00
|
|
|
if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
|
|
|
|
ret = spapr_rng_populate_dt(fdt);
|
|
|
|
if (ret < 0) {
|
2016-08-02 19:38:00 +02:00
|
|
|
error_report("could not set up rng device in the fdt");
|
ppc/spapr: Implement H_RANDOM hypercall in QEMU
The PAPR interface defines a hypercall to pass high-quality
hardware generated random numbers to guests. Recent kernels can
already provide this hypercall to the guest if the right hardware
random number generator is available. But in case the user wants
to use another source like EGD, or QEMU is running with an older
kernel, we should also have this call in QEMU, so that guests that
do not support virtio-rng yet can get good random numbers, too.
This patch now adds a new pseudo-device to QEMU that either
directly provides this hypercall to the guest or is able to
enable the in-kernel hypercall if available. The in-kernel
hypercall can be enabled with the use-kvm property, e.g.:
qemu-system-ppc64 -device spapr-rng,use-kvm=true
For handling the hypercall in QEMU instead, a "RngBackend" is
required since the hypercall should provide "good" random data
instead of pseudo-random (like from a "simple" library function
like rand() or g_random_int()). Since there are multiple RngBackends
available, the user must select an appropriate back-end via the
"rng" property of the device, e.g.:
qemu-system-ppc64 -object rng-random,filename=/dev/hwrng,id=gid0 \
-device spapr-rng,rng=gid0 ...
See http://wiki.qemu-project.org/Features-Done/VirtIORNG for
other example of specifying RngBackends.
Signed-off-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2015-09-17 10:49:41 +02:00
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-10-30 18:16:46 +01:00
|
|
|
QLIST_FOREACH(phb, &spapr->phbs, list) {
|
2012-06-13 20:40:06 +02:00
|
|
|
ret = spapr_populate_pci_dt(phb, PHANDLE_XICP, fdt);
|
2016-04-21 12:08:58 +02:00
|
|
|
if (ret < 0) {
|
|
|
|
error_report("couldn't setup PCI devices in fdt");
|
|
|
|
exit(1);
|
|
|
|
}
|
2011-10-30 18:16:46 +01:00
|
|
|
}
|
|
|
|
|
2015-07-02 08:23:17 +02:00
|
|
|
/* cpus */
|
|
|
|
spapr_populate_cpus_dt_node(fdt, spapr);
|
2011-12-12 19:24:30 +01:00
|
|
|
|
2015-09-01 03:22:35 +02:00
|
|
|
if (smc->dr_lmb_enabled) {
|
|
|
|
_FDT(spapr_drc_populate_dt(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
|
|
|
|
}
|
|
|
|
|
2017-02-10 11:20:57 +01:00
|
|
|
if (mc->has_hotpluggable_cpus) {
|
2016-06-10 02:59:04 +02:00
|
|
|
int offset = fdt_path_offset(fdt, "/cpus");
|
|
|
|
ret = spapr_drc_populate_dt(fdt, offset, NULL,
|
|
|
|
SPAPR_DR_CONNECTOR_TYPE_CPU);
|
|
|
|
if (ret < 0) {
|
|
|
|
error_report("Couldn't set up CPU DR device tree properties");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-20 06:56:48 +02:00
|
|
|
/* /event-sources */
|
2016-10-27 04:20:26 +02:00
|
|
|
spapr_dt_events(spapr, fdt);
|
2016-10-20 06:56:48 +02:00
|
|
|
|
2016-10-20 06:55:36 +02:00
|
|
|
/* /rtas */
|
|
|
|
spapr_dt_rtas(spapr, fdt);
|
|
|
|
|
2016-10-24 03:05:57 +02:00
|
|
|
/* /chosen */
|
|
|
|
spapr_dt_chosen(spapr, fdt);
|
2016-10-20 06:34:59 +02:00
|
|
|
|
2016-10-20 06:59:36 +02:00
|
|
|
/* /hypervisor */
|
|
|
|
if (kvm_enabled()) {
|
|
|
|
spapr_dt_hypervisor(spapr, fdt);
|
|
|
|
}
|
|
|
|
|
2016-10-20 06:34:59 +02:00
|
|
|
/* Build memory reserve map */
|
|
|
|
if (spapr->kernel_size) {
|
|
|
|
_FDT((fdt_add_mem_rsv(fdt, KERNEL_LOAD_ADDR, spapr->kernel_size)));
|
|
|
|
}
|
|
|
|
if (spapr->initrd_size) {
|
|
|
|
_FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base, spapr->initrd_size)));
|
|
|
|
}
|
|
|
|
|
spapr: add option vector handling in CAS-generated resets
In some cases, ibm,client-architecture-support calls can fail. This
could happen in the current code for situations where the modified
device tree segment exceeds the buffer size provided by the guest
via the call parameters. In these cases, QEMU will reset, allowing
an opportunity to regenerate the device tree from scratch via
boot-time handling. There are potentially other scenarios as well,
not currently reachable in the current code, but possible in theory,
such as cases where device-tree properties or nodes need to be removed.
We currently don't handle either of these properly for option vector
capabilities however. Instead of carrying the negotiated capability
beyond the reset and creating the boot-time device tree accordingly,
we start from scratch, generating the same boot-time device tree as we
did prior to the CAS-generated and the same device tree updates as we
did before. This could (in theory) cause us to get stuck in a reset
loop. This hasn't been observed, but depending on the extensiveness
of CAS-induced device tree updates in the future, could eventually
become an issue.
Address this by pulling capability-related device tree
updates resulting from CAS calls into a common routine,
spapr_dt_cas_updates(), and adding an sPAPROptionVector*
parameter that allows us to test for newly-negotiated capabilities.
We invoke it as follows:
1) When ibm,client-architecture-support gets called, we
call spapr_dt_cas_updates() with the set of capabilities
added since the previous call to ibm,client-architecture-support.
For the initial boot, or a system reset generated by something
other than the CAS call itself, this set will consist of *all*
options supported both the platform and the guest. For calls
to ibm,client-architecture-support immediately after a CAS-induced
reset, we call spapr_dt_cas_updates() with only the set
of capabilities added since the previous call, since the other
capabilities will have already been addressed by the boot-time
device-tree this time around. In the unlikely event that
capabilities are *removed* since the previous CAS, we will
generate a CAS-induced reset. In the unlikely event that we
cannot fit the device-tree updates into the buffer provided
by the guest, we'll generate a CAS-induced reset.
2) When a CAS update results in the need to reset the machine and
include the updates in the boot-time device tree, we call the
spapr_dt_cas_updates() using the full set of negotiated
capabilities as part of the reset path. At initial boot, or after
a reset generated by something other than the CAS call itself,
this set will be empty, resulting in what should be the same
boot-time device-tree as we generated prior to this patch. For
CAS-induced reset, this routine will be called with the full set of
capabilities negotiated by the platform/guest in the previous
CAS call, which should result in CAS updates from previous call
being accounted for in the initial boot-time device tree.
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
[dwg: Changed an int -> bool conversion to be more explicit]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2016-10-25 06:47:29 +02:00
|
|
|
/* ibm,client-architecture-support updates */
|
|
|
|
ret = spapr_dt_cas_updates(spapr, fdt, spapr->ov5_cas);
|
|
|
|
if (ret < 0) {
|
|
|
|
error_report("couldn't setup CAS properties fdt");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
2016-10-25 02:51:33 +02:00
|
|
|
return fdt;
|
2011-04-01 06:15:20 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
|
|
|
|
{
|
|
|
|
return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR;
|
|
|
|
}
|
|
|
|
|
2016-10-28 13:06:21 +02:00
|
|
|
static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
|
|
|
|
PowerPCCPU *cpu)
|
2011-04-01 06:15:20 +02:00
|
|
|
{
|
2012-05-03 06:03:45 +02:00
|
|
|
CPUPPCState *env = &cpu->env;
|
|
|
|
|
tcg: drop global lock during TCG code execution
This finally allows TCG to benefit from the iothread introduction: Drop
the global mutex while running pure TCG CPU code. Reacquire the lock
when entering MMIO or PIO emulation, or when leaving the TCG loop.
We have to revert a few optimization for the current TCG threading
model, namely kicking the TCG thread in qemu_mutex_lock_iothread and not
kicking it in qemu_cpu_kick. We also need to disable RAM block
reordering until we have a more efficient locking mechanism at hand.
Still, a Linux x86 UP guest and my Musicpal ARM model boot fine here.
These numbers demonstrate where we gain something:
20338 jan 20 0 331m 75m 6904 R 99 0.9 0:50.95 qemu-system-arm
20337 jan 20 0 331m 75m 6904 S 20 0.9 0:26.50 qemu-system-arm
The guest CPU was fully loaded, but the iothread could still run mostly
independent on a second core. Without the patch we don't get beyond
32206 jan 20 0 330m 73m 7036 R 82 0.9 1:06.00 qemu-system-arm
32204 jan 20 0 330m 73m 7036 S 21 0.9 0:17.03 qemu-system-arm
We don't benefit significantly, though, when the guest is not fully
loading a host CPU.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Message-Id: <1439220437-23957-10-git-send-email-fred.konrad@greensocs.com>
[FK: Rebase, fix qemu_devices_reset deadlock, rm address_space_* mutex]
Signed-off-by: KONRAD Frederic <fred.konrad@greensocs.com>
[EGC: fixed iothread lock for cpu-exec IRQ handling]
Signed-off-by: Emilio G. Cota <cota@braap.org>
[AJB: -smp single-threaded fix, clean commit msg, BQL fixes]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Reviewed-by: Pranith Kumar <bobby.prani@gmail.com>
[PM: target-arm changes]
Acked-by: Peter Maydell <peter.maydell@linaro.org>
2017-02-23 19:29:11 +01:00
|
|
|
/* The TCG path should also be holding the BQL at this point */
|
|
|
|
g_assert(qemu_mutex_iothread_locked());
|
|
|
|
|
2012-09-25 19:12:20 +02:00
|
|
|
if (msr_pr) {
|
|
|
|
hcall_dprintf("Hypercall made with MSR[PR]=1\n");
|
|
|
|
env->gpr[3] = H_PRIVILEGE;
|
|
|
|
} else {
|
2012-05-03 06:13:14 +02:00
|
|
|
env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
|
2012-09-25 19:12:20 +02:00
|
|
|
}
|
2011-04-01 06:15:20 +02:00
|
|
|
}
|
|
|
|
|
2017-03-01 07:54:36 +01:00
|
|
|
/* vhyp callback: return the machine's partition table entry (PATB). */
static uint64_t spapr_get_patbe(PPCVirtualHypervisor *vhyp)
{
    return SPAPR_MACHINE(vhyp)->patb_entry;
}
|
|
|
|
|
2014-11-17 05:12:30 +01:00
|
|
|
/*
 * Accessors for the hash page table (HPT).  Each hashed PTE (HPTE) is a
 * pair of 64-bit words, so index _i addresses word 2*_i of the table.
 * Values are stored target-endian, hence the tswap64() on access.
 * HPTE64_V_HPTE_DIRTY is a software bit in the first word used to track
 * entries that still need to be transferred (e.g. during migration).
 */
#define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2))
/* Test the architected valid bit of an HPTE's first word */
#define HPTE_VALID(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
/* Test the software dirty-tracking bit */
#define HPTE_DIRTY(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
/* Clear / set the software dirty-tracking bit in place */
#define CLEAN_HPTE(_hpte) ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
#define DIRTY_HPTE(_hpte) ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY))
|
|
|
|
|
2016-02-09 00:28:58 +01:00
|
|
|
/*
|
|
|
|
* Get the fd to access the kernel htab, re-opening it if necessary
|
|
|
|
*/
|
|
|
|
/*
 * Get the fd used to access the in-kernel htab, opening it on first use.
 * Returns a negative value (and reports the error) if it cannot be
 * obtained; the cached fd is reused on subsequent calls.
 */
static int get_htab_fd(sPAPRMachineState *spapr)
{
    Error *local_err = NULL;

    if (spapr->htab_fd < 0) {
        spapr->htab_fd = kvmppc_get_htab_fd(false, 0, &local_err);
        if (spapr->htab_fd < 0) {
            error_report_err(local_err);
        }
    }

    return spapr->htab_fd;
}
|
|
|
|
|
2017-03-20 00:46:46 +01:00
|
|
|
/* Close the cached kernel htab fd, if any, and mark it invalid (-1). */
void close_htab_fd(sPAPRMachineState *spapr)
{
    int fd = spapr->htab_fd;

    spapr->htab_fd = -1;
    if (fd >= 0) {
        close(fd);
    }
}
|
|
|
|
|
2017-02-23 01:39:18 +01:00
|
|
|
/* vhyp callback: hash mask for the HPT, i.e. (number of PTEGs) - 1. */
static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
    hwaddr nptegs = HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64;

    return nptegs - 1;
}
|
|
|
|
|
2017-09-25 13:00:02 +02:00
|
|
|
/*
 * vhyp callback for KVM-PR: encode the userspace HPT for the kernel as
 * the table's host address with (htab_shift - 18) in the low bits.
 * Returns 0 when QEMU is not managing an HPT itself.
 */
static target_ulong spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);

    assert(kvm_enabled());

    if (spapr->htab) {
        return (target_ulong)(uintptr_t)spapr->htab | (spapr->htab_shift - 18);
    }
    return 0;
}
|
|
|
|
|
2017-02-23 01:39:18 +01:00
|
|
|
static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp,
|
|
|
|
hwaddr ptex, int n)
|
|
|
|
{
|
|
|
|
sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
|
|
|
|
hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
|
|
|
|
|
|
|
|
if (!spapr->htab) {
|
|
|
|
/*
|
|
|
|
* HTAB is controlled by KVM. Fetch into temporary buffer
|
|
|
|
*/
|
|
|
|
ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64);
|
|
|
|
kvmppc_read_hptes(hptes, ptex, n);
|
|
|
|
return hptes;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* HTAB is controlled by QEMU. Just point to the internally
|
|
|
|
* accessible PTEG.
|
|
|
|
*/
|
|
|
|
return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
|
|
|
|
const ppc_hash_pte64_t *hptes,
|
|
|
|
hwaddr ptex, int n)
|
|
|
|
{
|
|
|
|
sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
|
|
|
|
|
|
|
|
if (!spapr->htab) {
|
|
|
|
g_free((void *)hptes);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Nothing to do for qemu managed HPT */
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * vhyp callback: store both 64-bit halves of the HPTE at index @ptex,
 * either through KVM (kernel-managed HPT) or directly into the
 * QEMU-managed table.
 */
static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex,
                             uint64_t pte0, uint64_t pte1)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
    hwaddr base = ptex * HASH_PTE_SIZE_64;

    if (!spapr->htab) {
        kvmppc_write_hpte(ptex, pte0, pte1);
        return;
    }

    stq_p(spapr->htab + base, pte0);
    stq_p(spapr->htab + base + HASH_PTE_SIZE_64 / 2, pte1);
}
|
|
|
|
|
pseries: Implement HPT resizing
This patch implements hypercalls allowing a PAPR guest to resize its own
hash page table. This will eventually allow for more flexible memory
hotplug.
The implementation is partially asynchronous, handled in a special thread
running the hpt_prepare_thread() function. The state of a pending resize
is stored in SPAPR_MACHINE->pending_hpt.
The H_RESIZE_HPT_PREPARE hypercall will kick off creation of a new HPT, or,
if one is already in progress, monitor it for completion. If there is an
existing HPT resize in progress that doesn't match the size specified in
the call, it will cancel it, replacing it with a new one matching the
given size.
The H_RESIZE_HPT_COMMIT completes transition to a resized HPT, and can only
be called successfully once H_RESIZE_HPT_PREPARE has successfully
completed initialization of a new HPT. The guest must ensure that there
are no concurrent accesses to the existing HPT while this is called (this
effectively means stop_machine() for Linux guests).
For now H_RESIZE_HPT_COMMIT goes through the whole old HPT, rehashing each
HPTE into the new HPT. This can have quite high latency, but it seems to
be of the order of typical migration downtime latencies for HPTs of size
up to ~2GiB (which would be used in a 256GiB guest).
In future we probably want to move more of the rehashing to the "prepare"
phase, by having H_ENTER and other hcalls update both current and
pending HPTs. That's a project for another day, but should be possible
without any changes to the guest interface.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2017-05-12 07:46:49 +02:00
|
|
|
/*
 * Pick an HPT size (as a power-of-two shift) for @ramsize bytes of RAM.
 * We aim for a hash table of size 1/128 the size of RAM (rounded up).
 * The PAPR recommendation is actually 1/64 of RAM size, but that's much
 * more than is needed for Linux guests.  The result is clamped to the
 * architected bounds [18, 46].
 */
int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
{
    int shift = ctz64(pow2ceil(ramsize)) - 7;

    if (shift < 18) {
        shift = 18;     /* Minimum architected size */
    } else if (shift > 46) {
        shift = 46;     /* Maximum architected size */
    }
    return shift;
}
|
|
|
|
|
2017-05-17 05:49:20 +02:00
|
|
|
/*
 * Release all HPT state: the kernel htab fd (if open) and any
 * userspace-allocated table, resetting the recorded shift to 0.
 */
void spapr_free_hpt(sPAPRMachineState *spapr)
{
    close_htab_fd(spapr);
    g_free(spapr->htab);
    spapr->htab = NULL;
    spapr->htab_shift = 0;
}
|
|
|
|
|
2017-07-12 09:56:06 +02:00
|
|
|
void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift,
|
|
|
|
Error **errp)
|
2012-09-12 18:57:12 +02:00
|
|
|
{
|
2016-02-09 01:21:56 +01:00
|
|
|
long rc;
|
|
|
|
|
|
|
|
/* Clean up any HPT info from a previous boot */
|
2017-05-17 05:49:20 +02:00
|
|
|
spapr_free_hpt(spapr);
|
2016-02-09 01:21:56 +01:00
|
|
|
|
|
|
|
rc = kvmppc_reset_htab(shift);
|
|
|
|
if (rc < 0) {
|
|
|
|
/* kernel-side HPT needed, but couldn't allocate one */
|
|
|
|
error_setg_errno(errp, errno,
|
|
|
|
"Failed to allocate KVM HPT of order %d (try smaller maxmem?)",
|
|
|
|
shift);
|
|
|
|
/* This is almost certainly fatal, but if the caller really
|
|
|
|
* wants to carry on with shift == 0, it's welcome to try */
|
|
|
|
} else if (rc > 0) {
|
|
|
|
/* kernel-side HPT allocated */
|
|
|
|
if (rc != shift) {
|
|
|
|
error_setg(errp,
|
|
|
|
"Requested order %d HPT, but kernel allocated order %ld (try smaller maxmem?)",
|
|
|
|
shift, rc);
|
2015-09-24 10:22:48 +02:00
|
|
|
}
|
|
|
|
|
2012-09-12 18:57:12 +02:00
|
|
|
spapr->htab_shift = shift;
|
2016-03-08 01:35:15 +01:00
|
|
|
spapr->htab = NULL;
|
2015-09-24 10:22:47 +02:00
|
|
|
} else {
|
2016-02-09 01:21:56 +01:00
|
|
|
/* kernel-side HPT not needed, allocate in userspace instead */
|
|
|
|
size_t size = 1ULL << shift;
|
|
|
|
int i;
|
2015-09-24 10:22:47 +02:00
|
|
|
|
2016-02-09 01:21:56 +01:00
|
|
|
spapr->htab = qemu_memalign(size, size);
|
|
|
|
if (!spapr->htab) {
|
|
|
|
error_setg_errno(errp, errno,
|
|
|
|
"Could not allocate HPT of order %d", shift);
|
|
|
|
return;
|
2015-09-24 10:22:48 +02:00
|
|
|
}
|
|
|
|
|
2016-02-09 01:21:56 +01:00
|
|
|
memset(spapr->htab, 0, size);
|
|
|
|
spapr->htab_shift = shift;
|
2014-11-17 05:12:30 +01:00
|
|
|
|
2016-02-09 01:21:56 +01:00
|
|
|
for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
|
|
|
|
DIRTY_HPTE(HPTE(spapr->htab, i));
|
2014-11-17 05:12:30 +01:00
|
|
|
}
|
2012-09-12 18:57:12 +02:00
|
|
|
}
|
2011-04-01 06:15:20 +02:00
|
|
|
}
|
|
|
|
|
2017-03-20 00:46:46 +01:00
|
|
|
/*
 * Allocate the boot-time HPT and adjust the VRMA/RMA accordingly.
 * If HPT resizing is disabled, or this is a CAS reboot where the guest
 * did not negotiate HPT resizing, the table must be sized for the
 * maximum possible RAM; otherwise boot-time RAM size suffices.
 */
void spapr_setup_hpt_and_vrma(sPAPRMachineState *spapr)
{
    bool size_for_maxram =
        (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED)
        || (spapr->cas_reboot
            && !spapr_ovec_test(spapr->ov5_cas, OV5_HPT_RESIZE));
    int hpt_shift;

    if (size_for_maxram) {
        hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size);
    } else {
        hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->ram_size);
    }
    spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal);

    if (spapr->vrma_adjust) {
        spapr->rma_size = kvmppc_rma_size(spapr_node0_size(MACHINE(spapr)),
                                          spapr->htab_shift);
    }
    /* We're setting up a hash table, so that means we're not radix */
    spapr->patb_entry = 0;
}
|
|
|
|
|
2016-09-21 07:23:53 +02:00
|
|
|
/*
 * foreach_dynamic_sysbus_device() callback: reject any dynamic sysbus
 * device other than an sPAPR PCI host bridge, which is the only kind
 * this machine supports.  Exits QEMU on an unsupported device.
 */
static void find_unknown_sysbus_device(SysBusDevice *sbdev, void *opaque)
{
    if (!object_dynamic_cast(OBJECT(sbdev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
        error_report("Device %s is not supported by this machine yet.",
                     qdev_fw_name(DEVICE(sbdev)));
        exit(1);
    }
}
|
|
|
|
|
2012-09-12 18:57:11 +02:00
|
|
|
static void ppc_spapr_reset(void)
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 07:12:10 +02:00
|
|
|
{
|
2016-02-09 01:21:56 +01:00
|
|
|
MachineState *machine = MACHINE(qdev_get_machine());
|
|
|
|
sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
|
2013-05-29 22:29:20 +02:00
|
|
|
PowerPCCPU *first_ppc_cpu;
|
spapr: Locate RTAS and device-tree based on real RMA
We currently calculate the final RTAS and FDT location based on
the early estimate of the RMA size, cropped to 256M on KVM since
we only know the real RMA size at reset time which happens much
later in the boot process.
This means the FDT and RTAS end up right below 256M while they
could be much higher, using precious RMA space and limiting
what the OS bootloader can put there which has proved to be
a problem with some OSes (such as when using very large initrd's)
Fortunately, we do the actual copy of the device-tree into guest
memory much later, during reset, late enough to be able to do it
using the final RMA value, we just need to move the calculation
to the right place.
However, RTAS is still loaded too early, so we change the code to
load the tiny blob into qemu memory early on, and then copy it into
guest memory at reset time. It's small enough that the memory usage
doesn't matter.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[aik: fixed errors from checkpatch.pl, defined RTAS_MAX_ADDR]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[agraf: fix compilation on 32bit hosts]
Signed-off-by: Alexander Graf <agraf@suse.de>
2014-07-21 05:02:04 +02:00
|
|
|
uint32_t rtas_limit;
|
2016-10-20 06:30:53 +02:00
|
|
|
hwaddr rtas_addr, fdt_addr;
|
2016-10-25 02:51:33 +02:00
|
|
|
void *fdt;
|
|
|
|
int rc;
|
2013-01-17 18:51:17 +01:00
|
|
|
|
2014-11-04 23:22:54 +01:00
|
|
|
/* Check for unknown sysbus devices */
|
|
|
|
foreach_dynamic_sysbus_device(find_unknown_sysbus_device, NULL);
|
|
|
|
|
2017-03-20 00:46:46 +01:00
|
|
|
if (kvm_enabled() && kvmppc_has_cap_mmu_radix()) {
|
|
|
|
/* If using KVM with radix mode available, VCPUs can be started
|
|
|
|
* without a HPT because KVM will start them in radix mode.
|
|
|
|
* Set the GR bit in PATB so that we know there is no HPT. */
|
|
|
|
spapr->patb_entry = PATBE1_GR;
|
|
|
|
} else {
|
|
|
|
spapr_setup_hpt_and_vrma(spapr);
|
2016-02-09 01:21:56 +01:00
|
|
|
}
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 07:12:10 +02:00
|
|
|
|
2012-09-12 18:57:11 +02:00
|
|
|
qemu_devices_reset();
|
2017-08-30 20:21:40 +02:00
|
|
|
spapr_clear_pending_events(spapr);
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 07:12:10 +02:00
|
|
|
|
spapr: Locate RTAS and device-tree based on real RMA
We currently calculate the final RTAS and FDT location based on
the early estimate of the RMA size, cropped to 256M on KVM since
we only know the real RMA size at reset time which happens much
later in the boot process.
This means the FDT and RTAS end up right below 256M while they
could be much higher, using precious RMA space and limiting
what the OS bootloader can put there which has proved to be
a problem with some OSes (such as when using very large initrd's)
Fortunately, we do the actual copy of the device-tree into guest
memory much later, during reset, late enough to be able to do it
using the final RMA value, we just need to move the calculation
to the right place.
However, RTAS is still loaded too early, so we change the code to
load the tiny blob into qemu memory early on, and then copy it into
guest memory at reset time. It's small enough that the memory usage
doesn't matter.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[aik: fixed errors from checkpatch.pl, defined RTAS_MAX_ADDR]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[agraf: fix compilation on 32bit hosts]
Signed-off-by: Alexander Graf <agraf@suse.de>
2014-07-21 05:02:04 +02:00
|
|
|
/*
|
|
|
|
* We place the device tree and RTAS just below either the top of the RMA,
|
|
|
|
* or just below 2GB, whichever is lower, so that it can be
|
|
|
|
* processed with 32-bit real mode code if necessary
|
|
|
|
*/
|
|
|
|
rtas_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR);
|
2016-10-20 06:30:53 +02:00
|
|
|
rtas_addr = rtas_limit - RTAS_MAX_SIZE;
|
|
|
|
fdt_addr = rtas_addr - FDT_MAX_SIZE;
|
spapr: Locate RTAS and device-tree based on real RMA
We currently calculate the final RTAS and FDT location based on
the early estimate of the RMA size, cropped to 256M on KVM since
we only know the real RMA size at reset time which happens much
later in the boot process.
This means the FDT and RTAS end up right below 256M while they
could be much higher, using precious RMA space and limiting
what the OS bootloader can put there which has proved to be
a problem with some OSes (such as when using very large initrd's)
Fortunately, we do the actual copy of the device-tree into guest
memory much later, during reset, late enough to be able to do it
using the final RMA value, we just need to move the calculation
to the right place.
However, RTAS is still loaded too early, so we change the code to
load the tiny blob into qemu memory early on, and then copy it into
guest memory at reset time. It's small enough that the memory usage
doesn't matter.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[aik: fixed errors from checkpatch.pl, defined RTAS_MAX_ADDR]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[agraf: fix compilation on 32bit hosts]
Signed-off-by: Alexander Graf <agraf@suse.de>
2014-07-21 05:02:04 +02:00
|
|
|
|
spapr: add option vector handling in CAS-generated resets
In some cases, ibm,client-architecture-support calls can fail. This
could happen in the current code for situations where the modified
device tree segment exceeds the buffer size provided by the guest
via the call parameters. In these cases, QEMU will reset, allowing
an opportunity to regenerate the device tree from scratch via
boot-time handling. There are potentially other scenarios as well,
not currently reachable in the current code, but possible in theory,
such as cases where device-tree properties or nodes need to be removed.
We currently don't handle either of these properly for option vector
capabilities however. Instead of carrying the negotiated capability
beyond the reset and creating the boot-time device tree accordingly,
we start from scratch, generating the same boot-time device tree as we
did prior to the CAS-generated and the same device tree updates as we
did before. This could (in theory) cause us to get stuck in a reset
loop. This hasn't been observed, but depending on the extensiveness
of CAS-induced device tree updates in the future, could eventually
become an issue.
Address this by pulling capability-related device tree
updates resulting from CAS calls into a common routine,
spapr_dt_cas_updates(), and adding an sPAPROptionVector*
parameter that allows us to test for newly-negotiated capabilities.
We invoke it as follows:
1) When ibm,client-architecture-support gets called, we
call spapr_dt_cas_updates() with the set of capabilities
added since the previous call to ibm,client-architecture-support.
For the initial boot, or a system reset generated by something
other than the CAS call itself, this set will consist of *all*
options supported both the platform and the guest. For calls
to ibm,client-architecture-support immediately after a CAS-induced
reset, we call spapr_dt_cas_updates() with only the set
of capabilities added since the previous call, since the other
capabilities will have already been addressed by the boot-time
device-tree this time around. In the unlikely event that
capabilities are *removed* since the previous CAS, we will
generate a CAS-induced reset. In the unlikely event that we
cannot fit the device-tree updates into the buffer provided
by the guest, we'll generate a CAS-induced reset.
2) When a CAS update results in the need to reset the machine and
include the updates in the boot-time device tree, we call the
spapr_dt_cas_updates() using the full set of negotiated
capabilities as part of the reset path. At initial boot, or after
a reset generated by something other than the CAS call itself,
this set will be empty, resulting in what should be the same
boot-time device-tree as we generated prior to this patch. For
CAS-induced reset, this routine will be called with the full set of
capabilities negotiated by the platform/guest in the previous
CAS call, which should result in CAS updates from previous call
being accounted for in the initial boot-time device tree.
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
[dwg: Changed an int -> bool conversion to be more explicit]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2016-10-25 06:47:29 +02:00
|
|
|
/* if this reset wasn't generated by CAS, we should reset our
|
|
|
|
* negotiated options and start from scratch */
|
|
|
|
if (!spapr->cas_reboot) {
|
|
|
|
spapr_ovec_cleanup(spapr->ov5_cas);
|
|
|
|
spapr->ov5_cas = spapr_ovec_new();
|
2017-06-13 10:09:08 +02:00
|
|
|
|
|
|
|
ppc_set_compat_all(spapr->max_compat_pvr, &error_fatal);
|
spapr: add option vector handling in CAS-generated resets
In some cases, ibm,client-architecture-support calls can fail. This
could happen in the current code for situations where the modified
device tree segment exceeds the buffer size provided by the guest
via the call parameters. In these cases, QEMU will reset, allowing
an opportunity to regenerate the device tree from scratch via
boot-time handling. There are potentially other scenarios as well,
not currently reachable in the current code, but possible in theory,
such as cases where device-tree properties or nodes need to be removed.
We currently don't handle either of these properly for option vector
capabilities however. Instead of carrying the negotiated capability
beyond the reset and creating the boot-time device tree accordingly,
we start from scratch, generating the same boot-time device tree as we
did prior to the CAS-generated and the same device tree updates as we
did before. This could (in theory) cause us to get stuck in a reset
loop. This hasn't been observed, but depending on the extensiveness
of CAS-induced device tree updates in the future, could eventually
become an issue.
Address this by pulling capability-related device tree
updates resulting from CAS calls into a common routine,
spapr_dt_cas_updates(), and adding an sPAPROptionVector*
parameter that allows us to test for newly-negotiated capabilities.
We invoke it as follows:
1) When ibm,client-architecture-support gets called, we
call spapr_dt_cas_updates() with the set of capabilities
added since the previous call to ibm,client-architecture-support.
For the initial boot, or a system reset generated by something
other than the CAS call itself, this set will consist of *all*
options supported both the platform and the guest. For calls
to ibm,client-architecture-support immediately after a CAS-induced
reset, we call spapr_dt_cas_updates() with only the set
of capabilities added since the previous call, since the other
capabilities will have already been addressed by the boot-time
device-tree this time around. In the unlikely event that
capabilities are *removed* since the previous CAS, we will
generate a CAS-induced reset. In the unlikely event that we
cannot fit the device-tree updates into the buffer provided
by the guest, well generate a CAS-induced reset.
2) When a CAS update results in the need to reset the machine and
include the updates in the boot-time device tree, we call the
spapr_dt_cas_updates() using the full set of negotiated
capabilities as part of the reset path. At initial boot, or after
a reset generated by something other than the CAS call itself,
this set will be empty, resulting in what should be the same
boot-time device-tree as we generated prior to this patch. For
CAS-induced reset, this routine will be called with the full set of
capabilities negotiated by the platform/guest in the previous
CAS call, which should result in CAS updates from previous call
being accounted for in the initial boot-time device tree.
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
[dwg: Changed an int -> bool conversion to be more explicit]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2016-10-25 06:47:29 +02:00
|
|
|
}
|
|
|
|
|
2016-10-20 06:30:53 +02:00
|
|
|
fdt = spapr_build_fdt(spapr, rtas_addr, spapr->rtas_size);
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 07:12:10 +02:00
|
|
|
|
2016-10-20 06:37:41 +02:00
|
|
|
spapr_load_rtas(spapr, fdt, rtas_addr);
|
spapr: Locate RTAS and device-tree based on real RMA
We currently calculate the final RTAS and FDT location based on
the early estimate of the RMA size, cropped to 256M on KVM since
we only know the real RMA size at reset time which happens much
later in the boot process.
This means the FDT and RTAS end up right below 256M while they
could be much higher, using precious RMA space and limiting
what the OS bootloader can put there which has proved to be
a problem with some OSes (such as when using very large initrd's)
Fortunately, we do the actual copy of the device-tree into guest
memory much later, during reset, late enough to be able to do it
using the final RMA value, we just need to move the calculation
to the right place.
However, RTAS is still loaded too early, so we change the code to
load the tiny blob into qemu memory early on, and then copy it into
guest memory at reset time. It's small enough that the memory usage
doesn't matter.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[aik: fixed errors from checkpatch.pl, defined RTAS_MAX_ADDR]
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[agraf: fix compilation on 32bit hosts]
Signed-off-by: Alexander Graf <agraf@suse.de>
2014-07-21 05:02:04 +02:00
|
|
|
|
2016-10-25 02:51:33 +02:00
|
|
|
rc = fdt_pack(fdt);
|
|
|
|
|
|
|
|
/* Should only fail if we've built a corrupted tree */
|
|
|
|
assert(rc == 0);
|
|
|
|
|
|
|
|
if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
|
|
|
|
error_report("FDT too big ! 0x%x bytes (max is 0x%x)",
|
|
|
|
fdt_totalsize(fdt), FDT_MAX_SIZE);
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Load the fdt */
|
|
|
|
qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
|
2016-10-20 06:30:53 +02:00
|
|
|
cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
|
2016-10-25 02:51:33 +02:00
|
|
|
g_free(fdt);
|
|
|
|
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 07:12:10 +02:00
|
|
|
/* Set up the entry state */
|
2013-05-29 22:29:20 +02:00
|
|
|
first_ppc_cpu = POWERPC_CPU(first_cpu);
|
2016-10-20 06:30:53 +02:00
|
|
|
first_ppc_cpu->env.gpr[3] = fdt_addr;
|
2013-05-29 22:29:20 +02:00
|
|
|
first_ppc_cpu->env.gpr[5] = 0;
|
|
|
|
first_cpu->halted = 0;
|
2015-07-02 08:23:06 +02:00
|
|
|
first_ppc_cpu->env.nip = SPAPR_ENTRY_POINT;
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 07:12:10 +02:00
|
|
|
|
spapr: add option vector handling in CAS-generated resets
In some cases, ibm,client-architecture-support calls can fail. This
could happen in the current code for situations where the modified
device tree segment exceeds the buffer size provided by the guest
via the call parameters. In these cases, QEMU will reset, allowing
an opportunity to regenerate the device tree from scratch via
boot-time handling. There are potentially other scenarios as well,
not currently reachable in the current code, but possible in theory,
such as cases where device-tree properties or nodes need to be removed.
We currently don't handle either of these properly for option vector
capabilities however. Instead of carrying the negotiated capability
beyond the reset and creating the boot-time device tree accordingly,
we start from scratch, generating the same boot-time device tree as we
did prior to the CAS-generated reset, and the same device tree updates as we
did before. This could (in theory) cause us to get stuck in a reset
loop. This hasn't been observed, but depending on the extensiveness
of CAS-induced device tree updates in the future, could eventually
become an issue.
Address this by pulling capability-related device tree
updates resulting from CAS calls into a common routine,
spapr_dt_cas_updates(), and adding an sPAPROptionVector*
parameter that allows us to test for newly-negotiated capabilities.
We invoke it as follows:
1) When ibm,client-architecture-support gets called, we
call spapr_dt_cas_updates() with the set of capabilities
added since the previous call to ibm,client-architecture-support.
For the initial boot, or a system reset generated by something
other than the CAS call itself, this set will consist of *all*
options supported both the platform and the guest. For calls
to ibm,client-architecture-support immediately after a CAS-induced
reset, we call spapr_dt_cas_updates() with only the set
of capabilities added since the previous call, since the other
capabilities will have already been addressed by the boot-time
device-tree this time around. In the unlikely event that
capabilities are *removed* since the previous CAS, we will
generate a CAS-induced reset. In the unlikely event that we
cannot fit the device-tree updates into the buffer provided
by the guest, we'll generate a CAS-induced reset.
2) When a CAS update results in the need to reset the machine and
include the updates in the boot-time device tree, we call the
spapr_dt_cas_updates() using the full set of negotiated
capabilities as part of the reset path. At initial boot, or after
a reset generated by something other than the CAS call itself,
this set will be empty, resulting in what should be the same
boot-time device-tree as we generated prior to this patch. For
CAS-induced reset, this routine will be called with the full set of
capabilities negotiated by the platform/guest in the previous
CAS call, which should result in CAS updates from previous call
being accounted for in the initial boot-time device tree.
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
[dwg: Changed an int -> bool conversion to be more explicit]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2016-10-25 06:47:29 +02:00
|
|
|
spapr->cas_reboot = false;
|
Delay creation of pseries device tree until reset
At present, the 'pseries' machine creates a flattened device tree in the
machine->init function to pass to either the guest kernel or to firmware.
However, the machine->init function runs before processing of -device
command line options, which means that the device tree so created will
be (incorrectly) missing devices specified that way.
Supplying a correct device tree is, in any case, part of the required
platform entry conditions. Therefore, this patch moves the creation and
loading of the device tree from machine->init to a reset callback. The
setup of entry point address and initial register state moves with it,
which leads to a slight cleanup.
This is not, alas, quite enough to make a fully working reset for pseries.
For that we would need to reload the firmware images, which on this
machine are loaded into RAM. It's a step in the right direction, though.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
2011-04-05 07:12:10 +02:00
|
|
|
}
|
|
|
|
|
2015-07-02 08:23:04 +02:00
|
|
|
/* Instantiate the "spapr-nvram" device on the VIO bus and remember it in
 * the machine state.  If the user supplied a pflash drive, it becomes the
 * persistent backing store for the NVRAM contents. */
static void spapr_create_nvram(sPAPRMachineState *spapr)
{
    DriveInfo *backing = drive_get(IF_PFLASH, 0, 0);
    DeviceState *nvram_dev;

    nvram_dev = qdev_create(&spapr->vio_bus->bus, "spapr-nvram");
    if (backing != NULL) {
        qdev_prop_set_drive(nvram_dev, "drive", blk_by_legacy_dinfo(backing),
                            &error_fatal);
    }
    qdev_init_nofail(nvram_dev);

    spapr->nvram = (struct sPAPRNVRAM *)nvram_dev;
}
|
|
|
|
|
2015-07-02 08:23:04 +02:00
|
|
|
/* Bring up the embedded PAPR RTC: initialize it in place, attach it as a
 * QOM child of the machine, realize it, and expose its "date" property
 * under the machine-level alias "rtc-time". */
static void spapr_rtc_create(sPAPRMachineState *spapr)
{
    Object *rtc = OBJECT(&spapr->rtc);

    object_initialize(&spapr->rtc, sizeof(spapr->rtc), TYPE_SPAPR_RTC);
    object_property_add_child(OBJECT(spapr), "rtc", rtc, &error_fatal);
    object_property_set_bool(rtc, true, "realized", &error_fatal);
    object_property_add_alias(OBJECT(spapr), "rtc-time", rtc, "date",
                              &error_fatal);
}
|
|
|
|
|
2012-08-14 13:11:49 +02:00
|
|
|
/* Returns whether we want to use VGA or not */
|
2016-01-20 02:58:39 +01:00
|
|
|
static bool spapr_vga_init(PCIBus *pci_bus, Error **errp)
|
2012-08-06 18:42:00 +02:00
|
|
|
{
|
2012-08-14 13:11:49 +02:00
|
|
|
switch (vga_interface_type) {
|
|
|
|
case VGA_NONE:
|
2014-03-10 15:37:41 +01:00
|
|
|
return false;
|
|
|
|
case VGA_DEVICE:
|
|
|
|
return true;
|
2012-09-08 12:40:45 +02:00
|
|
|
case VGA_STD:
|
2015-09-15 07:51:29 +02:00
|
|
|
case VGA_VIRTIO:
|
2012-09-08 12:40:45 +02:00
|
|
|
return pci_vga_init(pci_bus) != NULL;
|
2012-08-14 13:11:49 +02:00
|
|
|
default:
|
2016-01-20 02:58:39 +01:00
|
|
|
error_setg(errp,
|
|
|
|
"Unsupported VGA mode, only -vga std or -vga virtio is supported");
|
|
|
|
return false;
|
2012-08-06 18:42:00 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-06 04:55:52 +01:00
|
|
|
/* VMState .post_load hook for the "spapr" machine section.
 *
 * Re-establishes state that is not carried directly in the migration
 * stream: re-delivers pending interrupts for emulated (non-KVM) ICS,
 * imports a legacy RTC offset from pre-version-3 streams, and pushes a
 * migrated partition table entry into the host's v3 MMU configuration.
 * Returns 0 on success or a negative errno on failure. */
static int spapr_post_load(void *opaque, int version_id)
{
    sPAPRMachineState *spapr = (sPAPRMachineState *)opaque;
    int err = 0;

    /* Interrupts handed to KVM are resent by the kernel; for the
     * emulated ICS we must re-trigger them on every CPU's ICP here. */
    if (!object_dynamic_cast(OBJECT(spapr->ics), TYPE_ICS_KVM)) {
        CPUState *cs;
        CPU_FOREACH(cs) {
            PowerPCCPU *cpu = POWERPC_CPU(cs);
            icp_resend(ICP(cpu->intc));
        }
    }

    /* In earlier versions, there was no separate qdev for the PAPR
     * RTC, so the RTC offset was stored directly in sPAPREnvironment.
     * So when migrating from those versions, poke the incoming offset
     * value into the RTC device */
    if (version_id < 3) {
        err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset);
    }

    /* A non-zero process table entry means the guest negotiated a
     * POWER9-style MMU; program it into the host via KVM. */
    if (spapr->patb_entry) {
        PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
        bool radix = !!(spapr->patb_entry & PATBE1_GR);
        bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE);

        err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry);
        if (err) {
            error_report("Process table config unsupported by the host");
            return -EINVAL;
        }
    }

    return err;
}
|
|
|
|
|
|
|
|
/* VMState field-test helper: true for streams older than version 3. */
static bool version_before_3(void *opaque, int version_id)
{
    return version_id <= 2;
}
|
|
|
|
|
2017-07-11 20:07:55 +02:00
|
|
|
static bool spapr_pending_events_needed(void *opaque)
|
|
|
|
{
|
|
|
|
sPAPRMachineState *spapr = (sPAPRMachineState *)opaque;
|
|
|
|
return !QTAILQ_EMPTY(&spapr->pending_events);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Wire format for one queued RTAS event log entry: the summary word, the
 * length of the extended log, then the variable-size extended log buffer
 * (allocated on load). */
static const VMStateDescription vmstate_spapr_event_entry = {
    .name = "spapr_event_log_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(summary, sPAPREventLogEntry),
        VMSTATE_UINT32(extended_length, sPAPREventLogEntry),
        VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, sPAPREventLogEntry, 0,
                                     NULL, extended_length),
        VMSTATE_END_OF_LIST()
    },
};
|
|
|
|
|
|
|
|
/* Optional subsection carrying the queue of RTAS events that have been
 * generated but not yet delivered to the guest; emitted only when
 * spapr_pending_events_needed() says the queue is non-empty. */
static const VMStateDescription vmstate_spapr_pending_events = {
    .name = "spapr_pending_events",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_pending_events_needed,
    .fields = (VMStateField[]) {
        VMSTATE_QTAILQ_V(pending_events, sPAPRMachineState, 1,
                         vmstate_spapr_event_entry, sPAPREventLogEntry, next),
        VMSTATE_END_OF_LIST()
    },
};
|
|
|
|
|
2016-11-18 02:40:27 +01:00
|
|
|
/* Subsection test: decide whether the CAS-negotiated option vector 5
 * state must be included in the migration stream.  Returns true when
 * spapr->ov5 contains any bits beyond the two "legacy" options that all
 * machine versions support. */
static bool spapr_ov5_cas_needed(void *opaque)
{
    sPAPRMachineState *spapr = opaque;
    sPAPROptionVector *ov5_mask = spapr_ovec_new();
    sPAPROptionVector *ov5_legacy = spapr_ovec_new();
    sPAPROptionVector *ov5_removed = spapr_ovec_new();
    bool cas_needed;

    /* Prior to the introduction of sPAPROptionVector, we had two option
     * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY.
     * Both of these options encode machine topology into the device-tree
     * in such a way that the now-booted OS should still be able to interact
     * appropriately with QEMU regardless of what options were actually
     * negotiated on the source side.
     *
     * As such, we can avoid migrating the CAS-negotiated options if these
     * are the only options available on the current machine/platform.
     * Since these are the only options available for pseries-2.7 and
     * earlier, this allows us to maintain old->new/new->old migration
     * compatibility.
     *
     * For QEMU 2.8+, there are additional CAS-negotiable options available
     * via default pseries-2.8 machines and explicit command-line parameters.
     * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware
     * of the actual CAS-negotiated values to continue working properly. For
     * example, availability of memory unplug depends on knowing whether
     * OV5_HP_EVT was negotiated via CAS.
     *
     * Thus, for any cases where the set of available CAS-negotiable
     * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
     * include the CAS-negotiated options in the migration stream.
     */
    spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
    spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);

    /* spapr_ovec_diff returns true if bits were removed. we avoid using
     * the mask itself since in the future it's possible "legacy" bits may be
     * removed via machine options, which could generate a false positive
     * that breaks migration.
     */
    spapr_ovec_intersect(ov5_legacy, spapr->ov5, ov5_mask);
    cas_needed = spapr_ovec_diff(ov5_removed, spapr->ov5, ov5_legacy);

    spapr_ovec_cleanup(ov5_mask);
    spapr_ovec_cleanup(ov5_legacy);
    spapr_ovec_cleanup(ov5_removed);

    return cas_needed;
}
|
|
|
|
|
|
|
|
/* Optional subsection for the CAS-negotiated option vector 5; emitted
 * only when spapr_ov5_cas_needed() detects non-legacy option bits. */
static const VMStateDescription vmstate_spapr_ov5_cas = {
    .name = "spapr_option_vector_ov5_cas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_ov5_cas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_POINTER_V(ov5_cas, sPAPRMachineState, 1,
                                 vmstate_spapr_ovec, sPAPROptionVector),
        VMSTATE_END_OF_LIST()
    },
};
|
|
|
|
|
2017-03-01 07:54:36 +01:00
|
|
|
static bool spapr_patb_entry_needed(void *opaque)
|
|
|
|
{
|
|
|
|
sPAPRMachineState *spapr = opaque;
|
|
|
|
|
|
|
|
return !!spapr->patb_entry;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Optional subsection carrying the POWER9 partition table entry; sent
 * only when spapr->patb_entry is non-zero. */
static const VMStateDescription vmstate_spapr_patb_entry = {
    .name = "spapr_patb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_patb_entry_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(patb_entry, sPAPRMachineState),
        VMSTATE_END_OF_LIST()
    },
};
|
|
|
|
|
2013-07-18 21:33:01 +02:00
|
|
|
/* Top-level VMState description of the sPAPR machine.  Version 3 dropped
 * the next_irq field and moved the RTC offset into its own device; the
 * version_before_3 tests keep streams from versions 1-2 loadable. */
static const VMStateDescription vmstate_spapr = {
    .name = "spapr",
    .version_id = 3,
    .minimum_version_id = 1,
    .post_load = spapr_post_load,
    .fields = (VMStateField[]) {
        /* used to be @next_irq */
        VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),

        /* RTC offset */
        VMSTATE_UINT64_TEST(rtc_offset, sPAPRMachineState, version_before_3),

        VMSTATE_PPC_TIMEBASE_V(tb, sPAPRMachineState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_spapr_ov5_cas,
        &vmstate_spapr_patb_entry,
        &vmstate_spapr_pending_events,
        NULL
    }
};
|
|
|
|
|
|
|
|
/* SaveVMHandlers .save_setup hook for the hash page table stream.
 * Writes the HPT shift (or -1 when no HPT exists) as the stream header
 * and primes the incremental-save cursor for a QEMU-owned HPT. */
static int htab_save_setup(QEMUFile *f, void *opaque)
{
    sPAPRMachineState *spapr = opaque;

    /* "Iteration" header: the HPT shift, or -1 when none is allocated */
    qemu_put_be32(f, spapr->htab_shift ? spapr->htab_shift : -1);

    if (spapr->htab) {
        /* Userspace HPT: restart the save from slot 0, first pass */
        spapr->htab_save_index = 0;
        spapr->htab_first_pass = true;
    } else if (spapr->htab_shift) {
        /* The HPT exists but lives in the kernel: KVM only */
        assert(kvm_enabled());
    }

    return 0;
}
|
|
|
|
|
2017-09-19 16:11:07 +02:00
|
|
|
/* Emit one chunk of the HPT to the migration stream: the starting slot
 * index, the count of valid then invalid entries, followed by the raw
 * bytes of the n_valid valid HPTEs (invalid entries carry no data). */
static void htab_save_chunk(QEMUFile *f, sPAPRMachineState *spapr,
                            int chunkstart, int n_valid, int n_invalid)
{
    qemu_put_be32(f, chunkstart);
    qemu_put_be16(f, n_valid);
    qemu_put_be16(f, n_invalid);
    qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
                    HASH_PTE_SIZE_64 * n_valid);
}
|
|
|
|
|
|
|
|
/* Terminate the HPT stream: an all-zero chunk header (index 0, no valid
 * and no invalid entries) marks end-of-stream for htab_load(). */
static void htab_save_end_marker(QEMUFile *f)
{
    qemu_put_be32(f, 0);
    qemu_put_be16(f, 0);
    qemu_put_be16(f, 0);
}
|
|
|
|
|
2015-07-02 08:23:04 +02:00
|
|
|
/* First pass over a QEMU-owned HPT: stream every currently-valid entry,
 * clearing dirty bits as we go.  Resumes from spapr->htab_save_index and
 * stops when the table is exhausted, the rate limit is hit, or (when
 * max_ns != -1) the time budget is exceeded.  Clears htab_first_pass
 * once the whole table has been walked. */
static void htab_save_first_pass(QEMUFile *f, sPAPRMachineState *spapr,
                                 int64_t max_ns)
{
    bool has_timeout = max_ns != -1;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(spapr->htab_first_pass);

    do {
        int chunkstart;

        /* Consume invalid HPTEs */
        while ((index < htabslots)
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        /* Consume valid HPTEs */
        chunkstart = index;
        /* chunk length is bounded by USHRT_MAX: n_valid is a be16 field */
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        if (index > chunkstart) {
            int n_valid = index - chunkstart;

            htab_save_chunk(f, spapr, chunkstart, n_valid, 0);

            if (has_timeout &&
                (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }
    } while ((index < htabslots) && !qemu_file_rate_limit(f));

    if (index >= htabslots) {
        assert(index == htabslots);
        /* Table fully walked: wrap the cursor and move to later passes */
        index = 0;
        spapr->htab_first_pass = false;
    }
    spapr->htab_save_index = index;
}
|
|
|
|
|
2015-07-02 08:23:04 +02:00
|
|
|
/* Subsequent passes over a QEMU-owned HPT: stream only entries dirtied
 * since the last pass, as runs of valid-dirty entries followed by runs
 * of invalid-dirty entries.  A negative max_ns means "final pass": walk
 * the entire table ignoring rate and time limits.  Returns 1 when the
 * whole table was examined and nothing needed sending (quiescent),
 * otherwise 0. */
static int htab_save_later_pass(QEMUFile *f, sPAPRMachineState *spapr,
                                int64_t max_ns)
{
    bool final = max_ns < 0;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int examined = 0, sent = 0;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(!spapr->htab_first_pass);

    do {
        int chunkstart, invalidstart;

        /* Consume non-dirty HPTEs */
        while ((index < htabslots)
               && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
            index++;
            examined++;
        }

        chunkstart = index;
        /* Consume valid dirty HPTEs (run length capped by the be16 count) */
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        invalidstart = index;
        /* Consume invalid dirty HPTEs */
        while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        if (index > chunkstart) {
            int n_valid = invalidstart - chunkstart;
            int n_invalid = index - invalidstart;

            htab_save_chunk(f, spapr, chunkstart, n_valid, n_invalid);
            sent += index - chunkstart;

            if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }

        if (examined >= htabslots) {
            break;
        }

        /* Wrap around: dirty entries may lie before the resume point */
        if (index >= htabslots) {
            assert(index == htabslots);
            index = 0;
        }
    } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final));

    if (index >= htabslots) {
        assert(index == htabslots);
        index = 0;
    }

    spapr->htab_save_index = index;

    return (examined >= htabslots) && (sent == 0) ? 1 : 0;
}
|
|
|
|
|
2013-07-18 21:33:03 +02:00
|
|
|
#define MAX_ITERATION_NS 5000000 /* 5 ms */
|
|
|
|
#define MAX_KVM_BUF_SIZE 2048
|
|
|
|
|
2013-07-18 21:33:01 +02:00
|
|
|
/* SaveVMHandlers .save_live_iterate hook: emit one time/rate-bounded
 * batch of HPT state.  A kernel-resident HPT is streamed through the
 * KVM fd; a QEMU-owned HPT goes through the first/later-pass walkers.
 * Returns 1 when there is nothing (more) to send, 0 to be called again,
 * or a negative errno on failure. */
static int htab_save_iterate(QEMUFile *f, void *opaque)
{
    sPAPRMachineState *spapr = opaque;
    int fd;
    int rc = 0;

    /* Iteration header */
    if (!spapr->htab_shift) {
        /* No HPT at all (e.g. radix guest): signal that and finish */
        qemu_put_be32(f, -1);
        return 1;
    } else {
        qemu_put_be32(f, 0);
    }

    if (!spapr->htab) {
        assert(kvm_enabled());

        fd = get_htab_fd(spapr);
        if (fd < 0) {
            return fd;
        }

        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
        if (rc < 0) {
            return rc;
        }
    } else if (spapr->htab_first_pass) {
        htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
    } else {
        rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
    }

    htab_save_end_marker(f);

    return rc;
}
|
|
|
|
|
|
|
|
/* SaveVMHandlers .save_live_complete_precopy hook: flush all remaining
 * HPT state with no time or rate limits (max_ns = -1 means "final").
 * Returns 0 on success or a negative errno on failure. */
static int htab_save_complete(QEMUFile *f, void *opaque)
{
    sPAPRMachineState *spapr = opaque;
    int fd;

    /* Iteration header */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
        return 0;
    } else {
        qemu_put_be32(f, 0);
    }

    if (!spapr->htab) {
        int rc;

        assert(kvm_enabled());

        fd = get_htab_fd(spapr);
        if (fd < 0) {
            return fd;
        }

        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1);
        if (rc < 0) {
            return rc;
        }
    } else {
        /* If migration completed before the first pass finished, run it
         * to the end, then sweep up any remaining dirty entries. */
        if (spapr->htab_first_pass) {
            htab_save_first_pass(f, spapr, -1);
        }
        htab_save_later_pass(f, spapr, -1);
    }

    /* End marker */
    htab_save_end_marker(f);

    return 0;
}
|
|
|
|
|
|
|
|
/* SaveVMHandlers .load_state hook for the hash page table stream.
 *
 * The first section carries either -1 (no HPT: free any existing one),
 * an HPT shift (allocate/resize the table), or 0 followed by chunks of
 * HPTEs terminated by an all-zero chunk header.  Chunks are copied into
 * a QEMU-owned HPT directly, or pushed into the kernel through the KVM
 * HTAB fd.  Returns 0 on success or a negative errno on failure.
 *
 * Fix over the previous version: the KVM HTAB fd was leaked on the
 * bad-index and kvmppc_load_htab_chunk() error paths; all exits now go
 * through a common cleanup that closes it. */
static int htab_load(QEMUFile *f, void *opaque, int version_id)
{
    sPAPRMachineState *spapr = opaque;
    uint32_t section_hdr;
    int fd = -1;
    int rc = 0;
    Error *local_err = NULL;

    if (version_id < 1 || version_id > 1) {
        error_report("htab_load() bad version");
        return -EINVAL;
    }

    section_hdr = qemu_get_be32(f);

    if (section_hdr == -1) {
        /* Source had no HPT (radix guest): drop ours too */
        spapr_free_hpt(spapr);
        return 0;
    }

    if (section_hdr) {
        /* First section gives the htab size */
        spapr_reallocate_hpt(spapr, section_hdr, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -EINVAL;
        }
        return 0;
    }

    if (!spapr->htab) {
        /* Kernel-resident HPT: stream chunks in through the KVM fd */
        assert(kvm_enabled());

        fd = kvmppc_get_htab_fd(true, 0, &local_err);
        if (fd < 0) {
            error_report_err(local_err);
            return fd;
        }
    }

    while (true) {
        uint32_t index;
        uint16_t n_valid, n_invalid;

        index = qemu_get_be32(f);
        n_valid = qemu_get_be16(f);
        n_invalid = qemu_get_be16(f);

        if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
            /* End of Stream */
            break;
        }

        if ((index + n_valid + n_invalid) >
            (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
            /* Bad index in stream */
            error_report(
                "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)",
                index, n_valid, n_invalid, spapr->htab_shift);
            rc = -EINVAL;
            goto out;
        }

        if (spapr->htab) {
            if (n_valid) {
                qemu_get_buffer(f, HPTE(spapr->htab, index),
                                HASH_PTE_SIZE_64 * n_valid);
            }
            if (n_invalid) {
                memset(HPTE(spapr->htab, index + n_valid), 0,
                       HASH_PTE_SIZE_64 * n_invalid);
            }
        } else {
            int chunk_rc;

            assert(fd >= 0);

            chunk_rc = kvmppc_load_htab_chunk(f, fd, index, n_valid,
                                              n_invalid);
            if (chunk_rc < 0) {
                rc = chunk_rc;
                goto out;
            }
        }
    }

out:
    /* Close the KVM HTAB fd on every exit path, not just on success */
    if (fd >= 0) {
        close(fd);
    }

    return rc;
}
|
|
|
|
|
2017-06-28 11:52:25 +02:00
|
|
|
static void htab_save_cleanup(void *opaque)
|
2016-07-21 11:21:34 +02:00
|
|
|
{
|
|
|
|
sPAPRMachineState *spapr = opaque;
|
|
|
|
|
|
|
|
close_htab_fd(spapr);
|
|
|
|
}
|
|
|
|
|
2013-07-18 21:33:01 +02:00
|
|
|
/* The hash page table is migrated through its own iterable savevm
 * section (like RAM) rather than through VMState, using the hooks
 * defined above. */
static SaveVMHandlers savevm_htab_handlers = {
    .save_setup = htab_save_setup,
    .save_live_iterate = htab_save_iterate,
    .save_live_complete_precopy = htab_save_complete,
    .save_cleanup = htab_save_cleanup,
    .load_state = htab_load,
};
|
|
|
|
|
2015-03-18 13:30:44 +01:00
|
|
|
static void spapr_boot_set(void *opaque, const char *boot_device,
|
|
|
|
Error **errp)
|
|
|
|
{
|
2017-09-06 20:43:05 +02:00
|
|
|
MachineState *machine = MACHINE(opaque);
|
2015-03-18 13:30:44 +01:00
|
|
|
machine->boot_order = g_strdup(boot_device);
|
|
|
|
}
|
|
|
|
|
2015-08-12 05:16:48 +02:00
|
|
|
/* Create one DR connector per logical memory block (LMB) of the
 * hotpluggable memory region, i.e. for the range between ram_size and
 * maxram_size.  Connector indices are the LMB number of each address. */
static void spapr_create_lmb_dr_connectors(sPAPRMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    const uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size) / lmb_size;
    uint32_t i;

    for (i = 0; i < nr_lmbs; i++) {
        uint64_t addr = spapr->hotplug_memory.base + i * lmb_size;

        spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
                               addr / lmb_size);
    }
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If RAM size, maxmem size and individual node mem sizes aren't aligned
|
|
|
|
* to SPAPR_MEMORY_BLOCK_SIZE(256MB), then refuse to start the guest
|
|
|
|
* since we can't support such unaligned sizes with DRCONF_MEMORY.
|
|
|
|
*/
|
2016-01-25 12:46:47 +01:00
|
|
|
static void spapr_validate_node_memory(MachineState *machine, Error **errp)
|
2015-08-12 05:16:48 +02:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2016-01-25 12:46:47 +01:00
|
|
|
if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
|
|
|
|
error_setg(errp, "Memory size 0x" RAM_ADDR_FMT
|
|
|
|
" is not aligned to %llu MiB",
|
|
|
|
machine->ram_size,
|
|
|
|
SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) {
|
|
|
|
error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT
|
|
|
|
" is not aligned to %llu MiB",
|
|
|
|
machine->ram_size,
|
|
|
|
SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
|
|
|
|
return;
|
2015-08-12 05:16:48 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < nb_numa_nodes; i++) {
|
|
|
|
if (numa_info[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
|
2016-01-25 12:46:47 +01:00
|
|
|
error_setg(errp,
|
|
|
|
"Node %d memory size 0x%" PRIx64
|
|
|
|
" is not aligned to %llu MiB",
|
|
|
|
i, numa_info[i].node_mem,
|
|
|
|
SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
|
|
|
|
return;
|
2015-08-12 05:16:48 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-10 11:18:49 +01:00
|
|
|
/* find cpu slot in machine->possible_cpus by core_id */
|
|
|
|
static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
|
|
|
|
{
|
|
|
|
int index = id / smp_threads;
|
|
|
|
|
|
|
|
if (index >= ms->possible_cpus->len) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
if (idx) {
|
|
|
|
*idx = index;
|
|
|
|
}
|
|
|
|
return &ms->possible_cpus->cpus[index];
|
|
|
|
}
|
|
|
|
|
2016-11-08 06:33:32 +01:00
|
|
|
/*
 * Create and realize the boot-time CPU cores, plus (on hotplug-capable
 * machine versions) one DR connector per possible core so further cores
 * can be plugged later.  Exits on any configuration error.
 */
static void spapr_init_cpus(sPAPRMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    char *type = spapr_get_cpu_core_type(machine->cpu_model);
    int smt = kvmppc_smt_threads();
    const CPUArchIdList *possible_cpus;
    int boot_cores_nr = smp_cpus / smp_threads;
    int i;

    if (!type) {
        error_report("Unable to find sPAPR CPU Core definition");
        exit(1);
    }

    possible_cpus = mc->possible_cpu_arch_ids(machine);
    if (mc->has_hotpluggable_cpus) {
        /* Hotplug granularity is a whole core, so both the boot and the
         * maximum CPU counts must be an exact number of cores. */
        if (smp_cpus % smp_threads) {
            error_report("smp_cpus (%u) must be multiple of threads (%u)",
                         smp_cpus, smp_threads);
            exit(1);
        }
        if (max_cpus % smp_threads) {
            error_report("max_cpus (%u) must be multiple of threads (%u)",
                         max_cpus, smp_threads);
            exit(1);
        }
    } else {
        /* Without hotplug every possible core must be present at boot */
        if (max_cpus != smp_cpus) {
            error_report("This machine version does not support CPU hotplug");
            exit(1);
        }
        boot_cores_nr = possible_cpus->len;
    }

    for (i = 0; i < possible_cpus->len; i++) {
        int core_id = i * smp_threads;

        if (mc->has_hotpluggable_cpus) {
            /* DRC index is the first thread id of the core, spaced by the
             * host SMT stride */
            spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
                                   (core_id / smp_threads) * smt);
        }

        /* Only the first boot_cores_nr cores exist at boot */
        if (i < boot_cores_nr) {
            Object *core = object_new(type);
            int nr_threads = smp_threads;

            /* Handle the partially filled core for older machine types */
            if ((i + 1) * smp_threads >= smp_cpus) {
                nr_threads = smp_cpus - i * smp_threads;
            }

            object_property_set_int(core, nr_threads, "nr-threads",
                                    &error_fatal);
            object_property_set_int(core, core_id, CPU_CORE_PROP_CORE_ID,
                                    &error_fatal);
            object_property_set_bool(core, true, "realized", &error_fatal);
        }
    }
    g_free(type);
}
|
|
|
|
|
2017-08-18 07:50:22 +02:00
|
|
|
/*
 * Work out the virtual SMT (VSMT) mode for the machine and, under KVM,
 * push it into the kernel.
 *
 * The mode comes either from the user ("vsmt" machine property, i.e.
 * spapr->vsmt already non-zero) or is derived as MAX(host SMT,
 * threads/core).  Errors are reported through @errp.
 */
static void spapr_set_vsmt_mode(sPAPRMachineState *spapr, Error **errp)
{
    Error *local_err = NULL;
    bool vsmt_user = !!spapr->vsmt;
    int kvm_smt = kvmppc_smt_threads();
    int ret;

    /* TCG has no SMT emulation: refuse multi-threaded cores outright */
    if (!kvm_enabled() && (smp_threads > 1)) {
        error_setg(&local_err, "TCG cannot support more than 1 thread/core "
                   "on a pseries machine");
        goto out;
    }
    if (!is_power_of_2(smp_threads)) {
        error_setg(&local_err, "Cannot support %d threads/core on a pseries "
                   "machine because it must be a power of 2", smp_threads);
        goto out;
    }

    /* Determine the VSMT mode to use: */
    if (vsmt_user) {
        if (spapr->vsmt < smp_threads) {
            error_setg(&local_err, "Cannot support VSMT mode %d"
                       " because it must be >= threads/core (%d)",
                       spapr->vsmt, smp_threads);
            goto out;
        }
        /* In this case, spapr->vsmt has been set by the command line */
    } else {
        /* Choose a VSMT mode that may be higher than necessary but is
         * likely to be compatible with hosts that don't have VSMT. */
        spapr->vsmt = MAX(kvm_smt, smp_threads);
    }

    /* KVM: If necessary, set the SMT mode: */
    if (kvm_enabled() && (spapr->vsmt != kvm_smt)) {
        ret = kvmppc_set_smt_threads(spapr->vsmt);
        if (ret) {
            error_setg(&local_err,
                       "Failed to set KVM's VSMT mode to %d (errno %d)",
                       spapr->vsmt, ret);
            if (!vsmt_user) {
                /* Fixed: the split format string used to render as
                 * "requires  the use" (doubled space) */
                error_append_hint(&local_err, "On PPC, a VM with %d threads/"
                                  "core on a host with %d threads/core "
                                  "requires the use of VSMT mode %d.\n",
                                  smp_threads, kvm_smt, spapr->vsmt);
            }
            kvmppc_hint_smt_possible(&local_err);
            goto out;
        }
    }
    /* else TCG: nothing to do currently */
out:
    error_propagate(errp, local_err);
}
|
|
|
|
|
2011-04-01 06:15:20 +02:00
|
|
|
/* pSeries LPAR / sPAPR hardware init */
/*
 * MachineClass::init for the pseries machine.  Order matters here:
 * the RMA must be sized before load_limit is derived from it, the
 * interrupt controller and OV5 option vectors must exist before CPUs
 * are created, and the firmware/kernel/initrd are loaded last.
 * Any failure exits the process.
 */
static void ppc_spapr_init(MachineState *machine)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    const char *kernel_filename = machine->kernel_filename;
    const char *initrd_filename = machine->initrd_filename;
    PCIHostState *phb;
    int i;
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_new(MemoryRegion, 1);
    MemoryRegion *rma_region;
    void *rma = NULL;
    hwaddr rma_alloc_size;
    hwaddr node0_size = spapr_node0_size(machine);
    long load_limit, fw_size;
    char *filename;
    Error *resize_hpt_err = NULL;

    msi_nonbroken = true;

    QLIST_INIT(&spapr->phbs);
    QTAILQ_INIT(&spapr->pending_dimm_unplugs);

    /* Check HPT resizing availability */
    kvmppc_check_papr_resize_hpt(&resize_hpt_err);
    if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) {
        /*
         * If the user explicitly requested a mode we should either
         * supply it, or fail completely (which we do below). But if
         * it's not set explicitly, we reset our mode to something
         * that works
         */
        if (resize_hpt_err) {
            spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
            error_free(resize_hpt_err);
            resize_hpt_err = NULL;
        } else {
            spapr->resize_hpt = smc->resize_hpt_default;
        }
    }

    assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT);

    if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) {
        /*
         * User requested HPT resize, but this host can't supply it. Bail out
         */
        error_report_err(resize_hpt_err);
        exit(1);
    }

    /* Allocate RMA if necessary */
    rma_alloc_size = kvmppc_alloc_rma(&rma);

    if (rma_alloc_size == -1) {
        error_report("Unable to create RMA");
        exit(1);
    }

    /* Prefer the host-allocated RMA when it is smaller than node 0;
     * otherwise derive the RMA size from node 0's memory. */
    if (rma_alloc_size && (rma_alloc_size < node0_size)) {
        spapr->rma_size = rma_alloc_size;
    } else {
        spapr->rma_size = node0_size;

        /* With KVM, we don't actually know whether KVM supports an
         * unbounded RMA (PR KVM) or is limited by the hash table size
         * (HV KVM using VRMA), so we always assume the latter
         *
         * In that case, we also limit the initial allocations for RTAS
         * etc... to 256M since we have no way to know what the VRMA size
         * is going to be as it depends on the size of the hash table
         * isn't determined yet.
         */
        if (kvm_enabled()) {
            spapr->vrma_adjust = 1;
            spapr->rma_size = MIN(spapr->rma_size, 0x10000000);
        }

        /* Actually we don't support unbounded RMA anymore since we
         * added proper emulation of HV mode. The max we can get is
         * 16G which also happens to be what we configure for PAPR
         * mode so make sure we don't do anything bigger than that
         */
        spapr->rma_size = MIN(spapr->rma_size, 0x400000000ull);
    }

    if (spapr->rma_size > node0_size) {
        error_report("Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")",
                     spapr->rma_size);
        exit(1);
    }

    /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */
    load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD;

    /* Set up Interrupt Controller before we create the VCPUs */
    xics_system_init(machine, XICS_IRQS_SPAPR, &error_fatal);

    /* Set up containers for ibm,client-set-architecture negotiated options */
    spapr->ov5 = spapr_ovec_new();
    spapr->ov5_cas = spapr_ovec_new();

    if (smc->dr_lmb_enabled) {
        spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
        spapr_validate_node_memory(machine, &error_fatal);
    }

    spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY);
    if (!kvm_enabled() || kvmppc_has_cap_mmu_radix()) {
        /* KVM and TCG always allow GTSE with radix... */
        spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE);
    }
    /* ... but not with hash (currently). */

    /* advertise support for dedicated HP event source to guests */
    if (spapr->use_hotplug_event_source) {
        spapr_ovec_set(spapr->ov5, OV5_HP_EVT);
    }

    /* advertise support for HPT resizing */
    if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
        spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE);
    }

    /* init CPUs */
    if (machine->cpu_model == NULL) {
        machine->cpu_model = kvm_enabled() ? "host" : smc->tcg_default_cpu;
    }

    spapr_cpu_parse_features(spapr);

    spapr_set_vsmt_mode(spapr, &error_fatal);

    spapr_init_cpus(spapr);

    if (kvm_enabled()) {
        /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
        kvmppc_enable_logical_ci_hcalls();
        kvmppc_enable_set_mode_hcall();

        /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */
        kvmppc_enable_clear_ref_mod_hcalls();
    }

    /* allocate RAM */
    memory_region_allocate_system_memory(ram, NULL, "ppc_spapr.ram",
                                         machine->ram_size);
    memory_region_add_subregion(sysmem, 0, ram);

    /* Overlay the host-allocated RMA (if any) over the start of RAM */
    if (rma_alloc_size && rma) {
        rma_region = g_new(MemoryRegion, 1);
        memory_region_init_ram_ptr(rma_region, NULL, "ppc_spapr.rma",
                                   rma_alloc_size, rma);
        vmstate_register_ram_global(rma_region);
        memory_region_add_subregion(sysmem, 0, rma_region);
    }

    /* initialize hotplug memory address space */
    if (machine->ram_size < machine->maxram_size) {
        ram_addr_t hotplug_mem_size = machine->maxram_size - machine->ram_size;
        /*
         * Limit the number of hotpluggable memory slots to half the number
         * slots that KVM supports, leaving the other half for PCI and other
         * devices. However ensure that number of slots doesn't drop below 32.
         */
        int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2 :
                           SPAPR_MAX_RAM_SLOTS;

        if (max_memslots < SPAPR_MAX_RAM_SLOTS) {
            max_memslots = SPAPR_MAX_RAM_SLOTS;
        }
        if (machine->ram_slots > max_memslots) {
            error_report("Specified number of memory slots %"
                         PRIu64" exceeds max supported %d",
                         machine->ram_slots, max_memslots);
            exit(1);
        }

        spapr->hotplug_memory.base = ROUND_UP(machine->ram_size,
                                              SPAPR_HOTPLUG_MEM_ALIGN);
        memory_region_init(&spapr->hotplug_memory.mr, OBJECT(spapr),
                           "hotplug-memory", hotplug_mem_size);
        memory_region_add_subregion(sysmem, spapr->hotplug_memory.base,
                                    &spapr->hotplug_memory.mr);
    }

    if (smc->dr_lmb_enabled) {
        spapr_create_lmb_dr_connectors(spapr);
    }

    /* Load the RTAS blob into QEMU memory; it is copied into guest
     * memory later, at reset time. */
    filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "spapr-rtas.bin");
    if (!filename) {
        error_report("Could not find LPAR rtas '%s'", "spapr-rtas.bin");
        exit(1);
    }
    spapr->rtas_size = get_image_size(filename);
    if (spapr->rtas_size < 0) {
        error_report("Could not get size of LPAR rtas '%s'", filename);
        exit(1);
    }
    spapr->rtas_blob = g_malloc(spapr->rtas_size);
    if (load_image_size(filename, spapr->rtas_blob, spapr->rtas_size) < 0) {
        error_report("Could not load LPAR rtas '%s'", filename);
        exit(1);
    }
    /* NOTE(review): this size check runs after the blob was already
     * allocated and loaded; the load is bounded by rtas_size, so this
     * only rejects oversized images rather than preventing overflow. */
    if (spapr->rtas_size > RTAS_MAX_SIZE) {
        error_report("RTAS too big ! 0x%zx bytes (max is 0x%x)",
                     (size_t)spapr->rtas_size, RTAS_MAX_SIZE);
        exit(1);
    }
    g_free(filename);

    /* Set up RTAS event infrastructure */
    spapr_events_init(spapr);

    /* Set up the RTC RTAS interfaces */
    spapr_rtc_create(spapr);

    /* Set up VIO bus */
    spapr->vio_bus = spapr_vio_bus_init();

    /* One VTY per configured serial port */
    for (i = 0; i < MAX_SERIAL_PORTS; i++) {
        if (serial_hds[i]) {
            spapr_vty_create(spapr->vio_bus, serial_hds[i]);
        }
    }

    /* We always have at least the nvram device on VIO */
    spapr_create_nvram(spapr);

    /* Set up PCI */
    spapr_pci_rtas_init();

    phb = spapr_create_phb(spapr, 0);

    /* NICs default to the para-virtualized "ibmveth" VIO device;
     * anything else goes on the PCI bus. */
    for (i = 0; i < nb_nics; i++) {
        NICInfo *nd = &nd_table[i];

        if (!nd->model) {
            nd->model = g_strdup("ibmveth");
        }

        if (strcmp(nd->model, "ibmveth") == 0) {
            spapr_vlan_create(spapr->vio_bus, nd);
        } else {
            pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL);
        }
    }

    for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
        spapr_vscsi_create(spapr->vio_bus);
    }

    /* Graphics */
    if (spapr_vga_init(phb->bus, &error_fatal)) {
        spapr->has_graphics = true;
        machine->usb |= defaults_enabled() && !machine->usb_disabled;
    }

    if (machine->usb) {
        if (smc->use_ohci_by_default) {
            pci_create_simple(phb->bus, -1, "pci-ohci");
        } else {
            pci_create_simple(phb->bus, -1, "nec-usb-xhci");
        }

        /* With graphics, also give the guest a USB keyboard and mouse */
        if (spapr->has_graphics) {
            USBBus *usb_bus = usb_bus_find(-1);

            usb_create_simple(usb_bus, "usb-kbd");
            usb_create_simple(usb_bus, "usb-mouse");
        }
    }

    if (spapr->rma_size < (MIN_RMA_SLOF << 20)) {
        error_report(
            "pSeries SLOF firmware requires >= %ldM guest RMA (Real Mode Area memory)",
            MIN_RMA_SLOF);
        exit(1);
    }

    if (kernel_filename) {
        uint64_t lowaddr = 0;

        /* Try big-endian ELF first, then retry little-endian */
        spapr->kernel_size = load_elf(kernel_filename, translate_kernel_address,
                                      NULL, NULL, &lowaddr, NULL, 1,
                                      PPC_ELF_MACHINE, 0, 0);
        if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) {
            spapr->kernel_size = load_elf(kernel_filename,
                                          translate_kernel_address, NULL, NULL,
                                          &lowaddr, NULL, 0, PPC_ELF_MACHINE,
                                          0, 0);
            spapr->kernel_le = spapr->kernel_size > 0;
        }
        if (spapr->kernel_size < 0) {
            error_report("error loading %s: %s", kernel_filename,
                         load_elf_strerror(spapr->kernel_size));
            exit(1);
        }

        /* load initrd */
        if (initrd_filename) {
            /* Try to locate the initrd in the gap between the kernel
             * and the firmware. Add a bit of space just in case
             */
            spapr->initrd_base = (KERNEL_LOAD_ADDR + spapr->kernel_size
                                  + 0x1ffff) & ~0xffff;
            spapr->initrd_size = load_image_targphys(initrd_filename,
                                                     spapr->initrd_base,
                                                     load_limit
                                                     - spapr->initrd_base);
            if (spapr->initrd_size < 0) {
                error_report("could not load initial ram disk '%s'",
                             initrd_filename);
                exit(1);
            }
        }
    }

    if (bios_name == NULL) {
        bios_name = FW_FILE_NAME;
    }
    filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
    if (!filename) {
        error_report("Could not find LPAR firmware '%s'", bios_name);
        exit(1);
    }
    fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
    if (fw_size <= 0) {
        error_report("Could not load LPAR firmware '%s'", filename);
        exit(1);
    }
    g_free(filename);

    /* FIXME: Should register things through the MachineState's qdev
     * interface, this is a legacy from the sPAPREnvironment structure
     * which predated MachineState but had a similar function */
    vmstate_register(NULL, 0, &vmstate_spapr, spapr);
    register_savevm_live(NULL, "spapr/htab", -1, 1,
                         &savevm_htab_handlers, spapr);

    qemu_register_boot_set(spapr_boot_set, spapr);

    if (kvm_enabled()) {
        /* to stop and start vmclock */
        qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change,
                                         &spapr->tb);

        kvmppc_spapr_enable_inkernel_multitce();
    }
}
|
|
|
|
|
2013-12-23 16:40:40 +01:00
|
|
|
/*
 * Map the -machine kvm-type option string to the numeric VM type
 * passed to KVM: 0 = default, 1 = "HV", 2 = "PR".  Any other string
 * is a fatal configuration error.
 */
static int spapr_kvm_type(const char *vm_type)
{
    static const struct {
        const char *name;
        int type;
    } kvm_types[] = {
        { "HV", 1 },
        { "PR", 2 },
    };
    size_t n;

    if (!vm_type) {
        return 0;
    }

    for (n = 0; n < sizeof(kvm_types) / sizeof(kvm_types[0]); n++) {
        if (strcmp(vm_type, kvm_types[n].name) == 0) {
            return kvm_types[n].type;
        }
    }

    error_report("Unknown kvm-type specified '%s'", vm_type);
    exit(1);
}
|
|
|
|
|
2014-03-17 03:40:27 +01:00
|
|
|
/*
|
2015-01-19 04:45:12 +01:00
|
|
|
* Implementation of an interface to adjust firmware path
|
2014-03-17 03:40:27 +01:00
|
|
|
* for the bootindex property handling.
|
|
|
|
*/
|
|
|
|
static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
|
|
|
|
DeviceState *dev)
|
|
|
|
{
|
|
|
|
#define CAST(type, obj, name) \
|
|
|
|
((type *)object_dynamic_cast(OBJECT(obj), (name)))
|
|
|
|
SCSIDevice *d = CAST(SCSIDevice, dev, TYPE_SCSI_DEVICE);
|
|
|
|
sPAPRPHBState *phb = CAST(sPAPRPHBState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
|
2017-06-05 17:55:18 +02:00
|
|
|
VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON);
|
2014-03-17 03:40:27 +01:00
|
|
|
|
|
|
|
if (d) {
|
|
|
|
void *spapr = CAST(void, bus->parent, "spapr-vscsi");
|
|
|
|
VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
|
|
|
|
USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);
|
|
|
|
|
|
|
|
if (spapr) {
|
|
|
|
/*
|
|
|
|
* Replace "channel@0/disk@0,0" with "disk@8000000000000000":
|
|
|
|
* We use SRP luns of the form 8000 | (bus << 8) | (id << 5) | lun
|
|
|
|
* in the top 16 bits of the 64-bit LUN
|
|
|
|
*/
|
|
|
|
unsigned id = 0x8000 | (d->id << 8) | d->lun;
|
|
|
|
return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
|
|
|
|
(uint64_t)id << 48);
|
|
|
|
} else if (virtio) {
|
|
|
|
/*
|
|
|
|
* We use SRP luns of the form 01000000 | (target << 8) | lun
|
|
|
|
* in the top 32 bits of the 64-bit LUN
|
|
|
|
* Note: the quote above is from SLOF and it is wrong,
|
|
|
|
* the actual binding is:
|
|
|
|
* swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
|
|
|
|
*/
|
|
|
|
unsigned id = 0x1000000 | (d->id << 16) | d->lun;
|
|
|
|
return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
|
|
|
|
(uint64_t)id << 32);
|
|
|
|
} else if (usb) {
|
|
|
|
/*
|
|
|
|
* We use SRP luns of the form 01000000 | (usb-port << 16) | lun
|
|
|
|
* in the top 32 bits of the 64-bit LUN
|
|
|
|
*/
|
|
|
|
unsigned usb_port = atoi(usb->port->path);
|
|
|
|
unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
|
|
|
|
return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
|
|
|
|
(uint64_t)id << 32);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
hw/ppc/spapr: Fix boot path of usb-host storage devices
When passing through an USB storage device to a pseries guest, it
is currently not possible to automatically boot from the device
if the "bootindex" property has been specified, too (e.g. when using
"-device nec-usb-xhci -device usb-host,hostbus=1,hostaddr=2,bootindex=0"
at the command line). The problem is that QEMU builds a device tree path
like "/pci@800000020000000/usb@0/usb-host@1" and passes it to SLOF
in the /chosen/qemu,boot-list property. SLOF, however, probes the
USB device, recognizes that it is a storage device and thus changes
its name to "storage", and additionally adds a child node for the
SCSI LUN, so the correct boot path in SLOF is something like
"/pci@800000020000000/usb@0/storage@1/disk@101000000000000" instead.
So when we detect an USB mass storage device with SCSI interface,
we've got to adjust the firmware boot-device path properly that
SLOF can automatically boot from the device.
Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=1354177
Signed-off-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2016-12-14 22:44:17 +01:00
|
|
|
/*
|
|
|
|
* SLOF probes the USB devices, and if it recognizes that the device is a
|
|
|
|
* storage device, it changes its name to "storage" instead of "usb-host",
|
|
|
|
* and additionally adds a child node for the SCSI LUN, so the correct
|
|
|
|
* boot path in SLOF is something like .../storage@1/disk@xxx" instead.
|
|
|
|
*/
|
|
|
|
if (strcmp("usb-host", qdev_fw_name(dev)) == 0) {
|
|
|
|
USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE);
|
|
|
|
if (usb_host_dev_is_scsi_storage(usbdev)) {
|
|
|
|
return g_strdup_printf("storage@%s/disk", usbdev->port->path);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-03-17 03:40:27 +01:00
|
|
|
if (phb) {
|
|
|
|
/* Replace "pci" with "pci@800000020000000" */
|
|
|
|
return g_strdup_printf("pci@%"PRIX64, phb->buid);
|
|
|
|
}
|
|
|
|
|
2017-06-05 17:55:18 +02:00
|
|
|
if (vsc) {
|
|
|
|
/* Same logic as virtio above */
|
|
|
|
unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun;
|
|
|
|
return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32);
|
|
|
|
}
|
|
|
|
|
2017-06-07 10:20:27 +02:00
|
|
|
if (g_str_equal("pci-bridge", qdev_fw_name(dev))) {
|
|
|
|
/* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */
|
|
|
|
PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
|
|
|
|
return g_strdup_printf("pci@%x", PCI_SLOT(pcidev->devfn));
|
|
|
|
}
|
|
|
|
|
2014-03-17 03:40:27 +01:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2014-05-30 23:24:32 +02:00
|
|
|
/* QOM getter for the "kvm-type" machine property; returns a fresh copy. */
static char *spapr_get_kvm_type(Object *obj, Error **errp)
{
    sPAPRMachineState *sm = SPAPR_MACHINE(obj);

    return g_strdup(sm->kvm_type);
}
|
|
|
|
|
|
|
|
/* QOM setter for "kvm-type": replaces any previously stored value. */
static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
{
    sPAPRMachineState *sm = SPAPR_MACHINE(obj);

    g_free(sm->kvm_type);
    sm->kvm_type = g_strdup(value);
}
|
|
|
|
|
spapr: add hotplug interrupt machine options
This adds machine options of the form:
-machine pseries,modern-hotplug-events=true
-machine pseries,modern-hotplug-events=false
If false, QEMU will force the use of "legacy" style hotplug events,
which are surfaced through EPOW events instead of a dedicated
hot plug event source, and lack certain features necessary, mainly,
for memory unplug support.
If true, QEMU will enable support for "modern" dedicated hot plug
event source. Note that we will still default to "legacy" style unless
the guest advertises support for the "modern" hotplug events via
ibm,client-architecture-support hcall during early boot.
For pseries-2.7 and earlier we default to false, for newer machine
types we default to true.
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2016-10-27 04:20:27 +02:00
|
|
|
/* QOM getter for the "modern-hotplug-events" machine property. */
static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp)
{
    sPAPRMachineState *sm = SPAPR_MACHINE(obj);

    return sm->use_hotplug_event_source;
}
|
|
|
|
|
|
|
|
static void spapr_set_modern_hotplug_events(Object *obj, bool value,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
|
|
|
|
|
|
|
|
spapr->use_hotplug_event_source = value;
|
|
|
|
}
|
|
|
|
|
2017-05-12 07:46:11 +02:00
|
|
|
/*
 * QOM getter for "resize-hpt": renders the enum as the user-visible
 * option string.  All enum values are covered; reaching the end means
 * the field was corrupted, hence the hard assertion.
 */
static char *spapr_get_resize_hpt(Object *obj, Error **errp)
{
    sPAPRMachineState *sm = SPAPR_MACHINE(obj);

    switch (sm->resize_hpt) {
    case SPAPR_RESIZE_HPT_DEFAULT:
        return g_strdup("default");
    case SPAPR_RESIZE_HPT_DISABLED:
        return g_strdup("disabled");
    case SPAPR_RESIZE_HPT_ENABLED:
        return g_strdup("enabled");
    case SPAPR_RESIZE_HPT_REQUIRED:
        return g_strdup("required");
    }
    g_assert_not_reached();
}
|
|
|
|
|
|
|
|
/*
 * QOM setter for "resize-hpt": parses the option string back into the
 * enum; any unrecognized value is reported through errp.
 */
static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp)
{
    sPAPRMachineState *sm = SPAPR_MACHINE(obj);

    if (strcmp(value, "default") == 0) {
        sm->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT;
    } else if (strcmp(value, "disabled") == 0) {
        sm->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
    } else if (strcmp(value, "enabled") == 0) {
        sm->resize_hpt = SPAPR_RESIZE_HPT_ENABLED;
    } else if (strcmp(value, "required") == 0) {
        sm->resize_hpt = SPAPR_RESIZE_HPT_REQUIRED;
    } else {
        error_setg(errp, "Bad value for \"resize-hpt\" property");
    }
}
|
|
|
|
|
2017-08-18 07:50:22 +02:00
|
|
|
/* Visitor-based getter for "vsmt"; opaque points at spapr->vsmt. */
static void spapr_get_vsmt(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    visit_type_uint32(v, name, (uint32_t *)opaque, errp);
}
|
|
|
|
|
|
|
|
/* Visitor-based setter for "vsmt"; writes through opaque (spapr->vsmt). */
static void spapr_set_vsmt(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    visit_type_uint32(v, name, (uint32_t *)opaque, errp);
}
|
|
|
|
|
2014-05-30 23:24:32 +02:00
|
|
|
/*
 * Instance init for the pseries machine object: set field defaults and
 * register the machine's QOM properties with their descriptions.
 */
static void spapr_machine_initfn(Object *obj)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    /* No HTAB file descriptor until migration opens one. */
    spapr->htab_fd = -1;
    spapr->use_hotplug_event_source = true;

    object_property_add_str(obj, "kvm-type",
                            spapr_get_kvm_type, spapr_set_kvm_type, NULL);
    object_property_set_description(obj, "kvm-type",
                                    "Specifies the KVM virtualization mode (HV, PR)",
                                    NULL);

    object_property_add_bool(obj, "modern-hotplug-events",
                            spapr_get_modern_hotplug_events,
                            spapr_set_modern_hotplug_events,
                            NULL);
    object_property_set_description(obj, "modern-hotplug-events",
                                    "Use dedicated hotplug event mechanism in"
                                    " place of standard EPOW events when possible"
                                    " (required for memory hot-unplug support)",
                                    NULL);

    ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr,
                            "Maximum permitted CPU compatibility mode",
                            &error_fatal);

    object_property_add_str(obj, "resize-hpt",
                            spapr_get_resize_hpt, spapr_set_resize_hpt, NULL);
    object_property_set_description(obj, "resize-hpt",
                                    "Resizing of the Hash Page Table (enabled, disabled, required)",
                                    NULL);

    object_property_add(obj, "vsmt", "uint32", spapr_get_vsmt,
                        spapr_set_vsmt, NULL, &spapr->vsmt, &error_abort);
    object_property_set_description(obj, "vsmt",
                                    "Virtual SMT: KVM behaves as if this were"
                                    " the host's SMT mode", &error_abort);
}
|
|
|
|
|
2015-12-28 07:38:26 +01:00
|
|
|
/* Instance finalizer: release the heap-allocated "kvm-type" string. */
static void spapr_machine_finalizefn(Object *obj)
{
    sPAPRMachineState *sm = SPAPR_MACHINE(obj);

    g_free(sm->kvm_type);
}
|
|
|
|
|
2016-12-05 06:50:21 +01:00
|
|
|
/*
 * Per-CPU work item: sync register state from the accelerator, then
 * deliver a system reset exception to the vCPU.
 */
void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg)
{
    cpu_synchronize_state(cs);
    ppc_cpu_do_system_reset(cs);
}
|
|
|
|
|
|
|
|
/*
 * NMI handler: queue a system reset on every vCPU asynchronously.
 * Note cpu_index is ignored — the reset is broadcast to all CPUs.
 */
static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
    }
}
|
|
|
|
|
2016-10-27 04:20:29 +02:00
|
|
|
static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
|
|
|
|
uint32_t node, bool dedicated_hp_event_source,
|
|
|
|
Error **errp)
|
2015-09-01 03:22:35 +02:00
|
|
|
{
|
|
|
|
sPAPRDRConnector *drc;
|
|
|
|
uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE;
|
|
|
|
int i, fdt_offset, fdt_size;
|
|
|
|
void *fdt;
|
2016-10-27 04:20:29 +02:00
|
|
|
uint64_t addr = addr_start;
|
spapr: Treat devices added before inbound migration as coldplugged
When migrating a guest which has already had devices hotplugged,
libvirt typically starts the destination qemu with -incoming defer,
adds those hotplugged devices with qmp, then initiates the incoming
migration.
This causes problems for the management of spapr DRC state. Because
the device is treated as hotplugged, it goes into a DRC state for a
device immediately after it's plugged, but before the guest has
acknowledged its presence. However, chances are the guest on the
source machine *has* acknowledged the device's presence and configured
it.
If the source has fully configured the device, then DRC state won't be
sent in the migration stream: for maximum migration compatibility with
earlier versions we don't migrate DRCs in coldplug-equivalent state.
That means that the DRC effectively changes state over the migrate,
causing problems later on.
In addition, logging hotplug events for these devices isn't what we
want because a) those events should already have been issued on the
source host and b) the event queue should get wiped out by the
incoming state anyway.
In short, what we really want is to treat devices added before an
incoming migration as if they were coldplugged.
To do this, we first add a spapr_drc_hotplugged() helper which
determines if the device is hotplugged in the sense relevant for DRC
state management. We only send hotplug events when this is true.
Second, when we add a device which isn't hotplugged in this sense, we
force a reset of the DRC state - this ensures the DRC is in a
coldplug-equivalent state (there isn't usually a system reset between
these device adds and the incoming migration).
This is based on an earlier patch by Laurent Vivier, cleaned up and
extended.
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Greg Kurz <groug@kaod.org>
Tested-by: Daniel Barboza <danielhb@linux.vnet.ibm.com>
2017-06-09 13:08:10 +02:00
|
|
|
bool hotplugged = spapr_drc_hotplugged(dev);
|
2017-07-03 16:54:36 +02:00
|
|
|
Error *local_err = NULL;
|
2015-09-01 03:22:35 +02:00
|
|
|
|
|
|
|
for (i = 0; i < nr_lmbs; i++) {
|
2017-06-04 12:26:03 +02:00
|
|
|
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
|
|
|
|
addr / SPAPR_MEMORY_BLOCK_SIZE);
|
2015-09-01 03:22:35 +02:00
|
|
|
g_assert(drc);
|
|
|
|
|
|
|
|
fdt = create_device_tree(&fdt_size);
|
|
|
|
fdt_offset = spapr_populate_memory_node(fdt, node, addr,
|
|
|
|
SPAPR_MEMORY_BLOCK_SIZE);
|
|
|
|
|
2017-07-03 16:54:36 +02:00
|
|
|
spapr_drc_attach(drc, dev, fdt, fdt_offset, &local_err);
|
|
|
|
if (local_err) {
|
|
|
|
while (addr > addr_start) {
|
|
|
|
addr -= SPAPR_MEMORY_BLOCK_SIZE;
|
|
|
|
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
|
|
|
|
addr / SPAPR_MEMORY_BLOCK_SIZE);
|
2017-07-04 13:07:14 +02:00
|
|
|
spapr_drc_detach(drc);
|
2017-07-03 16:54:36 +02:00
|
|
|
}
|
|
|
|
g_free(fdt);
|
|
|
|
error_propagate(errp, local_err);
|
|
|
|
return;
|
|
|
|
}
|
spapr: Treat devices added before inbound migration as coldplugged
When migrating a guest which has already had devices hotplugged,
libvirt typically starts the destination qemu with -incoming defer,
adds those hotplugged devices with qmp, then initiates the incoming
migration.
This causes problems for the management of spapr DRC state. Because
the device is treated as hotplugged, it goes into a DRC state for a
device immediately after it's plugged, but before the guest has
acknowledged its presence. However, chances are the guest on the
source machine *has* acknowledged the device's presence and configured
it.
If the source has fully configured the device, then DRC state won't be
sent in the migration stream: for maximum migration compatibility with
earlier versions we don't migrate DRCs in coldplug-equivalent state.
That means that the DRC effectively changes state over the migrate,
causing problems later on.
In addition, logging hotplug events for these devices isn't what we
want because a) those events should already have been issued on the
source host and b) the event queue should get wiped out by the
incoming state anyway.
In short, what we really want is to treat devices added before an
incoming migration as if they were coldplugged.
To do this, we first add a spapr_drc_hotplugged() helper which
determines if the device is hotplugged in the sense relevant for DRC
state management. We only send hotplug events when this is true.
Second, when we add a device which isn't hotplugged in this sense, we
force a reset of the DRC state - this ensures the DRC is in a
coldplug-equivalent state (there isn't usually a system reset between
these device adds and the incoming migration).
This is based on an earlier patch by Laurent Vivier, cleaned up and
extended.
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Greg Kurz <groug@kaod.org>
Tested-by: Daniel Barboza <danielhb@linux.vnet.ibm.com>
2017-06-09 13:08:10 +02:00
|
|
|
if (!hotplugged) {
|
|
|
|
spapr_drc_reset(drc);
|
|
|
|
}
|
2015-09-01 03:22:35 +02:00
|
|
|
addr += SPAPR_MEMORY_BLOCK_SIZE;
|
|
|
|
}
|
2016-05-24 19:55:04 +02:00
|
|
|
/* send hotplug notification to the
|
|
|
|
* guest only in case of hotplugged memory
|
|
|
|
*/
|
spapr: Treat devices added before inbound migration as coldplugged
When migrating a guest which has already had devices hotplugged,
libvirt typically starts the destination qemu with -incoming defer,
adds those hotplugged devices with qmp, then initiates the incoming
migration.
This causes problems for the management of spapr DRC state. Because
the device is treated as hotplugged, it goes into a DRC state for a
device immediately after it's plugged, but before the guest has
acknowledged its presence. However, chances are the guest on the
source machine *has* acknowledged the device's presence and configured
it.
If the source has fully configured the device, then DRC state won't be
sent in the migration stream: for maximum migration compatibility with
earlier versions we don't migrate DRCs in coldplug-equivalent state.
That means that the DRC effectively changes state over the migrate,
causing problems later on.
In addition, logging hotplug events for these devices isn't what we
want because a) those events should already have been issued on the
source host and b) the event queue should get wiped out by the
incoming state anyway.
In short, what we really want is to treat devices added before an
incoming migration as if they were coldplugged.
To do this, we first add a spapr_drc_hotplugged() helper which
determines if the device is hotplugged in the sense relevant for DRC
state management. We only send hotplug events when this is true.
Second, when we add a device which isn't hotplugged in this sense, we
force a reset of the DRC state - this ensures the DRC is in a
coldplug-equivalent state (there isn't usually a system reset between
these device adds and the incoming migration).
This is based on an earlier patch by Laurent Vivier, cleaned up and
extended.
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Greg Kurz <groug@kaod.org>
Tested-by: Daniel Barboza <danielhb@linux.vnet.ibm.com>
2017-06-09 13:08:10 +02:00
|
|
|
if (hotplugged) {
|
2016-10-27 04:20:29 +02:00
|
|
|
if (dedicated_hp_event_source) {
|
2017-06-04 12:26:03 +02:00
|
|
|
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
|
|
|
|
addr_start / SPAPR_MEMORY_BLOCK_SIZE);
|
2016-10-27 04:20:29 +02:00
|
|
|
spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
|
|
|
|
nr_lmbs,
|
2017-06-02 05:49:20 +02:00
|
|
|
spapr_drc_index(drc));
|
2016-10-27 04:20:29 +02:00
|
|
|
} else {
|
|
|
|
spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
|
|
|
|
nr_lmbs);
|
|
|
|
}
|
2016-05-24 19:55:04 +02:00
|
|
|
}
|
2015-09-01 03:22:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Hotplug-handler plug callback for pc-dimm devices: map the DIMM's
 * memory region into hotplug memory, then attach its LMB DR connectors.
 * Uses goto-cleanup: failure after pc_dimm_memory_plug() must unplug
 * the region again before propagating the error.
 */
static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                              uint32_t node, Error **errp)
{
    Error *local_err = NULL;
    sPAPRMachineState *ms = SPAPR_MACHINE(hotplug_dev);
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr;
    uint64_t align, size, addr;

    mr = ddc->get_memory_region(dimm, &local_err);
    if (local_err) {
        goto out;
    }
    align = memory_region_get_alignment(mr);
    size = memory_region_size(mr);

    pc_dimm_memory_plug(dev, &ms->hotplug_memory, mr, align, &local_err);
    if (local_err) {
        goto out;
    }

    addr = object_property_get_uint(OBJECT(dimm),
                                    PC_DIMM_ADDR_PROP, &local_err);
    if (local_err) {
        goto out_unplug;
    }

    spapr_add_lmbs(dev, addr, size, node,
                   spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT),
                   &local_err);
    if (local_err) {
        goto out_unplug;
    }

    return;

out_unplug:
    pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
out:
    error_propagate(errp, local_err);
}
|
|
|
|
|
2017-05-23 13:18:09 +02:00
|
|
|
static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
PCDIMMDevice *dimm = PC_DIMM(dev);
|
|
|
|
PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
|
2017-08-21 08:30:29 +02:00
|
|
|
MemoryRegion *mr;
|
|
|
|
uint64_t size;
|
2017-05-23 13:18:09 +02:00
|
|
|
char *mem_dev;
|
|
|
|
|
2017-08-21 08:30:29 +02:00
|
|
|
mr = ddc->get_memory_region(dimm, errp);
|
|
|
|
if (!mr) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
size = memory_region_size(mr);
|
|
|
|
|
2017-05-23 13:18:09 +02:00
|
|
|
if (size % SPAPR_MEMORY_BLOCK_SIZE) {
|
|
|
|
error_setg(errp, "Hotplugged memory size must be a multiple of "
|
|
|
|
"%lld MB", SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
mem_dev = object_property_get_str(OBJECT(dimm), PC_DIMM_MEMDEV_PROP, NULL);
|
|
|
|
if (mem_dev && !kvmppc_is_mem_backend_page_size_ok(mem_dev)) {
|
|
|
|
error_setg(errp, "Memory backend has bad page size. "
|
|
|
|
"Use 'memory-backend-file' with correct mem-path.");
|
2017-06-06 17:22:58 +02:00
|
|
|
goto out;
|
2017-05-23 13:18:09 +02:00
|
|
|
}
|
2017-06-06 17:22:58 +02:00
|
|
|
|
|
|
|
out:
|
|
|
|
g_free(mem_dev);
|
2017-05-23 13:18:09 +02:00
|
|
|
}
|
|
|
|
|
2017-05-24 09:01:48 +02:00
|
|
|
struct sPAPRDIMMState {
|
|
|
|
PCDIMMDevice *dimm;
|
2016-10-27 04:20:30 +02:00
|
|
|
uint32_t nr_lmbs;
|
2017-05-24 09:01:48 +02:00
|
|
|
QTAILQ_ENTRY(sPAPRDIMMState) next;
|
|
|
|
};
|
|
|
|
|
|
|
|
static sPAPRDIMMState *spapr_pending_dimm_unplugs_find(sPAPRMachineState *s,
|
|
|
|
PCDIMMDevice *dimm)
|
|
|
|
{
|
|
|
|
sPAPRDIMMState *dimm_state = NULL;
|
|
|
|
|
|
|
|
QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) {
|
|
|
|
if (dimm_state->dimm == dimm) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return dimm_state;
|
|
|
|
}
|
|
|
|
|
2017-07-21 06:51:06 +02:00
|
|
|
static sPAPRDIMMState *spapr_pending_dimm_unplugs_add(sPAPRMachineState *spapr,
|
|
|
|
uint32_t nr_lmbs,
|
|
|
|
PCDIMMDevice *dimm)
|
2017-05-24 09:01:48 +02:00
|
|
|
{
|
2017-07-21 06:51:06 +02:00
|
|
|
sPAPRDIMMState *ds = NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If this request is for a DIMM whose removal had failed earlier
|
|
|
|
* (due to guest's refusal to remove the LMBs), we would have this
|
|
|
|
* dimm already in the pending_dimm_unplugs list. In that
|
|
|
|
* case don't add again.
|
|
|
|
*/
|
|
|
|
ds = spapr_pending_dimm_unplugs_find(spapr, dimm);
|
|
|
|
if (!ds) {
|
|
|
|
ds = g_malloc0(sizeof(sPAPRDIMMState));
|
|
|
|
ds->nr_lmbs = nr_lmbs;
|
|
|
|
ds->dimm = dimm;
|
|
|
|
QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, ds, next);
|
|
|
|
}
|
|
|
|
return ds;
|
2017-05-24 09:01:48 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void spapr_pending_dimm_unplugs_remove(sPAPRMachineState *spapr,
|
|
|
|
sPAPRDIMMState *dimm_state)
|
|
|
|
{
|
|
|
|
QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next);
|
|
|
|
g_free(dimm_state);
|
|
|
|
}
|
2016-10-27 04:20:30 +02:00
|
|
|
|
2017-05-22 21:35:50 +02:00
|
|
|
static sPAPRDIMMState *spapr_recover_pending_dimm_state(sPAPRMachineState *ms,
|
|
|
|
PCDIMMDevice *dimm)
|
|
|
|
{
|
|
|
|
sPAPRDRConnector *drc;
|
|
|
|
PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
|
2017-08-21 08:30:29 +02:00
|
|
|
MemoryRegion *mr = ddc->get_memory_region(dimm, &error_abort);
|
2017-05-22 21:35:50 +02:00
|
|
|
uint64_t size = memory_region_size(mr);
|
|
|
|
uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
|
|
|
|
uint32_t avail_lmbs = 0;
|
|
|
|
uint64_t addr_start, addr;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
addr_start = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP,
|
|
|
|
&error_abort);
|
|
|
|
|
|
|
|
addr = addr_start;
|
|
|
|
for (i = 0; i < nr_lmbs; i++) {
|
2017-06-04 12:26:03 +02:00
|
|
|
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
|
|
|
|
addr / SPAPR_MEMORY_BLOCK_SIZE);
|
2017-05-22 21:35:50 +02:00
|
|
|
g_assert(drc);
|
2017-06-06 09:01:21 +02:00
|
|
|
if (drc->dev) {
|
2017-05-22 21:35:50 +02:00
|
|
|
avail_lmbs++;
|
|
|
|
}
|
|
|
|
addr += SPAPR_MEMORY_BLOCK_SIZE;
|
|
|
|
}
|
|
|
|
|
2017-07-21 06:51:06 +02:00
|
|
|
return spapr_pending_dimm_unplugs_add(ms, avail_lmbs, dimm);
|
2017-05-22 21:35:50 +02:00
|
|
|
}
|
|
|
|
|
2017-05-22 21:35:48 +02:00
|
|
|
/* Callback to be called during DRC release. */
|
|
|
|
void spapr_lmb_release(DeviceState *dev)
|
2016-10-27 04:20:30 +02:00
|
|
|
{
|
2017-07-13 02:45:35 +02:00
|
|
|
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_hotplug_handler(dev));
|
|
|
|
PCDIMMDevice *dimm = PC_DIMM(dev);
|
|
|
|
PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
|
2017-08-21 08:30:29 +02:00
|
|
|
MemoryRegion *mr = ddc->get_memory_region(dimm, &error_abort);
|
2017-05-24 09:01:48 +02:00
|
|
|
sPAPRDIMMState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));
|
2016-10-27 04:20:30 +02:00
|
|
|
|
2017-05-22 21:35:50 +02:00
|
|
|
/* This information will get lost if a migration occurs
|
|
|
|
* during the unplug process. In this case recover it. */
|
|
|
|
if (ds == NULL) {
|
|
|
|
ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev));
|
2017-07-21 06:51:06 +02:00
|
|
|
g_assert(ds);
|
2017-06-06 09:01:21 +02:00
|
|
|
/* The DRC being examined by the caller at least must be counted */
|
|
|
|
g_assert(ds->nr_lmbs);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (--ds->nr_lmbs) {
|
2016-10-27 04:20:30 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-05-24 09:01:48 +02:00
|
|
|
spapr_pending_dimm_unplugs_remove(spapr, ds);
|
2016-10-27 04:20:30 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Now that all the LMBs have been removed by the guest, call the
|
|
|
|
* pc-dimm unplug handler to cleanup up the pc-dimm device.
|
|
|
|
*/
|
2017-07-13 02:45:35 +02:00
|
|
|
pc_dimm_memory_unplug(dev, &spapr->hotplug_memory, mr);
|
2016-10-27 04:20:30 +02:00
|
|
|
object_unparent(OBJECT(dev));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
|
|
|
|
DeviceState *dev, Error **errp)
|
|
|
|
{
|
2017-05-24 09:01:48 +02:00
|
|
|
sPAPRMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
|
2016-10-27 04:20:30 +02:00
|
|
|
Error *local_err = NULL;
|
|
|
|
PCDIMMDevice *dimm = PC_DIMM(dev);
|
|
|
|
PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
|
2017-08-21 08:30:29 +02:00
|
|
|
MemoryRegion *mr;
|
|
|
|
uint32_t nr_lmbs;
|
|
|
|
uint64_t size, addr_start, addr;
|
2017-05-24 09:01:48 +02:00
|
|
|
int i;
|
|
|
|
sPAPRDRConnector *drc;
|
2017-08-21 08:30:29 +02:00
|
|
|
|
|
|
|
mr = ddc->get_memory_region(dimm, &local_err);
|
|
|
|
if (local_err) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
size = memory_region_size(mr);
|
|
|
|
nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
|
|
|
|
|
2017-06-07 18:36:13 +02:00
|
|
|
addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
|
2017-05-24 09:01:48 +02:00
|
|
|
&local_err);
|
2016-10-27 04:20:30 +02:00
|
|
|
if (local_err) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2017-07-21 06:51:06 +02:00
|
|
|
spapr_pending_dimm_unplugs_add(spapr, nr_lmbs, dimm);
|
2017-05-24 09:01:48 +02:00
|
|
|
|
|
|
|
addr = addr_start;
|
|
|
|
for (i = 0; i < nr_lmbs; i++) {
|
2017-06-04 12:26:03 +02:00
|
|
|
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
|
|
|
|
addr / SPAPR_MEMORY_BLOCK_SIZE);
|
2017-05-24 09:01:48 +02:00
|
|
|
g_assert(drc);
|
|
|
|
|
2017-07-04 13:07:14 +02:00
|
|
|
spapr_drc_detach(drc);
|
2017-05-24 09:01:48 +02:00
|
|
|
addr += SPAPR_MEMORY_BLOCK_SIZE;
|
|
|
|
}
|
|
|
|
|
2017-06-04 12:26:03 +02:00
|
|
|
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
|
|
|
|
addr_start / SPAPR_MEMORY_BLOCK_SIZE);
|
2017-05-24 09:01:48 +02:00
|
|
|
spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
|
2017-06-02 05:49:20 +02:00
|
|
|
nr_lmbs, spapr_drc_index(drc));
|
2016-10-27 04:20:30 +02:00
|
|
|
out:
|
|
|
|
error_propagate(errp, local_err);
|
|
|
|
}
|
|
|
|
|
2017-06-30 15:18:10 +02:00
|
|
|
static void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
|
|
|
|
sPAPRMachineState *spapr)
|
2016-06-10 02:59:04 +02:00
|
|
|
{
|
|
|
|
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
|
|
|
DeviceClass *dc = DEVICE_GET_CLASS(cs);
|
2017-08-09 07:38:56 +02:00
|
|
|
int id = spapr_vcpu_id(cpu);
|
2016-06-10 02:59:04 +02:00
|
|
|
void *fdt;
|
|
|
|
int offset, fdt_size;
|
|
|
|
char *nodename;
|
|
|
|
|
|
|
|
fdt = create_device_tree(&fdt_size);
|
|
|
|
nodename = g_strdup_printf("%s@%x", dc->fw_name, id);
|
|
|
|
offset = fdt_add_subnode(fdt, 0, nodename);
|
|
|
|
|
|
|
|
spapr_populate_cpu_dt(cs, fdt, offset, spapr);
|
|
|
|
g_free(nodename);
|
|
|
|
|
|
|
|
*fdt_offset = offset;
|
|
|
|
return fdt;
|
|
|
|
}
|
|
|
|
|
2017-07-13 02:45:35 +02:00
|
|
|
/* Callback to be called during DRC release. */
|
|
|
|
void spapr_core_release(DeviceState *dev)
|
2017-02-02 16:02:34 +01:00
|
|
|
{
|
2017-07-13 02:45:35 +02:00
|
|
|
MachineState *ms = MACHINE(qdev_get_hotplug_handler(dev));
|
2017-06-14 15:29:19 +02:00
|
|
|
sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms);
|
2017-02-02 16:02:34 +01:00
|
|
|
CPUCore *cc = CPU_CORE(dev);
|
2017-02-10 11:18:49 +01:00
|
|
|
CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);
|
2017-02-02 16:02:34 +01:00
|
|
|
|
2017-06-14 15:29:19 +02:00
|
|
|
if (smc->pre_2_10_has_unused_icps) {
|
|
|
|
sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
|
|
|
|
sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(cc));
|
|
|
|
const char *typename = object_class_get_name(scc->cpu_class);
|
|
|
|
size_t size = object_type_get_instance_size(typename);
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < cc->nr_threads; i++) {
|
|
|
|
CPUState *cs = CPU(sc->threads + i * size);
|
|
|
|
|
|
|
|
pre_2_10_vmstate_register_dummy_icp(cs->cpu_index);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-18 15:58:31 +02:00
|
|
|
assert(core_slot);
|
2017-02-10 11:18:49 +01:00
|
|
|
core_slot->cpu = NULL;
|
2017-02-02 16:02:34 +01:00
|
|
|
object_unparent(OBJECT(dev));
|
|
|
|
}
|
|
|
|
|
2017-02-02 16:02:35 +01:00
|
|
|
static
|
|
|
|
void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
|
|
|
|
Error **errp)
|
2017-02-02 16:02:34 +01:00
|
|
|
{
|
2017-02-10 11:18:49 +01:00
|
|
|
int index;
|
|
|
|
sPAPRDRConnector *drc;
|
|
|
|
CPUCore *cc = CPU_CORE(dev);
|
|
|
|
int smt = kvmppc_smt_threads();
|
2017-02-02 16:02:34 +01:00
|
|
|
|
2017-02-10 11:18:49 +01:00
|
|
|
if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) {
|
|
|
|
error_setg(errp, "Unable to find CPU core with core-id: %d",
|
|
|
|
cc->core_id);
|
|
|
|
return;
|
|
|
|
}
|
2017-02-02 16:02:34 +01:00
|
|
|
if (index == 0) {
|
|
|
|
error_setg(errp, "Boot CPU core may not be unplugged");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-06-04 12:26:03 +02:00
|
|
|
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index * smt);
|
2017-02-02 16:02:34 +01:00
|
|
|
g_assert(drc);
|
|
|
|
|
2017-07-04 13:07:14 +02:00
|
|
|
spapr_drc_detach(drc);
|
2017-02-02 16:02:34 +01:00
|
|
|
|
|
|
|
spapr_hotplug_req_remove_by_index(drc);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Plug handler for a CPU core.  If the core has a DR connector, build the
 * device-tree fragment describing the new CPUs, attach it to the DRC and
 * either notify the guest (true hotplug) or reset the DRC to a
 * coldplug-equivalent state (device added before incoming migration).
 * Finally record the core in its possible_cpus slot.
 */
static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                            Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
    MachineClass *mc = MACHINE_GET_CLASS(spapr);
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev));
    CPUCore *cc = CPU_CORE(dev);
    /* first thread of the core; the dt fragment is built from it */
    CPUState *cs = CPU(core->threads);
    sPAPRDRConnector *drc;
    Error *local_err = NULL;
    int smt = kvmppc_smt_threads();
    CPUArchId *core_slot;
    int index;
    /* false when the device was added before an incoming migration */
    bool hotplugged = spapr_drc_hotplugged(dev);

    core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
    if (!core_slot) {
        error_setg(errp, "Unable to find CPU core with core-id: %d",
                   cc->core_id);
        return;
    }
    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index * smt);

    /* Only machines without hotpluggable CPUs may lack a DRC here */
    g_assert(drc || !mc->has_hotpluggable_cpus);

    if (drc) {
        void *fdt;
        int fdt_offset;

        fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);

        spapr_drc_attach(drc, dev, fdt, fdt_offset, &local_err);
        if (local_err) {
            /* the DRC did not take ownership of fdt on failure */
            g_free(fdt);
            error_propagate(errp, local_err);
            return;
        }

        if (hotplugged) {
            /*
             * Send hotplug notification interrupt to the guest only
             * in case of hotplugged CPUs.
             */
            spapr_hotplug_req_add_by_index(drc);
        } else {
            /* coldplug-equivalent: no event; force DRC to reset state */
            spapr_drc_reset(drc);
        }
    }

    core_slot->cpu = OBJECT(dev);

    /*
     * Pre-2.10 machine types registered ICPs for vCPUs that do not
     * exist; drop the dummy vmstate entries now that the threads of
     * this core occupy those cpu_index values.
     */
    if (smc->pre_2_10_has_unused_icps) {
        sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(cc));
        const char *typename = object_class_get_name(scc->cpu_class);
        size_t size = object_type_get_instance_size(typename);
        int i;

        for (i = 0; i < cc->nr_threads; i++) {
            sPAPRCPUCore *sc = SPAPR_CPU_CORE(dev);
            void *obj = sc->threads + i * size;

            cs = CPU(obj);
            pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index);
        }
    }
}
|
|
|
|
|
|
|
|
/*
 * Pre-plug validation for a CPU core: reject hotplug on machines that
 * do not support it, wrong core types, misaligned or out-of-range core
 * ids, already-populated slots and inconsistent thread counts, then let
 * the NUMA code validate/assign the core's node placement.
 */
static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    MachineState *machine = MACHINE(OBJECT(hotplug_dev));
    MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
    Error *local_err = NULL;
    CPUCore *cc = CPU_CORE(dev);
    /* owned string; released at 'out' on every path */
    char *base_core_type = spapr_get_cpu_core_type(machine->cpu_model);
    const char *type = object_get_typename(OBJECT(dev));
    CPUArchId *core_slot;
    int index;

    if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
        error_setg(&local_err, "CPU hotplug not supported for this machine");
        goto out;
    }

    /* the plugged core must match the machine's CPU model */
    if (strcmp(base_core_type, type)) {
        error_setg(&local_err, "CPU core type should be %s", base_core_type);
        goto out;
    }

    /* core ids are first-thread indices, hence multiples of smp_threads */
    if (cc->core_id % smp_threads) {
        error_setg(&local_err, "invalid core id %d", cc->core_id);
        goto out;
    }

    /*
     * In general we should have homogeneous threads-per-core, but old
     * (pre hotplug support) machine types allow the last core to have
     * reduced threads as a compatibility hack for when we allowed
     * total vcpus not a multiple of threads-per-core.
     */
    if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) {
        error_setg(&local_err, "invalid nr-threads %d, must be %d",
                   cc->nr_threads, smp_threads);
        goto out;
    }

    core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
    if (!core_slot) {
        error_setg(&local_err, "core id %d out of range", cc->core_id);
        goto out;
    }

    if (core_slot->cpu) {
        error_setg(&local_err, "core %d already populated", cc->core_id);
        goto out;
    }

    numa_cpu_pre_plug(core_slot, dev, &local_err);

out:
    g_free(base_core_type);
    error_propagate(errp, local_err);
}
|
|
|
|
|
2015-09-01 03:22:35 +02:00
|
|
|
static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
|
|
|
|
DeviceState *dev, Error **errp)
|
|
|
|
{
|
2017-09-06 20:43:05 +02:00
|
|
|
MachineState *ms = MACHINE(hotplug_dev);
|
|
|
|
sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms);
|
2015-09-01 03:22:35 +02:00
|
|
|
|
|
|
|
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
|
2015-06-29 10:44:32 +02:00
|
|
|
int node;
|
2015-09-01 03:22:35 +02:00
|
|
|
|
|
|
|
if (!smc->dr_lmb_enabled) {
|
|
|
|
error_setg(errp, "Memory hotplug not supported for this machine");
|
|
|
|
return;
|
|
|
|
}
|
2017-06-07 18:36:13 +02:00
|
|
|
node = object_property_get_uint(OBJECT(dev), PC_DIMM_NODE_PROP, errp);
|
2015-09-01 03:22:35 +02:00
|
|
|
if (*errp) {
|
|
|
|
return;
|
|
|
|
}
|
2016-03-03 10:43:42 +01:00
|
|
|
if (node < 0 || node >= MAX_NODES) {
|
|
|
|
error_setg(errp, "Invaild node %d", node);
|
|
|
|
return;
|
|
|
|
}
|
2015-09-01 03:22:35 +02:00
|
|
|
|
2015-06-29 10:44:32 +02:00
|
|
|
/*
|
|
|
|
* Currently PowerPC kernel doesn't allow hot-adding memory to
|
|
|
|
* memory-less node, but instead will silently add the memory
|
|
|
|
* to the first node that has some memory. This causes two
|
|
|
|
* unexpected behaviours for the user.
|
|
|
|
*
|
|
|
|
* - Memory gets hotplugged to a different node than what the user
|
|
|
|
* specified.
|
|
|
|
* - Since pc-dimm subsystem in QEMU still thinks that memory belongs
|
|
|
|
* to memory-less node, a reboot will set things accordingly
|
|
|
|
* and the previously hotplugged memory now ends in the right node.
|
|
|
|
* This appears as if some memory moved from one node to another.
|
|
|
|
*
|
|
|
|
* So until kernel starts supporting memory hotplug to memory-less
|
|
|
|
* nodes, just prevent such attempts upfront in QEMU.
|
|
|
|
*/
|
|
|
|
if (nb_numa_nodes && !numa_info[node].node_mem) {
|
|
|
|
error_setg(errp, "Can't hotplug memory to memory-less node %d",
|
|
|
|
node);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2015-09-01 03:22:35 +02:00
|
|
|
spapr_memory_plug(hotplug_dev, dev, node, errp);
|
2016-06-10 02:59:04 +02:00
|
|
|
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
|
|
|
|
spapr_core_plug(hotplug_dev, dev, errp);
|
2015-09-01 03:22:35 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-27 04:20:30 +02:00
|
|
|
static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
|
|
|
|
DeviceState *dev, Error **errp)
|
|
|
|
{
|
2017-09-06 20:43:05 +02:00
|
|
|
sPAPRMachineState *sms = SPAPR_MACHINE(OBJECT(hotplug_dev));
|
|
|
|
MachineClass *mc = MACHINE_GET_CLASS(sms);
|
2016-10-27 04:20:30 +02:00
|
|
|
|
|
|
|
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
|
|
|
|
if (spapr_ovec_test(sms->ov5_cas, OV5_HP_EVT)) {
|
|
|
|
spapr_memory_unplug_request(hotplug_dev, dev, errp);
|
|
|
|
} else {
|
|
|
|
/* NOTE: this means there is a window after guest reset, prior to
|
|
|
|
* CAS negotiation, where unplug requests will fail due to the
|
|
|
|
* capability not being detected yet. This is a bit different than
|
|
|
|
* the case with PCI unplug, where the events will be queued and
|
|
|
|
* eventually handled by the guest after boot
|
|
|
|
*/
|
|
|
|
error_setg(errp, "Memory hot unplug not supported for this guest");
|
|
|
|
}
|
2016-06-10 02:59:05 +02:00
|
|
|
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
|
2017-02-10 11:20:57 +01:00
|
|
|
if (!mc->has_hotpluggable_cpus) {
|
2016-06-10 02:59:05 +02:00
|
|
|
error_setg(errp, "CPU hot unplug not supported on this machine");
|
|
|
|
return;
|
|
|
|
}
|
2017-02-02 16:02:35 +01:00
|
|
|
spapr_core_unplug_request(hotplug_dev, dev, errp);
|
2015-09-01 03:22:35 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-10 02:59:03 +02:00
|
|
|
static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
|
|
|
|
DeviceState *dev, Error **errp)
|
|
|
|
{
|
2017-05-23 13:18:09 +02:00
|
|
|
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
|
|
|
|
spapr_memory_pre_plug(hotplug_dev, dev, errp);
|
|
|
|
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
|
2016-06-10 02:59:03 +02:00
|
|
|
spapr_core_pre_plug(hotplug_dev, dev, errp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-09-12 09:57:20 +02:00
|
|
|
static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
|
|
|
|
DeviceState *dev)
|
2015-09-01 03:22:35 +02:00
|
|
|
{
|
2016-06-10 02:59:03 +02:00
|
|
|
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
|
|
|
|
object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
|
2015-09-01 03:22:35 +02:00
|
|
|
return HOTPLUG_HANDLER(machine);
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-05-10 13:29:45 +02:00
|
|
|
/*
 * Map a vCPU index to the instance properties of the core slot that
 * contains it; aborts if no matching slot exists.
 */
static CpuInstanceProperties
spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index)
{
    CPUArchId *core_slot;
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    /* make sure possible_cpus are initialized */
    mc->possible_cpu_arch_ids(machine);
    /* get CPU core slot containing thread that matches cpu_index */
    core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL);
    assert(core_slot);
    return core_slot->props;
}
|
|
|
|
|
2017-06-01 12:53:28 +02:00
|
|
|
static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx)
|
|
|
|
{
|
|
|
|
return idx / smp_cores % nb_numa_nodes;
|
|
|
|
}
|
|
|
|
|
2017-02-10 11:18:49 +01:00
|
|
|
/*
 * Build (once) and return the list of possible CPU core slots for this
 * machine.  One slot per core; arch_id/core_id is the index of the
 * core's first thread.  The list is cached in machine->possible_cpus.
 */
static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
{
    int i;
    int spapr_max_cores = max_cpus / smp_threads;
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    if (!mc->has_hotpluggable_cpus) {
        /* no hotplug: only the boot-time cores, rounded up to full cores */
        spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
    }
    if (machine->possible_cpus) {
        /* already built; the topology must not have changed since */
        assert(machine->possible_cpus->len == spapr_max_cores);
        return machine->possible_cpus;
    }

    /* CPUArchIdList has a trailing flexible array of CPUArchId */
    machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
                             sizeof(CPUArchId) * spapr_max_cores);
    machine->possible_cpus->len = spapr_max_cores;
    for (i = 0; i < machine->possible_cpus->len; i++) {
        int core_id = i * smp_threads;

        machine->possible_cpus->cpus[i].vcpus_count = smp_threads;
        machine->possible_cpus->cpus[i].arch_id = core_id;
        machine->possible_cpus->cpus[i].props.has_core_id = true;
        machine->possible_cpus->cpus[i].props.core_id = core_id;
    }
    return machine->possible_cpus;
}
|
|
|
|
|
spapr_pci: Delegate placement of PCI host bridges to machine type
The 'spapr-pci-host-bridge' represents the virtual PCI host bridge (PHB)
for a PAPR guest. Unlike on x86, it's routine on Power (both bare metal
and PAPR guests) to have numerous independent PHBs, each controlling a
separate PCI domain.
There are two ways of configuring the spapr-pci-host-bridge device: first
it can be done fully manually, specifying the locations and sizes of all
the IO windows. This gives the most control, but is very awkward with 6
mandatory parameters. Alternatively just an "index" can be specified
which essentially selects from an array of predefined PHB locations.
The PHB at index 0 is automatically created as the default PHB.
The current set of default locations causes some problems for guests with
large RAM (> 1 TiB) or PCI devices with very large BARs (e.g. big nVidia
GPGPU cards via VFIO). Obviously, for migration we can only change the
locations on a new machine type, however.
This is awkward, because the placement is currently decided within the
spapr-pci-host-bridge code, so it breaks abstraction to look inside the
machine type version.
So, this patch delegates the "default mode" PHB placement from the
spapr-pci-host-bridge device back to the machine type via a public method
in sPAPRMachineClass. It's still a bit ugly, but it's about the best we
can do.
For now, this just changes where the calculation is done. It doesn't
change the actual location of the host bridges, or any other behaviour.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Laurent Vivier <lvivier@redhat.com>
2016-10-13 01:26:09 +02:00
|
|
|
/*
 * Compute the BUID, PIO window, 32-bit and 64-bit MMIO windows and
 * LIOBNs for the PHB at the given "index".  Outputs are written through
 * the pointer parameters; on an out-of-range index *errp is set and the
 * outputs are left untouched.
 */
static void spapr_phb_placement(sPAPRMachineState *spapr, uint32_t index,
                                uint64_t *buid, hwaddr *pio,
                                hwaddr *mmio32, hwaddr *mmio64,
                                unsigned n_dma, uint32_t *liobns, Error **errp)
{
    /*
     * New-style PHB window placement.
     *
     * Goals: Gives large (1TiB), naturally aligned 64-bit MMIO window
     * for each PHB, in addition to 2GiB 32-bit MMIO and 64kiB PIO
     * windows.
     *
     * Some guest kernels can't work with MMIO windows above 1<<46
     * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB
     *
     * 32TiB..(33TiB+1984kiB) contains the 64kiB PIO windows for each
     * PHB stacked together. (32TiB+2GiB)..(32TiB+64GiB) contains the
     * 2GiB 32-bit MMIO windows for each PHB. Then 33..64TiB has the
     * 1TiB 64-bit MMIO windows for each PHB.
     */
    const uint64_t base_buid = 0x800000020000000ULL;
#define SPAPR_MAX_PHBS ((SPAPR_PCI_LIMIT - SPAPR_PCI_BASE) / \
                        SPAPR_PCI_MEM64_WIN_SIZE - 1)
    int i;

    /* Sanity check natural alignments */
    QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0);
    /* Sanity check bounds */
    QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) >
                      SPAPR_PCI_MEM32_WIN_SIZE);
    QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) >
                      SPAPR_PCI_MEM64_WIN_SIZE);

    if (index >= SPAPR_MAX_PHBS) {
        error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)",
                   SPAPR_MAX_PHBS - 1);
        return;
    }

    *buid = base_buid + index;
    for (i = 0; i < n_dma; ++i) {
        liobns[i] = SPAPR_PCI_LIOBN(index, i);
    }

    /* PIO windows are stacked from SPAPR_PCI_BASE; MMIO windows use
     * (index + 1) so that index 0 does not collide with the PIO area */
    *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
    *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
    *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
}
|
|
|
|
|
2017-02-27 15:29:15 +01:00
|
|
|
/*
 * XICSFabric callback: return the ICS responsible for @irq.
 *
 * The sPAPR machine owns a single interrupt controller source; hand it
 * back only when @irq lies inside its range, otherwise NULL.
 */
static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(dev);

    if (!ics_valid_irq(spapr->ics, irq)) {
        return NULL;
    }
    return spapr->ics;
}
|
|
|
|
|
|
|
|
/* XICSFabric callback: ask the machine's single ICS to resend pending irqs. */
static void spapr_ics_resend(XICSFabric *dev)
{
    ics_resend(SPAPR_MACHINE(dev)->ics);
}
|
|
|
|
|
2017-08-03 08:28:44 +02:00
|
|
|
/*
 * XICSFabric callback: map a vCPU id to its interrupt presentation
 * controller, or NULL when no vCPU with that id exists.
 */
static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id)
{
    PowerPCCPU *found = spapr_find_cpu(vcpu_id);

    if (!found) {
        return NULL;
    }
    return ICP(found->intc);
}
|
|
|
|
|
2017-02-27 15:29:32 +01:00
|
|
|
static void spapr_pic_print_info(InterruptStatsProvider *obj,
|
|
|
|
Monitor *mon)
|
|
|
|
{
|
|
|
|
sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
|
2017-04-03 09:45:58 +02:00
|
|
|
CPUState *cs;
|
|
|
|
|
|
|
|
CPU_FOREACH(cs) {
|
|
|
|
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
2017-02-27 15:29:32 +01:00
|
|
|
|
2017-04-03 09:45:58 +02:00
|
|
|
icp_pic_print_info(ICP(cpu->intc), mon);
|
2017-02-27 15:29:32 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
ics_pic_print_info(spapr->ics, mon);
|
|
|
|
}
|
|
|
|
|
2017-08-09 07:38:56 +02:00
|
|
|
/*
 * Return the PAPR vCPU id for @cpu: under KVM this is the kernel's
 * vcpu id, under TCG the QEMU cpu_index stands in for it.
 */
int spapr_vcpu_id(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);

    return kvm_enabled() ? kvm_arch_vcpu_id(cs) : cs->cpu_index;
}
|
|
|
|
|
|
|
|
PowerPCCPU *spapr_find_cpu(int vcpu_id)
|
|
|
|
{
|
|
|
|
CPUState *cs;
|
|
|
|
|
|
|
|
CPU_FOREACH(cs) {
|
|
|
|
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
|
|
|
|
|
|
|
if (spapr_vcpu_id(cpu) == vcpu_id) {
|
|
|
|
return cpu;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2014-03-17 03:40:26 +01:00
|
|
|
/*
 * Class initializer for the abstract TYPE_SPAPR_MACHINE type.
 *
 * Installs the machine callbacks and the default (latest) behaviour on
 * the MachineClass and on every interface class the type implements;
 * the versioned pseries-X.Y class_options functions then override
 * individual fields for backwards compatibility.
 */
static void spapr_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
    FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
    NMIClass *nc = NMI_CLASS(oc);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
    PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
    XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
    InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);

    mc->desc = "pSeries Logical Partition (PAPR compliant)";

    /*
     * We set up the default / latest behaviour here. The class_init
     * functions for the specific versioned machine types can override
     * these details for backwards compatibility
     */
    mc->init = ppc_spapr_init;
    mc->reset = ppc_spapr_reset;
    mc->block_default_type = IF_SCSI;
    mc->max_cpus = 1024;
    mc->no_parallel = 1;
    mc->default_boot_order = "";
    mc->default_ram_size = 512 * M_BYTE;
    mc->kvm_type = spapr_kvm_type;
    mc->has_dynamic_sysbus = true;
    mc->pci_allow_0_address = true;
    /* Hotplug: pre_plug/plug/unplug_request are dispatched through hc */
    mc->get_hotplug_handler = spapr_get_hotplug_handler;
    hc->pre_plug = spapr_machine_device_pre_plug;
    hc->plug = spapr_machine_device_plug;
    mc->cpu_index_to_instance_props = spapr_cpu_index_to_props;
    mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id;
    mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
    hc->unplug_request = spapr_machine_device_unplug_request;

    smc->dr_lmb_enabled = true;
    smc->tcg_default_cpu = "POWER8";
    mc->has_hotpluggable_cpus = true;
    smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED;
    fwc->get_dev_path = spapr_get_fw_dev_path;
    nc->nmi_monitor_handler = spapr_nmi;
    /* PHB placement is delegated to the machine class (versioned) */
    smc->phb_placement = spapr_phb_placement;
    /* Virtual-hypervisor hooks: hypercalls and HPT access for the guest */
    vhc->hypercall = emulate_spapr_hypercall;
    vhc->hpt_mask = spapr_hpt_mask;
    vhc->map_hptes = spapr_map_hptes;
    vhc->unmap_hptes = spapr_unmap_hptes;
    vhc->store_hpte = spapr_store_hpte;
    vhc->get_patbe = spapr_get_patbe;
    vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr;
    /* XICS interrupt-fabric lookups */
    xic->ics_get = spapr_ics_get;
    xic->ics_resend = spapr_ics_resend;
    xic->icp_get = spapr_icp_get;
    ispc->print_info = spapr_pic_print_info;
    /* Force NUMA node memory size to be a multiple of
     * SPAPR_MEMORY_BLOCK_SIZE (256M) since that's the granularity
     * in which LMBs are represented and hot-added
     */
    mc->numa_mem_align_shift = 28;
}
|
|
|
|
|
|
|
|
/*
 * QOM registration for the abstract base pseries machine type; the
 * concrete versioned types are generated by DEFINE_SPAPR_MACHINE.
 */
static const TypeInfo spapr_machine_info = {
    .name = TYPE_SPAPR_MACHINE,
    .parent = TYPE_MACHINE,
    .abstract = true, /* only the versioned subclasses are instantiable */
    .instance_size = sizeof(sPAPRMachineState),
    .instance_init = spapr_machine_initfn,
    .instance_finalize = spapr_machine_finalizefn,
    .class_size = sizeof(sPAPRMachineClass),
    .class_init = spapr_machine_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_FW_PATH_PROVIDER },
        { TYPE_NMI },
        { TYPE_HOTPLUG_HANDLER },
        { TYPE_PPC_VIRTUAL_HYPERVISOR },
        { TYPE_XICS_FABRIC },
        { TYPE_INTERRUPT_STATS_PROVIDER },
        { }
    },
};
|
|
|
|
|
2015-12-07 04:25:50 +01:00
|
|
|
/*
 * DEFINE_SPAPR_MACHINE(suffix, verstr, latest)
 *
 * Generates and registers one versioned "pseries-<verstr>" machine type.
 * The caller must have defined spapr_machine_<suffix>_class_options()
 * and spapr_machine_<suffix>_instance_options() beforehand; those are
 * invoked from the generated class_init / instance_init.  When @latest
 * is true the type additionally gets the "pseries" alias and becomes
 * the default machine.
 */
#define DEFINE_SPAPR_MACHINE(suffix, verstr, latest)                 \
    static void spapr_machine_##suffix##_class_init(ObjectClass *oc, \
                                                    void *data)      \
    {                                                                \
        MachineClass *mc = MACHINE_CLASS(oc);                        \
        spapr_machine_##suffix##_class_options(mc);                  \
        if (latest) {                                                \
            mc->alias = "pseries";                                   \
            mc->is_default = 1;                                      \
        }                                                            \
    }                                                                \
    static void spapr_machine_##suffix##_instance_init(Object *obj)  \
    {                                                                \
        MachineState *machine = MACHINE(obj);                        \
        spapr_machine_##suffix##_instance_options(machine);          \
    }                                                                \
    static const TypeInfo spapr_machine_##suffix##_info = {          \
        .name = MACHINE_TYPE_NAME("pseries-" verstr),                \
        .parent = TYPE_SPAPR_MACHINE,                                \
        .class_init = spapr_machine_##suffix##_class_init,           \
        .instance_init = spapr_machine_##suffix##_instance_init,     \
    };                                                               \
    static void spapr_machine_register_##suffix(void)                \
    {                                                                \
        type_register(&spapr_machine_##suffix##_info);               \
    }                                                                \
    type_init(spapr_machine_register_##suffix)
|
2015-12-07 04:23:20 +01:00
|
|
|
|
2017-07-25 20:01:12 +02:00
|
|
|
/*
 * pseries-2.11 (latest: gets the "pseries" alias and is the default)
 */
static void spapr_machine_2_11_instance_options(MachineState *machine)
{
    /* Nothing to override: the base type's latest defaults apply. */
}

static void spapr_machine_2_11_class_options(MachineClass *mc)
{
    /* Defaults for the latest behaviour inherited from the base class */
}

DEFINE_SPAPR_MACHINE(2_11, "2.11", true);
|
|
|
|
|
2017-03-07 01:02:28 +01:00
|
|
|
/*
 * pseries-2.10
 */
/* No spapr-specific compat properties; only the generic 2.10 set. */
#define SPAPR_COMPAT_2_10                                              \
    HW_COMPAT_2_10                                                     \

static void spapr_machine_2_10_instance_options(MachineState *machine)
{
}

static void spapr_machine_2_10_class_options(MachineClass *mc)
{
    /* Start from 2.11 behaviour, then apply the 2.10 compat properties */
    spapr_machine_2_11_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_10);
}

DEFINE_SPAPR_MACHINE(2_10, "2.10", false);
|
2017-03-07 01:02:28 +01:00
|
|
|
|
2016-12-08 06:39:18 +01:00
|
|
|
/*
 * pseries-2.9
 */
#define SPAPR_COMPAT_2_9                                               \
    HW_COMPAT_2_9                                                      \
    {                                                                  \
        .driver = TYPE_POWERPC_CPU,                                    \
        .property = "pre-2.10-migration",                              \
        .value = "on",                                                 \
    },                                                                 \

static void spapr_machine_2_9_instance_options(MachineState *machine)
{
    spapr_machine_2_10_instance_options(machine);
}

static void spapr_machine_2_9_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    /* Inherit 2.10 behaviour, then restore pre-2.10 quirks */
    spapr_machine_2_10_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_9);
    mc->numa_auto_assign_ram = numa_legacy_auto_assign_ram;
    smc->pre_2_10_has_unused_icps = true;
    /* HPT resizing was introduced after 2.9; keep it off here */
    smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED;
}

DEFINE_SPAPR_MACHINE(2_9, "2.9", false);
|
2016-12-08 06:39:18 +01:00
|
|
|
|
2016-09-28 06:31:55 +02:00
|
|
|
/*
 * pseries-2.8
 */
#define SPAPR_COMPAT_2_8                                        \
    HW_COMPAT_2_8                                               \
    {                                                           \
        .driver = TYPE_SPAPR_PCI_HOST_BRIDGE,                   \
        .property = "pcie-extended-configuration-space",        \
        .value = "off",                                         \
    },

static void spapr_machine_2_8_instance_options(MachineState *machine)
{
    spapr_machine_2_9_instance_options(machine);
}

static void spapr_machine_2_8_class_options(MachineClass *mc)
{
    spapr_machine_2_9_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_8);
    /* Pre-2.9 NUMA alignment granularity (8 MiB, not 256 MiB) */
    mc->numa_mem_align_shift = 23;
}

DEFINE_SPAPR_MACHINE(2_8, "2.8", false);
|
2016-09-28 06:31:55 +02:00
|
|
|
|
2016-06-03 07:49:42 +02:00
|
|
|
/*
 * pseries-2.7
 */
/*
 * Compat properties restoring the 2.7-era PHB window layout (large
 * single "32-bit" MMIO window, no separate 64-bit window) and the
 * pre-2.8 migration stream format for CPUs and PHBs.
 */
#define SPAPR_COMPAT_2_7                                               \
    HW_COMPAT_2_7                                                      \
    {                                                                  \
        .driver = TYPE_SPAPR_PCI_HOST_BRIDGE,                          \
        .property = "mem_win_size",                                    \
        .value = stringify(SPAPR_PCI_2_7_MMIO_WIN_SIZE),               \
    },                                                                 \
    {                                                                  \
        .driver = TYPE_SPAPR_PCI_HOST_BRIDGE,                          \
        .property = "mem64_win_size",                                  \
        .value = "0",                                                  \
    },                                                                 \
    {                                                                  \
        .driver = TYPE_POWERPC_CPU,                                    \
        .property = "pre-2.8-migration",                               \
        .value = "on",                                                 \
    },                                                                 \
    {                                                                  \
        .driver = TYPE_SPAPR_PCI_HOST_BRIDGE,                          \
        .property = "pre-2.8-migration",                               \
        .value = "on",                                                 \
    },
|
|
|
|
|
|
|
|
static void phb_placement_2_7(sPAPRMachineState *spapr, uint32_t index,
|
|
|
|
uint64_t *buid, hwaddr *pio,
|
|
|
|
hwaddr *mmio32, hwaddr *mmio64,
|
|
|
|
unsigned n_dma, uint32_t *liobns, Error **errp)
|
|
|
|
{
|
|
|
|
/* Legacy PHB placement for pseries-2.7 and earlier machine types */
|
|
|
|
const uint64_t base_buid = 0x800000020000000ULL;
|
|
|
|
const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */
|
|
|
|
const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */
|
|
|
|
const hwaddr pio_offset = 0x80000000; /* 2 GiB */
|
|
|
|
const uint32_t max_index = 255;
|
|
|
|
const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */
|
|
|
|
|
|
|
|
uint64_t ram_top = MACHINE(spapr)->ram_size;
|
|
|
|
hwaddr phb0_base, phb_base;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* Do we have hotpluggable memory? */
|
|
|
|
if (MACHINE(spapr)->maxram_size > ram_top) {
|
|
|
|
/* Can't just use maxram_size, because there may be an
|
|
|
|
* alignment gap between normal and hotpluggable memory
|
|
|
|
* regions */
|
|
|
|
ram_top = spapr->hotplug_memory.base +
|
|
|
|
memory_region_size(&spapr->hotplug_memory.mr);
|
|
|
|
}
|
|
|
|
|
|
|
|
phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment);
|
|
|
|
|
|
|
|
if (index > max_index) {
|
|
|
|
error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
|
|
|
|
max_index);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
*buid = base_buid + index;
|
|
|
|
for (i = 0; i < n_dma; ++i) {
|
|
|
|
liobns[i] = SPAPR_PCI_LIOBN(index, i);
|
|
|
|
}
|
|
|
|
|
|
|
|
phb_base = phb0_base + index * phb_spacing;
|
|
|
|
*pio = phb_base + pio_offset;
|
|
|
|
*mmio32 = phb_base + mmio_offset;
|
|
|
|
/*
|
|
|
|
* We don't set the 64-bit MMIO window, relying on the PHB's
|
|
|
|
* fallback behaviour of automatically splitting a large "32-bit"
|
|
|
|
* window into contiguous 32-bit and 64-bit windows
|
|
|
|
*/
|
|
|
|
}
|
2016-09-28 06:31:55 +02:00
|
|
|
|
2016-06-03 07:49:42 +02:00
|
|
|
static void spapr_machine_2_7_instance_options(MachineState *machine)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);

    spapr_machine_2_8_instance_options(machine);
    /* 2.7 and earlier surface hotplug via legacy EPOW events only */
    spapr->use_hotplug_event_source = false;
}

static void spapr_machine_2_7_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_8_class_options(mc);
    smc->tcg_default_cpu = "POWER7";
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_7);
    /* Use the legacy (pre-2.8) PHB window placement */
    smc->phb_placement = phb_placement_2_7;
}

DEFINE_SPAPR_MACHINE(2_7, "2.7", false);
|
2016-06-03 07:49:42 +02:00
|
|
|
|
2015-12-07 04:28:15 +01:00
|
|
|
/*
 * pseries-2.6
 */
#define SPAPR_COMPAT_2_6 \
    HW_COMPAT_2_6 \
    { \
        .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,\
        .property = "ddw",\
        .value    = stringify(off),\
    },

static void spapr_machine_2_6_instance_options(MachineState *machine)
{
    spapr_machine_2_7_instance_options(machine);
}

static void spapr_machine_2_6_class_options(MachineClass *mc)
{
    spapr_machine_2_7_class_options(mc);
    /* CPU hotplug was introduced after 2.6 */
    mc->has_hotpluggable_cpus = false;
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_6);
}

DEFINE_SPAPR_MACHINE(2_6, "2.6", false);
|
2015-12-07 04:28:15 +01:00
|
|
|
|
2015-12-03 07:34:10 +01:00
|
|
|
/*
 * pseries-2.5
 */
#define SPAPR_COMPAT_2_5                                     \
    HW_COMPAT_2_5                                            \
    {                                                        \
        .driver   = "spapr-vlan",                            \
        .property = "use-rx-buffer-pools",                   \
        .value    = "off",                                   \
    },

static void spapr_machine_2_5_instance_options(MachineState *machine)
{
    spapr_machine_2_6_instance_options(machine);
}

static void spapr_machine_2_5_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_6_class_options(mc);
    /* 2.5 defaulted to OHCI rather than XHCI for USB */
    smc->use_ohci_by_default = true;
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_5);
}

DEFINE_SPAPR_MACHINE(2_5, "2.5", false);
|
2015-12-03 07:34:10 +01:00
|
|
|
|
|
|
|
/*
 * pseries-2.4
 */
#define SPAPR_COMPAT_2_4 \
    HW_COMPAT_2_4

static void spapr_machine_2_4_instance_options(MachineState *machine)
{
    spapr_machine_2_5_instance_options(machine);
}

static void spapr_machine_2_4_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_5_class_options(mc);
    /* Dynamic-reconfiguration LMBs (memory hotplug) arrived after 2.4 */
    smc->dr_lmb_enabled = false;
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_4);
}

DEFINE_SPAPR_MACHINE(2_4, "2.4", false);
|
2015-12-03 07:34:10 +01:00
|
|
|
|
|
|
|
/*
 * pseries-2.3
 */
#define SPAPR_COMPAT_2_3 \
    HW_COMPAT_2_3 \
    {\
        .driver   = "spapr-pci-host-bridge",\
        .property = "dynamic-reconfiguration",\
        .value    = "off",\
    },

static void spapr_machine_2_3_instance_options(MachineState *machine)
{
    spapr_machine_2_4_instance_options(machine);
}

static void spapr_machine_2_3_class_options(MachineClass *mc)
{
    spapr_machine_2_4_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_3);
}
DEFINE_SPAPR_MACHINE(2_3, "2.3", false);
|
2014-06-25 06:08:45 +02:00
|
|
|
|
2015-12-03 07:34:10 +01:00
|
|
|
/*
 * pseries-2.2
 */

#define SPAPR_COMPAT_2_2 \
    HW_COMPAT_2_2 \
    {\
        .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,\
        .property = "mem_win_size",\
        .value    = "0x20000000",\
    },

static void spapr_machine_2_2_instance_options(MachineState *machine)
{
    spapr_machine_2_3_instance_options(machine);
    /* 2.2 streams carried no vmdesc self-description section */
    machine->suppress_vmdesc = true;
}

static void spapr_machine_2_2_class_options(MachineClass *mc)
{
    spapr_machine_2_3_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_2);
}
DEFINE_SPAPR_MACHINE(2_2, "2.2", false);
|
2014-09-08 07:30:31 +02:00
|
|
|
|
2015-12-03 07:34:10 +01:00
|
|
|
/*
 * pseries-2.1
 */
#define SPAPR_COMPAT_2_1 \
    HW_COMPAT_2_1

static void spapr_machine_2_1_instance_options(MachineState *machine)
{
    spapr_machine_2_2_instance_options(machine);
}

static void spapr_machine_2_1_class_options(MachineClass *mc)
{
    spapr_machine_2_2_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_1);
}
DEFINE_SPAPR_MACHINE(2_1, "2.1", false);
|
2015-08-12 05:15:56 +02:00
|
|
|
|
2014-03-17 03:40:26 +01:00
|
|
|
/* Register the abstract base machine type with QOM at startup. */
static void spapr_machine_register_types(void)
{
    type_register_static(&spapr_machine_info);
}

type_init(spapr_machine_register_types)
|