Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-5.2-20201028' into staging

ppc patch queue 2020-10-28

Here's the next pull request for ppc- and spapr-related patches, which
should be the last items before soft freeze.  Includes:

 * Numerous error handling cleanups from Greg Kurz (see the condensed sketch after this list)
 * Cleanups to cpu realization and hotplug handling from Greg Kurz
 * A handful of other small fixes and cleanups
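
For readers skimming the diff, here is a condensed sketch of the error
handling pattern these cleanups converge on, drawn from the
spapr_reallocate_hpt() and htab_load() hunks further down (function bodies
abbreviated, not complete code): the callee returns a negative errno value,
sets the error, and attaches advice with error_append_hint(), so callers can
test the return value instead of peeking at a local Error.

    int spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, Error **errp)
    {
        ERRP_GUARD();
        long rc;

        /* ... attempt the kernel-side HPT allocation ... */
        if (rc < 0) {
            /* kernel-side HPT needed, but couldn't allocate one */
            error_setg_errno(errp, errno,
                             "Failed to allocate KVM HPT of order %d", shift);
            error_append_hint(errp, "Try smaller maxmem?\n");
            return -errno;
        }
        /* ... */
        return 0;
    }

    /* Caller (htab_load) checks the return value: */
    ret = spapr_reallocate_hpt(spapr, section_hdr, &local_err);
    if (ret < 0) {
        error_report_err(local_err);
        return ret;
    }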

This does include a change to pc_dimm_plug() that isn't in my normal
areas of concern.  That's there as a prerequisite for ppc-specific
changes, and has an ack from Igor.
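
Concretely, pc_dimm_plug() loses its Error ** parameter, so the memory plug
handlers no longer need the local_err/goto-out dance.  A minimal
before/after sketch based on the virt_memory_plug() hunk below (surrounding
code omitted):

    /* before */
    Error *local_err = NULL;

    pc_dimm_plug(PC_DIMM(dev), MACHINE(vms), &local_err);
    if (local_err) {
        goto out;
    }
    /* ... */
    out:
        error_propagate(errp, local_err);

    /* after */
    pc_dimm_plug(PC_DIMM(dev), MACHINE(vms));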

# gpg: Signature made Tue 27 Oct 2020 14:13:21 GMT
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E  87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-5.2-20201028:
  ppc/: fix some comment spelling errors
  spapr: Improve spapr_reallocate_hpt() error reporting
  target/ppc: Fix kvmppc_load_htab_chunk() error reporting
  spapr: Use error_append_hint() in spapr_reallocate_hpt()
  spapr: Simplify error handling in spapr_memory_plug()
  spapr: Pass &error_abort when getting some PC DIMM properties
  spapr: Use appropriate getter for PC_DIMM_SLOT_PROP
  spapr: Use appropriate getter for PC_DIMM_ADDR_PROP
  pc-dimm: Drop @errp argument of pc_dimm_plug()
  spapr: Simplify spapr_cpu_core_realize() and spapr_cpu_core_unrealize()
  spapr: Make spapr_cpu_core_unrealize() idempotent
  spapr: Drop spapr_delete_vcpu() unused argument
  spapr: Unrealize vCPUs with qdev_unrealize()
  spapr: Fix leak of CPU machine specific data
  spapr: Move spapr_create_nvdimm_dr_connectors() to core machine code
  hw/net: move allocation to the heap due to very large stack frame
  ppc/spapr: re-assert IRQs during event-scan if there are pending
  spapr: Clarify why DR connectors aren't user creatable

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2020-10-29 14:30:58 +00:00
commit a19d4bc452
23 changed files with 131 additions and 141 deletions


@@ -9,6 +9,10 @@ void cpu_resume(CPUState *cpu)
{
}
void cpu_remove_sync(CPUState *cpu)
{
}
void qemu_init_vcpu(CPUState *cpu)
{
}


@@ -2261,12 +2261,8 @@ static void virt_memory_plug(HotplugHandler *hotplug_dev,
VirtMachineState *vms = VIRT_MACHINE(hotplug_dev);
MachineState *ms = MACHINE(hotplug_dev);
bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
Error *local_err = NULL;
pc_dimm_plug(PC_DIMM(dev), MACHINE(vms), &local_err);
if (local_err) {
goto out;
}
pc_dimm_plug(PC_DIMM(dev), MACHINE(vms));
if (is_nvdimm) {
nvdimm_plug(ms->nvdimms_state);
@@ -2274,9 +2270,6 @@ static void virt_memory_plug(HotplugHandler *hotplug_dev,
hotplug_handler_plug(HOTPLUG_HANDLER(vms->acpi_dev),
dev, &error_abort);
out:
error_propagate(errp, local_err);
}
static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev,


@@ -1265,24 +1265,18 @@ static void pc_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
static void pc_memory_plug(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
Error *local_err = NULL;
PCMachineState *pcms = PC_MACHINE(hotplug_dev);
X86MachineState *x86ms = X86_MACHINE(hotplug_dev);
MachineState *ms = MACHINE(hotplug_dev);
bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
pc_dimm_plug(PC_DIMM(dev), MACHINE(pcms), &local_err);
if (local_err) {
goto out;
}
pc_dimm_plug(PC_DIMM(dev), MACHINE(pcms));
if (is_nvdimm) {
nvdimm_plug(ms->nvdimms_state);
}
hotplug_handler_plug(x86ms->acpi_dev, dev, &error_abort);
out:
error_propagate(errp, local_err);
}
static void pc_memory_unplug_request(HotplugHandler *hotplug_dev,


@@ -64,7 +64,7 @@ void pc_dimm_pre_plug(PCDIMMDevice *dimm, MachineState *machine,
errp);
}
void pc_dimm_plug(PCDIMMDevice *dimm, MachineState *machine, Error **errp)
void pc_dimm_plug(PCDIMMDevice *dimm, MachineState *machine)
{
PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
MemoryRegion *vmstate_mr = ddc->get_vmstate_memory_region(dimm,


@@ -688,7 +688,8 @@ static target_ulong h_send_logical_lan(PowerPCCPU *cpu,
SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
unsigned total_len;
uint8_t *lbuf, *p;
uint8_t *p;
g_autofree uint8_t *lbuf = NULL;
int i, nbufs;
int ret;
@@ -729,7 +730,7 @@ static target_ulong h_send_logical_lan(PowerPCCPU *cpu,
return H_RESOURCE;
}
lbuf = alloca(total_len);
lbuf = g_malloc(total_len);
p = lbuf;
for (i = 0; i < nbufs; i++) {
ret = spapr_vio_dma_read(sdev, VLAN_BD_ADDR(bufs[i]),


@@ -1483,9 +1483,9 @@ void spapr_free_hpt(SpaprMachineState *spapr)
close_htab_fd(spapr);
}
void spapr_reallocate_hpt(SpaprMachineState *spapr, int shift,
Error **errp)
int spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, Error **errp)
{
ERRP_GUARD();
long rc;
/* Clean up any HPT info from a previous boot */
@@ -1495,22 +1495,23 @@ void spapr_reallocate_hpt(SpaprMachineState *spapr, int shift,
if (rc == -EOPNOTSUPP) {
error_setg(errp, "HPT not supported in nested guests");
return;
return -EOPNOTSUPP;
}
if (rc < 0) {
/* kernel-side HPT needed, but couldn't allocate one */
error_setg_errno(errp, errno,
"Failed to allocate KVM HPT of order %d (try smaller maxmem?)",
error_setg_errno(errp, errno, "Failed to allocate KVM HPT of order %d",
shift);
/* This is almost certainly fatal, but if the caller really
* wants to carry on with shift == 0, it's welcome to try */
error_append_hint(errp, "Try smaller maxmem?\n");
return -errno;
} else if (rc > 0) {
/* kernel-side HPT allocated */
if (rc != shift) {
error_setg(errp,
"Requested order %d HPT, but kernel allocated order %ld (try smaller maxmem?)",
"Requested order %d HPT, but kernel allocated order %ld",
shift, rc);
error_append_hint(errp, "Try smaller maxmem?\n");
return -ENOSPC;
}
spapr->htab_shift = shift;
@@ -1524,7 +1525,7 @@ void spapr_reallocate_hpt(SpaprMachineState *spapr, int shift,
if (!spapr->htab) {
error_setg_errno(errp, errno,
"Could not allocate HPT of order %d", shift);
return;
return -ENOMEM;
}
memset(spapr->htab, 0, size);
@@ -1537,6 +1538,7 @@ void spapr_reallocate_hpt(SpaprMachineState *spapr, int shift,
/* We're setting up a hash table, so that means we're not radix */
spapr->patb_entry = 0;
spapr_set_all_lpcrs(0, LPCR_HR | LPCR_UPRT);
return 0;
}
void spapr_setup_hpt(SpaprMachineState *spapr)
@@ -2290,11 +2292,13 @@ static int htab_load(QEMUFile *f, void *opaque, int version_id)
}
if (section_hdr) {
int ret;
/* First section gives the htab size */
spapr_reallocate_hpt(spapr, section_hdr, &local_err);
if (local_err) {
ret = spapr_reallocate_hpt(spapr, section_hdr, &local_err);
if (ret < 0) {
error_report_err(local_err);
return -EINVAL;
return ret;
}
return 0;
}
@@ -2345,8 +2349,10 @@ static int htab_load(QEMUFile *f, void *opaque, int version_id)
assert(fd >= 0);
rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid);
rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid,
&local_err);
if (rc < 0) {
error_report_err(local_err);
return rc;
}
}
@@ -2641,6 +2647,16 @@ static hwaddr spapr_rma_size(SpaprMachineState *spapr, Error **errp)
return rma_size;
}
static void spapr_create_nvdimm_dr_connectors(SpaprMachineState *spapr)
{
MachineState *machine = MACHINE(spapr);
int i;
for (i = 0; i < machine->ram_slots; i++) {
spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_PMEM, i);
}
}
/* pSeries LPAR / sPAPR hardware init */
static void spapr_machine_init(MachineState *machine)
{
@@ -3372,7 +3388,7 @@ int spapr_lmb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
return 0;
}
static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
static bool spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
bool dedicated_hp_event_source, Error **errp)
{
SpaprDrc *drc;
@@ -3393,7 +3409,7 @@ static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
addr / SPAPR_MEMORY_BLOCK_SIZE);
spapr_drc_detach(drc);
}
return;
return false;
}
if (!hotplugged) {
spapr_drc_reset(drc);
@@ -3415,52 +3431,43 @@ static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
nr_lmbs);
}
}
return true;
}
static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp)
{
Error *local_err = NULL;
SpaprMachineState *ms = SPAPR_MACHINE(hotplug_dev);
PCDIMMDevice *dimm = PC_DIMM(dev);
uint64_t size, addr, slot;
uint64_t size, addr;
int64_t slot;
bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
size = memory_device_get_region_size(MEMORY_DEVICE(dev), &error_abort);
pc_dimm_plug(dimm, MACHINE(ms), &local_err);
if (local_err) {
goto out;
}
pc_dimm_plug(dimm, MACHINE(ms));
if (!is_nvdimm) {
addr = object_property_get_uint(OBJECT(dimm),
PC_DIMM_ADDR_PROP, &local_err);
if (local_err) {
PC_DIMM_ADDR_PROP, &error_abort);
if (!spapr_add_lmbs(dev, addr, size,
spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT), errp)) {
goto out_unplug;
}
spapr_add_lmbs(dev, addr, size,
spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT),
&local_err);
} else {
slot = object_property_get_uint(OBJECT(dimm),
PC_DIMM_SLOT_PROP, &local_err);
if (local_err) {
slot = object_property_get_int(OBJECT(dimm),
PC_DIMM_SLOT_PROP, &error_abort);
/* We should have valid slot number at this point */
g_assert(slot >= 0);
if (!spapr_add_nvdimm(dev, slot, errp)) {
goto out_unplug;
}
spapr_add_nvdimm(dev, slot, &local_err);
}
if (local_err) {
goto out_unplug;
}
return;
out_unplug:
pc_dimm_unplug(dimm, MACHINE(ms));
out:
error_propagate(errp, local_err);
}
static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
@@ -3565,8 +3572,8 @@ static SpaprDimmState *spapr_recover_pending_dimm_state(SpaprMachineState *ms,
uint64_t addr_start, addr;
int i;
addr_start = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP,
&error_abort);
addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
&error_abort);
addr = addr_start;
for (i = 0; i < nr_lmbs; i++) {
@@ -3624,7 +3631,6 @@ static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
Error *local_err = NULL;
PCDIMMDevice *dimm = PC_DIMM(dev);
uint32_t nr_lmbs;
uint64_t size, addr_start, addr;
@@ -3640,11 +3646,7 @@ static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
&local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
&error_abort);
/*
* An existing pending dimm state for this DIMM means that there is an


@@ -187,8 +187,7 @@ static void spapr_unrealize_vcpu(PowerPCCPU *cpu, SpaprCpuCore *sc)
vmstate_unregister(NULL, &vmstate_spapr_cpu_state, cpu->machine_data);
}
spapr_irq_cpu_intc_destroy(SPAPR_MACHINE(qdev_get_machine()), cpu);
cpu_remove_sync(CPU(cpu));
object_unparent(OBJECT(cpu));
qdev_unrealize(DEVICE(cpu));
}
/*
@@ -213,18 +212,37 @@ static void spapr_cpu_core_reset_handler(void *opaque)
spapr_cpu_core_reset(opaque);
}
static void spapr_delete_vcpu(PowerPCCPU *cpu)
{
SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
cpu->machine_data = NULL;
g_free(spapr_cpu);
object_unparent(OBJECT(cpu));
}
static void spapr_cpu_core_unrealize(DeviceState *dev)
{
SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
CPUCore *cc = CPU_CORE(dev);
int i;
qemu_unregister_reset(spapr_cpu_core_reset_handler, sc);
for (i = 0; i < cc->nr_threads; i++) {
spapr_unrealize_vcpu(sc->threads[i], sc);
if (sc->threads[i]) {
/*
* Since this we can get here from the error path of
* spapr_cpu_core_realize(), make sure we only unrealize
* vCPUs that have already been realized.
*/
if (object_property_get_bool(OBJECT(sc->threads[i]), "realized",
&error_abort)) {
spapr_unrealize_vcpu(sc->threads[i], sc);
}
spapr_delete_vcpu(sc->threads[i]);
}
}
g_free(sc->threads);
qemu_unregister_reset(spapr_cpu_core_reset_handler, sc);
}
static bool spapr_realize_vcpu(PowerPCCPU *cpu, SpaprMachineState *spapr,
@@ -244,7 +262,7 @@ static bool spapr_realize_vcpu(PowerPCCPU *cpu, SpaprMachineState *spapr,
kvmppc_set_papr(cpu);
if (spapr_irq_cpu_intc_create(spapr, cpu, errp) < 0) {
cpu_remove_sync(CPU(cpu));
qdev_unrealize(DEVICE(cpu));
return false;
}
@@ -294,15 +312,6 @@ err:
return NULL;
}
static void spapr_delete_vcpu(PowerPCCPU *cpu, SpaprCpuCore *sc)
{
SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
cpu->machine_data = NULL;
g_free(spapr_cpu);
object_unparent(OBJECT(cpu));
}
static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
{
/* We don't use SPAPR_MACHINE() in order to exit gracefully if the user
@@ -313,39 +322,23 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
TYPE_SPAPR_MACHINE);
SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
CPUCore *cc = CPU_CORE(OBJECT(dev));
int i, j;
int i;
if (!spapr) {
error_setg(errp, TYPE_SPAPR_CPU_CORE " needs a pseries machine");
return;
}
sc->threads = g_new(PowerPCCPU *, cc->nr_threads);
qemu_register_reset(spapr_cpu_core_reset_handler, sc);
sc->threads = g_new0(PowerPCCPU *, cc->nr_threads);
for (i = 0; i < cc->nr_threads; i++) {
sc->threads[i] = spapr_create_vcpu(sc, i, errp);
if (!sc->threads[i]) {
goto err;
if (!sc->threads[i] ||
!spapr_realize_vcpu(sc->threads[i], spapr, sc, errp)) {
spapr_cpu_core_unrealize(dev);
return;
}
}
for (j = 0; j < cc->nr_threads; j++) {
if (!spapr_realize_vcpu(sc->threads[j], spapr, sc, errp)) {
goto err_unrealize;
}
}
qemu_register_reset(spapr_cpu_core_reset_handler, sc);
return;
err_unrealize:
while (--j >= 0) {
spapr_unrealize_vcpu(sc->threads[j], sc);
}
err:
while (--i >= 0) {
spapr_delete_vcpu(sc->threads[i], sc);
}
g_free(sc->threads);
}
static Property spapr_cpu_core_properties[] = {


@@ -586,7 +586,8 @@ static void spapr_dr_connector_class_init(ObjectClass *k, void *data)
dk->realize = realize;
dk->unrealize = unrealize;
/*
* Reason: it crashes FIXME find and document the real reason
* Reason: DR connector needs to be wired to either the machine or to a
* PHB in spapr_dr_connector_new().
*/
dk->user_creatable = false;
}


@@ -1000,10 +1000,22 @@ static void event_scan(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong args,
uint32_t nret, target_ulong rets)
{
int i;
if (nargs != 4 || nret != 1) {
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
for (i = 0; i < EVENT_CLASS_MAX; i++) {
if (rtas_event_log_contains(EVENT_CLASS_MASK(i))) {
const SpaprEventSource *source =
spapr_event_sources_get_source(spapr->event_sources, i);
g_assert(source->enabled);
qemu_irq_pulse(spapr_qirq(spapr, source->irq));
}
}
rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
}


@@ -89,7 +89,7 @@ bool spapr_nvdimm_validate(HotplugHandler *hotplug_dev, NVDIMMDevice *nvdimm,
}
void spapr_add_nvdimm(DeviceState *dev, uint64_t slot, Error **errp)
bool spapr_add_nvdimm(DeviceState *dev, uint64_t slot, Error **errp)
{
SpaprDrc *drc;
bool hotplugged = spapr_drc_hotplugged(dev);
@@ -98,25 +98,15 @@ void spapr_add_nvdimm(DeviceState *dev, uint64_t slot, Error **errp)
g_assert(drc);
if (!spapr_drc_attach(drc, dev, errp)) {
return;
return false;
}
if (hotplugged) {
spapr_hotplug_req_add_by_index(drc);
}
return true;
}
void spapr_create_nvdimm_dr_connectors(SpaprMachineState *spapr)
{
MachineState *machine = MACHINE(spapr);
int i;
for (i = 0; i < machine->ram_slots; i++) {
spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_PMEM, i);
}
}
static int spapr_dt_nvdimm(SpaprMachineState *spapr, void *fdt,
int parent_offset, NVDIMMDevice *nvdimm)
{


@@ -72,6 +72,6 @@ struct PCDIMMDeviceClass {
void pc_dimm_pre_plug(PCDIMMDevice *dimm, MachineState *machine,
const uint64_t *legacy_align, Error **errp);
void pc_dimm_plug(PCDIMMDevice *dimm, MachineState *machine, Error **errp);
void pc_dimm_plug(PCDIMMDevice *dimm, MachineState *machine);
void pc_dimm_unplug(PCDIMMDevice *dimm, MachineState *machine);
#endif


@@ -846,8 +846,7 @@ void spapr_hotplug_req_add_by_count_indexed(SpaprDrcType drc_type,
void spapr_hotplug_req_remove_by_count_indexed(SpaprDrcType drc_type,
uint32_t count, uint32_t index);
int spapr_hpt_shift_for_ramsize(uint64_t ramsize);
void spapr_reallocate_hpt(SpaprMachineState *spapr, int shift,
Error **errp);
int spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, Error **errp);
void spapr_clear_pending_events(SpaprMachineState *spapr);
void spapr_clear_pending_hotplug_events(SpaprMachineState *spapr);
int spapr_max_server_number(SpaprMachineState *spapr);


@@ -30,7 +30,6 @@ int spapr_pmem_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
void spapr_dt_persistent_memory(SpaprMachineState *spapr, void *fdt);
bool spapr_nvdimm_validate(HotplugHandler *hotplug_dev, NVDIMMDevice *nvdimm,
uint64_t size, Error **errp);
void spapr_add_nvdimm(DeviceState *dev, uint64_t slot, Error **errp);
void spapr_create_nvdimm_dr_connectors(SpaprMachineState *spapr);
bool spapr_add_nvdimm(DeviceState *dev, uint64_t slot, Error **errp);
#endif


@@ -615,7 +615,7 @@ enum {
#define FPSCR_VXCVI 8 /* Floating-point invalid operation exception (int) */
#define FPSCR_VE 7 /* Floating-point invalid operation exception enable */
#define FPSCR_OE 6 /* Floating-point overflow exception enable */
#define FPSCR_UE 5 /* Floating-point undeflow exception enable */
#define FPSCR_UE 5 /* Floating-point underflow exception enable */
#define FPSCR_ZE 4 /* Floating-point zero divide exception enable */
#define FPSCR_XE 3 /* Floating-point inexact exception enable */
#define FPSCR_NI 2 /* Floating-point non-IEEE mode */
@@ -2331,13 +2331,13 @@ enum {
/* Internal hardware exception sources */
PPC_INTERRUPT_DECR, /* Decrementer exception */
PPC_INTERRUPT_HDECR, /* Hypervisor decrementer exception */
PPC_INTERRUPT_PIT, /* Programmable inteval timer interrupt */
PPC_INTERRUPT_PIT, /* Programmable interval timer interrupt */
PPC_INTERRUPT_FIT, /* Fixed interval timer interrupt */
PPC_INTERRUPT_WDT, /* Watchdog timer interrupt */
PPC_INTERRUPT_CDOORBELL, /* Critical doorbell interrupt */
PPC_INTERRUPT_DOORBELL, /* Doorbell interrupt */
PPC_INTERRUPT_PERFM, /* Performance monitor interrupt */
PPC_INTERRUPT_HMI, /* Hypervisor Maintainance interrupt */
PPC_INTERRUPT_HMI, /* Hypervisor Maintenance interrupt */
PPC_INTERRUPT_HDOORBELL, /* Hypervisor Doorbell interrupt */
PPC_INTERRUPT_HVIRT, /* Hypervisor virtualization interrupt */
};


@@ -231,7 +231,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
}
/*
* Exception targetting modifiers
* Exception targeting modifiers
*
* LPES0 is supported on POWER7/8/9
* LPES1 is not supported (old iSeries mode)
@@ -1015,7 +1015,7 @@ static void ppc_hw_interrupt(CPUPPCState *env)
* This means we will incorrectly execute past the power management
* instruction instead of triggering a reset.
*
* It generally means a discrepancy between the wakup conditions in the
* It generally means a discrepancy between the wakeup conditions in the
* processor has_work implementation and the logic in this function.
*/
cpu_abort(env_cpu(env),
@@ -1191,7 +1191,7 @@ void helper_rfi(CPUPPCState *env)
void helper_rfid(CPUPPCState *env)
{
/*
* The architeture defines a number of rules for which bits can
* The architecture defines a number of rules for which bits can
* change but in practice, we handle this in hreg_store_msr()
* which will be called by do_rfi(), so there is no need to filter
* here


@@ -1804,7 +1804,7 @@ uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
/*
* VSX_ADD_SUB - VSX floating point add/subract
* VSX_ADD_SUB - VSX floating point add/subtract
* name - instruction mnemonic
* op - operation (add or sub)
* nels - number of elements (1, 2 or 4)


@@ -1,5 +1,5 @@
/*
* PowerPC interal definitions for qemu.
* PowerPC internal definitions for qemu.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public


@@ -487,7 +487,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
/*
* KVM-HV has transactional memory on POWER8 also without
* the KVM_CAP_PPC_HTM extension, so enable it here
* instead as long as it's availble to userspace on the
* instead as long as it's available to userspace on the
* host.
*/
if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
@@ -2683,7 +2683,7 @@ int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
}
int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
uint16_t n_valid, uint16_t n_invalid)
uint16_t n_valid, uint16_t n_invalid, Error **errp)
{
struct kvm_get_htab_header *buf;
size_t chunksize = sizeof(*buf) + n_valid * HASH_PTE_SIZE_64;
@@ -2698,14 +2698,13 @@ int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
rc = write(fd, buf, chunksize);
if (rc < 0) {
fprintf(stderr, "Error writing KVM hash table: %s\n",
strerror(errno));
return rc;
error_setg_errno(errp, errno, "Error writing the KVM hash table");
return -errno;
}
if (rc != chunksize) {
/* We should never get a short write on a single chunk */
fprintf(stderr, "Short write, restoring KVM hash table\n");
return -1;
error_setg(errp, "Short write while restoring the KVM hash table");
return -ENOSPC;
}
return 0;
}


@@ -56,7 +56,7 @@ int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function);
int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp);
int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns);
int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
uint16_t n_valid, uint16_t n_invalid);
uint16_t n_valid, uint16_t n_invalid, Error **errp);
void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n);
void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1);
bool kvmppc_has_cap_fixup_hcalls(void);
@@ -316,7 +316,8 @@ static inline int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize,
}
static inline int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
uint16_t n_valid, uint16_t n_invalid)
uint16_t n_valid, uint16_t n_invalid,
Error **errp)
{
abort();
}


@@ -337,7 +337,7 @@ static int cpu_post_load(void *opaque, int version_id)
/*
* If we're operating in compat mode, we should be ok as long as
* the destination supports the same compatiblity mode.
* the destination supports the same compatibility mode.
*
* Otherwise, however, we require that the destination has exactly
* the same CPU model as the source.


@@ -883,7 +883,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
/*
* Note on LPCR usage: 970 uses HID4, but our special variant of
* store_spr copies relevant fields into env->spr[SPR_LPCR].
* Similarily we filter unimplemented bits when storing into LPCR
* Similarly we filter unimplemented bits when storing into LPCR
* depending on the MMU version. This code can thus just use the
* LPCR "as-is".
*/


@@ -179,7 +179,7 @@ static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
}
/* Compute access rights */
access = pp_check(ctx->key, pp, ctx->nx);
/* Keep the matching PTE informations */
/* Keep the matching PTE information */
ctx->raddr = pte1;
ctx->prot = access;
ret = check_prot(ctx->prot, rw, type);
@@ -2176,7 +2176,7 @@ void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
env->sr[srnum] = value;
/*
* Invalidating 256MB of virtual memory in 4kB pages is way
* longer than flusing the whole TLB.
* longer than flushing the whole TLB.
*/
#if !defined(FLUSH_ALL_TLBS) && 0
{


@@ -792,7 +792,7 @@ static void gen_spr_generic(CPUPPCState *env)
&spr_read_xer, &spr_write_xer,
&spr_read_xer, &spr_write_xer,
0x00000000);
/* Branch contol */
/* Branch control */
spr_register(env, SPR_LR, "LR",
&spr_read_lr, &spr_write_lr,
&spr_read_lr, &spr_write_lr,
@@ -10328,6 +10328,8 @@ static void ppc_cpu_unrealize(DeviceState *dev)
pcc->parent_unrealize(dev);
cpu_remove_sync(CPU(cpu));
for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) {
if (cpu->opcodes[i] == &invalid_handler) {
continue;