Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-2.10-20170711' into staging

ppc patch queue 2017-07-11

  * Several minor cleanups from Greg Kurz
  * Fix for migration of pseries-2.7 and earlier machine types
  * More reworking of the DRC hotplug code, fixing several problems
    though there are still more to go
  * Fixes for CPU family / alias handling on POWER9
  * Preliminary patches for POWER9 XIVE (new interrupt controller)
    support
  * Assorted other fixes

# gpg: Signature made Tue 11 Jul 2017 05:35:16 BST
# gpg:                using RSA key 0x6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>"
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>"
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>"
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>"
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E  87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-2.10-20170711:
  spapr: populate device tree depending on XIVE_EXPLOIT option
  spapr: introduce the XIVE_EXPLOIT option in CAS
  ppc/kvm: have the "family" CPU alias to point to TYPE_HOST_POWERPC_CPU
  spapr: Only report host/guest IOMMU page size mismatches on KVM
  spapr: fix memory hotplug error path
  target/ppc: Add debug function for radix mmu translation
  target/ppc: Refactor tcg radix mmu code
  spapr: Use unplug_request for PCI hot unplug
  spapr: Remove unnecessary differences between hotplug and coldplug paths
  spapr: Add DRC release method
  spapr: Uniform DRC reset paths
  spapr: Leave DR-indicator management to the guest
  target-ppc: SPR_BOOKE_ESR not set on FP exceptions
  spapr: fix migration to pseries machine < 2.8
  spapr: fix bogus function name in comment
  spapr: refresh "platform-specific" hcalls comment
  spapr: make spapr_populate_hotplug_cpu_dt() static

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit aa916e409c (Peter Maydell, 2017-07-11 16:34:09 +01:00)
11 changed files with 145 additions and 122 deletions

hw/ppc/spapr.c

@@ -778,6 +778,11 @@ static int spapr_dt_cas_updates(sPAPRMachineState *spapr, void *fdt,
}
}
/* /interrupt controller */
if (!spapr_ovec_test(ov5_updates, OV5_XIVE_EXPLOIT)) {
spapr_dt_xics(xics_max_server_number(), fdt, PHANDLE_XICP);
}
offset = fdt_path_offset(fdt, "/chosen");
if (offset < 0) {
offset = fdt_add_subnode(fdt, 0, "chosen");
@@ -801,7 +806,7 @@ int spapr_h_cas_compose_response(sPAPRMachineState *spapr,
size -= sizeof(hdr);
/* Create sceleton */
/* Create skeleton */
fdt_skel = g_malloc0(size);
_FDT((fdt_create(fdt_skel, size)));
_FDT((fdt_begin_node(fdt_skel, "")));
@@ -910,7 +915,8 @@ static void spapr_dt_ov5_platform_support(void *fdt, int chosen)
{
PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
char val[2 * 3] = {
char val[2 * 4] = {
23, 0x00, /* Xive mode: 0 = legacy (as in ISA 2.7), 1 = Exploitation */
24, 0x00, /* Hash/Radix, filled in below. */
25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
26, 0x40, /* Radix options: GTSE == yes. */
@@ -918,19 +924,19 @@ static void spapr_dt_ov5_platform_support(void *fdt, int chosen)
if (kvm_enabled()) {
if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
val[1] = 0x80; /* OV5_MMU_BOTH */
val[3] = 0x80; /* OV5_MMU_BOTH */
} else if (kvmppc_has_cap_mmu_radix()) {
val[1] = 0x40; /* OV5_MMU_RADIX_300 */
val[3] = 0x40; /* OV5_MMU_RADIX_300 */
} else {
val[1] = 0x00; /* Hash */
val[3] = 0x00; /* Hash */
}
} else {
if (first_ppc_cpu->env.mmu_model & POWERPC_MMU_V3) {
/* V3 MMU supports both hash and radix (with dynamic switching) */
val[1] = 0xC0;
val[3] = 0xC0;
} else {
/* Otherwise we can only do hash */
val[1] = 0x00;
val[3] = 0x00;
}
}
_FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
@@ -1068,9 +1074,6 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr,
_FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
_FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));
/* /interrupt controller */
spapr_dt_xics(xics_max_server_number(), fdt, PHANDLE_XICP);
ret = spapr_populate_memory(spapr, fdt);
if (ret < 0) {
error_report("couldn't setup memory nodes in fdt");
@@ -1967,24 +1970,6 @@ static void spapr_boot_set(void *opaque, const char *boot_device,
machine->boot_order = g_strdup(boot_device);
}
/*
* Reset routine for LMB DR devices.
*
* Unlike PCI DR devices, LMB DR devices explicitly register this reset
* routine. Reset for PCI DR devices will be handled by PHB reset routine
* when it walks all its children devices. LMB devices reset occurs
* as part of spapr_ppc_reset().
*/
static void spapr_drc_reset(void *opaque)
{
sPAPRDRConnector *drc = opaque;
DeviceState *d = DEVICE(drc);
if (d) {
device_reset(d);
}
}
static void spapr_create_lmb_dr_connectors(sPAPRMachineState *spapr)
{
MachineState *machine = MACHINE(spapr);
@@ -1993,13 +1978,11 @@ static void spapr_create_lmb_dr_connectors(sPAPRMachineState *spapr)
int i;
for (i = 0; i < nr_lmbs; i++) {
sPAPRDRConnector *drc;
uint64_t addr;
addr = i * lmb_size + spapr->hotplug_memory.base;
drc = spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
addr/lmb_size);
qemu_register_reset(spapr_drc_reset, drc);
spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
addr / lmb_size);
}
}
@@ -2093,11 +2076,8 @@ static void spapr_init_cpus(sPAPRMachineState *spapr)
int core_id = i * smp_threads;
if (mc->has_hotpluggable_cpus) {
sPAPRDRConnector *drc =
spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
(core_id / smp_threads) * smt);
qemu_register_reset(spapr_drc_reset, drc);
spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
(core_id / smp_threads) * smt);
}
if (i < boot_cores_nr) {
@@ -2624,6 +2604,7 @@ static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
int i, fdt_offset, fdt_size;
void *fdt;
uint64_t addr = addr_start;
Error *local_err = NULL;
for (i = 0; i < nr_lmbs; i++) {
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
@@ -2634,7 +2615,18 @@ static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
fdt_offset = spapr_populate_memory_node(fdt, node, addr,
SPAPR_MEMORY_BLOCK_SIZE);
spapr_drc_attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, errp);
spapr_drc_attach(drc, dev, fdt, fdt_offset, &local_err);
if (local_err) {
while (addr > addr_start) {
addr -= SPAPR_MEMORY_BLOCK_SIZE;
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
addr / SPAPR_MEMORY_BLOCK_SIZE);
spapr_drc_detach(drc, dev, NULL);
}
g_free(fdt);
error_propagate(errp, local_err);
return;
}
addr += SPAPR_MEMORY_BLOCK_SIZE;
}
/* send hotplug notification to the
@@ -2674,14 +2666,20 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
addr = object_property_get_uint(OBJECT(dimm),
PC_DIMM_ADDR_PROP, &local_err);
if (local_err) {
pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
goto out;
goto out_unplug;
}
spapr_add_lmbs(dev, addr, size, node,
spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT),
&error_abort);
&local_err);
if (local_err) {
goto out_unplug;
}
return;
out_unplug:
pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
out:
error_propagate(errp, local_err);
}
@@ -2863,8 +2861,8 @@ out:
error_propagate(errp, local_err);
}
void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
sPAPRMachineState *spapr)
static void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
sPAPRMachineState *spapr)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
DeviceClass *dc = DEVICE_GET_CLASS(cs);
@@ -2979,17 +2977,10 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
g_assert(drc || !mc->has_hotpluggable_cpus);
/*
* Setup CPU DT entries only for hotplugged CPUs. For boot time or
* coldplugged CPUs DT entries are setup in spapr_build_fdt().
*/
if (dev->hotplugged) {
fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
}
fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
if (drc) {
spapr_drc_attach(drc, dev, fdt, fdt_offset, !dev->hotplugged,
&local_err);
spapr_drc_attach(drc, dev, fdt, fdt_offset, &local_err);
if (local_err) {
g_free(fdt);
error_propagate(errp, local_err);

hw/ppc/spapr_drc.c

@@ -340,7 +340,7 @@ static void prop_get_fdt(Object *obj, Visitor *v, const char *name,
}
void spapr_drc_attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
int fdt_start_offset, bool coldplug, Error **errp)
int fdt_start_offset, Error **errp)
{
trace_spapr_drc_attach(spapr_drc_index(drc));
@@ -351,14 +351,11 @@ void spapr_drc_attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_PCI) {
g_assert(drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_USABLE);
}
g_assert(fdt || coldplug);
drc->dr_indicator = SPAPR_DR_INDICATOR_ACTIVE;
g_assert(fdt);
drc->dev = d;
drc->fdt = fdt;
drc->fdt_start_offset = fdt_start_offset;
drc->configured = coldplug;
if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->awaiting_allocation = true;
@@ -372,24 +369,9 @@ void spapr_drc_attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
static void spapr_drc_release(sPAPRDRConnector *drc)
{
drc->dr_indicator = SPAPR_DR_INDICATOR_INACTIVE;
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
/* Calling release callbacks based on spapr_drc_type(drc). */
switch (spapr_drc_type(drc)) {
case SPAPR_DR_CONNECTOR_TYPE_CPU:
spapr_core_release(drc->dev);
break;
case SPAPR_DR_CONNECTOR_TYPE_PCI:
spapr_phb_remove_pci_device_cb(drc->dev);
break;
case SPAPR_DR_CONNECTOR_TYPE_LMB:
spapr_lmb_release(drc->dev);
break;
case SPAPR_DR_CONNECTOR_TYPE_PHB:
case SPAPR_DR_CONNECTOR_TYPE_VIO:
default:
g_assert(false);
}
drck->release(drc->dev);
drc->awaiting_release = false;
g_free(drc->fdt);
@@ -430,9 +412,9 @@ static bool release_pending(sPAPRDRConnector *drc)
return drc->awaiting_release;
}
static void reset(DeviceState *d)
static void drc_reset(void *opaque)
{
sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d);
sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(opaque);
trace_spapr_drc_reset(spapr_drc_index(drc));
@@ -454,12 +436,14 @@ static void reset(DeviceState *d)
if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_USABLE;
}
drc->dr_indicator = SPAPR_DR_INDICATOR_ACTIVE;
} else {
/* Otherwise device is absent, but might be hotplugged */
drc->isolation_state = SPAPR_DR_ISOLATION_STATE_ISOLATED;
if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_UNUSABLE;
}
drc->dr_indicator = SPAPR_DR_INDICATOR_INACTIVE;
}
}
@@ -540,6 +524,7 @@ static void realize(DeviceState *d, Error **errp)
g_free(child_name);
vmstate_register(DEVICE(drc), spapr_drc_index(drc), &vmstate_spapr_drc,
drc);
qemu_register_reset(drc_reset, drc);
trace_spapr_drc_realize_complete(spapr_drc_index(drc));
}
@@ -598,7 +583,6 @@ static void spapr_dr_connector_class_init(ObjectClass *k, void *data)
DeviceClass *dk = DEVICE_CLASS(k);
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_CLASS(k);
dk->reset = reset;
dk->realize = realize;
dk->unrealize = unrealize;
drck->release_pending = release_pending;
@@ -633,6 +617,7 @@ static void spapr_drc_cpu_class_init(ObjectClass *k, void *data)
drck->typeshift = SPAPR_DR_CONNECTOR_TYPE_SHIFT_CPU;
drck->typename = "CPU";
drck->drc_name_prefix = "CPU ";
drck->release = spapr_core_release;
}
static void spapr_drc_pci_class_init(ObjectClass *k, void *data)
@@ -642,6 +627,7 @@ static void spapr_drc_pci_class_init(ObjectClass *k, void *data)
drck->typeshift = SPAPR_DR_CONNECTOR_TYPE_SHIFT_PCI;
drck->typename = "28";
drck->drc_name_prefix = "C";
drck->release = spapr_phb_remove_pci_device_cb;
}
static void spapr_drc_lmb_class_init(ObjectClass *k, void *data)
@@ -651,6 +637,7 @@ static void spapr_drc_lmb_class_init(ObjectClass *k, void *data)
drck->typeshift = SPAPR_DR_CONNECTOR_TYPE_SHIFT_LMB;
drck->typename = "MEM";
drck->drc_name_prefix = "LMB ";
drck->release = spapr_lmb_release;
}
static const TypeInfo spapr_dr_connector_info = {

hw/ppc/spapr_pci.c

@@ -1388,8 +1388,8 @@ static uint32_t spapr_phb_get_pci_drc_index(sPAPRPHBState *phb,
return spapr_drc_index(drc);
}
static void spapr_phb_hot_plug_child(HotplugHandler *plug_handler,
DeviceState *plugged_dev, Error **errp)
static void spapr_pci_plug(HotplugHandler *plug_handler,
DeviceState *plugged_dev, Error **errp)
{
sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
PCIDevice *pdev = PCI_DEVICE(plugged_dev);
@@ -1435,8 +1435,7 @@ static void spapr_phb_hot_plug_child(HotplugHandler *plug_handler,
goto out;
}
spapr_drc_attach(drc, DEVICE(pdev), fdt, fdt_start_offset,
!plugged_dev->hotplugged, &local_err);
spapr_drc_attach(drc, DEVICE(pdev), fdt, fdt_start_offset, &local_err);
if (local_err) {
goto out;
}
@@ -1470,8 +1469,8 @@ out:
}
}
static void spapr_phb_hot_unplug_child(HotplugHandler *plug_handler,
DeviceState *plugged_dev, Error **errp)
static void spapr_pci_unplug_request(HotplugHandler *plug_handler,
DeviceState *plugged_dev, Error **errp)
{
sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
PCIDevice *pdev = PCI_DEVICE(plugged_dev);
@@ -1486,6 +1485,7 @@ static void spapr_phb_hot_unplug_child(HotplugHandler *plug_handler,
}
g_assert(drc);
g_assert(drc->dev == plugged_dev);
drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
if (!drck->release_pending(drc)) {
@@ -1745,7 +1745,8 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
}
/* DMA setup */
if ((sphb->page_size_mask & qemu_getrampagesize()) == 0) {
if (((sphb->page_size_mask & qemu_getrampagesize()) == 0)
&& kvm_enabled()) {
error_report("System page size 0x%lx is not enabled in page_size_mask "
"(0x%"PRIx64"). Performance may be slow",
qemu_getrampagesize(), sphb->page_size_mask);
@@ -1873,20 +1874,6 @@ static void spapr_pci_pre_save(void *opaque)
gpointer key, value;
int i;
g_free(sphb->msi_devs);
sphb->msi_devs = NULL;
sphb->msi_devs_num = g_hash_table_size(sphb->msi);
if (!sphb->msi_devs_num) {
return;
}
sphb->msi_devs = g_malloc(sphb->msi_devs_num * sizeof(spapr_pci_msi_mig));
g_hash_table_iter_init(&iter, sphb->msi);
for (i = 0; g_hash_table_iter_next(&iter, &key, &value); ++i) {
sphb->msi_devs[i].key = *(uint32_t *) key;
sphb->msi_devs[i].value = *(spapr_pci_msi *) value;
}
if (sphb->pre_2_8_migration) {
sphb->mig_liobn = sphb->dma_liobn[0];
sphb->mig_mem_win_addr = sphb->mem_win_addr;
@@ -1900,6 +1887,20 @@ static void spapr_pci_pre_save(void *opaque)
sphb->mig_mem_win_size += sphb->mem64_win_size;
}
}
g_free(sphb->msi_devs);
sphb->msi_devs = NULL;
sphb->msi_devs_num = g_hash_table_size(sphb->msi);
if (!sphb->msi_devs_num) {
return;
}
sphb->msi_devs = g_malloc(sphb->msi_devs_num * sizeof(spapr_pci_msi_mig));
g_hash_table_iter_init(&iter, sphb->msi);
for (i = 0; g_hash_table_iter_next(&iter, &key, &value); ++i) {
sphb->msi_devs[i].key = *(uint32_t *) key;
sphb->msi_devs[i].value = *(spapr_pci_msi *) value;
}
}
static int spapr_pci_post_load(void *opaque, int version_id)
@@ -1973,8 +1974,8 @@ static void spapr_phb_class_init(ObjectClass *klass, void *data)
/* Supported by TYPE_SPAPR_MACHINE */
dc->user_creatable = true;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
hp->plug = spapr_phb_hot_plug_child;
hp->unplug = spapr_phb_hot_unplug_child;
hp->plug = spapr_pci_plug;
hp->unplug_request = spapr_pci_unplug_request;
}
static const TypeInfo spapr_phb_info = {

include/hw/ppc/spapr.h

@@ -377,9 +377,8 @@ struct sPAPRMachineState {
* as well.
*
* We also need some hcalls which are specific to qemu / KVM-on-POWER.
* So far we just need one for H_RTAS, but in future we'll need more
* for extensions like virtio. We put those into the 0xf000-0xfffc
* range which is reserved by PAPR for "platform-specific" hcalls.
* We put those into the 0xf000-0xfffc range which is reserved by PAPR
* for "platform-specific" hcalls.
*/
#define KVMPPC_HCALL_BASE 0xf000
#define KVMPPC_H_RTAS (KVMPPC_HCALL_BASE + 0x0)
@@ -640,8 +639,6 @@ void spapr_hotplug_req_add_by_count_indexed(sPAPRDRConnectorType drc_type,
void spapr_hotplug_req_remove_by_count_indexed(sPAPRDRConnectorType drc_type,
uint32_t count, uint32_t index);
void spapr_cpu_parse_features(sPAPRMachineState *spapr);
void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
sPAPRMachineState *spapr);
/* CPU and LMB DRC release callbacks. */
void spapr_core_release(DeviceState *dev);

include/hw/ppc/spapr_drc.h

@@ -217,6 +217,7 @@ typedef struct sPAPRDRConnectorClass {
sPAPRDREntitySense (*dr_entity_sense)(sPAPRDRConnector *drc);
uint32_t (*isolate)(sPAPRDRConnector *drc);
uint32_t (*unisolate)(sPAPRDRConnector *drc);
void (*release)(DeviceState *dev);
/* QEMU interfaces for managing hotplug operations */
bool (*release_pending)(sPAPRDRConnector *drc);
@@ -233,7 +234,7 @@ int spapr_drc_populate_dt(void *fdt, int fdt_offset, Object *owner,
uint32_t drc_type_mask);
void spapr_drc_attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
int fdt_start_offset, bool coldplug, Error **errp);
int fdt_start_offset, Error **errp);
void spapr_drc_detach(sPAPRDRConnector *drc, DeviceState *d, Error **errp);
#endif /* HW_SPAPR_DRC_H */

include/hw/ppc/spapr_ovec.h

@@ -50,6 +50,7 @@ typedef struct sPAPROptionVector sPAPROptionVector;
#define OV5_DRCONF_MEMORY OV_BIT(2, 2)
#define OV5_FORM1_AFFINITY OV_BIT(5, 0)
#define OV5_HP_EVT OV_BIT(6, 5)
#define OV5_XIVE_EXPLOIT OV_BIT(23, 7)
/* ISA 3.00 MMU features: */
#define OV5_MMU_BOTH OV_BIT(24, 0) /* Radix and hash */

target/ppc/excp_helper.c

@@ -283,6 +283,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
* precise in the MSR.
*/
msr |= 0x00100000;
env->spr[SPR_BOOKE_ESR] = ESR_FP;
break;
case POWERPC_EXCP_INVAL:
LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);

target/ppc/kvm.c

@@ -2445,6 +2445,7 @@ static int kvm_ppc_register_host_cpu_type(void)
.class_init = kvmppc_host_cpu_class_init,
};
PowerPCCPUClass *pvr_pcc;
ObjectClass *oc;
DeviceClass *dc;
int i;
@@ -2455,6 +2456,9 @@ static int kvm_ppc_register_host_cpu_type(void)
type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
type_register(&type_info);
oc = object_class_by_name(type_info.name);
g_assert(oc);
#if defined(TARGET_PPC64)
type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, "host");
type_info.parent = TYPE_SPAPR_CPU_CORE,
@@ -2474,7 +2478,6 @@ static int kvm_ppc_register_host_cpu_type(void)
dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
if (strcmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
ObjectClass *oc = OBJECT_CLASS(pvr_pcc);
char *suffix;
ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));

target/ppc/mmu-radix64.c

@@ -147,11 +147,10 @@ static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte,
}
}
static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, int rwx, vaddr eaddr,
static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr,
uint64_t base_addr, uint64_t nls,
hwaddr *raddr, int *psize,
int *fault_cause, int *prot,
hwaddr *pte_addr)
int *fault_cause, hwaddr *pte_addr)
{
CPUState *cs = CPU(cpu);
uint64_t index, pde;
@@ -177,10 +176,6 @@ static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, int rwx, vaddr eaddr,
uint64_t rpn = pde & R_PTE_RPN;
uint64_t mask = (1UL << *psize) - 1;
if (ppc_radix64_check_prot(cpu, rwx, pde, fault_cause, prot)) {
return 0; /* Protection Denied Access */
}
/* Or high bits of rpn and low bits to ea to form whole real addr */
*raddr = (rpn & ~mask) | (eaddr & mask);
*pte_addr = base_addr + (index * sizeof(pde));
@@ -188,9 +183,8 @@ static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, int rwx, vaddr eaddr,
}
/* Next Level of Radix Tree */
return ppc_radix64_walk_tree(cpu, rwx, eaddr, pde & R_PDE_NLB,
pde & R_PDE_NLS, raddr, psize,
fault_cause, prot, pte_addr);
return ppc_radix64_walk_tree(cpu, eaddr, pde & R_PDE_NLB, pde & R_PDE_NLS,
raddr, psize, fault_cause, pte_addr);
}
int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
@@ -241,11 +235,11 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
/* Walk Radix Tree from Process Table Entry to Convert EA to RA */
page_size = PRTBE_R_GET_RTS(prtbe0);
pte = ppc_radix64_walk_tree(cpu, rwx, eaddr & R_EADDR_MASK,
pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
&raddr, &page_size, &fault_cause, &prot,
&pte_addr);
if (!pte) {
&raddr, &page_size, &fault_cause, &pte_addr);
if (!pte || ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, &prot)) {
/* Couldn't get pte or access denied due to protection */
ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
return 1;
}
@@ -257,3 +251,48 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
prot, mmu_idx, 1UL << page_size);
return 0;
}
hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
PPCVirtualHypervisorClass *vhc =
PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
hwaddr raddr, pte_addr;
uint64_t lpid = 0, pid = 0, offset, size, patbe, prtbe0, pte;
int page_size, fault_cause = 0;
/* Handle Real Mode */
if (msr_dr == 0) {
/* In real mode top 4 effective addr bits (mostly) ignored */
return eaddr & 0x0FFFFFFFFFFFFFFFULL;
}
/* Virtual Mode Access - get the fully qualified address */
if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
return -1;
}
/* Get Process Table */
patbe = vhc->get_patbe(cpu->vhyp);
/* Index Process Table by PID to Find Corresponding Process Table Entry */
offset = pid * sizeof(struct prtb_entry);
size = 1ULL << ((patbe & PATBE1_R_PRTS) + 12);
if (offset >= size) {
/* offset exceeds size of the process table */
return -1;
}
prtbe0 = ldq_phys(cs->as, (patbe & PATBE1_R_PRTB) + offset);
/* Walk Radix Tree from Process Table Entry to Convert EA to RA */
page_size = PRTBE_R_GET_RTS(prtbe0);
pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
&raddr, &page_size, &fault_cause, &pte_addr);
if (!pte) {
return -1;
}
return raddr & TARGET_PAGE_MASK;
}

target/ppc/mmu-radix64.h

@@ -46,6 +46,7 @@
int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
int mmu_idx);
hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
static inline int ppc_radix64_get_prot_eaa(uint64_t pte)
{

target/ppc/mmu_helper.c

@@ -30,6 +30,7 @@
#include "helper_regs.h"
#include "qemu/error-report.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"
//#define DEBUG_MMU
//#define DEBUG_BATS
@@ -1432,7 +1433,7 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return ppc_hash64_get_phys_page_debug(cpu, addr);
case POWERPC_MMU_VER_3_00:
if (ppc64_radix_guest(ppc_env_get_cpu(env))) {
/* TODO - Unsupported */
return ppc_radix64_get_phys_page_debug(cpu, addr);
} else {
return ppc_hash64_get_phys_page_debug(cpu, addr);
}