Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-4.0-20190329' into staging

ppc patch queue 2019-03-29

Here's a set of bugfixes for ppc, aimed at qemu-4.0 during hard freeze.

We have one cleanup that's not strictly a bugfix, but will avoid an
ugly external interface making it to a released version.

We have one change to generic code that tweaks the semantics of
qemu_getrampagesize(), which fixes a bug for ppc.  This does have a
possible impact on s390x, which uses this function for a different
purpose.  I've discussed this with David Hildenbrand and Igor Mammedov,
however, and we think it won't immediately break anything, due to some
existing bugs in the s390 usage.  David H will be following up with
some s390 fixes in that area.
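
For reference, here's a minimal sketch of the tweaked semantics, using only
identifiers that appear in the exec.c hunk further down (so a sketch, not the
authoritative change): a backend's page size now only counts towards
qemu_getrampagesize() when the backend is actually mapped, so a memory
backend that is defined but not used anywhere no longer influences the
result.

#include "qemu/osdep.h"
#include "qom/object.h"
#include "sysemu/hostmem.h"

/* Per-backend callback run over the children of /objects (sketch only,
 * assuming the QEMU 4.0 QOM/hostmem APIs). */
static int find_max_supported_pagesize(Object *obj, void *opaque)
{
    long *hpsize_min = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        HostMemoryBackend *backend = MEMORY_BACKEND(obj);
        long hpsize = host_memory_backend_pagesize(backend);

        /* Unmapped (unused) backends are ignored from now on. */
        if (host_memory_backend_is_mapped(backend) && hpsize < *hpsize_min) {
            *hpsize_min = hpsize;
        }
    }
    return 0;
}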

# gpg: Signature made Fri 29 Mar 2019 03:27:49 GMT
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E  87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-4.0-20190329:
  exec: Only count mapped memory backends for qemu_getrampagesize()
  spapr/irq: Add XIVE sanity checks on non-P9 machines
  spapr: Simplify handling of host-serial and host-model values
  target/ppc: Fix QEMU crash with stxsdx
  target/ppc: Improve comment of bcctr used for spectre v2 mitigation
  target/ppc: Consolidate 64-bit server processor detection in a helper
  target/ppc: Enable "decrement and test CTR" version of bcctr
  target/ppc: Fix TCG temporary leaks in gen_bcond()

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Commit 94c01767aa by Peter Maydell, 2019-03-29 09:36:29 +00:00
9 changed files with 131 additions and 68 deletions

exec.c

@@ -1692,9 +1692,10 @@ static int find_max_supported_pagesize(Object *obj, void *opaque)
long *hpsize_min = opaque;
if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
long hpsize = host_memory_backend_pagesize(MEMORY_BACKEND(obj));
HostMemoryBackend *backend = MEMORY_BACKEND(obj);
long hpsize = host_memory_backend_pagesize(backend);
if (hpsize < *hpsize_min) {
if (host_memory_backend_is_mapped(backend) && (hpsize < *hpsize_min)) {
*hpsize_min = hpsize;
}
}

hw/ppc/ppc.c

@@ -1101,7 +1101,7 @@ clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
tb_env = g_malloc0(sizeof(ppc_tb_t));
env->tb_env = tb_env;
tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
if (env->insns_flags & PPC_SEGMENT_64B) {
if (is_book3s_arch2x(env)) {
/* All Book3S 64bit CPUs implement level based DEC logic */
tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
}

hw/ppc/spapr.c

@@ -1252,38 +1252,8 @@ static void *spapr_build_fdt(SpaprMachineState *spapr)
_FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)"));
_FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries"));
/*
* Add info to guest to indentify which host is it being run on
* and what is the uuid of the guest
*/
if (spapr->host_model && !g_str_equal(spapr->host_model, "none")) {
if (g_str_equal(spapr->host_model, "passthrough")) {
/* -M host-model=passthrough */
if (kvmppc_get_host_model(&buf)) {
_FDT(fdt_setprop_string(fdt, 0, "host-model", buf));
g_free(buf);
}
} else {
/* -M host-model=<user-string> */
_FDT(fdt_setprop_string(fdt, 0, "host-model", spapr->host_model));
}
}
if (spapr->host_serial && !g_str_equal(spapr->host_serial, "none")) {
if (g_str_equal(spapr->host_serial, "passthrough")) {
/* -M host-serial=passthrough */
if (kvmppc_get_host_serial(&buf)) {
_FDT(fdt_setprop_string(fdt, 0, "host-serial", buf));
g_free(buf);
}
} else {
/* -M host-serial=<user-string> */
_FDT(fdt_setprop_string(fdt, 0, "host-serial", spapr->host_serial));
}
}
/* Guest UUID & Name*/
buf = qemu_uuid_unparse_strdup(&qemu_uuid);
_FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf));
if (qemu_uuid_set) {
_FDT(fdt_setprop_string(fdt, 0, "system-id", buf));
@@ -1295,6 +1265,21 @@ static void *spapr_build_fdt(SpaprMachineState *spapr)
qemu_get_vm_name()));
}
/* Host Model & Serial Number */
if (spapr->host_model) {
_FDT(fdt_setprop_string(fdt, 0, "host-model", spapr->host_model));
} else if (smc->broken_host_serial_model && kvmppc_get_host_model(&buf)) {
_FDT(fdt_setprop_string(fdt, 0, "host-model", buf));
g_free(buf);
}
if (spapr->host_serial) {
_FDT(fdt_setprop_string(fdt, 0, "host-serial", spapr->host_serial));
} else if (smc->broken_host_serial_model && kvmppc_get_host_serial(&buf)) {
_FDT(fdt_setprop_string(fdt, 0, "host-serial", buf));
g_free(buf);
}
_FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
_FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));
@@ -2795,13 +2780,7 @@ static void spapr_machine_init(MachineState *machine)
/* advertise XIVE on POWER9 machines */
if (spapr->irq->ov5 & (SPAPR_OV5_XIVE_EXPLOIT | SPAPR_OV5_XIVE_BOTH)) {
if (ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00,
0, spapr->max_compat_pvr)) {
spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT);
} else if (spapr->irq->ov5 & SPAPR_OV5_XIVE_EXPLOIT) {
error_report("XIVE-only machines require a POWER9 CPU");
exit(1);
}
spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT);
}
/* init CPUs */
@@ -3352,12 +3331,12 @@ static void spapr_instance_init(Object *obj)
spapr_get_host_model, spapr_set_host_model,
&error_abort);
object_property_set_description(obj, "host-model",
"Set host's model-id to use - none|passthrough|string", &error_abort);
"Host model to advertise in guest device tree", &error_abort);
object_property_add_str(obj, "host-serial",
spapr_get_host_serial, spapr_set_host_serial,
&error_abort);
object_property_set_description(obj, "host-serial",
"Set host's system-id to use - none|passthrough|string", &error_abort);
"Host serial number to advertise in guest device tree", &error_abort);
}
static void spapr_machine_finalizefn(Object *obj)
@@ -4381,18 +4360,14 @@ DEFINE_SPAPR_MACHINE(4_0, "4.0", true);
static void spapr_machine_3_1_class_options(MachineClass *mc)
{
SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
static GlobalProperty compat[] = {
{ TYPE_SPAPR_MACHINE, "host-model", "passthrough" },
{ TYPE_SPAPR_MACHINE, "host-serial", "passthrough" },
};
spapr_machine_4_0_class_options(mc);
compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len);
compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0");
smc->update_dt_enabled = false;
smc->dr_phb_enabled = false;
smc->broken_host_serial_model = true;
smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN;
smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN;
smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN;

hw/ppc/spapr_irq.c

@@ -16,6 +16,7 @@
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xics.h"
#include "hw/ppc/xics_spapr.h"
#include "cpu-models.h"
#include "sysemu/kvm.h"
#include "trace.h"
@@ -582,12 +583,55 @@ SpaprIrq spapr_irq_dual = {
.get_nodename = spapr_irq_get_nodename_dual,
};
static void spapr_irq_check(SpaprMachineState *spapr, Error **errp)
{
MachineState *machine = MACHINE(spapr);
/*
* Sanity checks on non-P9 machines. On these, XIVE is not
* advertised, see spapr_dt_ov5_platform_support()
*/
if (!ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00,
0, spapr->max_compat_pvr)) {
/*
* If the 'dual' interrupt mode is selected, force XICS as CAS
* negotiation is useless.
*/
if (spapr->irq == &spapr_irq_dual) {
spapr->irq = &spapr_irq_xics;
return;
}
/*
* Non-P9 machines using only XIVE is a bogus setup. We have two
* scenarios to take into account because of the compat mode:
*
* 1. POWER7/8 machines should fail to init later on when creating
* the XIVE interrupt presenters because a POWER9 exception
* model is required.
* 2. POWER9 machines using the POWER8 compat mode won't fail and
* will let the OS boot with a partial XIVE setup : DT
* properties but no hcalls.
*
* To cover both and not confuse the OS, add an early failure in
* QEMU.
*/
if (spapr->irq == &spapr_irq_xive) {
error_setg(errp, "XIVE-only machines require a POWER9 CPU");
return;
}
}
}
/*
* sPAPR IRQ frontend routines for devices
*/
void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
{
MachineState *machine = MACHINE(spapr);
Error *local_err = NULL;
if (machine_kernel_irqchip_split(machine)) {
error_setg(errp, "kernel_irqchip split mode not supported on pseries");
@@ -600,6 +644,12 @@ void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
return;
}
spapr_irq_check(spapr, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
/* Initialize the MSI IRQ allocator. */
if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
spapr_irq_msi_init(spapr, spapr->irq->nr_msis);

include/hw/ppc/spapr.h

@@ -118,6 +118,7 @@ struct SpaprMachineClass {
bool use_ohci_by_default; /* use USB-OHCI instead of XHCI */
bool pre_2_10_has_unused_icps;
bool legacy_irq_allocation;
bool broken_host_serial_model; /* present real host info to the guest */
void (*phb_placement)(SpaprMachineState *spapr, uint32_t index,
uint64_t *buid, hwaddr *pio,

target/ppc/cpu.h

@@ -2409,6 +2409,12 @@ enum {
target_ulong cpu_read_xer(CPUPPCState *env);
void cpu_write_xer(CPUPPCState *env, target_ulong xer);
/*
* All 64-bit server processors compliant with arch 2.x, ie. 970 and newer,
* have PPC_SEGMENT_64B.
*/
#define is_book3s_arch2x(ctx) (!!((ctx)->insns_flags & PPC_SEGMENT_64B))
static inline void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *flags)
{

target/ppc/helper_regs.h

@@ -152,7 +152,7 @@ static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
* - 64-bit embedded implementations do not need any operation to be
* performed when PR is set.
*/
if ((env->insns_flags & PPC_SEGMENT_64B) && ((value >> MSR_PR) & 1)) {
if (is_book3s_arch2x(env) && ((value >> MSR_PR) & 1)) {
value |= (1 << MSR_EE) | (1 << MSR_DR) | (1 << MSR_IR);
}
#endif

target/ppc/translate.c

@@ -3747,20 +3747,52 @@ static void gen_bcond(DisasContext *ctx, int type)
if ((bo & 0x4) == 0) {
/* Decrement and test CTR */
TCGv temp = tcg_temp_new();
if (unlikely(type == BCOND_CTR)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
if (NARROW_MODE(ctx)) {
tcg_gen_ext32u_tl(temp, cpu_ctr);
if (type == BCOND_CTR) {
/*
* All ISAs up to v3 describe this form of bcctr as invalid but
* some processors, ie. 64-bit server processors compliant with
* arch 2.x, do implement a "test and decrement" logic instead,
* as described in their respective UMs. This logic involves CTR
* to act as both the branch target and a counter, which makes
* it basically useless and thus never used in real code.
*
* This form was hence chosen to trigger extra micro-architectural
* side-effect on real HW needed for the Spectre v2 workaround.
* It is up to guests that implement such workaround, ie. linux, to
* use this form in a way it just triggers the side-effect without
* doing anything else harmful.
*/
if (unlikely(!is_book3s_arch2x(ctx))) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
tcg_temp_free(temp);
tcg_temp_free(target);
return;
}
if (NARROW_MODE(ctx)) {
tcg_gen_ext32u_tl(temp, cpu_ctr);
} else {
tcg_gen_mov_tl(temp, cpu_ctr);
}
if (bo & 0x2) {
tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
} else {
tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
}
tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
} else {
tcg_gen_mov_tl(temp, cpu_ctr);
}
if (bo & 0x2) {
tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
} else {
tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
if (NARROW_MODE(ctx)) {
tcg_gen_ext32u_tl(temp, cpu_ctr);
} else {
tcg_gen_mov_tl(temp, cpu_ctr);
}
if (bo & 0x2) {
tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
} else {
tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
}
}
tcg_temp_free(temp);
}
@@ -3889,7 +3921,7 @@ static void gen_rfi(DisasContext *ctx)
/* This instruction doesn't exist anymore on 64-bit server
* processors compliant with arch 2.x
*/
if (ctx->insns_flags & PPC_SEGMENT_64B) {
if (is_book3s_arch2x(ctx)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
@@ -6511,8 +6543,7 @@ static void gen_msgclr(DisasContext *ctx)
GEN_PRIV;
#else
CHK_HV;
/* 64-bit server processors compliant with arch 2.x */
if (ctx->insns_flags & PPC_SEGMENT_64B) {
if (is_book3s_arch2x(ctx)) {
gen_helper_book3s_msgclr(cpu_env, cpu_gpr[rB(ctx->opcode)]);
} else {
gen_helper_msgclr(cpu_env, cpu_gpr[rB(ctx->opcode)]);
@@ -6526,8 +6557,7 @@ static void gen_msgsnd(DisasContext *ctx)
GEN_PRIV;
#else
CHK_HV;
/* 64-bit server processors compliant with arch 2.x */
if (ctx->insns_flags & PPC_SEGMENT_64B) {
if (is_book3s_arch2x(ctx)) {
gen_helper_book3s_msgsnd(cpu_gpr[rB(ctx->opcode)]);
} else {
gen_helper_msgsnd(cpu_gpr[rB(ctx->opcode)]);

target/ppc/translate/vsx-impl.inc.c

@@ -356,8 +356,8 @@ static void gen_##name(DisasContext *ctx) \
gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
get_cpu_vsrh(t0, xS(ctx->opcode)); \
gen_qemu_##operation(ctx, t0, EA); \
set_cpu_vsrh(xS(ctx->opcode), t0); \
tcg_temp_free(EA); \
tcg_temp_free_i64(t0); \
}