ppc patch queue for 2020-04-17

Here are a few late bugfixes for qemu-5.0 in the ppc target code.
Unless some really nasty last-minute bug shows up, I expect this to be
the last ppc pull request for qemu-5.0.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEdfRlhq5hpmzETofcbDjKyiDZs5IFAl6ZOFUACgkQbDjKyiDZ
 s5Kkuw//RoF+vcv70ZzoS7f9MgehObiTvfgTyamTGr7pDNlYnGJuK9OXz1e3sl6w
 acM/L+iE/AmoFA3+gWC9RxL2qOwTiLRJedk5l7PvESXoLHQek+idR0V5nt0VmG2S
 IEpMIRDtWFTOk5WbouFvuUnYaZyhxKZPZxEHvI3bv0/bI0AAgVtq3HTmy+CiRh3u
 SgbVJyvmEdlUeaozvMWcFfclLpN6sA1hwrx8C7+0Q1L5ONz8D6HL5zwmlsorPMlm
 owtHVT2rYtfsKGDVTmb76rwGZm8pj2Kd6kA3Fdo2mFUyxnvOcRrQ25P3ii0uhv8G
 htRuqXT5Da3OKiCxDOUpuEuoaZCQf2cliVDhapFl53HZ4upG5l7ZIYoQEPTAOmrx
 a29oRvNWR3hkFwuuXM3PIigf5bwKh2eyWBBGA0DgDA0wudSHJIvkjmiq8j+t2/h5
 9H9RWPpvYpkRYk5vCbKQyeYTdYcTribuIQ83/5FuLbWoK/54tkxPk+gfLvT8uprT
 6Ij3+nilKQehKcQJ8lqC8dMqB9KjkAWgO2tfPhkMjbBLPPcBuepWTt5Qu+DuCqxv
 kmE0vA1HxUJq4d09FRkMymf+zDdgKb1imNnS47pnp4vBrzxb3lAzLFsU2kl8oWTf
 +WXxRNuHOOIsO/nqdvGvId6j+0ZPbqYS5QiwAgtDtd3M0FccbNg=
 =KdBh
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-5.0-20200417' into staging

ppc patch queue for 2020-04-17

Here are a few late bugfixes for qemu-5.0 in the ppc target code.
Unless some really nasty last-minute bug shows up, I expect this to be
the last ppc pull request for qemu-5.0.

# gpg: Signature made Fri 17 Apr 2020 06:02:13 BST
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E  87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-5.0-20200417:
  target/ppc: Fix mtmsr(d) L=1 variant that loses interrupts
  target/ppc: Fix wrong interpretation of the disposition flag.
  linux-user/ppc: Fix padding in mcontext_t for ppc64

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 5b4273e462 (Peter Maydell, 2020-04-20 19:57:18 +01:00)
3 changed files with 58 additions and 61 deletions

--- a/linux-user/ppc/signal.c
+++ b/linux-user/ppc/signal.c

@@ -35,12 +35,26 @@ struct target_mcontext {
     target_ulong mc_gregs[48];
     /* Includes fpscr. */
     uint64_t mc_fregs[33];
 #if defined(TARGET_PPC64)
     /* Pointer to the vector regs */
     target_ulong v_regs;
-#endif
+    /*
+     * On ppc64, this mcontext structure is naturally *unaligned*,
+     * or rather it is aligned on a 8 bytes boundary but not on
+     * a 16 byte boundary. This pad fixes it up. This is why we
+     * cannot use ppc_avr_t, which would force alignment. This is
+     * also why the vector regs are referenced in the ABI by the
+     * v_regs pointer above so any amount of padding can be added here.
+     */
+    target_ulong pad;
+    /* VSCR and VRSAVE are saved separately. Also reserve space for VSX. */
+    struct {
+        uint64_t altivec[34 + 16][2];
+    } mc_vregs;
+#else
     target_ulong mc_pad[2];
+#endif
     /* We need to handle Altivec and SPE at the same time, which no
        kernel needs to do. Fortunately, the kernel defines this bit to
        be Altivec-register-large all the time, rather than trying to
@@ -48,32 +62,14 @@ struct target_mcontext {
     union {
         /* SPE vector registers. One extra for SPEFSCR. */
         uint32_t spe[33];
-        /* Altivec vector registers. The packing of VSCR and VRSAVE
-           varies depending on whether we're PPC64 or not: PPC64 splits
-           them apart; PPC32 stuffs them together.
-           We also need to account for the VSX registers on PPC64
-        */
-#if defined(TARGET_PPC64)
-#define QEMU_NVRREG (34 + 16)
-        /* On ppc64, this mcontext structure is naturally *unaligned*,
-         * or rather it is aligned on a 8 bytes boundary but not on
-         * a 16 bytes one. This pad fixes it up. This is also why the
-         * vector regs are referenced by the v_regs pointer above so
-         * any amount of padding can be added here
-         */
-        target_ulong pad;
-#else
-        /* On ppc32, we are already aligned to 16 bytes */
-#define QEMU_NVRREG 33
-#endif
-        /* We cannot use ppc_avr_t here as we do *not* want the implied
-         * 16-bytes alignment that would result from it. This would have
-         * the effect of making the whole struct target_mcontext aligned
-         * which breaks the layout of struct target_ucontext on ppc64.
-         */
-        uint64_t altivec[QEMU_NVRREG][2];
-#undef QEMU_NVRREG
+        /*
+         * Altivec vector registers. One extra for VRSAVE.
+         * On ppc32, we are already aligned to 16 bytes. We could
+         * use ppc_avr_t, but choose to share the same type as ppc64.
+         */
+        uint64_t altivec[33][2];
     } mc_vregs;
+#endif
 };
 /* See arch/powerpc/include/asm/sigcontext.h. */
@@ -278,6 +274,7 @@ static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
         __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
     }
 
+#if defined(TARGET_PPC64)
     /* Save VSX second halves */
     if (env->insns_flags2 & PPC2_VSX) {
         uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
@@ -286,6 +283,7 @@ static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
             __put_user(*vsrl, &vsregs[i]);
         }
     }
+#endif
 
     /* Save floating point registers. */
     if (env->insns_flags & PPC_FLOAT) {
@@ -296,22 +294,18 @@ static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
         __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
     }
 
+#if !defined(TARGET_PPC64)
     /* Save SPE registers. The kernel only saves the high half. */
     if (env->insns_flags & PPC_SPE) {
-#if defined(TARGET_PPC64)
-        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
-            __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
-        }
-#else
         for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
             __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
         }
-#endif
         /* Set MSR_SPE in the saved MSR value to indicate that
            frame->mc_vregs contains valid data. */
         msr |= MSR_SPE;
         __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
     }
+#endif
 
     /* Store MSR. */
     __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
@@ -392,6 +386,7 @@ static void restore_user_regs(CPUPPCState *env,
         __get_user(env->spr[SPR_VRSAVE], vrsave);
     }
 
+#if defined(TARGET_PPC64)
     /* Restore VSX second halves */
     if (env->insns_flags2 & PPC2_VSX) {
         uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
@@ -400,6 +395,7 @@ static void restore_user_regs(CPUPPCState *env,
             __get_user(*vsrl, &vsregs[i]);
         }
     }
+#endif
 
     /* Restore floating point registers. */
     if (env->insns_flags & PPC_FLOAT) {
@@ -412,22 +408,15 @@ static void restore_user_regs(CPUPPCState *env,
         env->fpscr = (uint32_t) fpscr;
     }
 
+#if !defined(TARGET_PPC64)
     /* Save SPE registers. The kernel only saves the high half. */
     if (env->insns_flags & PPC_SPE) {
-#if defined(TARGET_PPC64)
-        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
-            uint32_t hi;
-            __get_user(hi, &frame->mc_vregs.spe[i]);
-            env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
-        }
-#else
         for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
             __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
         }
-#endif
         __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
     }
+#endif
 }
 
 #if !defined(TARGET_PPC64)
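
A note on the alignment reasoning in the comments above: embedding a 16-byte-aligned member (like ppc_avr_t) raises the alignment of the whole containing struct, which would change the layout of the enclosing struct target_ucontext on ppc64. A minimal host-side sketch of that effect, with made-up stand-in types rather than the real QEMU structures:

#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for ppc_avr_t: a 16-byte-aligned vector type. */
typedef struct { alignas(16) uint64_t u64[2]; } vec16;

/* Embedding the aligned type forces the whole struct up to
 * 16-byte alignment (and inserts padding before 'vregs'). */
struct forced_align {
    uint64_t gregs[3];          /* odd count of 8-byte words */
    vec16 vregs[2];
};

/* The approach taken above: stay 8-byte aligned overall and insert
 * one explicit pad word so 'vregs' still starts at a 16-byte offset
 * whenever the struct itself does. */
struct manual_pad {
    uint64_t gregs[3];
    uint64_t pad;
    uint64_t vregs[2][2];
};

int main(void)
{
    printf("forced_align: alignof %zu, offsetof vregs %zu\n",
           alignof(struct forced_align), offsetof(struct forced_align, vregs));
    printf("manual_pad:   alignof %zu, offsetof vregs %zu\n",
           alignof(struct manual_pad), offsetof(struct manual_pad, vregs));
    return 0;
}

Both structs place vregs at offset 32, but only the first drags the container's alignment up to 16 bytes; keeping an explicit pad word avoids exactly that.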

--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c

@@ -2816,11 +2816,11 @@ int kvm_arch_msi_data_to_gsi(uint32_t data)
 #if defined(TARGET_PPC64)
 int kvm_handle_nmi(PowerPCCPU *cpu, struct kvm_run *run)
 {
-    bool recovered = run->flags & KVM_RUN_PPC_NMI_DISP_FULLY_RECOV;
+    uint16_t flags = run->flags & KVM_RUN_PPC_NMI_DISP_MASK;
 
     cpu_synchronize_state(CPU(cpu));
 
-    spapr_mce_req_event(cpu, recovered);
+    spapr_mce_req_event(cpu, flags == KVM_RUN_PPC_NMI_DISP_FULLY_RECOV);
 
     return 0;
 }
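
What went wrong here: KVM reports the NMI disposition in a multi-bit field of run->flags, and the old code bit-tested it as if FULLY_RECOV were a lone flag bit, so any disposition value sharing that bit also counted as "recovered". A tiny sketch of the difference, using illustrative constants rather than the real KVM ABI values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative values only -- not the real KVM ABI constants. */
#define DISP_FULLY_RECOV 1   /* 0b01 */
#define DISP_NOT_RECOV   3   /* 0b11 */
#define DISP_MASK        3   /* width of the disposition field */

int main(void)
{
    uint16_t run_flags = DISP_NOT_RECOV;  /* the error was NOT recovered */

    /* Old logic: bit-test. DISP_NOT_RECOV shares bit 0 with
     * DISP_FULLY_RECOV, so this wrongly reports recovery. */
    bool buggy = run_flags & DISP_FULLY_RECOV;

    /* Fixed logic: extract the field, then compare the whole value. */
    bool fixed = (run_flags & DISP_MASK) == DISP_FULLY_RECOV;

    printf("buggy=%d fixed=%d\n", buggy, fixed);  /* buggy=1 fixed=0 */
    return 0;
}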

--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c

@@ -4361,30 +4361,34 @@ static void gen_mtmsrd(DisasContext *ctx)
     CHK_SV;
 
 #if !defined(CONFIG_USER_ONLY)
+    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+        gen_io_start();
+    }
     if (ctx->opcode & 0x00010000) {
-        /* Special form that does not need any synchronisation */
+        /* L=1 form only updates EE and RI */
         TCGv t0 = tcg_temp_new();
+        TCGv t1 = tcg_temp_new();
         tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)],
                         (1 << MSR_RI) | (1 << MSR_EE));
-        tcg_gen_andi_tl(cpu_msr, cpu_msr,
+        tcg_gen_andi_tl(t1, cpu_msr,
                         ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE)));
-        tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
+        tcg_gen_or_tl(t1, t1, t0);
+
+        gen_helper_store_msr(cpu_env, t1);
         tcg_temp_free(t0);
+        tcg_temp_free(t1);
+
     } else {
         /*
          * XXX: we need to update nip before the store if we enter
          *      power saving mode, we will exit the loop directly from
          *      ppc_store_msr
          */
-        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
-            gen_io_start();
-        }
         gen_update_nip(ctx, ctx->base.pc_next);
         gen_helper_store_msr(cpu_env, cpu_gpr[rS(ctx->opcode)]);
-        /* Must stop the translation as machine state (may have) changed */
-        /* Note that mtmsr is not always defined as context-synchronizing */
-        gen_stop_exception(ctx);
     }
+    /* Must stop the translation as machine state (may have) changed */
+    gen_stop_exception(ctx);
 #endif /* !defined(CONFIG_USER_ONLY) */
 }
 #endif /* defined(TARGET_PPC64) */
@@ -4394,15 +4398,23 @@ static void gen_mtmsr(DisasContext *ctx)
     CHK_SV;
 
 #if !defined(CONFIG_USER_ONLY)
-    if (ctx->opcode & 0x00010000) {
-        /* Special form that does not need any synchronisation */
+    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+        gen_io_start();
+    }
+    if (ctx->opcode & 0x00010000) {
+        /* L=1 form only updates EE and RI */
         TCGv t0 = tcg_temp_new();
+        TCGv t1 = tcg_temp_new();
         tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)],
                         (1 << MSR_RI) | (1 << MSR_EE));
-        tcg_gen_andi_tl(cpu_msr, cpu_msr,
+        tcg_gen_andi_tl(t1, cpu_msr,
                         ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE)));
-        tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
+        tcg_gen_or_tl(t1, t1, t0);
+
+        gen_helper_store_msr(cpu_env, t1);
         tcg_temp_free(t0);
+        tcg_temp_free(t1);
+
     } else {
         TCGv msr = tcg_temp_new();
@@ -4411,9 +4423,6 @@ static void gen_mtmsr(DisasContext *ctx)
          *      power saving mode, we will exit the loop directly from
          *      ppc_store_msr
          */
-        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
-            gen_io_start();
-        }
         gen_update_nip(ctx, ctx->base.pc_next);
 #if defined(TARGET_PPC64)
         tcg_gen_deposit_tl(msr, cpu_msr, cpu_gpr[rS(ctx->opcode)], 0, 32);
@@ -4422,10 +4431,9 @@ static void gen_mtmsr(DisasContext *ctx)
 #endif
         gen_helper_store_msr(cpu_env, msr);
         tcg_temp_free(msr);
-        /* Must stop the translation as machine state (may have) changed */
-        /* Note that mtmsr is not always defined as context-synchronizing */
-        gen_stop_exception(ctx);
     }
+    /* Must stop the translation as machine state (may have) changed */
+    gen_stop_exception(ctx);
 #endif
 }
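
For reference, the L=1 semantics are a single mask-and-merge over the MSR; the substance of the fix is that the merged value now goes through the store_msr helper, which can deliver an interrupt that became pending while MSR_EE was clear, and that translation always stops afterwards. A sketch of the mask arithmetic only, assuming QEMU's bit numbering (MSR_RI = 1, MSR_EE = 15); this is not the generated TCG code:

#include <stdint.h>
#include <stdio.h>

/* Bit numbers as in QEMU's target/ppc defines: LSB is bit 0. */
enum { MSR_RI = 1, MSR_EE = 15 };

/* mtmsr(d) L=1: take only EE and RI from the source register,
 * preserve every other MSR bit. */
static uint64_t msr_update_l1(uint64_t msr, uint64_t rs)
{
    const uint64_t mask = (1ULL << MSR_EE) | (1ULL << MSR_RI);
    return (msr & ~mask) | (rs & mask);
}

int main(void)
{
    uint64_t msr = 1ULL << 63;                        /* SF set, EE=RI=0 */
    uint64_t rs  = (1ULL << MSR_EE) | (1ULL << MSR_RI);

    /* After this, EE=1: a pending interrupt must now be taken, which
     * is why the patch funnels the result through the store_msr
     * helper and ends the translation block. */
    printf("msr = %#llx\n", (unsigned long long)msr_update_l1(msr, rs));
    return 0;
}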