Merge tag 'devel-hppa-priv-cleanup2-pull-request' of https://github.com/hdeller/qemu-hppa into staging

target/hppa: Clean up conversion from/to MMU index and privilege level

Make the conversion between privilege level and QEMU MMU index
consistent, and afterwards switch to MMU indices 11-15.

Signed-off-by: Helge Deller <deller@gmx.de>

# -----BEGIN PGP SIGNATURE-----
#
# iHUEABYKAB0WIQS86RI+GtKfB8BJu973ErUQojoPXwUCZOtpFAAKCRD3ErUQojoP
# X0lxAPwKfsMZOO/e81XXLgxeEZ5R4yjtIelErvOWmMvBfxEDUwEA6HgJt4gOe1uR
# Dw7d+wTqr+CSOj5I87+sJYl1FmihzQU=
# =01eA
# -----END PGP SIGNATURE-----
# gpg: Signature made Sun 27 Aug 2023 11:17:40 EDT
# gpg:                using EDDSA key BCE9123E1AD29F07C049BBDEF712B510A23A0F5F
# gpg: Good signature from "Helge Deller <deller@gmx.de>" [unknown]
# gpg:                 aka "Helge Deller <deller@kernel.org>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 4544 8228 2CD9 10DB EF3D  25F8 3E5F 3D04 A7A2 4603
#      Subkey fingerprint: BCE9 123E 1AD2 9F07 C049  BBDE F712 B510 A23A 0F5F

* tag 'devel-hppa-priv-cleanup2-pull-request' of https://github.com/hdeller/qemu-hppa:
  target/hppa: Switch to use MMU indices 11-15
  target/hppa: Use privilege helper in hppa_get_physical_address()
  target/hppa: Do not use hardcoded value for tlb_flush_*()
  target/hppa: Add privilege to MMU index conversion helpers
  target/hppa: Add missing PL1 and PL2 privilege levels

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 98bdf241be
Stefan Hajnoczi, 2023-08-28 15:12:01 -04:00
4 changed files with 29 additions and 17 deletions

target/hppa/cpu.h

@@ -30,11 +30,22 @@
    basis.  It's probably easier to fall back to a strong memory model.  */
 #define TCG_GUEST_DEFAULT_MO        TCG_MO_ALL
 
-#define MMU_KERNEL_IDX   0
-#define MMU_USER_IDX     3
-#define MMU_PHYS_IDX     4
+#define MMU_KERNEL_IDX    11
+#define MMU_PL1_IDX       12
+#define MMU_PL2_IDX       13
+#define MMU_USER_IDX      14
+#define MMU_PHYS_IDX      15
+
+#define PRIV_TO_MMU_IDX(priv)    (MMU_KERNEL_IDX + (priv))
+#define MMU_IDX_TO_PRIV(mmu_idx) ((mmu_idx) - MMU_KERNEL_IDX)
+
 #define TARGET_INSN_START_EXTRA_WORDS 1
 
+/* No need to flush MMU_PHYS_IDX */
+#define HPPA_MMU_FLUSH_MASK                              \
+        (1 << MMU_KERNEL_IDX | 1 << MMU_PL1_IDX |        \
+         1 << MMU_PL2_IDX    | 1 << MMU_USER_IDX)
+
 /* Hardware exceptions, interrupts, faults, and traps. */
 #define EXCP_HPMC                1  /* high priority machine check */
 #define EXCP_POWER_FAIL          2
@@ -233,7 +244,7 @@ static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
     return MMU_USER_IDX;
 #else
     if (env->psw & (ifetch ? PSW_C : PSW_D)) {
-        return env->iaoq_f & 3;
+        return PRIV_TO_MMU_IDX(env->iaoq_f & 3);
     }
     return MMU_PHYS_IDX;  /* mmu disabled */
 #endif
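Not part of the patch, but for readers skimming the diff: a minimal standalone C sketch that copies the macros introduced above and prints the resulting privilege-level/MMU-index mapping. Everything outside the copied macros (the main() harness and its output) is illustrative only.

/* sketch.c -- standalone illustration, not part of the patch.
 * Mirrors the macros added to the header above to show how PA-RISC
 * privilege levels 0..3 map onto QEMU MMU indices 11..14, and why
 * MMU_PHYS_IDX (15) stays outside HPPA_MMU_FLUSH_MASK. */
#include <assert.h>
#include <stdio.h>

#define MMU_KERNEL_IDX    11
#define MMU_PL1_IDX       12
#define MMU_PL2_IDX       13
#define MMU_USER_IDX      14
#define MMU_PHYS_IDX      15

#define PRIV_TO_MMU_IDX(priv)    (MMU_KERNEL_IDX + (priv))
#define MMU_IDX_TO_PRIV(mmu_idx) ((mmu_idx) - MMU_KERNEL_IDX)

#define HPPA_MMU_FLUSH_MASK                              \
        (1 << MMU_KERNEL_IDX | 1 << MMU_PL1_IDX |        \
         1 << MMU_PL2_IDX    | 1 << MMU_USER_IDX)

int main(void)
{
    for (int priv = 0; priv <= 3; priv++) {
        int idx = PRIV_TO_MMU_IDX(priv);          /* 11, 12, 13, 14 */
        assert(MMU_IDX_TO_PRIV(idx) == priv);     /* conversion round-trips */
        printf("priv %d -> mmu_idx %d\n", priv, idx);
    }
    /* Bits 11..14 set, bit 15 (MMU_PHYS_IDX) clear: 0x7800. */
    printf("HPPA_MMU_FLUSH_MASK = 0x%x\n", HPPA_MMU_FLUSH_MASK);
    return 0;
}

With the old layout (kernel 0, user 3, physical 4) the hardcoded 0xf flush mask used elsewhere happened to cover exactly the flushable indices; with indices 11-15 that magic value would be wrong, which is why the later hunks switch to the named HPPA_MMU_FLUSH_MASK.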

target/hppa/helper.c

@@ -71,7 +71,7 @@ void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg psw)
     /* If PSW_P changes, it affects how we translate addresses. */
     if ((psw ^ old_psw) & PSW_P) {
 #ifndef CONFIG_USER_ONLY
-        tlb_flush_by_mmuidx(env_cpu(env), 0xf);
+        tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
 #endif
     }
 }

target/hppa/mem_helper.c

@@ -50,8 +50,7 @@ static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
     trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);
 
     for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
-        /* Do not flush MMU_PHYS_IDX. */
-        tlb_flush_page_by_mmuidx(cs, addr, 0xf);
+        tlb_flush_page_by_mmuidx(cs, addr, HPPA_MMU_FLUSH_MASK);
     }
 
     memset(ent, 0, sizeof(*ent));
@@ -74,7 +73,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                               int type, hwaddr *pphys, int *pprot)
 {
     hwaddr phys;
-    int prot, r_prot, w_prot, x_prot;
+    int prot, r_prot, w_prot, x_prot, priv;
     hppa_tlb_entry *ent;
     int ret = -1;
 
@@ -98,9 +97,10 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
     phys = ent->pa + (addr & ~TARGET_PAGE_MASK);
 
     /* Map TLB access_rights field to QEMU protection. */
-    r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
-    w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
-    x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
+    priv = MMU_IDX_TO_PRIV(mmu_idx);
+    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
+    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
+    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
     switch (ent->ar_type) {
     case 0: /* read-only: data page */
         prot = r_prot;
@@ -335,13 +335,13 @@ void HELPER(ptlbe)(CPUHPPAState *env)
 {
     trace_hppa_tlb_ptlbe(env);
     memset(env->tlb, 0, sizeof(env->tlb));
-    tlb_flush_by_mmuidx(env_cpu(env), 0xf);
+    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
 }
 
 void cpu_hppa_change_prot_id(CPUHPPAState *env)
 {
     if (env->psw & PSW_P) {
-        tlb_flush_by_mmuidx(env_cpu(env), 0xf);
+        tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
     }
 }
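The hppa_get_physical_address() hunk above is the reason MMU_IDX_TO_PRIV() exists: the TLB entry's ar_pl1/ar_pl2 fields hold privilege levels 0..3, so the new 11-15 indices must be converted back before the comparison. A standalone sketch, not from the QEMU tree, that reproduces just that comparison; access_prot() is a made-up name and the PAGE_* bits are redefined locally for illustration.

/* Standalone sketch, not part of the patch. */
#include <stdio.h>

#define PAGE_READ  1
#define PAGE_WRITE 2
#define PAGE_EXEC  4

#define MMU_KERNEL_IDX 11
#define MMU_IDX_TO_PRIV(mmu_idx) ((mmu_idx) - MMU_KERNEL_IDX)

/* Same comparisons as the hunk above: ar_pl1/ar_pl2 are 2-bit fields (0..3). */
static int access_prot(int priv, int ar_pl1, int ar_pl2)
{
    int r = (priv <= ar_pl1) * PAGE_READ;
    int w = (priv <= ar_pl2) * PAGE_WRITE;
    int x = (ar_pl2 <= priv && priv <= ar_pl1) * PAGE_EXEC;
    return r | w | x;
}

int main(void)
{
    int mmu_idx = 14;  /* new MMU_USER_IDX */
    /* Comparing the raw index against 2-bit fields would deny everything: */
    printf("raw idx:   %d\n", access_prot(mmu_idx, 3, 3));                   /* 0 */
    /* Converting back to privilege level 3 gives the intended rights:      */
    printf("with priv: %d\n", access_prot(MMU_IDX_TO_PRIV(mmu_idx), 3, 3));  /* 7 = R|W|X */
    return 0;
}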

target/hppa/translate.c

@@ -4057,14 +4057,15 @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     ctx->tb_flags = ctx->base.tb->flags;
 
 #ifdef CONFIG_USER_ONLY
-    ctx->privilege = MMU_USER_IDX;
+    ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
     ctx->mmu_idx = MMU_USER_IDX;
-    ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
-    ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
+    ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
+    ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
 #else
     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
-    ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);
+    ctx->mmu_idx = (ctx->tb_flags & PSW_D ?
+                    PRIV_TO_MMU_IDX(ctx->privilege) : MMU_PHYS_IDX);
 
     /* Recover the IAOQ values from the GVA + PRIV. */
     uint64_t cs_base = ctx->base.tb->cs_base;
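Tying the cpu_mmu_index() hunk and the translate.c hunk together: the privilege level travels in the low two bits of the front instruction address (iaoq_f), and it only selects an MMU index while address translation is enabled (PSW_C for instruction fetch, PSW_D for data); otherwise MMU_PHYS_IDX is used. A condensed sketch, again not part of the patch and with a made-up helper name:

/* Standalone sketch, not part of the patch.  pick_mmu_idx() is a made-up
 * helper that condenses cpu_mmu_index() and hppa_tr_init_disas_context(). */
#include <stdbool.h>
#include <stdio.h>

#define MMU_KERNEL_IDX 11
#define MMU_PHYS_IDX   15
#define PRIV_TO_MMU_IDX(priv) (MMU_KERNEL_IDX + (priv))

static int pick_mmu_idx(bool translation_enabled, unsigned long iaoq_f)
{
    if (translation_enabled) {
        /* The low two bits of the front instruction address carry the
         * current privilege level (0 = most privileged .. 3 = user). */
        return PRIV_TO_MMU_IDX(iaoq_f & 3);
    }
    return MMU_PHYS_IDX;  /* mmu disabled */
}

int main(void)
{
    printf("%d\n", pick_mmu_idx(true, 0x12340));   /* priv 0 -> 11 */
    printf("%d\n", pick_mmu_idx(true, 0x12343));   /* priv 3 -> 14 */
    printf("%d\n", pick_mmu_idx(false, 0x12343));  /* translation off -> 15 */
    return 0;
}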