accel/tcg: Add TLB_CHECK_ALIGNED

This creates a per-page method for checking alignment, controlled by a new TLB_CHECK_ALIGNED flag.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20240301204110.656742-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 49fa457ca5 (parent a0ff4a879c)
Author: Richard Henderson, 2024-03-01 10:41:08 -10:00; committed by Peter Maydell
2 files changed, 30 insertions(+), 4 deletions(-)
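
For context on how the flag is meant to be used: a target opts in per page when it installs the translation. A minimal sketch, assuming the tlb_fill_flags field added earlier in this series; the helper name and page attributes below are hypothetical:

    /* Hypothetical target helper: install a page whose accesses must be
     * aligned, e.g. one mapped with ARM Device memory type. */
    static void install_device_page(CPUState *cs, vaddr va, hwaddr pa,
                                    int mmu_idx)
    {
        CPUTLBEntryFull full = {
            .phys_addr = pa,
            .attrs = MEMTXATTRS_UNSPECIFIED,
            .prot = PAGE_READ | PAGE_WRITE,
            .lg_page_size = TARGET_PAGE_BITS,
            /* Request the per-access alignment check added by this patch. */
            .tlb_fill_flags = TLB_CHECK_ALIGNED,
        };

        tlb_set_page_full(cs, mmu_idx, va, &full);
    }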

accel/tcg/cputlb.c

@@ -1453,9 +1453,8 @@ static int probe_access_internal(CPUState *cpu, vaddr addr,
     flags |= full->slow_flags[access_type];
 
     /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
-    if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))
-        ||
-        (access_type != MMU_INST_FETCH && force_mmio)) {
+    if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY | TLB_CHECK_ALIGNED))
+        || (access_type != MMU_INST_FETCH && force_mmio)) {
         *phost = NULL;
         return TLB_MMIO;
     }
@@ -1836,6 +1835,31 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
         tcg_debug_assert((flags & TLB_BSWAP) == 0);
     }
 
+    /*
+     * This alignment check differs from the one above, in that this is
+     * based on the atomicity of the operation.  The intended use case is
+     * the ARM memory type field of each PTE, where access to pages with
+     * Device memory type require alignment.
+     */
+    if (unlikely(flags & TLB_CHECK_ALIGNED)) {
+        MemOp size = l->memop & MO_SIZE;
+
+        switch (l->memop & MO_ATOM_MASK) {
+        case MO_ATOM_NONE:
+            size = MO_8;
+            break;
+        case MO_ATOM_IFALIGN_PAIR:
+        case MO_ATOM_WITHIN16_PAIR:
+            size = size ? size - 1 : 0;
+            break;
+        default:
+            break;
+        }
+        if (addr & ((1 << size) - 1)) {
+            cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
+        }
+    }
+
     return crosspage;
 }
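
To see why the switch above relaxes the check: the alignment requirement follows the atomicity of the operation, so an access that may legally be split only needs its halves aligned, and one with no atomicity at all (MO_ATOM_NONE) can proceed byte by byte from any address. A standalone sketch of the same arithmetic, using local stand-ins rather than QEMU's actual MemOp encoding:

    #include <assert.h>
    #include <stdio.h>

    /* Illustrative stand-ins for QEMU's MemOp size and atomicity fields;
     * the values are local to this sketch, not QEMU's encoding. */
    enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3 };
    enum { ATOM_IFALIGN, ATOM_NONE, ATOM_IFALIGN_PAIR, ATOM_WITHIN16_PAIR };

    static unsigned required_align_mask(unsigned size, unsigned atom)
    {
        switch (atom) {
        case ATOM_NONE:
            size = MO_8;                 /* byte-at-a-time: no alignment */
            break;
        case ATOM_IFALIGN_PAIR:
        case ATOM_WITHIN16_PAIR:
            size = size ? size - 1 : 0;  /* only each half must be aligned */
            break;
        default:
            break;
        }
        return (1u << size) - 1;
    }

    int main(void)
    {
        /* 8-byte access, pair atomicity: only 4-byte alignment enforced. */
        assert(required_align_mask(MO_64, ATOM_IFALIGN_PAIR) == 3);
        /* 4-byte access, no atomicity: any address passes. */
        assert(required_align_mask(MO_32, ATOM_NONE) == 0);
        /* 4-byte access, full atomicity: 4-byte alignment required. */
        assert(required_align_mask(MO_32, ATOM_IFALIGN) == 3);
        printf("ok\n");
        return 0;
    }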

include/exec/cpu-all.h

@@ -357,8 +357,10 @@ static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
 #define TLB_BSWAP            (1 << 0)
 /* Set if TLB entry contains a watchpoint.  */
 #define TLB_WATCHPOINT       (1 << 1)
+/* Set if TLB entry requires aligned accesses.  */
+#define TLB_CHECK_ALIGNED    (1 << 2)
 
-#define TLB_SLOW_FLAGS_MASK  (TLB_BSWAP | TLB_WATCHPOINT)
+#define TLB_SLOW_FLAGS_MASK  (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED)
 
 /* The two sets of flags must not overlap. */
 QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
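
Note the design choice: TLB_CHECK_ALIGNED joins TLB_SLOW_FLAGS_MASK, so it lives in the entry's slow_flags word rather than in the address bits compared on the fast path, and the new check only runs once an access has already taken the slow path. The closing QEMU_BUILD_BUG_ON keeps the two encodings disjoint; a minimal standalone illustration of that compile-time guard, with made-up bit values:

    /* Compile-time assertion in the style of QEMU_BUILD_BUG_ON:
     * a negative array size makes the build fail. */
    #define BUILD_BUG_ON(x) typedef char build_bug_on_check[(x) ? -1 : 1]

    /* Hypothetical fast-path flags (low bits of the address word) and
     * slow-path flags (separate slow_flags word); values are made up. */
    #define FAST_FLAGS_MASK  0x0038u
    #define SLOW_FLAGS_MASK  0x0007u

    /* Fails to compile if a flag were ever placed in both encodings. */
    BUILD_BUG_ON(FAST_FLAGS_MASK & SLOW_FLAGS_MASK);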