target/sh4: trap unaligned accesses

SH4 requires that memory accesses are naturally aligned, except for the
SH4-A movua.l instructions which can do unaligned loads.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
Aurelien Jarno 2017-05-01 23:20:43 +02:00
parent 143021b26f
commit 34257c2117
4 changed files with 25 additions and 2 deletions
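Background (not part of the commit): on real SH4 hardware a misaligned data
access raises an address error exception, with exception code 0x0e0 for
reads/fetches and 0x100 for writes, which is exactly what the new handler in
op_helper.c below reports. The fragment that follows is only an illustrative
bare-metal guest sketch, assuming an SH4 cross-compiler; the function names
are invented for the example.

    #include <stdint.h>

    /* Ordinary loads must be naturally aligned; mov.l from a non-4-byte-
     * aligned address raises a data address error (code 0x0e0). */
    uint32_t load_aligned(const uint32_t *p)
    {
        uint32_t v;
        __asm__ volatile("mov.l @%1,%0" : "=r"(v) : "r"(p) : "memory");
        return v;
    }

    /* The SH4-A movua.l form always targets R0 and may read from any
     * address without trapping, which is why translate.c marks it with
     * MO_UNALN further down. */
    uint32_t load_unaligned(const void *p)
    {
        uint32_t v;
        __asm__ volatile("movua.l @%1,r0\n\tmov r0,%0"
                         : "=r"(v) : "r"(p) : "r0", "memory");
        return v;
    }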

target/sh4/cpu.c

@@ -301,6 +301,7 @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
 #ifdef CONFIG_USER_ONLY
     cc->handle_mmu_fault = superh_cpu_handle_mmu_fault;
 #else
+    cc->do_unaligned_access = superh_cpu_do_unaligned_access;
     cc->get_phys_page_debug = superh_cpu_get_phys_page_debug;
 #endif
     cc->disas_set_info = superh_cpu_disas_set_info;

target/sh4/cpu.h

@@ -24,6 +24,7 @@
 #include "cpu-qom.h"
 
 #define TARGET_LONG_BITS 32
+#define ALIGNED_ONLY
 
 /* CPU Subtypes */
 #define SH_CPU_SH7750 (1 << 0)
@@ -215,6 +216,9 @@ void superh_cpu_dump_state(CPUState *cpu, FILE *f,
 hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
 int superh_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int superh_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+                                    MMUAccessType access_type,
+                                    int mmu_idx, uintptr_t retaddr);
 
 void sh4_translate_init(void);
 SuperHCPU *cpu_sh4_init(const char *cpu_model);
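A note on what ALIGNED_ONLY changes (background, not part of the diff): when a
target defines it, TCG flips the polarity of the memory-op alignment flags, so
plain guest loads and stores are checked for natural alignment by default and a
front end has to pass MO_UNALN to opt out, as the translate.c hunk below does
for movua.l. The following is a sketch of the relevant 2017-era tcg/tcg.h
flags, reproduced from memory (the enum wrapper name is invented), so treat it
as illustrative rather than an exact excerpt:

    /* Alignment bits of the TCG memory-operation flags. */
    typedef enum TCGMemOpSketch {
        MO_ASHIFT = 4,
        MO_AMASK  = 7 << MO_ASHIFT,
    #ifdef ALIGNED_ONLY
        MO_ALIGN  = 0,          /* default: access must be aligned */
        MO_UNALN  = MO_AMASK,   /* explicit opt-out, used by movua.l */
    #else
        MO_ALIGN  = MO_AMASK,   /* explicit opt-in to alignment checks */
        MO_UNALN  = 0,          /* default: unaligned access allowed */
    #endif
    } TCGMemOpSketch;

Misaligned accesses caught by these checks are reported through the
CPUClass::do_unaligned_access hook that this commit wires up for SH4.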

target/sh4/op_helper.c

@@ -24,6 +24,22 @@
 
 #ifndef CONFIG_USER_ONLY
 
+void superh_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+                                    MMUAccessType access_type,
+                                    int mmu_idx, uintptr_t retaddr)
+{
+    switch (access_type) {
+    case MMU_INST_FETCH:
+    case MMU_DATA_LOAD:
+        cs->exception_index = 0x0e0;
+        break;
+    case MMU_DATA_STORE:
+        cs->exception_index = 0x100;
+        break;
+    }
+    cpu_loop_exit_restore(cs, retaddr);
+}
+
 void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
               int mmu_idx, uintptr_t retaddr)
 {
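How the new handler gets reached (a simplified sketch, not QEMU's actual slow
path): with ALIGNED_ONLY defined, the softmmu load/store path checks every
guest address whose memory op is not flagged MO_UNALN against the access size,
and routes misaligned addresses to the CPUClass::do_unaligned_access hook
before any data is transferred. Roughly, assuming QEMU's internal headers (the
helper name check_alignment() is invented for this sketch; CPU_GET_CLASS() and
the hook itself are real interfaces):

    #include "qemu/osdep.h"
    #include "cpu.h"

    static void check_alignment(CPUState *cs, vaddr addr, int size,
                                MMUAccessType access_type, int mmu_idx,
                                uintptr_t retaddr)
    {
        if (addr & (size - 1)) {
            CPUClass *cc = CPU_GET_CLASS(cs);
            /* For SH4 this lands in superh_cpu_do_unaligned_access() above:
             * exception_index becomes 0x0e0 (fetch/load) or 0x100 (store)
             * and cpu_loop_exit_restore() unwinds to the exception path, so
             * the faulting access never completes. */
            cc->do_unaligned_access(cs, addr, access_type, mmu_idx, retaddr);
        }
    }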

target/sh4/translate.c

@@ -1504,14 +1504,16 @@ static void _decode_opc(DisasContext * ctx)
     case 0x40a9:                /* movua.l @Rm,R0 */
         /* Load non-boundary-aligned data */
         if (ctx->features & SH_FEATURE_SH4A) {
-            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
+            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
+                                MO_TEUL | MO_UNALN);
             return;
         }
         break;
     case 0x40e9:                /* movua.l @Rm+,R0 */
         /* Load non-boundary-aligned data */
         if (ctx->features & SH_FEATURE_SH4A) {
-            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
+            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
+                                MO_TEUL | MO_UNALN);
             tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
             return;
         }