From cb2d627a00a6bf686bc221b05f136545639a1c37 Mon Sep 17 00:00:00 2001
From: Max Filippov
Date: Thu, 15 Apr 2021 13:38:08 -0700
Subject: [PATCH 1/3] target/xtensa: don't generate extra EXCP_DEBUG on
 exception

target/xtensa used to generate an extra EXCP_DEBUG exception before the
first instruction executed after an interrupt or an exception is taken
to allow single-stepping that instruction in the debugger.
This is no longer needed after the following commits:
a7ba744f4082 ("tcg/cpu-exec: precise single-stepping after an exception")
ba3c35d9c402 ("tcg/cpu-exec: precise single-stepping after an interrupt")
Drop exception state tracking/extra EXCP_DEBUG generation code.

Cc: qemu-stable@nongnu.org # v5.1, v5.2, v6.0
Reviewed-by: Richard Henderson
Signed-off-by: Max Filippov
---
 target/xtensa/cpu.c        | 1 -
 target/xtensa/cpu.h        | 7 -------
 target/xtensa/exc_helper.c | 5 -----
 target/xtensa/translate.c  | 6 ------
 4 files changed, 19 deletions(-)

diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index e2b2c7a71c..210ef80092 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -79,7 +79,6 @@ static void xtensa_cpu_reset(DeviceState *dev)
 
     xcc->parent_reset(dev);
 
-    env->exception_taken = 0;
     env->pc = env->config->exception_vector[EXC_RESET0 + env->static_vectors];
     env->sregs[LITBASE] &= ~1;
 #ifndef CONFIG_USER_ONLY
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index 3bd4f691c1..2345cb59c7 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -540,7 +540,6 @@ typedef struct CPUXtensaState {
     uint32_t ccount_base;
 #endif
 
-    int exception_taken;
     int yield_needed;
     unsigned static_vectors;
 
@@ -711,7 +710,6 @@ static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)
 #define XTENSA_TBFLAG_ICOUNT 0x20
 #define XTENSA_TBFLAG_CPENABLE_MASK 0x3fc0
 #define XTENSA_TBFLAG_CPENABLE_SHIFT 6
-#define XTENSA_TBFLAG_EXCEPTION 0x4000
 #define XTENSA_TBFLAG_WINDOW_MASK 0x18000
 #define XTENSA_TBFLAG_WINDOW_SHIFT 15
 #define XTENSA_TBFLAG_YIELD 0x20000
@@ -732,8 +730,6 @@ typedef XtensaCPU ArchCPU;
 static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc,
                                         target_ulong *cs_base, uint32_t *flags)
 {
-    CPUState *cs = env_cpu(env);
-
     *pc = env->pc;
     *cs_base = 0;
     *flags = 0;
@@ -782,9 +778,6 @@ static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc,
     if (xtensa_option_enabled(env->config, XTENSA_OPTION_COPROCESSOR)) {
         *flags |= env->sregs[CPENABLE] << XTENSA_TBFLAG_CPENABLE_SHIFT;
     }
-    if (cs->singlestep_enabled && env->exception_taken) {
-        *flags |= XTENSA_TBFLAG_EXCEPTION;
-    }
     if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER) &&
         (env->sregs[PS] & (PS_WOE | PS_EXCM)) == PS_WOE) {
         uint32_t windowstart = xtensa_replicate_windowstart(env) >>
diff --git a/target/xtensa/exc_helper.c b/target/xtensa/exc_helper.c
index 2f032bc053..10e75ab070 100644
--- a/target/xtensa/exc_helper.c
+++ b/target/xtensa/exc_helper.c
@@ -40,9 +40,6 @@ void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
     if (excp == EXCP_YIELD) {
         env->yield_needed = 0;
     }
-    if (excp == EXCP_DEBUG) {
-        env->exception_taken = 0;
-    }
     cpu_loop_exit(cs);
 }
 
@@ -197,7 +194,6 @@ static void handle_interrupt(CPUXtensaState *env)
             }
             env->sregs[PS] |= PS_EXCM;
         }
-        env->exception_taken = 1;
     }
 }
@@ -242,7 +238,6 @@ void xtensa_cpu_do_interrupt(CPUState *cs)
 
         vector = env->config->exception_vector[cs->exception_index];
         env->pc = relocated_vector(env, vector);
-        env->exception_taken = 1;
     } else {
         qemu_log_mask(CPU_LOG_INT,
                       "%s(pc = %08x) bad exception_index: %d\n",
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 73584d9d60..f93df87ec4 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -1279,12 +1279,6 @@ static void xtensa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         dc->base.is_jmp = DISAS_NORETURN;
         return;
     }
-    if (dc->base.tb->flags & XTENSA_TBFLAG_EXCEPTION) {
-        gen_exception(dc, EXCP_DEBUG);
-        dc->base.pc_next = dc->pc + 1;
-        dc->base.is_jmp = DISAS_NORETURN;
-        return;
-    }
 
     if (dc->icount) {
         TCGLabel *label = gen_new_label();
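For context: the two commits cited in the message above moved this logic
into the common TCG execution loop, so individual targets no longer need
to track "first instruction after an exception" state themselves. The
generic code does roughly the following after delivering a guest
exception (a simplified sketch of the idea in accel/tcg/cpu-exec.c, not
the verbatim upstream code):

    /* Simplified sketch: while single-stepping, report EXCP_DEBUG right
     * after an exception or interrupt is delivered.  The real logic
     * lives in cpu_handle_exception()/cpu_handle_interrupt(). */
    cc->do_interrupt(cpu);              /* deliver the guest exception */
    if (unlikely(cpu->singlestep_enabled)) {
        /* Stop before the first instruction of the handler runs, which
         * is what XTENSA_TBFLAG_EXCEPTION used to arrange by hand. */
        cpu->exception_index = EXCP_DEBUG;
        cpu_handle_debug_exception(cpu);
    }

This is what makes both the per-target flag and the EXCP_DEBUG raised
from the translator redundant.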
From 735aa900e4bf57b777ac620bed7c88234ec4b601 Mon Sep 17 00:00:00 2001
From: Max Filippov
Date: Wed, 19 May 2021 03:40:00 -0700
Subject: [PATCH 2/3] target/xtensa: fix access ring in l32ex
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

l32ex does its memory access, like all regular load/store operations,
at CRING level. Fix an apparent pasto from l32e that caused it to use
RING instead.

This is a correctness issue, not a security issue, because in the worst
case the privilege level of the memory access may be lowered, resulting
in an exception where the correct implementation would have succeeded.
In no case would it allow a memory access that would have raised an
exception in the correct implementation.

Cc: qemu-stable@nongnu.org
Reviewed-by: Philippe Mathieu-Daudé
Signed-off-by: Max Filippov
---
 target/xtensa/translate.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index f93df87ec4..95f6b21c2a 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -1814,7 +1814,7 @@ static void translate_l32ex(DisasContext *dc, const OpcodeArg arg[],
     tcg_gen_mov_i32(addr, arg[1].in);
     gen_load_store_alignment(dc, 2, addr, true);
     gen_check_exclusive(dc, addr, false);
-    tcg_gen_qemu_ld_i32(arg[0].out, addr, dc->ring, MO_TEUL);
+    tcg_gen_qemu_ld_i32(arg[0].out, addr, dc->cring, MO_TEUL);
     tcg_gen_mov_i32(cpu_exclusive_addr, addr);
     tcg_gen_mov_i32(cpu_exclusive_val, arg[0].out);
     tcg_temp_free(addr);
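A note on the two ring values, since the distinction is the whole point
of this fix: dc->cring is the current effective privilege ring that
every ordinary load/store uses (it collapses to ring 0 while PS.EXCM is
set), while dc->ring is the raw PS.RING value, used only by the
user-TLB accessors l32e/s32e, which deliberately access memory as a
lower-privileged ring. The translator derives them roughly like this (a
from-memory sketch of the DisasContext setup in translate.c, so treat
the details as indicative rather than exact):

    /* Sketch: MMU index selection in the xtensa translator. */
    dc->ring  = tb_flags & XTENSA_TBFLAG_RING_MASK;    /* PS.RING */
    dc->cring = (tb_flags & XTENSA_TBFLAG_EXCM)        /* PS.EXCM set? */
              ? 0                                      /* forced ring 0 */
              : dc->ring;

l32ex is an ordinary load from the current ring, hence dc->cring.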
From 583e6a5f55d4b02f04eda0cd70bf7b7701a08450 Mon Sep 17 00:00:00 2001
From: Max Filippov
Date: Mon, 17 May 2021 12:31:08 -0700
Subject: [PATCH 3/3] target/xtensa: clean up unaligned access
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Xtensa cores may or may not have hardware support for unaligned memory
access. Remove TARGET_ALIGNED_ONLY=y from all xtensa configurations and
pass MO_ALIGN in memory access flags for all operations that would
raise an exception on an unaligned access.

Simplify use of gen_load_store_alignment by passing access size and
alignment requirements in a single parameter.

Drop the condition from xtensa_cpu_do_unaligned_access and replace it
with an assertion.

Add a test.

Suggested-by: Philippe Mathieu-Daudé
Suggested-by: Richard Henderson
Reviewed-by: Richard Henderson
Reviewed-by: Philippe Mathieu-Daudé
Signed-off-by: Max Filippov
---
 default-configs/targets/xtensa-linux-user.mak |   1 -
 default-configs/targets/xtensa-softmmu.mak    |   1 -
 .../targets/xtensaeb-linux-user.mak           |   1 -
 default-configs/targets/xtensaeb-softmmu.mak  |   1 -
 target/xtensa/helper.c                        |  13 +-
 target/xtensa/translate.c                     | 122 +++++-----
 tests/tcg/xtensa/test_load_store.S            | 221 ++++++++++++++++++
 7 files changed, 289 insertions(+), 71 deletions(-)
 create mode 100644 tests/tcg/xtensa/test_load_store.S
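The heart of the cleanup is the new gen_load_store_alignment() contract
visible in the translate.c hunks below: instead of a shift count plus a
no_hw_alignment flag, callers pass a single MemOp carrying both the
access size and, for operations that must trap on unaligned addresses
regardless of the hardware-alignment option (the exclusives, l32ai,
s32ri), an explicit MO_ALIGN. The helper adds MO_ALIGN itself when the
core lacks hardware alignment support, masks the address when the
unaligned-exception option is absent, and returns the MemOp to feed to
the memory accessor. A typical caller then looks like this (a condensed
restatement of translate_l32e from this patch, for orientation only):

    TCGv_i32 addr = tcg_temp_new_i32();
    MemOp mop;

    tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
    /* Size and alignment requirement travel together in one MemOp;
     * the returned value may have had MO_ALIGN added by the helper. */
    mop = gen_load_store_alignment(dc, MO_TEUL, addr);
    tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, mop);
    tcg_temp_free(addr);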
diff --git a/default-configs/targets/xtensa-linux-user.mak b/default-configs/targets/xtensa-linux-user.mak
index fc95cc60f5..420b30a68d 100644
--- a/default-configs/targets/xtensa-linux-user.mak
+++ b/default-configs/targets/xtensa-linux-user.mak
@@ -1,5 +1,4 @@
 TARGET_ARCH=xtensa
 TARGET_SYSTBL_ABI=common
 TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
 TARGET_HAS_BFLT=y
diff --git a/default-configs/targets/xtensa-softmmu.mak b/default-configs/targets/xtensa-softmmu.mak
index 26c0285655..f075557bfa 100644
--- a/default-configs/targets/xtensa-softmmu.mak
+++ b/default-configs/targets/xtensa-softmmu.mak
@@ -1,3 +1,2 @@
 TARGET_ARCH=xtensa
-TARGET_ALIGNED_ONLY=y
 TARGET_SUPPORTS_MTTCG=y
diff --git a/default-configs/targets/xtensaeb-linux-user.mak b/default-configs/targets/xtensaeb-linux-user.mak
index cfc3518118..1ea0f1ba91 100644
--- a/default-configs/targets/xtensaeb-linux-user.mak
+++ b/default-configs/targets/xtensaeb-linux-user.mak
@@ -1,6 +1,5 @@
 TARGET_ARCH=xtensa
 TARGET_SYSTBL_ABI=common
 TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
 TARGET_WORDS_BIGENDIAN=y
 TARGET_HAS_BFLT=y
diff --git a/default-configs/targets/xtensaeb-softmmu.mak b/default-configs/targets/xtensaeb-softmmu.mak
index 14cb9289a6..405cf5acbb 100644
--- a/default-configs/targets/xtensaeb-softmmu.mak
+++ b/default-configs/targets/xtensaeb-softmmu.mak
@@ -1,4 +1,3 @@
 TARGET_ARCH=xtensa
-TARGET_ALIGNED_ONLY=y
 TARGET_WORDS_BIGENDIAN=y
 TARGET_SUPPORTS_MTTCG=y
diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c
index eeffee297d..f18ab383fd 100644
--- a/target/xtensa/helper.c
+++ b/target/xtensa/helper.c
@@ -270,13 +270,12 @@ void xtensa_cpu_do_unaligned_access(CPUState *cs,
     XtensaCPU *cpu = XTENSA_CPU(cs);
     CPUXtensaState *env = &cpu->env;
 
-    if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
-        !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
-        cpu_restore_state(CPU(cpu), retaddr, true);
-        HELPER(exception_cause_vaddr)(env,
-                                      env->pc, LOAD_STORE_ALIGNMENT_CAUSE,
-                                      addr);
-    }
+    assert(xtensa_option_enabled(env->config,
+                                 XTENSA_OPTION_UNALIGNED_EXCEPTION));
+    cpu_restore_state(CPU(cpu), retaddr, true);
+    HELPER(exception_cause_vaddr)(env,
+                                  env->pc, LOAD_STORE_ALIGNMENT_CAUSE,
+                                  addr);
 }
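Why an assertion is now sufficient here: the softmmu slow path only
invokes the unaligned-access hook for accesses whose MemOp carries
MO_ALIGN, and gen_load_store_alignment() arranges the cases so that the
hook can only fire when the unaligned-exception option is configured.
The case analysis (a restatement of the reasoning, not upstream code):

    /*
     * MO_ALIGN set, unaligned-exception option off: the address was
     *     masked in gen_load_store_alignment(), the access is always
     *     aligned, and the hook cannot fire.
     * MO_ALIGN set, unaligned-exception option on:  the hook may fire
     *     and the assertion holds.
     * MO_ALIGN clear: softmmu never calls the hook for this access.
     */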
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 95f6b21c2a..14028d307d 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -339,16 +339,6 @@ static void gen_exception_cause(DisasContext *dc, uint32_t cause)
     }
 }
 
-static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause,
-                                      TCGv_i32 vaddr)
-{
-    TCGv_i32 tpc = tcg_const_i32(dc->pc);
-    TCGv_i32 tcause = tcg_const_i32(cause);
-    gen_helper_exception_cause_vaddr(cpu_env, tpc, tcause, vaddr);
-    tcg_temp_free(tpc);
-    tcg_temp_free(tcause);
-}
-
 static void gen_debug_exception(DisasContext *dc, uint32_t cause)
 {
     TCGv_i32 tpc = tcg_const_i32(dc->pc);
@@ -554,21 +544,20 @@ static uint32_t test_exceptions_hpi(DisasContext *dc, const OpcodeArg arg[],
     return test_exceptions_sr(dc, arg, par);
 }
 
-static void gen_load_store_alignment(DisasContext *dc, int shift,
-                                     TCGv_i32 addr, bool no_hw_alignment)
+static MemOp gen_load_store_alignment(DisasContext *dc, MemOp mop,
+                                      TCGv_i32 addr)
 {
-    if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
-        tcg_gen_andi_i32(addr, addr, ~0 << shift);
-    } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) &&
-               no_hw_alignment) {
-        TCGLabel *label = gen_new_label();
-        TCGv_i32 tmp = tcg_temp_new_i32();
-        tcg_gen_andi_i32(tmp, addr, ~(~0 << shift));
-        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
-        gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
-        gen_set_label(label);
-        tcg_temp_free(tmp);
+    if ((mop & MO_SIZE) == MO_8) {
+        return mop;
     }
+    if ((mop & MO_AMASK) == MO_UNALN &&
+        !option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT)) {
+        mop |= MO_ALIGN;
+    }
+    if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
+        tcg_gen_andi_i32(addr, addr, ~0 << get_alignment_bits(mop));
+    }
+    return mop;
 }
 
 #ifndef CONFIG_USER_ONLY
@@ -1781,10 +1770,11 @@ static void translate_l32e(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
 {
     TCGv_i32 addr = tcg_temp_new_i32();
+    MemOp mop;
 
     tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
-    gen_load_store_alignment(dc, 2, addr, false);
-    tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, MO_TEUL);
+    mop = gen_load_store_alignment(dc, MO_TEUL, addr);
+    tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, mop);
     tcg_temp_free(addr);
 }
 
@@ -1810,11 +1800,12 @@ static void translate_l32ex(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
 {
     TCGv_i32 addr = tcg_temp_new_i32();
+    MemOp mop;
 
     tcg_gen_mov_i32(addr, arg[1].in);
-    gen_load_store_alignment(dc, 2, addr, true);
+    mop = gen_load_store_alignment(dc, MO_TEUL | MO_ALIGN, addr);
     gen_check_exclusive(dc, addr, false);
-    tcg_gen_qemu_ld_i32(arg[0].out, addr, dc->cring, MO_TEUL);
+    tcg_gen_qemu_ld_i32(arg[0].out, addr, dc->cring, mop);
     tcg_gen_mov_i32(cpu_exclusive_addr, addr);
     tcg_gen_mov_i32(cpu_exclusive_val, arg[0].out);
     tcg_temp_free(addr);
@@ -1824,18 +1815,18 @@ static void translate_ldst(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
 {
     TCGv_i32 addr = tcg_temp_new_i32();
+    MemOp mop;
 
     tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
-    if (par[0] & MO_SIZE) {
-        gen_load_store_alignment(dc, par[0] & MO_SIZE, addr, par[1]);
-    }
+    mop = gen_load_store_alignment(dc, par[0], addr);
+
     if (par[2]) {
         if (par[1]) {
             tcg_gen_mb(TCG_BAR_STRL | TCG_MO_ALL);
         }
-        tcg_gen_qemu_st_tl(arg[0].in, addr, dc->cring, par[0]);
+        tcg_gen_qemu_st_tl(arg[0].in, addr, dc->cring, mop);
     } else {
-        tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->cring, par[0]);
+        tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->cring, mop);
         if (par[1]) {
             tcg_gen_mb(TCG_BAR_LDAQ | TCG_MO_ALL);
         }
@@ -1906,9 +1897,11 @@ static void translate_mac16(DisasContext *dc, const OpcodeArg arg[],
     TCGv_i32 mem32 = tcg_temp_new_i32();
 
     if (ld_offset) {
+        MemOp mop;
+
         tcg_gen_addi_i32(vaddr, arg[1].in, ld_offset);
-        gen_load_store_alignment(dc, 2, vaddr, false);
-        tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring);
+        mop = gen_load_store_alignment(dc, MO_TEUL, vaddr);
+        tcg_gen_qemu_ld_tl(mem32, vaddr, dc->cring, mop);
     }
     if (op != MAC16_NONE) {
         TCGv_i32 m1 = gen_mac16_m(arg[off].in,
@@ -2354,13 +2347,14 @@ static void translate_s32c1i(DisasContext *dc, const OpcodeArg arg[],
 {
     TCGv_i32 tmp = tcg_temp_local_new_i32();
     TCGv_i32 addr = tcg_temp_local_new_i32();
+    MemOp mop;
 
     tcg_gen_mov_i32(tmp, arg[0].in);
     tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
-    gen_load_store_alignment(dc, 2, addr, true);
+    mop = gen_load_store_alignment(dc, MO_TEUL | MO_ALIGN, addr);
     gen_check_atomctl(dc, addr);
     tcg_gen_atomic_cmpxchg_i32(arg[0].out, addr, cpu_SR[SCOMPARE1],
-                               tmp, dc->cring, MO_TEUL);
+                               tmp, dc->cring, mop);
     tcg_temp_free(addr);
     tcg_temp_free(tmp);
 }
@@ -2369,10 +2363,11 @@ static void translate_s32e(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
 {
     TCGv_i32 addr = tcg_temp_new_i32();
+    MemOp mop;
 
     tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
-    gen_load_store_alignment(dc, 2, addr, false);
-    tcg_gen_qemu_st_tl(arg[0].in, addr, dc->ring, MO_TEUL);
+    mop = gen_load_store_alignment(dc, MO_TEUL, addr);
+    tcg_gen_qemu_st_tl(arg[0].in, addr, dc->ring, mop);
     tcg_temp_free(addr);
 }
 
@@ -2383,14 +2378,15 @@ static void translate_s32ex(DisasContext *dc, const OpcodeArg arg[],
     TCGv_i32 addr = tcg_temp_local_new_i32();
     TCGv_i32 res = tcg_temp_local_new_i32();
     TCGLabel *label = gen_new_label();
+    MemOp mop;
 
     tcg_gen_movi_i32(res, 0);
     tcg_gen_mov_i32(addr, arg[1].in);
-    gen_load_store_alignment(dc, 2, addr, true);
+    mop = gen_load_store_alignment(dc, MO_TEUL | MO_ALIGN, addr);
     tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, label);
     gen_check_exclusive(dc, addr, true);
     tcg_gen_atomic_cmpxchg_i32(prev, cpu_exclusive_addr, cpu_exclusive_val,
-                               arg[0].in, dc->cring, MO_TEUL);
+                               arg[0].in, dc->cring, mop);
     tcg_gen_setcond_i32(TCG_COND_EQ, res, prev, cpu_exclusive_val);
     tcg_gen_movcond_i32(TCG_COND_EQ, cpu_exclusive_val, prev,
                         cpu_exclusive_val, prev, cpu_exclusive_val);
@@ -3377,7 +3373,7 @@ static const XtensaOpcodeOps core_ops[] = {
     }, {
         .name = "l32ai",
         .translate = translate_ldst,
-        .par = (const uint32_t[]){MO_TEUL, true, false},
+        .par = (const uint32_t[]){MO_TEUL | MO_ALIGN, true, false},
         .op_flags = XTENSA_OP_LOAD,
     }, {
         .name = "l32e",
@@ -4704,7 +4700,7 @@ static const XtensaOpcodeOps core_ops[] = {
     }, {
         .name = "s32ri",
         .translate = translate_ldst,
-        .par = (const uint32_t[]){MO_TEUL, true, true},
+        .par = (const uint32_t[]){MO_TEUL | MO_ALIGN, true, true},
         .op_flags = XTENSA_OP_STORE,
     }, {
         .name = "s8i",
@@ -6639,13 +6635,14 @@ static void translate_ldsti(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
 {
     TCGv_i32 addr = tcg_temp_new_i32();
+    MemOp mop;
 
     tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
-    gen_load_store_alignment(dc, 2, addr, false);
+    mop = gen_load_store_alignment(dc, MO_TEUL, addr);
     if (par[0]) {
-        tcg_gen_qemu_st32(arg[0].in, addr, dc->cring);
+        tcg_gen_qemu_st_tl(arg[0].in, addr, dc->cring, mop);
     } else {
-        tcg_gen_qemu_ld32u(arg[0].out, addr, dc->cring);
+        tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->cring, mop);
     }
     if (par[1]) {
         tcg_gen_mov_i32(arg[1].out, addr);
@@ -6657,13 +6654,14 @@ static void translate_ldstx(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
 {
     TCGv_i32 addr = tcg_temp_new_i32();
+    MemOp mop;
 
     tcg_gen_add_i32(addr, arg[1].in, arg[2].in);
-    gen_load_store_alignment(dc, 2, addr, false);
+    mop = gen_load_store_alignment(dc, MO_TEUL, addr);
     if (par[0]) {
-        tcg_gen_qemu_st32(arg[0].in, addr, dc->cring);
+        tcg_gen_qemu_st_tl(arg[0].in, addr, dc->cring, mop);
     } else {
-        tcg_gen_qemu_ld32u(arg[0].out, addr, dc->cring);
+        tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->cring, mop);
     }
     if (par[1]) {
         tcg_gen_mov_i32(arg[1].out, addr);
@@ -7101,6 +7099,7 @@ static void translate_ldsti_d(DisasContext *dc, const OpcodeArg arg[],
                               const uint32_t par[])
 {
     TCGv_i32 addr;
+    MemOp mop;
 
     if (par[1]) {
         addr = tcg_temp_new_i32();
@@ -7108,11 +7107,11 @@ static void translate_ldsti_d(DisasContext *dc, const OpcodeArg arg[],
     } else {
         addr = arg[1].in;
     }
-    gen_load_store_alignment(dc, 3, addr, false);
+    mop = gen_load_store_alignment(dc, MO_TEQ, addr);
     if (par[0]) {
-        tcg_gen_qemu_st64(arg[0].in, addr, dc->cring);
+        tcg_gen_qemu_st_i64(arg[0].in, addr, dc->cring, mop);
     } else {
-        tcg_gen_qemu_ld64(arg[0].out, addr, dc->cring);
+        tcg_gen_qemu_ld_i64(arg[0].out, addr, dc->cring, mop);
     }
     if (par[2]) {
         if (par[1]) {
@@ -7131,6 +7130,7 @@ static void translate_ldsti_s(DisasContext *dc, const OpcodeArg arg[],
 {
     TCGv_i32 addr;
     OpcodeArg arg32[1];
+    MemOp mop;
 
     if (par[1]) {
         addr = tcg_temp_new_i32();
@@ -7138,14 +7138,14 @@ static void translate_ldsti_s(DisasContext *dc, const OpcodeArg arg[],
     } else {
         addr = arg[1].in;
     }
-    gen_load_store_alignment(dc, 2, addr, false);
+    mop = gen_load_store_alignment(dc, MO_TEUL, addr);
     if (par[0]) {
         get_f32_i1(arg, arg32, 0);
-        tcg_gen_qemu_st32(arg32[0].in, addr, dc->cring);
+        tcg_gen_qemu_st_tl(arg32[0].in, addr, dc->cring, mop);
         put_f32_i1(arg, arg32, 0);
     } else {
         get_f32_o1(arg, arg32, 0);
-        tcg_gen_qemu_ld32u(arg32[0].out, addr, dc->cring);
+        tcg_gen_qemu_ld_tl(arg32[0].out, addr, dc->cring, mop);
         put_f32_o1(arg, arg32, 0);
     }
     if (par[2]) {
@@ -7164,6 +7164,7 @@ static void translate_ldstx_d(DisasContext *dc, const OpcodeArg arg[],
                               const uint32_t par[])
 {
     TCGv_i32 addr;
+    MemOp mop;
 
     if (par[1]) {
         addr = tcg_temp_new_i32();
@@ -7171,11 +7172,11 @@ static void translate_ldstx_d(DisasContext *dc, const OpcodeArg arg[],
     } else {
         addr = arg[1].in;
     }
-    gen_load_store_alignment(dc, 3, addr, false);
+    mop = gen_load_store_alignment(dc, MO_TEQ, addr);
     if (par[0]) {
-        tcg_gen_qemu_st64(arg[0].in, addr, dc->cring);
+        tcg_gen_qemu_st_i64(arg[0].in, addr, dc->cring, mop);
     } else {
-        tcg_gen_qemu_ld64(arg[0].out, addr, dc->cring);
+        tcg_gen_qemu_ld_i64(arg[0].out, addr, dc->cring, mop);
     }
     if (par[2]) {
         if (par[1]) {
@@ -7194,6 +7195,7 @@ static void translate_ldstx_s(DisasContext *dc, const OpcodeArg arg[],
 {
     TCGv_i32 addr;
     OpcodeArg arg32[1];
+    MemOp mop;
 
     if (par[1]) {
         addr = tcg_temp_new_i32();
@@ -7201,14 +7203,14 @@ static void translate_ldstx_s(DisasContext *dc, const OpcodeArg arg[],
     } else {
         addr = arg[1].in;
     }
-    gen_load_store_alignment(dc, 2, addr, false);
+    mop = gen_load_store_alignment(dc, MO_TEUL, addr);
    if (par[0]) {
         get_f32_i1(arg, arg32, 0);
-        tcg_gen_qemu_st32(arg32[0].in, addr, dc->cring);
+        tcg_gen_qemu_st_tl(arg32[0].in, addr, dc->cring, mop);
         put_f32_i1(arg, arg32, 0);
     } else {
         get_f32_o1(arg, arg32, 0);
-        tcg_gen_qemu_ld32u(arg32[0].out, addr, dc->cring);
+        tcg_gen_qemu_ld_tl(arg32[0].out, addr, dc->cring, mop);
         put_f32_o1(arg, arg32, 0);
     }
     if (par[2]) {
diff --git a/tests/tcg/xtensa/test_load_store.S b/tests/tcg/xtensa/test_load_store.S
new file mode 100644
index 0000000000..b339f40f12
--- /dev/null
+++ b/tests/tcg/xtensa/test_load_store.S
@@ -0,0 +1,221 @@
+#include "macros.inc"
+
+test_suite load_store
+
+.macro load_ok_test op, type, data, value
+    .data
+    .align 4
+1:
+    \type \data
+    .previous
+
+    reset_ps
+    set_vector kernel, 0
+    movi a3, 1b
+    addi a4, a4, 1
+    mov a5, a4
+    \op a5, a3, 0
+    movi a6, \value
+    assert eq, a5, a6
+.endm
+
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION
+.macro load_unaligned_test will_trap, op, type, data, value
+    .data
+    .align 4
+    .byte 0
+1:
+    \type \data
+    .previous
+
+    reset_ps
+    .ifeq \will_trap
+    set_vector kernel, 0
+    .else
+    set_vector kernel, 2f
+    .endif
+    movi a3, 1b
+    addi a4, a4, 1
+    mov a5, a4
+1:
+    \op a5, a3, 0
+    .ifeq \will_trap
+    movi a6, \value
+    assert eq, a5, a6
+    .else
+    test_fail
+2:
+    rsr a6, exccause
+    movi a7, 9
+    assert eq, a6, a7
+    rsr a6, epc1
+    movi a7, 1b
+    assert eq, a6, a7
+    rsr a6, excvaddr
+    assert eq, a6, a3
+    assert eq, a5, a4
+    .endif
+    reset_ps
+.endm
+#else
+.macro load_unaligned_test will_trap, op, type, data, value
+    .data
+    .align 4
+1:
+    \type \data
+    .previous
+
+    reset_ps
+    set_vector kernel, 0
+    movi a3, 1b + 1
+    addi a4, a4, 1
+    mov a5, a4
+    \op a5, a3, 0
+    movi a6, \value
+    assert eq, a5, a6
+.endm
+#endif
+
+.macro store_ok_test op, type, value
+    .data
+    .align 4
+    .byte 0, 0, 0, 0x55
+1:
+    \type 0
+2:
+    .byte 0xaa
+    .previous
+
+    reset_ps
+    set_vector kernel, 0
+    movi a3, 1b
+    movi a5, \value
+    \op a5, a3, 0
+    movi a3, 2b
+    l8ui a5, a3, 0
+    movi a6, 0xaa
+    assert eq, a5, a6
+    movi a3, 1b - 1
+    l8ui a5, a3, 0
+    movi a6, 0x55
+    assert eq, a5, a6
+.endm
+
+#if XCHAL_UNALIGNED_STORE_EXCEPTION
+.macro store_unaligned_test will_trap, op, nop, type, value
+    .data
+    .align 4
+    .byte 0x55
+1:
+    \type 0
+2:
+    .byte 0xaa
+    .previous
+
+    reset_ps
+    .ifeq \will_trap
+    set_vector kernel, 0
+    .else
+    set_vector kernel, 4f
+    .endif
+    movi a3, 1b
+    movi a5, \value
+3:
+    \op a5, a3, 0
+    .ifne \will_trap
+    test_fail
+4:
+    rsr a6, exccause
+    movi a7, 9
+    assert eq, a6, a7
+    rsr a6, epc1
+    movi a7, 3b
+    assert eq, a6, a7
+    rsr a6, excvaddr
+    assert eq, a6, a3
+    l8ui a5, a3, 0
+    assert eqi, a5, 0
+    .endif
+    reset_ps
+    movi a3, 2b
+    l8ui a5, a3, 0
+    movi a6, 0xaa
+    assert eq, a5, a6
+    movi a3, 1b - 1
+    l8ui a5, a3, 0
+    movi a6, 0x55
+    assert eq, a5, a6
+.endm
+#else
+.macro store_unaligned_test will_trap, sop, lop, type, value
+    .data
+    .align 4
+    .byte 0x55
+1:
+    \type 0
+    .previous
+
+    reset_ps
+    set_vector kernel, 0
+    movi a3, 1b
+    movi a5, \value
+    \sop a5, a3, 0
+    movi a3, 1b - 1
+    \lop a6, a3, 0
+    assert eq, a5, a6
+.endm
+#endif
+
+test load_ok
+    load_ok_test l16si, .short, 0x00001234, 0x00001234
+    load_ok_test l16si, .short, 0x000089ab, 0xffff89ab
+    load_ok_test l16ui, .short, 0x00001234, 0x00001234
+    load_ok_test l16ui, .short, 0x000089ab, 0x000089ab
+    load_ok_test l32i, .word, 0x12345678, 0x12345678
+#if XCHAL_HAVE_RELEASE_SYNC
+    load_ok_test l32ai, .word, 0x12345678, 0x12345678
+#endif
+test_end
+
+#undef WILL_TRAP
+#if XCHAL_UNALIGNED_LOAD_HW
+#define WILL_TRAP 0
+#else
+#define WILL_TRAP 1
+#endif
+
+test load_unaligned
+    load_unaligned_test WILL_TRAP, l16si, .short, 0x00001234, 0x00001234
+    load_unaligned_test WILL_TRAP, l16si, .short, 0x000089ab, 0xffff89ab
+    load_unaligned_test WILL_TRAP, l16ui, .short, 0x00001234, 0x00001234
+    load_unaligned_test WILL_TRAP, l16ui, .short, 0x000089ab, 0x000089ab
+    load_unaligned_test WILL_TRAP, l32i, .word, 0x12345678, 0x12345678
+#if XCHAL_HAVE_RELEASE_SYNC
+    load_unaligned_test 1, l32ai, .word, 0x12345678, 0x12345678
+#endif
+test_end
+
+test store_ok
+    store_ok_test s16i, .short, 0x00001234
+    store_ok_test s32i, .word, 0x12345678
+#if XCHAL_HAVE_RELEASE_SYNC
+    store_ok_test s32ri, .word, 0x12345678
+#endif
+test_end
+
+#undef WILL_TRAP
+#if XCHAL_UNALIGNED_STORE_HW
+#define WILL_TRAP 0
+#else
+#define WILL_TRAP 1
+#endif
+
+test store_unaligned
+    store_unaligned_test WILL_TRAP, s16i, l16ui, .short, 0x00001234
+    store_unaligned_test WILL_TRAP, s32i, l32i, .word, 0x12345678
+#if XCHAL_HAVE_RELEASE_SYNC
+    store_unaligned_test 1, s32ri, l32i, .word, 0x12345678
+#endif
+test_end
+
+test_suite_end
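A note on running the new test (setup-dependent, so take the specifics
as a hint rather than gospel): files under tests/tcg/xtensa are picked
up by QEMU's check-tcg harness, so with an xtensa cross toolchain
available something along the lines of "make check-tcg" from the build
tree should assemble and run test_load_store against the configured
test core. Which branch of the conditional macros above gets exercised
depends on that core's XCHAL_UNALIGNED_* settings.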