target/microblaze: Use cc->do_unaligned_access

This fixes the problem in which unaligned stores succeeded,
but then we raised the exception after modifying memory.
Store the ESS (Exception Specific Status) for the unaligned data access
in the iflags for the insn, so that it can be found during unwind.
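
As a worked example (using the definitions this patch adds to cpu.h):
an unaligned word store to r5 is recorded at translate time as

    iflags |= ESR_ESS_FLAG | (5 << 5) | ESR_S | ESR_W;

and mb_cpu_do_unaligned_access() rebuilds the exception syndrome
during unwind as

    esr = ESR_EC_UNALIGNED_DATA | (iflags & ESR_ESS_MASK);  /* = 0xca1 */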

Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2020-08-20 20:29:01 -07:00
Commit: ab0c8d0f5b (parent: 2271a6ac0a)

6 changed files with 64 additions and 69 deletions

target/microblaze/cpu.c

@@ -317,6 +317,7 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
     cc->class_by_name = mb_cpu_class_by_name;
     cc->has_work = mb_cpu_has_work;
     cc->do_interrupt = mb_cpu_do_interrupt;
+    cc->do_unaligned_access = mb_cpu_do_unaligned_access;
     cc->cpu_exec_interrupt = mb_cpu_exec_interrupt;
     cc->dump_state = mb_cpu_dump_state;
     cc->set_pc = mb_cpu_set_pc;

target/microblaze/cpu.h

@@ -79,10 +79,13 @@ typedef struct CPUMBState CPUMBState;
 
 /* Exception State Register (ESR) Fields */
 #define ESR_DIZ (1<<11) /* Zone Protection */
+#define ESR_W   (1<<11) /* Unaligned word access */
+#define ESR_S   (1<<10) /* Store instruction */
 
 #define ESR_ESS_FSL_OFFSET 5
+#define ESR_ESS_MASK (0x7f << 5)
 
 #define ESR_EC_FSL 0
 #define ESR_EC_UNALIGNED_DATA 1
 #define ESR_EC_ILLEGAL_OP 2
@@ -256,9 +259,11 @@ struct CPUMBState {
 /* Internal flags. */
 #define IMM_FLAG (1 << 0)
 #define BIMM_FLAG (1 << 1)
-/* MSR_EE (1 << 8) */
+#define ESR_ESS_FLAG (1 << 2) /* indicates ESR_ESS_MASK is present */
+/* MSR_EE (1 << 8) -- these 3 are not in iflags but tb_flags */
 /* MSR_UM (1 << 11) */
 /* MSR_VM (1 << 13) */
+/* ESR_ESS_MASK [11:5] -- unwind into iflags for unaligned excp */
 #define DRTI_FLAG (1 << 16)
 #define DRTE_FLAG (1 << 17)
 #define DRTB_FLAG (1 << 18)
@@ -330,6 +335,9 @@ struct MicroBlazeCPU {
 
 void mb_cpu_do_interrupt(CPUState *cs);
 bool mb_cpu_exec_interrupt(CPUState *cs, int int_req);
+void mb_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+                                MMUAccessType access_type,
+                                int mmu_idx, uintptr_t retaddr);
 void mb_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
 hwaddr mb_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
 int mb_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);

target/microblaze/helper.c

@@ -296,3 +296,31 @@ bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
     }
     return false;
 }
+
+void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+                                MMUAccessType access_type,
+                                int mmu_idx, uintptr_t retaddr)
+{
+    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+    uint32_t esr, iflags;
+
+    /* Recover the pc and iflags from the corresponding insn_start. */
+    cpu_restore_state(cs, retaddr, true);
+    iflags = cpu->env.iflags;
+
+    qemu_log_mask(CPU_LOG_INT,
+                  "Unaligned access addr=" TARGET_FMT_lx
+                  " pc=%x iflags=%x\n", addr, cpu->env.pc, iflags);
+
+    esr = ESR_EC_UNALIGNED_DATA;
+    if (likely(iflags & ESR_ESS_FLAG)) {
+        esr |= iflags & ESR_ESS_MASK;
+    } else {
+        qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
+    }
+
+    cpu->env.ear = addr;
+    cpu->env.esr = esr;
+    cs->exception_index = EXCP_HW_EXCP;
+    cpu_loop_exit(cs);
+}

target/microblaze/helper.h

@@ -25,7 +25,6 @@ DEF_HELPER_3(mmu_read, i32, env, i32, i32)
 DEF_HELPER_4(mmu_write, void, env, i32, i32, i32)
 #endif
 
-DEF_HELPER_5(memalign, void, env, tl, i32, i32, i32)
 DEF_HELPER_FLAGS_2(stackprot, TCG_CALL_NO_WG, void, env, tl)
 
 DEF_HELPER_2(get, i32, i32, i32)

target/microblaze/op_helper.c

@@ -365,27 +365,6 @@ uint32_t helper_pcmpbf(uint32_t a, uint32_t b)
     return 0;
 }
 
-void helper_memalign(CPUMBState *env, target_ulong addr,
-                     uint32_t dr, uint32_t wr,
-                     uint32_t mask)
-{
-    if (addr & mask) {
-        qemu_log_mask(CPU_LOG_INT,
-                      "unaligned access addr=" TARGET_FMT_lx
-                      " mask=%x, wr=%d dr=r%d\n",
-                      addr, mask, wr, dr);
-        env->ear = addr;
-        env->esr = ESR_EC_UNALIGNED_DATA | (wr << 10) | (dr & 31) << 5;
-        if (mask == 3) {
-            env->esr |= 1 << 11;
-        }
-        if (!(env->msr & MSR_EE)) {
-            return;
-        }
-        helper_raise_exception(env, EXCP_HW_EXCP);
-    }
-}
-
 void helper_stackprot(CPUMBState *env, target_ulong addr)
 {
     if (addr < env->slr || addr > env->shr) {

target/microblaze/translate.c

@@ -751,10 +751,22 @@ static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
     return ret;
 }
 
+static void record_unaligned_ess(DisasContext *dc, int rd,
+                                 MemOp size, bool store)
+{
+    uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);
+
+    iflags |= ESR_ESS_FLAG;
+    iflags |= rd << 5;
+    iflags |= store * ESR_S;
+    iflags |= (size == MO_32) * ESR_W;
+
+    tcg_set_insn_start_param(dc->insn_start, 1, iflags);
+}
+
 static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
 {
-    TCGv_i32 v;
     MemOp size = mop & MO_SIZE;
 
     /*
@@ -774,34 +786,15 @@ static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
     sync_jmpstate(dc);
 
-    /*
-     * Microblaze gives MMU faults priority over faults due to
-     * unaligned addresses. That's why we speculatively do the load
-     * into v. If the load succeeds, we verify alignment of the
-     * address and if that succeeds we write into the destination reg.
-     */
-    v = tcg_temp_new_i32();
-    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);
-
-    /* TODO: Convert to CPUClass::do_unaligned_access. */
-    if (dc->cpu->cfg.unaligned_exceptions && size > MO_8) {
-        TCGv_i32 t0 = tcg_const_i32(0);
-        TCGv_i32 treg = tcg_const_i32(rd);
-        TCGv_i32 tsize = tcg_const_i32((1 << size) - 1);
-
-        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
-        gen_helper_memalign(cpu_env, addr, treg, t0, tsize);
-
-        tcg_temp_free_i32(t0);
-        tcg_temp_free_i32(treg);
-        tcg_temp_free_i32(tsize);
+    if (size > MO_8 &&
+        (dc->tb_flags & MSR_EE) &&
+        dc->cpu->cfg.unaligned_exceptions) {
+        record_unaligned_ess(dc, rd, size, false);
+        mop |= MO_ALIGN;
     }
 
-    if (rd) {
-        tcg_gen_mov_i32(cpu_R[rd], v);
-    }
+    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
 
-    tcg_temp_free_i32(v);
     tcg_temp_free(addr);
     return true;
 }
@@ -931,28 +924,15 @@ static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
     sync_jmpstate(dc);
 
-    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
-
-    /* TODO: Convert to CPUClass::do_unaligned_access. */
-    if (dc->cpu->cfg.unaligned_exceptions && size > MO_8) {
-        TCGv_i32 t1 = tcg_const_i32(1);
-        TCGv_i32 treg = tcg_const_i32(rd);
-        TCGv_i32 tsize = tcg_const_i32((1 << size) - 1);
-
-        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
-        /* FIXME: if the alignment is wrong, we should restore the value
-         * in memory. One possible way to achieve this is to probe
-         * the MMU prior to the memaccess, thay way we could put
-         * the alignment checks in between the probe and the mem
-         * access.
-         */
-        gen_helper_memalign(cpu_env, addr, treg, t1, tsize);
-
-        tcg_temp_free_i32(t1);
-        tcg_temp_free_i32(treg);
-        tcg_temp_free_i32(tsize);
+    if (size > MO_8 &&
+        (dc->tb_flags & MSR_EE) &&
+        dc->cpu->cfg.unaligned_exceptions) {
+        record_unaligned_ess(dc, rd, size, true);
+        mop |= MO_ALIGN;
     }
 
+    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
+
     tcg_temp_free(addr);
     return true;
 }
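
For readers tracing the encode/decode path, below is a minimal stand-alone
sketch (not part of the commit) of the round trip between
record_unaligned_ess() and mb_cpu_do_unaligned_access(). The constants are
copied from the cpu.h hunk above; pack_iflags() and unpack_esr() are
hypothetical stand-ins for the translate-time and unwind-time halves.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Constants as added to target/microblaze/cpu.h by this commit. */
    #define ESR_W                 (1u << 11) /* Unaligned word access */
    #define ESR_S                 (1u << 10) /* Store instruction */
    #define ESR_ESS_MASK          (0x7fu << 5)
    #define ESR_EC_UNALIGNED_DATA 1u
    #define ESR_ESS_FLAG          (1u << 2)

    /* Translate-time half: what record_unaligned_ess() folds into iflags. */
    static uint32_t pack_iflags(unsigned rd, int is_word, int is_store)
    {
        uint32_t iflags = ESR_ESS_FLAG;

        iflags |= rd << 5;
        iflags |= is_store ? ESR_S : 0;
        iflags |= is_word ? ESR_W : 0;
        return iflags;
    }

    /* Unwind-time half: what mb_cpu_do_unaligned_access() makes of iflags. */
    static uint32_t unpack_esr(uint32_t iflags)
    {
        uint32_t esr = ESR_EC_UNALIGNED_DATA;

        if (iflags & ESR_ESS_FLAG) {
            esr |= iflags & ESR_ESS_MASK;
        }
        return esr;
    }

    int main(void)
    {
        /* An unaligned word store to r5, e.g. "swi r5, r0, 2". */
        uint32_t esr = unpack_esr(pack_iflags(5, 1, 1));

        assert(esr == (ESR_EC_UNALIGNED_DATA | ESR_W | ESR_S | (5u << 5)));
        printf("esr = 0x%03x\n", esr); /* prints esr = 0xca1 */
        return 0;
    }

Note how this works only because the ESR_S and ESR_W bit positions fall
inside the ESS field (bits 11:5) selected by ESR_ESS_MASK, so a single
mask-and-or recovers the full syndrome during unwind.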