target/riscv: Use new atomic min/max expanders

Reviewed-by: Michael Clark <mjc@sifive.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180508151437.4232-7-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Richard Henderson authored on 2018-05-10 18:10:57 +01:00; committed by Peter Maydell
parent 58edf9eef9
commit 3e281df1e1
1 changed file with 20 additions and 52 deletions
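
The patch drops the open-coded min/max sequence in gen_atomic() (load, movcond to pick the smaller or larger value, then a cmpxchg retry loop on the parallel path or a plain store on the serial path) and instead calls the generic TCG expanders tcg_gen_atomic_fetch_smin_tl, tcg_gen_atomic_fetch_smax_tl, tcg_gen_atomic_fetch_umin_tl and tcg_gen_atomic_fetch_umax_tl. As a rough mental model of the fetch-and-signed-minimum semantics involved, here is a minimal C11 sketch written as the same compare-and-swap retry loop the removed code emitted; the function name and the use of C11 atomics are assumptions made for illustration only, not QEMU code:

    #include <stdint.h>
    #include <stdatomic.h>

    /*
     * Illustrative sketch only: fetch-and-signed-minimum expressed as a
     * compare-and-swap retry loop.  QEMU's expander emits TCG ops (or calls
     * an out-of-line helper) rather than compiling code like this.
     */
    static int64_t fetch_smin_i64(_Atomic int64_t *addr, int64_t val)
    {
        int64_t old = atomic_load_explicit(addr, memory_order_relaxed);

        /* On failure the CAS reloads 'old' with the current memory value,
         * so we simply recompute the minimum and try again. */
        while (!atomic_compare_exchange_weak_explicit(
                   addr, &old, val < old ? val : old,
                   memory_order_seq_cst, memory_order_relaxed)) {
            /* retry */
        }
        return old;   /* the AMO writes this original memory value to rd */
    }

Like the removed loop, this assumes the retry eventually succeeds; the patch simply moves that loop out of the RISC-V front end and into the shared TCG expanders.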

target/riscv/translate.c

@@ -724,7 +724,6 @@ static void gen_atomic(DisasContext *ctx, uint32_t opc,
     TCGv src1, src2, dat;
     TCGLabel *l1, *l2;
     TCGMemOp mop;
-    TCGCond cond;
     bool aq, rl;
 
     /* Extract the size of the atomic operation. */
@@ -822,60 +821,29 @@ static void gen_atomic(DisasContext *ctx, uint32_t opc,
         tcg_gen_atomic_fetch_or_tl(src2, src1, src2, ctx->mem_idx, mop);
         gen_set_gpr(rd, src2);
         break;
     case OPC_RISC_AMOMIN:
-        cond = TCG_COND_LT;
-        goto do_minmax;
-    case OPC_RISC_AMOMAX:
-        cond = TCG_COND_GT;
-        goto do_minmax;
-    case OPC_RISC_AMOMINU:
-        cond = TCG_COND_LTU;
-        goto do_minmax;
-    case OPC_RISC_AMOMAXU:
-        cond = TCG_COND_GTU;
-        goto do_minmax;
-    do_minmax:
-        /* Handle the RL barrier. The AQ barrier is handled along the
-           parallel path by the SC atomic cmpxchg. On the serial path,
-           of course, barriers do not matter. */
-        if (rl) {
-            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
-        }
-        if (tb_cflags(ctx->tb) & CF_PARALLEL) {
-            l1 = gen_new_label();
-            gen_set_label(l1);
-        } else {
-            l1 = NULL;
-        }
         gen_get_gpr(src1, rs1);
         gen_get_gpr(src2, rs2);
-        if ((mop & MO_SSIZE) == MO_SL) {
-            /* Sign-extend the register comparison input. */
-            tcg_gen_ext32s_tl(src2, src2);
-        }
-        dat = tcg_temp_local_new();
-        tcg_gen_qemu_ld_tl(dat, src1, ctx->mem_idx, mop);
-        tcg_gen_movcond_tl(cond, src2, dat, src2, dat, src2);
-        if (tb_cflags(ctx->tb) & CF_PARALLEL) {
-            /* Parallel context. Make this operation atomic by verifying
-               that the memory didn't change while we computed the result. */
-            tcg_gen_atomic_cmpxchg_tl(src2, src1, dat, src2, ctx->mem_idx, mop);
-            /* If the cmpxchg failed, retry. */
-            /* ??? There is an assumption here that this will eventually
-               succeed, such that we don't live-lock. This is not unlike
-               a similar loop that the compiler would generate for e.g.
-               __atomic_fetch_and_xor, so don't worry about it. */
-            tcg_gen_brcond_tl(TCG_COND_NE, dat, src2, l1);
-        } else {
-            /* Serial context. Directly store the result. */
-            tcg_gen_qemu_st_tl(src2, src1, ctx->mem_idx, mop);
-        }
-        gen_set_gpr(rd, dat);
-        tcg_temp_free(dat);
+        tcg_gen_atomic_fetch_smin_tl(src2, src1, src2, ctx->mem_idx, mop);
+        gen_set_gpr(rd, src2);
         break;
+    case OPC_RISC_AMOMAX:
+        gen_get_gpr(src1, rs1);
+        gen_get_gpr(src2, rs2);
+        tcg_gen_atomic_fetch_smax_tl(src2, src1, src2, ctx->mem_idx, mop);
+        gen_set_gpr(rd, src2);
+        break;
+    case OPC_RISC_AMOMINU:
+        gen_get_gpr(src1, rs1);
+        gen_get_gpr(src2, rs2);
+        tcg_gen_atomic_fetch_umin_tl(src2, src1, src2, ctx->mem_idx, mop);
+        gen_set_gpr(rd, src2);
+        break;
+    case OPC_RISC_AMOMAXU:
+        gen_get_gpr(src1, rs1);
+        gen_get_gpr(src2, rs2);
+        tcg_gen_atomic_fetch_umax_tl(src2, src1, src2, ctx->mem_idx, mop);
+        gen_set_gpr(rd, src2);
+        break;
     default:
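
A related detail from the removed code: for the word-sized AMOs on a 64-bit target it had to sign-extend the register comparison input (the tcg_gen_ext32s_tl call guarded by MO_SL) because the movcond compared at full target_long width, whereas the generic expanders perform the operation at the width of the memory access, so that step disappears. A small stand-alone C example, with illustrative values only (not taken from QEMU), of why the extension matters for a signed minimum:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Illustrative values only: 0xffffffff is -1 as a signed 32-bit word,
     * but 4294967295 if widened without sign-extension, so a signed minimum
     * computed at 64-bit width picks the wrong value unless the word operand
     * is sign-extended first.
     */
    int main(void)
    {
        int64_t mem = 5;              /* signed value loaded from memory */
        uint64_t reg = 0xffffffffu;   /* register operand; only its low 32 bits matter */

        int64_t no_ext = mem < (int64_t)reg ? mem : (int64_t)reg;  /* min(5, 4294967295) = 5 */
        int64_t ext    = mem < (int32_t)reg ? mem : (int32_t)reg;  /* min(5, -1) = -1 */

        printf("without sign-extension: %" PRId64 "\n", no_ext);
        printf("with sign-extension:    %" PRId64 "\n", ext);
        return 0;
    }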