target/alpha: Reorg integer memory operations

Pass in the MemOp instead of a callback. Drop the fp argument, since
floating-point loads and stores now go through gen_load_fp and
gen_store_fp; add a locked argument so that LDL_L/LDQ_L no longer need
dedicated wrappers.
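
The shape of the change, condensed from the diff below (signatures only):

    /* Before: behavior selected by callback; fp picks the register file. */
    static inline void gen_load_mem(DisasContext *ctx,
                                    void (*tcg_gen_qemu_load)(TCGv, TCGv, int),
                                    int ra, int rb, int32_t disp16,
                                    bool fp, bool clear);

    /* After: behavior selected by MemOp; locked subsumes LDL_L/LDQ_L. */
    static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                             MemOp op, bool clear, bool locked);
    static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                              MemOp op, bool clear);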

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 5ffcb33426
parent 452635318b
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2021-07-30 20:15:31 -10:00

1 changed file with 40 additions and 64 deletions


--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -308,27 +308,10 @@ static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
     }
 }
 
-static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
+static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
+                         MemOp op, bool clear, bool locked)
 {
-    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
-    tcg_gen_mov_i64(cpu_lock_addr, t1);
-    tcg_gen_mov_i64(cpu_lock_value, t0);
-}
-
-static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
-{
-    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
-    tcg_gen_mov_i64(cpu_lock_addr, t1);
-    tcg_gen_mov_i64(cpu_lock_value, t0);
-}
-
-static inline void gen_load_mem(DisasContext *ctx,
-                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
-                                                          int flags),
-                                int ra, int rb, int32_t disp16, bool fp,
-                                bool clear)
-{
-    TCGv tmp, addr, va;
+    TCGv addr, dest;
 
     /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
        prefetches, which we can treat as nops.  No worries about
@@ -337,22 +320,20 @@ static inline void gen_load_mem(DisasContext *ctx,
         return;
     }
 
-    tmp = tcg_temp_new();
-    addr = load_gpr(ctx, rb);
-
-    if (disp16) {
-        tcg_gen_addi_i64(tmp, addr, disp16);
-        addr = tmp;
-    }
+    addr = tcg_temp_new();
+    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
     if (clear) {
-        tcg_gen_andi_i64(tmp, addr, ~0x7);
-        addr = tmp;
+        tcg_gen_andi_i64(addr, addr, ~0x7);
     }
 
-    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
-    tcg_gen_qemu_load(va, addr, ctx->mem_idx);
+    dest = ctx->ir[ra];
+    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op);
 
-    tcg_temp_free(tmp);
+    if (locked) {
+        tcg_gen_mov_i64(cpu_lock_addr, addr);
+        tcg_gen_mov_i64(cpu_lock_value, dest);
+    }
+    tcg_temp_free(addr);
 }
 
 static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr)
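
Note: gen_load_int adds disp16 unconditionally where gen_load_mem special-cased
disp16 == 0. The two are equivalent because tcg_gen_addi_i64() already folds a
zero immediate into a plain move; paraphrased from tcg/tcg-op.c:

    void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
    {
        if (arg2 == 0) {
            tcg_gen_mov_i64(ret, arg1);     /* no add op emitted */
        } else {
            tcg_gen_add_i64(ret, arg1, tcg_constant_i64(arg2));
        }
    }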
@@ -393,30 +374,21 @@ static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
     tcg_temp_free(addr);
 }
 
-static inline void gen_store_mem(DisasContext *ctx,
-                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
-                                                            int flags),
-                                 int ra, int rb, int32_t disp16, bool fp,
-                                 bool clear)
+static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
+                          MemOp op, bool clear)
 {
-    TCGv tmp, addr, va;
+    TCGv addr, src;
 
-    tmp = tcg_temp_new();
-    addr = load_gpr(ctx, rb);
-
-    if (disp16) {
-        tcg_gen_addi_i64(tmp, addr, disp16);
-        addr = tmp;
-    }
+    addr = tcg_temp_new();
+    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
     if (clear) {
-        tcg_gen_andi_i64(tmp, addr, ~0x7);
-        addr = tmp;
+        tcg_gen_andi_i64(addr, addr, ~0x7);
     }
 
-    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
-    tcg_gen_qemu_store(va, addr, ctx->mem_idx);
+    src = load_gpr(ctx, ra);
+    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op);
 
-    tcg_temp_free(tmp);
+    tcg_temp_free(addr);
 }
 
 static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
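
Note: gen_store_int takes no locked argument; the store half of a locked pair
(STL_C/STQ_C) is handled separately by gen_store_conditional() below.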
@@ -1511,30 +1483,30 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
     case 0x0A:
         /* LDBU */
         REQUIRE_AMASK(BWX);
-        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
+        gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0);
         break;
     case 0x0B:
         /* LDQ_U */
-        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
+        gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 1, 0);
         break;
     case 0x0C:
         /* LDWU */
         REQUIRE_AMASK(BWX);
-        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
+        gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0);
         break;
     case 0x0D:
         /* STW */
         REQUIRE_AMASK(BWX);
-        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
+        gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0);
         break;
     case 0x0E:
         /* STB */
         REQUIRE_AMASK(BWX);
-        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
+        gen_store_int(ctx, ra, rb, disp16, MO_UB, 0);
         break;
     case 0x0F:
         /* STQ_U */
-        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
+        gen_store_int(ctx, ra, rb, disp16, MO_LEQ, 1);
         break;
     case 0x10:
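
For reference, the MemOp constants at these call sites and the LDL/STL sites
below (standard TCG encodings; Alpha is little-endian):

    MO_UB      8-bit, unsigned            (LDBU, STB)
    MO_LEUW    16-bit LE, zero-extended   (LDWU, STW)
    MO_LESL    32-bit LE, sign-extended   (LDL, LDL_L)
    MO_LEUL    32-bit LE                  (STL)
    MO_LEQ     64-bit LE                  (LDQ, LDQ_U, LDQ_L, STQ, STQ_U)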
@@ -2489,11 +2461,15 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
                 break;
             case 0x2:
                 /* Longword physical access with lock (hw_ldl_l/p) */
-                gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
+                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+                tcg_gen_mov_i64(cpu_lock_addr, addr);
+                tcg_gen_mov_i64(cpu_lock_value, va);
                 break;
             case 0x3:
                 /* Quadword physical access with lock (hw_ldq_l/p) */
-                gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
+                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
+                tcg_gen_mov_i64(cpu_lock_addr, addr);
+                tcg_gen_mov_i64(cpu_lock_value, va);
                 break;
             case 0x4:
                 /* Longword virtual PTE fetch (hw_ldl/v) */
@@ -2846,27 +2822,27 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
         break;
     case 0x28:
         /* LDL */
-        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
+        gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0);
         break;
    case 0x29:
         /* LDQ */
-        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
+        gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 0, 0);
         break;
     case 0x2A:
         /* LDL_L */
-        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
+        gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1);
         break;
     case 0x2B:
         /* LDQ_L */
-        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
+        gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 0, 1);
         break;
     case 0x2C:
         /* STL */
-        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
+        gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0);
         break;
     case 0x2D:
         /* STQ */
-        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
+        gen_store_int(ctx, ra, rb, disp16, MO_LEQ, 0);
         break;
     case 0x2E:
         /* STL_C */