target/Hexagon: Finish conversion to tcg_gen_qemu_{ld,st}_*

Convert away from the old interface with the implicit
MemOp argument.  Importantly, this removes some incorrect
casts generated by idef-parser's gen_load().

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Taylor Simpson <tsimpson@quicinc.com>
Reviewed-by: Taylor Simpson <tsimpson@quicinc.com>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20230502135741.1158035-4-richard.henderson@linaro.org>
This commit is contained in:
Richard Henderson 2023-05-02 14:57:35 +01:00
parent a9a9c3fa6f
commit 53b26d253c
4 changed files with 40 additions and 42 deletions

View File

@@ -320,14 +320,14 @@ void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src)
static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index) static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index)
{ {
tcg_gen_qemu_ld32u(dest, vaddr, mem_index); tcg_gen_qemu_ld_tl(dest, vaddr, mem_index, MO_TEUL);
tcg_gen_mov_tl(hex_llsc_addr, vaddr); tcg_gen_mov_tl(hex_llsc_addr, vaddr);
tcg_gen_mov_tl(hex_llsc_val, dest); tcg_gen_mov_tl(hex_llsc_val, dest);
} }
static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index) static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index)
{ {
tcg_gen_qemu_ld64(dest, vaddr, mem_index); tcg_gen_qemu_ld_i64(dest, vaddr, mem_index, MO_TEUQ);
tcg_gen_mov_tl(hex_llsc_addr, vaddr); tcg_gen_mov_tl(hex_llsc_addr, vaddr);
tcg_gen_mov_i64(hex_llsc_val_i64, dest); tcg_gen_mov_i64(hex_llsc_val_i64, dest);
} }
@@ -678,7 +678,7 @@ static void gen_load_frame(DisasContext *ctx, TCGv_i64 frame, TCGv EA)
{ {
Insn *insn = ctx->insn; /* Needed for CHECK_NOSHUF */ Insn *insn = ctx->insn; /* Needed for CHECK_NOSHUF */
CHECK_NOSHUF(EA, 8); CHECK_NOSHUF(EA, 8);
tcg_gen_qemu_ld64(frame, EA, ctx->mem_idx); tcg_gen_qemu_ld_i64(frame, EA, ctx->mem_idx, MO_TEUQ);
} }
static void gen_return(DisasContext *ctx, TCGv_i64 dst, TCGv src) static void gen_return(DisasContext *ctx, TCGv_i64 dst, TCGv src)
@@ -1019,7 +1019,7 @@ static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src,
tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1)); tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1));
} }
for (int i = 0; i < sizeof(MMVector) / 8; i++) { for (int i = 0; i < sizeof(MMVector) / 8; i++) {
tcg_gen_qemu_ld64(tmp, src, ctx->mem_idx); tcg_gen_qemu_ld_i64(tmp, src, ctx->mem_idx, MO_TEUQ);
tcg_gen_addi_tl(src, src, 8); tcg_gen_addi_tl(src, src, 8);
tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8); tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8);
} }

View File

@@ -1737,36 +1737,34 @@ void gen_load_cancel(Context *c, YYLTYPE *locp)
void gen_load(Context *c, YYLTYPE *locp, HexValue *width, void gen_load(Context *c, YYLTYPE *locp, HexValue *width,
HexSignedness signedness, HexValue *ea, HexValue *dst) HexSignedness signedness, HexValue *ea, HexValue *dst)
{ {
char size_suffix[4] = {0}; unsigned dst_bit_width;
const char *sign_suffix; unsigned src_bit_width;
/* Memop width is specified in the load macro */ /* Memop width is specified in the load macro */
assert_signedness(c, locp, signedness); assert_signedness(c, locp, signedness);
sign_suffix = (width->imm.value > 4)
? ""
: ((signedness == UNSIGNED) ? "u" : "s");
/* If dst is a variable, assert that is declared and load the type info */ /* If dst is a variable, assert that is declared and load the type info */
if (dst->type == VARID) { if (dst->type == VARID) {
find_variable(c, locp, dst, dst); find_variable(c, locp, dst, dst);
} }
snprintf(size_suffix, 4, "%" PRIu64, width->imm.value * 8); src_bit_width = width->imm.value * 8;
dst_bit_width = MAX(dst->bit_width, 32);
/* Lookup the effective address EA */ /* Lookup the effective address EA */
find_variable(c, locp, ea, ea); find_variable(c, locp, ea, ea);
OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_store_s1) {\n"); OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_store_s1) {\n");
OUT(c, locp, "probe_noshuf_load(", ea, ", ", width, ", ctx->mem_idx);\n"); OUT(c, locp, "probe_noshuf_load(", ea, ", ", width, ", ctx->mem_idx);\n");
OUT(c, locp, "process_store(ctx, 1);\n"); OUT(c, locp, "process_store(ctx, 1);\n");
OUT(c, locp, "}\n"); OUT(c, locp, "}\n");
OUT(c, locp, "tcg_gen_qemu_ld", size_suffix, sign_suffix);
OUT(c, locp, "tcg_gen_qemu_ld_i", &dst_bit_width);
OUT(c, locp, "("); OUT(c, locp, "(");
if (dst->bit_width > width->imm.value * 8) { OUT(c, locp, dst, ", ", ea, ", ctx->mem_idx, MO_", &src_bit_width);
/* if (signedness == SIGNED) {
* Cast to the correct TCG type if necessary, to avoid implict cast OUT(c, locp, " | MO_SIGN");
* warnings. This is needed when the width of the destination var is
* larger than the size of the requested load.
*/
OUT(c, locp, "(TCGv) ");
} }
OUT(c, locp, dst, ", ", ea, ", ctx->mem_idx);\n"); OUT(c, locp, " | MO_TE);\n");
} }
void gen_store(Context *c, YYLTYPE *locp, HexValue *width, HexValue *ea, void gen_store(Context *c, YYLTYPE *locp, HexValue *width, HexValue *ea,

View File

@@ -99,37 +99,37 @@
#define MEM_LOAD1s(DST, VA) \ #define MEM_LOAD1s(DST, VA) \
do { \ do { \
CHECK_NOSHUF(VA, 1); \ CHECK_NOSHUF(VA, 1); \
tcg_gen_qemu_ld8s(DST, VA, ctx->mem_idx); \ tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_SB); \
} while (0) } while (0)
#define MEM_LOAD1u(DST, VA) \ #define MEM_LOAD1u(DST, VA) \
do { \ do { \
CHECK_NOSHUF(VA, 1); \ CHECK_NOSHUF(VA, 1); \
tcg_gen_qemu_ld8u(DST, VA, ctx->mem_idx); \ tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_UB); \
} while (0) } while (0)
#define MEM_LOAD2s(DST, VA) \ #define MEM_LOAD2s(DST, VA) \
do { \ do { \
CHECK_NOSHUF(VA, 2); \ CHECK_NOSHUF(VA, 2); \
tcg_gen_qemu_ld16s(DST, VA, ctx->mem_idx); \ tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TESW); \
} while (0) } while (0)
#define MEM_LOAD2u(DST, VA) \ #define MEM_LOAD2u(DST, VA) \
do { \ do { \
CHECK_NOSHUF(VA, 2); \ CHECK_NOSHUF(VA, 2); \
tcg_gen_qemu_ld16u(DST, VA, ctx->mem_idx); \ tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TEUW); \
} while (0) } while (0)
#define MEM_LOAD4s(DST, VA) \ #define MEM_LOAD4s(DST, VA) \
do { \ do { \
CHECK_NOSHUF(VA, 4); \ CHECK_NOSHUF(VA, 4); \
tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \ tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TESL); \
} while (0) } while (0)
#define MEM_LOAD4u(DST, VA) \ #define MEM_LOAD4u(DST, VA) \
do { \ do { \
CHECK_NOSHUF(VA, 4); \ CHECK_NOSHUF(VA, 4); \
tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \ tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TEUL); \
} while (0) } while (0)
#define MEM_LOAD8u(DST, VA) \ #define MEM_LOAD8u(DST, VA) \
do { \ do { \
CHECK_NOSHUF(VA, 8); \ CHECK_NOSHUF(VA, 8); \
tcg_gen_qemu_ld64(DST, VA, ctx->mem_idx); \ tcg_gen_qemu_ld_i64(DST, VA, ctx->mem_idx, MO_TEUQ); \
} while (0) } while (0)
#define MEM_STORE1_FUNC(X) \ #define MEM_STORE1_FUNC(X) \

View File

@@ -627,27 +627,27 @@ void process_store(DisasContext *ctx, int slot_num)
switch (ctx->store_width[slot_num]) { switch (ctx->store_width[slot_num]) {
case 1: case 1:
gen_check_store_width(ctx, slot_num); gen_check_store_width(ctx, slot_num);
tcg_gen_qemu_st8(hex_store_val32[slot_num], tcg_gen_qemu_st_tl(hex_store_val32[slot_num],
hex_store_addr[slot_num], hex_store_addr[slot_num],
ctx->mem_idx); ctx->mem_idx, MO_UB);
break; break;
case 2: case 2:
gen_check_store_width(ctx, slot_num); gen_check_store_width(ctx, slot_num);
tcg_gen_qemu_st16(hex_store_val32[slot_num], tcg_gen_qemu_st_tl(hex_store_val32[slot_num],
hex_store_addr[slot_num], hex_store_addr[slot_num],
ctx->mem_idx); ctx->mem_idx, MO_TEUW);
break; break;
case 4: case 4:
gen_check_store_width(ctx, slot_num); gen_check_store_width(ctx, slot_num);
tcg_gen_qemu_st32(hex_store_val32[slot_num], tcg_gen_qemu_st_tl(hex_store_val32[slot_num],
hex_store_addr[slot_num], hex_store_addr[slot_num],
ctx->mem_idx); ctx->mem_idx, MO_TEUL);
break; break;
case 8: case 8:
gen_check_store_width(ctx, slot_num); gen_check_store_width(ctx, slot_num);
tcg_gen_qemu_st64(hex_store_val64[slot_num], tcg_gen_qemu_st_i64(hex_store_val64[slot_num],
hex_store_addr[slot_num], hex_store_addr[slot_num],
ctx->mem_idx); ctx->mem_idx, MO_TEUQ);
break; break;
default: default:
{ {
@@ -693,13 +693,13 @@ static void process_dczeroa(DisasContext *ctx)
TCGv_i64 zero = tcg_constant_i64(0); TCGv_i64 zero = tcg_constant_i64(0);
tcg_gen_andi_tl(addr, hex_dczero_addr, ~0x1f); tcg_gen_andi_tl(addr, hex_dczero_addr, ~0x1f);
tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ);
tcg_gen_addi_tl(addr, addr, 8); tcg_gen_addi_tl(addr, addr, 8);
tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ);
tcg_gen_addi_tl(addr, addr, 8); tcg_gen_addi_tl(addr, addr, 8);
tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ);
tcg_gen_addi_tl(addr, addr, 8); tcg_gen_addi_tl(addr, addr, 8);
tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ);
} }
} }