target/arm: Use tcg_constant in translate-neon.c

Use tcg_constant_{i32,i64} as appropriate throughout.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Richard Henderson <richard.henderson@linaro.org>, 2022-04-17 10:43:45 -07:00, committed by Peter Maydell
parent 01d90db599
commit d9b47e97e7
1 changed file with 7 additions and 14 deletions
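
Background for the change: tcg_const_i32()/tcg_const_i64() allocate a fresh
TCG temporary and emit a move of the requested value into it, so each call
site must eventually release that temporary with tcg_temp_free_*(). The
tcg_constant_i32()/tcg_constant_i64() functions instead return a handle to an
interned, read-only constant that stays valid for the rest of the translation
block; it must never be written to or freed. That is why every hunk below that
switches allocators also drops a tcg_temp_free_*() call. A minimal sketch of
the two contracts (translator-style C; "dst" stands for any live TCGv_i32 and
is not taken from this patch):

    /* Old style: the value lives in a mutable temp owned by the caller. */
    TCGv_i32 t = tcg_const_i32(0x1234);     /* new temp + move immediate */
    tcg_gen_add_i32(dst, dst, t);           /* use it as an operand */
    tcg_temp_free_i32(t);                   /* caller must free it */

    /* New style: the value is an interned, TB-lifetime constant. */
    TCGv_i32 c = tcg_constant_i32(0x1234);  /* cached, read-only */
    tcg_gen_add_i32(dst, dst, c);           /* operand use only */
    /* no tcg_temp_free_i32(c): freeing or writing a constant is a bug */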

target/arm/translate-neon.c

@@ -447,7 +447,7 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
     int mmu_idx = get_mem_index(s);
     int size = a->size;
     TCGv_i64 tmp64;
-    TCGv_i32 addr, tmp;
+    TCGv_i32 addr;
 
     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
         return false;
@@ -513,7 +513,6 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
 
     tmp64 = tcg_temp_new_i64();
     addr = tcg_temp_new_i32();
-    tmp = tcg_const_i32(1 << size);
     load_reg_var(s, addr, a->rn);
 
     mop = endian | size | align;
@@ -530,7 +529,7 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
                     neon_load_element64(tmp64, tt, n, size);
                     gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx, mop);
                 }
-                tcg_gen_add_i32(addr, addr, tmp);
+                tcg_gen_addi_i32(addr, addr, 1 << size);
 
                 /* Subsequent memory operations inherit alignment */
                 mop &= ~MO_AMASK;
@@ -538,7 +537,6 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
         }
     }
     tcg_temp_free_i32(addr);
-    tcg_temp_free_i32(tmp);
     tcg_temp_free_i64(tmp64);
 
     gen_neon_ldst_base_update(s, a->rm, a->rn, nregs * interleave * 8);
@@ -1348,7 +1346,7 @@ static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a,
      * To avoid excessive duplication of ops we implement shift
      * by immediate using the variable shift operations.
      */
-    constimm = tcg_const_i64(dup_const(a->size, a->shift));
+    constimm = tcg_constant_i64(dup_const(a->size, a->shift));
 
     for (pass = 0; pass < a->q + 1; pass++) {
         TCGv_i64 tmp = tcg_temp_new_i64();
@@ -1358,7 +1356,6 @@ static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a,
         write_neon_element64(tmp, a->vd, pass, MO_64);
         tcg_temp_free_i64(tmp);
     }
-    tcg_temp_free_i64(constimm);
     return true;
 }
 
@@ -1394,7 +1391,7 @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
      * To avoid excessive duplication of ops we implement shift
      * by immediate using the variable shift operations.
      */
-    constimm = tcg_const_i32(dup_const(a->size, a->shift));
+    constimm = tcg_constant_i32(dup_const(a->size, a->shift));
     tmp = tcg_temp_new_i32();
 
     for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
@@ -1403,7 +1400,6 @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
         write_neon_element32(tmp, a->vd, pass, MO_32);
     }
     tcg_temp_free_i32(tmp);
-    tcg_temp_free_i32(constimm);
     return true;
 }
 
@@ -1457,7 +1453,7 @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
      * This is always a right shift, and the shiftfn is always a
      * left-shift helper, which thus needs the negated shift count.
      */
-    constimm = tcg_const_i64(-a->shift);
+    constimm = tcg_constant_i64(-a->shift);
     rm1 = tcg_temp_new_i64();
     rm2 = tcg_temp_new_i64();
     rd = tcg_temp_new_i32();
@@ -1477,7 +1473,6 @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
     tcg_temp_free_i32(rd);
     tcg_temp_free_i64(rm1);
     tcg_temp_free_i64(rm2);
-    tcg_temp_free_i64(constimm);
     return true;
 }
 
@@ -1521,7 +1516,7 @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
         /* size == 2 */
         imm = -a->shift;
     }
-    constimm = tcg_const_i32(imm);
+    constimm = tcg_constant_i32(imm);
 
     /* Load all inputs first to avoid potential overwrite */
     rm1 = tcg_temp_new_i32();
@@ -1546,7 +1541,6 @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
 
     shiftfn(rm3, rm3, constimm);
     shiftfn(rm4, rm4, constimm);
-    tcg_temp_free_i32(constimm);
 
     tcg_gen_concat_i32_i64(rtmp, rm3, rm4);
     tcg_temp_free_i32(rm4);
@@ -2911,7 +2905,7 @@ static bool trans_VTBL(DisasContext *s, arg_VTBL *a)
         return true;
     }
 
-    desc = tcg_const_i32((a->vn << 2) | a->len);
+    desc = tcg_constant_i32((a->vn << 2) | a->len);
     def = tcg_temp_new_i64();
     if (a->op) {
         read_neon_element64(def, a->vd, 0, MO_64);
@@ -2926,7 +2920,6 @@ static bool trans_VTBL(DisasContext *s, arg_VTBL *a)
 
     tcg_temp_free_i64(def);
     tcg_temp_free_i64(val);
-    tcg_temp_free_i32(desc);
 
     return true;
 }
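
The trans_VLDST_multiple hunks go one step further: when the constant is only
ever an addend, the immediate-form opcode avoids materializing a TCG value at
all. Sketched with the same names the function uses above:

    /* Before: constant held in a temp, added, then freed. */
    TCGv_i32 tmp = tcg_const_i32(1 << size);
    tcg_gen_add_i32(addr, addr, tmp);
    tcg_temp_free_i32(tmp);

    /* After: the immediate variant takes the constant directly. */
    tcg_gen_addi_i32(addr, addr, 1 << size);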