diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index fa3f16eddd..f2e3d38515 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -209,8 +209,8 @@ static bool trans_vsetvli(DisasContext *s, arg_vsetvli *a)
 
 static bool trans_vsetivli(DisasContext *s, arg_vsetivli *a)
 {
-    TCGv s1 = tcg_const_tl(a->rs1);
-    TCGv s2 = tcg_const_tl(a->zimm);
+    TCGv s1 = tcg_constant_tl(a->rs1);
+    TCGv s2 = tcg_constant_tl(a->zimm);
 
     return do_vsetivli(s, a->rd, s1, s2);
 }
diff --git a/target/riscv/insn_trans/trans_rvzfh.c.inc b/target/riscv/insn_trans/trans_rvzfh.c.inc
index d2012c2841..74dde37ff7 100644
--- a/target/riscv/insn_trans/trans_rvzfh.c.inc
+++ b/target/riscv/insn_trans/trans_rvzfh.c.inc
@@ -299,7 +299,7 @@ static bool trans_fsgnjn_h(DisasContext *ctx, arg_fsgnjn_h *a)
          * Replace bit 15 in rs1 with inverse in rs2.
          * This formulation retains the nanboxing of rs1.
          */
-        mask = tcg_const_i64(~MAKE_64BIT_MASK(15, 1));
+        mask = tcg_constant_i64(~MAKE_64BIT_MASK(15, 1));
         tcg_gen_not_i64(rs2, rs2);
         tcg_gen_andc_i64(rs2, rs2, mask);
         tcg_gen_and_i64(dest, mask, rs1);
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index 0485abbf7a..93909207d2 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -201,8 +201,8 @@ static void gen_nanbox_h(TCGv_i64 out, TCGv_i64 in)
  */
 static void gen_check_nanbox_h(TCGv_i64 out, TCGv_i64 in)
 {
-    TCGv_i64 t_max = tcg_const_i64(0xffffffffffff0000ull);
-    TCGv_i64 t_nan = tcg_const_i64(0xffffffffffff7e00ull);
+    TCGv_i64 t_max = tcg_constant_i64(0xffffffffffff0000ull);
+    TCGv_i64 t_nan = tcg_constant_i64(0xffffffffffff7e00ull);
 
     tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
 }
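
Note (not part of the patch): the hunks above replace tcg_const_*(), which allocates a mutable TCG temporary initialized to the value and was typically paired with tcg_temp_free_*(), with tcg_constant_*(), which returns a cached, read-only constant that must not be freed or used as an output operand. A minimal sketch of the before/after pattern, assuming QEMU's TCG frontend API (tcg/tcg-op.h); the helper names gen_add_imm_old()/gen_add_imm_new() are made up for illustration:

#include "tcg/tcg-op.h"

/* Old style: tcg_const_i64() creates a fresh temp holding imm,
 * which the caller had to release with tcg_temp_free_i64(). */
static void gen_add_imm_old(TCGv_i64 dest, TCGv_i64 src, int64_t imm)
{
    TCGv_i64 t = tcg_const_i64(imm);
    tcg_gen_add_i64(dest, src, t);
    tcg_temp_free_i64(t);
}

/* New style: tcg_constant_i64() returns a cached, read-only value;
 * it needs no free and must never be written to. */
static void gen_add_imm_new(TCGv_i64 dest, TCGv_i64 src, int64_t imm)
{
    tcg_gen_add_i64(dest, src, tcg_constant_i64(imm));
}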