target/ppc: Avoid tcg_const_i64 in do_vcntmb
Compute both partial results separately and accumulate at the end, instead of accumulating in the middle.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent
999b7c2659
commit
ffc0ce24fd
@ -2236,24 +2236,25 @@ static bool trans_MTVSRBMI(DisasContext *ctx, arg_DX_b *a)
|
||||
|
||||
/*
 * Common translation for the Vector Count Mask Bits instructions
 * (vcntmb[bhwd], selected by @vece: element width is 8 << vece bits).
 *
 * For each element of VRB, test its most-significant bit: with a->mp
 * set, count elements whose MSB is 1; otherwise count elements whose
 * MSB is 0.  The count, shifted left into the high-order byte region
 * of the target register (TARGET_LONG_BITS - 8 + vece), is written to
 * GPR[rt].
 *
 * Compute the popcount of each 64-bit half independently and add the
 * two partial results at the end; this avoids the deprecated
 * tcg_const_i64() accumulator and keeps the loop body free of
 * cross-iteration dependencies.
 *
 * Returns true: the instruction always translates successfully.
 */
static bool do_vcntmb(DisasContext *ctx, arg_VX_mp *a, int vece)
{
    TCGv_i64 r[2], mask;

    r[0] = tcg_temp_new_i64();
    r[1] = tcg_temp_new_i64();
    /* Mask with only the MSB of each (8 << vece)-bit element set. */
    mask = tcg_constant_i64(dup_const(vece, 1ULL << ((8 << vece) - 1)));

    for (int i = 0; i < 2; i++) {
        get_avr64(r[i], a->vrb, i);
        if (a->mp) {
            /* Keep elements whose MSB is set. */
            tcg_gen_and_i64(r[i], mask, r[i]);
        } else {
            /* Keep elements whose MSB is clear. */
            tcg_gen_andc_i64(r[i], mask, r[i]);
        }
        /* One surviving bit per matching element: popcount = count. */
        tcg_gen_ctpop_i64(r[i], r[i]);
    }

    /* Accumulate the two halves, then position the count in the GPR. */
    tcg_gen_add_i64(r[0], r[0], r[1]);
    tcg_gen_shli_i64(r[0], r[0], TARGET_LONG_BITS - 8 + vece);
    tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], r[0]);
    return true;
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user