tcg/optimize: Use tcg_constant_internal with constant folding

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2020-03-30 20:42:43 -07:00
Commit: 8fe35e0444 (parent: 8f17a975e6)


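The change in brief: instead of rewriting an op into movi_i32/movi_i64/dupi_vec and hand-maintaining the known-bits mask, the optimizer now asks tcg_constant_internal() for a constant temp of the matching type, registers it with init_ts_info(), and funnels everything through tcg_opt_gen_mov(), which already does the copy and constant tracking. The standalone sketch below only illustrates the interning idea behind tcg_constant_internal(); ConstTemp, intern_const() and the linked list are simplified stand-ins for this example, not QEMU's data structures.

/*
 * Illustrative sketch of constant "interning": every request for the same
 * (type, value) pair returns the same temp object.  QEMU keys real constant
 * temps by type and value; a linked list keeps the sketch short.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef enum { TYPE_I32, TYPE_I64 } Type;

typedef struct ConstTemp {
    Type type;
    uint64_t val;
    struct ConstTemp *next;
} ConstTemp;

static ConstTemp *const_pool;

static ConstTemp *intern_const(Type type, uint64_t val)
{
    ConstTemp *t;

    for (t = const_pool; t; t = t->next) {
        if (t->type == type && t->val == val) {
            return t;                 /* reuse the existing constant temp */
        }
    }
    t = malloc(sizeof(*t));
    t->type = type;
    t->val = val;
    t->next = const_pool;
    const_pool = t;
    return t;
}

int main(void)
{
    ConstTemp *a = intern_const(TYPE_I32, 0);
    ConstTemp *b = intern_const(TYPE_I32, 0);

    /* Both requests yield the same temp, so "movi dst, 0" can become
     * "mov dst, <constant temp>" and later passes see dst as a copy of it. */
    printf("same temp: %s\n", a == b ? "yes" : "no");
    return 0;
}

With the constant represented as a real temp, repeated uses of the same value collapse into copies of one temp, which is why the call sites in the hunks below simply gain a &temps_used argument.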
@@ -178,37 +178,6 @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}
static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg dst, uint64_t val)
{
const TCGOpDef *def;
TCGOpcode new_op;
uint64_t mask;
TempOptInfo *di = arg_info(dst);
def = &tcg_op_defs[op->opc];
if (def->flags & TCG_OPF_VECTOR) {
new_op = INDEX_op_dupi_vec;
} else if (def->flags & TCG_OPF_64BIT) {
new_op = INDEX_op_movi_i64;
} else {
new_op = INDEX_op_movi_i32;
}
op->opc = new_op;
/* TCGOP_VECL and TCGOP_VECE remain unchanged. */
op->args[0] = dst;
op->args[1] = val;
reset_temp(dst);
di->is_const = true;
di->val = val;
mask = val;
if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_movi_i32) {
/* High bits of the destination are now garbage. */
mask |= ~0xffffffffull;
}
di->mask = mask;
}
static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
{
TCGTemp *dst_ts = arg_temp(dst);
@@ -259,6 +228,27 @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
}
}
static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used,
TCGOp *op, TCGArg dst, uint64_t val)
{
const TCGOpDef *def = &tcg_op_defs[op->opc];
TCGType type;
TCGTemp *tv;
if (def->flags & TCG_OPF_VECTOR) {
type = TCGOP_VECL(op) + TCG_TYPE_V64;
} else if (def->flags & TCG_OPF_64BIT) {
type = TCG_TYPE_I64;
} else {
type = TCG_TYPE_I32;
}
/* Convert movi to mov with constant temp. */
tv = tcg_constant_internal(type, val);
init_ts_info(temps_used, tv);
tcg_opt_gen_mov(s, op, dst, temp_arg(tv));
}
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
uint64_t l64, h64;
@@ -622,7 +612,7 @@ void tcg_optimize(TCGContext *s)
nb_temps = s->nb_temps;
nb_globals = s->nb_globals;
bitmap_zero(temps_used.l, nb_temps);
memset(&temps_used, 0, sizeof(temps_used));
for (i = 0; i < nb_temps; ++i) {
s->temps[i].state_ptr = NULL;
}
@@ -727,7 +717,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(rotr):
if (arg_is_const(op->args[1])
&& arg_info(op->args[1])->val == 0) {
tcg_opt_gen_movi(s, op, op->args[0], 0);
tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
continue;
}
break;
@@ -1054,7 +1044,7 @@ void tcg_optimize(TCGContext *s)
if (partmask == 0) {
tcg_debug_assert(nb_oargs == 1);
tcg_opt_gen_movi(s, op, op->args[0], 0);
tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
continue;
}
if (affected == 0) {
@@ -1071,7 +1061,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(mulsh):
if (arg_is_const(op->args[2])
&& arg_info(op->args[2])->val == 0) {
tcg_opt_gen_movi(s, op, op->args[0], 0);
tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
continue;
}
break;
@@ -1098,7 +1088,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64_VEC(sub):
CASE_OP_32_64_VEC(xor):
if (args_are_copies(op->args[1], op->args[2])) {
tcg_opt_gen_movi(s, op, op->args[0], 0);
tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
continue;
}
break;
@@ -1115,14 +1105,14 @@ void tcg_optimize(TCGContext *s)
break;
CASE_OP_32_64(movi):
case INDEX_op_dupi_vec:
tcg_opt_gen_movi(s, op, op->args[0], op->args[1]);
tcg_opt_gen_movi(s, &temps_used, op, op->args[0], op->args[1]);
break;
case INDEX_op_dup_vec:
if (arg_is_const(op->args[1])) {
tmp = arg_info(op->args[1])->val;
tmp = dup_const(TCGOP_VECE(op), tmp);
tcg_opt_gen_movi(s, op, op->args[0], tmp);
tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
break;
}
goto do_default;
@@ -1132,7 +1122,7 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
tmp = arg_info(op->args[1])->val;
if (tmp == arg_info(op->args[2])->val) {
tcg_opt_gen_movi(s, op, op->args[0], tmp);
tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
break;
}
} else if (args_are_copies(op->args[1], op->args[2])) {
@@ -1160,7 +1150,7 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_extrh_i64_i32:
if (arg_is_const(op->args[1])) {
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
tcg_opt_gen_movi(s, op, op->args[0], tmp);
tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
break;
}
goto do_default;
@@ -1190,7 +1180,7 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
arg_info(op->args[2])->val);
tcg_opt_gen_movi(s, op, op->args[0], tmp);
tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
break;
}
goto do_default;
@@ -1201,7 +1191,7 @@ void tcg_optimize(TCGContext *s)
TCGArg v = arg_info(op->args[1])->val;
if (v != 0) {
tmp = do_constant_folding(opc, v, 0);
tcg_opt_gen_movi(s, op, op->args[0], tmp);
tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
} else {
tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
}
@@ -1214,7 +1204,7 @@ void tcg_optimize(TCGContext *s)
tmp = deposit64(arg_info(op->args[1])->val,
op->args[3], op->args[4],
arg_info(op->args[2])->val);
tcg_opt_gen_movi(s, op, op->args[0], tmp);
tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
break;
}
goto do_default;
@@ -1223,7 +1213,7 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1])) {
tmp = extract64(arg_info(op->args[1])->val,
op->args[2], op->args[3]);
tcg_opt_gen_movi(s, op, op->args[0], tmp);
tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
break;
}
goto do_default;
@@ -1232,7 +1222,7 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1])) {
tmp = sextract64(arg_info(op->args[1])->val,
op->args[2], op->args[3]);
tcg_opt_gen_movi(s, op, op->args[0], tmp);
tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
break;
}
goto do_default;
@@ -1249,7 +1239,7 @@ void tcg_optimize(TCGContext *s)
tmp = (int32_t)(((uint32_t)v1 >> shr) |
((uint32_t)v2 << (32 - shr)));
}
tcg_opt_gen_movi(s, op, op->args[0], tmp);
tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
break;
}
goto do_default;
@@ -1258,7 +1248,7 @@ void tcg_optimize(TCGContext *s)
tmp = do_constant_folding_cond(opc, op->args[1],
op->args[2], op->args[3]);
if (tmp != 2) {
tcg_opt_gen_movi(s, op, op->args[0], tmp);
tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
break;
}
goto do_default;
@@ -1268,7 +1258,7 @@ void tcg_optimize(TCGContext *s)
op->args[1], op->args[2]);
if (tmp != 2) {
if (tmp) {
bitmap_zero(temps_used.l, nb_temps);
memset(&temps_used, 0, sizeof(temps_used));
op->opc = INDEX_op_br;
op->args[0] = op->args[3];
} else {
@@ -1314,7 +1304,7 @@ void tcg_optimize(TCGContext *s)
uint64_t a = ((uint64_t)ah << 32) | al;
uint64_t b = ((uint64_t)bh << 32) | bl;
TCGArg rl, rh;
TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32);
TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32);
if (opc == INDEX_op_add2_i32) {
a += b;
@@ -1324,8 +1314,8 @@
rl = op->args[0];
rh = op->args[1];
tcg_opt_gen_movi(s, op, rl, (int32_t)a);
tcg_opt_gen_movi(s, op2, rh, (int32_t)(a >> 32));
tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)a);
tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(a >> 32));
break;
}
goto do_default;
@@ -1336,12 +1326,12 @@ void tcg_optimize(TCGContext *s)
uint32_t b = arg_info(op->args[3])->val;
uint64_t r = (uint64_t)a * b;
TCGArg rl, rh;
TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_movi_i32);
TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32);
rl = op->args[0];
rh = op->args[1];
tcg_opt_gen_movi(s, op, rl, (int32_t)r);
tcg_opt_gen_movi(s, op2, rh, (int32_t)(r >> 32));
tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)r);
tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(r >> 32));
break;
}
goto do_default;
@@ -1352,7 +1342,7 @@ void tcg_optimize(TCGContext *s)
if (tmp != 2) {
if (tmp) {
do_brcond_true:
bitmap_zero(temps_used.l, nb_temps);
memset(&temps_used, 0, sizeof(temps_used));
op->opc = INDEX_op_br;
op->args[0] = op->args[5];
} else {
@@ -1368,7 +1358,7 @@ void tcg_optimize(TCGContext *s)
/* Simplify LT/GE comparisons vs zero to a single compare
vs the high word of the input. */
do_brcond_high:
bitmap_zero(temps_used.l, nb_temps);
memset(&temps_used, 0, sizeof(temps_used));
op->opc = INDEX_op_brcond_i32;
op->args[0] = op->args[1];
op->args[1] = op->args[3];
@@ -1394,7 +1384,7 @@ void tcg_optimize(TCGContext *s)
goto do_default;
}
do_brcond_low:
bitmap_zero(temps_used.l, nb_temps);
memset(&temps_used, 0, sizeof(temps_used));
op->opc = INDEX_op_brcond_i32;
op->args[1] = op->args[2];
op->args[2] = op->args[4];
@@ -1429,7 +1419,7 @@ void tcg_optimize(TCGContext *s)
op->args[5]);
if (tmp != 2) {
do_setcond_const:
tcg_opt_gen_movi(s, op, op->args[0], tmp);
tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
} else if ((op->args[5] == TCG_COND_LT
|| op->args[5] == TCG_COND_GE)
&& arg_is_const(op->args[3])
@@ -1514,7 +1504,7 @@ block, otherwise we only trash the output args. "mask" is
block, otherwise we only trash the output args. "mask" is
the non-zero bits mask for the first output arg. */
if (def->flags & TCG_OPF_BB_END) {
bitmap_zero(temps_used.l, nb_temps);
memset(&temps_used, 0, sizeof(temps_used));
} else {
do_reset_output:
for (i = 0; i < nb_oargs; i++) {
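Two recurring edits in the hunks above follow from the new helper: every tcg_opt_gen_movi() call site now passes &temps_used so the temp returned by tcg_constant_internal() can be registered via init_ts_info(), and the temps_used resets switch from bitmap_zero(temps_used.l, nb_temps) to clearing the whole set with memset(), presumably because tcg_constant_internal() can create new temps while the pass runs, so the nb_temps snapshot taken at entry no longer bounds the set of temps that may be marked. A minimal standalone illustration of that last point (the set size and field names are invented for the example):

#include <stdio.h>
#include <string.h>

#define MAX_TEMPS 64                     /* stand-in for the fixed set size */

typedef struct { unsigned char used[MAX_TEMPS]; } TempSet;

int main(void)
{
    TempSet s;
    int nb_temps = 8;                    /* count sampled at the start of the pass */

    memset(&s, 0, sizeof(s));
    s.used[40] = 1;                      /* temp created later, index >= nb_temps */

    memset(s.used, 0, nb_temps);         /* partial reset: the late mark survives */
    printf("after partial reset: %d\n", s.used[40]);

    memset(&s, 0, sizeof(s));            /* full reset covers every possible index */
    printf("after full reset:    %d\n", s.used[40]);
    return 0;
}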