tcg/optimize: Adjust TempOptInfo allocation

Do not allocate one large block of TempOptInfo indexed by temp number.
Instead, allocate a TempOptInfo for each temporary as it is first seen.

In general this uses less memory, since most TBs do not touch every
target register.  It also allows us to allocate TempOptInfo for new
temps created during optimization.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson 2020-03-30 19:52:02 -07:00
parent 4c868ce645
commit 8f17a975e6
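
The heart of the change is to stop pre-allocating one TempOptInfo per possible
temp index and instead allocate lazily, caching the allocation on the temp's
state_ptr the first time that temp is seen.  A minimal standalone sketch of the
pattern, for illustration only: the simplified Temp/OptInfo types and plain
malloc() are stand-ins, not the real TCG structures or tcg_malloc().

#include <stdbool.h>
#include <stdlib.h>

/* Simplified stand-ins for TCGTemp/TempOptInfo; not the real QEMU structures. */
typedef struct OptInfo {
    bool is_const;
    unsigned long long mask;
} OptInfo;

typedef struct Temp {
    void *state_ptr;   /* per-temp scratch pointer, like TCGTemp.state_ptr */
} Temp;

/*
 * Before the patch: one block indexed by temp number, allocated up front:
 *     OptInfo *infos = malloc(sizeof(OptInfo) * nb_temps);
 * After: allocate on first use and cache the pointer on the temp itself,
 * so temps never touched by the TB cost nothing and temps created later
 * during optimization can still get an OptInfo.
 */
static OptInfo *get_info(Temp *ts)
{
    OptInfo *ti = ts->state_ptr;

    if (ti == NULL) {
        ti = malloc(sizeof(*ti));      /* tcg_malloc() in the real code */
        ts->state_ptr = ti;
    }
    ti->is_const = false;
    ti->mask = -1;                     /* all bits unknown */
    return ti;
}

int main(void)
{
    Temp t = { .state_ptr = NULL };

    OptInfo *a = get_info(&t);
    OptInfo *b = get_info(&t);         /* reuses the cached allocation */
    int ok = (a == b);

    free(a);
    return ok ? 0 : 1;
}

In the patch itself, tcg_optimize() clears state_ptr for every existing temp up
front, and the temps_used bitmap still guards against re-initializing a temp
within the same translation block, as the diff below shows.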


@@ -89,35 +89,41 @@ static void reset_temp(TCGArg arg)
 }
 
 /* Initialize and activate a temporary.  */
-static void init_ts_info(TempOptInfo *infos,
-                         TCGTempSet *temps_used, TCGTemp *ts)
+static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
 {
     size_t idx = temp_idx(ts);
-    if (!test_bit(idx, temps_used->l)) {
-        TempOptInfo *ti = &infos[idx];
+    TempOptInfo *ti;
 
+    if (test_bit(idx, temps_used->l)) {
+        return;
+    }
+    set_bit(idx, temps_used->l);
+
+    ti = ts->state_ptr;
+    if (ti == NULL) {
+        ti = tcg_malloc(sizeof(TempOptInfo));
         ts->state_ptr = ti;
-        ti->next_copy = ts;
-        ti->prev_copy = ts;
-        if (ts->kind == TEMP_CONST) {
-            ti->is_const = true;
-            ti->val = ti->mask = ts->val;
-            if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
-                /* High bits of a 32-bit quantity are garbage.  */
-                ti->mask |= ~0xffffffffull;
-            }
-        } else {
-            ti->is_const = false;
-            ti->mask = -1;
-        }
-        set_bit(idx, temps_used->l);
+    }
+
+    ti->next_copy = ts;
+    ti->prev_copy = ts;
+    if (ts->kind == TEMP_CONST) {
+        ti->is_const = true;
+        ti->val = ts->val;
+        ti->mask = ts->val;
+        if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
+            /* High bits of a 32-bit quantity are garbage.  */
+            ti->mask |= ~0xffffffffull;
+        }
+    } else {
+        ti->is_const = false;
+        ti->mask = -1;
     }
 }
 
-static void init_arg_info(TempOptInfo *infos,
-                          TCGTempSet *temps_used, TCGArg arg)
+static void init_arg_info(TCGTempSet *temps_used, TCGArg arg)
 {
-    init_ts_info(infos, temps_used, arg_temp(arg));
+    init_ts_info(temps_used, arg_temp(arg));
 }
 
 static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
@@ -604,9 +610,8 @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
 /* Propagate constants and copies, fold constant expressions. */
 void tcg_optimize(TCGContext *s)
 {
-    int nb_temps, nb_globals;
+    int nb_temps, nb_globals, i;
     TCGOp *op, *op_next, *prev_mb = NULL;
-    TempOptInfo *infos;
     TCGTempSet temps_used;
 
     /* Array VALS has an element for each temp.
@@ -616,12 +621,15 @@ void tcg_optimize(TCGContext *s)
 
     nb_temps = s->nb_temps;
     nb_globals = s->nb_globals;
     bitmap_zero(temps_used.l, nb_temps);
-    infos = tcg_malloc(sizeof(TempOptInfo) * nb_temps);
+
+    for (i = 0; i < nb_temps; ++i) {
+        s->temps[i].state_ptr = NULL;
+    }
 
     QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
         uint64_t mask, partmask, affected, tmp;
-        int nb_oargs, nb_iargs, i;
+        int nb_oargs, nb_iargs;
         TCGOpcode opc = op->opc;
         const TCGOpDef *def = &tcg_op_defs[opc];
 
@@ -633,14 +641,14 @@ void tcg_optimize(TCGContext *s)
             for (i = 0; i < nb_oargs + nb_iargs; i++) {
                 TCGTemp *ts = arg_temp(op->args[i]);
                 if (ts) {
-                    init_ts_info(infos, &temps_used, ts);
+                    init_ts_info(&temps_used, ts);
                 }
             }
         } else {
             nb_oargs = def->nb_oargs;
             nb_iargs = def->nb_iargs;
             for (i = 0; i < nb_oargs + nb_iargs; i++) {
-                init_arg_info(infos, &temps_used, op->args[i]);
+                init_arg_info(&temps_used, op->args[i]);
             }
         }
 