backport: sched-deps.c (init_deps): New parameter lazy_reg_last.

Backport from mainline:
 2009-11-13  Andrey Belevantsev  <abel@ispras.ru>

        * sched-deps.c (init_deps): New parameter lazy_reg_last.  Don't
        allocate reg_last when lazy_reg_last is true.
        (init_deps_reg_last): New.
        (free_deps): When max_reg is 0, this context is already freed.
        * sched-int.h (init_deps_reg_last): Export.
        (init_deps): Update prototype.
        * sched-ebb.c (schedule_ebb): Update the call to init_deps.
        * sched-rgn.c (sched_rgn_compute_dependencies): Likewise.
        * ddg.c (build_intra_loop_deps): Likewise.
        * sel-sched-ir.c (copy_deps_context, create_deps_context,
        reset_deps_context, deps_init_id): Likewise.
        (init_first_time_insn_data): Lazy allocate INSN_DEPS_CONTEXT.
        (free_data_for_scheduled_insn): New, break down from ...
        (free_first_time_insn_data): ... here.
        (has_dependence_p): Allocate reg_last now, when it is needed.
        (extend_insn_data): When maximal LUID is big enough, allocate
        per-insn data in smaller chunks.
        * sel-sched-ir.h (free_data_for_scheduled_insn): Export.
        * sel-sched.c (update_seqnos_and_stage): Free INSN_DEPS_CONTEXT
        in scheduled insn.

From-SVN: r163500
This commit is contained in:
Andrey Belevantsev 2010-08-24 12:53:11 +04:00 committed by Andrey Belevantsev
parent 0cc232de19
commit bc37349418
9 changed files with 118 additions and 24 deletions

View File

@ -1,3 +1,29 @@
2010-08-24 Andrey Belevantsev <abel@ispras.ru>
Backport from mainline:
2009-11-13 Andrey Belevantsev <abel@ispras.ru>
* sched-deps.c (init_deps): New parameter lazy_reg_last. Don't
allocate reg_last when lazy_reg_last is true.
(init_deps_reg_last): New.
(free_deps): When max_reg is 0, this context is already freed.
* sched-int.h (init_deps_reg_last): Export.
(init_deps): Update prototype.
* sched-ebb.c (schedule_ebb): Update the call to init_deps.
* sched-rgn.c (sched_rgn_compute_dependencies): Likewise.
* ddg.c (build_intra_loop_deps): Likewise.
* sel-sched-ir.c (copy_deps_context, create_deps_context,
reset_deps_context, deps_init_id): Likewise.
(init_first_time_insn_data): Lazy allocate INSN_DEPS_CONTEXT.
(free_data_for_scheduled_insn): New, break down from ...
(free_first_time_insn_data): ... here.
(has_dependence_p): Allocate reg_last now, when it is needed.
(extend_insn_data): When maximal LUID is big enough, allocate
per-insn data in smaller chunks.
* sel-sched-ir.h (free_data_for_scheduled_insn): Export.
* sel-sched.c (update_seqnos_and_stage): Free INSN_DEPS_CONTEXT
in scheduled insn.
2010-08-24 Andrey Belevantsev <abel@ispras.ru>
Backport from mainline:

View File

@ -381,7 +381,7 @@ build_intra_loop_deps (ddg_ptr g)
/* Build the dependence information, using the sched_analyze function. */
init_deps_global ();
init_deps (&tmp_deps);
init_deps (&tmp_deps, false);
/* Do the intra-block data dependence analysis for the given block. */
get_ebb_head_tail (g->bb, g->bb, &head, &tail);

View File

@ -2850,15 +2850,19 @@ sched_free_deps (rtx head, rtx tail, bool resolved_p)
}
/* Initialize variables for region data dependence analysis.
n_bbs is the number of region blocks. */
When LAZY_REG_LAST is true, do not allocate reg_last array
of struct deps immediately. */
void
init_deps (struct deps *deps)
init_deps (struct deps *deps, bool lazy_reg_last)
{
int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
deps->max_reg = max_reg;
deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
if (lazy_reg_last)
deps->reg_last = NULL;
else
deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
INIT_REG_SET (&deps->reg_last_in_use);
INIT_REG_SET (&deps->reg_conditional_sets);
@ -2877,6 +2881,18 @@ init_deps (struct deps *deps)
deps->readonly = 0;
}
/* Init only reg_last field of DEPS, which was not allocated before as
we inited DEPS lazily. */
void
init_deps_reg_last (struct deps *deps)
{
gcc_assert (deps && deps->max_reg > 0);
gcc_assert (deps->reg_last == NULL);
deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
}
/* Free insn lists found in DEPS. */
void
@ -2885,6 +2901,14 @@ free_deps (struct deps *deps)
unsigned i;
reg_set_iterator rsi;
/* We set max_reg to 0 when this context was already freed. */
if (deps->max_reg == 0)
{
gcc_assert (deps->reg_last == NULL);
return;
}
deps->max_reg = 0;
free_INSN_LIST_list (&deps->pending_read_insns);
free_EXPR_LIST_list (&deps->pending_read_mems);
free_INSN_LIST_list (&deps->pending_write_insns);
@ -2907,7 +2931,10 @@ free_deps (struct deps *deps)
CLEAR_REG_SET (&deps->reg_last_in_use);
CLEAR_REG_SET (&deps->reg_conditional_sets);
free (deps->reg_last);
/* As we initialize reg_last lazily, it is possible that we didn't allocate
it at all. */
if (deps->reg_last)
free (deps->reg_last);
deps->reg_last = NULL;
deps = NULL;

View File

@ -477,7 +477,7 @@ schedule_ebb (rtx head, rtx tail)
init_deps_global ();
/* Compute dependencies. */
init_deps (&tmp_deps);
init_deps (&tmp_deps, false);
sched_analyze (&tmp_deps, head, tail);
free_deps (&tmp_deps);

View File

@ -1079,7 +1079,8 @@ extern bool sched_insns_conditions_mutex_p (const_rtx, const_rtx);
extern bool sched_insn_is_legitimate_for_speculation_p (const_rtx, ds_t);
extern void add_dependence (rtx, rtx, enum reg_note);
extern void sched_analyze (struct deps *, rtx, rtx);
extern void init_deps (struct deps *);
extern void init_deps (struct deps *, bool);
extern void init_deps_reg_last (struct deps *);
extern void free_deps (struct deps *);
extern void init_deps_global (void);
extern void finish_deps_global (void);

View File

@ -3081,7 +3081,7 @@ sched_rgn_compute_dependencies (int rgn)
/* Initializations for region data dependence analysis. */
bb_deps = XNEWVEC (struct deps, current_nr_blocks);
for (bb = 0; bb < current_nr_blocks; bb++)
init_deps (bb_deps + bb);
init_deps (bb_deps + bb, false);
/* Initialize bitmap used in add_branch_dependences. */
insn_referenced = sbitmap_alloc (sched_max_luid);

View File

@ -432,7 +432,7 @@ reset_target_context (tc_t tc, bool clean_p)
static void
copy_deps_context (deps_t to, deps_t from)
{
init_deps (to);
init_deps (to, false);
deps_join (to, from);
}
@ -449,7 +449,7 @@ create_deps_context (void)
{
deps_t dc = alloc_deps_context ();
init_deps (dc);
init_deps (dc, false);
return dc;
}
@ -483,7 +483,7 @@ static void
reset_deps_context (deps_t dc)
{
clear_deps_context (dc);
init_deps (dc);
init_deps (dc, false);
}
/* This structure describes the dependence analysis hooks for advancing
@ -2671,7 +2671,7 @@ deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
deps_init_id_data.force_unique_p = force_unique_p;
deps_init_id_data.force_use_p = false;
init_deps (dc);
init_deps (dc, false);
memcpy (&deps_init_id_sched_deps_info,
&const_deps_init_id_sched_deps_info,
@ -2743,7 +2743,7 @@ init_first_time_insn_data (insn_t insn)
/* These are needed for nops too. */
INSN_LIVE (insn) = get_regset_from_pool ();
INSN_LIVE_VALID_P (insn) = false;
if (!INSN_NOP_P (insn))
{
INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
@ -2751,27 +2751,46 @@ init_first_time_insn_data (insn_t insn)
INSN_TRANSFORMED_INSNS (insn)
= htab_create (16, hash_transformed_insns,
eq_transformed_insns, free_transformed_insns);
init_deps (&INSN_DEPS_CONTEXT (insn));
init_deps (&INSN_DEPS_CONTEXT (insn), true);
}
}
/* Free almost all above data for INSN that is scheduled already.
Used for extra-large basic blocks. */
void
free_data_for_scheduled_insn (insn_t insn)
{
gcc_assert (! first_time_insn_init (insn));
if (! INSN_ANALYZED_DEPS (insn))
return;
BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
BITMAP_FREE (INSN_FOUND_DEPS (insn));
htab_delete (INSN_TRANSFORMED_INSNS (insn));
/* This is allocated only for bookkeeping insns. */
if (INSN_ORIGINATORS (insn))
BITMAP_FREE (INSN_ORIGINATORS (insn));
free_deps (&INSN_DEPS_CONTEXT (insn));
INSN_ANALYZED_DEPS (insn) = NULL;
/* Clear the readonly flag so we would ICE when trying to recalculate
the deps context (as we believe that it should not happen). */
(&INSN_DEPS_CONTEXT (insn))->readonly = 0;
}
/* Free the same data as above for INSN. */
static void
free_first_time_insn_data (insn_t insn)
{
gcc_assert (! first_time_insn_init (insn));
BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
BITMAP_FREE (INSN_FOUND_DEPS (insn));
htab_delete (INSN_TRANSFORMED_INSNS (insn));
free_data_for_scheduled_insn (insn);
return_regset_to_pool (INSN_LIVE (insn));
INSN_LIVE (insn) = NULL;
INSN_LIVE_VALID_P (insn) = false;
/* This is allocated only for bookkeeping insns. */
if (INSN_ORIGINATORS (insn))
BITMAP_FREE (INSN_ORIGINATORS (insn));
free_deps (&INSN_DEPS_CONTEXT (insn));
}
/* Initialize region-scope data structures for basic blocks. */
@ -3208,6 +3227,11 @@ has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
return false;
dc = &INSN_DEPS_CONTEXT (pred);
/* We init this field lazily. */
if (dc->reg_last == NULL)
init_deps_reg_last (dc);
if (!dc->readonly)
{
has_dependence_data.pro = NULL;
@ -3814,8 +3838,17 @@ extend_insn_data (void)
- VEC_length (sel_insn_data_def, s_i_d));
if (reserve > 0
&& ! VEC_space (sel_insn_data_def, s_i_d, reserve))
VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d,
3 * sched_max_luid / 2);
{
int size;
if (sched_max_luid / 2 > 1024)
size = sched_max_luid + 1024;
else
size = 3 * sched_max_luid / 2;
VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d, size);
}
}
/* Finalize data structures for insns from current region. */

View File

@ -1607,6 +1607,7 @@ extern void init_lv_sets (void);
extern void free_lv_sets (void);
extern void setup_nop_and_exit_insns (void);
extern void free_nop_and_exit_insns (void);
extern void free_data_for_scheduled_insn (insn_t);
extern void setup_nop_vinsn (void);
extern void free_nop_vinsn (void);
extern void sel_set_sched_flags (void);

View File

@ -7091,6 +7091,12 @@ update_seqnos_and_stage (int min_seqno, int max_seqno,
gcc_assert (INSN_SEQNO (insn) < 0);
INSN_SEQNO (insn) += highest_seqno_in_use + max_seqno - min_seqno + 2;
gcc_assert (INSN_SEQNO (insn) <= new_hs);
/* When not pipelining, purge unneeded insn info on the scheduled insns.
For example, having reg_last array of INSN_DEPS_CONTEXT in memory may
require > 1GB of memory e.g. on limit-fnargs.c. */
if (! pipelining_p)
free_data_for_scheduled_insn (insn);
}
ilist_clear (pscheduled_insns);