sched-deps.c (reg_pending_uses_head): New.

        * sched-deps.c (reg_pending_uses_head): New.
        (reg_pending_barrier): Rename from reg_pending_sets_all.
        (find_insn_list): Don't mark inline.
        (find_insn_mem_list): Remove.
        (add_dependence_list, add_dependence_list_and_free): New.
        (flush_pending_lists): Replace only_write param with separate
        for_read and for_write parameters.  Update all callers.  Use
        add_dependence_list_and_free.
        (sched_analyze_1): Do not add reg dependencies here; just set
        the pending bits.  Use add_dependence_list.
        (sched_analyze_2): Likewise.
        (sched_analyze_insn): Replace schedule_barrier_found with
        reg_pending_barrier.  Add all dependencies for pending reg
        uses, sets, and clobbers.
        (sched_analyze): Don't add reg dependencies for calls, just
        set pending bits.  Use regs_invalidated_by_call.  Treat
        sched_before_next_call as a normal list, not a fake insn.
        (init_deps): No funny init for sched_before_next_call.
        (free_deps): Free pending mems lists.  Don't zero reg_last.
        (init_deps_global): Init reg_pending_uses.
        (finish_deps_global): Free it.
        * sched-int.h (deps): Make in_post_call_group_p boolean.  Update docs.
        (find_insn_mem_list): Remove.
        * sched-rgn.c (concat_INSN_LIST, concat_insn_mem_list): New.
        (propagate_deps): Use them.  Zero temp mem lists.

From-SVN: r49262
Author:    Richard Henderson  2002-01-26 20:46:53 -08:00
Committer: Richard Henderson
parent cea3bd3e5a
commit 37a0f8a525
4 changed files with 314 additions and 399 deletions
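
Editor's note: the heart of the sched-deps.c change is the pair of helpers named above, add_dependence_list and add_dependence_list_and_free, which replace the hand-written INSN_LIST walks scattered through the old code. The snippet below is a minimal standalone model of that pattern, not the GCC code: the node type and the names record_dep, dep_list, dep_list_and_free and push are invented for illustration, standing in for INSN_LIST, add_dependence and the real helpers.

#include <stdio.h>
#include <stdlib.h>

/* A cons cell standing in for an INSN_LIST node.  */
struct node { int insn; struct node *next; };

/* Stand-in for add_dependence: just report the edge.  */
static void
record_dep (int consumer, int producer)
{
  printf ("insn %d depends on insn %d\n", consumer, producer);
}

/* Model of add_dependence_list: record a dependence on every element.  */
static void
dep_list (int consumer, struct node *list)
{
  for (; list; list = list->next)
    record_dep (consumer, list->insn);
}

/* Model of add_dependence_list_and_free: likewise, but consume the list
   and clear the caller's head pointer in the same pass.  */
static void
dep_list_and_free (int consumer, struct node **listp)
{
  struct node *list, *next;
  for (list = *listp, *listp = NULL; list; list = next)
    {
      next = list->next;
      record_dep (consumer, list->insn);
      free (list);
    }
}

static struct node *
push (struct node *head, int insn)
{
  struct node *n = malloc (sizeof *n);
  n->insn = insn;
  n->next = head;
  return n;
}

int
main (void)
{
  struct node *pending = push (push (NULL, 1), 2);
  dep_list (3, pending);            /* keeps the list intact */
  dep_list_and_free (4, &pending);  /* emits the deps and frees it */
  return pending == NULL ? 0 : 1;
}

The "and_free" form is what lets flush_pending_lists and the barrier path in sched_analyze_insn emit their dependencies and drop the stale lists in a single pass.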

gcc/ChangeLog

@@ -1,3 +1,31 @@
2002-01-26 Richard Henderson <rth@redhat.com>
* sched-deps.c (reg_pending_uses_head): New.
(reg_pending_barrier): Rename from reg_pending_sets_all.
(find_insn_list): Don't mark inline.
(find_insn_mem_list): Remove.
(add_dependence_list, add_dependence_list_and_free): New.
(flush_pending_lists): Replace only_write param with separate
for_read and for_write parameters. Update all callers. Use
add_dependence_list_and_free.
(sched_analyze_1): Do not add reg dependencies here; just set
the pending bits. Use add_dependence_list.
(sched_analyze_2): Likewise.
(sched_analyze_insn): Replace schedule_barrier_found with
reg_pending_barrier. Add all dependencies for pending reg
uses, sets, and clobbers.
(sched_analyze): Don't add reg dependencies for calls, just
set pending bits. Use regs_invalidated_by_call. Treat
sched_before_next_call as a normal list, not a fake insn.
(init_deps): No funny init for sched_before_next_call.
(free_deps): Free pending mems lists. Don't zero reg_last.
(init_deps_global): Init reg_pending_uses.
(finish_deps_global): Free it.
* sched-int.h (deps): Make in_post_call_group_p boolean. Update docs.
(find_insn_mem_list): Remove.
* sched-rgn.c (concat_INSN_LIST, concat_insn_mem_list): New.
(propagate_deps): Use them. Zero temp mem lists.
2002-01-26 Richard Henderson <rth@redhat.com>
* Makefile.in (CRTSTUFF_CFLAGS): New.

gcc/sched-deps.c

@@ -46,10 +46,12 @@ extern rtx *reg_known_value;
static regset_head reg_pending_sets_head;
static regset_head reg_pending_clobbers_head;
static regset_head reg_pending_uses_head;
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static int reg_pending_sets_all;
static regset reg_pending_uses;
static bool reg_pending_barrier;
/* To speed up the test for duplicate dependency links we keep a
record of dependencies created by add_dependence when the average
@@ -77,10 +79,12 @@ static sbitmap *forward_dependency_cache;
#endif
static int deps_may_trap_p PARAMS ((rtx));
static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note));
static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note));
static void remove_dependence PARAMS ((rtx, rtx));
static void set_sched_group_p PARAMS ((rtx));
static void flush_pending_lists PARAMS ((struct deps *, rtx, int));
static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int));
static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
@@ -107,7 +111,7 @@ deps_may_trap_p (mem)
/* Return the INSN_LIST containing INSN in LIST, or NULL
if LIST does not contain INSN. */
HAIFA_INLINE rtx
rtx
find_insn_list (insn, list)
rtx insn;
rtx list;
@@ -120,25 +124,6 @@ find_insn_list (insn, list)
}
return 0;
}
/* Return 1 if the pair (insn, x) is found in (LIST, LIST1), or 0
otherwise. */
HAIFA_INLINE int
find_insn_mem_list (insn, x, list, list1)
rtx insn, x;
rtx list, list1;
{
while (list)
{
if (XEXP (list, 0) == insn
&& XEXP (list1, 0) == x)
return 1;
list = XEXP (list, 1);
list1 = XEXP (list1, 1);
}
return 0;
}
/* Find the condition under which INSN is executed. */
@@ -370,6 +355,34 @@ add_dependence (insn, elem, dep_type)
#endif
}
/* A convenience wrapper to operate on an entire list. */
static void
add_dependence_list (insn, list, dep_type)
rtx insn, list;
enum reg_note dep_type;
{
for (; list; list = XEXP (list, 1))
add_dependence (insn, XEXP (list, 0), dep_type);
}
/* Similar, but free *LISTP at the same time. */
static void
add_dependence_list_and_free (insn, listp, dep_type)
rtx insn;
rtx *listp;
enum reg_note dep_type;
{
rtx list, next;
for (list = *listp, *listp = NULL; list ; list = next)
{
next = XEXP (list, 1);
add_dependence (insn, XEXP (list, 0), dep_type);
free_INSN_LIST_node (list);
}
}
/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
of INSN. Abort if not found. */
@@ -505,51 +518,29 @@ add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
}
/* Make a dependency between every memory reference on the pending lists
and INSN, thus flushing the pending lists. If ONLY_WRITE, don't flush
the read list. */
and INSN, thus flushing the pending lists. FOR_READ is true if emitting
dependencies for a read operation, similarly with FOR_WRITE. */
static void
flush_pending_lists (deps, insn, only_write)
flush_pending_lists (deps, insn, for_read, for_write)
struct deps *deps;
rtx insn;
int only_write;
int for_read, for_write;
{
rtx u;
rtx link;
while (deps->pending_read_insns && ! only_write)
if (for_write)
{
add_dependence (insn, XEXP (deps->pending_read_insns, 0),
REG_DEP_ANTI);
link = deps->pending_read_insns;
deps->pending_read_insns = XEXP (deps->pending_read_insns, 1);
free_INSN_LIST_node (link);
link = deps->pending_read_mems;
deps->pending_read_mems = XEXP (deps->pending_read_mems, 1);
free_EXPR_LIST_node (link);
add_dependence_list_and_free (insn, &deps->pending_read_insns,
REG_DEP_ANTI);
free_EXPR_LIST_list (&deps->pending_read_mems);
}
while (deps->pending_write_insns)
{
add_dependence (insn, XEXP (deps->pending_write_insns, 0),
REG_DEP_ANTI);
link = deps->pending_write_insns;
deps->pending_write_insns = XEXP (deps->pending_write_insns, 1);
free_INSN_LIST_node (link);
link = deps->pending_write_mems;
deps->pending_write_mems = XEXP (deps->pending_write_mems, 1);
free_EXPR_LIST_node (link);
}
add_dependence_list_and_free (insn, &deps->pending_write_insns,
for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
free_EXPR_LIST_list (&deps->pending_write_mems);
deps->pending_lists_length = 0;
/* last_pending_memory_flush is now a list of insns. */
for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
free_INSN_LIST_list (&deps->last_pending_memory_flush);
add_dependence_list_and_free (insn, &deps->last_pending_memory_flush,
for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
deps->pending_flush_length = 1;
}
@@ -601,46 +592,22 @@ sched_analyze_1 (deps, x, insn)
if (GET_CODE (dest) == REG)
{
int i;
regno = REGNO (dest);
/* A hard reg in a wide mode may really be multiple registers.
If so, mark all of them just like the first. */
if (regno < FIRST_PSEUDO_REGISTER)
{
i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
while (--i >= 0)
int i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
if (code == SET)
{
int r = regno + i;
rtx u;
for (u = deps->reg_last[r].uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
/* Clobbers need not be ordered with respect to one
another, but sets must be ordered with respect to a
pending clobber. */
if (code == SET)
{
if (GET_CODE (PATTERN (insn)) != COND_EXEC)
free_INSN_LIST_list (&deps->reg_last[r].uses);
for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
SET_REGNO_REG_SET (reg_pending_sets, r);
}
else
SET_REGNO_REG_SET (reg_pending_clobbers, r);
/* Function calls clobber all call_used regs. */
if (global_regs[r]
|| (code == SET
&& TEST_HARD_REG_BIT (regs_invalidated_by_call, r)))
for (u = deps->last_function_call; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
while (--i >= 0)
SET_REGNO_REG_SET (reg_pending_sets, regno + i);
}
else
{
while (--i >= 0)
SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
}
}
/* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
@@ -654,22 +621,8 @@ sched_analyze_1 (deps, x, insn)
}
else
{
rtx u;
for (u = deps->reg_last[regno].uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
if (code == SET)
{
if (GET_CODE (PATTERN (insn)) != COND_EXEC)
free_INSN_LIST_list (&deps->reg_last[regno].uses);
for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
SET_REGNO_REG_SET (reg_pending_sets, regno);
}
SET_REGNO_REG_SET (reg_pending_sets, regno);
else
SET_REGNO_REG_SET (reg_pending_clobbers, regno);
@@ -683,10 +636,8 @@ sched_analyze_1 (deps, x, insn)
/* Don't let it cross a call after scheduling if it doesn't
already cross one. */
if (REG_N_CALLS_CROSSED (regno) == 0)
for (u = deps->last_function_call; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
add_dependence_list (insn, deps->last_function_call, REG_DEP_ANTI);
}
}
else if (GET_CODE (dest) == MEM)
@@ -708,11 +659,10 @@ sched_analyze_1 (deps, x, insn)
these lists get long. When compiling GCC with itself,
this flush occurs 8 times for sparc, and 10 times for m88k using
the default value of 32. */
flush_pending_lists (deps, insn, 0);
flush_pending_lists (deps, insn, false, true);
}
else
{
rtx u;
rtx pending, pending_mem;
pending = deps->pending_read_insns;
@@ -737,8 +687,8 @@ sched_analyze_1 (deps, x, insn)
pending_mem = XEXP (pending_mem, 1);
}
for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
add_dependence_list (insn, deps->last_pending_memory_flush,
REG_DEP_ANTI);
add_insn_mem_dependence (deps, &deps->pending_write_insns,
&deps->pending_write_mems, insn, dest);
@@ -790,32 +740,12 @@ sched_analyze_2 (deps, x, insn)
case REG:
{
rtx u;
int regno = REGNO (x);
if (regno < FIRST_PSEUDO_REGISTER)
{
int i;
i = HARD_REGNO_NREGS (regno, GET_MODE (x));
int i = HARD_REGNO_NREGS (regno, GET_MODE (x));
while (--i >= 0)
{
int r = regno + i;
deps->reg_last[r].uses
= alloc_INSN_LIST (insn, deps->reg_last[r].uses);
SET_REGNO_REG_SET (&deps->reg_last_in_use, r);
for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
/* ??? This should never happen. */
for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
if (call_used_regs[r] || global_regs[r])
/* Function calls clobber all call_used regs. */
for (u = deps->last_function_call; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
}
SET_REGNO_REG_SET (reg_pending_uses, regno + i);
}
/* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
it does not reload. Ignore these as they have served their
@@ -828,16 +758,7 @@ sched_analyze_2 (deps, x, insn)
}
else
{
deps->reg_last[regno].uses
= alloc_INSN_LIST (insn, deps->reg_last[regno].uses);
SET_REGNO_REG_SET (&deps->reg_last_in_use, regno);
for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
/* ??? This should never happen. */
for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
SET_REGNO_REG_SET (reg_pending_uses, regno);
/* Pseudos that are REG_EQUIV to something may be replaced
by that during reloading. We need only add dependencies for
@@ -851,8 +772,8 @@ sched_analyze_2 (deps, x, insn)
insn to the sched_before_next_call list so that it will still
not cross calls after scheduling. */
if (REG_N_CALLS_CROSSED (regno) == 0)
add_dependence (deps->sched_before_next_call, insn,
REG_DEP_ANTI);
deps->sched_before_next_call
= alloc_INSN_LIST (insn, deps->sched_before_next_call);
}
return;
}
@@ -910,15 +831,13 @@ sched_analyze_2 (deps, x, insn)
/* Force pending stores to memory in case a trap handler needs them. */
case TRAP_IF:
flush_pending_lists (deps, insn, 1);
flush_pending_lists (deps, insn, true, false);
break;
case ASM_OPERANDS:
case ASM_INPUT:
case UNSPEC_VOLATILE:
{
rtx u;
/* Traditional and volatile asm instructions must be considered to use
and clobber all hard registers, all pseudo-registers and all of
memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
@@ -927,25 +846,7 @@ sched_analyze_2 (deps, x, insn)
mode. An insn should not be moved across this even if it only uses
pseudo-regs because it might give an incorrectly rounded result. */
if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
{
for (i = 0; i < deps->max_reg; i++)
{
struct deps_reg *reg_last = &deps->reg_last[i];
for (u = reg_last->uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
for (u = reg_last->clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
if (GET_CODE (PATTERN (insn)) != COND_EXEC)
free_INSN_LIST_list (&reg_last->uses);
}
reg_pending_sets_all = 1;
flush_pending_lists (deps, insn, 0);
}
reg_pending_barrier = true;
/* For all ASM_OPERANDS, we must traverse the vector of input operands.
We can not just fall through here since then we would be confused
@@ -1008,7 +909,6 @@ sched_analyze_insn (deps, x, insn, loop_notes)
rtx loop_notes;
{
RTX_CODE code = GET_CODE (x);
int schedule_barrier_found = 0;
rtx link;
int i;
@@ -1057,7 +957,7 @@ sched_analyze_insn (deps, x, insn, loop_notes)
sched_analyze_2 (deps, XEXP (link, 0), insn);
}
if (find_reg_note (insn, REG_SETJMP, NULL))
schedule_barrier_found = 1;
reg_pending_barrier = true;
}
if (GET_CODE (insn) == JUMP_INSN)
@@ -1065,23 +965,15 @@ sched_analyze_insn (deps, x, insn, loop_notes)
rtx next;
next = next_nonnote_insn (insn);
if (next && GET_CODE (next) == BARRIER)
schedule_barrier_found = 1;
reg_pending_barrier = true;
else
{
rtx pending, pending_mem, u;
rtx pending, pending_mem;
regset_head tmp;
INIT_REG_SET (&tmp);
(*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
{
struct deps_reg *reg_last = &deps->reg_last[i];
for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
});
IOR_REG_SET (reg_pending_uses, &tmp);
CLEAR_REG_SET (&tmp);
/* All memory writes and volatile reads must happen before the
@@ -1107,8 +999,8 @@ sched_analyze_insn (deps, x, insn, loop_notes)
pending_mem = XEXP (pending_mem, 1);
}
for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
add_dependence_list (insn, deps->last_pending_memory_flush,
REG_DEP_ANTI);
}
}
@@ -1130,7 +1022,7 @@ sched_analyze_insn (deps, x, insn, loop_notes)
|| INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
|| INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
|| INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
schedule_barrier_found = 1;
reg_pending_barrier = true;
link = XEXP (link, 1);
}
@@ -1142,72 +1034,95 @@ sched_analyze_insn (deps, x, insn, loop_notes)
where block boundaries fall. This is mighty confusing elsewhere.
Therefore, prevent such an instruction from being moved. */
if (can_throw_internal (insn))
schedule_barrier_found = 1;
reg_pending_barrier = true;
/* Add dependencies if a scheduling barrier was found. */
if (schedule_barrier_found)
if (reg_pending_barrier)
{
rtx u;
for (i = 0; i < deps->max_reg; i++)
if (GET_CODE (PATTERN (insn)) == COND_EXEC)
{
struct deps_reg *reg_last = &deps->reg_last[i];
for (u = reg_last->uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
for (u = reg_last->clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
if (GET_CODE (PATTERN (insn)) != COND_EXEC)
free_INSN_LIST_list (&reg_last->uses);
}
flush_pending_lists (deps, insn, 0);
reg_pending_sets_all = 1;
}
/* Accumulate clobbers until the next set so that it will be output
dependent on all of them. At the next set we can clear the clobber
list, since subsequent sets will be output dependent on it. */
if (reg_pending_sets_all)
{
reg_pending_sets_all = 0;
for (i = 0; i < deps->max_reg; i++)
{
struct deps_reg *reg_last = &deps->reg_last[i];
if (GET_CODE (PATTERN (insn)) != COND_EXEC)
EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
{
free_INSN_LIST_list (&reg_last->sets);
free_INSN_LIST_list (&reg_last->clobbers);
}
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
add_dependence_list (insn, reg_last->sets, 0);
add_dependence_list (insn, reg_last->clobbers, 0);
});
}
else
{
EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
{
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list_and_free (insn, &reg_last->uses,
REG_DEP_ANTI);
add_dependence_list_and_free (insn, &reg_last->sets, 0);
add_dependence_list_and_free (insn, &reg_last->clobbers, 0);
});
}
for (i = 0; i < deps->max_reg; i++)
{
struct deps_reg *reg_last = &deps->reg_last[i];
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
}
flush_pending_lists (deps, insn, true, true);
reg_pending_barrier = false;
}
else
{
EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
{
struct deps_reg *reg_last = &deps->reg_last[i];
if (GET_CODE (PATTERN (insn)) != COND_EXEC)
{
free_INSN_LIST_list (&reg_last->sets);
free_INSN_LIST_list (&reg_last->clobbers);
}
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
add_dependence_list (insn, reg_last->sets, 0);
add_dependence_list (insn, reg_last->clobbers, 0);
reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
});
EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
{
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
});
/* If the current insn is conditional, we can't free any
of the lists. */
if (GET_CODE (PATTERN (insn)) == COND_EXEC)
{
EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
{
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
add_dependence_list (insn, reg_last->clobbers, REG_DEP_OUTPUT);
add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
});
}
else
{
EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
{
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list_and_free (insn, &reg_last->sets,
REG_DEP_OUTPUT);
add_dependence_list_and_free (insn, &reg_last->clobbers,
REG_DEP_OUTPUT);
add_dependence_list_and_free (insn, &reg_last->uses,
REG_DEP_ANTI);
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
});
}
IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
}
CLEAR_REG_SET (reg_pending_sets);
CLEAR_REG_SET (reg_pending_uses);
CLEAR_REG_SET (reg_pending_clobbers);
CLEAR_REG_SET (reg_pending_sets);
/* If a post-call group is still open, see if it should remain so.
This insn must be a simple move of a hard reg to a pseudo or
@@ -1251,7 +1166,7 @@ sched_analyze_insn (deps, x, insn, loop_notes)
else
{
end_call_group:
deps->in_post_call_group_p = 0;
deps->in_post_call_group_p = false;
}
}
}
@@ -1265,7 +1180,6 @@ sched_analyze (deps, head, tail)
rtx head, tail;
{
rtx insn;
rtx u;
rtx loop_notes = 0;
if (current_sched_info->use_cselib)
@@ -1287,7 +1201,7 @@ sched_analyze (deps, head, tail)
{
/* Keep the list a reasonable size. */
if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
flush_pending_lists (deps, insn, 0);
flush_pending_lists (deps, insn, true, true);
else
deps->last_pending_memory_flush
= alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
@@ -1297,7 +1211,6 @@ sched_analyze (deps, head, tail)
}
else if (GET_CODE (insn) == CALL_INSN)
{
rtx x;
int i;
/* Clear out stale SCHED_GROUP_P. */
@@ -1308,59 +1221,35 @@ sched_analyze (deps, head, tail)
/* Clear out the stale LOG_LINKS from flow. */
free_INSN_LIST_list (&LOG_LINKS (insn));
/* Any instruction using a hard register which may get clobbered
by a call needs to be marked as dependent on this call.
This prevents a use of a hard return reg from being moved
past a void call (i.e. it does not explicitly set the hard
return reg). */
/* If this call has REG_SETJMP, then assume that
all registers, not just hard registers, may be clobbered by this
call. */
/* Insn, being a CALL_INSN, magically depends on
`last_function_call' already. */
if (find_reg_note (insn, REG_SETJMP, NULL))
{
for (i = 0; i < deps->max_reg; i++)
{
struct deps_reg *reg_last = &deps->reg_last[i];
for (u = reg_last->uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
for (u = reg_last->clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
free_INSN_LIST_list (&reg_last->uses);
}
reg_pending_sets_all = 1;
/* This is setjmp. Assume that all registers, not just
hard registers, may be clobbered by this call. */
reg_pending_barrier = true;
}
else
{
/* A call may read and modify global register variables.
Other call-clobbered hard regs may be clobbered. We
don't know what set of fixed registers might be used
by the function. It is certain that the stack pointer
is among them, but be conservative. */
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (call_used_regs[i] || global_regs[i])
if (global_regs[i])
{
for (u = deps->reg_last[i].uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
for (u = deps->reg_last[i].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
SET_REGNO_REG_SET (reg_pending_clobbers, i);
SET_REGNO_REG_SET (reg_pending_sets, i);
SET_REGNO_REG_SET (reg_pending_uses, i);
}
else if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
SET_REGNO_REG_SET (reg_pending_clobbers, i);
else if (fixed_regs[i])
SET_REGNO_REG_SET (reg_pending_uses, i);
}
/* For each insn which shouldn't cross a call, add a dependence
between that insn and this call insn. */
x = LOG_LINKS (deps->sched_before_next_call);
while (x)
{
add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
x = XEXP (x, 1);
}
free_INSN_LIST_list (&LOG_LINKS (deps->sched_before_next_call));
add_dependence_list_and_free (insn, &deps->sched_before_next_call,
REG_DEP_ANTI);
sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
loop_notes = 0;
@@ -1369,19 +1258,16 @@ sched_analyze (deps, head, tail)
all pending reads and writes, and start new dependencies starting
from here. But only flush writes for constant calls (which may
be passed a pointer to something we haven't written yet). */
flush_pending_lists (deps, insn, CONST_OR_PURE_CALL_P (insn));
flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));
/* Depend this function call (actually, the user of this
function call) on all hard register clobberage. */
/* last_function_call is now a list of insns. */
/* Remember the last function call for limiting lifetimes. */
free_INSN_LIST_list (&deps->last_function_call);
deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
/* Before reload, begin a post-call group, so as to keep the
lifetimes of hard registers correct. */
if (! reload_completed)
deps->in_post_call_group_p = 1;
deps->in_post_call_group_p = true;
}
/* See comments on reemit_notes as to why we do this.
@@ -1513,12 +1399,8 @@ init_deps (deps)
deps->pending_flush_length = 0;
deps->last_pending_memory_flush = 0;
deps->last_function_call = 0;
deps->in_post_call_group_p = 0;
deps->sched_before_next_call
= gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
NULL_RTX, 0, NULL_RTX, NULL_RTX);
LOG_LINKS (deps->sched_before_next_call) = 0;
deps->sched_before_next_call = 0;
deps->in_post_call_group_p = false;
}
/* Free insn lists found in DEPS. */
@@ -1529,6 +1411,12 @@ free_deps (deps)
{
int i;
free_INSN_LIST_list (&deps->pending_read_insns);
free_EXPR_LIST_list (&deps->pending_read_mems);
free_INSN_LIST_list (&deps->pending_write_insns);
free_EXPR_LIST_list (&deps->pending_write_mems);
free_INSN_LIST_list (&deps->last_pending_memory_flush);
/* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
times. For a test case with 42000 regs and 8000 small basic blocks,
this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
@@ -1542,7 +1430,6 @@ free_deps (deps)
CLEAR_REG_SET (&deps->reg_last_in_use);
free (deps->reg_last);
deps->reg_last = NULL;
}
/* If it is profitable to use them, initialize caches for tracking
@@ -1602,7 +1489,8 @@ init_deps_global ()
{
reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
reg_pending_sets_all = 0;
reg_pending_uses = INITIALIZE_REG_SET (reg_pending_uses_head);
reg_pending_barrier = false;
}
/* Free everything used by the dependency analysis code. */
@@ -1612,4 +1500,5 @@ finish_deps_global ()
{
FREE_REG_SET (reg_pending_sets);
FREE_REG_SET (reg_pending_clobbers);
FREE_REG_SET (reg_pending_uses);
}

gcc/sched-int.h

@@ -68,19 +68,20 @@ struct deps
too large. */
rtx last_pending_memory_flush;
/* The last function call we have seen. All hard regs, and, of course,
the last function call, must depend on this. */
/* A list of the last function calls we have seen. We use a list to
represent last function calls from multiple predecessor blocks.
Used to prevent register lifetimes from expanding unnecessarily. */
rtx last_function_call;
/* A list of insns which use a pseudo register that does not already
cross a call. We create dependencies between each of those insn
and the next call insn, to ensure that they won't cross a call after
scheduling is done. */
rtx sched_before_next_call;
/* Used to keep post-call pseudo/hard reg movements together with
the call. */
int in_post_call_group_p;
/* The LOG_LINKS field of this is a list of insns which use a pseudo
register that does not already cross a call. We create
dependencies between each of those insn and the next call insn,
to ensure that they won't cross a call after scheduling is done. */
rtx sched_before_next_call;
bool in_post_call_group_p;
/* The maximum register number for the following arrays. Before reload
this is max_reg_num; after reload it is FIRST_PSEUDO_REGISTER. */
@@ -274,7 +275,6 @@ extern void free_deps PARAMS ((struct deps *));
extern void init_deps_global PARAMS ((void));
extern void finish_deps_global PARAMS ((void));
extern void compute_forward_dependences PARAMS ((rtx, rtx));
extern int find_insn_mem_list PARAMS ((rtx, rtx, rtx, rtx));
extern rtx find_insn_list PARAMS ((rtx, rtx));
extern void init_dependency_caches PARAMS ((int));
extern void free_dependency_caches PARAMS ((void));

gcc/sched-rgn.c

@@ -300,6 +300,8 @@ void debug_dependencies PARAMS ((void));
static void init_regions PARAMS ((void));
static void schedule_region PARAMS ((int));
static rtx concat_INSN_LIST PARAMS ((rtx, rtx));
static void concat_insn_mem_list PARAMS ((rtx, rtx, rtx *, rtx *));
static void propagate_deps PARAMS ((int, struct deps *));
static void free_pending_lists PARAMS ((void));
@@ -2299,8 +2301,7 @@ add_branch_dependences (head, tail)
{
if (GET_CODE (insn) != NOTE)
{
if (last != 0
&& !find_insn_list (insn, LOG_LINKS (last)))
if (last != 0 && !find_insn_list (insn, LOG_LINKS (last)))
{
add_dependence (last, insn, REG_DEP_ANTI);
INSN_REF_COUNT (insn)++;
@@ -2356,125 +2357,122 @@ add_branch_dependences (head, tail)
static struct deps *bb_deps;
/* Duplicate the INSN_LIST elements of COPY and prepend them to OLD. */
static rtx
concat_INSN_LIST (copy, old)
rtx copy, old;
{
rtx new = old;
for (; copy ; copy = XEXP (copy, 1))
new = alloc_INSN_LIST (XEXP (copy, 0), new);
return new;
}
static void
concat_insn_mem_list (copy_insns, copy_mems, old_insns_p, old_mems_p)
rtx copy_insns, copy_mems;
rtx *old_insns_p, *old_mems_p;
{
rtx new_insns = *old_insns_p;
rtx new_mems = *old_mems_p;
while (copy_insns)
{
new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns);
new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems);
copy_insns = XEXP (copy_insns, 1);
copy_mems = XEXP (copy_mems, 1);
}
*old_insns_p = new_insns;
*old_mems_p = new_mems;
}
/* After computing the dependencies for block BB, propagate the dependencies
found in TMP_DEPS to the successors of the block. */
static void
propagate_deps (bb, tmp_deps)
propagate_deps (bb, pred_deps)
int bb;
struct deps *tmp_deps;
struct deps *pred_deps;
{
int b = BB_TO_BLOCK (bb);
int e, first_edge;
int reg;
rtx link_insn, link_mem;
rtx u;
/* These lists should point to the right place, for correct
freeing later. */
bb_deps[bb].pending_read_insns = tmp_deps->pending_read_insns;
bb_deps[bb].pending_read_mems = tmp_deps->pending_read_mems;
bb_deps[bb].pending_write_insns = tmp_deps->pending_write_insns;
bb_deps[bb].pending_write_mems = tmp_deps->pending_write_mems;
/* bb's structures are inherited by its successors. */
first_edge = e = OUT_EDGES (b);
if (e <= 0)
return;
if (e > 0)
do
{
int b_succ = TO_BLOCK (e);
int bb_succ = BLOCK_TO_BB (b_succ);
struct deps *succ_deps = bb_deps + bb_succ;
int reg;
do
{
rtx x;
int b_succ = TO_BLOCK (e);
int bb_succ = BLOCK_TO_BB (b_succ);
struct deps *succ_deps = bb_deps + bb_succ;
/* Only bbs "below" bb, in the same region, are interesting. */
if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
|| bb_succ <= bb)
{
e = NEXT_OUT (e);
continue;
}
/* Only bbs "below" bb, in the same region, are interesting. */
if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
|| bb_succ <= bb)
{
e = NEXT_OUT (e);
continue;
}
/* The reg_last lists are inherited by bb_succ. */
EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg,
{
struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
struct deps_reg *succ_rl = &succ_deps->reg_last[reg];
/* The reg_last lists are inherited by bb_succ. */
EXECUTE_IF_SET_IN_REG_SET (&tmp_deps->reg_last_in_use, 0, reg,
{
struct deps_reg *tmp_deps_reg = &tmp_deps->reg_last[reg];
struct deps_reg *succ_deps_reg = &succ_deps->reg_last[reg];
succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
succ_rl->clobbers);
});
IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);
for (u = tmp_deps_reg->uses; u; u = XEXP (u, 1))
if (! find_insn_list (XEXP (u, 0), succ_deps_reg->uses))
succ_deps_reg->uses
= alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->uses);
/* Mem read/write lists are inherited by bb_succ. */
concat_insn_mem_list (pred_deps->pending_read_insns,
pred_deps->pending_read_mems,
&succ_deps->pending_read_insns,
&succ_deps->pending_read_mems);
concat_insn_mem_list (pred_deps->pending_write_insns,
pred_deps->pending_write_mems,
&succ_deps->pending_write_insns,
&succ_deps->pending_write_mems);
for (u = tmp_deps_reg->sets; u; u = XEXP (u, 1))
if (! find_insn_list (XEXP (u, 0), succ_deps_reg->sets))
succ_deps_reg->sets
= alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->sets);
succ_deps->last_pending_memory_flush
= concat_INSN_LIST (pred_deps->last_pending_memory_flush,
succ_deps->last_pending_memory_flush);
succ_deps->pending_lists_length += pred_deps->pending_lists_length;
succ_deps->pending_flush_length += pred_deps->pending_flush_length;
for (u = tmp_deps_reg->clobbers; u; u = XEXP (u, 1))
if (! find_insn_list (XEXP (u, 0), succ_deps_reg->clobbers))
succ_deps_reg->clobbers
= alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->clobbers);
});
IOR_REG_SET (&succ_deps->reg_last_in_use, &tmp_deps->reg_last_in_use);
/* last_function_call is inherited by bb_succ. */
succ_deps->last_function_call
= concat_INSN_LIST (pred_deps->last_function_call,
succ_deps->last_function_call);
/* Mem read/write lists are inherited by bb_succ. */
link_insn = tmp_deps->pending_read_insns;
link_mem = tmp_deps->pending_read_mems;
while (link_insn)
{
if (!(find_insn_mem_list (XEXP (link_insn, 0),
XEXP (link_mem, 0),
succ_deps->pending_read_insns,
succ_deps->pending_read_mems)))
add_insn_mem_dependence (succ_deps, &succ_deps->pending_read_insns,
&succ_deps->pending_read_mems,
XEXP (link_insn, 0), XEXP (link_mem, 0));
link_insn = XEXP (link_insn, 1);
link_mem = XEXP (link_mem, 1);
}
/* sched_before_next_call is inherited by bb_succ. */
succ_deps->sched_before_next_call
= concat_INSN_LIST (pred_deps->sched_before_next_call,
succ_deps->sched_before_next_call);
link_insn = tmp_deps->pending_write_insns;
link_mem = tmp_deps->pending_write_mems;
while (link_insn)
{
if (!(find_insn_mem_list (XEXP (link_insn, 0),
XEXP (link_mem, 0),
succ_deps->pending_write_insns,
succ_deps->pending_write_mems)))
add_insn_mem_dependence (succ_deps,
&succ_deps->pending_write_insns,
&succ_deps->pending_write_mems,
XEXP (link_insn, 0), XEXP (link_mem, 0));
e = NEXT_OUT (e);
}
while (e != first_edge);
link_insn = XEXP (link_insn, 1);
link_mem = XEXP (link_mem, 1);
}
/* These lists should point to the right place, for correct
freeing later. */
bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns;
bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;
/* last_function_call is inherited by bb_succ. */
for (u = tmp_deps->last_function_call; u; u = XEXP (u, 1))
if (! find_insn_list (XEXP (u, 0), succ_deps->last_function_call))
succ_deps->last_function_call
= alloc_INSN_LIST (XEXP (u, 0), succ_deps->last_function_call);
/* last_pending_memory_flush is inherited by bb_succ. */
for (u = tmp_deps->last_pending_memory_flush; u; u = XEXP (u, 1))
if (! find_insn_list (XEXP (u, 0),
succ_deps->last_pending_memory_flush))
succ_deps->last_pending_memory_flush
= alloc_INSN_LIST (XEXP (u, 0),
succ_deps->last_pending_memory_flush);
/* sched_before_next_call is inherited by bb_succ. */
x = LOG_LINKS (tmp_deps->sched_before_next_call);
for (; x; x = XEXP (x, 1))
add_dependence (succ_deps->sched_before_next_call,
XEXP (x, 0), REG_DEP_ANTI);
e = NEXT_OUT (e);
}
while (e != first_edge);
/* Can't allow these to be freed twice. */
pred_deps->pending_read_insns = 0;
pred_deps->pending_read_mems = 0;
pred_deps->pending_write_insns = 0;
pred_deps->pending_write_mems = 0;
}
/* Compute backward dependences inside bb. In a multiple blocks region:
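
Editor's note: concat_INSN_LIST and concat_insn_mem_list, introduced above, copy the predecessor block's nodes onto the successor's lists rather than splicing the originals, which is why propagate_deps can afterwards hand its own pending lists to bb_deps[bb] and zero the temporaries without risking a double free. Below is a standalone sketch of that copy-prepend; the node type and the names cons and concat_list are invented stand-ins for INSN_LIST and concat_INSN_LIST.

#include <stdio.h>
#include <stdlib.h>

struct node { int insn; struct node *next; };

static struct node *
cons (int insn, struct node *next)
{
  struct node *n = malloc (sizeof *n);
  n->insn = insn;
  n->next = next;
  return n;
}

/* concat_INSN_LIST analogue: prepend fresh copies of COPY onto OLD.
   OLD is left untouched; only newly allocated nodes point at it.  */
static struct node *
concat_list (struct node *copy, struct node *old)
{
  struct node *new_head = old;
  for (; copy; copy = copy->next)
    new_head = cons (copy->insn, new_head);
  return new_head;
}

int
main (void)
{
  struct node *pred = cons (1, cons (2, NULL));
  struct node *succ = cons (9, NULL);
  struct node *p;

  succ = concat_list (pred, succ);

  /* The successor owns copies, so freeing the predecessor's nodes
     (as free_deps eventually does for its lists) cannot touch
     anything succ still uses.  */
  while (pred)
    {
      struct node *next = pred->next;
      free (pred);
      pred = next;
    }

  for (p = succ; p; p = p->next)
    printf ("%d ", p->insn);
  printf ("\n");   /* prints: 2 1 9 */
  return 0;
}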