sched-deps.c (sched_analyze_insn): Use MOVE_BARRIER instead of TRUE_BARRIER for jumps.

        Maxim Kuvyrkov  <maxim@codesourcery.com>

        * gcc/sched-deps.c (sched_analyze_insn): Use MOVE_BARRIER
        instead of TRUE_BARRIER for jumps.  Add register dependencies
        even when reg_pending_barrier is set.


Co-Authored-By: Maxim Kuvyrkov <maxim@codesourcery.com>

From-SVN: r130052
Author: Alexander Monakov, 2007-11-09 20:23:42 +03:00; committed by Alexander Monakov
parent cbc6c888f9
commit 0a1766b289
2 changed files with 96 additions and 90 deletions
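
The heart of the change is an ordering fix inside sched_analyze_insn: before this commit, a pending scheduling barrier short-circuited the per-register dependence bookkeeping; after it, register dependencies are added first and the blanket barrier dependencies are layered on top. The self-contained C sketch below models only that control-flow change; every name in it is a toy stand-in, not a GCC type or API.

#include <stdio.h>

/* Toy stand-in for the scheduler's reg_pending_barrier values.  */
enum barrier_mode { NOT_A_BARRIER, MOVE_BARRIER, TRUE_BARRIER };

static void add_register_deps (const char *insn)
{ printf ("%s: per-register true/anti/output deps\n", insn); }

static void add_barrier_deps (const char *insn)
{ printf ("%s: blanket barrier deps; flush pending lists\n", insn); }

/* Before r130052 (simplified): a pending barrier replaced the
   per-register bookkeeping entirely.  */
static void analyze_old (const char *insn, enum barrier_mode b)
{
  if (b != NOT_A_BARRIER)
    add_barrier_deps (insn);
  else
    add_register_deps (insn);
}

/* After r130052 (simplified): register deps are added unconditionally,
   then barrier deps on top, "even when reg_pending_barrier is set".  */
static void analyze_new (const char *insn, enum barrier_mode b)
{
  add_register_deps (insn);
  if (b != NOT_A_BARRIER)
    add_barrier_deps (insn);
}

int main (void)
{
  analyze_old ("old", MOVE_BARRIER);   /* register deps are skipped */
  analyze_new ("new", MOVE_BARRIER);   /* both kinds are recorded */
  return 0;
}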

gcc/ChangeLog:

@@ -1,3 +1,10 @@
+2007-11-09  Alexander Monakov  <amonakov@ispras.ru>
+            Maxim Kuvyrkov  <maxim@codesourcery.com>
+
+        * gcc/sched-deps.c (sched_analyze_insn): Use MOVE_BARRIER
+        instead of TRUE_BARRIER for jumps.  Add register dependencies
+        even when reg_pending_barrier is set.
+
 2007-11-09  Alexander Monakov  <amonakov@ispras.ru>
 
         * gcc/haifa-sched.c (haifa_classify_insn): Rename to ...

gcc/sched-deps.c:

@@ -1920,7 +1920,7 @@ sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
           rtx next;
           next = next_nonnote_insn (insn);
           if (next && BARRIER_P (next))
-            reg_pending_barrier = TRUE_BARRIER;
+            reg_pending_barrier = MOVE_BARRIER;
           else
             {
               rtx pending, pending_mem;
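
A note on the hunk above: the barrier kind matters because, as the ternaries in the later hunks show, TRUE_BARRIER turns the blanket barrier dependencies into true dependencies (REG_DEP_TRUE), while MOVE_BARRIER leaves them as anti-dependencies (REG_DEP_ANTI), which merely forbid reordering. A compilable toy restating that mapping, with stand-in enums rather than the real GCC definitions:

#include <stdio.h>

enum barrier_mode { NOT_A_BARRIER, MOVE_BARRIER, TRUE_BARRIER };
enum dep_kind { DEP_TRUE, DEP_ANTI };   /* stand-ins for REG_DEP_* */

/* Mirrors the expression `reg_pending_barrier == TRUE_BARRIER
   ? REG_DEP_TRUE : REG_DEP_ANTI` used throughout the hunks below.  */
static enum dep_kind barrier_dep_kind (enum barrier_mode b)
{
  return b == TRUE_BARRIER ? DEP_TRUE : DEP_ANTI;
}

int main (void)
{
  /* After this patch a jump followed by a barrier sets MOVE_BARRIER,
     so the blanket deps it induces are anti-deps, not true deps.  */
  printf ("%s\n", barrier_dep_kind (MOVE_BARRIER) == DEP_ANTI
                  ? "anti-dependence" : "true dependence");
  return 0;
}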
@@ -1984,58 +1984,8 @@ sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
       || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
     reg_pending_barrier = MOVE_BARRIER;
 
-  /* Add dependencies if a scheduling barrier was found.  */
-  if (reg_pending_barrier)
-    {
-      /* In the case of barrier the most added dependencies are not
-         real, so we use anti-dependence here.  */
-      if (sched_get_condition (insn))
-        {
-          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
-            {
-              struct deps_reg *reg_last = &deps->reg_last[i];
-              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
-              add_dependence_list
-                (insn, reg_last->sets, 0,
-                 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
-              add_dependence_list
-                (insn, reg_last->clobbers, 0,
-                 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
-            }
-        }
-      else
-        {
-          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
-            {
-              struct deps_reg *reg_last = &deps->reg_last[i];
-              add_dependence_list_and_free (insn, &reg_last->uses, 0,
-                                            REG_DEP_ANTI);
-              add_dependence_list_and_free
-                (insn, &reg_last->sets, 0,
-                 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
-              add_dependence_list_and_free
-                (insn, &reg_last->clobbers, 0,
-                 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
-              reg_last->uses_length = 0;
-              reg_last->clobbers_length = 0;
-            }
-        }
-
-      for (i = 0; i < (unsigned)deps->max_reg; i++)
-        {
-          struct deps_reg *reg_last = &deps->reg_last[i];
-          reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
-          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
-        }
-
-      flush_pending_lists (deps, insn, true, true);
-      CLEAR_REG_SET (&deps->reg_conditional_sets);
-      reg_pending_barrier = NOT_A_BARRIER;
-    }
-  else
-    {
-      /* If the current insn is conditional, we can't free any
-         of the lists.  */
+  /* Add register dependencies for insn.
+     If the current insn is conditional, we can't free any of the lists.  */
       if (sched_get_condition (insn))
         {
           EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
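
Both the removed and the surviving code iterate with EXECUTE_IF_SET_IN_REG_SET (set, 0, i, rsi), a GCC macro that visits every register number recorded in a register set, binding each to i (rsi holds the iterator state). A rough, runnable analogue over a flat bitmask; the real macro walks GCC's sparse register sets:

#include <stdint.h>
#include <stdio.h>

int main (void)
{
  /* Pretend registers 3 and 7 are in deps->reg_last_in_use.  */
  uint64_t reg_last_in_use = (1u << 3) | (1u << 7);

  /* Loose analogue of:
       EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
         { ... use deps->reg_last[i] ... }  */
  for (unsigned i = 0; i < 64; i++)
    if ((reg_last_in_use >> i) & 1)
      printf ("visit deps->reg_last[%u]\n", i);
  return 0;
}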
@@ -2117,11 +2067,60 @@ sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
-    }
 
   CLEAR_REG_SET (reg_pending_uses);
   CLEAR_REG_SET (reg_pending_clobbers);
   CLEAR_REG_SET (reg_pending_sets);
+
+  /* Add dependencies if a scheduling barrier was found.  */
+  if (reg_pending_barrier)
+    {
+      /* In the case of barrier the most added dependencies are not
+         real, so we use anti-dependence here.  */
+      if (sched_get_condition (insn))
+        {
+          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
+            {
+              struct deps_reg *reg_last = &deps->reg_last[i];
+              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+              add_dependence_list
+                (insn, reg_last->sets, 0,
+                 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
+              add_dependence_list
+                (insn, reg_last->clobbers, 0,
+                 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
+            }
+        }
+      else
+        {
+          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
+            {
+              struct deps_reg *reg_last = &deps->reg_last[i];
+              add_dependence_list_and_free (insn, &reg_last->uses, 0,
+                                            REG_DEP_ANTI);
+              add_dependence_list_and_free
+                (insn, &reg_last->sets, 0,
+                 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
+              add_dependence_list_and_free
+                (insn, &reg_last->clobbers, 0,
+                 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
+              reg_last->uses_length = 0;
+              reg_last->clobbers_length = 0;
+            }
+        }
+
+      for (i = 0; i < (unsigned)deps->max_reg; i++)
+        {
+          struct deps_reg *reg_last = &deps->reg_last[i];
+          reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
+          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
+        }
+
+      flush_pending_lists (deps, insn, true, true);
+      CLEAR_REG_SET (&deps->reg_conditional_sets);
+      reg_pending_barrier = NOT_A_BARRIER;
+    }
+
   /* If we are currently in a libcall scheduling group, then mark the
      current insn as being in a scheduling group and that it can not
      be moved into a different basic block.  */
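
For context on the list plumbing above: reg_last->uses, reg_last->sets and reg_last->clobbers are INSN_LIST chains; alloc_INSN_LIST conses an insn onto such a chain, add_dependence_list records one dependence per listed insn, and add_dependence_list_and_free does the same and then releases the chain. A self-contained sketch with toy nodes standing in for the rtl cells:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for an INSN_LIST node (really an rtx cons cell).  */
struct insn_list
{
  int uid;                  /* the listed insn, identified by uid */
  struct insn_list *next;
};

/* Analogue of alloc_INSN_LIST: cons UID onto LIST.  */
static struct insn_list *cons_insn (int uid, struct insn_list *list)
{
  struct insn_list *n = malloc (sizeof *n);
  n->uid = uid;
  n->next = list;
  return n;
}

/* Shape of add_dependence_list as called above: record one dependence
   of KIND from INSN_UID to every insn on LIST.  */
static void add_deps_from_list (int insn_uid, struct insn_list *list,
                                const char *kind)
{
  for (; list; list = list->next)
    printf ("dep: insn %d -> insn %d (%s)\n", insn_uid, list->uid, kind);
}

/* Analogue of the "_and_free" variant: record the deps, then
   release the chain.  */
static void add_deps_and_free (int insn_uid, struct insn_list **listp,
                               const char *kind)
{
  while (*listp)
    {
      struct insn_list *n = *listp;
      printf ("dep: insn %d -> insn %d (%s)\n", insn_uid, n->uid, kind);
      *listp = n->next;
      free (n);
    }
}

int main (void)
{
  struct insn_list *sets = NULL;
  /* Like reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets).  */
  sets = cons_insn (10, sets);
  sets = cons_insn (14, sets);
  add_deps_from_list (20, sets, "anti");   /* MOVE_BARRIER: anti-deps */
  add_deps_and_free (21, &sets, "anti");
  return 0;
}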