haifa-sched.c (schedule_insn): Return necessary cycle advance after issuing the insn.

2003-01-28  Vladimir Makarov  <vmakarov@redhat.com>

	* haifa-sched.c (schedule_insn): Return necessary cycle advance
	after issuing the insn.
	(rank_for_schedule): Make an insn with /S the highest priority
	insn.
	(move_insn): Ignore schedule groups.  Clear SCHED_GROUP_P.
	(choose_ready): Check SCHED_GROUP_P.
	(schedule_block): Advance cycle after issuing insn if it is
	necessary.  Don't reorder insns if there is an insn with /S.
	(set_priorities): Ignore schedule groups.

	* sched-deps.c (remove_dependence, group_leader): Remove the
	functions.
	(add_dependence): Ignore schedule groups.
	(set_sched_group_p): Don't make a copy of dependencies from the
	previous insn of the schedule group.  Add an anti-dependency to
	the previous insn of the schedule group.
	(compute_forward_dependences): Ignore schedule groups.

	* sched-ebb.c (init_ready_list): Ignore schedule groups.

	* sched-rgn.c (init_ready_list): Ditto.
	(can_schedule_ready_p): Ditto.

From-SVN: r61983
Author: Vladimir Makarov <vmakarov@redhat.com>, 2003-01-28 17:12:06 +00:00 (committed by Vladimir Makarov)
parent 5db544e164
commit 58fb780923
5 changed files with 99 additions and 238 deletions
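For orientation before the per-file hunks: the patch retires the old schedule-group machinery (copying the predecessor's whole dependence list onto each group member, plus the group_leader () walks in add_dependence and compute_forward_dependences) in favor of a much smaller invariant. Below is an illustrative sketch with simplified stand-in types, not GCC's rtl; only the /S flag (SCHED_GROUP_P) and the single anti-dependence per member come from the patch itself.

```c
#include <stddef.h>

/* Toy model, not GCC code: after this patch a schedule group is
   nothing more than the /S bit on every member after the first,
   plus one anti-dependence from each member to its predecessor.  */

struct toy_insn
{
  int sched_group_p;            /* the /S flag */
  struct toy_insn *prev_member; /* models REG_DEP_ANTI on the
                                   previous insn of the group */
};

/* Models the reduced set_sched_group_p: no dependence copying,
   no group_leader () walk -- just the flag and one anti-dependence.  */
static void
toy_set_sched_group_p (struct toy_insn *insn, struct toy_insn *prev)
{
  insn->sched_group_p = 1;
  insn->prev_member = prev;
}
```

Everything else in the patch -- the ready-list ordering, the bypassed reorder hooks, the cycle-advance plumbing -- exists to honor that invariant at issue time.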

File: gcc/ChangeLog

@@ -1,3 +1,28 @@
2003-01-28 Vladimir Makarov <vmakarov@redhat.com>
* haifa-sched.c (schedule_insn): Return necessary cycle advance
after issuing the insn.
(rank_for_schedule): Make an insn with /S the highest priority
insn.
(move_insn): Ignore schedule groups. Clear SCHED_GROUP_P.
(choose_ready): Check SCHED_GROUP_P.
(schedule_block): Advance cycle after issuing insn if it is
necessary. Don't reorder insns if there is an insn with /S.
(set_priorities): Ignore schedule groups.
* sched-deps.c (remove_dependence, group_leader): Remove the
functions.
(add_dependence): Ignore schedule groups.
(set_sched_group_p): Don't make a copy of dependencies from the
previous insn of the schedule group.  Add an anti-dependency to the
previous insn of the schedule group.
(compute_forward_dependences): Ignore schedule groups.
* sched-ebb.c (init_ready_list): Ignore schedule groups.
* sched-rgn.c (init_ready_list): Ditto.
(can_schedule_ready_p): Ditto.
2003-01-28 Vladimir Makarov <vmakarov@redhat.com>
* config/i386/i386.md (*movsi_1): Use movdqa to move one xmm

File: gcc/haifa-sched.c

@@ -319,7 +319,7 @@ static int priority PARAMS ((rtx));
static int rank_for_schedule PARAMS ((const PTR, const PTR));
static void swap_sort PARAMS ((rtx *, int));
static void queue_insn PARAMS ((rtx, int));
static void schedule_insn PARAMS ((rtx, struct ready_list *, int));
static int schedule_insn PARAMS ((rtx, struct ready_list *, int));
static int find_set_reg_weight PARAMS ((rtx));
static void find_insn_reg_weight PARAMS ((int));
static void adjust_priority PARAMS ((rtx));
@@ -852,6 +852,10 @@ rank_for_schedule (x, y)
int tmp_class, tmp2_class, depend_count1, depend_count2;
int val, priority_val, weight_val, info_val;
/* The insn in a schedule group should be issued first.  */
if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
return SCHED_GROUP_P (tmp2) ? 1 : -1;
/* Prefer insn with higher priority. */
priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
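The ready list is kept sorted with this comparator, and the new test runs before the priority comparison, so a /S insn outranks any priority difference. Here is a standalone demonstration of the same two-level comparator shape; the names are hypothetical and it uses plain best-first ordering rather than GCC's internal ready-list layout.

```c
#include <stdio.h>
#include <stdlib.h>

/* Demo of a two-level comparator: a group flag that dominates the
   priority comparison, as in rank_for_schedule above.  */

struct ent
{
  const char *name;
  int group_p;   /* stands in for SCHED_GROUP_P */
  int priority;  /* stands in for INSN_PRIORITY */
};

static int
cmp (const void *xp, const void *yp)
{
  const struct ent *x = xp, *y = yp;
  if (x->group_p != y->group_p)
    return y->group_p ? 1 : -1;      /* group members sort first */
  return y->priority - x->priority;  /* then higher priority first */
}

int
main (void)
{
  struct ent v[] = { { "add", 0, 5 }, { "use", 1, 1 }, { "mul", 0, 9 } };
  qsort (v, 3, sizeof v[0], cmp);
  for (int i = 0; i < 3; i++)
    printf ("%s\n", v[i].name);      /* prints: use, mul, add */
  return 0;
}
```

Even though "use" has the lowest priority, the dominating flag test places it ahead of both arithmetic insns.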
@@ -1105,16 +1109,18 @@ static int last_clock_var;
/* INSN is the "currently executing insn". Launch each insn which was
waiting on INSN. READY is the ready list which contains the insns
that are ready to fire. CLOCK is the current cycle.
*/
that are ready to fire.  CLOCK is the current cycle.  The function
returns the necessary cycle advance after issuing the insn (it is
nonzero for insns in a schedule group).  */
static void
static int
schedule_insn (insn, ready, clock)
rtx insn;
struct ready_list *ready;
int clock;
{
rtx link;
int advance = 0;
int unit = 0;
if (!targetm.sched.use_dfa_pipeline_interface
@@ -1156,7 +1162,7 @@ schedule_insn (insn, ready, clock)
schedule_unit (unit, insn, clock);
if (INSN_DEPEND (insn) == 0)
return;
return 0;
}
for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
@@ -1181,7 +1187,8 @@ schedule_insn (insn, ready, clock)
if (effective_cost < 1)
fprintf (sched_dump, "into ready\n");
else
fprintf (sched_dump, "into queue with cost=%d\n", effective_cost);
fprintf (sched_dump, "into queue with cost=%d\n",
effective_cost);
}
/* Adjust the priority of NEXT and either put it on the ready
@@ -1190,7 +1197,12 @@ schedule_insn (insn, ready, clock)
if (effective_cost < 1)
ready_add (ready, next);
else
queue_insn (next, effective_cost);
{
queue_insn (next, effective_cost);
if (SCHED_GROUP_P (next) && advance < effective_cost)
advance = effective_cost;
}
}
}
@@ -1207,6 +1219,7 @@ schedule_insn (insn, ready, clock)
PUT_MODE (insn, clock > last_clock_var ? TImode : VOIDmode);
last_clock_var = clock;
}
return advance;
}
/* Functions for handling of notes. */
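In other words, the new return value is the scheduler saying "nothing else may issue for N cycles": when the issued insn wakes a /S successor whose dependence cost cannot be paid this cycle, that cost is handed back to schedule_block as a mandatory clock advance. A toy simulation of that contract follows, with hypothetical names; the real plumbing is the schedule_insn/schedule_block pairing in this patch.

```c
#include <stdio.h>
#include <stddef.h>

/* Toy simulation, not GCC code: issuing A wakes its /S successor B,
   whose link cost cannot be paid this cycle; the cost is returned
   as the advance the driver must burn before issuing anything else.  */

struct toy_insn
{
  const char *name;
  int sched_group_p; /* the /S flag */
  int dep_count;     /* unresolved incoming dependencies */
  int cost;          /* latency on the incoming link */
};

static int
toy_schedule_insn (struct toy_insn *insn, struct toy_insn *next)
{
  printf ("issue %s\n", insn->name);
  if (next != NULL && --next->dep_count == 0 && next->sched_group_p)
    return next->cost; /* the necessary cycle advance */
  return 0;
}

int
main (void)
{
  struct toy_insn a = { "A", 0, 0, 0 };
  struct toy_insn b = { "B", 1, 1, 2 };     /* group member, latency 2 */
  int clock = 0;
  int advance = toy_schedule_insn (&a, &b); /* returns 2 */

  while (advance-- > 0) /* the driver burns exactly those cycles */
    printf ("cycle %d: stall for pending group member\n", ++clock);
  toy_schedule_insn (&b, NULL);
  return 0;
}
```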
@@ -1757,8 +1770,7 @@ reemit_notes (insn, last)
return retval;
}
/* Move INSN, and all insns which should be issued before it,
due to SCHED_GROUP_P flag. Reemit notes if needed.
/* Move INSN. Reemit notes if needed.
Return the last insn emitted by the scheduler, which is the
return value from the first call to reemit_notes. */
@@ -1769,26 +1781,6 @@ move_insn (insn, last)
{
rtx retval = NULL;
/* If INSN has SCHED_GROUP_P set, then issue it and any other
insns with SCHED_GROUP_P set first. */
while (SCHED_GROUP_P (insn))
{
rtx prev = PREV_INSN (insn);
/* Move a SCHED_GROUP_P insn. */
move_insn1 (insn, last);
/* If this is the first call to reemit_notes, then record
its return value. */
if (retval == NULL_RTX)
retval = reemit_notes (insn, insn);
else
reemit_notes (insn, insn);
/* Consume SCHED_GROUP_P flag. */
SCHED_GROUP_P (insn) = 0;
insn = prev;
}
/* Now move the first non SCHED_GROUP_P insn. */
move_insn1 (insn, last);
/* If this is the first call to reemit_notes, then record
@@ -1798,6 +1790,8 @@ move_insn (insn, last)
else
reemit_notes (insn, insn);
SCHED_GROUP_P (insn) = 0;
return retval;
}
@@ -1911,7 +1905,8 @@ choose_ready (ready)
struct ready_list *ready;
{
if (!targetm.sched.first_cycle_multipass_dfa_lookahead
|| (*targetm.sched.first_cycle_multipass_dfa_lookahead) () <= 0)
|| (*targetm.sched.first_cycle_multipass_dfa_lookahead) () <= 0
|| SCHED_GROUP_P (ready_element (ready, 0)))
return ready_remove_first (ready);
else
{
@@ -1961,7 +1956,7 @@ schedule_block (b, rgn_n_insns)
int i, first_cycle_insn_p;
int can_issue_more;
state_t temp_state = NULL; /* It is used for multipass scheduling. */
int sort_p;
int sort_p, advance, start_clock_var;
/* Head/tail info for this block. */
rtx prev_head = current_sched_info->prev_head;
@@ -2045,29 +2040,37 @@
/* Start just before the beginning of time. */
clock_var = -1;
advance = 0;
sort_p = TRUE;
/* Loop until all the insns in BB are scheduled. */
while ((*current_sched_info->schedule_more_p) ())
{
clock_var++;
advance_one_cycle ();
/* Add to the ready list all pending insns that can be issued now.
If there are no ready insns, increment clock until one
is ready and add all pending insns at that point to the ready
list. */
queue_to_ready (&ready);
if (ready.n_ready == 0)
abort ();
if (sched_verbose >= 2)
do
{
fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
debug_ready_list (&ready);
start_clock_var = clock_var;
clock_var++;
advance_one_cycle ();
/* Add to the ready list all pending insns that can be issued now.
If there are no ready insns, increment clock until one
is ready and add all pending insns at that point to the ready
list. */
queue_to_ready (&ready);
if (ready.n_ready == 0)
abort ();
if (sched_verbose >= 2)
{
fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
debug_ready_list (&ready);
}
advance -= clock_var - start_clock_var;
}
while (advance > 0);
if (sort_p)
{
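One subtlety in the do-while above: per the retained comment, queue_to_ready can consume cycles on its own when the ready list is empty, incrementing the clock until something becomes ready. Each trip through the loop therefore subtracts the cycles that actually elapsed, clock_var - start_clock_var, rather than a constant 1. A toy rendering of that bookkeeping, with stand-ins for the real routines:

```c
#include <stdio.h>

static int clock_var = -1;

/* Stand-in for queue_to_ready: pretend the ready list stayed empty
   for STALL extra cycles, as the real routine's clock loop would.  */
static void
toy_queue_to_ready (int stall)
{
  clock_var += stall;
}

int
main (void)
{
  int advance = 3; /* as returned by schedule_insn */
  do
    {
      int start_clock_var = clock_var;
      clock_var++;             /* the explicit advance_one_cycle () */
      toy_queue_to_ready (2);  /* two more cycles stalled inside */
      advance -= clock_var - start_clock_var; /* consumed 3, not 1 */
    }
  while (advance > 0);
  /* One trip covered all three required cycles: */
  printf ("clock_var = %d, advance = %d\n", clock_var, advance);
  return 0;
}
```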
@@ -2083,7 +2086,9 @@
/* Allow the target to reorder the list, typically for
better instruction bundling. */
if (targetm.sched.reorder)
if (targetm.sched.reorder
&& (ready.n_ready == 0
|| !SCHED_GROUP_P (ready_element (&ready, 0))))
can_issue_more =
(*targetm.sched.reorder) (sched_dump, sched_verbose,
ready_lastpos (&ready),
@@ -2256,7 +2261,9 @@
&& GET_CODE (PATTERN (insn)) != CLOBBER)
can_issue_more--;
schedule_insn (insn, &ready, clock_var);
advance = schedule_insn (insn, &ready, clock_var);
if (advance != 0)
break;
next:
first_cycle_insn_p = 0;
@@ -2267,7 +2274,9 @@
if (ready.n_ready > 0)
ready_sort (&ready);
if (targetm.sched.reorder2)
if (targetm.sched.reorder2
&& (ready.n_ready == 0
|| !SCHED_GROUP_P (ready_element (&ready, 0))))
{
can_issue_more =
(*targetm.sched.reorder2) (sched_dump, sched_verbose,
@@ -2393,8 +2402,7 @@ set_priorities (head, tail)
if (GET_CODE (insn) == NOTE)
continue;
if (! SCHED_GROUP_P (insn))
n_insn++;
n_insn++;
(void) priority (insn);
}

File: gcc/sched-deps.c

@@ -83,14 +83,12 @@ static sbitmap *forward_dependency_cache;
static int deps_may_trap_p PARAMS ((rtx));
static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note));
static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note));
static void remove_dependence PARAMS ((rtx, rtx));
static void set_sched_group_p PARAMS ((rtx));
static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int));
static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
static rtx group_leader PARAMS ((rtx));
static rtx get_condition PARAMS ((rtx));
static int conditions_mutex_p PARAMS ((rtx, rtx));
@@ -184,7 +182,7 @@ add_dependence (insn, elem, dep_type)
rtx elem;
enum reg_note dep_type;
{
rtx link, next;
rtx link;
int present_p;
rtx cond1, cond2;
@@ -218,38 +216,6 @@ add_dependence (insn, elem, dep_type)
return;
}
/* If elem is part of a sequence that must be scheduled together, then
make the dependence point to the last insn of the sequence.
When HAVE_cc0, it is possible for NOTEs to exist between users and
setters of the condition codes, so we must skip past notes here.
Otherwise, NOTEs are impossible here. */
next = next_nonnote_insn (elem);
if (next && INSN_P (next) && SCHED_GROUP_P (next))
{
/* Notes will never intervene here though, so don't bother checking
for them. */
/* Hah! Wrong. */
/* We must reject CODE_LABELs, so that we don't get confused by one
that has LABEL_PRESERVE_P set, which is represented by the same
bit in the rtl as SCHED_GROUP_P. A CODE_LABEL can never be
SCHED_GROUP_P. */
rtx nnext;
while ((nnext = next_nonnote_insn (next)) != NULL
&& INSN_P (nnext)
&& SCHED_GROUP_P (nnext))
next = nnext;
/* Again, don't depend an insn on itself. */
if (insn == next)
return;
/* Make the dependence to NEXT, the last insn of the group,
instead of the original ELEM. */
elem = next;
}
present_p = 1;
#ifdef INSN_SCHEDULING
/* ??? No good way to tell from here whether we're doing interblock
@@ -385,76 +351,6 @@ add_dependence_list_and_free (insn, listp, dep_type)
}
}
/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
of INSN. Abort if not found. */
static void
remove_dependence (insn, elem)
rtx insn;
rtx elem;
{
rtx prev, link, next;
int found = 0;
for (prev = 0, link = LOG_LINKS (insn); link; link = next)
{
next = XEXP (link, 1);
if (XEXP (link, 0) == elem)
{
if (prev)
XEXP (prev, 1) = next;
else
LOG_LINKS (insn) = next;
#ifdef INSN_SCHEDULING
/* If we are removing a dependency from the LOG_LINKS list,
make sure to remove it from the cache too. */
if (true_dependency_cache != NULL)
{
if (REG_NOTE_KIND (link) == 0)
RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem));
}
#endif
free_INSN_LIST_node (link);
found = 1;
}
else
prev = link;
}
if (!found)
abort ();
return;
}
/* Return an insn which represents a SCHED_GROUP, which is
the last insn in the group. */
static rtx
group_leader (insn)
rtx insn;
{
rtx prev;
do
{
prev = insn;
insn = next_nonnote_insn (insn);
}
while (insn && INSN_P (insn) && SCHED_GROUP_P (insn));
return prev;
}
/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
goes along with that. */
@@ -462,26 +358,12 @@ static void
set_sched_group_p (insn)
rtx insn;
{
rtx link, prev;
rtx prev;
SCHED_GROUP_P (insn) = 1;
/* There may be a note before this insn now, but all notes will
be removed before we actually try to schedule the insns, so
it won't cause a problem later. We must avoid it here
though. */
prev = prev_nonnote_insn (insn);
/* Make a copy of all dependencies on the immediately previous
insn, and add to this insn. This is so that all the
dependencies will apply to the group. Remove an explicit
dependence on this insn as SCHED_GROUP_P now represents it. */
if (find_insn_list (prev, LOG_LINKS (insn)))
remove_dependence (insn, prev);
for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
add_dependence (insn, prev, REG_DEP_ANTI);
}
/* Process an insn's memory dependencies. There are four kinds of
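With the dependence copying gone, this one anti-dependence is all that keeps a group member glued behind its predecessor: the ordinary dependence bookkeeping releases members one at a time, which is why the group_leader () walks elsewhere could be deleted. A toy model of why the single link suffices; the types are simplified stand-ins, and the cc0 setter/user pair as the source of such groups is an assumption here, not something this patch states.

```c
#include <stdio.h>
#include <stddef.h>

/* Toy model, not GCC code: one anti-dependence per group member is
   enough to serialize the group through ordinary dependence checks.  */

struct toy_insn
{
  const char *name;
  struct toy_insn *dep; /* models the REG_DEP_ANTI link */
  int done;
};

int
main (void)
{
  struct toy_insn a = { "set_cc", NULL, 0 };
  struct toy_insn b = { "use_cc", &a, 0 };  /* the /S member */
  struct toy_insn *order[] = { &b, &a };    /* ready order is free */
  int issued = 0;

  /* Issue anything whose dependence (if any) has resolved.  */
  while (issued < 2)
    for (int i = 0; i < 2; i++)
      if (!order[i]->done
          && (order[i]->dep == NULL || order[i]->dep->done))
        {
          printf ("issue %s\n", order[i]->name); /* set_cc, then use_cc */
          order[i]->done = 1;
          issued++;
        }
  return 0;
}
```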
@@ -1446,11 +1328,9 @@ compute_forward_dependences (head, tail)
if (! INSN_P (insn))
continue;
insn = group_leader (insn);
for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
{
rtx x = group_leader (XEXP (link, 0));
rtx x = XEXP (link, 0);
rtx new_link;
if (x != XEXP (link, 0))

File: gcc/sched-ebb.c

@@ -90,17 +90,9 @@ init_ready_list (ready)
Count number of insns in the target block being scheduled. */
for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
{
rtx next;
if (! INSN_P (insn))
continue;
next = NEXT_INSN (insn);
if (INSN_DEP_COUNT (insn) == 0
&& (! INSN_P (next) || SCHED_GROUP_P (next) == 0))
if (INSN_DEP_COUNT (insn) == 0)
ready_add (ready, insn);
if (! SCHED_GROUP_P (insn))
target_n_insns++;
target_n_insns++;
}
}

File: gcc/sched-rgn.c

@@ -2023,17 +2023,9 @@ init_ready_list (ready)
Count number of insns in the target block being scheduled. */
for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
{
rtx next;
if (! INSN_P (insn))
continue;
next = NEXT_INSN (insn);
if (INSN_DEP_COUNT (insn) == 0
&& (! INSN_P (next) || SCHED_GROUP_P (next) == 0))
if (INSN_DEP_COUNT (insn) == 0)
ready_add (ready, insn);
if (! SCHED_GROUP_P (insn))
target_n_insns++;
target_n_insns++;
}
/* Add to ready list all 'ready' insns in valid source blocks.
@@ -2067,19 +2059,8 @@
insn, insn) <= 3)))
&& check_live (insn, bb_src)
&& is_exception_free (insn, bb_src, target_bb))))
{
rtx next;
/* Note that we haven't squirreled away the notes for
blocks other than the current. So if this is a
speculative insn, NEXT might otherwise be a note. */
next = next_nonnote_insn (insn);
if (INSN_DEP_COUNT (insn) == 0
&& (! next
|| ! INSN_P (next)
|| SCHED_GROUP_P (next) == 0))
ready_add (ready, insn);
}
if (INSN_DEP_COUNT (insn) == 0)
ready_add (ready, insn);
}
}
}
@@ -2097,7 +2078,6 @@
/* An interblock motion? */
if (INSN_BB (insn) != target_bb)
{
rtx temp;
basic_block b1;
if (IS_SPECULATIVE_INSN (insn))
@@ -2114,18 +2094,9 @@
}
nr_inter++;
/* Find the beginning of the scheduling group. */
/* ??? Ought to update basic block here, but later bits of
schedule_block assumes the original insn block is
still intact. */
temp = insn;
while (SCHED_GROUP_P (temp))
temp = PREV_INSN (temp);
/* Update source block boundaries. */
b1 = BLOCK_FOR_INSN (temp);
if (temp == b1->head && temp == b1->end)
b1 = BLOCK_FOR_INSN (insn);
if (insn == b1->head && insn == b1->end)
{
/* We moved all the insns in the basic block.
Emit a note after the last insn and update the
@@ -2139,9 +2110,9 @@
/* We took insns from the end of the basic block,
so update the end of block boundary so that it
points to the first insn we did not move. */
b1->end = PREV_INSN (temp);
b1->end = PREV_INSN (insn);
}
else if (temp == b1->head)
else if (insn == b1->head)
{
/* We took insns from the start of the basic block,
so update the start of block boundary so that
@@ -2361,17 +2332,6 @@ add_branch_dependences (head, tail)
CANT_MOVE (insn) = 1;
last = insn;
/* Skip over insns that are part of a group.
Make each insn explicitly depend on the previous insn.
This ensures that only the group header will ever enter
the ready queue (and, when scheduled, will automatically
schedule the SCHED_GROUP_P block). */
while (SCHED_GROUP_P (insn))
{
rtx temp = prev_nonnote_insn (insn);
add_dependence (insn, temp, REG_DEP_ANTI);
insn = temp;
}
}
/* Don't overrun the bounds of the basic block. */
@@ -2393,10 +2353,6 @@
add_dependence (last, insn, REG_DEP_ANTI);
INSN_REF_COUNT (insn) = 1;
/* Skip over insns that are part of a group. */
while (SCHED_GROUP_P (insn))
insn = prev_nonnote_insn (insn);
}
}