make some HAVE_cc0 code always compiled

gcc/ChangeLog:

2015-04-21  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>

	* cfgrtl.c (rtl_merge_blocks): Change #if HAVE_cc0 to if (HAVE_cc0).
	(try_redirect_by_replacing_jump): Likewise.
	(rtl_tidy_fallthru_edge): Likewise.
	* combine.c (insn_a_feeds_b): Likewise.
	(find_split_point): Likewise.
	(simplify_set): Likewise.
	* cprop.c (cprop_jump): Likewise.
	* cse.c (cse_extended_basic_block): Likewise.
	* df-problems.c (can_move_insns_across): Likewise.
	* function.c (emit_use_return_register_into_block): Likewise.
	* haifa-sched.c (sched_init): Likewise.
	* ira.c (find_moveable_pseudos): Likewise.
	* loop-invariant.c (find_invariant_insn): Likewise.
	* lra-constraints.c (curr_insn_transform): Likewise.
	* postreload.c (reload_combine_recognize_const_pattern):
	Likewise.
	* reload.c (find_reloads): Likewise.
	* reorg.c (delete_scheduled_jump): Likewise.
	(steal_delay_list_from_target): Likewise.
	(steal_delay_list_from_fallthrough): Likewise.
	(redundant_insn): Likewise.
	(fill_simple_delay_slots): Likewise.
	(fill_slots_from_thread): Likewise.
	(delete_computation): Likewise.
	* sched-rgn.c (add_branch_dependences): Likewise.

From-SVN: r222297
 15 files changed, 57 insertions(+), 78 deletions(-)
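
The change is one mechanical transformation applied fifteen times, and it
works only because of the companion genconfig.c change (visible in the
ChangeLog diff below), which makes HAVE_cc0 always defined, as 0 or 1, on
every target.  A minimal sketch of the before/after shape, adapted and
simplified from the cfgrtl.c hunk rather than quoted verbatim:

    /* Before: on non-cc0 targets the preprocessor discards this block,
       so it is never even parsed and can silently bit-rot.  */
    #if HAVE_cc0
      if (only_sets_cc0_p (prev))
        del_first = prev;
    #endif

    /* After: HAVE_cc0 is a compile-time constant 0 or 1 everywhere, so
       an ordinary C condition works.  The compiler now parses and
       type-checks the guarded code on every target, then folds the
       constant condition away, so non-cc0 targets still emit no code
       for it.  */
    if (HAVE_cc0 && only_sets_cc0_p (prev))
      del_first = prev;

Guards that sit inside larger expressions become parenthesized conjuncts,
e.g. || (HAVE_cc0 && sets_cc0_p (PATTERN (insn))), and negated guards
become disjunctions (see the df-problems.c note below).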

--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,31 @@
+2015-04-21  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+
+	* cfgrtl.c (rtl_merge_blocks): Change #if HAVE_cc0 to if (HAVE_cc0).
+	(try_redirect_by_replacing_jump): Likewise.
+	(rtl_tidy_fallthru_edge): Likewise.
+	* combine.c (insn_a_feeds_b): Likewise.
+	(find_split_point): Likewise.
+	(simplify_set): Likewise.
+	* cprop.c (cprop_jump): Likewise.
+	* cse.c (cse_extended_basic_block): Likewise.
+	* df-problems.c (can_move_insns_across): Likewise.
+	* function.c (emit_use_return_register_into_block): Likewise.
+	* haifa-sched.c (sched_init): Likewise.
+	* ira.c (find_moveable_pseudos): Likewise.
+	* loop-invariant.c (find_invariant_insn): Likewise.
+	* lra-constraints.c (curr_insn_transform): Likewise.
+	* postreload.c (reload_combine_recognize_const_pattern):
+	Likewise.
+	* reload.c (find_reloads): Likewise.
+	* reorg.c (delete_scheduled_jump): Likewise.
+	(steal_delay_list_from_target): Likewise.
+	(steal_delay_list_from_fallthrough): Likewise.
+	(redundant_insn): Likewise.
+	(fill_simple_delay_slots): Likewise.
+	(fill_slots_from_thread): Likewise.
+	(delete_computation): Likewise.
+	* sched-rgn.c (add_branch_dependences): Likewise.
+
+2015-04-21  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+
+	* genconfig.c (main): Always define HAVE_cc0.

--- a/gcc/cfgrtl.c
+++ b/gcc/cfgrtl.c
@@ -893,10 +893,9 @@ rtl_merge_blocks (basic_block a, basic_block b)
       del_first = a_end;
 
-#if HAVE_cc0
       /* If this was a conditional jump, we need to also delete
 	 the insn that set cc0.  */
-      if (only_sets_cc0_p (prev))
+      if (HAVE_cc0 && only_sets_cc0_p (prev))
 	{
 	  rtx_insn *tmp = prev;
@@ -905,7 +904,6 @@ rtl_merge_blocks (basic_block a, basic_block b)
 	    prev = BB_HEAD (a);
 	  del_first = tmp;
 	}
-#endif
 
       a_end = PREV_INSN (del_first);
     }
@@ -1064,11 +1062,9 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
   /* In case we zap a conditional jump, we'll need to kill
      the cc0 setter too.  */
   kill_from = insn;
-#if HAVE_cc0
-  if (reg_mentioned_p (cc0_rtx, PATTERN (insn))
+  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, PATTERN (insn))
       && only_sets_cc0_p (PREV_INSN (insn)))
     kill_from = PREV_INSN (insn);
-#endif
 
   /* See if we can create the fallthru edge.  */
   if (in_cfglayout || can_fallthru (src, target))
@@ -1825,12 +1821,10 @@ rtl_tidy_fallthru_edge (edge e)
 	  delete_insn (table);
 	}
 
-#if HAVE_cc0
       /* If this was a conditional jump, we need to also delete
 	 the insn that set cc0.  */
-      if (any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
+      if (HAVE_cc0 && any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
 	q = PREV_INSN (q);
-#endif
 
       q = PREV_INSN (q);
     }

--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -1141,10 +1141,8 @@ insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
   FOR_EACH_LOG_LINK (links, b)
     if (links->insn == a)
       return true;
 
-#if HAVE_cc0
-  if (sets_cc0_p (a))
+  if (HAVE_cc0 && sets_cc0_p (a))
     return true;
-#endif
 
   return false;
 }
@@ -4816,7 +4814,6 @@ find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
       break;
 
     case SET:
-#if HAVE_cc0
       /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
 	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
 	 we need to put the operand into a register.  So split at that
@@ -4829,7 +4826,6 @@ find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
 	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
 		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
 	return &SET_SRC (x);
-#endif
 
       /* See if we can split SET_SRC as it stands.  */
       split = find_split_point (&SET_SRC (x), insn, true);
@@ -6582,13 +6578,12 @@ simplify_set (rtx x)
 	  else
 	    compare_mode = SELECT_CC_MODE (new_code, op0, op1);
 
-#if !HAVE_cc0
 	  /* If the mode changed, we have to change SET_DEST, the mode in the
 	     compare, and the mode in the place SET_DEST is used.  If SET_DEST is
 	     a hard register, just build new versions with the proper mode.  If it
 	     is a pseudo, we lose unless it is only time we set the pseudo, in
 	     which case we can safely change its mode.  */
-	  if (compare_mode != GET_MODE (dest))
+	  if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
 	    {
 	      if (can_change_dest_mode (dest, 0, compare_mode))
 		{
@@ -6610,7 +6605,6 @@ simplify_set (rtx x)
 	      dest = new_dest;
 	    }
 	}
-#endif /* cc0 */
 #endif /* SELECT_CC_MODE */
 
       /* If the code changed, we have to build a new comparison in

--- a/gcc/cprop.c
+++ b/gcc/cprop.c
@@ -965,11 +965,9 @@ cprop_jump (basic_block bb, rtx_insn *setcc, rtx_insn *jump, rtx from, rtx src)
 	  remove_note (jump, note);
 	}
 
-#if HAVE_cc0
       /* Delete the cc0 setter.  */
-      if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
+      if (HAVE_cc0 && setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
 	delete_insn (setcc);
-#endif
 
       global_const_prop_count++;
       if (dump_file != NULL)

--- a/gcc/cse.c
+++ b/gcc/cse.c
@@ -6524,8 +6524,7 @@ cse_extended_basic_block (struct cse_basic_block_data *ebb_data)
 	      && check_for_label_ref (insn))
 	    recorded_label_ref = true;
 
-#if HAVE_cc0
-	  if (NONDEBUG_INSN_P (insn))
+	  if (HAVE_cc0 && NONDEBUG_INSN_P (insn))
 	    {
 	      /* If the previous insn sets CC0 and this insn no
 		 longer references CC0, delete the previous insn.
@@ -6552,7 +6551,6 @@ cse_extended_basic_block (struct cse_basic_block_data *ebb_data)
 		  prev_insn_cc0_mode = this_insn_cc0_mode;
 		}
 	    }
-#endif
 	}
     }

--- a/gcc/df-problems.c
+++ b/gcc/df-problems.c
@@ -3820,9 +3820,7 @@ can_move_insns_across (rtx_insn *from, rtx_insn *to,
 	      if (bitmap_intersect_p (merge_set, test_use)
 		  || bitmap_intersect_p (merge_use, test_set))
 		break;
 
-#if HAVE_cc0
-	      if (!sets_cc0_p (insn))
-#endif
+	      if (!HAVE_cc0 || !sets_cc0_p (insn))
 		max_to = insn;
 	    }
 	  next = NEXT_INSN (insn);
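
Note that this df-problems.c hunk is the one place where the rewrite is
not a plain "prepend HAVE_cc0 &&": the #if guarded only the if, not the
statement under it, so on non-cc0 targets the assignment used to run
unconditionally.  Flattening the guard therefore inverts it into a
disjunction.  A sketch of the equivalence, using the names from the hunk
above:

    /* Old: the condition existed only on cc0 targets; elsewhere the
       assignment was unconditional.  */
    #if HAVE_cc0
      if (!sets_cc0_p (insn))
    #endif
        max_to = insn;

    /* New: "always, unless this is a cc0 target and insn sets cc0."
       On non-cc0 targets !HAVE_cc0 is 1 and the call folds away.  */
    if (!HAVE_cc0 || !sets_cc0_p (insn))
      max_to = insn;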

--- a/gcc/function.c
+++ b/gcc/function.c
@@ -5661,10 +5661,9 @@ emit_use_return_register_into_block (basic_block bb)
   seq = get_insns ();
   end_sequence ();
   insn = BB_END (bb);
-#if HAVE_cc0
-  if (reg_mentioned_p (cc0_rtx, PATTERN (insn)))
+  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, PATTERN (insn)))
     insn = prev_cc0_setter (insn);
-#endif
+
   emit_insn_before (seq, insn);
 }

--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -7184,9 +7184,8 @@ void
 sched_init (void)
 {
   /* Disable speculative loads in their presence if cc0 defined.  */
-#if HAVE_cc0
+  if (HAVE_cc0)
     flag_schedule_speculative_load = 0;
-#endif
 
   if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
     targetm.sched.dispatch_do (NULL, DISPATCH_INIT);

--- a/gcc/ira.c
+++ b/gcc/ira.c
@@ -4641,15 +4641,14 @@ find_moveable_pseudos (void)
 		     ? " (no unique first use)" : "");
 	  continue;
 	}
-#if HAVE_cc0
-      if (reg_referenced_p (cc0_rtx, PATTERN (closest_use)))
+      if (HAVE_cc0 && reg_referenced_p (cc0_rtx, PATTERN (closest_use)))
 	{
 	  if (dump_file)
 	    fprintf (dump_file, "Reg %d: closest user uses cc0\n",
 		     regno);
 	  continue;
 	}
-#endif
+
       bitmap_set_bit (&interesting, regno);
       /* If we get here, we know closest_use is a non-NULL insn
 	 (as opposed to const_0_rtx).  */

--- a/gcc/loop-invariant.c
+++ b/gcc/loop-invariant.c
@@ -923,11 +923,9 @@ find_invariant_insn (rtx_insn *insn, bool always_reached, bool always_executed)
   bool simple = true;
   struct invariant *inv;
 
-#if HAVE_cc0
   /* We can't move a CC0 setter without the user.  */
-  if (sets_cc0_p (insn))
+  if (HAVE_cc0 && sets_cc0_p (insn))
     return;
-#endif
 
   set = single_set (insn);
   if (!set)

--- a/gcc/lra-constraints.c
+++ b/gcc/lra-constraints.c
@@ -3354,12 +3354,10 @@ curr_insn_transform (bool check_only_p)
   if (JUMP_P (curr_insn) || CALL_P (curr_insn))
     no_output_reloads_p = true;
 
-#if HAVE_cc0
-  if (reg_referenced_p (cc0_rtx, PATTERN (curr_insn)))
+  if (HAVE_cc0 && reg_referenced_p (cc0_rtx, PATTERN (curr_insn)))
     no_input_reloads_p = true;
-  if (reg_set_p (cc0_rtx, PATTERN (curr_insn)))
+  if (HAVE_cc0 && reg_set_p (cc0_rtx, PATTERN (curr_insn)))
     no_output_reloads_p = true;
-#endif
 
   n_operands = curr_static_id->n_operands;
   n_alternatives = curr_static_id->n_alternatives;

--- a/gcc/postreload.c
+++ b/gcc/postreload.c
@@ -1032,11 +1032,9 @@ reload_combine_recognize_const_pattern (rtx_insn *insn)
 	  && reg_state[clobbered_regno].real_store_ruid >= use_ruid)
 	break;
 
-#if HAVE_cc0
       /* Do not separate cc0 setter and cc0 user on HAVE_cc0 targets.  */
-      if (must_move_add && sets_cc0_p (PATTERN (use_insn)))
+      if (HAVE_cc0 && must_move_add && sets_cc0_p (PATTERN (use_insn)))
 	break;
-#endif
 
       gcc_assert (reg_state[regno].store_ruid <= use_ruid);
       /* Avoid moving a use of ADDREG past a point where it is stored.  */

--- a/gcc/reload.c
+++ b/gcc/reload.c
@@ -2706,12 +2706,10 @@ find_reloads (rtx_insn *insn, int replace, int ind_levels, int live_known,
   if (JUMP_P (insn) || CALL_P (insn))
     no_output_reloads = 1;
 
-#if HAVE_cc0
-  if (reg_referenced_p (cc0_rtx, PATTERN (insn)))
+  if (HAVE_cc0 && reg_referenced_p (cc0_rtx, PATTERN (insn)))
     no_input_reloads = 1;
-  if (reg_set_p (cc0_rtx, PATTERN (insn)))
+  if (HAVE_cc0 && reg_set_p (cc0_rtx, PATTERN (insn)))
     no_output_reloads = 1;
-#endif
 
 #ifdef SECONDARY_MEMORY_NEEDED
   /* The eliminated forms of any secondary memory locations are per-insn, so
@@ -4579,16 +4577,14 @@ find_reloads (rtx_insn *insn, int replace, int ind_levels, int live_known,
 	    rld[j].in = 0;
 	  }
 
-#if HAVE_cc0
   /* If we made any reloads for addresses, see if they violate a
      "no input reloads" requirement for this insn.  But loads that we
      do after the insn (such as for output addresses) are fine.  */
-  if (no_input_reloads)
+  if (HAVE_cc0 && no_input_reloads)
     for (i = 0; i < n_reloads; i++)
       gcc_assert (rld[i].in == 0
 		  || rld[i].when_needed == RELOAD_FOR_OUTADDR_ADDRESS
 		  || rld[i].when_needed == RELOAD_FOR_OUTPUT_ADDRESS);
-#endif
 
   /* Compute reload_mode and reload_nregs.  */
   for (i = 0; i < n_reloads; i++)

--- a/gcc/reorg.c
+++ b/gcc/reorg.c
@@ -182,7 +182,6 @@ skip_consecutive_labels (rtx label_or_return)
   return label;
 }
 
-#if HAVE_cc0
 /* INSN uses CC0 and is being moved into a delay slot.  Set up REG_CC_SETTER
    and REG_CC_USER notes so we can find it.  */
@@ -197,7 +196,6 @@ link_cc0_insns (rtx insn)
   add_reg_note (user, REG_CC_SETTER, insn);
   add_reg_note (insn, REG_CC_USER, user);
 }
-#endif
 
 /* Insns which have delay slots that have not yet been filled.  */
@@ -699,8 +697,7 @@ delete_scheduled_jump (rtx_insn *insn)
      be other insns that became dead anyway, which we wouldn't know to
      delete.  */
 
-#if HAVE_cc0
-  if (reg_mentioned_p (cc0_rtx, insn))
+  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, insn))
     {
       rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
 
@@ -730,7 +727,6 @@ delete_scheduled_jump (rtx_insn *insn)
 	    delete_from_delay_slot (trial);
 	}
     }
-#endif
 
   delete_related_insns (insn);
 }
@@ -1171,11 +1167,9 @@ steal_delay_list_from_target (rtx_insn *insn, rtx condition, rtx_sequence *seq,
       if (insn_references_resource_p (trial, sets, false)
 	  || insn_sets_resource_p (trial, needed, false)
 	  || insn_sets_resource_p (trial, sets, false)
-#if HAVE_cc0
 	  /* If TRIAL sets CC0, we can't copy it, so we can't steal this
 	     delay list.  */
-	  || find_reg_note (trial, REG_CC_USER, NULL_RTX)
-#endif
+	  || (HAVE_cc0 && find_reg_note (trial, REG_CC_USER, NULL_RTX))
 	  /* If TRIAL is from the fallthrough code of an annulled branch insn
 	     in SEQ, we cannot use it.  */
 	  || (INSN_ANNULLED_BRANCH_P (seq->insn (0))
@@ -1279,10 +1273,7 @@ steal_delay_list_from_fallthrough (rtx_insn *insn, rtx condition,
       if (insn_references_resource_p (trial, sets, false)
 	  || insn_sets_resource_p (trial, needed, false)
 	  || insn_sets_resource_p (trial, sets, false)
-#if HAVE_cc0
-	  || sets_cc0_p (PATTERN (trial))
-#endif
-	  )
+	  || (HAVE_cc0 && sets_cc0_p (PATTERN (trial))))
 	break;
@@ -1629,9 +1620,7 @@ redundant_insn (rtx insn, rtx_insn *target, rtx delay_list)
     target_main = XVECEXP (PATTERN (target), 0, 0);
 
   if (resource_conflicts_p (&needed, &set)
-#if HAVE_cc0
-      || reg_mentioned_p (cc0_rtx, ipat)
-#endif
+      || (HAVE_cc0 && reg_mentioned_p (cc0_rtx, ipat))
       /* The insn requiring the delay may not set anything needed or set by
 	 INSN.  */
       || insn_sets_resource_p (target_main, &needed, true)
@@ -2270,10 +2259,9 @@ fill_simple_delay_slots (int non_jumps_p)
 	    {
 	      next_trial = next_nonnote_insn (trial);
 	      delay_list = add_to_delay_list (trial, delay_list);
-#if HAVE_cc0
-	      if (reg_mentioned_p (cc0_rtx, pat))
+	      if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, pat))
 		link_cc0_insns (trial);
-#endif
+
 	      delete_related_insns (trial);
 	      if (slots_to_fill == ++slots_filled)
 		break;
@@ -2605,10 +2593,8 @@ fill_slots_from_thread (rtx_insn *insn, rtx condition, rtx thread_or_return,
 		must_annul = 1;
 	      winner:
 
-#if HAVE_cc0
-	      if (reg_mentioned_p (cc0_rtx, pat))
+	      if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, pat))
 		link_cc0_insns (trial);
-#endif
 
 	      /* If we own this thread, delete the insn.  If this is the
 		 destination of a branch, show that a basic block status
@@ -3161,8 +3147,7 @@ delete_computation (rtx insn)
 {
   rtx note, next;
 
-#if HAVE_cc0
-  if (reg_referenced_p (cc0_rtx, PATTERN (insn)))
+  if (HAVE_cc0 && reg_referenced_p (cc0_rtx, PATTERN (insn)))
     {
       rtx prev = prev_nonnote_insn (insn);
 
       /* We assume that at this stage
@@ -3182,7 +3167,6 @@ delete_computation (rtx insn)
 	    add_reg_note (prev, REG_UNUSED, cc0_rtx);
 	}
     }
-#endif
 
   for (note = REG_NOTES (insn); note; note = next)
     {

--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -2487,9 +2487,7 @@ add_branch_dependences (rtx_insn *head, rtx_insn *tail)
 	      && (GET_CODE (PATTERN (insn)) == USE
 		  || GET_CODE (PATTERN (insn)) == CLOBBER
 		  || can_throw_internal (insn)
-#if HAVE_cc0
-		  || sets_cc0_p (PATTERN (insn))
-#endif
+		  || (HAVE_cc0 && sets_cc0_p (PATTERN (insn)))
 		  || (!reload_completed
 		      && sets_likely_spilled (PATTERN (insn)))))
 	  || NOTE_P (insn)