basic-block.h (single_succ_p, [...]): New inline functions.

* basic-block.h (single_succ_p, single_pred_p, single_succ_edge,
	single_pred_edge, single_succ, single_pred): New inline functions.
	* bb-reorder.c (rotate_loop, find_traces_1_round,
	add_labels_and_missing_jumps, fix_up_fall_thru_edges,
	duplicate_computed_gotos): Use the single_succ/pred functions.
	* cfganal.c (forwarder_block_p): Ditto.
	* cfgbuild.c (compute_outgoing_frequencies): Ditto.
	* cfgcleanup.c (try_simplify_condjump, try_forward_edges,
	outgoing_edges_match, try_crossjump_to_edge, try_optimize_cfg,
	merge_seq_blocks): Ditto.
	* cfghooks.c (split_edge, tidy_fallthru_edges): Ditto.
	* cfglayout.c (fixup_reorder_chain): Ditto.
	* cfgloop.c (mark_single_exit_loops, update_latch_info,
	canonicalize_loop_headers, verify_loop_structure): Ditto.
	* cfgloopmanip.c (remove_path, unloop, loop_delete_branch_edge,
	mfb_update_loops, create_preheader, force_single_succ_latches,
	create_loop_notes): Ditto.
	* cfgrtl.c (rtl_can_merge_blocks, try_redirect_by_replacing_jump,
	force_nonfallthru_and_redirect, rtl_tidy_fallthru_edge,
	commit_one_edge_insertion, purge_dead_edges,
	cfg_layout_can_merge_blocks_p): Ditto.
	* except.c (sjlj_emit_function_enter): Ditto.
	* flow.c (init_propagate_block_info): Ditto.
	* function.c (thread_prologue_and_epilogue_insns): Ditto.
	* gcse.c (find_implicit_sets, bypass_conditional_jumps,
	insert_insn_end_bb): Ditto.
	* ifcvt.c (merge_if_block, find_if_block, find_if_case_1,
	find_if_case_2): Ditto.
	* lambda-code.c (perfect_nestify): Ditto.
	* lcm.c (optimize_mode_switching): Ditto.
	* loop-doloop.c (doloop_modify): Ditto.
	* loop-init.c (loop_optimizer_init): Ditto.
	* loop-iv.c (simplify_using_initial_values): Ditto.
	* loop-unroll.c (unroll_loop_runtime_iterations): Ditto.
	* loop-unswitch.c (unswitch_loop): Ditto.
	* modulo-sched.c (generate_prolog_epilog): Ditto.
	* predict.c (combine_predictions_for_insn, estimate_probability,
	tree_estimate_probability, last_basic_block_p,
	estimate_bb_frequencies): Ditto.
	* profile.c (branch_prob): Ditto.
	* regrename.c (copyprop_hardreg_forward): Ditto.
	* sched-rgn.c (is_cfg_nonregular, find_rgns, update_live): Ditto.
	* tracer.c (layout_superblocks): Ditto.
	* tree-cfg.c (tree_can_merge_blocks_p, tree_merge_blocks,
	cfg_remove_useless_stmts_bb, cleanup_control_flow,
	cleanup_control_expr_graph, disband_implicit_edges,
	tree_find_edge_insert_loc, bsi_commit_edge_inserts,
	tree_verify_flow_info, tree_make_forwarder_block,
	tree_forwarder_block_p, remove_forwarder_block,
	remove_forwarder_block_with_phi, merge_phi_nodes): Ditto.
	* tree-if-conv.c (tree_if_conversion): Ditto.
	* tree-mudflap.c (mf_build_check_statement_for): Ditto.
	* tree-ssa-dce.c (remove_dead_stmt): Ditto.
	* tree-ssa-dom.c (dom_opt_finalize_block): Ditto.
	* tree-ssa-loop-ch.c (should_duplicate_loop_header_p,
	copy_loop_headers): Ditto.
	* tree-ssa-loop-im.c (loop_commit_inserts): Ditto.
	* tree-ssa-loop-ivopts.c (compute_phi_arg_on_exit): Ditto.
	* tree-ssa-loop-manip.c (split_loop_exit_edge, ip_normal_pos,
	lv_adjust_loop_entry_edge, tree_ssa_loop_version): Ditto.
	* tree-ssa-loop-niter.c (simplify_using_initial_conditions): Ditto.
	* tree-ssa-loop-unswitch.c (simplify_using_entry_checks): Ditto.
	* tree-ssa-phiopt.c (tree_ssa_phiopt, value_replacement): Ditto.
	* tree-ssa-pre.c (compute_antic_aux, insert_aux, init_pre): Ditto.
	* tree-ssa-threadupdate.c (redirect_edges): Ditto.
	* tree-tailcall.c (independent_of_stmt_p, find_tail_calls,
	eliminate_tail_call, tree_optimize_tail_calls_1): Ditto.
	* tree-vect-analyze.c (vect_analyze_loop_form): Ditto.
	* tree-vect-transform.c (vect_update_ivs_after_vectorizer): Ditto.
	* tree-vectorizer.c (slpeel_update_phi_nodes_for_guard,
	slpeel_add_loop_guard): Ditto.

From-SVN: r96292
Author: Zdenek Dvorak <dvorakz@suse.cz>
Date:   2005-03-11 10:05:12 +01:00
Commit: c5cbcccf90 (parent 0b2df4a703)

47 changed files with 448 additions and 324 deletions
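
The change below is mechanical: each open-coded test or lookup on the edge
vectors is replaced by the corresponding new accessor. A rough sketch of the
recurring pattern (illustrative only; `other' is a placeholder here, the real
call sites are in the hunks that follow):

    /* Before: inspect the successor vector directly.  */
    if (EDGE_COUNT (bb->succs) == 1
        && EDGE_SUCC (bb, 0)->dest == other)
      merge_blocks (bb, EDGE_SUCC (bb, 0)->dest);

    /* After: the same test through the new basic-block.h helpers.  */
    if (single_succ_p (bb)
        && single_succ (bb) == other)
      merge_blocks (bb, single_succ (bb));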

basic-block.h

@ -547,6 +547,56 @@ struct edge_list
#define EDGE_PRED(bb,i) VEC_index (edge, (bb)->preds, (i))
#define EDGE_SUCC(bb,i) VEC_index (edge, (bb)->succs, (i))
/* Returns true if BB has precisely one successor. */
static inline bool
single_succ_p (basic_block bb)
{
return EDGE_COUNT (bb->succs) == 1;
}
/* Returns true if BB has precisely one predecessor. */
static inline bool
single_pred_p (basic_block bb)
{
return EDGE_COUNT (bb->preds) == 1;
}
/* Returns the single successor edge of basic block BB. */
static inline edge
single_succ_edge (basic_block bb)
{
gcc_assert (single_succ_p (bb));
return EDGE_SUCC (bb, 0);
}
/* Returns the single predecessor edge of basic block BB. */
static inline edge
single_pred_edge (basic_block bb)
{
gcc_assert (single_pred_p (bb));
return EDGE_PRED (bb, 0);
}
/* Returns the single successor block of basic block BB. */
static inline basic_block
single_succ (basic_block bb)
{
return single_succ_edge (bb)->dest;
}
/* Returns the single predecessor block of basic block BB. */
static inline basic_block
single_pred (basic_block bb)
{
return single_pred_edge (bb)->src;
}
/* Iterator object for edges. */
typedef struct {

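A purely hypothetical use of the new accessors (not part of the patch), in the
style of the probability fixups in the hunks further down: hand the only
outgoing edge of BB the whole probability and count of the block.

    /* Illustration only; relies just on the definitions added above.  */
    static void
    push_flow_to_single_succ (basic_block bb)
    {
      /* single_succ_edge asserts that BB really has one successor.  */
      edge e = single_succ_edge (bb);
      e->probability = REG_BR_PROB_BASE;
      e->count = bb->count;
    }
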
bb-reorder.c

@ -385,18 +385,16 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n)
prev_bb->rbi->next = best_bb->rbi->next;
/* Try to get rid of uncond jump to cond jump. */
if (EDGE_COUNT (prev_bb->succs) == 1)
if (single_succ_p (prev_bb))
{
basic_block header = EDGE_SUCC (prev_bb, 0)->dest;
basic_block header = single_succ (prev_bb);
/* Duplicate HEADER if it is a small block containing cond jump
in the end. */
if (any_condjump_p (BB_END (header)) && copy_bb_p (header, 0)
&& !find_reg_note (BB_END (header), REG_CROSSING_JUMP,
NULL_RTX))
{
copy_bb (header, EDGE_SUCC (prev_bb, 0), prev_bb, trace_n);
}
copy_bb (header, single_succ_edge (prev_bb), prev_bb, trace_n);
}
}
}
@ -655,7 +653,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
{
/* The loop has less than 4 iterations. */
if (EDGE_COUNT (bb->succs) == 1
if (single_succ_p (bb)
&& copy_bb_p (best_edge->dest, !optimize_size))
{
bb = copy_bb (best_edge->dest, best_edge, bb,
@ -695,12 +693,13 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
&& (e->flags & EDGE_CAN_FALLTHRU)
&& !(e->flags & EDGE_COMPLEX)
&& !e->dest->rbi->visited
&& EDGE_COUNT (e->dest->preds) == 1
&& single_pred_p (e->dest)
&& !(e->flags & EDGE_CROSSING)
&& EDGE_COUNT (e->dest->succs) == 1
&& (EDGE_SUCC (e->dest, 0)->flags & EDGE_CAN_FALLTHRU)
&& !(EDGE_SUCC (e->dest, 0)->flags & EDGE_COMPLEX)
&& EDGE_SUCC (e->dest, 0)->dest == best_edge->dest
&& single_succ_p (e->dest) == 1
&& (single_succ_edge (e->dest)->flags
& EDGE_CAN_FALLTHRU)
&& !(single_succ_edge (e->dest)->flags & EDGE_COMPLEX)
&& single_succ (e->dest) == best_edge->dest
&& 2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge))
{
best_edge = e;
@ -1391,7 +1390,7 @@ add_labels_and_missing_jumps (edge *crossing_edges, int n_crossing_edges)
/* bb just falls through. */
{
/* make sure there's only one successor */
gcc_assert (EDGE_COUNT (src->succs) == 1);
gcc_assert (single_succ_p (src));
/* Find label in dest block. */
label = block_label (dest);
@ -1533,7 +1532,7 @@ fix_up_fall_thru_edges (void)
partition as bb it's falling through from. */
BB_COPY_PARTITION (new_bb, cur_bb);
EDGE_SUCC (new_bb, 0)->flags |= EDGE_CROSSING;
single_succ_edge (new_bb)->flags |= EDGE_CROSSING;
}
/* Add barrier after new jump */
@ -2085,17 +2084,17 @@ duplicate_computed_gotos (void)
/* BB must have one outgoing edge. That edge must not lead to
the exit block or the next block.
The destination must have more than one predecessor. */
if (EDGE_COUNT(bb->succs) != 1
|| EDGE_SUCC(bb,0)->dest == EXIT_BLOCK_PTR
|| EDGE_SUCC(bb,0)->dest == bb->next_bb
|| EDGE_COUNT(EDGE_SUCC(bb,0)->dest->preds) <= 1)
if (!single_succ_p (bb)
|| single_succ (bb) == EXIT_BLOCK_PTR
|| single_succ (bb) == bb->next_bb
|| single_pred_p (single_succ (bb)))
continue;
/* The successor block has to be a duplication candidate. */
if (!bitmap_bit_p (candidates, EDGE_SUCC(bb,0)->dest->index))
if (!bitmap_bit_p (candidates, single_succ (bb)->index))
continue;
new_bb = duplicate_block (EDGE_SUCC(bb,0)->dest, EDGE_SUCC(bb,0));
new_bb = duplicate_block (single_succ (bb), single_succ_edge (bb));
new_bb->rbi->next = bb->rbi->next;
bb->rbi->next = new_bb;
new_bb->rbi->visited = 1;

cfganal.c

@ -87,7 +87,7 @@ forwarder_block_p (basic_block bb)
rtx insn;
if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR
|| EDGE_COUNT (bb->succs) != 1)
|| !single_succ_p (bb))
return false;
for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))

cfgbuild.c

@ -708,9 +708,9 @@ compute_outgoing_frequencies (basic_block b)
}
}
if (EDGE_COUNT (b->succs) == 1)
if (single_succ_p (b))
{
e = EDGE_SUCC (b, 0);
e = single_succ_edge (b);
e->probability = REG_BR_PROB_BASE;
e->count = b->count;
return;

cfgcleanup.c

@ -139,11 +139,11 @@ try_simplify_condjump (basic_block cbranch_block)
be the last block in the function, and must contain just the
unconditional jump. */
jump_block = cbranch_fallthru_edge->dest;
if (EDGE_COUNT (jump_block->preds) >= 2
if (!single_pred_p (jump_block)
|| jump_block->next_bb == EXIT_BLOCK_PTR
|| !FORWARDER_BLOCK_P (jump_block))
return false;
jump_dest_block = EDGE_SUCC (jump_block, 0)->dest;
jump_dest_block = single_succ (jump_block);
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
@ -483,13 +483,13 @@ try_forward_edges (int mode, basic_block b)
may_thread |= target->flags & BB_DIRTY;
if (FORWARDER_BLOCK_P (target)
&& !(EDGE_SUCC (target, 0)->flags & EDGE_CROSSING)
&& EDGE_SUCC (target, 0)->dest != EXIT_BLOCK_PTR)
&& !(single_succ_edge (target)->flags & EDGE_CROSSING)
&& single_succ (target) != EXIT_BLOCK_PTR)
{
/* Bypass trivial infinite loops. */
if (target == EDGE_SUCC (target, 0)->dest)
new_target = single_succ (target);
if (target == new_target)
counter = n_basic_blocks;
new_target = EDGE_SUCC (target, 0)->dest;
}
/* Allow to thread only over one edge at time to simplify updating
@ -618,7 +618,7 @@ try_forward_edges (int mode, basic_block b)
{
edge t;
if (EDGE_COUNT (first->succs) > 1)
if (!single_succ_p (first))
{
gcc_assert (n < nthreaded_edges);
t = threaded_edges [n++];
@ -642,7 +642,7 @@ try_forward_edges (int mode, basic_block b)
if (n < nthreaded_edges
&& first == threaded_edges [n]->src)
n++;
t = EDGE_SUCC (first, 0);
t = single_succ_edge (first);
}
t->count -= edge_count;
@ -1233,11 +1233,12 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
/* If BB1 has only one successor, we may be looking at either an
unconditional jump, or a fake edge to exit. */
if (EDGE_COUNT (bb1->succs) == 1
&& (EDGE_SUCC (bb1, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
if (single_succ_p (bb1)
&& (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
&& (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
return (EDGE_COUNT (bb2->succs) == 1
&& (EDGE_SUCC (bb2, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
return (single_succ_p (bb2)
&& (single_succ_edge (bb2)->flags
& (EDGE_COMPLEX | EDGE_FAKE)) == 0
&& (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
/* Match conditional jumps - this may get tricky when fallthru and branch
@ -1264,10 +1265,10 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
/* Get around possible forwarders on fallthru edges. Other cases
should be optimized out already. */
if (FORWARDER_BLOCK_P (f1->dest))
f1 = EDGE_SUCC (f1->dest, 0);
f1 = single_succ_edge (f1->dest);
if (FORWARDER_BLOCK_P (f2->dest))
f2 = EDGE_SUCC (f2->dest, 0);
f2 = single_succ_edge (f2->dest);
/* To simplify use of this function, return false if there are
unneeded forwarder blocks. These will get eliminated later
@ -1463,9 +1464,9 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
if (fallthru1)
{
basic_block d1 = (forwarder_block_p (fallthru1->dest)
? EDGE_SUCC (fallthru1->dest, 0)->dest: fallthru1->dest);
? single_succ (fallthru1->dest): fallthru1->dest);
basic_block d2 = (forwarder_block_p (fallthru2->dest)
? EDGE_SUCC (fallthru2->dest, 0)->dest: fallthru2->dest);
? single_succ (fallthru2->dest): fallthru2->dest);
if (d1 != d2)
return false;
@ -1520,13 +1521,13 @@ try_crossjump_to_edge (int mode, edge e1, edge e2)
about multiple entry or chained forwarders, as they will be optimized
away. We do this to look past the unconditional jump following a
conditional jump that is required due to the current CFG shape. */
if (EDGE_COUNT (src1->preds) == 1
if (single_pred_p (src1)
&& FORWARDER_BLOCK_P (src1))
e1 = EDGE_PRED (src1, 0), src1 = e1->src;
e1 = single_pred_edge (src1), src1 = e1->src;
if (EDGE_COUNT (src2->preds) == 1
if (single_pred_p (src2)
&& FORWARDER_BLOCK_P (src2))
e2 = EDGE_PRED (src2, 0), src2 = e2->src;
e2 = single_pred_edge (src2), src2 = e2->src;
/* Nothing to do if we reach ENTRY, or a common source block. */
if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
@ -1536,11 +1537,11 @@ try_crossjump_to_edge (int mode, edge e1, edge e2)
/* Seeing more than 1 forwarder blocks would confuse us later... */
if (FORWARDER_BLOCK_P (e1->dest)
&& FORWARDER_BLOCK_P (EDGE_SUCC (e1->dest, 0)->dest))
&& FORWARDER_BLOCK_P (single_succ (e1->dest)))
return false;
if (FORWARDER_BLOCK_P (e2->dest)
&& FORWARDER_BLOCK_P (EDGE_SUCC (e2->dest, 0)->dest))
&& FORWARDER_BLOCK_P (single_succ (e2->dest)))
return false;
/* Likewise with dead code (possibly newly created by the other optimizations
@ -1623,13 +1624,13 @@ try_crossjump_to_edge (int mode, edge e1, edge e2)
basic_block d = s->dest;
if (FORWARDER_BLOCK_P (d))
d = EDGE_SUCC (d, 0)->dest;
d = single_succ (d);
FOR_EACH_EDGE (s2, ei, src1->succs)
{
basic_block d2 = s2->dest;
if (FORWARDER_BLOCK_P (d2))
d2 = EDGE_SUCC (d2, 0)->dest;
d2 = single_succ (d2);
if (d == d2)
break;
}
@ -1641,16 +1642,16 @@ try_crossjump_to_edge (int mode, edge e1, edge e2)
into infinite loop. */
if (FORWARDER_BLOCK_P (s->dest))
{
EDGE_SUCC (s->dest, 0)->count += s2->count;
single_succ_edge (s->dest)->count += s2->count;
s->dest->count += s2->count;
s->dest->frequency += EDGE_FREQUENCY (s);
}
if (FORWARDER_BLOCK_P (s2->dest))
{
EDGE_SUCC (s2->dest, 0)->count -= s2->count;
if (EDGE_SUCC (s2->dest, 0)->count < 0)
EDGE_SUCC (s2->dest, 0)->count = 0;
single_succ_edge (s2->dest)->count -= s2->count;
if (single_succ_edge (s2->dest)->count < 0)
single_succ_edge (s2->dest)->count = 0;
s2->dest->count -= s2->count;
s2->dest->frequency -= EDGE_FREQUENCY (s);
if (s2->dest->frequency < 0)
@ -1680,9 +1681,9 @@ try_crossjump_to_edge (int mode, edge e1, edge e2)
newpos1 = NEXT_INSN (newpos1);
redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
to_remove = EDGE_SUCC (redirect_from, 0)->dest;
to_remove = single_succ (redirect_from);
redirect_edge_and_branch_force (EDGE_SUCC (redirect_from, 0), redirect_to);
redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
delete_basic_block (to_remove);
update_forwarder_flag (redirect_from);
@ -1884,9 +1885,9 @@ try_optimize_cfg (int mode)
}
/* Remove code labels no longer used. */
if (EDGE_COUNT (b->preds) == 1
&& (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
&& !(EDGE_PRED (b, 0)->flags & EDGE_COMPLEX)
if (single_pred_p (b)
&& (single_pred_edge (b)->flags & EDGE_FALLTHRU)
&& !(single_pred_edge (b)->flags & EDGE_COMPLEX)
&& LABEL_P (BB_HEAD (b))
/* If the previous block ends with a branch to this
block, we can't delete the label. Normally this
@ -1894,10 +1895,10 @@ try_optimize_cfg (int mode)
if CASE_DROPS_THRU, this can be a tablejump with
some element going to the same place as the
default (fallthru). */
&& (EDGE_PRED (b, 0)->src == ENTRY_BLOCK_PTR
|| !JUMP_P (BB_END (EDGE_PRED (b, 0)->src))
&& (single_pred (b) == ENTRY_BLOCK_PTR
|| !JUMP_P (BB_END (single_pred (b)))
|| ! label_is_jump_target_p (BB_HEAD (b),
BB_END (EDGE_PRED (b, 0)->src))))
BB_END (single_pred (b)))))
{
rtx label = BB_HEAD (b);
@ -1918,13 +1919,13 @@ try_optimize_cfg (int mode)
/* If we fall through an empty block, we can remove it. */
if (!(mode & CLEANUP_CFGLAYOUT)
&& EDGE_COUNT (b->preds) == 1
&& (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
&& single_pred_p (b)
&& (single_pred_edge (b)->flags & EDGE_FALLTHRU)
&& !LABEL_P (BB_HEAD (b))
&& FORWARDER_BLOCK_P (b)
/* Note that forwarder_block_p true ensures that
there is a successor for this block. */
&& (EDGE_SUCC (b, 0)->flags & EDGE_FALLTHRU)
&& (single_succ_edge (b)->flags & EDGE_FALLTHRU)
&& n_basic_blocks > 1)
{
if (dump_file)
@ -1933,17 +1934,18 @@ try_optimize_cfg (int mode)
b->index);
c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
redirect_edge_succ_nodup (EDGE_PRED (b, 0), EDGE_SUCC (b, 0)->dest);
redirect_edge_succ_nodup (single_pred_edge (b),
single_succ (b));
delete_basic_block (b);
changed = true;
b = c;
}
if (EDGE_COUNT (b->succs) == 1
&& (s = EDGE_SUCC (b, 0))
if (single_succ_p (b)
&& (s = single_succ_edge (b))
&& !(s->flags & EDGE_COMPLEX)
&& (c = s->dest) != EXIT_BLOCK_PTR
&& EDGE_COUNT (c->preds) == 1
&& single_pred_p (c)
&& b != c)
{
/* When not in cfg_layout mode use code aware of reordering
@ -1985,11 +1987,12 @@ try_optimize_cfg (int mode)
non-trivial jump instruction without side-effects, we
can either delete the jump entirely, or replace it
with a simple unconditional jump. */
if (EDGE_COUNT (b->succs) == 1
&& EDGE_SUCC (b, 0)->dest != EXIT_BLOCK_PTR
if (single_succ_p (b)
&& single_succ (b) != EXIT_BLOCK_PTR
&& onlyjump_p (BB_END (b))
&& !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
&& try_redirect_by_replacing_jump (EDGE_SUCC (b, 0), EDGE_SUCC (b, 0)->dest,
&& try_redirect_by_replacing_jump (single_succ_edge (b),
single_succ (b),
(mode & CLEANUP_CFGLAYOUT) != 0))
{
update_forwarder_flag (b);
@ -2074,11 +2077,11 @@ merge_seq_blocks (void)
for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
{
if (EDGE_COUNT (bb->succs) == 1
&& can_merge_blocks_p (bb, EDGE_SUCC (bb, 0)->dest))
if (single_succ_p (bb)
&& can_merge_blocks_p (bb, single_succ (bb)))
{
/* Merge the blocks and retry. */
merge_blocks (bb, EDGE_SUCC (bb, 0)->dest);
merge_blocks (bb, single_succ (bb));
changed = true;
continue;
}

cfghooks.c

@ -407,18 +407,18 @@ split_edge (edge e)
ret = cfg_hooks->split_edge (e);
ret->count = count;
ret->frequency = freq;
EDGE_SUCC (ret, 0)->probability = REG_BR_PROB_BASE;
EDGE_SUCC (ret, 0)->count = count;
single_succ_edge (ret)->probability = REG_BR_PROB_BASE;
single_succ_edge (ret)->count = count;
if (irr)
{
ret->flags |= BB_IRREDUCIBLE_LOOP;
EDGE_PRED (ret, 0)->flags |= EDGE_IRREDUCIBLE_LOOP;
EDGE_SUCC (ret, 0)->flags |= EDGE_IRREDUCIBLE_LOOP;
single_pred_edge (ret)->flags |= EDGE_IRREDUCIBLE_LOOP;
single_succ_edge (ret)->flags |= EDGE_IRREDUCIBLE_LOOP;
}
if (dom_computed[CDI_DOMINATORS])
set_immediate_dominator (CDI_DOMINATORS, ret, EDGE_PRED (ret, 0)->src);
set_immediate_dominator (CDI_DOMINATORS, ret, single_pred (ret));
if (dom_computed[CDI_DOMINATORS] >= DOM_NO_FAST_QUERY)
{
@ -431,22 +431,22 @@ split_edge (edge e)
ret, provided that all other predecessors of e->dest are
dominated by e->dest. */
if (get_immediate_dominator (CDI_DOMINATORS, EDGE_SUCC (ret, 0)->dest)
== EDGE_PRED (ret, 0)->src)
if (get_immediate_dominator (CDI_DOMINATORS, single_succ (ret))
== single_pred (ret))
{
edge_iterator ei;
FOR_EACH_EDGE (f, ei, EDGE_SUCC (ret, 0)->dest->preds)
FOR_EACH_EDGE (f, ei, single_succ (ret)->preds)
{
if (f == EDGE_SUCC (ret, 0))
if (f == single_succ_edge (ret))
continue;
if (!dominated_by_p (CDI_DOMINATORS, f->src,
EDGE_SUCC (ret, 0)->dest))
single_succ (ret)))
break;
}
if (!f)
set_immediate_dominator (CDI_DOMINATORS, EDGE_SUCC (ret, 0)->dest, ret);
set_immediate_dominator (CDI_DOMINATORS, single_succ (ret), ret);
}
};
@ -657,9 +657,9 @@ tidy_fallthru_edges (void)
merge the flags for the duplicate edges. So we do not want to
check that the edge is not a FALLTHRU edge. */
if (EDGE_COUNT (b->succs) == 1)
if (single_succ_p (b))
{
s = EDGE_SUCC (b, 0);
s = single_succ_edge (b);
if (! (s->flags & EDGE_COMPLEX)
&& s->dest == c
&& !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))

cfglayout.c

@ -776,11 +776,11 @@ fixup_reorder_chain (void)
/* Make sure new bb is tagged for correct section (same as
fall-thru source, since you cannot fall-throu across
section boundaries). */
BB_COPY_PARTITION (e_fall->src, EDGE_PRED (bb, 0)->src);
BB_COPY_PARTITION (e_fall->src, single_pred (bb));
if (flag_reorder_blocks_and_partition
&& targetm.have_named_sections)
{
if (BB_PARTITION (EDGE_PRED (bb, 0)->src) == BB_COLD_PARTITION)
if (BB_PARTITION (single_pred (bb)) == BB_COLD_PARTITION)
{
rtx new_note;
rtx note = BB_HEAD (e_fall->src);
@ -796,7 +796,7 @@ fixup_reorder_chain (void)
}
if (JUMP_P (BB_END (bb))
&& !any_condjump_p (BB_END (bb))
&& (EDGE_SUCC (bb, 0)->flags & EDGE_CROSSING))
&& (single_succ_edge (bb)->flags & EDGE_CROSSING))
REG_NOTES (BB_END (bb)) = gen_rtx_EXPR_LIST
(REG_CROSSING_JUMP, NULL_RTX, REG_NOTES (BB_END (bb)));
}

cfgloop.c

@ -305,7 +305,7 @@ mark_single_exit_loops (struct loops *loops)
/* If we have already seen an exit, mark this by the edge that
surely does not occur as any exit. */
if (loop->single_exit)
loop->single_exit = EDGE_SUCC (ENTRY_BLOCK_PTR, 0);
loop->single_exit = single_succ_edge (ENTRY_BLOCK_PTR);
else
loop->single_exit = e;
}
@ -318,7 +318,7 @@ mark_single_exit_loops (struct loops *loops)
if (!loop)
continue;
if (loop->single_exit == EDGE_SUCC (ENTRY_BLOCK_PTR, 0))
if (loop->single_exit == single_succ_edge (ENTRY_BLOCK_PTR))
loop->single_exit = NULL;
}
@ -430,9 +430,9 @@ update_latch_info (basic_block jump)
{
alloc_aux_for_block (jump, sizeof (int));
HEADER_BLOCK (jump) = 0;
alloc_aux_for_edge (EDGE_PRED (jump, 0), sizeof (int));
LATCH_EDGE (EDGE_PRED (jump, 0)) = 0;
set_immediate_dominator (CDI_DOMINATORS, jump, EDGE_PRED (jump, 0)->src);
alloc_aux_for_edge (single_pred_edge (jump), sizeof (int));
LATCH_EDGE (single_pred_edge (jump)) = 0;
set_immediate_dominator (CDI_DOMINATORS, jump, single_pred (jump));
}
/* A callback for make_forwarder block, to redirect all edges except for
@ -494,16 +494,16 @@ canonicalize_loop_headers (void)
HEADER_BLOCK (header) = num_latches;
}
if (HEADER_BLOCK (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest))
if (HEADER_BLOCK (single_succ (ENTRY_BLOCK_PTR)))
{
basic_block bb;
/* We could not redirect edges freely here. On the other hand,
we can simply split the edge from entry block. */
bb = split_edge (EDGE_SUCC (ENTRY_BLOCK_PTR, 0));
bb = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
alloc_aux_for_edge (EDGE_SUCC (bb, 0), sizeof (int));
LATCH_EDGE (EDGE_SUCC (bb, 0)) = 0;
alloc_aux_for_edge (single_succ_edge (bb), sizeof (int));
LATCH_EDGE (single_succ_edge (bb)) = 0;
alloc_aux_for_block (bb, sizeof (int));
HEADER_BLOCK (bb) = 0;
}
@ -1124,12 +1124,12 @@ verify_loop_structure (struct loops *loops)
}
if (loops->state & LOOPS_HAVE_SIMPLE_LATCHES)
{
if (EDGE_COUNT (loop->latch->succs) != 1)
if (!single_succ_p (loop->latch))
{
error ("Loop %d's latch does not have exactly 1 successor.", i);
err = 1;
}
if (EDGE_SUCC (loop->latch, 0)->dest != loop->header)
if (single_succ (loop->latch) != loop->header)
{
error ("Loop %d's latch does not have header as successor.", i);
err = 1;

cfgloopmanip.c

@ -331,8 +331,8 @@ remove_path (struct loops *loops, edge e)
e, but we only have basic block dominators. This is easy to
fix -- when e->dest has exactly one predecessor, this corresponds
to blocks dominated by e->dest, if not, split the edge. */
if (EDGE_COUNT (e->dest->preds) > 1)
e = EDGE_PRED (loop_split_edge_with (e, NULL_RTX), 0);
if (!single_pred_p (e->dest))
e = single_pred_edge (loop_split_edge_with (e, NULL_RTX));
/* It may happen that by removing path we remove one or more loops
we belong to. In this case first unloop the loops, then proceed
@ -623,7 +623,7 @@ unloop (struct loops *loops, struct loop *loop)
loops->parray[loop->num] = NULL;
flow_loop_free (loop);
remove_edge (EDGE_SUCC (latch, 0));
remove_edge (single_succ_edge (latch));
fix_bb_placements (loops, latch);
/* If the loop was inside an irreducible region, we would have to somehow
@ -802,8 +802,8 @@ loop_delete_branch_edge (edge e, int really_delete)
if (!redirect_edge_and_branch (e, newdest))
return false;
EDGE_SUCC (src, 0)->flags &= ~EDGE_IRREDUCIBLE_LOOP;
EDGE_SUCC (src, 0)->flags |= irr;
single_succ_edge (src)->flags &= ~EDGE_IRREDUCIBLE_LOOP;
single_succ_edge (src)->flags |= irr;
return true;
}
@ -1121,10 +1121,10 @@ mfb_keep_just (edge e)
static void
mfb_update_loops (basic_block jump)
{
struct loop *loop = EDGE_SUCC (jump, 0)->dest->loop_father;
struct loop *loop = single_succ (jump)->loop_father;
if (dom_computed[CDI_DOMINATORS])
set_immediate_dominator (CDI_DOMINATORS, jump, EDGE_PRED (jump, 0)->src);
set_immediate_dominator (CDI_DOMINATORS, jump, single_pred (jump));
add_bb_to_loop (jump, loop);
loop->latch = jump;
}
@ -1154,7 +1154,7 @@ create_preheader (struct loop *loop, int flags)
continue;
irred |= (e->flags & EDGE_IRREDUCIBLE_LOOP) != 0;
nentry++;
if (EDGE_COUNT (e->src->succs) == 1)
if (single_succ_p (e->src))
one_succ_pred = e;
}
gcc_assert (nentry);
@ -1165,7 +1165,7 @@ create_preheader (struct loop *loop, int flags)
e = EDGE_PRED (loop->header,
EDGE_PRED (loop->header, 0)->src == loop->latch);
if (!(flags & CP_SIMPLE_PREHEADERS) || EDGE_COUNT (e->src->succs) == 1)
if (!(flags & CP_SIMPLE_PREHEADERS) || single_succ_p (e->src))
return NULL;
}
@ -1206,7 +1206,7 @@ create_preheader (struct loop *loop, int flags)
if (irred)
{
dummy->flags |= BB_IRREDUCIBLE_LOOP;
EDGE_SUCC (dummy, 0)->flags |= EDGE_IRREDUCIBLE_LOOP;
single_succ_edge (dummy)->flags |= EDGE_IRREDUCIBLE_LOOP;
}
if (dump_file)
@ -1239,7 +1239,7 @@ force_single_succ_latches (struct loops *loops)
for (i = 1; i < loops->num; i++)
{
loop = loops->parray[i];
if (loop->latch != loop->header && EDGE_COUNT (loop->latch->succs) == 1)
if (loop->latch != loop->header && single_succ_p (loop->latch))
continue;
e = find_edge (loop->latch, loop->header);
@ -1341,9 +1341,9 @@ create_loop_notes (void)
&& onlyjump_p (insn))
{
pbb = BLOCK_FOR_INSN (insn);
gcc_assert (pbb && EDGE_COUNT (pbb->succs) == 1);
gcc_assert (pbb && single_succ_p (pbb));
if (!flow_bb_inside_loop_p (loop, EDGE_SUCC (pbb, 0)->dest))
if (!flow_bb_inside_loop_p (loop, single_succ (pbb)))
insn = BB_HEAD (first[loop->num]);
}
else

cfgrtl.c

@ -623,12 +623,12 @@ rtl_can_merge_blocks (basic_block a,basic_block b)
return false;
/* There must be exactly one edge in between the blocks. */
return (EDGE_COUNT (a->succs) == 1
&& EDGE_SUCC (a, 0)->dest == b
&& EDGE_COUNT (b->preds) == 1
return (single_succ_p (a)
&& single_succ (a) == b
&& single_pred_p (b)
&& a != b
/* Must be simple edge. */
&& !(EDGE_SUCC (a, 0)->flags & EDGE_COMPLEX)
&& !(single_succ_edge (a)->flags & EDGE_COMPLEX)
&& a->next_bb == b
&& a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
/* If the jump insn has side effects,
@ -817,10 +817,11 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
}
/* Keep only one edge out and set proper flags. */
while (EDGE_COUNT (src->succs) > 1)
if (!single_succ_p (src))
remove_edge (e);
gcc_assert (single_succ_p (src));
e = EDGE_SUCC (src, 0);
e = single_succ_edge (src);
if (fallthru)
e->flags = EDGE_FALLTHRU;
else
@ -1124,7 +1125,7 @@ force_nonfallthru_and_redirect (edge e, basic_block target)
}
if (JUMP_P (BB_END (jump_block))
&& !any_condjump_p (BB_END (jump_block))
&& (EDGE_SUCC (jump_block, 0)->flags & EDGE_CROSSING))
&& (single_succ_edge (jump_block)->flags & EDGE_CROSSING))
REG_NOTES (BB_END (jump_block)) = gen_rtx_EXPR_LIST
(REG_CROSSING_JUMP, NULL_RTX,
REG_NOTES (BB_END (jump_block)));
@ -1226,7 +1227,7 @@ rtl_tidy_fallthru_edge (edge e)
if (JUMP_P (q)
&& onlyjump_p (q)
&& (any_uncondjump_p (q)
|| EDGE_COUNT (b->succs) == 1))
|| single_succ_p (b)))
{
#ifdef HAVE_cc0
/* If this was a conditional jump, we need to also delete
@ -1544,7 +1545,7 @@ commit_one_edge_insertion (edge e, int watch_calls)
/* Special case -- avoid inserting code between call and storing
its return value. */
if (watch_calls && (e->flags & EDGE_FALLTHRU)
&& EDGE_COUNT (e->dest->preds) == 1
&& single_pred_p (e->dest)
&& e->src != ENTRY_BLOCK_PTR
&& CALL_P (BB_END (e->src)))
{
@ -1564,7 +1565,7 @@ commit_one_edge_insertion (edge e, int watch_calls)
{
/* Figure out where to put these things. If the destination has
one predecessor, insert there. Except for the exit block. */
if (EDGE_COUNT (e->dest->preds) == 1 && e->dest != EXIT_BLOCK_PTR)
if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR)
{
bb = e->dest;
@ -1590,7 +1591,7 @@ commit_one_edge_insertion (edge e, int watch_calls)
/* If the source has one successor and the edge is not abnormal,
insert there. Except for the entry block. */
else if ((e->flags & EDGE_ABNORMAL) == 0
&& EDGE_COUNT (e->src->succs) == 1
&& single_succ_p (e->src)
&& e->src != ENTRY_BLOCK_PTR)
{
bb = e->src;
@ -1645,7 +1646,7 @@ commit_one_edge_insertion (edge e, int watch_calls)
NOTE_BASIC_BLOCK (new_note) = bb;
if (JUMP_P (BB_END (bb))
&& !any_condjump_p (BB_END (bb))
&& (EDGE_SUCC (bb, 0)->flags & EDGE_CROSSING))
&& (single_succ_edge (bb)->flags & EDGE_CROSSING))
REG_NOTES (BB_END (bb)) = gen_rtx_EXPR_LIST
(REG_CROSSING_JUMP, NULL_RTX, REG_NOTES (BB_END (bb)));
if (after == bb_note)
@ -1671,9 +1672,9 @@ commit_one_edge_insertion (edge e, int watch_calls)
for the (single) epilogue, which already has a fallthru edge
to EXIT. */
e = EDGE_SUCC (bb, 0);
e = single_succ_edge (bb);
gcc_assert (e->dest == EXIT_BLOCK_PTR
&& EDGE_COUNT (bb->succs) == 1 && (e->flags & EDGE_FALLTHRU));
&& single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
e->flags &= ~EDGE_FALLTHRU;
emit_barrier_after (last);
@ -2404,10 +2405,10 @@ purge_dead_edges (basic_block bb)
return purged;
/* Redistribute probabilities. */
if (EDGE_COUNT (bb->succs) == 1)
if (single_succ_p (bb))
{
EDGE_SUCC (bb, 0)->probability = REG_BR_PROB_BASE;
EDGE_SUCC (bb, 0)->count = bb->count;
single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
single_succ_edge (bb)->count = bb->count;
}
else
{
@ -2431,8 +2432,9 @@ purge_dead_edges (basic_block bb)
from non-local gotos and the like. If there were, we shouldn't
have created the sibcall in the first place. Second, there
should of course never have been a fallthru edge. */
gcc_assert (EDGE_COUNT (bb->succs) == 1);
gcc_assert (EDGE_SUCC (bb, 0)->flags == (EDGE_SIBCALL | EDGE_ABNORMAL));
gcc_assert (single_succ_p (bb));
gcc_assert (single_succ_edge (bb)->flags
== (EDGE_SIBCALL | EDGE_ABNORMAL));
return 0;
}
@ -2465,10 +2467,10 @@ purge_dead_edges (basic_block bb)
ei_next (&ei);
}
gcc_assert (EDGE_COUNT (bb->succs) == 1);
gcc_assert (single_succ_p (bb));
EDGE_SUCC (bb, 0)->probability = REG_BR_PROB_BASE;
EDGE_SUCC (bb, 0)->count = bb->count;
single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
single_succ_edge (bb)->count = bb->count;
if (dump_file)
fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
@ -2722,12 +2724,12 @@ cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
return false;
/* There must be exactly one edge in between the blocks. */
return (EDGE_COUNT (a->succs) == 1
&& EDGE_SUCC (a, 0)->dest == b
&& EDGE_COUNT (b->preds) == 1
return (single_succ_p (a)
&& single_succ (a) == b
&& single_pred_p (b) == 1
&& a != b
/* Must be simple edge. */
&& !(EDGE_SUCC (a, 0)->flags & EDGE_COMPLEX)
&& !(single_succ_edge (a)->flags & EDGE_COMPLEX)
&& a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
/* If the jump insn has side effects,
we can't kill the edge. */

except.c

@ -1637,10 +1637,10 @@ sjlj_emit_function_enter (rtx dispatch_label)
|| NOTE_LINE_NUMBER (fn_begin) == NOTE_INSN_BASIC_BLOCK))
break;
if (NOTE_LINE_NUMBER (fn_begin) == NOTE_INSN_FUNCTION_BEG)
insert_insn_on_edge (seq, EDGE_SUCC (ENTRY_BLOCK_PTR, 0));
insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
else
{
rtx last = BB_END (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest);
rtx last = BB_END (single_succ (ENTRY_BLOCK_PTR));
for (; ; fn_begin = NEXT_INSN (fn_begin))
if ((NOTE_P (fn_begin)
&& NOTE_LINE_NUMBER (fn_begin) == NOTE_INSN_FUNCTION_BEG)

flow.c

@ -1962,7 +1962,7 @@ init_propagate_block_info (basic_block bb, regset live, regset local_set,
/* Identify the successor blocks. */
bb_true = EDGE_SUCC (bb, 0)->dest;
if (EDGE_COUNT (bb->succs) > 1)
if (!single_succ_p (bb))
{
bb_false = EDGE_SUCC (bb, 1)->dest;
@ -2059,8 +2059,8 @@ init_propagate_block_info (basic_block bb, regset live, regset local_set,
(TREE_TYPE (current_function_decl))))
&& (flags & PROP_SCAN_DEAD_STORES)
&& (EDGE_COUNT (bb->succs) == 0
|| (EDGE_COUNT (bb->succs) == 1
&& EDGE_SUCC (bb, 0)->dest == EXIT_BLOCK_PTR
|| (single_succ_p (bb)
&& single_succ (bb) == EXIT_BLOCK_PTR
&& ! current_function_calls_eh_return)))
{
rtx insn, set;

function.c

@ -5130,9 +5130,9 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED)
/* Can't deal with multiple successors of the entry block
at the moment. Function should always have at least one
entry point. */
gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR->succs) == 1);
gcc_assert (single_succ_p (ENTRY_BLOCK_PTR));
insert_insn_on_edge (seq, EDGE_SUCC (ENTRY_BLOCK_PTR, 0));
insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
inserted = 1;
}
#endif
@ -5228,7 +5228,7 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED)
/* If this block has only one successor, it both jumps
and falls through to the fallthru block, so we can't
delete the edge. */
if (EDGE_COUNT (bb->succs) == 1)
if (single_succ_p (bb))
{
ei_next (&ei2);
continue;
@ -5250,7 +5250,7 @@ thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED)
emit_barrier_after (BB_END (last));
emit_return_into_block (last, epilogue_line_note);
epilogue_end = BB_END (last);
EDGE_SUCC (last, 0)->flags &= ~EDGE_FALLTHRU;
single_succ_edge (last)->flags &= ~EDGE_FALLTHRU;
goto epilogue_done;
}
}

gcse.c

@ -3331,7 +3331,7 @@ find_implicit_sets (void)
dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest
: FALLTHRU_EDGE (bb)->dest;
if (dest && EDGE_COUNT (dest->preds) == 1
if (dest && single_pred_p (dest)
&& dest != EXIT_BLOCK_PTR)
{
new = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
@ -3662,7 +3662,7 @@ bypass_conditional_jumps (void)
EXIT_BLOCK_PTR, next_bb)
{
/* Check for more than one predecessor. */
if (EDGE_COUNT (bb->preds) > 1)
if (!single_pred_p (bb))
{
setcc = NULL_RTX;
for (insn = BB_HEAD (bb);
@ -3976,8 +3976,8 @@ insert_insn_end_bb (struct expr *expr, basic_block bb, int pre)
if (JUMP_P (insn)
|| (NONJUMP_INSN_P (insn)
&& (EDGE_COUNT (bb->succs) > 1
|| EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL)))
&& (!single_succ_p (bb)
|| single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
{
#ifdef HAVE_cc0
rtx note;
@ -4018,7 +4018,8 @@ insert_insn_end_bb (struct expr *expr, basic_block bb, int pre)
/* Likewise if the last insn is a call, as will happen in the presence
of exception handling. */
else if (CALL_P (insn)
&& (EDGE_COUNT (bb->succs) > 1 || EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL))
&& (!single_succ_p (bb)
|| single_succ_edge (bb)->flags & EDGE_ABNORMAL))
{
/* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
we search backward and place the instructions before the first

ifcvt.c

@ -2278,13 +2278,12 @@ merge_if_block (struct ce_if_block * ce_info)
/* The outgoing edge for the current COMBO block should already
be correct. Verify this. */
if (EDGE_COUNT (combo_bb->succs) > 1
|| EDGE_SUCC (combo_bb, 0)->dest != join_bb)
abort ();
gcc_assert (single_succ_p (combo_bb)
&& single_succ (combo_bb) == join_bb);
/* Remove the jump and cruft from the end of the COMBO block. */
if (join_bb != EXIT_BLOCK_PTR)
tidy_fallthru_edge (EDGE_SUCC (combo_bb, 0));
tidy_fallthru_edge (single_succ_edge (combo_bb));
}
num_updated_if_blocks++;
@ -2456,10 +2455,10 @@ find_if_block (struct ce_if_block * ce_info)
were && tests (which jump to the else block) or || tests (which jump to
the then block). */
if (HAVE_conditional_execution && reload_completed
&& EDGE_COUNT (test_bb->preds) == 1
&& EDGE_PRED (test_bb, 0)->flags == EDGE_FALLTHRU)
&& single_pred_p (test_bb)
&& single_pred_edge (test_bb)->flags == EDGE_FALLTHRU)
{
basic_block bb = EDGE_PRED (test_bb, 0)->src;
basic_block bb = single_pred (test_bb);
basic_block target_bb;
int max_insns = MAX_CONDITIONAL_EXECUTE;
int n_insns;
@ -2492,10 +2491,10 @@ find_if_block (struct ce_if_block * ce_info)
total_insns += n_insns;
blocks++;
if (EDGE_COUNT (bb->preds) != 1)
if (!single_pred_p (bb))
break;
bb = EDGE_PRED (bb, 0)->src;
bb = single_pred (bb);
n_insns = block_jumps_and_fallthru_p (bb, target_bb);
}
while (n_insns >= 0 && (total_insns + n_insns) <= max_insns);
@ -2530,8 +2529,8 @@ find_if_block (struct ce_if_block * ce_info)
/* The THEN block of an IF-THEN combo must have zero or one successors. */
if (EDGE_COUNT (then_bb->succs) > 0
&& (EDGE_COUNT (then_bb->succs) > 1
|| (EDGE_SUCC (then_bb, 0)->flags & EDGE_COMPLEX)
&& (!single_succ_p (then_bb)
|| (single_succ_edge (then_bb)->flags & EDGE_COMPLEX)
|| (flow2_completed && tablejump_p (BB_END (then_bb), NULL, NULL))))
return FALSE;
@ -2543,7 +2542,7 @@ find_if_block (struct ce_if_block * ce_info)
code processing. ??? we should fix this in the future. */
if (EDGE_COUNT (then_bb->succs) == 0)
{
if (EDGE_COUNT (else_bb->preds) == 1)
if (single_pred_p (else_bb))
{
rtx last_insn = BB_END (then_bb);
@ -2566,7 +2565,7 @@ find_if_block (struct ce_if_block * ce_info)
/* If the THEN block's successor is the other edge out of the TEST block,
then we have an IF-THEN combo without an ELSE. */
else if (EDGE_SUCC (then_bb, 0)->dest == else_bb)
else if (single_succ (then_bb) == else_bb)
{
join_bb = else_bb;
else_bb = NULL_BLOCK;
@ -2575,12 +2574,12 @@ find_if_block (struct ce_if_block * ce_info)
/* If the THEN and ELSE block meet in a subsequent block, and the ELSE
has exactly one predecessor and one successor, and the outgoing edge
is not complex, then we have an IF-THEN-ELSE combo. */
else if (EDGE_COUNT (else_bb->succs) == 1
&& EDGE_SUCC (then_bb, 0)->dest == EDGE_SUCC (else_bb, 0)->dest
&& EDGE_COUNT (else_bb->preds) == 1
&& ! (EDGE_SUCC (else_bb, 0)->flags & EDGE_COMPLEX)
else if (single_succ_p (else_bb)
&& single_succ (then_bb) == single_succ (else_bb)
&& single_pred_p (else_bb)
&& ! (single_succ_edge (else_bb)->flags & EDGE_COMPLEX)
&& ! (flow2_completed && tablejump_p (BB_END (else_bb), NULL, NULL)))
join_bb = EDGE_SUCC (else_bb, 0)->dest;
join_bb = single_succ (else_bb);
/* Otherwise it is not an IF-THEN or IF-THEN-ELSE combination. */
else
@ -2875,15 +2874,15 @@ find_if_case_1 (basic_block test_bb, edge then_edge, edge else_edge)
return FALSE;
/* THEN has one successor. */
if (EDGE_COUNT (then_bb->succs) != 1)
if (!single_succ_p (then_bb))
return FALSE;
/* THEN does not fall through, but is not strange either. */
if (EDGE_SUCC (then_bb, 0)->flags & (EDGE_COMPLEX | EDGE_FALLTHRU))
if (single_succ_edge (then_bb)->flags & (EDGE_COMPLEX | EDGE_FALLTHRU))
return FALSE;
/* THEN has one predecessor. */
if (EDGE_COUNT (then_bb->preds) != 1)
if (!single_pred_p (then_bb))
return FALSE;
/* THEN must do something. */
@ -2902,7 +2901,7 @@ find_if_case_1 (basic_block test_bb, edge then_edge, edge else_edge)
/* Registers set are dead, or are predicable. */
if (! dead_or_predicable (test_bb, then_bb, else_bb,
EDGE_SUCC (then_bb, 0)->dest, 1))
single_succ (then_bb), 1))
return FALSE;
/* Conversion went ok, including moving the insns and fixing up the
@ -2980,17 +2979,17 @@ find_if_case_2 (basic_block test_bb, edge then_edge, edge else_edge)
return FALSE;
/* ELSE has one successor. */
if (EDGE_COUNT (else_bb->succs) != 1)
if (!single_succ_p (else_bb))
return FALSE;
else
else_succ = EDGE_SUCC (else_bb, 0);
else_succ = single_succ_edge (else_bb);
/* ELSE outgoing edge is not complex. */
if (else_succ->flags & EDGE_COMPLEX)
return FALSE;
/* ELSE has one predecessor. */
if (EDGE_COUNT (else_bb->preds) != 1)
if (!single_pred_p (else_bb))
return FALSE;
/* THEN is not EXIT. */

lambda-code.c

@ -2320,7 +2320,7 @@ perfect_nestify (struct loops *loops,
VEC_safe_push (tree, phis, PHI_RESULT (phi));
VEC_safe_push (tree, phis, PHI_ARG_DEF (phi, 0));
}
e = redirect_edge_and_branch (EDGE_SUCC (preheaderbb, 0), headerbb);
e = redirect_edge_and_branch (single_succ_edge (preheaderbb), headerbb);
/* Remove the exit phis from the old basic block. Make sure to set
PHI_RESULT to null so it doesn't get released. */
@ -2338,7 +2338,7 @@ perfect_nestify (struct loops *loops,
def = VEC_pop (tree, phis);
phiname = VEC_pop (tree, phis);
phi = create_phi_node (phiname, preheaderbb);
add_phi_arg (phi, def, EDGE_PRED (preheaderbb, 0));
add_phi_arg (phi, def, single_pred_edge (preheaderbb));
}
flush_pending_stmts (e);

lcm.c

@ -1194,7 +1194,7 @@ optimize_mode_switching (FILE *file)
#if defined (MODE_ENTRY) && defined (MODE_EXIT)
/* Split the edge from the entry block, so that we can note that
there NORMAL_MODE is supplied. */
post_entry = split_edge (EDGE_SUCC (ENTRY_BLOCK_PTR, 0));
post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
#endif

loop-doloop.c

@ -359,11 +359,11 @@ doloop_modify (struct loop *loop, struct niter_desc *desc,
/* Expand the condition testing the assumptions and if it does not pass,
reset the count register to 0. */
add_test (XEXP (ass, 0), preheader, set_zero);
EDGE_SUCC (preheader, 0)->flags &= ~EDGE_FALLTHRU;
cnt = EDGE_SUCC (preheader, 0)->count;
EDGE_SUCC (preheader, 0)->probability = 0;
EDGE_SUCC (preheader, 0)->count = 0;
irr = EDGE_SUCC (preheader, 0)->flags & EDGE_IRREDUCIBLE_LOOP;
single_succ_edge (preheader)->flags &= ~EDGE_FALLTHRU;
cnt = single_succ_edge (preheader)->count;
single_succ_edge (preheader)->probability = 0;
single_succ_edge (preheader)->count = 0;
irr = single_succ_edge (preheader)->flags & EDGE_IRREDUCIBLE_LOOP;
te = make_edge (preheader, new_preheader, EDGE_FALLTHRU | irr);
te->probability = REG_BR_PROB_BASE;
te->count = cnt;
@ -375,7 +375,7 @@ doloop_modify (struct loop *loop, struct niter_desc *desc,
for (ass = XEXP (ass, 1); ass; ass = XEXP (ass, 1))
{
bb = loop_split_edge_with (te, NULL_RTX);
te = EDGE_SUCC (bb, 0);
te = single_succ_edge (bb);
add_test (XEXP (ass, 0), bb, set_zero);
make_edge (bb, set_zero, irr);
}

loop-init.c

@ -49,7 +49,7 @@ loop_optimizer_init (FILE *dumpfile)
block. */
for (ei = ei_start (EXIT_BLOCK_PTR->preds); (e = ei_safe_edge (ei)); )
if ((e->flags & EDGE_FALLTHRU) && EDGE_COUNT (e->src->succs) > 1)
if ((e->flags & EDGE_FALLTHRU) && !single_succ_p (e->src))
split_edge (e);
else
ei_next (&ei);

loop-iv.c

@ -1781,8 +1781,6 @@ simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr)
while (1)
{
basic_block tmp_bb;
insn = BB_END (e->src);
if (any_condjump_p (insn))
{
@ -1814,14 +1812,10 @@ simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr)
}
}
/* This is a bit subtle. Store away e->src in tmp_bb, since we
modify `e' and this can invalidate the subsequent count of
e->src's predecessors by looking at the wrong block. */
tmp_bb = e->src;
e = EDGE_PRED (tmp_bb, 0);
if (EDGE_COUNT (tmp_bb->preds) > 1
|| e->src == ENTRY_BLOCK_PTR)
if (!single_pred_p (e->src)
|| single_pred (e->src) == ENTRY_BLOCK_PTR)
break;
e = single_pred_edge (e->src);
}
FREE_REG_SET (altered);

loop-unroll.c

@ -1043,11 +1043,11 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop)
branch_code = compare_and_jump_seq (copy_rtx (niter), GEN_INT (j), EQ,
block_label (preheader), p, NULL_RTX);
swtch = loop_split_edge_with (EDGE_PRED (swtch, 0), branch_code);
swtch = loop_split_edge_with (single_pred_edge (swtch), branch_code);
set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
EDGE_SUCC (swtch, 0)->probability = REG_BR_PROB_BASE - p;
single_pred_edge (swtch)->probability = REG_BR_PROB_BASE - p;
e = make_edge (swtch, preheader,
EDGE_SUCC (swtch, 0)->flags & EDGE_IRREDUCIBLE_LOOP);
single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
e->probability = p;
}
@ -1060,11 +1060,11 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop)
branch_code = compare_and_jump_seq (copy_rtx (niter), const0_rtx, EQ,
block_label (preheader), p, NULL_RTX);
swtch = loop_split_edge_with (EDGE_SUCC (swtch, 0), branch_code);
swtch = loop_split_edge_with (single_succ_edge (swtch), branch_code);
set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
EDGE_SUCC (swtch, 0)->probability = REG_BR_PROB_BASE - p;
single_succ_edge (swtch)->probability = REG_BR_PROB_BASE - p;
e = make_edge (swtch, preheader,
EDGE_SUCC (swtch, 0)->flags & EDGE_IRREDUCIBLE_LOOP);
single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
e->probability = p;
}

loop-unswitch.c

@ -443,7 +443,7 @@ unswitch_loop (struct loops *loops, struct loop *loop, basic_block unswitch_on,
unswitch_on_alt = unswitch_on->rbi->copy;
true_edge = BRANCH_EDGE (unswitch_on_alt);
false_edge = FALLTHRU_EDGE (unswitch_on);
latch_edge = EDGE_SUCC (loop->latch->rbi->copy, 0);
latch_edge = single_succ_edge (loop->latch->rbi->copy);
/* Create a block with the condition. */
prob = true_edge->probability;
@ -474,7 +474,7 @@ unswitch_loop (struct loops *loops, struct loop *loop, basic_block unswitch_on,
/* Loopify from the copy of LOOP body, constructing the new loop. */
nloop = loopify (loops, latch_edge,
EDGE_PRED (loop->header->rbi->copy, 0), switch_bb,
single_pred_edge (loop->header->rbi->copy), switch_bb,
BRANCH_EDGE (switch_bb), FALLTHRU_EDGE (switch_bb), true);
/* Remove branches that are now unreachable in new loops. */

modulo-sched.c

@ -739,7 +739,7 @@ generate_prolog_epilog (partial_schedule_ptr ps, rtx orig_loop_beg,
basic_block epilog_bb = BLOCK_FOR_INSN (last_epilog_insn);
basic_block precond_bb = BLOCK_FOR_INSN (precond_jump);
basic_block orig_loop_bb = BLOCK_FOR_INSN (precond_exit_label_insn);
edge epilog_exit_edge = EDGE_SUCC (epilog_bb, 0);
edge epilog_exit_edge = single_succ_edge (epilog_bb);
/* Do loop preconditioning to take care of cases were the loop count is
less than the stage count. Update the CFG properly. */

predict.c

@ -434,14 +434,14 @@ combine_predictions_for_insn (rtx insn, basic_block bb)
/* Save the prediction into CFG in case we are seeing non-degenerated
conditional jump. */
if (EDGE_COUNT (bb->succs) > 1)
if (!single_succ_p (bb))
{
BRANCH_EDGE (bb)->probability = combined_probability;
FALLTHRU_EDGE (bb)->probability
= REG_BR_PROB_BASE - combined_probability;
}
}
else if (EDGE_COUNT (bb->succs) > 1)
else if (!single_succ_p (bb))
{
int prob = INTVAL (XEXP (prob_note, 0));
@ -449,7 +449,7 @@ combine_predictions_for_insn (rtx insn, basic_block bb)
FALLTHRU_EDGE (bb)->probability = REG_BR_PROB_BASE - prob;
}
else
EDGE_SUCC (bb, 0)->probability = REG_BR_PROB_BASE;
single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
}
/* Combine predictions into single probability and store them into CFG.
@ -833,8 +833,8 @@ estimate_probability (struct loops *loops_info)
care for error returns and other are often used for fast paths
trought function. */
if ((e->dest == EXIT_BLOCK_PTR
|| (EDGE_COUNT (e->dest->succs) == 1
&& EDGE_SUCC (e->dest, 0)->dest == EXIT_BLOCK_PTR))
|| (single_succ_p (e->dest)
&& single_succ (e->dest) == EXIT_BLOCK_PTR))
&& !predicted_by_p (bb, PRED_NULL_RETURN)
&& !predicted_by_p (bb, PRED_CONST_RETURN)
&& !predicted_by_p (bb, PRED_NEGATIVE_RETURN)
@ -1314,7 +1314,7 @@ tree_estimate_probability (void)
fast paths trought function. */
if (e->dest == EXIT_BLOCK_PTR
&& TREE_CODE (last_stmt (bb)) == RETURN_EXPR
&& EDGE_COUNT (bb->preds) > 1)
&& !single_pred_p (bb))
{
edge e1;
edge_iterator ei1;
@ -1457,8 +1457,8 @@ last_basic_block_p (basic_block bb)
return (bb->next_bb == EXIT_BLOCK_PTR
|| (bb->next_bb->next_bb == EXIT_BLOCK_PTR
&& EDGE_COUNT (bb->succs) == 1
&& EDGE_SUCC (bb, 0)->dest->next_bb == EXIT_BLOCK_PTR));
&& single_succ_p (bb)
&& single_succ (bb)->next_bb == EXIT_BLOCK_PTR));
}
/* Sets branch probabilities according to PREDiction and
@ -1811,7 +1811,7 @@ estimate_bb_frequencies (struct loops *loops)
mark_dfs_back_edges ();
EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->probability = REG_BR_PROB_BASE;
single_succ_edge (ENTRY_BLOCK_PTR)->probability = REG_BR_PROB_BASE;
/* Set up block info for each basic block. */
tovisit = BITMAP_ALLOC (NULL);

profile.c

@ -1052,10 +1052,10 @@ branch_prob (void)
/* Notice GOTO expressions we eliminated while constructing the
CFG. */
if (EDGE_COUNT (bb->succs) == 1 && EDGE_SUCC (bb, 0)->goto_locus)
if (single_succ_p (bb) && single_succ_edge (bb)->goto_locus)
{
/* ??? source_locus type is marked deprecated in input.h. */
source_locus curr_location = EDGE_SUCC (bb, 0)->goto_locus;
source_locus curr_location = single_succ_edge (bb)->goto_locus;
/* ??? The FILE/LINE API is inconsistent for these cases. */
#ifdef USE_MAPPED_LOCATION
output_location (LOCATION_FILE (curr_location),

regrename.c

@ -1766,11 +1766,11 @@ copyprop_hardreg_forward (void)
processed, begin with the value data that was live at
the end of the predecessor block. */
/* ??? Ought to use more intelligent queuing of blocks. */
if (EDGE_COUNT (bb->preds) == 1
if (single_pred_p (bb)
&& TEST_BIT (visited,
EDGE_PRED (bb, 0)->src->index - (INVALID_BLOCK + 1))
&& ! (EDGE_PRED (bb, 0)->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)))
all_vd[bb->index] = all_vd[EDGE_PRED (bb, 0)->src->index];
single_pred (bb)->index - (INVALID_BLOCK + 1))
&& ! (single_pred_edge (bb)->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)))
all_vd[bb->index] = all_vd[single_pred (bb)->index];
else
init_value_data (all_vd + bb->index);

sched-rgn.c

@ -337,8 +337,8 @@ is_cfg_nonregular (void)
FOR_EACH_BB (b)
{
if (EDGE_COUNT (b->preds) == 0
|| (EDGE_PRED (b, 0)->src == b
&& EDGE_COUNT (b->preds) == 1))
|| (single_pred_p (b)
&& single_pred (b) == b))
return 1;
}
@ -537,7 +537,7 @@ find_rgns (void)
/* DFS traversal to find inner loops in the cfg. */
current_edge = ei_start (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest->succs);
current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR)->succs);
sp = -1;
while (1)
@ -727,8 +727,8 @@ find_rgns (void)
FOR_EACH_BB (jbb)
/* Leaf nodes have only a single successor which must
be EXIT_BLOCK. */
if (EDGE_COUNT (jbb->succs) == 1
&& EDGE_SUCC (jbb, 0)->dest == EXIT_BLOCK_PTR)
if (single_succ_p (jbb)
&& single_succ (jbb) == EXIT_BLOCK_PTR)
{
queue[++tail] = jbb->index;
SET_BIT (in_queue, jbb->index);
@ -1323,7 +1323,7 @@ update_live (rtx insn, int src)
(bb_from == bb_to \
|| IS_RGN_ENTRY (bb_from) \
|| (TEST_BIT (ancestor_edges[bb_to], \
EDGE_TO_BIT (EDGE_PRED (BASIC_BLOCK (BB_TO_BLOCK (bb_from)), 0)))))
EDGE_TO_BIT (single_pred_edge (BASIC_BLOCK (BB_TO_BLOCK (bb_from)))))))
/* Turns on the fed_by_spec_load flag for insns fed by load_insn. */

tracer.c

@ -321,8 +321,8 @@ tail_duplicate (void)
static void
layout_superblocks (void)
{
basic_block end = EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest;
basic_block bb = EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest->next_bb;
basic_block end = single_succ (ENTRY_BLOCK_PTR);
basic_block bb = end->next_bb;
while (bb != EXIT_BLOCK_PTR)
{
@ -333,7 +333,7 @@ layout_superblocks (void)
FOR_EACH_EDGE (e, ei, end->succs)
if (e->dest != EXIT_BLOCK_PTR
&& e->dest != EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest
&& e->dest != single_succ (ENTRY_BLOCK_PTR)
&& !e->dest->rbi->visited
&& (!best || EDGE_FREQUENCY (e) > EDGE_FREQUENCY (best)))
best = e;

tree-cfg.c

@ -1260,16 +1260,16 @@ tree_can_merge_blocks_p (basic_block a, basic_block b)
tree stmt;
block_stmt_iterator bsi;
if (EDGE_COUNT (a->succs) != 1)
if (!single_succ_p (a))
return false;
if (EDGE_SUCC (a, 0)->flags & EDGE_ABNORMAL)
if (single_succ_edge (a)->flags & EDGE_ABNORMAL)
return false;
if (EDGE_SUCC (a, 0)->dest != b)
if (single_succ (a) != b)
return false;
if (EDGE_COUNT (b->preds) > 1)
if (!single_pred_p (b))
return false;
if (b == EXIT_BLOCK_PTR)
@ -1324,7 +1324,7 @@ tree_merge_blocks (basic_block a, basic_block b)
/* Ensure that B follows A. */
move_block_after (b, a);
gcc_assert (EDGE_SUCC (a, 0)->flags & EDGE_FALLTHRU);
gcc_assert (single_succ_edge (a)->flags & EDGE_FALLTHRU);
gcc_assert (!last_stmt (a) || !stmt_ends_bb_p (last_stmt (a)));
/* Remove labels from B and set bb_for_stmt to A for other statements. */
@ -1922,16 +1922,17 @@ cfg_remove_useless_stmts_bb (basic_block bb)
/* Check whether we come here from a condition, and if so, get the
condition. */
if (EDGE_COUNT (bb->preds) != 1
|| !(EDGE_PRED (bb, 0)->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
if (!single_pred_p (bb)
|| !(single_pred_edge (bb)->flags
& (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
return;
cond = COND_EXPR_COND (last_stmt (EDGE_PRED (bb, 0)->src));
cond = COND_EXPR_COND (last_stmt (single_pred (bb)));
if (TREE_CODE (cond) == VAR_DECL || TREE_CODE (cond) == PARM_DECL)
{
var = cond;
val = (EDGE_PRED (bb, 0)->flags & EDGE_FALSE_VALUE
val = (single_pred_edge (bb)->flags & EDGE_FALSE_VALUE
? boolean_false_node : boolean_true_node);
}
else if (TREE_CODE (cond) == TRUTH_NOT_EXPR
@ -1939,12 +1940,12 @@ cfg_remove_useless_stmts_bb (basic_block bb)
|| TREE_CODE (TREE_OPERAND (cond, 0)) == PARM_DECL))
{
var = TREE_OPERAND (cond, 0);
val = (EDGE_PRED (bb, 0)->flags & EDGE_FALSE_VALUE
val = (single_pred_edge (bb)->flags & EDGE_FALSE_VALUE
? boolean_true_node : boolean_false_node);
}
else
{
if (EDGE_PRED (bb, 0)->flags & EDGE_FALSE_VALUE)
if (single_pred_edge (bb)->flags & EDGE_FALSE_VALUE)
cond = invert_truthvalue (cond);
if (TREE_CODE (cond) == EQ_EXPR
&& (TREE_CODE (TREE_OPERAND (cond, 0)) == VAR_DECL
@ -2208,10 +2209,10 @@ cleanup_control_flow (void)
else
{
/* Turn off the EDGE_ABNORMAL flag. */
EDGE_SUCC (bb, 0)->flags &= ~EDGE_ABNORMAL;
e->flags &= ~EDGE_ABNORMAL;
/* And set EDGE_FALLTHRU. */
EDGE_SUCC (bb, 0)->flags |= EDGE_FALLTHRU;
e->flags |= EDGE_FALLTHRU;
ei_next (&ei);
}
}
@ -2249,7 +2250,7 @@ cleanup_control_expr_graph (basic_block bb, block_stmt_iterator bsi)
bool retval = false;
tree expr = bsi_stmt (bsi), val;
if (EDGE_COUNT (bb->succs) > 1)
if (!single_succ_p (bb))
{
edge e;
edge_iterator ei;
@ -2291,7 +2292,7 @@ cleanup_control_expr_graph (basic_block bb, block_stmt_iterator bsi)
taken_edge->probability = REG_BR_PROB_BASE;
}
else
taken_edge = EDGE_SUCC (bb, 0);
taken_edge = single_succ_edge (bb);
bsi_remove (&bsi);
taken_edge->flags = EDGE_FALLTHRU;
@ -2862,14 +2863,14 @@ disband_implicit_edges (void)
{
/* Remove the RETURN_EXPR if we may fall though to the exit
instead. */
gcc_assert (EDGE_COUNT (bb->succs) == 1);
gcc_assert (EDGE_SUCC (bb, 0)->dest == EXIT_BLOCK_PTR);
gcc_assert (single_succ_p (bb));
gcc_assert (single_succ (bb) == EXIT_BLOCK_PTR);
if (bb->next_bb == EXIT_BLOCK_PTR
&& !TREE_OPERAND (stmt, 0))
{
bsi_remove (&last);
EDGE_SUCC (bb, 0)->flags |= EDGE_FALLTHRU;
single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
}
continue;
}
@ -3164,7 +3165,7 @@ tree_find_edge_insert_loc (edge e, block_stmt_iterator *bsi,
would have to examine the PHIs to prove that none of them used
the value set by the statement we want to insert on E. That
hardly seems worth the effort. */
if (EDGE_COUNT (dest->preds) == 1
if (single_pred_p (dest)
&& ! phi_nodes (dest)
&& dest != EXIT_BLOCK_PTR)
{
@ -3196,7 +3197,7 @@ tree_find_edge_insert_loc (edge e, block_stmt_iterator *bsi,
Except for the entry block. */
src = e->src;
if ((e->flags & EDGE_ABNORMAL) == 0
&& EDGE_COUNT (src->succs) == 1
&& single_succ_p (src)
&& src != ENTRY_BLOCK_PTR)
{
*bsi = bsi_last (src);
@ -3227,7 +3228,7 @@ tree_find_edge_insert_loc (edge e, block_stmt_iterator *bsi,
dest = split_edge (e);
if (new_bb)
*new_bb = dest;
e = EDGE_PRED (dest, 0);
e = single_pred_edge (dest);
goto restart;
}
@ -3242,7 +3243,7 @@ bsi_commit_edge_inserts (void)
edge e;
edge_iterator ei;
bsi_commit_one_edge_insert (EDGE_SUCC (ENTRY_BLOCK_PTR, 0), NULL);
bsi_commit_one_edge_insert (single_succ_edge (ENTRY_BLOCK_PTR), NULL);
FOR_EACH_BB (bb)
FOR_EACH_EDGE (e, ei, bb->succs)
@ -3940,14 +3941,15 @@ tree_verify_flow_info (void)
break;
case RETURN_EXPR:
if (EDGE_COUNT (bb->succs) != 1
|| (EDGE_SUCC (bb, 0)->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL
| EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
if (!single_succ_p (bb)
|| (single_succ_edge (bb)->flags
& (EDGE_FALLTHRU | EDGE_ABNORMAL
| EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
{
error ("Wrong outgoing edge flags at end of bb %d\n", bb->index);
err = 1;
}
if (EDGE_SUCC (bb, 0)->dest != EXIT_BLOCK_PTR)
if (single_succ (bb) != EXIT_BLOCK_PTR)
{
error ("Return edge does not point to exit in bb %d\n",
bb->index);
@ -4064,7 +4066,7 @@ tree_make_forwarder_block (edge fallthru)
dummy = fallthru->src;
bb = fallthru->dest;
if (EDGE_COUNT (bb->preds) == 1)
if (single_pred_p (bb))
return;
/* If we redirected a branch we must create new phi nodes at the
@ -4105,16 +4107,16 @@ tree_forwarder_block_p (basic_block bb, bool phi_wanted)
block_stmt_iterator bsi;
/* BB must have a single outgoing edge. */
if (EDGE_COUNT (bb->succs) != 1
if (!single_succ_p (bb)
/* If PHI_WANTED is false, BB must not have any PHI nodes.
Otherwise, BB must have PHI nodes. */
|| (phi_nodes (bb) != NULL_TREE) != phi_wanted
/* BB may not be a predecessor of EXIT_BLOCK_PTR. */
|| EDGE_SUCC (bb, 0)->dest == EXIT_BLOCK_PTR
|| single_succ (bb) == EXIT_BLOCK_PTR
/* Nor should this be an infinite loop. */
|| EDGE_SUCC (bb, 0)->dest == bb
|| single_succ (bb) == bb
/* BB may not have an abnormal outgoing edge. */
|| (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL))
|| (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
return false;
#if ENABLE_CHECKING
@ -4179,7 +4181,7 @@ has_abnormal_incoming_edge_p (basic_block bb)
static bool
remove_forwarder_block (basic_block bb, basic_block **worklist)
{
edge succ = EDGE_SUCC (bb, 0), e, s;
edge succ = single_succ_edge (bb), e, s;
basic_block dest = succ->dest;
tree label;
tree phi;
@ -4338,7 +4340,7 @@ cleanup_forwarder_blocks (void)
static void
remove_forwarder_block_with_phi (basic_block bb)
{
edge succ = EDGE_SUCC (bb, 0);
edge succ = single_succ_edge (bb);
basic_block dest = succ->dest;
tree label;
basic_block dombb, domdest, dom;
@ -4379,7 +4381,7 @@ remove_forwarder_block_with_phi (basic_block bb)
/* PHI arguments are different. Create a forwarder block by
splitting E so that we can merge PHI arguments on E to
DEST. */
e = EDGE_SUCC (split_edge (e), 0);
e = single_succ_edge (split_edge (e));
}
s = redirect_edge_and_branch (e, dest);
@ -4481,7 +4483,7 @@ merge_phi_nodes (void)
if (!tree_forwarder_block_p (bb, true))
continue;
dest = EDGE_SUCC (bb, 0)->dest;
dest = single_succ (bb);
/* We have to feed into another basic block with PHI
nodes. */
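
The tree-cfg.c hunks above repeat one idiom: test single_succ_p or
single_pred_p first, then use the matching accessor to reach the lone edge or
block.  A hypothetical helper (forwards_to is an invented name, not part of
the patch) condenses the pattern:

/* True iff A's only successor is B -- the old-style test
   EDGE_COUNT (a->succs) == 1 && EDGE_SUCC (a, 0)->dest == b.  */
static bool
forwards_to (basic_block a, basic_block b)
{
  return single_succ_p (a) && single_succ (a) == b;
}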


@ -187,9 +187,9 @@ tree_if_conversion (struct loop *loop, bool for_vectorizer)
/* If current bb has only one successor, then consider it as an
unconditional goto. */
if (EDGE_COUNT (bb->succs) == 1)
if (single_succ_p (bb))
{
basic_block bb_n = EDGE_SUCC (bb, 0)->dest;
basic_block bb_n = single_succ (bb);
if (cond != NULL_TREE)
add_to_predicate_list (bb_n, cond);
cond = NULL_TREE;


@ -545,7 +545,7 @@ mf_build_check_statement_for (tree base, tree limit,
/* We expect that the conditional jump we will construct will not
be taken very often as it basically is an exception condition. */
predict_edge_def (EDGE_PRED (then_bb, 0), PRED_MUDFLAP, NOT_TAKEN);
predict_edge_def (single_pred_edge (then_bb), PRED_MUDFLAP, NOT_TAKEN);
/* Mark the pseudo-fallthrough edge from cond_bb to join_bb. */
e = find_edge (cond_bb, join_bb);


@ -757,7 +757,7 @@ remove_dead_stmt (block_stmt_iterator *i, basic_block bb)
EDGE_SUCC (bb, 0)->flags &= ~EDGE_FALLTHRU;
/* Remove the remaining the outgoing edges. */
while (EDGE_COUNT (bb->succs) != 1)
while (!single_succ_p (bb))
remove_edge (EDGE_SUCC (bb, 1));
}


@ -995,13 +995,13 @@ dom_opt_finalize_block (struct dom_walk_data *walk_data, basic_block bb)
the edge from BB through its successor.
Do this before we remove entries from our equivalence tables. */
if (EDGE_COUNT (bb->succs) == 1
&& (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
&& (get_immediate_dominator (CDI_DOMINATORS, EDGE_SUCC (bb, 0)->dest) != bb
|| phi_nodes (EDGE_SUCC (bb, 0)->dest)))
if (single_succ_p (bb)
&& (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
&& (get_immediate_dominator (CDI_DOMINATORS, single_succ (bb)) != bb
|| phi_nodes (single_succ (bb))))
{
thread_across_edge (walk_data, EDGE_SUCC (bb, 0));
thread_across_edge (walk_data, single_succ_edge (bb));
}
else if ((last = last_stmt (bb))
&& TREE_CODE (last) == GOTO_EXPR


@ -60,7 +60,7 @@ should_duplicate_loop_header_p (basic_block header, struct loop *loop,
return false;
gcc_assert (EDGE_COUNT (header->succs) > 0);
if (EDGE_COUNT (header->succs) == 1)
if (single_succ_p (header))
return false;
if (flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 0)->dest)
&& flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 1)->dest))
@ -68,7 +68,7 @@ should_duplicate_loop_header_p (basic_block header, struct loop *loop,
/* If this is not the original loop header, we want it to have just
one predecessor in order to match the && pattern. */
if (header != loop->header && EDGE_COUNT (header->preds) >= 2)
if (header != loop->header && !single_pred_p (header))
return false;
last = last_stmt (header);
@ -193,8 +193,8 @@ copy_loop_headers (void)
/* Ensure that the header will have just the latch as a predecessor
inside the loop. */
if (EDGE_COUNT (exit->dest->preds) > 1)
exit = EDGE_SUCC (loop_split_edge_with (exit, NULL), 0);
if (!single_pred_p (exit->dest))
exit = single_succ_edge (loop_split_edge_with (exit, NULL));
if (!tree_duplicate_sese_region (loop_preheader_edge (loop), exit,
bbs, n_bbs, NULL))


@ -644,8 +644,8 @@ loop_commit_inserts (void)
{
bb = BASIC_BLOCK (i);
add_bb_to_loop (bb,
find_common_loop (EDGE_SUCC (bb, 0)->dest->loop_father,
EDGE_PRED (bb, 0)->src->loop_father));
find_common_loop (single_pred (bb)->loop_father,
single_succ (bb)->loop_father));
}
}


@ -4987,7 +4987,7 @@ compute_phi_arg_on_exit (edge exit, tree stmts, tree op)
block_stmt_iterator bsi;
tree phi, stmt, def, next;
if (EDGE_COUNT (exit->dest->preds) > 1)
if (!single_pred_p (exit->dest))
split_loop_exit_edge (exit);
if (TREE_CODE (stmts) == STATEMENT_LIST)


@ -442,7 +442,7 @@ split_loop_exit_edge (edge exit)
for (phi = phi_nodes (dest); phi; phi = PHI_CHAIN (phi))
{
op_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, EDGE_SUCC (bb, 0));
op_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (bb));
name = USE_FROM_PTR (op_p);
@ -506,10 +506,10 @@ ip_normal_pos (struct loop *loop)
basic_block bb;
edge exit;
if (EDGE_COUNT (loop->latch->preds) > 1)
if (!single_pred_p (loop->latch))
return NULL;
bb = EDGE_PRED (loop->latch, 0)->src;
bb = single_pred (loop->latch);
last = last_stmt (bb);
if (TREE_CODE (last) != COND_EXPR)
return NULL;
@ -757,7 +757,7 @@ lv_adjust_loop_entry_edge (basic_block first_head,
/* Adjust edges appropriately to connect new head with first head
as well as second head. */
e0 = EDGE_SUCC (new_head, 0);
e0 = single_succ_edge (new_head);
e0->flags &= ~EDGE_FALLTHRU;
e0->flags |= EDGE_FALSE_VALUE;
e1 = make_edge (new_head, first_head, EDGE_TRUE_VALUE);
@ -816,12 +816,12 @@ tree_ssa_loop_version (struct loops *loops, struct loop * loop,
*condition_bb = lv_adjust_loop_entry_edge (first_head, second_head, entry,
cond_expr);
latch_edge = EDGE_SUCC (loop->latch->rbi->copy, 0);
latch_edge = single_succ_edge (loop->latch->rbi->copy);
extract_true_false_edges_from_block (*condition_bb, &true_edge, &false_edge);
nloop = loopify (loops,
latch_edge,
EDGE_PRED (loop->header->rbi->copy, 0),
single_pred_edge (loop->header->rbi->copy),
*condition_bb, true_edge, false_edge,
false /* Do not redirect all edges. */);
@ -842,7 +842,7 @@ tree_ssa_loop_version (struct loops *loops, struct loop * loop,
(*condition_bb)->flags |= BB_IRREDUCIBLE_LOOP;
loop_preheader_edge (loop)->flags |= EDGE_IRREDUCIBLE_LOOP;
loop_preheader_edge (nloop)->flags |= EDGE_IRREDUCIBLE_LOOP;
EDGE_PRED ((*condition_bb), 0)->flags |= EDGE_IRREDUCIBLE_LOOP;
single_pred_edge ((*condition_bb))->flags |= EDGE_IRREDUCIBLE_LOOP;
}
/* At this point condition_bb is loop predheader with two successors,


@ -682,9 +682,9 @@ simplify_using_initial_conditions (struct loop *loop, tree expr,
bb != ENTRY_BLOCK_PTR;
bb = get_immediate_dominator (CDI_DOMINATORS, bb))
{
e = EDGE_PRED (bb, 0);
if (EDGE_COUNT (bb->preds) > 1)
if (!single_pred_p (bb))
continue;
e = single_pred_edge (bb);
if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
continue;


@ -169,10 +169,10 @@ simplify_using_entry_checks (struct loop *loop, tree cond)
? boolean_true_node
: boolean_false_node);
if (EDGE_COUNT (e->src->preds) > 1)
if (!single_pred_p (e->src))
return cond;
e = EDGE_PRED (e->src, 0);
e = single_pred_edge (e->src);
if (e->src == ENTRY_BLOCK_PTR)
return cond;
}


@ -138,9 +138,9 @@ tree_ssa_phiopt (void)
continue;
/* If either bb1's succ or bb2 or bb2's succ is non NULL. */
if (EDGE_COUNT (bb1->succs) < 1
if (EDGE_COUNT (bb1->succs) == 0
|| bb2 == NULL
|| EDGE_COUNT (bb2->succs) < 1)
|| EDGE_COUNT (bb2->succs) == 0)
continue;
/* Find the bb which is the fall through to the other. */
@ -161,13 +161,13 @@ tree_ssa_phiopt (void)
e1 = EDGE_SUCC (bb1, 0);
/* Make sure that bb1 is just a fall through. */
if (EDGE_COUNT (bb1->succs) > 1
if (!single_succ_p (bb1)
|| (e1->flags & EDGE_FALLTHRU) == 0)
continue;
/* Also make that bb1 only have one pred and it is bb. */
if (EDGE_COUNT (bb1->preds) > 1
|| EDGE_PRED (bb1, 0)->src != bb)
if (!single_pred_p (bb1)
|| single_pred (bb1) != bb)
continue;
phi = phi_nodes (bb2);
@ -471,7 +471,7 @@ value_replacement (basic_block cond_bb, basic_block middle_bb,
edge from OTHER_BLOCK which reaches BB and represents the desired
path from COND_BLOCK. */
if (e->dest == middle_bb)
e = EDGE_SUCC (e->dest, 0);
e = single_succ_edge (e->dest);
/* Now we know the incoming edge to BB that has the argument for the
RHS of our new assignment statement. */


@ -1143,10 +1143,10 @@ compute_antic_aux (basic_block block, bool block_has_abnormal_pred_edge)
;
/* If we have one successor, we could have some phi nodes to
translate through. */
else if (EDGE_COUNT (block->succs) == 1)
else if (single_succ_p (block))
{
phi_translate_set (ANTIC_OUT, ANTIC_IN(EDGE_SUCC (block, 0)->dest),
block, EDGE_SUCC (block, 0)->dest);
phi_translate_set (ANTIC_OUT, ANTIC_IN(single_succ (block)),
block, single_succ (block));
}
/* If we have multiple successors, we take the intersection of all of
them. */
@ -1554,7 +1554,7 @@ insert_aux (basic_block block)
bitmap_value_replace_in_set (AVAIL_OUT (block), ssa_name (i));
}
}
if (EDGE_COUNT (block->preds) > 1)
if (!single_pred_p (block))
{
value_set_node_t node;
for (node = ANTIC_IN (block)->head;
@ -2138,9 +2138,9 @@ init_pre (bool do_fre)
ENTRY_BLOCK_PTR (FIXME, if ENTRY_BLOCK_PTR had an index number
different than -1 we wouldn't have to hack this. tree-ssa-dce.c
needs a similar change). */
if (EDGE_COUNT (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest->preds) > 1)
if (!(EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->flags & EDGE_ABNORMAL))
split_edge (EDGE_SUCC (ENTRY_BLOCK_PTR, 0));
if (!single_pred_p (single_succ (ENTRY_BLOCK_PTR)))
if (!(single_succ_edge (ENTRY_BLOCK_PTR)->flags & EDGE_ABNORMAL))
split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
FOR_ALL_BB (bb)
bb->aux = xcalloc (1, sizeof (struct bb_value_sets));


@ -616,9 +616,9 @@ redirect_edges (void **slot, void *data)
rd->outgoing_edge->dest);
/* And fixup the flags on the single remaining edge. */
EDGE_SUCC (local_info->bb, 0)->flags
single_succ_edge (local_info->bb)->flags
&= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
EDGE_SUCC (local_info->bb, 0)->flags |= EDGE_FALLTHRU;
single_succ_edge (local_info->bb)->flags |= EDGE_FALLTHRU;
}
}


@ -211,7 +211,7 @@ independent_of_stmt_p (tree expr, tree at, block_stmt_iterator bsi)
/* Mark the blocks in the chain leading to the end. */
at_bb = bb_for_stmt (at);
call_bb = bb_for_stmt (bsi_stmt (bsi));
for (bb = call_bb; bb != at_bb; bb = EDGE_SUCC (bb, 0)->dest)
for (bb = call_bb; bb != at_bb; bb = single_succ (bb))
bb->aux = &bb->aux;
bb->aux = &bb->aux;
@ -255,7 +255,7 @@ independent_of_stmt_p (tree expr, tree at, block_stmt_iterator bsi)
}
/* Unmark the blocks. */
for (bb = call_bb; bb != at_bb; bb = EDGE_SUCC (bb, 0)->dest)
for (bb = call_bb; bb != at_bb; bb = single_succ (bb))
bb->aux = NULL;
bb->aux = NULL;
@ -382,7 +382,7 @@ find_tail_calls (basic_block bb, struct tailcall **ret)
basic_block abb;
stmt_ann_t ann;
if (EDGE_COUNT (bb->succs) > 1)
if (!single_succ_p (bb))
return;
for (bsi = bsi_last (bb); !bsi_end_p (bsi); bsi_prev (&bsi))
@ -482,8 +482,8 @@ find_tail_calls (basic_block bb, struct tailcall **ret)
while (bsi_end_p (absi))
{
ass_var = propagate_through_phis (ass_var, EDGE_SUCC (abb, 0));
abb = EDGE_SUCC (abb, 0)->dest;
ass_var = propagate_through_phis (ass_var, single_succ_edge (abb));
abb = single_succ (abb);
absi = bsi_start (abb);
}
@ -701,7 +701,7 @@ eliminate_tail_call (struct tailcall *t)
if (TREE_CODE (stmt) == MODIFY_EXPR)
stmt = TREE_OPERAND (stmt, 1);
first = EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest;
first = single_succ (ENTRY_BLOCK_PTR);
/* Remove the code after call_bsi that will become unreachable. The
possibly unreachable code in other blocks is removed later in
@ -721,7 +721,7 @@ eliminate_tail_call (struct tailcall *t)
}
/* Replace the call by a jump to the start of function. */
e = redirect_edge_and_branch (EDGE_SUCC (t->call_block, 0), first);
e = redirect_edge_and_branch (single_succ_edge (t->call_block), first);
gcc_assert (e);
PENDING_STMT (e) = NULL_TREE;
@ -776,7 +776,7 @@ eliminate_tail_call (struct tailcall *t)
var_ann (param)->default_def = new_name;
phi = create_phi_node (name, first);
SSA_NAME_DEF_STMT (name) = phi;
add_phi_arg (phi, new_name, EDGE_SUCC (ENTRY_BLOCK_PTR, 0));
add_phi_arg (phi, new_name, single_succ_edge (ENTRY_BLOCK_PTR));
/* For all calls the same set of variables should be clobbered. This
means that there always should be the appropriate phi node except
@ -843,7 +843,7 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls)
bool phis_constructed = false;
struct tailcall *tailcalls = NULL, *act, *next;
bool changed = false;
basic_block first = EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest;
basic_block first = single_succ (ENTRY_BLOCK_PTR);
tree stmt, param, ret_type, tmp, phi;
edge_iterator ei;
@ -873,8 +873,8 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls)
if (!phis_constructed)
{
/* Ensure that there is only one predecessor of the block. */
if (EDGE_COUNT (first->preds) > 1)
first = split_edge (EDGE_SUCC (ENTRY_BLOCK_PTR, 0));
if (!single_pred_p (first))
first = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
/* Copy the args if needed. */
for (param = DECL_ARGUMENTS (current_function_decl);
@ -894,7 +894,7 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls)
var_ann (param)->default_def = new_name;
phi = create_phi_node (name, first);
SSA_NAME_DEF_STMT (name) = phi;
add_phi_arg (phi, new_name, EDGE_PRED (first, 0));
add_phi_arg (phi, new_name, single_pred_edge (first));
}
phis_constructed = true;
}
@ -911,7 +911,7 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls)
/* RET_TYPE can be a float when -ffast-maths is
enabled. */
fold_convert (ret_type, integer_zero_node),
EDGE_PRED (first, 0));
single_pred_edge (first));
a_acc = PHI_RESULT (phi);
}
@ -927,7 +927,7 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls)
/* RET_TYPE can be a float when -ffast-maths is
enabled. */
fold_convert (ret_type, integer_one_node),
EDGE_PRED (first, 0));
single_pred_edge (first));
m_acc = PHI_RESULT (phi);
}
}
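
The tail-call hunks normalize the block that follows the entry edge so that
single_pred_edge (first) is well defined before PHI arguments are attached to
it.  Condensed into a hypothetical helper (the name is invented; split_edge
and the accessors are the calls used in the hunks above):

/* Return the first real block, splitting the entry edge if several
   edges reach it, so the result has exactly one predecessor.  */
static basic_block
entry_succ_with_single_pred (void)
{
  basic_block first = single_succ (ENTRY_BLOCK_PTR);

  if (!single_pred_p (first))
    first = split_edge (single_succ_edge (ENTRY_BLOCK_PTR));
  return first;
}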


@ -2309,7 +2309,7 @@ vect_analyze_loop_form (struct loop *loop)
}
/* Make sure there exists a single-predecessor exit bb: */
if (EDGE_COUNT (loop->single_exit->dest->preds) != 1)
if (!single_pred_p (loop->single_exit->dest))
{
edge e = loop->single_exit;
if (!(e->flags & EDGE_ABNORMAL))


@ -1322,7 +1322,7 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
/* gcc_assert (vect_can_advance_ivs_p (loop_vinfo)); */
/* Make sure there exists a single-predecessor exit bb: */
gcc_assert (EDGE_COUNT (exit_bb->preds) == 1);
gcc_assert (single_pred_p (exit_bb));
for (phi = phi_nodes (loop->header), phi1 = phi_nodes (update_bb);
phi && phi1;
@ -1373,7 +1373,7 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
/* Fix phi expressions in the successor bb. */
gcc_assert (PHI_ARG_DEF_FROM_EDGE (phi1, update_e) ==
PHI_ARG_DEF_FROM_EDGE (phi, EDGE_SUCC (loop->latch, 0)));
PHI_ARG_DEF_FROM_EDGE (phi, single_succ_edge (loop->latch)));
SET_PHI_ARG_DEF (phi1, update_e->dest_idx, ni_name);
}
}


@ -501,7 +501,7 @@ slpeel_update_phi_nodes_for_guard (edge guard_edge,
tree orig_phi, new_phi, update_phi;
tree guard_arg, loop_arg;
basic_block new_merge_bb = guard_edge->dest;
edge e = EDGE_SUCC (new_merge_bb, 0);
edge e = single_succ_edge (new_merge_bb);
basic_block update_bb = e->dest;
basic_block orig_bb = (entry_phis ? loop->header : update_bb);
@ -742,7 +742,7 @@ slpeel_add_loop_guard (basic_block guard_bb, tree cond, basic_block exit_bb,
edge new_e, enter_e;
tree cond_stmt, then_label, else_label;
enter_e = EDGE_SUCC (guard_bb, 0);
enter_e = single_succ_edge (guard_bb);
enter_e->flags &= ~EDGE_FALLTHRU;
enter_e->flags |= EDGE_FALSE_VALUE;
bsi = bsi_last (guard_bb);