cfganal.c (flow_depth_first_order_compute, [...]): Use gcc_assert or gcc_unreachable.

	* cfganal.c (flow_depth_first_order_compute,
	dfs_enumerate_from): Use gcc_assert or gcc_unreachable.
	* cfgbuild.c (inside_basic_block_p, control_flow_insn_p,
	make_label_edge, make_edges, find_basic_blocks_1): Likewise.
	* cfg.c (clear_edges, initialize_bb_rbi, compact_blocks,
	remove_edge, alloc_aux_for_blocks, free_aux_for_blocks,
	alloc_aux_for_edges, free_aux_for_edges): Likewise.
	* cfgcleanup.c (try_forward_edges,
	merge_blocks_move_predecessor_nojumps,
	merge_blocks_move_successor_nojumps): Likewise.
	* cfgexpand.c (expand_gimple_cond_expr,
	expand_gimple_tailcall): Likewise.
	* cfghooks.c (duplicate_block): Likewise.
	* cfglayout.c (record_effective_endpoints,
	insn_locators_initialize, change_scope, fixup_reorder_chain,
	verify_insn_chain, fixup_fallthru_exit_predecessor,
	duplicate_insn_chain, cfg_layout_finalize): Likewise.
	* cfgloopanal.c (check_irred): Likewise.
	* cfgloop.c (superloop_at_depth, flow_loops_free,
	flow_loop_entry_edges_find, flow_loops_find,
	flow_loop_outside_edge_p, get_loop_body,
	get_loop_body_in_dom_order, get_loop_body_in_bfs_order,
	get_loop_exit_edges, num_loop_branches, cancel_loop,
	verify_loop_structure): Likewise.
	* cfgloopmanip.c (find_path, remove_path, loop_delete_branch_edge,
	duplicate_loop_to_header_edge, create_preheader,
	create_loop_notes): Likewise.
	* cfgrtl.c (delete_insn, try_redirect_by_replacing_jump,
	redirect_branch_edge, force_nonfallthru_and_redirect,
	rtl_split_edge, insert_insn_on_edge, commit_one_edge_insertion,
	commit_edge_insertions, commit_edge_insertions_watch_calls,
	purge_dead_edges, cfg_layout_redirect_edge_and_branch,
	cfg_layout_redirect_edge_and_branch_force,
	cfg_layout_merge_blocks, rtl_flow_call_edges_add): Likewise.
	* cgraph.c (cgraph_node, cgraph_create_edge, cgraph_remove_edge,
	cgraph_redirect_edge_callee, cgraph_global_info, cgraph_rtl_info,
	cgraph_varpool_node): Likewise.
	* cgraphunit.c (cgraph_finalize_function,
	cgraph_finalize_compilation_unit, cgraph_mark_functions_to_output,
	cgraph_expand_function, cgraph_remove_unreachable_nodes,
	cgraph_clone_inlined_nodes, cgraph_mark_inline_edge,
	cgraph_mark_inline, cgraph_expand_all_functions,
	cgraph_build_static_cdtor): Likewise.
	* combine.c (do_SUBST, try_combine, subst, combine_simplify_rtx,
	simplify_logical, distribute_notes, insn_cuid): Likewise.
	* conflict.c (conflict_graph_add, print_conflict): Likewise.
	* coverage.c (rtl_coverage_counter_ref, tree_coverage_counter_ref,
	coverage_checksum_string): Likewise.
	* cse.c (make_new_qty, make_regs_eqv, insert, invalidate,
	hash_rtx, exp_equiv_p, cse_basic_block, count_reg_usage,
	cse_cc_succs, cse_condition_code_reg): Likewise.
	* cselib.c (entry_and_rtx_equal_p, remove_useless_values,
	rtx_equal_for_cselib_p, wrap_constant, cselib_hash_rtx,
	new_cselib_val, cselib_subst_to_values, cselib_invalidate_regno,
	cselib_record_set): Likewise.

From-SVN: r87145
Nathan Sidwell 2004-09-07 15:46:53 +00:00 committed by Nathan Sidwell
parent 6b094f38d7
commit 341c100fc5
19 changed files with 451 additions and 497 deletions
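
The hunks below all follow the same recipe: a guard of the form
"if (!invariant) abort ();" becomes "gcc_assert (invariant);", and an
abort () on a path that control flow must never reach (typically a
switch default) becomes "gcc_unreachable ();".  The following
self-contained sketch illustrates the idiom; the macro bodies are
simplified stand-ins, not GCC's actual system.h definitions (which
report file, line and function through fancy_abort).

/* Simplified stand-ins for GCC's checking macros.  */
#include <stdio.h>
#include <stdlib.h>

#define gcc_assert(EXPR) \
  ((EXPR) ? (void) 0 \
   : (fprintf (stderr, "%s:%d: assertion failed: %s\n", \
               __FILE__, __LINE__, #EXPR), abort ()))

#define gcc_unreachable() \
  (fprintf (stderr, "%s:%d: unreachable\n", __FILE__, __LINE__), \
   abort ())

static int
classify (int code)
{
  switch (code)
    {
    case 0:
      return -1;
    case 1:
      return 1;
    default:
      /* Old style: abort ();  -- the new spelling names the intent.  */
      gcc_unreachable ();
    }
}

int
main (void)
{
  /* Old style: if (classify (1) <= 0) abort ();  */
  gcc_assert (classify (1) > 0);
  puts ("ok");
  return 0;
}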

cfg.c

@ -173,8 +173,7 @@ clear_edges (void)
EXIT_BLOCK_PTR->pred = NULL;
ENTRY_BLOCK_PTR->succ = NULL;
if (n_edges)
abort ();
gcc_assert (!n_edges);
}
/* Allocate memory for basic_block. */
@ -211,8 +210,7 @@ free_rbi_pool (void)
void
initialize_bb_rbi (basic_block bb)
{
if (bb->rbi)
abort ();
gcc_assert (!bb->rbi);
bb->rbi = pool_alloc (rbi_pool);
memset (bb->rbi, 0, sizeof (struct reorder_block_def));
}
@ -252,8 +250,7 @@ compact_blocks (void)
i++;
}
if (i != n_basic_blocks)
abort ();
gcc_assert (i == n_basic_blocks);
for (; i < last_basic_block; i++)
BASIC_BLOCK (i) = NULL;
@ -377,8 +374,7 @@ remove_edge (edge e)
for (tmp = src->succ; tmp && tmp != e; tmp = tmp->succ_next)
last_succ = tmp;
if (!tmp)
abort ();
gcc_assert (tmp);
if (last_succ)
last_succ->succ_next = e->succ_next;
else
@ -387,8 +383,7 @@ remove_edge (edge e)
for (tmp = dest->pred; tmp && tmp != e; tmp = tmp->pred_next)
last_pred = tmp;
if (!tmp)
abort ();
gcc_assert (tmp);
if (last_pred)
last_pred->pred_next = e->pred_next;
else
@ -696,8 +691,7 @@ inline void
alloc_aux_for_block (basic_block bb, int size)
{
/* Verify that aux field is clear. */
if (bb->aux || !first_block_aux_obj)
abort ();
gcc_assert (!bb->aux && first_block_aux_obj);
bb->aux = obstack_alloc (&block_aux_obstack, size);
memset (bb->aux, 0, size);
}
@ -715,10 +709,10 @@ alloc_aux_for_blocks (int size)
gcc_obstack_init (&block_aux_obstack);
initialized = 1;
}
/* Check whether AUX data are still allocated. */
else if (first_block_aux_obj)
abort ();
else
/* Check whether AUX data are still allocated. */
gcc_assert (!first_block_aux_obj);
first_block_aux_obj = obstack_alloc (&block_aux_obstack, 0);
if (size)
{
@ -746,8 +740,7 @@ clear_aux_for_blocks (void)
void
free_aux_for_blocks (void)
{
if (!first_block_aux_obj)
abort ();
gcc_assert (first_block_aux_obj);
obstack_free (&block_aux_obstack, first_block_aux_obj);
first_block_aux_obj = NULL;
@ -761,8 +754,7 @@ inline void
alloc_aux_for_edge (edge e, int size)
{
/* Verify that aux field is clear. */
if (e->aux || !first_edge_aux_obj)
abort ();
gcc_assert (!e->aux && first_edge_aux_obj);
e->aux = obstack_alloc (&edge_aux_obstack, size);
memset (e->aux, 0, size);
}
@ -780,10 +772,9 @@ alloc_aux_for_edges (int size)
gcc_obstack_init (&edge_aux_obstack);
initialized = 1;
}
/* Check whether AUX data are still allocated. */
else if (first_edge_aux_obj)
abort ();
else
/* Check whether AUX data are still allocated. */
gcc_assert (!first_edge_aux_obj);
first_edge_aux_obj = obstack_alloc (&edge_aux_obstack, 0);
if (size)
@ -821,8 +812,7 @@ clear_aux_for_edges (void)
void
free_aux_for_edges (void)
{
if (!first_edge_aux_obj)
abort ();
gcc_assert (first_edge_aux_obj);
obstack_free (&edge_aux_obstack, first_edge_aux_obj);
first_edge_aux_obj = NULL;

cfganal.c

@ -762,14 +762,8 @@ flow_depth_first_order_compute (int *dfs_order, int *rc_order)
free (stack);
sbitmap_free (visited);
/* The number of nodes visited should not be greater than
n_basic_blocks. */
if (dfsnum > n_basic_blocks)
abort ();
/* There are some nodes left in the CFG that are unreachable. */
if (dfsnum < n_basic_blocks)
abort ();
/* The number of nodes visited should be the number of blocks. */
gcc_assert (dfsnum == n_basic_blocks);
return dfsnum;
}
@ -1019,8 +1013,7 @@ dfs_enumerate_from (basic_block bb, int reverse,
for (e = lbb->pred; e; e = e->pred_next)
if (!(e->src->flags & BB_VISITED) && predicate (e->src, data))
{
if (tv == rslt_max)
abort ();
gcc_assert (tv != rslt_max);
rslt[tv++] = st[sp++] = e->src;
e->src->flags |= BB_VISITED;
}
@ -1030,8 +1023,7 @@ dfs_enumerate_from (basic_block bb, int reverse,
for (e = lbb->succ; e; e = e->succ_next)
if (!(e->dest->flags & BB_VISITED) && predicate (e->dest, data))
{
if (tv == rslt_max)
abort ();
gcc_assert (tv != rslt_max);
rslt[tv++] = st[sp++] = e->dest;
e->dest->flags |= BB_VISITED;
}

cfgbuild.c

@ -83,7 +83,7 @@ inside_basic_block_p (rtx insn)
return false;
default:
abort ();
gcc_unreachable ();
}
}
@ -131,7 +131,7 @@ control_flow_insn_p (rtx insn)
return false;
default:
abort ();
gcc_unreachable ();
}
}
@ -183,8 +183,7 @@ count_basic_blocks (rtx f)
static void
make_label_edge (sbitmap *edge_cache, basic_block src, rtx label, int flags)
{
if (!LABEL_P (label))
abort ();
gcc_assert (LABEL_P (label));
/* If the label was never emitted, this insn is junk, but avoid a
crash trying to refer to BLOCK_FOR_INSN (label). This can happen
@ -345,8 +344,7 @@ make_edges (basic_block min, basic_block max, int update_p)
/* Otherwise, we have a plain conditional or unconditional jump. */
else
{
if (! JUMP_LABEL (insn))
abort ();
gcc_assert (JUMP_LABEL (insn));
make_label_edge (edge_cache, bb, JUMP_LABEL (insn), 0);
}
}
@ -490,7 +488,7 @@ find_basic_blocks_1 (rtx f)
break;
default:
abort ();
gcc_unreachable ();
}
}
@ -499,8 +497,7 @@ find_basic_blocks_1 (rtx f)
else if (bb_note)
delete_insn (bb_note);
if (last_basic_block != n_basic_blocks)
abort ();
gcc_assert (last_basic_block == n_basic_blocks);
clear_aux_for_blocks ();
}

cfgcleanup.c

@ -519,8 +519,7 @@ try_forward_edges (int mode, basic_block b)
if (t->dest == b)
break;
if (nthreaded_edges >= n_basic_blocks)
abort ();
gcc_assert (nthreaded_edges < n_basic_blocks);
threaded_edges[nthreaded_edges++] = t;
new_target = t->dest;
@ -625,11 +624,10 @@ try_forward_edges (int mode, basic_block b)
{
edge e;
int prob;
if (n >= nthreaded_edges)
abort ();
gcc_assert (n < nthreaded_edges);
t = threaded_edges [n++];
if (t->src != first)
abort ();
gcc_assert (t->src == first);
if (first->frequency)
prob = edge_frequency * REG_BR_PROB_BASE / first->frequency;
else
@ -686,6 +684,7 @@ static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
rtx barrier;
bool only_notes;
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
@ -703,8 +702,7 @@ merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
return;
barrier = next_nonnote_insn (BB_END (a));
if (!BARRIER_P (barrier))
abort ();
gcc_assert (BARRIER_P (barrier));
delete_insn (barrier);
/* Move block and loop notes out of the chain so that we do not
@ -714,8 +712,8 @@ merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
and adjust the block trees appropriately. Even better would be to have
a tighter connection between block trees and rtl so that this is not
necessary. */
if (squeeze_notes (&BB_HEAD (a), &BB_END (a)))
abort ();
only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
gcc_assert (!only_notes);
/* Scramble the insn chain. */
if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
@ -744,6 +742,7 @@ merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
rtx barrier, real_b_end;
rtx label, table;
bool only_notes;
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
@ -782,8 +781,9 @@ merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
and adjust the block trees appropriately. Even better would be to have
a tighter connection between block trees and rtl so that this is not
necessary. */
if (squeeze_notes (&BB_HEAD (b), &BB_END (b)))
abort ();
only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
gcc_assert (!only_notes);
/* Scramble the insn chain. */
reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
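
The squeeze_notes call sites above show the one non-mechanical part of
the conversion: gcc_assert is not guaranteed to evaluate its argument
(with assert checking disabled it expands to a form that is
type-checked but never executed), so a call with side effects cannot
sit inside the assertion.  The commit therefore hoists each call into
a local (only_notes) and asserts on the saved result.  A minimal
sketch of the trap, reusing the simplified gcc_assert above with a
hypothetical side-effecting helper:

static int note_count;

/* Hypothetical stand-in for squeeze_notes: it mutates state and
   returns nonzero only in the unexpected case.  */
static int
renumber_notes (void)
{
  note_count++;			/* the side effect we must preserve */
  return 0;
}

void
merge_step (void)
{
  /* Wrong: if the assertion compiles away, renumber_notes () is
     never called:
       gcc_assert (!renumber_notes ());  */

  /* Right, as in merge_blocks_move_predecessor_nojumps:  */
  int failed = renumber_notes ();
  gcc_assert (!failed);
}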

cfgexpand.c

@ -833,8 +833,8 @@ expand_gimple_cond_expr (basic_block bb, tree stmt)
jumpifnot (pred, label_rtx (GOTO_DESTINATION (else_exp)));
return NULL;
}
if (TREE_CODE (then_exp) != GOTO_EXPR || TREE_CODE (else_exp) != GOTO_EXPR)
abort ();
gcc_assert (TREE_CODE (then_exp) == GOTO_EXPR
&& TREE_CODE (else_exp) == GOTO_EXPR);
jumpif (pred, label_rtx (GOTO_DESTINATION (then_exp)));
last = get_last_insn ();
@ -936,8 +936,7 @@ expand_gimple_tailcall (basic_block bb, tree stmt, bool *can_fallthru)
after the sibcall (to perform the function return). These confuse the
find_sub_basic_blocks code, so we need to get rid of these. */
last = NEXT_INSN (last);
if (!BARRIER_P (last))
abort ();
gcc_assert (BARRIER_P (last));
*can_fallthru = false;
while (NEXT_INSN (last))

cfghooks.c

@ -678,11 +678,9 @@ duplicate_block (basic_block bb, edge e)
if (bb->count < new_count)
new_count = bb->count;
if (!bb->pred)
abort ();
gcc_assert (bb->pred);
#ifdef ENABLE_CHECKING
if (!can_duplicate_block_p (bb))
abort ();
gcc_assert (can_duplicate_block_p (bb));
#endif
new_bb = cfg_hooks->duplicate_block (bb);
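
duplicate_block also shows where the ENABLE_CHECKING guard survives:
the cheap field test (bb->pred) becomes an unconditional gcc_assert,
while the potentially expensive can_duplicate_block_p call stays under
#ifdef ENABLE_CHECKING so release builds skip it entirely.  A sketch
of the split, with hypothetical stand-ins and the simplified
gcc_assert from above:

struct block { void *pred; };

/* Hypothetical stand-in for an expensive structural predicate such
   as can_duplicate_block_p.  */
static int
deep_check (const struct block *bb)
{
  return bb->pred != NULL;	/* imagine a full CFG walk here */
}

void
duplicate (struct block *bb)
{
  /* Cheap invariant: always checked.  */
  gcc_assert (bb->pred);
#ifdef ENABLE_CHECKING
  /* Costly predicate: checking builds only.  */
  gcc_assert (deep_check (bb));
#endif
}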

cfglayout.c

@ -199,8 +199,9 @@ record_effective_endpoints (void)
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK;
insn = NEXT_INSN (insn))
continue;
if (!insn)
abort (); /* No basic blocks at all? */
/* No basic blocks at all? */
gcc_assert (insn);
if (PREV_INSN (insn))
cfg_layout_function_header =
unlink_insn_chain (get_insns (), PREV_INSN (insn));
@ -273,21 +274,14 @@ insn_locators_initialize (void)
if (NOTE_P (insn))
{
switch (NOTE_LINE_NUMBER (insn))
gcc_assert (NOTE_LINE_NUMBER (insn) != NOTE_INSN_BLOCK_BEG
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_BLOCK_END);
if (NOTE_LINE_NUMBER (insn) > 0)
{
case NOTE_INSN_BLOCK_BEG:
case NOTE_INSN_BLOCK_END:
abort ();
default:
if (NOTE_LINE_NUMBER (insn) > 0)
{
expanded_location xloc;
NOTE_EXPANDED_LOCATION (xloc, insn);
line_number = xloc.line;
file_name = xloc.file;
}
break;
expanded_location xloc;
NOTE_EXPANDED_LOCATION (xloc, insn);
line_number = xloc.line;
file_name = xloc.file;
}
}
else
@ -377,8 +371,7 @@ change_scope (rtx orig_insn, tree s1, tree s2)
while (ts1 != ts2)
{
if (ts1 == NULL || ts2 == NULL)
abort ();
gcc_assert (ts1 && ts2);
if (BLOCK_NUMBER (ts1) > BLOCK_NUMBER (ts2))
ts1 = BLOCK_SUPERCONTEXT (ts1);
else if (BLOCK_NUMBER (ts1) < BLOCK_NUMBER (ts2))
@ -615,8 +608,7 @@ fixup_reorder_chain (void)
}
}
if (index != n_basic_blocks)
abort ();
gcc_assert (index == n_basic_blocks);
NEXT_INSN (insn) = cfg_layout_function_footer;
if (cfg_layout_function_footer)
@ -675,11 +667,14 @@ fixup_reorder_chain (void)
{
rtx note;
edge e_fake;
bool redirected;
e_fake = unchecked_make_edge (bb, e_fall->dest, 0);
if (!redirect_jump (BB_END (bb), block_label (bb), 0))
abort ();
redirected = redirect_jump (BB_END (bb),
block_label (bb), 0);
gcc_assert (redirected);
note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
if (note)
{
@ -712,8 +707,8 @@ fixup_reorder_chain (void)
{
e_fall->flags &= ~EDGE_FALLTHRU;
#ifdef ENABLE_CHECKING
if (!could_fall_through (e_taken->src, e_taken->dest))
abort ();
gcc_assert (could_fall_through
(e_taken->src, e_taken->dest));
#endif
e_taken->flags |= EDGE_FALLTHRU;
update_br_prob_note (bb);
@ -736,31 +731,30 @@ fixup_reorder_chain (void)
{
e_fall->flags &= ~EDGE_FALLTHRU;
#ifdef ENABLE_CHECKING
if (!could_fall_through (e_taken->src, e_taken->dest))
abort ();
gcc_assert (could_fall_through
(e_taken->src, e_taken->dest));
#endif
e_taken->flags |= EDGE_FALLTHRU;
update_br_prob_note (bb);
continue;
}
}
else if (returnjump_p (bb_end_insn))
continue;
else
{
/* Otherwise we have some switch or computed jump. In the
99% case, there should not have been a fallthru edge. */
if (! e_fall)
#ifndef CASE_DROPS_THROUGH
/* Otherwise we have some return, switch or computed
jump. In the 99% case, there should not have been a
fallthru edge. */
gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
continue;
#else
if (returnjump_p (bb_end_insn) || !e_fall)
continue;
#ifdef CASE_DROPS_THROUGH
/* Except for VAX. Since we didn't have predication for the
tablejump, the fallthru block should not have moved. */
if (bb->rbi->next == e_fall->dest)
continue;
bb_end_insn = skip_insns_after_block (bb);
#else
abort ();
#endif
}
}
@ -903,20 +897,16 @@ verify_insn_chain (void)
for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
x != 0;
prevx = x, insn_cnt1++, x = NEXT_INSN (x))
if (PREV_INSN (x) != prevx)
abort ();
gcc_assert (PREV_INSN (x) == prevx);
if (prevx != get_last_insn ())
abort ();
gcc_assert (prevx == get_last_insn ());
for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
x != 0;
nextx = x, insn_cnt2++, x = PREV_INSN (x))
if (NEXT_INSN (x) != nextx)
abort ();
gcc_assert (NEXT_INSN (x) == nextx);
if (insn_cnt1 != insn_cnt2)
abort ();
gcc_assert (insn_cnt1 == insn_cnt2);
}
/* If we have assembler epilogues, the block falling through to exit must
@ -928,10 +918,10 @@ fixup_fallthru_exit_predecessor (void)
edge e;
basic_block bb = NULL;
/* This transformation is not valid before reload, because we might separate
a call from the instruction that copies the return value. */
if (! reload_completed)
abort ();
/* This transformation is not valid before reload, because we might
separate a call from the instruction that copies the return
value. */
gcc_assert (reload_completed);
for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
if (e->flags & EDGE_FALLTHRU)
@ -1058,31 +1048,23 @@ duplicate_insn_chain (rtx from, rtx to)
case NOTE_INSN_BASIC_BLOCK:
break;
/* There is no purpose to duplicate prologue. */
case NOTE_INSN_BLOCK_BEG:
case NOTE_INSN_BLOCK_END:
/* The BLOCK_BEG/BLOCK_END notes should be eliminated when BB
reordering is in the progress. */
case NOTE_INSN_EH_REGION_BEG:
case NOTE_INSN_EH_REGION_END:
/* Should never exist at BB duplication time. */
abort ();
break;
case NOTE_INSN_REPEATED_LINE_NUMBER:
case NOTE_INSN_UNLIKELY_EXECUTED_CODE:
emit_note_copy (insn);
break;
default:
if (NOTE_LINE_NUMBER (insn) < 0)
abort ();
/* All other notes should have already been eliminated.
*/
gcc_assert (NOTE_LINE_NUMBER (insn) >= 0);
/* It is possible that no_line_number is set and the note
won't be emitted. */
emit_note_copy (insn);
}
break;
default:
abort ();
gcc_unreachable ();
}
}
insn = NEXT_INSN (last);
@ -1217,7 +1199,7 @@ cfg_layout_finalize (void)
#ifdef ENABLE_CHECKING
verify_insn_chain ();
#endif
free_rbi_pool ();
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
bb->rbi = NULL;

cfgloop.c

@ -107,8 +107,7 @@ flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
struct loop *
superloop_at_depth (struct loop *loop, unsigned depth)
{
if (depth > (unsigned) loop->depth)
abort ();
gcc_assert (depth <= (unsigned) loop->depth);
if (depth == (unsigned) loop->depth)
return loop;
@ -213,8 +212,7 @@ flow_loops_free (struct loops *loops)
{
unsigned i;
if (! loops->num)
abort ();
gcc_assert (loops->num);
/* Free the loop descriptors. */
for (i = 0; i < loops->num; i++)
@ -253,8 +251,7 @@ flow_loop_entry_edges_find (struct loop *loop)
num_entries++;
}
if (! num_entries)
abort ();
gcc_assert (num_entries);
loop->entry_edges = xmalloc (num_entries * sizeof (edge *));
@ -794,8 +791,7 @@ flow_loops_find (struct loops *loops, int flags)
/* This function cannot be repeatedly called with different
flags to build up the loop information. The loop tree
must always be built if this function is called. */
if (! (flags & LOOP_TREE))
abort ();
gcc_assert (flags & LOOP_TREE);
memset (loops, 0, sizeof *loops);
@ -837,8 +833,7 @@ flow_loops_find (struct loops *loops, int flags)
{
basic_block latch = e->src;
if (e->flags & EDGE_ABNORMAL)
abort ();
gcc_assert (!(e->flags & EDGE_ABNORMAL));
/* Look for back edges where a predecessor is dominated
by this block. A natural loop has a single entry
@ -849,8 +844,7 @@ flow_loops_find (struct loops *loops, int flags)
&& dominated_by_p (CDI_DOMINATORS, latch, header))
{
/* Shared headers should be eliminated by now. */
if (more_latches)
abort ();
gcc_assert (!more_latches);
more_latches = 1;
SET_BIT (headers, header->index);
num_loops++;
@ -984,8 +978,7 @@ flow_bb_inside_loop_p (const struct loop *loop, const basic_block bb)
bool
flow_loop_outside_edge_p (const struct loop *loop, edge e)
{
if (e->dest != loop->header)
abort ();
gcc_assert (e->dest == loop->header);
return !flow_bb_inside_loop_p (loop, e->src);
}
@ -1005,8 +998,7 @@ get_loop_body (const struct loop *loop)
basic_block *tovisit, bb;
unsigned tv = 0;
if (!loop->num_nodes)
abort ();
gcc_assert (loop->num_nodes);
tovisit = xcalloc (loop->num_nodes, sizeof (basic_block));
tovisit[tv++] = loop->header;
@ -1014,8 +1006,7 @@ get_loop_body (const struct loop *loop)
if (loop->latch == EXIT_BLOCK_PTR)
{
/* There may be blocks unreachable from EXIT_BLOCK. */
if (loop->num_nodes != (unsigned) n_basic_blocks + 2)
abort ();
gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks + 2);
FOR_EACH_BB (bb)
tovisit[tv++] = bb;
tovisit[tv++] = EXIT_BLOCK_PTR;
@ -1027,8 +1018,7 @@ get_loop_body (const struct loop *loop)
loop->header) + 1;
}
if (tv != loop->num_nodes)
abort ();
gcc_assert (tv == loop->num_nodes);
return tovisit;
}
@ -1071,19 +1061,16 @@ get_loop_body_in_dom_order (const struct loop *loop)
basic_block *tovisit;
int tv;
if (!loop->num_nodes)
abort ();
gcc_assert (loop->num_nodes);
tovisit = xcalloc (loop->num_nodes, sizeof (basic_block));
if (loop->latch == EXIT_BLOCK_PTR)
abort ();
gcc_assert (loop->latch != EXIT_BLOCK_PTR);
tv = 0;
fill_sons_in_loop (loop, loop->header, tovisit, &tv);
if (tv != (int) loop->num_nodes)
abort ();
gcc_assert (tv == (int) loop->num_nodes);
return tovisit;
}
@ -1099,11 +1086,8 @@ get_loop_body_in_bfs_order (const struct loop *loop)
unsigned int i = 0;
unsigned int vc = 1;
if (!loop->num_nodes)
abort ();
if (loop->latch == EXIT_BLOCK_PTR)
abort ();
gcc_assert (loop->num_nodes);
gcc_assert (loop->latch != EXIT_BLOCK_PTR);
blocks = xcalloc (loop->num_nodes, sizeof (basic_block));
visited = BITMAP_XMALLOC ();
@ -1132,8 +1116,7 @@ get_loop_body_in_bfs_order (const struct loop *loop)
}
}
if (i < vc)
abort ();
gcc_assert (i >= vc);
bb = blocks[vc++];
}
@ -1150,8 +1133,7 @@ get_loop_exit_edges (const struct loop *loop, unsigned int *n_edges)
unsigned i, n;
basic_block * body;
if (loop->latch == EXIT_BLOCK_PTR)
abort ();
gcc_assert (loop->latch != EXIT_BLOCK_PTR);
body = get_loop_body (loop);
n = 0;
@ -1179,8 +1161,7 @@ num_loop_branches (const struct loop *loop)
unsigned i, n;
basic_block * body;
if (loop->latch == EXIT_BLOCK_PTR)
abort ();
gcc_assert (loop->latch != EXIT_BLOCK_PTR);
body = get_loop_body (loop);
n = 0;
@ -1246,8 +1227,7 @@ cancel_loop (struct loops *loops, struct loop *loop)
basic_block *bbs;
unsigned i;
if (loop->inner)
abort ();
gcc_assert (!loop->inner);
/* Move blocks up one level (they should be removed as soon as possible). */
bbs = get_loop_body (loop);
@ -1492,8 +1472,7 @@ verify_loop_structure (struct loops *loops)
}
}
if (err)
abort ();
gcc_assert (!err);
free (sizes);
}

cfgloopanal.c

@ -208,8 +208,7 @@ check_irred (struct graph *g, struct edge *e)
/* All edges should lead from a component with higher number to the
one with lower one. */
if (g->vertices[e->src].component < g->vertices[e->dest].component)
abort ();
gcc_assert (g->vertices[e->src].component >= g->vertices[e->dest].component);
if (g->vertices[e->src].component != g->vertices[e->dest].component)
return;

cfgloopmanip.c

@ -98,8 +98,7 @@ remove_bbs (basic_block *bbs, int nbbs)
static int
find_path (edge e, basic_block **bbs)
{
if (e->dest->pred->pred_next)
abort ();
gcc_assert (!e->dest->pred->pred_next);
/* Find bbs in the path. */
*bbs = xcalloc (n_basic_blocks, sizeof (basic_block));
@ -323,6 +322,7 @@ remove_path (struct loops *loops, edge e)
basic_block *rem_bbs, *bord_bbs, *dom_bbs, from, bb;
int i, nrem, n_bord_bbs, n_dom_bbs;
sbitmap seen;
bool deleted;
if (!loop_delete_branch_edge (e, 0))
return false;
@ -367,8 +367,8 @@ remove_path (struct loops *loops, edge e)
/* Remove the path. */
from = e->src;
if (!loop_delete_branch_edge (e, 1))
abort ();
deleted = loop_delete_branch_edge (e, 1);
gcc_assert (deleted);
dom_bbs = xcalloc (n_basic_blocks, sizeof (basic_block));
/* Cancel loops contained in the path. */
@ -765,47 +765,37 @@ static bool
loop_delete_branch_edge (edge e, int really_delete)
{
basic_block src = e->src;
basic_block newdest;
int irr;
edge snd;
if (src->succ->succ_next)
{
basic_block newdest;
gcc_assert (src->succ->succ_next);
/* Cannot handle more than two exit edges. */
if (src->succ->succ_next->succ_next)
return false;
/* And it must be just a simple branch. */
if (!any_condjump_p (BB_END (src)))
return false;
/* Cannot handle more than two exit edges. */
if (src->succ->succ_next->succ_next)
return false;
/* And it must be just a simple branch. */
if (!any_condjump_p (BB_END (src)))
return false;
snd = e == src->succ ? src->succ->succ_next : src->succ;
newdest = snd->dest;
if (newdest == EXIT_BLOCK_PTR)
return false;
snd = e == src->succ ? src->succ->succ_next : src->succ;
newdest = snd->dest;
if (newdest == EXIT_BLOCK_PTR)
return false;
/* Hopefully the above conditions should suffice. */
if (!really_delete)
return true;
/* Hopefully the above conditions should suffice. */
if (!really_delete)
return true;
/* Redirecting behaves wrongly wrto this flag. */
irr = snd->flags & EDGE_IRREDUCIBLE_LOOP;
/* Redirecting behaves wrongly wrto this flag. */
irr = snd->flags & EDGE_IRREDUCIBLE_LOOP;
if (!redirect_edge_and_branch (e, newdest))
return false;
src->succ->flags &= ~EDGE_IRREDUCIBLE_LOOP;
src->succ->flags |= irr;
return true;
}
else
{
/* Cannot happen -- we are using this only to remove an edge
from branch. */
abort ();
}
return false; /* To avoid warning, cannot get here. */
if (!redirect_edge_and_branch (e, newdest))
return false;
src->succ->flags &= ~EDGE_IRREDUCIBLE_LOOP;
src->succ->flags |= irr;
return true;
}
/* Check whether LOOP's body can be duplicated. */
@ -880,18 +870,14 @@ duplicate_loop_to_header_edge (struct loop *loop, edge e, struct loops *loops,
int prob_pass_thru, prob_pass_wont_exit, prob_pass_main;
int add_irreducible_flag;
if (e->dest != loop->header)
abort ();
if (ndupl <= 0)
abort ();
gcc_assert (e->dest == loop->header);
gcc_assert (ndupl > 0);
if (orig)
{
/* Orig must be edge out of the loop. */
if (!flow_bb_inside_loop_p (loop, orig->src))
abort ();
if (flow_bb_inside_loop_p (loop, orig->dest))
abort ();
gcc_assert (flow_bb_inside_loop_p (loop, orig->src));
gcc_assert (!flow_bb_inside_loop_p (loop, orig->dest));
}
bbs = get_loop_body (loop);
@ -907,8 +893,7 @@ duplicate_loop_to_header_edge (struct loop *loop, edge e, struct loops *loops,
/* In case we are doing loop peeling and the loop is in the middle of
irreducible region, the peeled copies will be inside it too. */
add_irreducible_flag = e->flags & EDGE_IRREDUCIBLE_LOOP;
if (is_latch && add_irreducible_flag)
abort ();
gcc_assert (!is_latch || !add_irreducible_flag);
/* Find edge from latch. */
latch_edge = loop_latch_edge (loop);
@ -960,11 +945,9 @@ duplicate_loop_to_header_edge (struct loop *loop, edge e, struct loops *loops,
scale_act = REG_BR_PROB_BASE - prob_pass_thru;
}
for (i = 0; i < ndupl; i++)
if (scale_step[i] < 0 || scale_step[i] > REG_BR_PROB_BASE)
abort ();
if (scale_main < 0 || scale_main > REG_BR_PROB_BASE
|| scale_act < 0 || scale_act > REG_BR_PROB_BASE)
abort ();
gcc_assert (scale_step[i] >= 0 && scale_step[i] <= REG_BR_PROB_BASE);
gcc_assert (scale_main >= 0 && scale_main <= REG_BR_PROB_BASE
&& scale_act >= 0 && scale_act <= REG_BR_PROB_BASE);
}
/* Loop the new bbs will belong to. */
@ -1154,8 +1137,7 @@ create_preheader (struct loop *loop, int flags)
irred |= (e->flags & EDGE_IRREDUCIBLE_LOOP) != 0;
nentry++;
}
if (!nentry)
abort ();
gcc_assert (nentry);
if (nentry == 1)
{
for (e = loop->header->pred; e->src == loop->latch; e = e->pred_next);
@ -1285,9 +1267,8 @@ create_loop_notes (void)
#ifdef ENABLE_CHECKING
/* Verify that there really are no loop notes. */
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
if (NOTE_P (insn)
&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
abort ();
gcc_assert (!NOTE_P (insn) ||
NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
#endif
flow_loops_find (&loops, LOOP_TREE);
@ -1335,8 +1316,7 @@ create_loop_notes (void)
&& onlyjump_p (insn))
{
pbb = BLOCK_FOR_INSN (insn);
if (!pbb || !pbb->succ || pbb->succ->succ_next)
abort ();
gcc_assert (pbb && pbb->succ && !pbb->succ->succ_next);
if (!flow_bb_inside_loop_p (loop, pbb->succ->dest))
insn = BB_HEAD (first[loop->num]);

cfgrtl.c

@ -139,8 +139,7 @@ delete_insn (rtx insn)
if (really_delete)
{
/* If this insn has already been deleted, something is very wrong. */
if (INSN_DELETED_P (insn))
abort ();
gcc_assert (!INSN_DELETED_P (insn));
remove_insn (insn);
INSN_DELETED_P (insn) = 1;
}
@ -754,9 +753,8 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
INSN_UID (insn), e->dest->index, target->index);
if (!redirect_jump (insn, block_label (target), 0))
{
if (target == EXIT_BLOCK_PTR)
return NULL;
abort ();
gcc_assert (target == EXIT_BLOCK_PTR);
return NULL;
}
}
@ -923,17 +921,15 @@ redirect_branch_edge (edge e, basic_block target)
return NULL;
/* If the insn doesn't go where we think, we're confused. */
if (JUMP_LABEL (insn) != old_label)
abort ();
gcc_assert (JUMP_LABEL (insn) == old_label);
/* If the substitution doesn't succeed, die. This can happen
if the back end emitted unrecognizable instructions or if
target is exit block on some arches. */
if (!redirect_jump (insn, block_label (target), 0))
{
if (target == EXIT_BLOCK_PTR)
return NULL;
abort ();
gcc_assert (target == EXIT_BLOCK_PTR);
return NULL;
}
}
@ -1006,9 +1002,11 @@ force_nonfallthru_and_redirect (edge e, basic_block target)
{
rtx note;
edge b = unchecked_make_edge (e->src, target, 0);
bool redirected;
if (!redirect_jump (BB_END (e->src), block_label (target), 0))
abort ();
redirected = redirect_jump (BB_END (e->src), block_label (target), 0);
gcc_assert (redirected);
note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
if (note)
{
@ -1032,32 +1030,35 @@ force_nonfallthru_and_redirect (edge e, basic_block target)
We can't redirect abnormal edge, but we still can split the fallthru
one and create separate abnormal edge to original destination.
This allows bb-reorder to make such edge non-fallthru. */
if (e->dest != target)
abort ();
gcc_assert (e->dest == target);
abnormal_edge_flags = e->flags & ~(EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
e->flags &= EDGE_FALLTHRU | EDGE_CAN_FALLTHRU;
}
else if (!(e->flags & EDGE_FALLTHRU))
abort ();
else if (e->src == ENTRY_BLOCK_PTR)
else
{
/* We can't redirect the entry block. Create an empty block at the
start of the function which we use to add the new jump. */
edge *pe1;
basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL, ENTRY_BLOCK_PTR);
gcc_assert (e->flags & EDGE_FALLTHRU);
if (e->src == ENTRY_BLOCK_PTR)
{
/* We can't redirect the entry block. Create an empty block
at the start of the function which we use to add the new
jump. */
edge *pe1;
basic_block bb
= create_basic_block (BB_HEAD (e->dest), NULL, ENTRY_BLOCK_PTR);
/* Change the existing edge's source to be the new block, and add
a new edge from the entry block to the new block. */
e->src = bb;
for (pe1 = &ENTRY_BLOCK_PTR->succ; *pe1; pe1 = &(*pe1)->succ_next)
if (*pe1 == e)
{
*pe1 = e->succ_next;
break;
}
e->succ_next = 0;
bb->succ = e;
make_single_succ_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU);
/* Change the existing edge's source to be the new block, and add
a new edge from the entry block to the new block. */
e->src = bb;
for (pe1 = &ENTRY_BLOCK_PTR->succ; *pe1; pe1 = &(*pe1)->succ_next)
if (*pe1 == e)
{
*pe1 = e->succ_next;
break;
}
e->succ_next = 0;
bb->succ = e;
make_single_succ_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU);
}
}
if (e->src->succ->succ_next || abnormal_edge_flags)
@ -1138,7 +1139,7 @@ force_nonfallthru_and_redirect (edge e, basic_block target)
#ifdef HAVE_return
emit_jump_insn_after (gen_return (), BB_END (jump_block));
#else
abort ();
gcc_unreachable ();
#endif
}
else
@ -1295,8 +1296,7 @@ rtl_split_edge (edge edge_in)
rtx before;
/* Abnormal edges cannot be split. */
if ((edge_in->flags & EDGE_ABNORMAL) != 0)
abort ();
gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
/* We are going to place the new block in front of edge destination.
Avoid existence of fallthru predecessors. */
@ -1378,8 +1378,8 @@ rtl_split_edge (edge edge_in)
jump instruction to target our new block. */
if ((edge_in->flags & EDGE_FALLTHRU) == 0)
{
if (!redirect_edge_and_branch (edge_in, bb))
abort ();
edge redirected = redirect_edge_and_branch (edge_in, bb);
gcc_assert (redirected);
}
else
redirect_edge_succ (edge_in, bb);
@ -1396,8 +1396,7 @@ insert_insn_on_edge (rtx pattern, edge e)
{
/* We cannot insert instructions on an abnormal critical edge.
It will be easier to find the culprit if we die now. */
if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
abort ();
gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));
if (e->insns.r == NULL_RTX)
start_sequence ();
@ -1584,9 +1583,9 @@ commit_one_edge_insertion (edge e, int watch_calls)
;
else
{
/* We'd better be fallthru, or we've lost track of what's what. */
if ((e->flags & EDGE_FALLTHRU) == 0)
abort ();
/* We'd better be fallthru, or we've lost track of
what's what. */
gcc_assert (e->flags & EDGE_FALLTHRU);
after = BB_END (bb);
}
@ -1647,9 +1646,8 @@ commit_one_edge_insertion (edge e, int watch_calls)
to EXIT. */
e = bb->succ;
if (e->dest != EXIT_BLOCK_PTR
|| e->succ_next != NULL || (e->flags & EDGE_FALLTHRU) == 0)
abort ();
gcc_assert (e->dest == EXIT_BLOCK_PTR
&& !e->succ_next && (e->flags & EDGE_FALLTHRU));
e->flags &= ~EDGE_FALLTHRU;
emit_barrier_after (last);
@ -1657,8 +1655,8 @@ commit_one_edge_insertion (edge e, int watch_calls)
if (before)
delete_insn (before);
}
else if (JUMP_P (last))
abort ();
else
gcc_assert (!JUMP_P (last));
/* Mark the basic block for find_sub_basic_blocks. */
bb->aux = &bb->aux;
@ -1703,8 +1701,7 @@ commit_edge_insertions (void)
SET_BIT (blocks, bb->index);
/* Check for forgotten bb->aux values before commit_edge_insertions
call. */
if (bb->aux != &bb->aux)
abort ();
gcc_assert (bb->aux == &bb->aux);
bb->aux = NULL;
}
find_many_sub_basic_blocks (blocks);
@ -1751,8 +1748,7 @@ commit_edge_insertions_watch_calls (void)
SET_BIT (blocks, bb->index);
/* Check for forgotten bb->aux values before commit_edge_insertions
call. */
if (bb->aux != &bb->aux)
abort ();
gcc_assert (bb->aux == &bb->aux);
bb->aux = NULL;
}
find_many_sub_basic_blocks (blocks);
@ -2397,10 +2393,8 @@ purge_dead_edges (basic_block bb)
from non-local gotos and the like. If there were, we shouldn't
have created the sibcall in the first place. Second, there
should of course never have been a fallthru edge. */
if (!bb->succ || bb->succ->succ_next)
abort ();
if (bb->succ->flags != (EDGE_SIBCALL | EDGE_ABNORMAL))
abort ();
gcc_assert (bb->succ && !bb->succ->succ_next);
gcc_assert (bb->succ->flags == (EDGE_SIBCALL | EDGE_ABNORMAL));
return 0;
}
@ -2428,8 +2422,7 @@ purge_dead_edges (basic_block bb)
}
}
if (!bb->succ || bb->succ->succ_next)
abort ();
gcc_assert (bb->succ && !bb->succ->succ_next);
bb->succ->probability = REG_BR_PROB_BASE;
bb->succ->count = bb->count;
@ -2533,13 +2526,15 @@ cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
&& label_is_jump_target_p (BB_HEAD (e->dest),
BB_END (src)))
{
edge redirected;
if (dump_file)
fprintf (dump_file, "Fallthru edge unified with branch "
"%i->%i redirected to %i\n",
e->src->index, e->dest->index, dest->index);
e->flags &= ~EDGE_FALLTHRU;
if (!redirect_branch_edge (e, dest))
abort ();
redirected = redirect_branch_edge (e, dest);
gcc_assert (redirected);
e->flags |= EDGE_FALLTHRU;
e->src->flags |= BB_DIRTY;
return e;
@ -2564,8 +2559,7 @@ cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
ret = redirect_branch_edge (e, dest);
/* We don't want simplejumps in the insn stream during cfglayout. */
if (simplejump_p (BB_END (src)))
abort ();
gcc_assert (!simplejump_p (BB_END (src)));
src->flags |= BB_DIRTY;
return ret;
@ -2575,8 +2569,9 @@ cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
static basic_block
cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
{
if (!cfg_layout_redirect_edge_and_branch (e, dest))
abort ();
edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);
gcc_assert (redirected);
return NULL;
}
@ -2700,8 +2695,7 @@ static void
cfg_layout_merge_blocks (basic_block a, basic_block b)
{
#ifdef ENABLE_CHECKING
if (!cfg_layout_can_merge_blocks_p (a, b))
abort ();
gcc_assert (cfg_layout_can_merge_blocks_p (a, b));
#endif
/* If there was a CODE_LABEL beginning B, delete it. */
@ -2712,8 +2706,7 @@ cfg_layout_merge_blocks (basic_block a, basic_block b)
it cleaned up. */
if (JUMP_P (BB_END (a)))
try_redirect_by_replacing_jump (a->succ, b, true);
if (JUMP_P (BB_END (a)))
abort ();
gcc_assert (!JUMP_P (BB_END (a)));
/* Possible line number notes should appear in between. */
if (b->rbi->header)
@ -2734,8 +2727,7 @@ cfg_layout_merge_blocks (basic_block a, basic_block b)
/* Skip possible DELETED_LABEL insn. */
if (!NOTE_INSN_BASIC_BLOCK_P (first))
first = NEXT_INSN (first);
if (!NOTE_INSN_BASIC_BLOCK_P (first))
abort ();
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (first));
BB_HEAD (b) = NULL;
delete_insn (first);
}
@ -2752,8 +2744,7 @@ cfg_layout_merge_blocks (basic_block a, basic_block b)
/* Skip possible DELETED_LABEL insn. */
if (!NOTE_INSN_BASIC_BLOCK_P (insn))
insn = NEXT_INSN (insn);
if (!NOTE_INSN_BASIC_BLOCK_P (insn))
abort ();
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
BB_HEAD (b) = NULL;
BB_END (a) = BB_END (b);
delete_insn (insn);
@ -2963,8 +2954,7 @@ rtl_flow_call_edges_add (sbitmap blocks)
#ifdef ENABLE_CHECKING
if (split_at_insn == BB_END (bb))
for (e = bb->succ; e; e = e->succ_next)
if (e->dest == EXIT_BLOCK_PTR)
abort ();
gcc_assert (e->dest != EXIT_BLOCK_PTR);
#endif
/* Note that the following may create a new basic block

cgraph.c

@ -170,8 +170,7 @@ cgraph_node (tree decl)
{
struct cgraph_node key, *node, **slot;
if (TREE_CODE (decl) != FUNCTION_DECL)
abort ();
gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
if (!cgraph_hash)
cgraph_hash = htab_create_ggc (10, hash_node, eq_node, NULL);
@ -223,12 +222,10 @@ cgraph_create_edge (struct cgraph_node *caller, struct cgraph_node *callee,
struct cgraph_edge *e;
for (e = caller->callees; e; e = e->next_callee)
if (e->call_expr == call_expr)
abort ();
gcc_assert (e->call_expr != call_expr);
#endif
if (TREE_CODE (call_expr) != CALL_EXPR)
abort ();
gcc_assert (TREE_CODE (call_expr) == CALL_EXPR);
if (!DECL_SAVED_TREE (callee->decl))
edge->inline_failed = N_("function body not available");
@ -262,14 +259,12 @@ cgraph_remove_edge (struct cgraph_edge *e)
for (edge = &e->callee->callers; *edge && *edge != e;
edge = &((*edge)->next_caller))
continue;
if (!*edge)
abort ();
gcc_assert (*edge);
*edge = (*edge)->next_caller;
for (edge2 = &e->caller->callees; *edge2 && *edge2 != e;
edge2 = &(*edge2)->next_callee)
continue;
if (!*edge2)
abort ();
gcc_assert (*edge2);
*edge2 = (*edge2)->next_callee;
}
@ -284,8 +279,7 @@ cgraph_redirect_edge_callee (struct cgraph_edge *e, struct cgraph_node *n)
for (edge = &e->callee->callers; *edge && *edge != e;
edge = &((*edge)->next_caller))
continue;
if (!*edge)
abort ();
gcc_assert (*edge);
*edge = (*edge)->next_caller;
e->callee = n;
e->next_caller = n->callers;
@ -412,8 +406,8 @@ struct cgraph_local_info *
cgraph_local_info (tree decl)
{
struct cgraph_node *node;
if (TREE_CODE (decl) != FUNCTION_DECL)
abort ();
gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
node = cgraph_node (decl);
return &node->local;
}
@ -424,8 +418,8 @@ struct cgraph_global_info *
cgraph_global_info (tree decl)
{
struct cgraph_node *node;
if (TREE_CODE (decl) != FUNCTION_DECL || !cgraph_global_info_ready)
abort ();
gcc_assert (TREE_CODE (decl) == FUNCTION_DECL && cgraph_global_info_ready);
node = cgraph_node (decl);
return &node->global;
}
@ -436,8 +430,8 @@ struct cgraph_rtl_info *
cgraph_rtl_info (tree decl)
{
struct cgraph_node *node;
if (TREE_CODE (decl) != FUNCTION_DECL)
abort ();
gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
node = cgraph_node (decl);
if (decl != current_function_decl
&& !TREE_ASM_WRITTEN (node->decl))
@ -542,8 +536,7 @@ cgraph_varpool_node (tree decl)
{
struct cgraph_varpool_node key, *node, **slot;
if (!DECL_P (decl) || TREE_CODE (decl) == FUNCTION_DECL)
abort ();
gcc_assert (DECL_P (decl) && TREE_CODE (decl) != FUNCTION_DECL);
if (!cgraph_varpool_hash)
cgraph_varpool_hash = htab_create_ggc (10, hash_varpool_node,

cgraphunit.c

@ -329,8 +329,7 @@ cgraph_finalize_function (tree decl, bool nested)
case can be sort-of legitimately seen with real function
redefinition errors. I would argue that the front end should
never present us with such a case, but don't enforce that for now. */
if (node->output)
abort ();
gcc_assert (!node->output);
/* Reset our data structures so we can analyze the function again. */
memset (&node->local, 0, sizeof (node->local));
@ -697,8 +696,8 @@ cgraph_finalize_compilation_unit (void)
if (!DECL_SAVED_TREE (decl))
continue;
if (node->analyzed || !node->reachable || !DECL_SAVED_TREE (decl))
abort ();
gcc_assert (!node->analyzed && node->reachable);
gcc_assert (DECL_SAVED_TREE (decl));
cgraph_analyze_function (node);
@ -756,8 +755,8 @@ cgraph_mark_functions_to_output (void)
{
tree decl = node->decl;
struct cgraph_edge *e;
if (node->output)
abort ();
gcc_assert (!node->output);
for (e = node->callers; e; e = e->next_caller)
if (e->inline_failed)
@ -773,13 +772,10 @@ cgraph_mark_functions_to_output (void)
&& !TREE_ASM_WRITTEN (decl)
&& !DECL_EXTERNAL (decl))
node->output = 1;
/* We should've reclaimed all functions that are not needed. */
else if (!node->global.inlined_to && DECL_SAVED_TREE (decl)
&& !DECL_EXTERNAL (decl))
{
dump_cgraph_node (stderr, node);
abort ();
}
else
/* We should've reclaimed all functions that are not needed. */
gcc_assert (node->global.inlined_to || !DECL_SAVED_TREE (decl)
|| DECL_EXTERNAL (decl));
}
}
@ -791,8 +787,7 @@ cgraph_expand_function (struct cgraph_node *node)
tree decl = node->decl;
/* We ought to not compile any inline clones. */
if (node->global.inlined_to)
abort ();
gcc_assert (!node->global.inlined_to);
if (flag_unit_at_a_time)
announce_function (decl);
@ -802,8 +797,7 @@ cgraph_expand_function (struct cgraph_node *node)
/* Make sure that BE didn't give up on compiling. */
/* ??? Can happen with nested function of extern inline. */
if (!TREE_ASM_WRITTEN (node->decl))
abort ();
gcc_assert (TREE_ASM_WRITTEN (node->decl));
current_function_decl = NULL;
if (DECL_SAVED_TREE (node->decl)
@ -895,8 +889,7 @@ cgraph_remove_unreachable_nodes (void)
fprintf (cgraph_dump_file, "\nReclaiming functions:");
#ifdef ENABLE_CHECKING
for (node = cgraph_nodes; node; node = node->next)
if (node->aux)
abort ();
gcc_assert (!node->aux);
#endif
for (node = cgraph_nodes; node; node = node->next)
if (node->needed && !node->global.inlined_to
@ -905,8 +898,8 @@ cgraph_remove_unreachable_nodes (void)
node->aux = first;
first = node;
}
else if (node->aux)
abort ();
else
gcc_assert (!node->aux);
/* Perform reachability analysis. As a special case do not consider
extern inline functions not inlined as live because we won't output
@ -1040,8 +1033,7 @@ cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate)
&& duplicate
&& flag_unit_at_a_time)
{
if (e->callee->global.inlined_to)
abort ();
gcc_assert (!e->callee->global.inlined_to);
if (!DECL_EXTERNAL (e->callee->decl))
overall_insns -= e->callee->global.insns, nfunctions_inlined++;
duplicate = 0;
@ -1071,8 +1063,7 @@ cgraph_mark_inline_edge (struct cgraph_edge *e)
int old_insns = 0, new_insns = 0;
struct cgraph_node *to = NULL, *what;
if (!e->inline_failed)
abort ();
gcc_assert (e->inline_failed);
e->inline_failed = NULL;
if (!e->callee->global.inlined && flag_unit_at_a_time)
@ -1089,13 +1080,11 @@ cgraph_mark_inline_edge (struct cgraph_edge *e)
old_insns = e->caller->global.insns;
new_insns = cgraph_estimate_size_after_inlining (1, e->caller,
what);
if (new_insns < 0)
abort ();
gcc_assert (new_insns >= 0);
to = e->caller;
to->global.insns = new_insns;
}
if (what->global.inlined_to != to)
abort ();
gcc_assert (what->global.inlined_to == to);
overall_insns += new_insns - old_insns;
ncalls_inlined++;
}
@ -1122,11 +1111,10 @@ cgraph_mark_inline (struct cgraph_edge *edge)
cgraph_mark_inline_edge (e);
if (e == edge)
edge = next;
times ++;
times++;
}
}
if (!times)
abort ();
gcc_assert (times);
return edge;
}
@ -1653,8 +1641,7 @@ cgraph_expand_all_functions (void)
cgraph_mark_functions_to_output ();
order_pos = cgraph_postorder (order);
if (order_pos != cgraph_n_nodes)
abort ();
gcc_assert (order_pos == cgraph_n_nodes);
/* Garbage collector may remove inline clones we eliminate during
optimization. So we must be sure to not reference them. */
@ -1667,8 +1654,7 @@ cgraph_expand_all_functions (void)
node = order[i];
if (node->output)
{
if (!node->reachable)
abort ();
gcc_assert (node->reachable);
node->output = 0;
cgraph_expand_function (node);
}
@ -1831,12 +1817,17 @@ cgraph_build_static_cdtor (char which, tree body, int priority)
DECL_SOURCE_LOCATION (decl) = input_location;
cfun->function_end_locus = input_location;
if (which == 'I')
DECL_STATIC_CONSTRUCTOR (decl) = 1;
else if (which == 'D')
DECL_STATIC_DESTRUCTOR (decl) = 1;
else
abort ();
switch (which)
{
case 'I':
DECL_STATIC_CONSTRUCTOR (decl) = 1;
break;
case 'D':
DECL_STATIC_DESTRUCTOR (decl) = 1;
break;
default:
gcc_unreachable ();
}
gimplify_function_tree (decl);

combine.c

@ -455,9 +455,8 @@ do_SUBST (rtx *into, rtx newval)
{
/* Sanity check that we're replacing oldval with a CONST_INT
that is a valid sign-extension for the original mode. */
if (INTVAL (newval) != trunc_int_for_mode (INTVAL (newval),
GET_MODE (oldval)))
abort ();
gcc_assert (INTVAL (newval)
== trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
/* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
CONST_INT is not valid, because after the replacement, the
@ -465,11 +464,10 @@ do_SUBST (rtx *into, rtx newval)
when do_SUBST is called to replace the operand thereof, so we
perform this test on oldval instead, checking whether an
invalid replacement took place before we got here. */
if ((GET_CODE (oldval) == SUBREG
&& GET_CODE (SUBREG_REG (oldval)) == CONST_INT)
|| (GET_CODE (oldval) == ZERO_EXTEND
&& GET_CODE (XEXP (oldval, 0)) == CONST_INT))
abort ();
gcc_assert (!(GET_CODE (oldval) == SUBREG
&& GET_CODE (SUBREG_REG (oldval)) == CONST_INT));
gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
&& GET_CODE (XEXP (oldval, 0)) == CONST_INT));
}
if (undobuf.frees)
@ -1746,8 +1744,7 @@ try_combine (rtx i3, rtx i2, rtx i1, int *new_direct_jump_p)
{
/* We don't handle the case of the target word being wider
than a host wide int. */
if (HOST_BITS_PER_WIDE_INT < BITS_PER_WORD)
abort ();
gcc_assert (HOST_BITS_PER_WIDE_INT >= BITS_PER_WORD);
lo &= ~(UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD (1) - 1);
lo |= (INTVAL (SET_SRC (PATTERN (i3)))
@ -1770,7 +1767,7 @@ try_combine (rtx i3, rtx i2, rtx i1, int *new_direct_jump_p)
else
/* We don't handle the case of the higher word not fitting
entirely in either hi or lo. */
abort ();
gcc_unreachable ();
combine_merges++;
subst_insn = i3;
@ -3639,8 +3636,7 @@ subst (rtx x, rtx from, rtx to, int in_dest, int unique_copy)
{
x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
new, GET_MODE (XEXP (x, 0)));
if (! x)
abort ();
gcc_assert (x);
}
else
SUBST (XEXP (x, i), new);
@ -4693,8 +4689,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest)
rtx op1 = XEXP (x, 1);
int len;
if (GET_CODE (op1) != PARALLEL)
abort ();
gcc_assert (GET_CODE (op1) == PARALLEL);
len = XVECLEN (op1, 0);
if (len == 1
&& GET_CODE (XVECEXP (op1, 0, 0)) == CONST_INT
@ -5699,7 +5694,7 @@ simplify_logical (rtx x)
break;
default:
abort ();
gcc_unreachable ();
}
return x;
@ -11705,10 +11700,11 @@ distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2)
case REG_NON_LOCAL_GOTO:
if (JUMP_P (i3))
place = i3;
else if (i2 && JUMP_P (i2))
place = i2;
else
abort ();
{
gcc_assert (i2 && JUMP_P (i2));
place = i2;
}
break;
case REG_EH_REGION:
@ -11717,8 +11713,9 @@ distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2)
place = i3;
else if (i2 && CALL_P (i2))
place = i2;
else if (flag_non_call_exceptions)
else
{
gcc_assert (flag_non_call_exceptions);
if (may_trap_p (i3))
place = i3;
else if (i2 && may_trap_p (i2))
@ -11727,8 +11724,6 @@ distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2)
can now prove that the instructions can't trap. Drop the
note in this case. */
}
else
abort ();
break;
case REG_ALWAYS_RETURN:
@ -11738,10 +11733,11 @@ distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2)
possible for both I2 and I3 to be a call. */
if (CALL_P (i3))
place = i3;
else if (i2 && CALL_P (i2))
place = i2;
else
abort ();
{
gcc_assert (i2 && CALL_P (i2));
place = i2;
}
break;
case REG_UNUSED:
@ -11848,22 +11844,30 @@ distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2)
a JUMP_LABEL instead or decrement LABEL_NUSES. */
if (place && JUMP_P (place))
{
if (!JUMP_LABEL (place))
rtx label = JUMP_LABEL (place);
if (!label)
JUMP_LABEL (place) = XEXP (note, 0);
else if (JUMP_LABEL (place) != XEXP (note, 0))
abort ();
else if (LABEL_P (JUMP_LABEL (place)))
LABEL_NUSES (JUMP_LABEL (place))--;
else
{
gcc_assert (label == XEXP (note, 0));
if (LABEL_P (label))
LABEL_NUSES (label)--;
}
place = 0;
}
if (place2 && JUMP_P (place2))
{
if (!JUMP_LABEL (place2))
rtx label = JUMP_LABEL (place2);
if (!label)
JUMP_LABEL (place2) = XEXP (note, 0);
else if (JUMP_LABEL (place2) != XEXP (note, 0))
abort ();
else if (LABEL_P (JUMP_LABEL (place2)))
LABEL_NUSES (JUMP_LABEL (place2))--;
else
{
gcc_assert (label == XEXP (note, 0));
if (LABEL_P (label))
LABEL_NUSES (label)--;
}
place2 = 0;
}
break;
@ -12192,7 +12196,7 @@ distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2)
default:
/* Any other notes should not be present at this point in the
compilation. */
abort ();
gcc_unreachable ();
}
if (place)
@ -12348,8 +12352,7 @@ insn_cuid (rtx insn)
&& NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == USE)
insn = NEXT_INSN (insn);
if (INSN_UID (insn) > max_uid_cuid)
abort ();
gcc_assert (INSN_UID (insn) <= max_uid_cuid);
return INSN_CUID (insn);
}
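
The do_SUBST hunk at the top of this file adds one refinement: the old
compound guard "if (A || B) abort ();" is split into two assertions,
gcc_assert (!A) and gcc_assert (!B) (by De Morgan, !(A || B) is
!A && !B), so a failure reports exactly which operand shape was
invalid.  A sketch with hypothetical condition names:

/* Separate assertions give separate, more precise diagnostics.  */
void
check_operand_shapes (int subreg_of_const, int zext_of_const)
{
  /* Was: if (subreg_of_const || zext_of_const) abort ();  */
  gcc_assert (!subreg_of_const);
  gcc_assert (!zext_of_const);
}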

conflict.c

@ -190,8 +190,7 @@ conflict_graph_add (conflict_graph graph, int reg1, int reg2)
void **slot;
/* A reg cannot conflict with itself. */
if (reg1 == reg2)
abort ();
gcc_assert (reg1 != reg2);
dummy.smaller = smaller;
dummy.larger = larger;
@ -324,10 +323,11 @@ print_conflict (int reg1, int reg2, void *contextp)
is the interesting one. */
if (reg1 == context->reg)
reg = reg2;
else if (reg2 == context->reg)
reg = reg1;
else
abort ();
{
gcc_assert (reg2 == context->reg);
reg = reg1;
}
/* Print the conflict. */
fprintf (context->fp, " %d", reg);

coverage.c

@ -403,8 +403,7 @@ rtl_coverage_counter_ref (unsigned counter, unsigned no)
enum machine_mode mode = mode_for_size (gcov_size, MODE_INT, 0);
rtx ref;
if (no >= fn_n_ctrs[counter] - fn_b_ctrs[counter])
abort ();
gcc_assert (no < fn_n_ctrs[counter] - fn_b_ctrs[counter]);
no += prg_n_ctrs[counter] + fn_b_ctrs[counter];
if (!ctr_labels[counter])
{
@ -428,8 +427,7 @@ tree_coverage_counter_ref (unsigned counter, unsigned no)
{
tree domain_type = TYPE_DOMAIN (TREE_TYPE (tree_ctr_tables[counter]));
if (no >= fn_n_ctrs[counter] - fn_b_ctrs[counter])
abort ();
gcc_assert (no < fn_n_ctrs[counter] - fn_b_ctrs[counter]);
no += prg_n_ctrs[counter] + fn_b_ctrs[counter];
/* "no" here is an array index, scaled to bytes later. */
@ -462,6 +460,7 @@ coverage_checksum_string (unsigned chksum, const char *string)
{
int y;
unsigned seed;
int scan;
for (y = 1; y < 9; y++)
if (!(string[i + y] >= '0' && string[i + y] <= '9')
@ -475,8 +474,8 @@ coverage_checksum_string (unsigned chksum, const char *string)
break;
if (y != 18)
continue;
if (!sscanf (string + i + 10, "%X", &seed))
abort ();
scan = sscanf (string + i + 10, "%X", &seed);
gcc_assert (scan);
if (seed != crc32_string (0, flag_random_seed))
continue;
string = dup = xstrdup (string);

cse.c

@ -925,8 +925,7 @@ make_new_qty (unsigned int reg, enum machine_mode mode)
struct qty_table_elem *ent;
struct reg_eqv_elem *eqv;
if (next_qty >= max_qty)
abort ();
gcc_assert (next_qty < max_qty);
q = REG_QTY (reg) = next_qty++;
ent = &qty_table[q];
@ -953,8 +952,7 @@ make_regs_eqv (unsigned int new, unsigned int old)
ent = &qty_table[q];
/* Nothing should become eqv until it has a "non-invalid" qty number. */
if (! REGNO_QTY_VALID_P (old))
abort ();
gcc_assert (REGNO_QTY_VALID_P (old));
REG_QTY (new) = q;
firstr = ent->first_reg;
@ -1424,8 +1422,7 @@ insert (rtx x, struct table_elt *classp, unsigned int hash, enum machine_mode mo
/* If X is a register and we haven't made a quantity for it,
something is wrong. */
if (REG_P (x) && ! REGNO_QTY_VALID_P (REGNO (x)))
abort ();
gcc_assert (!REG_P (x) || REGNO_QTY_VALID_P (REGNO (x)));
/* If X is a hard register, show it is being put in the table. */
if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
@ -1832,7 +1829,7 @@ invalidate (rtx x, enum machine_mode full_mode)
return;
default:
abort ();
gcc_unreachable ();
}
}
@ -2334,8 +2331,9 @@ hash_rtx (rtx x, enum machine_mode mode, int *do_not_record_p,
fmt = GET_RTX_FORMAT (code);
for (; i >= 0; i--)
{
if (fmt[i] == 'e')
switch (fmt[i])
{
case 'e':
/* If we are about to do the last recursive call
needed at this level, change it into iteration.
This function is called enough to be worth it. */
@ -2347,24 +2345,29 @@ hash_rtx (rtx x, enum machine_mode mode, int *do_not_record_p,
hash += hash_rtx (XEXP (x, i), 0, do_not_record_p,
hash_arg_in_memory_p, have_reg_qty);
}
break;
else if (fmt[i] == 'E')
for (j = 0; j < XVECLEN (x, i); j++)
{
case 'E':
for (j = 0; j < XVECLEN (x, i); j++)
hash += hash_rtx (XVECEXP (x, i, j), 0, do_not_record_p,
hash_arg_in_memory_p, have_reg_qty);
}
break;
else if (fmt[i] == 's')
hash += hash_rtx_string (XSTR (x, i));
else if (fmt[i] == 'i')
hash += (unsigned int) XINT (x, i);
else if (fmt[i] == '0' || fmt[i] == 't')
/* Unused. */
;
else
abort ();
case 's':
hash += hash_rtx_string (XSTR (x, i));
break;
case 'i':
hash += (unsigned int) XINT (x, i);
break;
case '0': case 't':
/* Unused. */
break;
default:
gcc_unreachable ();
}
}
return hash;
@ -2573,7 +2576,7 @@ exp_equiv_p (rtx x, rtx y, int validate, bool for_gcse)
break;
default:
abort ();
gcc_unreachable ();
}
}
@ -6979,8 +6982,7 @@ cse_basic_block (rtx from, rtx to, struct branch_path *next_branch)
}
}
if (next_qty > max_qty)
abort ();
gcc_assert (next_qty <= max_qty);
free (qty_table + max_reg);
@ -7099,7 +7101,7 @@ count_reg_usage (rtx x, int *counts, int incr)
return;
case INSN_LIST:
abort ();
gcc_unreachable ();
default:
break;
@ -7458,8 +7460,7 @@ cse_cc_succs (basic_block bb, rtx cc_reg, rtx cc_src, bool can_change_mode)
if (mode != comp_mode)
{
if (! can_change_mode)
abort ();
gcc_assert (can_change_mode);
mode = comp_mode;
PUT_MODE (cc_src, mode);
}
@ -7507,8 +7508,7 @@ cse_cc_succs (basic_block bb, rtx cc_reg, rtx cc_src, bool can_change_mode)
submode = cse_cc_succs (e->dest, cc_reg, cc_src, false);
if (submode != VOIDmode)
{
if (submode != mode)
abort ();
gcc_assert (submode == mode);
found_equiv = true;
can_change_mode = false;
}
@ -7636,8 +7636,7 @@ cse_condition_code_reg (void)
mode = cse_cc_succs (bb, cc_reg, cc_src, true);
if (mode != VOIDmode)
{
if (mode != GET_MODE (cc_src))
abort ();
gcc_assert (mode == GET_MODE (cc_src));
if (mode != orig_mode)
{
rtx newreg = gen_rtx_REG (mode, REGNO (cc_reg));

cselib.c

@ -235,9 +235,9 @@ entry_and_rtx_equal_p (const void *entry, const void *x_arg)
rtx x = (rtx) x_arg;
enum machine_mode mode = GET_MODE (x);
if (GET_CODE (x) == CONST_INT
|| (mode == VOIDmode && GET_CODE (x) == CONST_DOUBLE))
abort ();
gcc_assert (GET_CODE (x) != CONST_INT
&& (mode != VOIDmode || GET_CODE (x) != CONST_DOUBLE));
if (mode != GET_MODE (v->u.val_rtx))
return 0;
@ -370,8 +370,7 @@ remove_useless_values (void)
htab_traverse (hash_table, discard_useless_values, 0);
if (n_useless_values != 0)
abort ();
gcc_assert (!n_useless_values);
}
/* Return the mode in which a register was last set. If X is not a
@ -524,7 +523,7 @@ rtx_equal_for_cselib_p (rtx x, rtx y)
contain anything but integers and other rtx's,
except for within LABEL_REFs and SYMBOL_REFs. */
default:
abort ();
gcc_unreachable ();
}
}
return 1;
@ -539,8 +538,7 @@ wrap_constant (enum machine_mode mode, rtx x)
if (GET_CODE (x) != CONST_INT
&& (GET_CODE (x) != CONST_DOUBLE || GET_MODE (x) != VOIDmode))
return x;
if (mode == VOIDmode)
abort ();
gcc_assert (mode != VOIDmode);
return gen_rtx_CONST (mode, x);
}
@ -643,40 +641,54 @@ cselib_hash_rtx (rtx x, enum machine_mode mode, int create)
fmt = GET_RTX_FORMAT (code);
for (; i >= 0; i--)
{
if (fmt[i] == 'e')
switch (fmt[i])
{
rtx tem = XEXP (x, i);
unsigned int tem_hash = cselib_hash_rtx (tem, 0, create);
if (tem_hash == 0)
return 0;
hash += tem_hash;
}
else if (fmt[i] == 'E')
for (j = 0; j < XVECLEN (x, i); j++)
case 'e':
{
unsigned int tem_hash = cselib_hash_rtx (XVECEXP (x, i, j), 0, create);
rtx tem = XEXP (x, i);
unsigned int tem_hash = cselib_hash_rtx (tem, 0, create);
if (tem_hash == 0)
return 0;
hash += tem_hash;
}
else if (fmt[i] == 's')
{
const unsigned char *p = (const unsigned char *) XSTR (x, i);
break;
case 'E':
for (j = 0; j < XVECLEN (x, i); j++)
{
unsigned int tem_hash
= cselib_hash_rtx (XVECEXP (x, i, j), 0, create);
if (tem_hash == 0)
return 0;
hash += tem_hash;
}
break;
if (p)
while (*p)
hash += *p++;
case 's':
{
const unsigned char *p = (const unsigned char *) XSTR (x, i);
if (p)
while (*p)
hash += *p++;
break;
}
case 'i':
hash += XINT (x, i);
break;
case '0':
case 't':
/* unused */
break;
default:
gcc_unreachable ();
}
else if (fmt[i] == 'i')
hash += XINT (x, i);
else if (fmt[i] == '0' || fmt[i] == 't')
/* unused */;
else
abort ();
}
return hash ? hash : 1 + (unsigned int) GET_CODE (x);
@ -690,10 +702,7 @@ new_cselib_val (unsigned int value, enum machine_mode mode)
{
cselib_val *e = pool_alloc (cselib_val_pool);
#ifdef ENABLE_CHECKING
if (value == 0)
abort ();
#endif
gcc_assert (value);
e->value = value;
/* We use custom method to allocate this RTL construct because it accounts
@ -799,7 +808,7 @@ cselib_subst_to_values (rtx x)
if (GET_MODE (l->elt->u.val_rtx) == GET_MODE (x))
return l->elt->u.val_rtx;
abort ();
gcc_unreachable ();
case MEM:
e = cselib_lookup_mem (x, 0);
@ -963,9 +972,8 @@ cselib_invalidate_regno (unsigned int regno, enum machine_mode mode)
unsigned int i;
/* If we see pseudos after reload, something is _wrong_. */
if (reload_completed && regno >= FIRST_PSEUDO_REGISTER
&& reg_renumber[regno] >= 0)
abort ();
gcc_assert (!reload_completed || regno < FIRST_PSEUDO_REGISTER
|| reg_renumber[regno] < 0);
/* Determine the range of registers that must be invalidated. For
pseudos, only REGNO is affected. For hard regs, we must take MODE
@ -973,8 +981,7 @@ cselib_invalidate_regno (unsigned int regno, enum machine_mode mode)
if they contain values that overlap REGNO. */
if (regno < FIRST_PSEUDO_REGISTER)
{
if (mode == VOIDmode)
abort ();
gcc_assert (mode != VOIDmode);
if (regno < max_value_regs)
i = 0;
@ -1188,11 +1195,9 @@ cselib_record_set (rtx dest, cselib_val *src_elt, cselib_val *dest_addr_elt)
}
else
{
if (REG_VALUES (dreg)->elt == 0)
REG_VALUES (dreg)->elt = src_elt;
else
/* The register should have been invalidated. */
abort ();
/* The register should have been invalidated. */
gcc_assert (REG_VALUES (dreg)->elt == 0);
REG_VALUES (dreg)->elt = src_elt;
}
if (src_elt->locs == 0)