lambda-mat.c (lambda_matrix_inverse_hard): Use gcc_assert and gcc_unreachable instead of abort.
* lambda-mat.c (lambda_matrix_inverse_hard): Use gcc_assert and gcc_unreachable instead of abort.
* lambda.h (lambda_vector_min_nz): Likewise.
* langhooks.c (lhd_set_decl_assembler_name, lhd_can_use_bit_fields_p, lhd_incomplete_type_error, lhd_expand_expr, lhd_types_compatible_p, lhd_gimplify_expr): Likewise.
* lcm.c (optimize_mode_switching): Likewise.
* local-alloc.c (update_equiv_regs, block_alloc, find_free_reg): Likewise.
* loop-doloop.c (doloop_modify): Likewise.
* loop-invariant.c (record_use): Likewise.
* loop-iv.c (get_biv_step_1, get_biv_step, iv_analyze, get_iv_value, canon_condition, simplify_using_condition, simplify_using_initial_values, shorten_into_mode, canonicalize_iv_subregs, iv_number_of_iterations): Likewise.
* loop-unroll.c (unroll_and_peel_loops, peel_loop_completely, unroll_loop_constant_iterations, unroll_loop_runtime_iterations, peel_loop_simple, unroll_loop_stupid): Likewise.
* loop-unswitch.c (compare_and_jump_seq, unswitch_single_loop, unswitch_loop): Likewise.
* loop.c (gen_prefetch, loop_optimize, rtx_equal_for_loop_p, move_movables, replace_call_address, find_and_verify_loops, rtx_equal_for_prefetch_p, record_giv, general_induction_var, simplify_giv_expr, check_ext_dependent_givs, try_copy_prop, loop_giv_dump): Likewise.
* loop.h (INSN_LUID): Likewise.
* modulo-sched.c (normalize_sched_times, generate_prolog_epilog, sms_schedule, check_nodes_order): Likewise.
* optabs.c (gen_conditional, add_equal_note, expand_simple_binop, expand_twoval_unop, expand_twoval_binop, expand_twoval_binop_libfunc, expand_simple_unop, emit_no_conflict_block, prepare_cmp_insn, emit_cmp_and_jump_insn_1, emit_cmp_and_jump_insns, prepare_float_lib_cmp, gen_add2_insn, have_add2_insn, gen_sub2_insn, have_sub2_insn, expand_float, expand_fix, debug_optab_libfuncs): Likewise.
* opts.c (common_handle_option): Likewise.
* params.c (set_param_value): Likewise.
* passes.c (open_dump_file, rest_of_handle_final): Likewise.
* postreload-gcse.c (expr_equiv_p, oprs_unchanged_p, hash_scan_set, reg_set_between_after_reload_p, reg_used_between_after_reload_p, get_avail_load_store_reg, eliminate_partially_redundant_load): Likewise.
* postreload.c (reload_cse_simplify_set, reload_combine_note_use): Likewise.
* predict.c (tree_predicted_by_p, expected_value_to_br_prob, propagate_freq, expensive_function_p): Likewise.
* pretty-print.c (pp_base_format_text): Likewise.
* profile.c (instrument_edges, instrument_values, compute_branch_probabilities, branch_prob, union_groups, end_branch_prob, tree_register_profile_hooks): Likewise.

From-SVN: r87285
parent b829f3fac7
commit 1c43d3ca81
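For reference, the two macros this patch switches to behave roughly as sketched below; the real definitions live in gcc/system.h and report through fancy_abort with the failing file, line and function, so treat this as an approximation, and the helper check_diagonal is invented purely to show the shape of a conversion.

#include <stdio.h>
#include <stdlib.h>

/* Rough stand-ins for GCC's checking macros; the real ones are in
   gcc/system.h and call fancy_abort so the ICE message names the
   source location.  */
#define gcc_assert(EXPR) \
  ((void) ((EXPR) ? 0 : (fprintf (stderr, "assert failed: %s:%d\n", \
                                  __FILE__, __LINE__), abort (), 0)))
#define gcc_unreachable() \
  (fprintf (stderr, "unreachable: %s:%d\n", __FILE__, __LINE__), abort ())

/* Typical shape of the edits in this patch: the tested condition is
   inverted and the explicit abort disappears.  */
static void
check_diagonal (int diagonal)
{
  /* Before:  if (diagonal == 0) abort ();  */
  gcc_assert (diagonal != 0);
}

int
main (void)
{
  check_diagonal (3);
  return 0;
}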
@ -1,3 +1,56 @@
2004-09-10  Giovanni Bajo  <giovannibajo@gcc.gnu.org>

	* lambda-mat.c (lambda_matrix_inverse_hard): Use gcc_assert
	and gcc_unreachable instead of abort.
	* lambda.h (lambda_vector_min_nz): Likewise.
	* langhooks.c (lhd_set_decl_assembler_name, lhd_can_use_bit_fields_p,
	lhd_incomplete_type_error, lhd_expand_expr, lhd_types_compatible_p,
	lhd_gimplify_expr): Likewise.
	* lcm.c (optimize_mode_switching): Likewise.
	* local-alloc.c (update_equiv_regs, block_alloc, find_free_reg):
	Likewise.
	* loop-doloop.c (doloop_modify): Likewise.
	* loop-invariant.c (record_use): Likewise.
	* loop-iv.c (get_biv_step_1, get_biv_step, iv_analyze, get_iv_value,
	canon_condition, simplify_using_condition,
	simplify_using_initial_values, shorten_into_mode,
	canonicalize_iv_subregs, iv_number_of_iterations): Likewise.
	* loop-unroll.c (unroll_and_peel_loops, peel_loop_completely,
	unroll_loop_constant_iterations, unroll_loop_runtime_iterations,
	peel_loop_simple, unroll_loop_stupid): Likewise.
	* loop-unswitch.c (compare_and_jump_seq, unswitch_single_loop,
	unswitch_loop): Likewise.
	* loop.c (gen_prefetch, loop_optimize, rtx_equal_for_loop_p,
	move_movables, replace_call_address, find_and_verify_loops,
	rtx_equal_for_prefetch_p, record_giv, general_induction_var,
	simplify_giv_expr, check_ext_dependent_givs, try_copy_prop,
	loop_giv_dump): Likewise.
	* loop.h (INSN_LUID): Likewise.
	* modulo-sched.c (normalize_sched_times, generate_prolog_epilog,
	sms_schedule, check_nodes_order): Likewise.
	* optabs.c (gen_conditional, add_equal_note, expand_simple_binop,
	expand_twoval_unop, expand_twoval_binop, expand_twoval_binop_libfunc,
	expand_simple_unop, emit_no_conflict_block, prepare_cmp_insn,
	emit_cmp_and_jump_insn_1, emit_cmp_and_jump_insns,
	prepare_float_lib_cmp, gen_add2_insn, have_add2_insn, gen_sub2_insn,
	have_sub2_insn, expand_float, expand_fix, debug_optab_libfuncs):
	Likewise.
	* opts.c (common_handle_option): Likewise.
	* params.c (set_param_value): Likewise.
	* passes.c (open_dump_file, rest_of_handle_final): Likewise.
	* postreload-gcse.c (expr_equiv_p, oprs_unchanged_p, hash_scan_set,
	reg_set_between_after_reload_p, reg_used_between_after_reload_p,
	get_avail_load_store_reg, eliminate_partially_redundant_load):
	Likewise.
	* postreload.c (reload_cse_simplify_set, reload_combine_note_use):
	Likewise.
	* predict.c (tree_predicted_by_p, expected_value_to_br_prob,
	propagate_freq, expensive_function_p): Likewise.
	* pretty-print.c (pp_base_format_text): Likewise.
	* profile.c (instrument_edges, instrument_values,
	compute_branch_probabilities, branch_prob, union_groups,
	end_branch_prob, tree_register_profile_hooks): Likewise.

2004-09-10  Jan Hubicka  <jh@suse.cz>

	* tree-ssa-dce.c (remove_dead_stmt): Update profile.

@ -7111,7 +7164,7 @@
	* config/i386/xmmintrin.h: Include <mm_malloc.h>.

2004-08-03  H.J. Lu  <hongjiu.lu@intel.com>
	    Tanguy Fautré  <tfautre@pandora.be>
	    Tanguy Fautré  <tfautre@pandora.be>

	* config/i386/pmm_malloc.h: New file.

@ -379,8 +379,7 @@ lambda_matrix_inverse_hard (lambda_matrix mat, lambda_matrix inv, int n)
diagonal = row[j];

/* If the matrix is singular, abort. */
if (diagonal == 0)
abort ();
gcc_assert (diagonal != 0);

determinant = determinant * diagonal;

@ -290,10 +290,7 @@ lambda_vector_min_nz (lambda_vector vec1, int n, int start)
{
int j;
int min = -1;
#ifdef ENABLE_CHECKING
if (start > n)
abort ();
#endif
gcc_assert (start <= n);
for (j = start; j < n; j++)
{
if (vec1[j])
@ -301,8 +298,7 @@ lambda_vector_min_nz (lambda_vector vec1, int n, int start)
min = j;
}

if (min < 0)
abort ();
gcc_assert (min >= 0);

return min;
}
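Several hunks in this patch, like lambda_vector_min_nz above, also drop an ENABLE_CHECKING guard around the old abort. A minimal sketch of that shape follows, using assert.h stand-ins for the GCC macro; the vector walk is illustrative, not the real lambda code.

#include <assert.h>

#define gcc_assert(EXPR) assert (EXPR)   /* stand-in for GCC's macro */

/* Before, the range check only existed in checking builds:
     #ifdef ENABLE_CHECKING
       if (start > n)
         abort ();
     #endif
   After, a single gcc_assert states the same invariant without the
   conditional compilation.  */
static int
first_nonzero (const int *vec, int n, int start)
{
  int j;

  gcc_assert (start <= n);
  for (j = start; j < n; j++)
    if (vec[j])
      return j;
  return -1;
}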
@ -156,38 +156,35 @@ lhd_set_decl_assembler_name (tree decl)
|
||||
/* The language-independent code should never use the
|
||||
DECL_ASSEMBLER_NAME for lots of DECLs. Only FUNCTION_DECLs and
|
||||
VAR_DECLs for variables with static storage duration need a real
|
||||
DECL_ASSEMBLER_NAME. */
|
||||
if (TREE_CODE (decl) == FUNCTION_DECL
|
||||
|| (TREE_CODE (decl) == VAR_DECL
|
||||
&& (TREE_STATIC (decl)
|
||||
|| DECL_EXTERNAL (decl)
|
||||
|| TREE_PUBLIC (decl))))
|
||||
{
|
||||
/* By default, assume the name to use in assembly code is the
|
||||
same as that used in the source language. (That's correct
|
||||
for C, and GCC used to set DECL_ASSEMBLER_NAME to the same
|
||||
value as DECL_NAME in build_decl, so this choice provides
|
||||
backwards compatibility with existing front-ends.
|
||||
DECL_ASSEMBLER_NAME.
|
||||
Nobody should ever be asking for the DECL_ASSEMBLER_NAME of
|
||||
these DECLs -- unless they're in language-dependent code, in
|
||||
which case set_decl_assembler_name hook should handle things. */
|
||||
gcc_assert (TREE_CODE (decl) == FUNCTION_DECL
|
||||
|| (TREE_CODE (decl) == VAR_DECL
|
||||
&& (TREE_STATIC (decl)
|
||||
|| DECL_EXTERNAL (decl)
|
||||
|| TREE_PUBLIC (decl))));
|
||||
|
||||
Can't use just the variable's own name for a variable whose
|
||||
scope is less than the whole compilation. Concatenate a
|
||||
distinguishing number - we use the DECL_UID. */
|
||||
if (TREE_PUBLIC (decl) || DECL_CONTEXT (decl) == NULL_TREE)
|
||||
SET_DECL_ASSEMBLER_NAME (decl, DECL_NAME (decl));
|
||||
else
|
||||
{
|
||||
const char *name = IDENTIFIER_POINTER (DECL_NAME (decl));
|
||||
char *label;
|
||||
/* By default, assume the name to use in assembly code is the
|
||||
same as that used in the source language. (That's correct
|
||||
for C, and GCC used to set DECL_ASSEMBLER_NAME to the same
|
||||
value as DECL_NAME in build_decl, so this choice provides
|
||||
backwards compatibility with existing front-ends.
|
||||
|
||||
ASM_FORMAT_PRIVATE_NAME (label, name, DECL_UID (decl));
|
||||
SET_DECL_ASSEMBLER_NAME (decl, get_identifier (label));
|
||||
}
|
||||
}
|
||||
Can't use just the variable's own name for a variable whose
|
||||
scope is less than the whole compilation. Concatenate a
|
||||
distinguishing number - we use the DECL_UID. */
|
||||
if (TREE_PUBLIC (decl) || DECL_CONTEXT (decl) == NULL_TREE)
|
||||
SET_DECL_ASSEMBLER_NAME (decl, DECL_NAME (decl));
|
||||
else
|
||||
/* Nobody should ever be asking for the DECL_ASSEMBLER_NAME of
|
||||
these DECLs -- unless they're in language-dependent code, in
|
||||
which case set_decl_assembler_name hook should handle things. */
|
||||
abort ();
|
||||
{
|
||||
const char *name = IDENTIFIER_POINTER (DECL_NAME (decl));
|
||||
char *label;
|
||||
|
||||
ASM_FORMAT_PRIVATE_NAME (label, name, DECL_UID (decl));
|
||||
SET_DECL_ASSEMBLER_NAME (decl, get_identifier (label));
|
||||
}
|
||||
}
|
||||
|
||||
/* By default we always allow bit-field based optimizations. */
|
||||
@ -201,7 +198,7 @@ lhd_can_use_bit_fields_p (void)
|
||||
tree
|
||||
lhd_type_promotes_to (tree ARG_UNUSED (type))
|
||||
{
|
||||
abort ();
|
||||
gcc_unreachable ();
|
||||
}
|
||||
|
||||
/* Registration of machine- or os-specific builtin types. */
|
||||
@ -215,10 +212,7 @@ lhd_register_builtin_type (tree ARG_UNUSED (type),
|
||||
void
|
||||
lhd_incomplete_type_error (tree ARG_UNUSED (value), tree type)
|
||||
{
|
||||
if (TREE_CODE (type) == ERROR_MARK)
|
||||
return;
|
||||
|
||||
abort ();
|
||||
gcc_assert (TREE_CODE (type) == ERROR_MARK);
|
||||
}
|
||||
|
||||
/* Provide a default routine for alias sets that always returns -1. This
|
||||
@ -247,7 +241,7 @@ lhd_expand_expr (tree ARG_UNUSED (t), rtx ARG_UNUSED (r),
|
||||
int ARG_UNUSED (em),
|
||||
rtx * ARG_UNUSED (a))
|
||||
{
|
||||
abort ();
|
||||
gcc_unreachable ();
|
||||
}
|
||||
|
||||
/* The default language-specific function for expanding a decl. After
|
||||
@ -287,7 +281,7 @@ lhd_types_compatible_p (tree x, tree y)
|
||||
information associated to common tree codes. If a tree node is
|
||||
completely handled within this function, it should set *SUBTREES to
|
||||
0, so that generic handling isn't attempted. For language-specific
|
||||
tree codes, generic handling would abort(), so make sure it is set
|
||||
tree codes, generic handling would assert out, so make sure it is set
|
||||
properly. Both SUBTREES and *SUBTREES is guaranteed to be nonzero
|
||||
when the function is called. */
|
||||
|
||||
@ -442,8 +436,7 @@ lhd_gimplify_expr (tree *expr_p ATTRIBUTE_UNUSED, tree *pre_p ATTRIBUTE_UNUSED,
|
||||
size_t
|
||||
lhd_tree_size (enum tree_code c ATTRIBUTE_UNUSED)
|
||||
{
|
||||
abort ();
|
||||
return 0;
|
||||
gcc_unreachable ();
|
||||
}
|
||||
|
||||
/* Return true if decl, which is a function decl, may be called by a
|
||||
|
gcc/lcm.c
@ -1029,8 +1029,7 @@ optimize_mode_switching (FILE *file)
|
||||
{
|
||||
regset live_at_end = eg->src->global_live_at_end;
|
||||
|
||||
if (pre_exit)
|
||||
abort ();
|
||||
gcc_assert (!pre_exit);
|
||||
pre_exit = split_edge (eg);
|
||||
COPY_REG_SET (pre_exit->global_live_at_start, live_at_end);
|
||||
COPY_REG_SET (pre_exit->global_live_at_end, live_at_end);
|
||||
@ -1208,21 +1207,22 @@ optimize_mode_switching (FILE *file)
|
||||
emited = true;
|
||||
if (JUMP_P (BB_END (src_bb)))
|
||||
emit_insn_before (mode_set, BB_END (src_bb));
|
||||
/* It doesn't make sense to switch to normal mode
|
||||
after a CALL_INSN, so we're going to abort if we
|
||||
find one. The cases in which a CALL_INSN may
|
||||
have an abnormal edge are sibcalls and EH edges.
|
||||
In the case of sibcalls, the dest basic-block is
|
||||
the EXIT_BLOCK, that runs in normal mode; it is
|
||||
assumed that a sibcall insn requires normal mode
|
||||
itself, so no mode switch would be required after
|
||||
the call (it wouldn't make sense, anyway). In
|
||||
the case of EH edges, EH entry points also start
|
||||
in normal mode, so a similar reasoning applies. */
|
||||
else if (NONJUMP_INSN_P (BB_END (src_bb)))
|
||||
emit_insn_after (mode_set, BB_END (src_bb));
|
||||
else
|
||||
abort ();
|
||||
{
|
||||
/* It doesn't make sense to switch to normal mode
|
||||
after a CALL_INSN, so we're going to abort if we
|
||||
find one. The cases in which a CALL_INSN may
|
||||
have an abnormal edge are sibcalls and EH edges.
|
||||
In the case of sibcalls, the dest basic-block is
|
||||
the EXIT_BLOCK, that runs in normal mode; it is
|
||||
assumed that a sibcall insn requires normal mode
|
||||
itself, so no mode switch would be required after
|
||||
the call (it wouldn't make sense, anyway). In
|
||||
the case of EH edges, EH entry points also start
|
||||
in normal mode, so a similar reasoning applies. */
|
||||
gcc_assert (NONJUMP_INSN_P (BB_END (src_bb)));
|
||||
emit_insn_after (mode_set, BB_END (src_bb));
|
||||
}
|
||||
bb_info[j][src_bb->index].computing = mode;
|
||||
RESET_BIT (transp[src_bb->index], j);
|
||||
}
|
||||
|
@ -1028,9 +1028,9 @@ update_equiv_regs (void)
|
||||
once and used once. (If it were only set, but not used,
|
||||
flow would have deleted the setting insns.) Hence
|
||||
there can only be one insn in reg_equiv[REGNO].init_insns. */
|
||||
if (reg_equiv[regno].init_insns == NULL_RTX
|
||||
|| XEXP (reg_equiv[regno].init_insns, 1) != NULL_RTX)
|
||||
abort ();
|
||||
gcc_assert (reg_equiv[regno].init_insns != NULL_RTX);
|
||||
gcc_assert (XEXP (reg_equiv[regno].init_insns, 1)
|
||||
== NULL_RTX);
|
||||
equiv_insn = XEXP (reg_equiv[regno].init_insns, 0);
|
||||
|
||||
/* We may not move instructions that can throw, since
|
||||
@ -1188,8 +1188,10 @@ block_alloc (int b)
|
||||
while (1)
|
||||
{
|
||||
if (!NOTE_P (insn))
|
||||
if (++insn_count > max_uid)
|
||||
abort ();
|
||||
{
|
||||
++insn_count;
|
||||
gcc_assert (insn_count <= max_uid);
|
||||
}
|
||||
if (insn == BB_HEAD (BASIC_BLOCK (b)))
|
||||
break;
|
||||
insn = PREV_INSN (insn);
|
||||
@ -2110,8 +2112,8 @@ find_free_reg (enum reg_class class, enum machine_mode mode, int qtyno,
|
||||
#endif
|
||||
|
||||
/* Validate our parameters. */
|
||||
if (born_index < 0 || born_index > dead_index)
|
||||
abort ();
|
||||
gcc_assert (born_index >= 0);
|
||||
gcc_assert (born_index < dead_index);
|
||||
|
||||
/* Don't let a pseudo live in a reg across a function call
|
||||
if we might get a nonlocal goto. */
|
||||
|
@ -303,16 +303,16 @@ doloop_modify (struct loop *loop, struct niter_desc *desc,
|
||||
increment_count = true;
|
||||
noloop = const1_rtx;
|
||||
}
|
||||
else if (XEXP (condition, 1) == const0_rtx)
|
||||
noloop = const0_rtx;
|
||||
else
|
||||
abort ();
|
||||
{
|
||||
gcc_assert (XEXP (condition, 1) == const0_rtx);
|
||||
noloop = const0_rtx;
|
||||
}
|
||||
break;
|
||||
|
||||
case GE:
|
||||
/* Currently only GE tests against zero are supported. */
|
||||
if (XEXP (condition, 1) != const0_rtx)
|
||||
abort ();
|
||||
gcc_assert (XEXP (condition, 1) == const0_rtx);
|
||||
|
||||
noloop = constm1_rtx;
|
||||
|
||||
@ -327,9 +327,9 @@ doloop_modify (struct loop *loop, struct niter_desc *desc,
|
||||
nonneg = 1;
|
||||
break;
|
||||
|
||||
/* Abort if an invalid doloop pattern has been generated. */
|
||||
default:
|
||||
abort ();
|
||||
/* Abort if an invalid doloop pattern has been generated. */
|
||||
gcc_unreachable ();
|
||||
}
|
||||
|
||||
if (increment_count)
|
||||
|
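The doloop_modify hunk above is representative of how switch statements are handled throughout the patch: a default arm that used to call abort () now calls gcc_unreachable (), and any explanatory comment moves next to it. A compact sketch follows; the enum and return values are invented for illustration, and plain abort () stands in for gcc_unreachable (), which additionally reports the source location.

#include <stdlib.h>

#define gcc_unreachable() abort ()   /* stand-in for GCC's macro */

enum cond_code { CC_NE, CC_GE };

static int
noloop_value (enum cond_code code)
{
  switch (code)
    {
    case CC_NE:
      return 0;
    case CC_GE:
      return -1;
    /* An invalid pattern would land here.  */
    default:
      gcc_unreachable ();
    }
}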
@ -353,8 +353,7 @@ record_use (struct def *def, rtx *use, rtx insn)
|
||||
|
||||
if (GET_CODE (*use) == SUBREG)
|
||||
use = &SUBREG_REG (*use);
|
||||
if (!REG_P (*use))
|
||||
abort ();
|
||||
gcc_assert (REG_P (*use));
|
||||
|
||||
u->pos = use;
|
||||
u->insn = insn;
|
||||
|
@ -793,16 +793,15 @@ get_biv_step_1 (rtx insn, rtx reg,
|
||||
|
||||
case SIGN_EXTEND:
|
||||
case ZERO_EXTEND:
|
||||
if (GET_MODE (op0) != *inner_mode
|
||||
|| *extend != UNKNOWN
|
||||
|| *outer_step != const0_rtx)
|
||||
abort ();
|
||||
gcc_assert (GET_MODE (op0) == *inner_mode);
|
||||
gcc_assert (*extend == UNKNOWN);
|
||||
gcc_assert (*outer_step == const0_rtx);
|
||||
|
||||
*extend = code;
|
||||
break;
|
||||
|
||||
default:
|
||||
abort ();
|
||||
gcc_unreachable ();
|
||||
}
|
||||
|
||||
return true;
|
||||
@ -826,17 +825,9 @@ get_biv_step (rtx reg, rtx *inner_step, enum machine_mode *inner_mode,
|
||||
outer_step))
|
||||
return false;
|
||||
|
||||
if (*inner_mode != *outer_mode
|
||||
&& *extend == UNKNOWN)
|
||||
abort ();
|
||||
|
||||
if (*inner_mode == *outer_mode
|
||||
&& *extend != UNKNOWN)
|
||||
abort ();
|
||||
|
||||
if (*inner_mode == *outer_mode
|
||||
&& *outer_step != const0_rtx)
|
||||
abort ();
|
||||
gcc_assert (*inner_mode == *outer_mode || *extend != UNKNOWN);
|
||||
gcc_assert (*inner_mode != *outer_mode || *extend == UNKNOWN);
|
||||
gcc_assert (*inner_mode != *outer_mode || *outer_step == const0_rtx);
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -1085,8 +1076,7 @@ iv_analyze (rtx insn, rtx def, struct rtx_iv *iv)
|
||||
mby = XEXP (rhs, 1);
|
||||
if (!CONSTANT_P (mby))
|
||||
{
|
||||
if (!CONSTANT_P (op0))
|
||||
abort ();
|
||||
gcc_assert (CONSTANT_P (op0));
|
||||
tmp = op0;
|
||||
op0 = mby;
|
||||
mby = tmp;
|
||||
@ -1094,14 +1084,13 @@ iv_analyze (rtx insn, rtx def, struct rtx_iv *iv)
|
||||
break;
|
||||
|
||||
case ASHIFT:
|
||||
if (CONSTANT_P (XEXP (rhs, 0)))
|
||||
abort ();
|
||||
gcc_assert (!CONSTANT_P (XEXP (rhs, 0)));
|
||||
op0 = XEXP (rhs, 0);
|
||||
mby = XEXP (rhs, 1);
|
||||
break;
|
||||
|
||||
default:
|
||||
abort ();
|
||||
gcc_unreachable ();
|
||||
}
|
||||
|
||||
amode = GET_MODE (rhs);
|
||||
@ -1192,8 +1181,7 @@ get_iv_value (struct rtx_iv *iv, rtx iteration)
|
||||
|
||||
/* We would need to generate some if_then_else patterns, and so far
|
||||
it is not needed anywhere. */
|
||||
if (iv->first_special)
|
||||
abort ();
|
||||
gcc_assert (!iv->first_special);
|
||||
|
||||
if (iv->step != const0_rtx && iteration != const0_rtx)
|
||||
val = simplify_gen_binary (PLUS, iv->extend_mode, iv->base,
|
||||
@ -1529,8 +1517,7 @@ canon_condition (rtx cond)
|
||||
mode = GET_MODE (op0);
|
||||
if (mode == VOIDmode)
|
||||
mode = GET_MODE (op1);
|
||||
if (mode == VOIDmode)
|
||||
abort ();
|
||||
gcc_assert (mode != VOIDmode);
|
||||
|
||||
if (GET_CODE (op1) == CONST_INT
|
||||
&& GET_MODE_CLASS (mode) != MODE_CC
|
||||
@ -1659,20 +1646,21 @@ simplify_using_condition (rtx cond, rtx *expr, regset altered)
|
||||
static void
|
||||
eliminate_implied_condition (enum rtx_code op, rtx a, rtx *b)
|
||||
{
|
||||
if (op == AND)
|
||||
switch (op)
|
||||
{
|
||||
case AND:
|
||||
/* If A implies *B, we may replace *B by true. */
|
||||
if (implies_p (a, *b))
|
||||
*b = const_true_rtx;
|
||||
}
|
||||
else if (op == IOR)
|
||||
{
|
||||
break;
|
||||
case IOR:
|
||||
/* If *B implies A, we may replace *B by false. */
|
||||
if (implies_p (*b, a))
|
||||
*b = const0_rtx;
|
||||
break;
|
||||
default:
|
||||
gcc_unreachable ();
|
||||
}
|
||||
else
|
||||
abort ();
|
||||
}
|
||||
|
||||
/* Eliminates the conditions in TAIL that are implied by HEAD. OP is the
|
||||
@ -1714,18 +1702,19 @@ simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr)
|
||||
|
||||
eliminate_implied_conditions (op, &head, tail);
|
||||
|
||||
if (op == AND)
|
||||
switch (op)
|
||||
{
|
||||
case AND:
|
||||
neutral = const_true_rtx;
|
||||
aggr = const0_rtx;
|
||||
}
|
||||
else if (op == IOR)
|
||||
{
|
||||
break;
|
||||
case IOR:
|
||||
neutral = const0_rtx;
|
||||
aggr = const_true_rtx;
|
||||
break;
|
||||
default:
|
||||
gcc_unreachable ();
|
||||
}
|
||||
else
|
||||
abort ();
|
||||
|
||||
simplify_using_initial_values (loop, UNKNOWN, &head);
|
||||
if (head == aggr)
|
||||
@ -1753,8 +1742,7 @@ simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr)
|
||||
return;
|
||||
}
|
||||
|
||||
if (op != UNKNOWN)
|
||||
abort ();
|
||||
gcc_assert (op == UNKNOWN);
|
||||
|
||||
e = loop_preheader_edge (loop);
|
||||
if (e->src == ENTRY_BLOCK_PTR)
|
||||
@ -1856,7 +1844,7 @@ shorten_into_mode (struct rtx_iv *iv, enum machine_mode mode,
|
||||
break;
|
||||
|
||||
default:
|
||||
abort ();
|
||||
gcc_unreachable ();
|
||||
}
|
||||
|
||||
iv->mode = mode;
|
||||
@ -1914,7 +1902,7 @@ canonicalize_iv_subregs (struct rtx_iv *iv0, struct rtx_iv *iv1,
|
||||
break;
|
||||
|
||||
default:
|
||||
abort ();
|
||||
gcc_unreachable ();
|
||||
}
|
||||
|
||||
/* Values of both variables should be computed in the same mode. These
|
||||
@ -2013,15 +2001,13 @@ iv_number_of_iterations (struct loop *loop, rtx insn, rtx condition,
|
||||
desc->niter_max = 0;
|
||||
|
||||
cond = GET_CODE (condition);
|
||||
if (!COMPARISON_P (condition))
|
||||
abort ();
|
||||
gcc_assert (COMPARISON_P (condition));
|
||||
|
||||
mode = GET_MODE (XEXP (condition, 0));
|
||||
if (mode == VOIDmode)
|
||||
mode = GET_MODE (XEXP (condition, 1));
|
||||
/* The constant comparisons should be folded. */
|
||||
if (mode == VOIDmode)
|
||||
abort ();
|
||||
gcc_assert (mode != VOIDmode);
|
||||
|
||||
/* We only handle integers or pointers. */
|
||||
if (GET_MODE_CLASS (mode) != MODE_INT
|
||||
|
@ -116,7 +116,7 @@ unroll_and_peel_loops (struct loops *loops, int flags)
|
||||
{
|
||||
case LPT_PEEL_COMPLETELY:
|
||||
/* Already done. */
|
||||
abort ();
|
||||
gcc_unreachable ();
|
||||
case LPT_PEEL_SIMPLE:
|
||||
peel_loop_simple (loops, loop);
|
||||
break;
|
||||
@ -133,7 +133,7 @@ unroll_and_peel_loops (struct loops *loops, int flags)
|
||||
check = false;
|
||||
break;
|
||||
default:
|
||||
abort ();
|
||||
gcc_unreachable ();
|
||||
}
|
||||
if (check)
|
||||
{
|
||||
@ -433,6 +433,8 @@ peel_loop_completely (struct loops *loops, struct loop *loop)
|
||||
|
||||
if (npeel)
|
||||
{
|
||||
int ok;
|
||||
|
||||
wont_exit = sbitmap_alloc (npeel + 1);
|
||||
sbitmap_ones (wont_exit);
|
||||
RESET_BIT (wont_exit, 0);
|
||||
@ -442,11 +444,12 @@ peel_loop_completely (struct loops *loops, struct loop *loop)
|
||||
remove_edges = xcalloc (npeel, sizeof (edge));
|
||||
n_remove_edges = 0;
|
||||
|
||||
if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
|
||||
loops, npeel,
|
||||
wont_exit, desc->out_edge, remove_edges, &n_remove_edges,
|
||||
DLTHE_FLAG_UPDATE_FREQ))
|
||||
abort ();
|
||||
ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
|
||||
loops, npeel,
|
||||
wont_exit, desc->out_edge,
|
||||
remove_edges, &n_remove_edges,
|
||||
DLTHE_FLAG_UPDATE_FREQ);
|
||||
gcc_assert (ok);
|
||||
|
||||
free (wont_exit);
|
||||
|
||||
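Note the shape used for duplicate_loop_to_header_edge above and in the unrolling hunks that follow: the call is not placed inside gcc_assert. Keeping side-effecting work outside the assert means the transformation still runs even in a build where the asserted expression might not be evaluated (that configurability is an assumption of this note; the patch is simply conservative about it). A minimal sketch with an invented helper:

#include <assert.h>

#define gcc_assert(EXPR) assert (EXPR)   /* stand-in for GCC's macro */

/* Invented helper standing in for duplicate_loop_to_header_edge:
   it mutates state and returns nonzero on success.  */
static int
do_transform (int *state)
{
  *state += 1;
  return 1;
}

static void
caller (int *state)
{
  int ok;

  /* Before:  if (!do_transform (state)) abort ();
     After: run the call unconditionally, assert only the result.  */
  ok = do_transform (state);
  gcc_assert (ok);
}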
@ -597,11 +600,12 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop)
|
||||
unsigned max_unroll = loop->lpt_decision.times;
|
||||
struct niter_desc *desc = get_simple_loop_desc (loop);
|
||||
bool exit_at_end = loop_exit_at_end_p (loop);
|
||||
int ok;
|
||||
|
||||
niter = desc->niter;
|
||||
|
||||
if (niter <= max_unroll + 1)
|
||||
abort (); /* Should not get here (such loop should be peeled instead). */
|
||||
/* Should not assert out here (such loop should be peeled instead). */
|
||||
gcc_assert (niter > max_unroll + 1);
|
||||
|
||||
exit_mod = niter % (max_unroll + 1);
|
||||
|
||||
@ -627,12 +631,14 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop)
|
||||
|
||||
if (exit_mod)
|
||||
{
|
||||
if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
|
||||
int ok;
|
||||
|
||||
ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
|
||||
loops, exit_mod,
|
||||
wont_exit, desc->out_edge,
|
||||
remove_edges, &n_remove_edges,
|
||||
DLTHE_FLAG_UPDATE_FREQ))
|
||||
abort ();
|
||||
DLTHE_FLAG_UPDATE_FREQ);
|
||||
gcc_assert (ok);
|
||||
|
||||
desc->noloop_assumptions = NULL_RTX;
|
||||
desc->niter -= exit_mod;
|
||||
@ -655,15 +661,18 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop)
|
||||
if (exit_mod != max_unroll
|
||||
|| desc->noloop_assumptions)
|
||||
{
|
||||
int ok;
|
||||
|
||||
RESET_BIT (wont_exit, 0);
|
||||
if (desc->noloop_assumptions)
|
||||
RESET_BIT (wont_exit, 1);
|
||||
|
||||
if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
|
||||
loops, exit_mod + 1,
|
||||
wont_exit, desc->out_edge, remove_edges, &n_remove_edges,
|
||||
DLTHE_FLAG_UPDATE_FREQ))
|
||||
abort ();
|
||||
ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
|
||||
loops, exit_mod + 1,
|
||||
wont_exit, desc->out_edge,
|
||||
remove_edges, &n_remove_edges,
|
||||
DLTHE_FLAG_UPDATE_FREQ);
|
||||
gcc_assert (ok);
|
||||
|
||||
desc->niter -= exit_mod + 1;
|
||||
desc->niter_max -= exit_mod + 1;
|
||||
@ -677,11 +686,12 @@ unroll_loop_constant_iterations (struct loops *loops, struct loop *loop)
|
||||
}
|
||||
|
||||
/* Now unroll the loop. */
|
||||
if (!duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
|
||||
loops, max_unroll,
|
||||
wont_exit, desc->out_edge, remove_edges, &n_remove_edges,
|
||||
DLTHE_FLAG_UPDATE_FREQ))
|
||||
abort ();
|
||||
ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
|
||||
loops, max_unroll,
|
||||
wont_exit, desc->out_edge,
|
||||
remove_edges, &n_remove_edges,
|
||||
DLTHE_FLAG_UPDATE_FREQ);
|
||||
gcc_assert (ok);
|
||||
|
||||
free (wont_exit);
|
||||
|
||||
@ -842,6 +852,7 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop)
|
||||
unsigned max_unroll = loop->lpt_decision.times;
|
||||
struct niter_desc *desc = get_simple_loop_desc (loop);
|
||||
bool exit_at_end = loop_exit_at_end_p (loop);
|
||||
int ok;
|
||||
|
||||
/* Remember blocks whose dominators will have to be updated. */
|
||||
dom_bbs = xcalloc (n_basic_blocks, sizeof (basic_block));
|
||||
@ -916,11 +927,12 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop)
|
||||
&& !desc->noloop_assumptions)
|
||||
SET_BIT (wont_exit, 1);
|
||||
ezc_swtch = loop_preheader_edge (loop)->src;
|
||||
if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
|
||||
loops, 1,
|
||||
wont_exit, desc->out_edge, remove_edges, &n_remove_edges,
|
||||
DLTHE_FLAG_UPDATE_FREQ))
|
||||
abort ();
|
||||
ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
|
||||
loops, 1,
|
||||
wont_exit, desc->out_edge,
|
||||
remove_edges, &n_remove_edges,
|
||||
DLTHE_FLAG_UPDATE_FREQ);
|
||||
gcc_assert (ok);
|
||||
|
||||
/* Record the place where switch will be built for preconditioning. */
|
||||
swtch = loop_split_edge_with (loop_preheader_edge (loop),
|
||||
@ -932,11 +944,12 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop)
|
||||
sbitmap_zero (wont_exit);
|
||||
if (i != n_peel - 1 || !last_may_exit)
|
||||
SET_BIT (wont_exit, 1);
|
||||
if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
|
||||
loops, 1,
|
||||
wont_exit, desc->out_edge, remove_edges, &n_remove_edges,
|
||||
DLTHE_FLAG_UPDATE_FREQ))
|
||||
abort ();
|
||||
ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
|
||||
loops, 1,
|
||||
wont_exit, desc->out_edge,
|
||||
remove_edges, &n_remove_edges,
|
||||
DLTHE_FLAG_UPDATE_FREQ);
|
||||
gcc_assert (ok);
|
||||
|
||||
/* Create item for switch. */
|
||||
j = n_peel - i - (extra_zero_check ? 0 : 1);
|
||||
@ -979,11 +992,12 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop)
|
||||
sbitmap_ones (wont_exit);
|
||||
RESET_BIT (wont_exit, may_exit_copy);
|
||||
|
||||
if (!duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
|
||||
loops, max_unroll,
|
||||
wont_exit, desc->out_edge, remove_edges, &n_remove_edges,
|
||||
DLTHE_FLAG_UPDATE_FREQ))
|
||||
abort ();
|
||||
ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
|
||||
loops, max_unroll,
|
||||
wont_exit, desc->out_edge,
|
||||
remove_edges, &n_remove_edges,
|
||||
DLTHE_FLAG_UPDATE_FREQ);
|
||||
gcc_assert (ok);
|
||||
|
||||
free (wont_exit);
|
||||
|
||||
@ -1013,8 +1027,7 @@ unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop)
|
||||
preconditioning and the fact that the value must be valid at entry
|
||||
of the loop. After passing through the above code, we see that
|
||||
the correct new number of iterations is this: */
|
||||
if (desc->const_iter)
|
||||
abort ();
|
||||
gcc_assert (!desc->const_iter);
|
||||
desc->niter_expr =
|
||||
simplify_gen_binary (UDIV, desc->mode, old_niter, GEN_INT (max_unroll + 1));
|
||||
desc->niter_max /= max_unroll + 1;
|
||||
@ -1138,14 +1151,16 @@ peel_loop_simple (struct loops *loops, struct loop *loop)
|
||||
sbitmap wont_exit;
|
||||
unsigned npeel = loop->lpt_decision.times;
|
||||
struct niter_desc *desc = get_simple_loop_desc (loop);
|
||||
int ok;
|
||||
|
||||
wont_exit = sbitmap_alloc (npeel + 1);
|
||||
sbitmap_zero (wont_exit);
|
||||
|
||||
if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
|
||||
loops, npeel, wont_exit, NULL, NULL, NULL,
|
||||
DLTHE_FLAG_UPDATE_FREQ))
|
||||
abort ();
|
||||
ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
|
||||
loops, npeel, wont_exit,
|
||||
NULL, NULL, NULL,
|
||||
DLTHE_FLAG_UPDATE_FREQ);
|
||||
gcc_assert (ok);
|
||||
|
||||
free (wont_exit);
|
||||
|
||||
@ -1271,14 +1286,16 @@ unroll_loop_stupid (struct loops *loops, struct loop *loop)
|
||||
sbitmap wont_exit;
|
||||
unsigned nunroll = loop->lpt_decision.times;
|
||||
struct niter_desc *desc = get_simple_loop_desc (loop);
|
||||
int ok;
|
||||
|
||||
wont_exit = sbitmap_alloc (nunroll + 1);
|
||||
sbitmap_zero (wont_exit);
|
||||
|
||||
if (!duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
|
||||
loops, nunroll, wont_exit, NULL, NULL, NULL,
|
||||
DLTHE_FLAG_UPDATE_FREQ))
|
||||
abort ();
|
||||
ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
|
||||
loops, nunroll, wont_exit,
|
||||
NULL, NULL, NULL,
|
||||
DLTHE_FLAG_UPDATE_FREQ);
|
||||
gcc_assert (ok);
|
||||
|
||||
free (wont_exit);
|
||||
|
||||
|
@ -103,13 +103,11 @@ compare_and_jump_seq (rtx op0, rtx op1, enum rtx_code comp, rtx label, int prob,
|
||||
{
|
||||
/* A hack -- there seems to be no easy generic way how to make a
|
||||
conditional jump from a ccmode comparison. */
|
||||
if (!cinsn)
|
||||
abort ();
|
||||
gcc_assert (cinsn);
|
||||
cond = XEXP (SET_SRC (pc_set (cinsn)), 0);
|
||||
if (GET_CODE (cond) != comp
|
||||
|| !rtx_equal_p (op0, XEXP (cond, 0))
|
||||
|| !rtx_equal_p (op1, XEXP (cond, 1)))
|
||||
abort ();
|
||||
gcc_assert (GET_CODE (cond) == comp);
|
||||
gcc_assert (rtx_equal_p (op0, XEXP (cond, 0)));
|
||||
gcc_assert (rtx_equal_p (op1, XEXP (cond, 1)));
|
||||
emit_jump_insn (copy_insn (PATTERN (cinsn)));
|
||||
jump = get_last_insn ();
|
||||
JUMP_LABEL (jump) = JUMP_LABEL (cinsn);
|
||||
@ -118,8 +116,7 @@ compare_and_jump_seq (rtx op0, rtx op1, enum rtx_code comp, rtx label, int prob,
|
||||
}
|
||||
else
|
||||
{
|
||||
if (cinsn)
|
||||
abort ();
|
||||
gcc_assert (!cinsn);
|
||||
|
||||
op0 = force_operand (op0, NULL_RTX);
|
||||
op1 = force_operand (op1, NULL_RTX);
|
||||
@ -379,8 +376,7 @@ unswitch_single_loop (struct loops *loops, struct loop *loop,
|
||||
|
||||
/* Unswitch the loop on this condition. */
|
||||
nloop = unswitch_loop (loops, loop, bbs[i], cond, cinsn);
|
||||
if (!nloop)
|
||||
abort ();
|
||||
gcc_assert (nloop);
|
||||
|
||||
/* Invoke itself on modified loops. */
|
||||
unswitch_single_loop (loops, nloop, rconds, num + 1);
|
||||
@ -412,19 +408,17 @@ unswitch_loop (struct loops *loops, struct loop *loop, basic_block unswitch_on,
|
||||
rtx seq;
|
||||
|
||||
/* Some sanity checking. */
|
||||
if (!flow_bb_inside_loop_p (loop, unswitch_on))
|
||||
abort ();
|
||||
if (!unswitch_on->succ || !unswitch_on->succ->succ_next ||
|
||||
unswitch_on->succ->succ_next->succ_next)
|
||||
abort ();
|
||||
if (!just_once_each_iteration_p (loop, unswitch_on))
|
||||
abort ();
|
||||
if (loop->inner)
|
||||
abort ();
|
||||
if (!flow_bb_inside_loop_p (loop, unswitch_on->succ->dest))
|
||||
abort ();
|
||||
if (!flow_bb_inside_loop_p (loop, unswitch_on->succ->succ_next->dest))
|
||||
abort ();
|
||||
gcc_assert (flow_bb_inside_loop_p (loop, unswitch_on));
|
||||
|
||||
gcc_assert (unswitch_on->succ);
|
||||
gcc_assert (unswitch_on->succ->succ_next);
|
||||
gcc_assert (!unswitch_on->succ->succ_next->succ_next);
|
||||
|
||||
gcc_assert (just_once_each_iteration_p (loop, unswitch_on));
|
||||
gcc_assert (!loop->inner);
|
||||
gcc_assert (flow_bb_inside_loop_p (loop, unswitch_on->succ->dest));
|
||||
gcc_assert (flow_bb_inside_loop_p (loop,
|
||||
unswitch_on->succ->succ_next->dest));
|
||||
|
||||
entry = loop_preheader_edge (loop);
|
||||
|
||||
|
gcc/loop.c
@ -77,7 +77,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
|
||||
#ifndef HAVE_prefetch
|
||||
#define HAVE_prefetch 0
|
||||
#define CODE_FOR_prefetch 0
|
||||
#define gen_prefetch(a,b,c) (abort(), NULL_RTX)
|
||||
#define gen_prefetch(a,b,c) (gcc_unreachable(), NULL_RTX)
|
||||
#endif
|
||||
|
||||
/* Give up the prefetch optimizations once we exceed a given threshold.
|
||||
@ -501,8 +501,8 @@ loop_optimize (rtx f, FILE *dumpfile, int flags)
|
||||
|
||||
/* See if we went too far. Note that get_max_uid already returns
|
||||
one more that the maximum uid of all insn. */
|
||||
if (get_max_uid () > max_uid_for_loop)
|
||||
abort ();
|
||||
gcc_assert (get_max_uid () <= max_uid_for_loop);
|
||||
|
||||
/* Now reset it to the actual size we need. See above. */
|
||||
max_uid_for_loop = get_max_uid ();
|
||||
|
||||
@ -1746,7 +1746,7 @@ rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables,
|
||||
contain anything but integers and other rtx's,
|
||||
except for within LABEL_REFs and SYMBOL_REFs. */
|
||||
default:
|
||||
abort ();
|
||||
gcc_unreachable ();
|
||||
}
|
||||
}
|
||||
return 1;
|
||||
@ -1940,21 +1940,24 @@ move_movables (struct loop *loop, struct loop_movables *movables,
|
||||
|
||||
for (count = m->consec; count >= 0; count--)
|
||||
{
|
||||
/* If this is the first insn of a library call sequence,
|
||||
something is very wrong. */
|
||||
if (!NOTE_P (p)
|
||||
&& (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
|
||||
abort ();
|
||||
|
||||
/* If this is the last insn of a libcall sequence, then
|
||||
delete every insn in the sequence except the last.
|
||||
The last insn is handled in the normal manner. */
|
||||
if (!NOTE_P (p)
|
||||
&& (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
|
||||
if (!NOTE_P (p))
|
||||
{
|
||||
temp = XEXP (temp, 0);
|
||||
while (temp != p)
|
||||
temp = delete_insn (temp);
|
||||
/* If this is the first insn of a library call
|
||||
sequence, something is very wrong. */
|
||||
gcc_assert (!find_reg_note (p, REG_LIBCALL,
|
||||
NULL_RTX));
|
||||
|
||||
/* If this is the last insn of a libcall sequence,
|
||||
then delete every insn in the sequence except
|
||||
the last. The last insn is handled in the
|
||||
normal manner. */
|
||||
temp = find_reg_note (p, REG_RETVAL, NULL_RTX);
|
||||
if (temp)
|
||||
{
|
||||
temp = XEXP (temp, 0);
|
||||
while (temp != p)
|
||||
temp = delete_insn (temp);
|
||||
}
|
||||
}
|
||||
|
||||
temp = p;
|
||||
@ -2118,8 +2121,7 @@ move_movables (struct loop *loop, struct loop_movables *movables,
|
||||
<< GET_MODE_BITSIZE (m->savemode)))
|
||||
- 1),
|
||||
reg, 1, OPTAB_LIB_WIDEN);
|
||||
if (tem == 0)
|
||||
abort ();
|
||||
gcc_assert (tem != 0);
|
||||
if (tem != reg)
|
||||
emit_move_insn (reg, tem);
|
||||
sequence = get_insns ();
|
||||
@ -2407,8 +2409,7 @@ replace_call_address (rtx x, rtx reg, rtx addr)
|
||||
case MEM:
|
||||
/* If this MEM uses a reg other than the one we expected,
|
||||
something is wrong. */
|
||||
if (XEXP (x, 0) != reg)
|
||||
abort ();
|
||||
gcc_assert (XEXP (x, 0) == reg);
|
||||
XEXP (x, 0) = addr;
|
||||
return;
|
||||
|
||||
@ -2723,8 +2724,7 @@ find_and_verify_loops (rtx f, struct loops *loops)
|
||||
break;
|
||||
|
||||
case NOTE_INSN_LOOP_END:
|
||||
if (! current_loop)
|
||||
abort ();
|
||||
gcc_assert (current_loop);
|
||||
|
||||
current_loop->end = insn;
|
||||
current_loop = current_loop->outer;
|
||||
@ -2912,6 +2912,7 @@ find_and_verify_loops (rtx f, struct loops *loops)
|
||||
if (invert_jump (p, new_label, 1))
|
||||
{
|
||||
rtx q, r;
|
||||
bool fail;
|
||||
|
||||
/* If no suitable BARRIER was found, create a suitable
|
||||
one before TARGET. Since TARGET is a fall through
|
||||
@ -2936,8 +2937,8 @@ find_and_verify_loops (rtx f, struct loops *loops)
|
||||
|
||||
/* Include the BARRIER after INSN and copy the
|
||||
block after LOC. */
|
||||
if (squeeze_notes (&new_label, &last_insn_to_move))
|
||||
abort ();
|
||||
fail = squeeze_notes (&new_label, &last_insn_to_move);
|
||||
gcc_assert (!fail);
|
||||
reorder_insns (new_label, last_insn_to_move, loc);
|
||||
|
||||
/* All those insns are now in TARGET_LOOP. */
|
||||
@ -2972,8 +2973,7 @@ find_and_verify_loops (rtx f, struct loops *loops)
|
||||
|
||||
/* If we didn't find it, then something is
|
||||
wrong. */
|
||||
if (! r)
|
||||
abort ();
|
||||
gcc_assert (r);
|
||||
}
|
||||
|
||||
/* P is now a jump outside the loop, so it must be put
|
||||
@ -3683,7 +3683,7 @@ rtx_equal_for_prefetch_p (rtx x, rtx y)
|
||||
contain anything but integers and other rtx's,
|
||||
except for within LABEL_REFs and SYMBOL_REFs. */
|
||||
default:
|
||||
abort ();
|
||||
gcc_unreachable ();
|
||||
}
|
||||
}
|
||||
return 1;
|
||||
@ -5721,19 +5721,14 @@ record_giv (const struct loop *loop, struct induction *v, rtx insn,
|
||||
/* Add the giv to the class of givs computed from one biv. */
|
||||
|
||||
bl = REG_IV_CLASS (ivs, REGNO (src_reg));
|
||||
if (bl)
|
||||
{
|
||||
v->next_iv = bl->giv;
|
||||
bl->giv = v;
|
||||
/* Don't count DEST_ADDR. This is supposed to count the number of
|
||||
insns that calculate givs. */
|
||||
if (type == DEST_REG)
|
||||
bl->giv_count++;
|
||||
bl->total_benefit += benefit;
|
||||
}
|
||||
else
|
||||
/* Fatal error, biv missing for this giv? */
|
||||
abort ();
|
||||
gcc_assert (bl); /* Fatal error, biv missing for this giv? */
|
||||
v->next_iv = bl->giv;
|
||||
bl->giv = v;
|
||||
/* Don't count DEST_ADDR. This is supposed to count the number of
|
||||
insns that calculate givs. */
|
||||
if (type == DEST_REG)
|
||||
bl->giv_count++;
|
||||
bl->total_benefit += benefit;
|
||||
|
||||
if (type == DEST_ADDR)
|
||||
{
|
||||
@ -6388,7 +6383,7 @@ general_induction_var (const struct loop *loop, rtx x, rtx *src_reg,
|
||||
break;
|
||||
|
||||
default:
|
||||
abort ();
|
||||
gcc_unreachable ();
|
||||
}
|
||||
|
||||
/* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
|
||||
@ -6507,7 +6502,7 @@ simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
|
||||
ext_val, benefit);
|
||||
|
||||
default:
|
||||
abort ();
|
||||
gcc_unreachable ();
|
||||
}
|
||||
|
||||
/* Each argument must be either REG, PLUS, or MULT. Convert REG to
|
||||
@ -6648,7 +6643,7 @@ simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
|
||||
ext_val, benefit);
|
||||
|
||||
default:
|
||||
abort ();
|
||||
gcc_unreachable ();
|
||||
}
|
||||
|
||||
case ASHIFT:
|
||||
@ -7400,7 +7395,7 @@ check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl)
|
||||
break;
|
||||
|
||||
default:
|
||||
abort ();
|
||||
gcc_unreachable ();
|
||||
}
|
||||
|
||||
if (ok)
|
||||
@ -10040,8 +10035,7 @@ try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
|
||||
&& REG_P (SET_DEST (set))
|
||||
&& REGNO (SET_DEST (set)) == regno)
|
||||
{
|
||||
if (init_insn)
|
||||
abort ();
|
||||
gcc_assert (!init_insn);
|
||||
|
||||
init_insn = insn;
|
||||
if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
|
||||
@ -10074,8 +10068,7 @@ try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
|
||||
}
|
||||
}
|
||||
}
|
||||
if (! init_insn)
|
||||
abort ();
|
||||
gcc_assert (init_insn);
|
||||
if (apply_change_group ())
|
||||
{
|
||||
if (loop_dump_stream)
|
||||
@ -10576,7 +10569,7 @@ loop_giv_dump (const struct induction *v, FILE *file, int verbose)
|
||||
fprintf (file, " ext tr");
|
||||
break;
|
||||
default:
|
||||
abort ();
|
||||
gcc_unreachable ();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -45,8 +45,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
of an insn added during loop, since these don't have LUIDs. */

#define INSN_LUID(INSN) \
(INSN_UID (INSN) < max_uid_for_loop ? uid_luid[INSN_UID (INSN)] \
: (abort (), -1))
(gcc_assert (INSN_UID (INSN) < max_uid_for_loop), uid_luid[INSN_UID (INSN)])

#define REGNO_FIRST_LUID(REGNO) \
(REGNO_FIRST_UID (REGNO) < max_uid_for_loop \
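INSN_LUID above shows the expression-context variant of the conversion: because gcc_assert can be used as an expression, the old conditional expression ending in (abort (), -1) becomes a comma expression that asserts the bound and then indexes the table. A small stand-alone sketch; LUID, MAX_UID and the uid_luid array here are illustrative stand-ins for the real INSN_LUID machinery.

#include <assert.h>

#define gcc_assert(EXPR) assert (EXPR)   /* stand-in for GCC's macro */

#define MAX_UID 128
static int uid_luid[MAX_UID];

/* Before:
     #define LUID(UID) ((UID) < MAX_UID ? uid_luid[(UID)] : (abort (), -1))
   After: assert the bound, then index, via the comma operator.  */
#define LUID(UID) (gcc_assert ((UID) < MAX_UID), uid_luid[(UID)])

static int
first_luid (void)
{
  return LUID (0);
}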
@ -546,8 +546,7 @@ normalize_sched_times (partial_schedule_ptr ps)
|
||||
ddg_node_ptr u = &g->nodes[i];
|
||||
int normalized_time = SCHED_TIME (u) - amount;
|
||||
|
||||
if (normalized_time < 0)
|
||||
abort ();
|
||||
gcc_assert (normalized_time >= 0);
|
||||
|
||||
SCHED_TIME (u) = normalized_time;
|
||||
SCHED_ROW (u) = normalized_time % ii;
|
||||
@ -714,8 +713,8 @@ generate_prolog_epilog (partial_schedule_ptr ps, rtx orig_loop_beg,
|
||||
label = XEXP (SET_SRC (cmp), 1);
|
||||
cond = XEXP (SET_SRC (cmp), 0);
|
||||
|
||||
if (! c_reg || GET_CODE (cond) != NE)
|
||||
abort ();
|
||||
gcc_assert (c_reg);
|
||||
gcc_assert (GET_CODE (cond) == NE);
|
||||
|
||||
XEXP (label, 0) = precond_exit_label;
|
||||
JUMP_LABEL (orig_loop_bct) = precond_exit_label_insn;
|
||||
@ -1025,8 +1024,8 @@ sms_schedule (FILE *dump_file)
|
||||
}
|
||||
|
||||
/* Make sure this is a doloop. */
|
||||
if ( !(count_reg = doloop_register_get (tail, &comp)))
|
||||
abort ();
|
||||
count_reg = doloop_register_get (tail, &comp);
|
||||
gcc_assert (count_reg);
|
||||
|
||||
/* This should be NULL_RTX if the count is unknown at compile time. */
|
||||
count_init = const_iteration_count (count_reg, pre_header, &loop_count);
|
||||
@ -1457,8 +1456,9 @@ check_nodes_order (int *node_order, int num_nodes)
|
||||
{
|
||||
int u = node_order[i];
|
||||
|
||||
if (u >= num_nodes || u < 0 || TEST_BIT (tmp, u))
|
||||
abort ();
|
||||
gcc_assert (u < num_nodes);
|
||||
gcc_assert (u >= 0);
|
||||
gcc_assert (!TEST_BIT (tmp, u));
|
||||
|
||||
SET_BIT (tmp, u);
|
||||
}
|
||||
|
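check_nodes_order above also shows how compound conditions are treated: one "if (a || b || c) abort ();" becomes one gcc_assert per conjunct, with each sense inverted, which pinpoints the failing condition in the resulting ICE. Sketch (names and the assert.h stand-in are invented for illustration):

#include <assert.h>

#define gcc_assert(EXPR) assert (EXPR)   /* stand-in for GCC's macro */

static void
check_node (int u, int num_nodes, int already_seen)
{
  /* Before:  if (u >= num_nodes || u < 0 || already_seen) abort ();  */
  gcc_assert (u < num_nodes);
  gcc_assert (u >= 0);
  gcc_assert (!already_seen);
}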
gcc/optabs.c
@ -119,7 +119,7 @@ static rtx expand_parity (enum machine_mode, rtx, rtx);
|
||||
|
||||
#ifndef HAVE_conditional_trap
|
||||
#define HAVE_conditional_trap 0
|
||||
#define gen_conditional_trap(a,b) (abort (), NULL_RTX)
|
||||
#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
|
||||
#endif
|
||||
|
||||
/* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
|
||||
@ -138,10 +138,9 @@ add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
|
||||
rtx last_insn, insn, set;
|
||||
rtx note;
|
||||
|
||||
if (! insns
|
||||
|| ! INSN_P (insns)
|
||||
|| NEXT_INSN (insns) == NULL_RTX)
|
||||
abort ();
|
||||
gcc_assert (insns);
|
||||
gcc_assert (INSN_P (insns));
|
||||
gcc_assert (NEXT_INSN (insns) != NULL_RTX);
|
||||
|
||||
if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
|
||||
&& GET_RTX_CLASS (code) != RTX_BIN_ARITH
|
||||
@ -672,8 +671,7 @@ expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
|
||||
enum optab_methods methods)
|
||||
{
|
||||
optab binop = code_to_optab[(int) code];
|
||||
if (binop == 0)
|
||||
abort ();
|
||||
gcc_assert (binop != 0);
|
||||
|
||||
return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
|
||||
}
|
||||
@ -1712,9 +1710,8 @@ expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
|
||||
|
||||
/* We could handle this, but we should always be called with a pseudo
|
||||
for our targets and all insns should take them as outputs. */
|
||||
if (! (*insn_data[icode].operand[0].predicate) (targ0, mode)
|
||||
|| ! (*insn_data[icode].operand[1].predicate) (targ1, mode))
|
||||
abort ();
|
||||
gcc_assert ((*insn_data[icode].operand[0].predicate) (targ0, mode));
|
||||
gcc_assert ((*insn_data[icode].operand[1].predicate) (targ1, mode));
|
||||
|
||||
pat = GEN_FCN (icode) (targ0, targ1, xop0);
|
||||
if (pat)
|
||||
@ -1841,9 +1838,8 @@ expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
|
||||
|
||||
/* We could handle this, but we should always be called with a pseudo
|
||||
for our targets and all insns should take them as outputs. */
|
||||
if (! (*insn_data[icode].operand[0].predicate) (targ0, mode)
|
||||
|| ! (*insn_data[icode].operand[3].predicate) (targ1, mode))
|
||||
abort ();
|
||||
gcc_assert ((*insn_data[icode].operand[0].predicate) (targ0, mode));
|
||||
gcc_assert ((*insn_data[icode].operand[3].predicate) (targ1, mode));
|
||||
|
||||
pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
|
||||
if (pat)
|
||||
@ -1906,8 +1902,7 @@ expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
|
||||
rtx insns;
|
||||
|
||||
/* Exactly one of TARG0 or TARG1 should be non-NULL. */
|
||||
if (!((targ0 != NULL_RTX) ^ (targ1 != NULL_RTX)))
|
||||
abort ();
|
||||
gcc_assert ((targ0 != NULL_RTX) ^ (targ1 != NULL_RTX));
|
||||
|
||||
mode = GET_MODE (op0);
|
||||
if (!binoptab->handlers[(int) mode].libfunc)
|
||||
@ -1944,8 +1939,7 @@ expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
|
||||
rtx target, int unsignedp)
|
||||
{
|
||||
optab unop = code_to_optab[(int) code];
|
||||
if (unop == 0)
|
||||
abort ();
|
||||
gcc_assert (unop != 0);
|
||||
|
||||
return expand_unop (mode, unop, op0, target, unsignedp);
|
||||
}
|
||||
@ -2620,8 +2614,7 @@ emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
|
||||
}
|
||||
}
|
||||
|
||||
if (set == 0)
|
||||
abort ();
|
||||
gcc_assert (set != 0);
|
||||
|
||||
if (! reg_overlap_mentioned_p (target, SET_DEST (set)))
|
||||
{
|
||||
@ -2932,7 +2925,7 @@ prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
|
||||
|
||||
/* They could both be VOIDmode if both args are immediate constants,
|
||||
but we should fold that at an earlier stage.
|
||||
With no special code here, this will call abort,
|
||||
With no special code here, this will assert out,
|
||||
reminding the programmer to implement such folding. */
|
||||
|
||||
if (mode != BLKmode && flag_force_mem)
|
||||
@ -2961,11 +2954,10 @@ prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
|
||||
y = force_reg (mode, y);
|
||||
|
||||
#ifdef HAVE_cc0
|
||||
/* Abort if we have a non-canonical comparison. The RTL documentation
|
||||
/* Assert out if we have a non-canonical comparison. The RTL documentation
|
||||
states that canonical comparisons are required only for targets which
|
||||
have cc0. */
|
||||
if (CONSTANT_P (x) && ! CONSTANT_P (y))
|
||||
abort ();
|
||||
gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
|
||||
#endif
|
||||
|
||||
/* Don't let both operands fail to indicate the mode. */
|
||||
@ -2984,8 +2976,7 @@ prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
|
||||
rtx opalign
|
||||
= GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
|
||||
|
||||
if (size == 0)
|
||||
abort ();
|
||||
gcc_assert (size != 0);
|
||||
|
||||
/* Try to use a memory block compare insn - either cmpstr
|
||||
or cmpmem will do. */
|
||||
@ -3082,11 +3073,8 @@ prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
|
||||
return;
|
||||
}
|
||||
|
||||
if (class == MODE_FLOAT)
|
||||
prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
|
||||
|
||||
else
|
||||
abort ();
|
||||
gcc_assert (class == MODE_FLOAT);
|
||||
prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
|
||||
}
|
||||
|
||||
/* Before emitting an insn with code ICODE, make sure that X, which is going
|
||||
@ -3126,7 +3114,7 @@ emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
|
||||
enum machine_mode wider_mode = mode;
|
||||
|
||||
/* Try combined insns first. */
|
||||
do
|
||||
for (;;)
|
||||
{
|
||||
enum insn_code icode;
|
||||
PUT_MODE (test, wider_mode);
|
||||
@ -3169,15 +3157,12 @@ emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
|
||||
return;
|
||||
}
|
||||
|
||||
if (class != MODE_INT && class != MODE_FLOAT
|
||||
&& class != MODE_COMPLEX_FLOAT)
|
||||
break;
|
||||
gcc_assert (class == MODE_INT || class == MODE_FLOAT
|
||||
|| class == MODE_COMPLEX_FLOAT);
|
||||
|
||||
wider_mode = GET_MODE_WIDER_MODE (wider_mode);
|
||||
gcc_assert (wider_mode != VOIDmode);
|
||||
}
|
||||
while (wider_mode != VOIDmode);
|
||||
|
||||
abort ();
|
||||
}
|
||||
|
||||
/* Generate code to compare X with Y so that the condition codes are
|
||||
@ -3208,8 +3193,7 @@ emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
|
||||
{
|
||||
/* If we're not emitting a branch, this means some caller
|
||||
is out of sync. */
|
||||
if (! label)
|
||||
abort ();
|
||||
gcc_assert (label);
|
||||
|
||||
op0 = y, op1 = x;
|
||||
comparison = swap_condition (comparison);
|
||||
@ -3280,8 +3264,7 @@ prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
|
||||
}
|
||||
}
|
||||
|
||||
if (mode == VOIDmode)
|
||||
abort ();
|
||||
gcc_assert (mode != VOIDmode);
|
||||
|
||||
if (mode != orig_mode)
|
||||
{
|
||||
@ -3339,7 +3322,7 @@ prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
|
||||
break;
|
||||
|
||||
default:
|
||||
abort ();
|
||||
gcc_unreachable ();
|
||||
}
|
||||
equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
|
||||
equiv, true_rtx, false_rtx);
|
||||
@ -3641,13 +3624,12 @@ gen_add2_insn (rtx x, rtx y)
|
||||
{
|
||||
int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
|
||||
|
||||
if (! ((*insn_data[icode].operand[0].predicate)
|
||||
(x, insn_data[icode].operand[0].mode))
|
||||
|| ! ((*insn_data[icode].operand[1].predicate)
|
||||
(x, insn_data[icode].operand[1].mode))
|
||||
|| ! ((*insn_data[icode].operand[2].predicate)
|
||||
(y, insn_data[icode].operand[2].mode)))
|
||||
abort ();
|
||||
gcc_assert ((*insn_data[icode].operand[0].predicate)
|
||||
(x, insn_data[icode].operand[0].mode));
|
||||
gcc_assert ((*insn_data[icode].operand[1].predicate)
|
||||
(x, insn_data[icode].operand[1].mode));
|
||||
gcc_assert ((*insn_data[icode].operand[2].predicate)
|
||||
(y, insn_data[icode].operand[2].mode));
|
||||
|
||||
return (GEN_FCN (icode) (x, x, y));
|
||||
}
|
||||
@ -3676,8 +3658,7 @@ have_add2_insn (rtx x, rtx y)
|
||||
{
|
||||
int icode;
|
||||
|
||||
if (GET_MODE (x) == VOIDmode)
|
||||
abort ();
|
||||
gcc_assert (GET_MODE (x) != VOIDmode);
|
||||
|
||||
icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
|
||||
|
||||
@ -3702,13 +3683,12 @@ gen_sub2_insn (rtx x, rtx y)
|
||||
{
|
||||
int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
|
||||
|
||||
if (! ((*insn_data[icode].operand[0].predicate)
|
||||
(x, insn_data[icode].operand[0].mode))
|
||||
|| ! ((*insn_data[icode].operand[1].predicate)
|
||||
(x, insn_data[icode].operand[1].mode))
|
||||
|| ! ((*insn_data[icode].operand[2].predicate)
|
||||
(y, insn_data[icode].operand[2].mode)))
|
||||
abort ();
|
||||
gcc_assert ((*insn_data[icode].operand[0].predicate)
|
||||
(x, insn_data[icode].operand[0].mode));
|
||||
gcc_assert ((*insn_data[icode].operand[1].predicate)
|
||||
(x, insn_data[icode].operand[1].mode));
|
||||
gcc_assert ((*insn_data[icode].operand[2].predicate)
|
||||
(y, insn_data[icode].operand[2].mode));
|
||||
|
||||
return (GEN_FCN (icode) (x, x, y));
|
||||
}
|
||||
@ -3737,8 +3717,7 @@ have_sub2_insn (rtx x, rtx y)
|
||||
{
|
||||
int icode;
|
||||
|
||||
if (GET_MODE (x) == VOIDmode)
|
||||
abort ();
|
||||
gcc_assert (GET_MODE (x) != VOIDmode);
|
||||
|
||||
icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
|
||||
|
||||
@ -3864,8 +3843,7 @@ expand_float (rtx to, rtx from, int unsignedp)
|
||||
enum machine_mode fmode, imode;
|
||||
|
||||
/* Crash now, because we won't be able to decide which mode to use. */
|
||||
if (GET_MODE (from) == VOIDmode)
|
||||
abort ();
|
||||
gcc_assert (GET_MODE (from) != VOIDmode);
|
||||
|
||||
/* Look for an insn to do the conversion. Do it in the specified
|
||||
modes if possible; otherwise convert either input, output or both to
|
||||
@ -4026,8 +4004,7 @@ expand_float (rtx to, rtx from, int unsignedp)
|
||||
from = force_not_mem (from);
|
||||
|
||||
libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
|
||||
if (!libfunc)
|
||||
abort ();
|
||||
gcc_assert (libfunc);
|
||||
|
||||
start_sequence ();
|
||||
|
||||
@ -4210,8 +4187,7 @@ expand_fix (rtx to, rtx from, int unsignedp)
|
||||
|
||||
convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
|
||||
libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
|
||||
if (!libfunc)
|
||||
abort ();
|
||||
gcc_assert (libfunc);
|
||||
|
||||
if (flag_force_mem)
|
||||
from = force_not_mem (from);
|
||||
@ -4824,8 +4800,7 @@ debug_optab_libfuncs (void)
|
||||
h = &o->handlers[j];
|
||||
if (h->libfunc)
|
||||
{
|
||||
if (GET_CODE (h->libfunc) != SYMBOL_REF)
|
||||
abort ();
|
||||
gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
|
||||
fprintf (stderr, "%s\t%s:\t%s\n",
|
||||
GET_RTX_NAME (o->code),
|
||||
GET_MODE_NAME (j),
|
||||
@ -4845,8 +4820,7 @@ debug_optab_libfuncs (void)
|
||||
h = &o->handlers[j][k];
|
||||
if (h->libfunc)
|
||||
{
|
||||
if (GET_CODE (h->libfunc) != SYMBOL_REF)
|
||||
abort ();
|
||||
gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
|
||||
fprintf (stderr, "%s\t%s\t%s:\t%s\n",
|
||||
GET_RTX_NAME (o->code),
|
||||
GET_MODE_NAME (j),
|
||||
|
@ -1003,10 +1003,8 @@ common_handle_option (size_t scode, const char *arg, int value)
default:
/* If the flag was handled in a standard way, assume the lack of
processing here is intentional. */
if (cl_options[scode].flag_var)
break;

abort ();
gcc_assert (cl_options[scode].flag_var);
break;
}

return 1;
@ -61,8 +61,7 @@ set_param_value (const char *name, int value)
size_t i;

/* Make sure nobody tries to set a parameter to an invalid value. */
if (value == INVALID_PARAM_VAL)
abort ();
gcc_assert (value != INVALID_PARAM_VAL);

/* Scan the parameter table to find a matching entry. */
for (i = 0; i < num_compiler_params; ++i)
gcc/passes.c
@ -121,8 +121,8 @@ open_dump_file (enum tree_dump_index index, tree decl)
|
||||
|
||||
timevar_push (TV_DUMP);
|
||||
|
||||
if (dump_file != NULL || dump_file_name != NULL)
|
||||
abort ();
|
||||
gcc_assert (!dump_file);
|
||||
gcc_assert (!dump_file_name);
|
||||
|
||||
dump_file_name = get_dump_file_name (index);
|
||||
initializing_dump = !dump_initialized_p (index);
|
||||
@ -289,11 +289,9 @@ rest_of_handle_final (void)
|
||||
different from the DECL_NAME name used in the source file. */
|
||||
|
||||
x = DECL_RTL (current_function_decl);
|
||||
if (!MEM_P (x))
|
||||
abort ();
|
||||
gcc_assert (MEM_P (x));
|
||||
x = XEXP (x, 0);
|
||||
if (GET_CODE (x) != SYMBOL_REF)
|
||||
abort ();
|
||||
gcc_assert (GET_CODE (x) == SYMBOL_REF);
|
||||
fnname = XSTR (x, 0);
|
||||
|
||||
assemble_start_function (current_function_decl, fnname);
|
||||
|
@@ -305,9 +305,7 @@ expr_equiv_p (const void *exp1p, const void *exp2p)
   struct expr *exp1 = (struct expr *) exp1p;
   struct expr *exp2 = (struct expr *) exp2p;
   int equiv_p = exp_equiv_p (exp1->expr, exp2->expr, 0, true);
-  if (equiv_p
-      && exp1->hash != exp2->hash)
-    abort ();
+  gcc_assert (!equiv_p || exp1->hash == exp2->hash);
   return equiv_p;
 }
 
@@ -485,11 +483,8 @@ oprs_unchanged_p (rtx x, rtx insn, bool after_insn)
   switch (code)
     {
     case REG:
-#ifdef ENABLE_CHECKING
       /* We are called after register allocation.  */
-      if (REGNO (x) >= FIRST_PSEUDO_REGISTER)
-        abort ();
-#endif
+      gcc_assert (REGNO (x) < FIRST_PSEUDO_REGISTER);
       if (after_insn)
         /* If the last CUID setting the insn is less than the CUID of
            INSN, then reg X is not changed in or after INSN.  */
@@ -785,8 +780,7 @@ hash_scan_set (rtx insn)
 
 #ifdef ENABLE_CHEKCING
       /* We shouldn't have any EH_REGION notes post reload.  */
-      if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
-        abort ();
+      gcc_assert (!find_reg_note (insn, REG_EH_REGION, NULL_RTX));
 #endif
 
       if (REG_P (dest))
@@ -934,11 +928,9 @@ reg_set_between_after_reload_p (rtx reg, rtx from_insn, rtx to_insn)
   rtx insn;
   int regno;
 
-#ifdef ENABLE_CHECKING
   /* We are called after register allocation.  */
-  if (!REG_P (reg) || REGNO (reg) >= FIRST_PSEUDO_REGISTER)
-    abort ();
-#endif
+  gcc_assert (REG_P (reg));
+  gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
 
   if (from_insn == to_insn)
     return NULL_RTX;
@@ -973,11 +965,9 @@ reg_used_between_after_reload_p (rtx reg, rtx from_insn, rtx to_insn)
   rtx insn;
   int regno;
 
-#ifdef ENABLE_CHECKING
   /* We are called after register allocation.  */
-  if (!REG_P (reg) || REGNO (reg) >= FIRST_PSEUDO_REGISTER)
-    abort ();
-#endif
+  gcc_assert (REG_P (reg));
+  gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
 
   if (from_insn == to_insn)
     return NULL_RTX;
@@ -1027,7 +1017,7 @@ get_avail_load_store_reg (rtx insn)
     return SET_DEST(PATTERN(insn));
   if (REG_P (SET_SRC (PATTERN (insn))))  /* A store.  */
     return SET_SRC (PATTERN (insn));
-  abort ();
+  gcc_unreachable ();
 }
 
 /* Return nonzero if the predecessors of BB are "well behaved".  */
@@ -1109,8 +1099,8 @@ eliminate_partially_redundant_load (basic_block bb, rtx insn,
         {
           /* Check if the loaded register is not used.  */
           avail_insn = a_occr->insn;
-          if (! (avail_reg = get_avail_load_store_reg (avail_insn)))
-            abort ();
+          avail_reg = get_avail_load_store_reg (avail_insn);
+          gcc_assert (avail_reg);
           /* Make sure we can generate a move from register avail_reg to
              dest.  */
           extract_insn (gen_move_insn (copy_rtx (dest),
@@ -1181,8 +1171,7 @@ eliminate_partially_redundant_load (basic_block bb, rtx insn,
       /* Set avail_reg to be the register having the value of the
          memory.  */
       avail_reg = get_avail_load_store_reg (avail_insn);
-      if (! avail_reg)
-        abort ();
+      gcc_assert (avail_reg);
 
       insert_insn_on_edge (gen_move_insn (copy_rtx (dest),
                                           copy_rtx (avail_reg)),
@@ -280,7 +280,7 @@ reload_cse_simplify_set (rtx set, rtx insn)
              if (this_val == trunc_int_for_mode (this_val, GET_MODE (src)))
                break;
            default:
-             abort ();
+             gcc_unreachable ();
            }
          this_rtx = GEN_INT (this_val);
        }
@@ -1057,8 +1057,7 @@ reload_combine_note_use (rtx *xp, rtx insn)
       if (REG_P (SET_DEST (x)))
        {
          /* No spurious CLOBBERs of pseudo registers may remain.  */
-         if (REGNO (SET_DEST (x)) >= FIRST_PSEUDO_REGISTER)
-           abort ();
+         gcc_assert (REGNO (SET_DEST (x)) < FIRST_PSEUDO_REGISTER);
          return;
        }
       break;
@@ -1078,8 +1077,7 @@ reload_combine_note_use (rtx *xp, rtx insn)
        int nregs;
 
        /* No spurious USEs of pseudo registers may remain.  */
-       if (regno >= FIRST_PSEUDO_REGISTER)
-         abort ();
+       gcc_assert (regno < FIRST_PSEUDO_REGISTER);
 
        nregs = hard_regno_nregs[regno][GET_MODE (x)];
 
@@ -179,8 +179,7 @@ tree_predicted_by_p (basic_block bb, enum br_predictor predictor)
 void
 predict_insn (rtx insn, enum br_predictor predictor, int probability)
 {
-  if (!any_condjump_p (insn))
-    abort ();
+  gcc_assert (any_condjump_p (insn));
   if (!flag_guess_branch_prob)
     return;
 
@@ -1106,8 +1105,7 @@ expected_value_to_br_prob (void)
       cond = simplify_rtx (cond);
 
       /* Turn the condition into a scaled branch probability.  */
-      if (cond != const_true_rtx && cond != const0_rtx)
-        abort ();
+      gcc_assert (cond == const_true_rtx || cond == const0_rtx);
       predict_insn_def (insn, PRED_BUILTIN_EXPECT,
                         cond == const_true_rtx ? TAKEN : NOT_TAKEN);
     }
@@ -1208,8 +1206,8 @@ propagate_freq (struct loop *loop)
     {
 #ifdef ENABLE_CHECKING
       for (e = bb->pred; e; e = e->pred_next)
-       if (BLOCK_INFO (e->src)->tovisit && !(e->flags & EDGE_DFS_BACK))
-         abort ();
+       gcc_assert (!BLOCK_INFO (e->src)->tovisit
+                   || (e->flags & EDGE_DFS_BACK));
 #endif
 
       for (e = bb->pred; e; e = e->pred_next)
@@ -1353,8 +1351,7 @@ expensive_function_p (int threshold)
 
   /* We can not compute accurately for large thresholds due to scaled
      frequencies.  */
-  if (threshold > BB_FREQ_MAX)
-    abort ();
+  gcc_assert (threshold < BB_FREQ_MAX);
 
   /* Frequencies are out of range.  This either means that function contains
      internal loop executing more than BB_FREQ_MAX times or profile feedback
@@ -231,8 +231,7 @@ pp_base_format_text (pretty_printer *pp, text_info *text)
            break;
          }
        /* We don't support precision beyond that of "long long".  */
-       if (precision > 2)
-         abort();
+       gcc_assert (precision <= 2);
 
        if (quoted)
          pp_string (pp, open_quote);
@@ -319,10 +318,10 @@ pp_base_format_text (pretty_printer *pp, text_info *text)
            int n;
            const char *s;
            /* We handle no precision specifier but '%.*s'.  */
-           if (*++text->format_spec != '*')
-             abort ();
-           else if (*++text->format_spec != 's')
-             abort ();
+           ++text->format_spec;
+           gcc_assert (*text->format_spec == '*');
+           ++text->format_spec;
+           gcc_assert (*text->format_spec == 's');
            n = va_arg (*text->args_ptr, int);
            s = va_arg (*text->args_ptr, const char *);
            pp_append_text (pp, s, s + n);
@@ -330,14 +329,16 @@ pp_base_format_text (pretty_printer *pp, text_info *text)
          break;
 
        default:
-         if (!pp_format_decoder (pp) || !(*pp_format_decoder (pp)) (pp, text))
-           {
-             /* Hmmm.  The client failed to install a format translator
-                but called us with an unrecognized format.  Or, maybe, the
-                translated string just contains an invalid format, or
-                has formats in the wrong order.  Sorry.  */
-             abort ();
-           }
+         {
+           bool ok;
+
+           /* Make sure there's a format translator.  */
+           gcc_assert (pp_format_decoder (pp));
+           ok = pp_format_decoder (pp) (pp, text);
+           /* and make sure it recognized the format.  */
+           gcc_assert (ok);
+           break;
+         }
        }
       if (quoted)
        pp_string (pp, close_quote);
@@ -149,8 +149,7 @@ instrument_edges (struct edge_list *el)
 
          if (!inf->ignore && !inf->on_tree)
            {
-             if (e->flags & EDGE_ABNORMAL)
-               abort ();
+             gcc_assert (!(e->flags & EDGE_ABNORMAL));
              if (dump_file)
                fprintf (dump_file, "Edge %d to %d instrumented%s\n",
                         e->src->index, e->dest->index,
@@ -196,7 +195,7 @@ instrument_values (histogram_values values)
          break;
 
        default:
-         abort ();
+         gcc_unreachable ();
        }
       if (!coverage_counter_alloc (t, hist->n_counters))
        continue;
@@ -220,7 +219,7 @@ instrument_values (histogram_values values)
          break;
 
        default:
-         abort ();
+         gcc_unreachable ();
        }
     }
 }
@@ -420,8 +419,7 @@ compute_branch_probabilities (void)
              /* Calculate count for remaining edge by conservation.  */
              total = bb->count - total;
 
-             if (! e)
-               abort ();
+             gcc_assert (e);
              EDGE_INFO (e)->count_valid = 1;
              e->count = total;
              bi->succ_count--;
@@ -447,8 +445,7 @@ compute_branch_probabilities (void)
              /* Calculate count for remaining edge by conservation.  */
              total = bb->count - total + e->count;
 
-             if (! e)
-               abort ();
+             gcc_assert (e);
              EDGE_INFO (e)->count_valid = 1;
              e->count = total;
              bi->pred_count--;
@@ -466,17 +463,18 @@ compute_branch_probabilities (void)
   if (dump_file)
     fprintf (dump_file, "Graph solving took %d passes.\n\n", passes);
 
 #ifdef ENABLE_CHECKING
   /* If the graph has been correctly solved, every block will have a
      succ and pred count of zero.  */
   FOR_EACH_BB (bb)
     {
-      if (BB_INFO (bb)->succ_count || BB_INFO (bb)->pred_count)
-        abort ();
+      gcc_assert (!BB_INFO (bb)->succ_count);
+      gcc_assert (!BB_INFO (bb)->pred_count);
     }
 
 #endif
 
   /* For every edge, calculate its branch probability and add a reg_note
      to the branch insn to indicate this.  */
 
   for (i = 0; i < 20; i++)
     hist_br_prob[i] = 0;
   num_never_executed = 0;
@@ -984,8 +982,7 @@ branch_prob (void)
     {
       unsigned n_instrumented = instrument_edges (el);
 
-      if (n_instrumented != num_instrumented)
-        abort ();
+      gcc_assert (n_instrumented == num_instrumented);
 
       if (flag_profile_values)
        instrument_values (values);
@@ -1044,8 +1041,7 @@ union_groups (basic_block bb1, basic_block bb2)
 
   /* ??? I don't have a place for the rank field.  OK.  Lets go w/o it,
      this code is unlikely going to be performance problem anyway.  */
-  if (bb1g == bb2g)
-    abort ();
+  gcc_assert (bb1g != bb2g);
 
   bb1g->aux = bb2g;
 }
@@ -1190,8 +1186,7 @@ void
 tree_register_profile_hooks (void)
 {
   profile_hooks = &tree_profile_hooks;
-  if (!ir_type ())
-    abort ();
+  gcc_assert (ir_type ());
 }
 
 /* Set up hooks to enable RTL-based profiling.  */
@@ -1200,6 +1195,5 @@ void
 rtl_register_profile_hooks (void)
 {
   profile_hooks = &rtl_profile_hooks;
-  if (ir_type ())
-    abort ();
+  gcc_assert (!ir_type ());
 }
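For readers unfamiliar with the GCC internals involved: the whole patch applies one mechanical pattern, replacing an explicit "if (...) abort ();" check with gcc_assert and a bare abort () on an impossible path with gcc_unreachable. The stand-alone C sketch below illustrates that pattern only; the two macro definitions are simplified stand-ins written for this example (the real ones live in gcc/system.h and behave differently with --disable-checking), and classify () is an invented toy function, not code from this patch.

/* Illustrative sketch of the abort -> gcc_assert / gcc_unreachable
   conversion.  The macros below are simplified stand-ins, provided only
   so this example compiles on its own.  */

#include <stdio.h>
#include <stdlib.h>

#define gcc_assert(EXPR)                                                \
  ((void) (!(EXPR)                                                      \
           ? (fprintf (stderr, "assertion failed: %s\n", #EXPR),        \
              abort (), 0)                                              \
           : 0))

#define gcc_unreachable()                                               \
  (fprintf (stderr, "reached supposedly unreachable code\n"), abort ())

static int
classify (int code)
{
  /* Old style:  if (code < 0) abort ();  */
  gcc_assert (code >= 0);

  switch (code)
    {
    case 0:
      return 1;
    case 1:
      return 2;
    default:
      /* Old style: a bare abort () marking an impossible case.  */
      gcc_unreachable ();
    }
}

int
main (void)
{
  printf ("%d\n", classify (1));  /* Prints 2.  */
  return 0;
}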